xref: /openbmc/linux/drivers/nvme/host/fc.c (revision 1c2dd16a)
1 /*
2  * Copyright (c) 2016 Avago Technologies.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of version 2 of the GNU General Public License as
6  * published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful.
9  * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
10  * INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
11  * PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO
12  * THE EXTENT THAT SUCH DISCLAIMERS ARE HELD TO BE LEGALLY INVALID.
13  * See the GNU General Public License for more details, a copy of which
14  * can be found in the file COPYING included with this package
15  *
16  */
17 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18 #include <linux/module.h>
19 #include <linux/parser.h>
20 #include <uapi/scsi/fc/fc_fs.h>
21 #include <uapi/scsi/fc/fc_els.h>
22 #include <linux/delay.h>
23 
24 #include "nvme.h"
25 #include "fabrics.h"
26 #include <linux/nvme-fc-driver.h>
27 #include <linux/nvme-fc.h>
28 
29 
30 /* *************************** Data Structures/Defines ****************** */
31 
32 
33 /*
34  * We handle AEN commands ourselves and don't even let the
35  * block layer know about them.
36  */
37 #define NVME_FC_NR_AEN_COMMANDS	1
38 #define NVME_FC_AQ_BLKMQ_DEPTH	\
39 	(NVMF_AQ_DEPTH - NVME_FC_NR_AEN_COMMANDS)
40 #define AEN_CMDID_BASE		(NVME_FC_AQ_BLKMQ_DEPTH + 1)
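/*
 * Worked example (assuming the fabrics default NVMF_AQ_DEPTH of 32, an
 * assumption not stated in this file): the admin blk-mq queue is sized
 * to 32 - 1 = 31 tags, and the single AEN command uses command id
 * AEN_CMDID_BASE = 32, just above the blk-mq managed tag range.
 */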
41 
42 enum nvme_fc_queue_flags {
43 	NVME_FC_Q_CONNECTED = (1 << 0),
44 };
45 
46 #define NVMEFC_QUEUE_DELAY	3		/* ms units */
47 
48 #define NVME_FC_MAX_CONNECT_ATTEMPTS	1
49 
50 struct nvme_fc_queue {
51 	struct nvme_fc_ctrl	*ctrl;
52 	struct device		*dev;
53 	struct blk_mq_hw_ctx	*hctx;
54 	void			*lldd_handle;
55 	int			queue_size;
56 	size_t			cmnd_capsule_len;
57 	u32			qnum;
58 	u32			rqcnt;
59 	u32			seqno;
60 
61 	u64			connection_id;
62 	atomic_t		csn;
63 
64 	unsigned long		flags;
65 } __aligned(sizeof(u64));	/* alignment for other things alloc'd with */
66 
67 enum nvme_fcop_flags {
68 	FCOP_FLAGS_TERMIO	= (1 << 0),
69 	FCOP_FLAGS_RELEASED	= (1 << 1),
70 	FCOP_FLAGS_COMPLETE	= (1 << 2),
71 	FCOP_FLAGS_AEN		= (1 << 3),
72 };
73 
74 struct nvmefc_ls_req_op {
75 	struct nvmefc_ls_req	ls_req;
76 
77 	struct nvme_fc_rport	*rport;
78 	struct nvme_fc_queue	*queue;
79 	struct request		*rq;
80 	u32			flags;
81 
82 	int			ls_error;
83 	struct completion	ls_done;
84 	struct list_head	lsreq_list;	/* rport->ls_req_list */
85 	bool			req_queued;
86 };
87 
88 enum nvme_fcpop_state {
89 	FCPOP_STATE_UNINIT	= 0,
90 	FCPOP_STATE_IDLE	= 1,
91 	FCPOP_STATE_ACTIVE	= 2,
92 	FCPOP_STATE_ABORTED	= 3,
93 	FCPOP_STATE_COMPLETE	= 4,
94 };
95 
96 struct nvme_fc_fcp_op {
97 	struct nvme_request	nreq;		/*
98 						 * nvme/host/core.c
99 						 * requires this to be
100 						 * the 1st element in the
101 						 * private structure
102 						 * associated with the
103 						 * request.
104 						 */
105 	struct nvmefc_fcp_req	fcp_req;
106 
107 	struct nvme_fc_ctrl	*ctrl;
108 	struct nvme_fc_queue	*queue;
109 	struct request		*rq;
110 
111 	atomic_t		state;
112 	u32			flags;
113 	u32			rqno;
114 	u32			nents;
115 
116 	struct nvme_fc_cmd_iu	cmd_iu;
117 	struct nvme_fc_ersp_iu	rsp_iu;
118 };
119 
120 struct nvme_fc_lport {
121 	struct nvme_fc_local_port	localport;
122 
123 	struct ida			endp_cnt;
124 	struct list_head		port_list;	/* nvme_fc_port_list */
125 	struct list_head		endp_list;
126 	struct device			*dev;	/* physical device for dma */
127 	struct nvme_fc_port_template	*ops;
128 	struct kref			ref;
129 } __aligned(sizeof(u64));	/* alignment for other things alloc'd with */
130 
131 struct nvme_fc_rport {
132 	struct nvme_fc_remote_port	remoteport;
133 
134 	struct list_head		endp_list; /* for lport->endp_list */
135 	struct list_head		ctrl_list;
136 	struct list_head		ls_req_list;
137 	struct device			*dev;	/* physical device for dma */
138 	struct nvme_fc_lport		*lport;
139 	spinlock_t			lock;
140 	struct kref			ref;
141 } __aligned(sizeof(u64));	/* alignment for other things alloc'd with */
142 
143 enum nvme_fcctrl_flags {
144 	FCCTRL_TERMIO		= (1 << 0),
145 };
146 
147 struct nvme_fc_ctrl {
148 	spinlock_t		lock;
149 	struct nvme_fc_queue	*queues;
150 	struct device		*dev;
151 	struct nvme_fc_lport	*lport;
152 	struct nvme_fc_rport	*rport;
153 	u32			queue_count;
154 	u32			cnum;
155 
156 	u64			association_id;
157 
158 	u64			cap;
159 
160 	struct list_head	ctrl_list;	/* rport->ctrl_list */
161 
162 	struct blk_mq_tag_set	admin_tag_set;
163 	struct blk_mq_tag_set	tag_set;
164 
165 	struct work_struct	delete_work;
166 	struct work_struct	reset_work;
167 	struct delayed_work	connect_work;
168 	int			reconnect_delay;
169 	int			connect_attempts;
170 
171 	struct kref		ref;
172 	u32			flags;
173 	u32			iocnt;
174 
175 	struct nvme_fc_fcp_op	aen_ops[NVME_FC_NR_AEN_COMMANDS];
176 
177 	struct nvme_ctrl	ctrl;
178 };
179 
180 static inline struct nvme_fc_ctrl *
181 to_fc_ctrl(struct nvme_ctrl *ctrl)
182 {
183 	return container_of(ctrl, struct nvme_fc_ctrl, ctrl);
184 }
185 
186 static inline struct nvme_fc_lport *
187 localport_to_lport(struct nvme_fc_local_port *portptr)
188 {
189 	return container_of(portptr, struct nvme_fc_lport, localport);
190 }
191 
192 static inline struct nvme_fc_rport *
193 remoteport_to_rport(struct nvme_fc_remote_port *portptr)
194 {
195 	return container_of(portptr, struct nvme_fc_rport, remoteport);
196 }
197 
198 static inline struct nvmefc_ls_req_op *
199 ls_req_to_lsop(struct nvmefc_ls_req *lsreq)
200 {
201 	return container_of(lsreq, struct nvmefc_ls_req_op, ls_req);
202 }
203 
204 static inline struct nvme_fc_fcp_op *
205 fcp_req_to_fcp_op(struct nvmefc_fcp_req *fcpreq)
206 {
207 	return container_of(fcpreq, struct nvme_fc_fcp_op, fcp_req);
208 }
209 
210 
211 
212 /* *************************** Globals **************************** */
213 
214 
215 static DEFINE_SPINLOCK(nvme_fc_lock);
216 
217 static LIST_HEAD(nvme_fc_lport_list);
218 static DEFINE_IDA(nvme_fc_local_port_cnt);
219 static DEFINE_IDA(nvme_fc_ctrl_cnt);
220 
221 static struct workqueue_struct *nvme_fc_wq;
222 
223 
224 
225 /* *********************** FC-NVME Port Management ************************ */
226 
227 static int __nvme_fc_del_ctrl(struct nvme_fc_ctrl *);
228 static void __nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *,
229 			struct nvme_fc_queue *, unsigned int);
230 
231 
232 /**
233  * nvme_fc_register_localport - transport entry point called by an
234  *                              LLDD to register the existence of a NVME
235  *                              host FC port.
236  * @pinfo:     pointer to information about the port to be registered
237  * @template:  LLDD entrypoints and operational parameters for the port
238  * @dev:       physical hardware device node port corresponds to. Will be
239  *             used for DMA mappings
240  * @portptr:   pointer to a local port pointer. Upon success, the routine
241  *             will allocate a nvme_fc_local_port structure and place its
242  *             address in the local port pointer. Upon failure, local port
243  *             pointer will be set to 0.
244  *
245  * Returns:
246  * a completion status. Must be 0 upon success; a negative errno
247  * (ex: -ENXIO) upon failure.
248  */
249 int
250 nvme_fc_register_localport(struct nvme_fc_port_info *pinfo,
251 			struct nvme_fc_port_template *template,
252 			struct device *dev,
253 			struct nvme_fc_local_port **portptr)
254 {
255 	struct nvme_fc_lport *newrec;
256 	unsigned long flags;
257 	int ret, idx;
258 
259 	if (!template->localport_delete || !template->remoteport_delete ||
260 	    !template->ls_req || !template->fcp_io ||
261 	    !template->ls_abort || !template->fcp_abort ||
262 	    !template->max_hw_queues || !template->max_sgl_segments ||
263 	    !template->max_dif_sgl_segments || !template->dma_boundary) {
264 		ret = -EINVAL;
265 		goto out_reghost_failed;
266 	}
267 
268 	newrec = kmalloc((sizeof(*newrec) + template->local_priv_sz),
269 			 GFP_KERNEL);
270 	if (!newrec) {
271 		ret = -ENOMEM;
272 		goto out_reghost_failed;
273 	}
274 
275 	idx = ida_simple_get(&nvme_fc_local_port_cnt, 0, 0, GFP_KERNEL);
276 	if (idx < 0) {
277 		ret = -ENOSPC;
278 		goto out_fail_kfree;
279 	}
280 
281 	if (!get_device(dev) && dev) {
282 		ret = -ENODEV;
283 		goto out_ida_put;
284 	}
285 
286 	INIT_LIST_HEAD(&newrec->port_list);
287 	INIT_LIST_HEAD(&newrec->endp_list);
288 	kref_init(&newrec->ref);
289 	newrec->ops = template;
290 	newrec->dev = dev;
291 	ida_init(&newrec->endp_cnt);
292 	newrec->localport.private = &newrec[1];
293 	newrec->localport.node_name = pinfo->node_name;
294 	newrec->localport.port_name = pinfo->port_name;
295 	newrec->localport.port_role = pinfo->port_role;
296 	newrec->localport.port_id = pinfo->port_id;
297 	newrec->localport.port_state = FC_OBJSTATE_ONLINE;
298 	newrec->localport.port_num = idx;
299 
300 	spin_lock_irqsave(&nvme_fc_lock, flags);
301 	list_add_tail(&newrec->port_list, &nvme_fc_lport_list);
302 	spin_unlock_irqrestore(&nvme_fc_lock, flags);
303 
304 	if (dev)
305 		dma_set_seg_boundary(dev, template->dma_boundary);
306 
307 	*portptr = &newrec->localport;
308 	return 0;
309 
310 out_ida_put:
311 	ida_simple_remove(&nvme_fc_local_port_cnt, idx);
312 out_fail_kfree:
313 	kfree(newrec);
314 out_reghost_failed:
315 	*portptr = NULL;
316 
317 	return ret;
318 }
319 EXPORT_SYMBOL_GPL(nvme_fc_register_localport);
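/*
 * Illustrative sketch only (not part of this driver): roughly how an LLDD
 * might register a local port with the routine above. The callback names
 * (example_ls_req() and friends), the private-struct sizes, and the
 * wwnn/wwpn/port_id values are hypothetical placeholders, not taken from
 * any real LLDD; FC_PORT_ROLE_NVME_INITIATOR is assumed to come from the
 * nvme-fc-driver.h transport header.
 */
#if 0
static struct nvme_fc_port_template example_lldd_template = {
	.localport_delete	= example_localport_delete,
	.remoteport_delete	= example_remoteport_delete,
	.create_queue		= example_create_queue,
	.delete_queue		= example_delete_queue,
	.ls_req			= example_ls_req,
	.fcp_io			= example_fcp_io,
	.ls_abort		= example_ls_abort,
	.fcp_abort		= example_fcp_abort,
	.max_hw_queues		= 4,
	.max_sgl_segments	= 64,
	.max_dif_sgl_segments	= 64,
	.dma_boundary		= 0xFFFFFFFF,
	.local_priv_sz		= sizeof(struct example_lport_priv),
	.remote_priv_sz		= sizeof(struct example_rport_priv),
	.lsrqst_priv_sz		= sizeof(struct example_ls_priv),
	.fcprqst_priv_sz	= sizeof(struct example_fcp_priv),
};

static int example_lldd_attach(struct device *hwdev)
{
	struct nvme_fc_port_info pinfo = {
		.node_name	= 0x200000109b123456ULL,	/* WWNN */
		.port_name	= 0x100000109b123456ULL,	/* WWPN */
		.port_role	= FC_PORT_ROLE_NVME_INITIATOR,
		.port_id	= 0x010203,			/* FC address */
	};
	struct nvme_fc_local_port *localport;
	int ret;

	ret = nvme_fc_register_localport(&pinfo, &example_lldd_template,
					 hwdev, &localport);
	if (ret)
		return ret;

	/* stash localport; its ->private area is local_priv_sz bytes */
	return 0;
}
#endif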
320 
321 static void
322 nvme_fc_free_lport(struct kref *ref)
323 {
324 	struct nvme_fc_lport *lport =
325 		container_of(ref, struct nvme_fc_lport, ref);
326 	unsigned long flags;
327 
328 	WARN_ON(lport->localport.port_state != FC_OBJSTATE_DELETED);
329 	WARN_ON(!list_empty(&lport->endp_list));
330 
331 	/* remove from transport list */
332 	spin_lock_irqsave(&nvme_fc_lock, flags);
333 	list_del(&lport->port_list);
334 	spin_unlock_irqrestore(&nvme_fc_lock, flags);
335 
336 	/* let the LLDD know we've finished tearing it down */
337 	lport->ops->localport_delete(&lport->localport);
338 
339 	ida_simple_remove(&nvme_fc_local_port_cnt, lport->localport.port_num);
340 	ida_destroy(&lport->endp_cnt);
341 
342 	put_device(lport->dev);
343 
344 	kfree(lport);
345 }
346 
347 static void
348 nvme_fc_lport_put(struct nvme_fc_lport *lport)
349 {
350 	kref_put(&lport->ref, nvme_fc_free_lport);
351 }
352 
353 static int
354 nvme_fc_lport_get(struct nvme_fc_lport *lport)
355 {
356 	return kref_get_unless_zero(&lport->ref);
357 }
358 
359 /**
360  * nvme_fc_unregister_localport - transport entry point called by an
361  *                              LLDD to deregister/remove a previously
362  *                              registered NVME host FC port.
363  * @localport: pointer to the (registered) local port that is to be
364  *             deregistered.
365  *
366  * Returns:
367  * a completion status. Must be 0 upon success; a negative errno
368  * (ex: -ENXIO) upon failure.
369  */
370 int
371 nvme_fc_unregister_localport(struct nvme_fc_local_port *portptr)
372 {
373 	struct nvme_fc_lport *lport = localport_to_lport(portptr);
374 	unsigned long flags;
375 
376 	if (!portptr)
377 		return -EINVAL;
378 
379 	spin_lock_irqsave(&nvme_fc_lock, flags);
380 
381 	if (portptr->port_state != FC_OBJSTATE_ONLINE) {
382 		spin_unlock_irqrestore(&nvme_fc_lock, flags);
383 		return -EINVAL;
384 	}
385 	portptr->port_state = FC_OBJSTATE_DELETED;
386 
387 	spin_unlock_irqrestore(&nvme_fc_lock, flags);
388 
389 	nvme_fc_lport_put(lport);
390 
391 	return 0;
392 }
393 EXPORT_SYMBOL_GPL(nvme_fc_unregister_localport);
394 
395 /**
396  * nvme_fc_register_remoteport - transport entry point called by an
397  *                              LLDD to register the existence of a NVME
398  *                              subsystem FC port on its fabric.
399  * @localport: pointer to the (registered) local port that the remote
400  *             subsystem port is connected to.
401  * @pinfo:     pointer to information about the port to be registered
402  * @portptr:   pointer to a remote port pointer. Upon success, the routine
403  *             will allocate a nvme_fc_remote_port structure and place its
404  *             address in the remote port pointer. Upon failure, remote port
405  *             pointer will be set to 0.
406  *
407  * Returns:
408  * a completion status. Must be 0 upon success; a negative errno
409  * (ex: -ENXIO) upon failure.
410  */
411 int
412 nvme_fc_register_remoteport(struct nvme_fc_local_port *localport,
413 				struct nvme_fc_port_info *pinfo,
414 				struct nvme_fc_remote_port **portptr)
415 {
416 	struct nvme_fc_lport *lport = localport_to_lport(localport);
417 	struct nvme_fc_rport *newrec;
418 	unsigned long flags;
419 	int ret, idx;
420 
421 	newrec = kmalloc((sizeof(*newrec) + lport->ops->remote_priv_sz),
422 			 GFP_KERNEL);
423 	if (!newrec) {
424 		ret = -ENOMEM;
425 		goto out_reghost_failed;
426 	}
427 
428 	if (!nvme_fc_lport_get(lport)) {
429 		ret = -ESHUTDOWN;
430 		goto out_kfree_rport;
431 	}
432 
433 	idx = ida_simple_get(&lport->endp_cnt, 0, 0, GFP_KERNEL);
434 	if (idx < 0) {
435 		ret = -ENOSPC;
436 		goto out_lport_put;
437 	}
438 
439 	INIT_LIST_HEAD(&newrec->endp_list);
440 	INIT_LIST_HEAD(&newrec->ctrl_list);
441 	INIT_LIST_HEAD(&newrec->ls_req_list);
442 	kref_init(&newrec->ref);
443 	spin_lock_init(&newrec->lock);
444 	newrec->remoteport.localport = &lport->localport;
445 	newrec->dev = lport->dev;
446 	newrec->lport = lport;
447 	newrec->remoteport.private = &newrec[1];
448 	newrec->remoteport.port_role = pinfo->port_role;
449 	newrec->remoteport.node_name = pinfo->node_name;
450 	newrec->remoteport.port_name = pinfo->port_name;
451 	newrec->remoteport.port_id = pinfo->port_id;
452 	newrec->remoteport.port_state = FC_OBJSTATE_ONLINE;
453 	newrec->remoteport.port_num = idx;
454 
455 	spin_lock_irqsave(&nvme_fc_lock, flags);
456 	list_add_tail(&newrec->endp_list, &lport->endp_list);
457 	spin_unlock_irqrestore(&nvme_fc_lock, flags);
458 
459 	*portptr = &newrec->remoteport;
460 	return 0;
461 
462 out_lport_put:
463 	nvme_fc_lport_put(lport);
464 out_kfree_rport:
465 	kfree(newrec);
466 out_reghost_failed:
467 	*portptr = NULL;
468 	return ret;
469 }
470 EXPORT_SYMBOL_GPL(nvme_fc_register_remoteport);
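/*
 * Illustrative sketch only (hypothetical LLDD code): how a discovered
 * NVME-capable target port might be reported to the transport. The
 * "localport" argument is the value returned by
 * nvme_fc_register_localport() above; FC_PORT_ROLE_NVME_TARGET is assumed
 * to come from the nvme-fc-driver.h transport header.
 */
#if 0
static int example_lldd_new_target(struct nvme_fc_local_port *localport,
				   u64 wwnn, u64 wwpn, u32 port_id)
{
	struct nvme_fc_port_info pinfo = {
		.node_name	= wwnn,
		.port_name	= wwpn,
		.port_role	= FC_PORT_ROLE_NVME_TARGET,
		.port_id	= port_id,
	};
	struct nvme_fc_remote_port *remoteport;
	int ret;

	ret = nvme_fc_register_remoteport(localport, &pinfo, &remoteport);
	if (ret)
		return ret;

	/*
	 * remoteport->private points at remote_priv_sz bytes the LLDD may
	 * use; controllers/associations are created separately via the
	 * fabrics "connect" path.
	 */
	return 0;
}
#endif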
471 
472 static void
473 nvme_fc_free_rport(struct kref *ref)
474 {
475 	struct nvme_fc_rport *rport =
476 		container_of(ref, struct nvme_fc_rport, ref);
477 	struct nvme_fc_lport *lport =
478 			localport_to_lport(rport->remoteport.localport);
479 	unsigned long flags;
480 
481 	WARN_ON(rport->remoteport.port_state != FC_OBJSTATE_DELETED);
482 	WARN_ON(!list_empty(&rport->ctrl_list));
483 
484 	/* remove from lport list */
485 	spin_lock_irqsave(&nvme_fc_lock, flags);
486 	list_del(&rport->endp_list);
487 	spin_unlock_irqrestore(&nvme_fc_lock, flags);
488 
489 	/* let the LLDD know we've finished tearing it down */
490 	lport->ops->remoteport_delete(&rport->remoteport);
491 
492 	ida_simple_remove(&lport->endp_cnt, rport->remoteport.port_num);
493 
494 	kfree(rport);
495 
496 	nvme_fc_lport_put(lport);
497 }
498 
499 static void
500 nvme_fc_rport_put(struct nvme_fc_rport *rport)
501 {
502 	kref_put(&rport->ref, nvme_fc_free_rport);
503 }
504 
505 static int
506 nvme_fc_rport_get(struct nvme_fc_rport *rport)
507 {
508 	return kref_get_unless_zero(&rport->ref);
509 }
510 
511 static int
512 nvme_fc_abort_lsops(struct nvme_fc_rport *rport)
513 {
514 	struct nvmefc_ls_req_op *lsop;
515 	unsigned long flags;
516 
517 restart:
518 	spin_lock_irqsave(&rport->lock, flags);
519 
520 	list_for_each_entry(lsop, &rport->ls_req_list, lsreq_list) {
521 		if (!(lsop->flags & FCOP_FLAGS_TERMIO)) {
522 			lsop->flags |= FCOP_FLAGS_TERMIO;
523 			spin_unlock_irqrestore(&rport->lock, flags);
524 			rport->lport->ops->ls_abort(&rport->lport->localport,
525 						&rport->remoteport,
526 						&lsop->ls_req);
527 			goto restart;
528 		}
529 	}
530 	spin_unlock_irqrestore(&rport->lock, flags);
531 
532 	return 0;
533 }
534 
535 /**
536  * nvme_fc_unregister_remoteport - transport entry point called by an
537  *                              LLDD to deregister/remove a previously
538  *                              registered NVME subsystem FC port.
539  * @remoteport: pointer to the (registered) remote port that is to be
540  *              deregistered.
541  *
542  * Returns:
543  * a completion status. Must be 0 upon success; a negative errno
544  * (ex: -ENXIO) upon failure.
545  */
546 int
547 nvme_fc_unregister_remoteport(struct nvme_fc_remote_port *portptr)
548 {
549 	struct nvme_fc_rport *rport = remoteport_to_rport(portptr);
550 	struct nvme_fc_ctrl *ctrl;
551 	unsigned long flags;
552 
553 	if (!portptr)
554 		return -EINVAL;
555 
556 	spin_lock_irqsave(&rport->lock, flags);
557 
558 	if (portptr->port_state != FC_OBJSTATE_ONLINE) {
559 		spin_unlock_irqrestore(&rport->lock, flags);
560 		return -EINVAL;
561 	}
562 	portptr->port_state = FC_OBJSTATE_DELETED;
563 
564 	/* tear down all associations to the remote port */
565 	list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list)
566 		__nvme_fc_del_ctrl(ctrl);
567 
568 	spin_unlock_irqrestore(&rport->lock, flags);
569 
570 	nvme_fc_abort_lsops(rport);
571 
572 	nvme_fc_rport_put(rport);
573 	return 0;
574 }
575 EXPORT_SYMBOL_GPL(nvme_fc_unregister_remoteport);
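/*
 * Illustrative teardown sketch (hypothetical LLDD code): remote ports are
 * unregistered before the local port. The ->remoteport_delete() and
 * ->localport_delete() template callbacks fire from the final kref put,
 * which may be after these calls return if controllers still hold
 * references.
 */
#if 0
static void example_lldd_detach(struct nvme_fc_local_port *localport,
				struct nvme_fc_remote_port *remoteport)
{
	/* tears down all associations/controllers on the remote port */
	nvme_fc_unregister_remoteport(remoteport);

	/* then retire the host-side port */
	nvme_fc_unregister_localport(localport);

	/*
	 * wait for the *_delete() callbacks before freeing the LLDD
	 * hardware resources backing the ports.
	 */
}
#endif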
576 
577 
578 /* *********************** FC-NVME DMA Handling **************************** */
579 
580 /*
581  * The fcloop device passes in a NULL device pointer. Real LLDDs will
582  * pass in a valid device pointer. If NULL is passed to the dma mapping
583  * routines, depending on the platform, it may or may not succeed, and
584  * may crash.
585  *
586  * As such:
587  * Wrap all the dma routines and check the dev pointer.
588  *
589  * For simple mappings (those that return just a dma address), we no-op them,
590  * returning a dma address of 0.
591  *
592  * On more complex mappings (dma_map_sg), a pseudo routine fills
593  * in the scatter list, setting all dma addresses to 0.
594  */
595 
596 static inline dma_addr_t
597 fc_dma_map_single(struct device *dev, void *ptr, size_t size,
598 		enum dma_data_direction dir)
599 {
600 	return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L;
601 }
602 
603 static inline int
604 fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
605 {
606 	return dev ? dma_mapping_error(dev, dma_addr) : 0;
607 }
608 
609 static inline void
610 fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
611 	enum dma_data_direction dir)
612 {
613 	if (dev)
614 		dma_unmap_single(dev, addr, size, dir);
615 }
616 
617 static inline void
618 fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
619 		enum dma_data_direction dir)
620 {
621 	if (dev)
622 		dma_sync_single_for_cpu(dev, addr, size, dir);
623 }
624 
625 static inline void
626 fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size,
627 		enum dma_data_direction dir)
628 {
629 	if (dev)
630 		dma_sync_single_for_device(dev, addr, size, dir);
631 }
632 
633 /* pseudo dma_map_sg call */
634 static int
635 fc_map_sg(struct scatterlist *sg, int nents)
636 {
637 	struct scatterlist *s;
638 	int i;
639 
640 	WARN_ON(nents == 0 || sg[0].length == 0);
641 
642 	for_each_sg(sg, s, nents, i) {
643 		s->dma_address = 0L;
644 #ifdef CONFIG_NEED_SG_DMA_LENGTH
645 		s->dma_length = s->length;
646 #endif
647 	}
648 	return nents;
649 }
650 
651 static inline int
652 fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
653 		enum dma_data_direction dir)
654 {
655 	return dev ? dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents);
656 }
657 
658 static inline void
659 fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
660 		enum dma_data_direction dir)
661 {
662 	if (dev)
663 		dma_unmap_sg(dev, sg, nents, dir);
664 }
665 
666 
667 /* *********************** FC-NVME LS Handling **************************** */
668 
669 static void nvme_fc_ctrl_put(struct nvme_fc_ctrl *);
670 static int nvme_fc_ctrl_get(struct nvme_fc_ctrl *);
671 
672 
673 static void
674 __nvme_fc_finish_ls_req(struct nvmefc_ls_req_op *lsop)
675 {
676 	struct nvme_fc_rport *rport = lsop->rport;
677 	struct nvmefc_ls_req *lsreq = &lsop->ls_req;
678 	unsigned long flags;
679 
680 	spin_lock_irqsave(&rport->lock, flags);
681 
682 	if (!lsop->req_queued) {
683 		spin_unlock_irqrestore(&rport->lock, flags);
684 		return;
685 	}
686 
687 	list_del(&lsop->lsreq_list);
688 
689 	lsop->req_queued = false;
690 
691 	spin_unlock_irqrestore(&rport->lock, flags);
692 
693 	fc_dma_unmap_single(rport->dev, lsreq->rqstdma,
694 				  (lsreq->rqstlen + lsreq->rsplen),
695 				  DMA_BIDIRECTIONAL);
696 
697 	nvme_fc_rport_put(rport);
698 }
699 
700 static int
701 __nvme_fc_send_ls_req(struct nvme_fc_rport *rport,
702 		struct nvmefc_ls_req_op *lsop,
703 		void (*done)(struct nvmefc_ls_req *req, int status))
704 {
705 	struct nvmefc_ls_req *lsreq = &lsop->ls_req;
706 	unsigned long flags;
707 	int ret = 0;
708 
709 	if (rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
710 		return -ECONNREFUSED;
711 
712 	if (!nvme_fc_rport_get(rport))
713 		return -ESHUTDOWN;
714 
715 	lsreq->done = done;
716 	lsop->rport = rport;
717 	lsop->req_queued = false;
718 	INIT_LIST_HEAD(&lsop->lsreq_list);
719 	init_completion(&lsop->ls_done);
720 
721 	lsreq->rqstdma = fc_dma_map_single(rport->dev, lsreq->rqstaddr,
722 				  lsreq->rqstlen + lsreq->rsplen,
723 				  DMA_BIDIRECTIONAL);
724 	if (fc_dma_mapping_error(rport->dev, lsreq->rqstdma)) {
725 		ret = -EFAULT;
726 		goto out_putrport;
727 	}
728 	lsreq->rspdma = lsreq->rqstdma + lsreq->rqstlen;
729 
730 	spin_lock_irqsave(&rport->lock, flags);
731 
732 	list_add_tail(&lsop->lsreq_list, &rport->ls_req_list);
733 
734 	lsop->req_queued = true;
735 
736 	spin_unlock_irqrestore(&rport->lock, flags);
737 
738 	ret = rport->lport->ops->ls_req(&rport->lport->localport,
739 					&rport->remoteport, lsreq);
740 	if (ret)
741 		goto out_unlink;
742 
743 	return 0;
744 
745 out_unlink:
746 	lsop->ls_error = ret;
747 	spin_lock_irqsave(&rport->lock, flags);
748 	lsop->req_queued = false;
749 	list_del(&lsop->lsreq_list);
750 	spin_unlock_irqrestore(&rport->lock, flags);
751 	fc_dma_unmap_single(rport->dev, lsreq->rqstdma,
752 				  (lsreq->rqstlen + lsreq->rsplen),
753 				  DMA_BIDIRECTIONAL);
754 out_putrport:
755 	nvme_fc_rport_put(rport);
756 
757 	return ret;
758 }
759 
760 static void
761 nvme_fc_send_ls_req_done(struct nvmefc_ls_req *lsreq, int status)
762 {
763 	struct nvmefc_ls_req_op *lsop = ls_req_to_lsop(lsreq);
764 
765 	lsop->ls_error = status;
766 	complete(&lsop->ls_done);
767 }
768 
769 static int
770 nvme_fc_send_ls_req(struct nvme_fc_rport *rport, struct nvmefc_ls_req_op *lsop)
771 {
772 	struct nvmefc_ls_req *lsreq = &lsop->ls_req;
773 	struct fcnvme_ls_rjt *rjt = lsreq->rspaddr;
774 	int ret;
775 
776 	ret = __nvme_fc_send_ls_req(rport, lsop, nvme_fc_send_ls_req_done);
777 
778 	if (!ret) {
779 		/*
780 		 * No timeout/not interruptible as we need the struct
781 		 * to exist until the lldd calls us back. Thus mandate
782 		 * wait until the driver calls back. The lldd is responsible for
783 		 * the timeout action.
784 		 */
785 		wait_for_completion(&lsop->ls_done);
786 
787 		__nvme_fc_finish_ls_req(lsop);
788 
789 		ret = lsop->ls_error;
790 	}
791 
792 	if (ret)
793 		return ret;
794 
795 	/* ACC or RJT payload ? */
796 	if (rjt->w0.ls_cmd == FCNVME_LS_RJT)
797 		return -ENXIO;
798 
799 	return 0;
800 }
801 
802 static int
803 nvme_fc_send_ls_req_async(struct nvme_fc_rport *rport,
804 		struct nvmefc_ls_req_op *lsop,
805 		void (*done)(struct nvmefc_ls_req *req, int status))
806 {
807 	/* don't wait for completion */
808 
809 	return __nvme_fc_send_ls_req(rport, lsop, done);
810 }
811 
812 /* Validation Error indexes into the string table below */
813 enum {
814 	VERR_NO_ERROR		= 0,
815 	VERR_LSACC		= 1,
816 	VERR_LSDESC_RQST	= 2,
817 	VERR_LSDESC_RQST_LEN	= 3,
818 	VERR_ASSOC_ID		= 4,
819 	VERR_ASSOC_ID_LEN	= 5,
820 	VERR_CONN_ID		= 6,
821 	VERR_CONN_ID_LEN	= 7,
822 	VERR_CR_ASSOC		= 8,
823 	VERR_CR_ASSOC_ACC_LEN	= 9,
824 	VERR_CR_CONN		= 10,
825 	VERR_CR_CONN_ACC_LEN	= 11,
826 	VERR_DISCONN		= 12,
827 	VERR_DISCONN_ACC_LEN	= 13,
828 };
829 
830 static char *validation_errors[] = {
831 	"OK",
832 	"Not LS_ACC",
833 	"Not LSDESC_RQST",
834 	"Bad LSDESC_RQST Length",
835 	"Not Association ID",
836 	"Bad Association ID Length",
837 	"Not Connection ID",
838 	"Bad Connection ID Length",
839 	"Not CR_ASSOC Rqst",
840 	"Bad CR_ASSOC ACC Length",
841 	"Not CR_CONN Rqst",
842 	"Bad CR_CONN ACC Length",
843 	"Not Disconnect Rqst",
844 	"Bad Disconnect ACC Length",
845 };
846 
847 static int
848 nvme_fc_connect_admin_queue(struct nvme_fc_ctrl *ctrl,
849 	struct nvme_fc_queue *queue, u16 qsize, u16 ersp_ratio)
850 {
851 	struct nvmefc_ls_req_op *lsop;
852 	struct nvmefc_ls_req *lsreq;
853 	struct fcnvme_ls_cr_assoc_rqst *assoc_rqst;
854 	struct fcnvme_ls_cr_assoc_acc *assoc_acc;
855 	int ret, fcret = 0;
856 
857 	lsop = kzalloc((sizeof(*lsop) +
858 			 ctrl->lport->ops->lsrqst_priv_sz +
859 			 sizeof(*assoc_rqst) + sizeof(*assoc_acc)), GFP_KERNEL);
860 	if (!lsop) {
861 		ret = -ENOMEM;
862 		goto out_no_memory;
863 	}
864 	lsreq = &lsop->ls_req;
865 
866 	lsreq->private = (void *)&lsop[1];
867 	assoc_rqst = (struct fcnvme_ls_cr_assoc_rqst *)
868 			(lsreq->private + ctrl->lport->ops->lsrqst_priv_sz);
869 	assoc_acc = (struct fcnvme_ls_cr_assoc_acc *)&assoc_rqst[1];
870 
871 	assoc_rqst->w0.ls_cmd = FCNVME_LS_CREATE_ASSOCIATION;
872 	assoc_rqst->desc_list_len =
873 			cpu_to_be32(sizeof(struct fcnvme_lsdesc_cr_assoc_cmd));
874 
875 	assoc_rqst->assoc_cmd.desc_tag =
876 			cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD);
877 	assoc_rqst->assoc_cmd.desc_len =
878 			fcnvme_lsdesc_len(
879 				sizeof(struct fcnvme_lsdesc_cr_assoc_cmd));
880 
881 	assoc_rqst->assoc_cmd.ersp_ratio = cpu_to_be16(ersp_ratio);
882 	assoc_rqst->assoc_cmd.sqsize = cpu_to_be16(qsize);
883 	/* Linux supports only Dynamic controllers */
884 	assoc_rqst->assoc_cmd.cntlid = cpu_to_be16(0xffff);
885 	memcpy(&assoc_rqst->assoc_cmd.hostid, &ctrl->ctrl.opts->host->id,
886 		min_t(size_t, FCNVME_ASSOC_HOSTID_LEN, sizeof(uuid_be)));
887 	strncpy(assoc_rqst->assoc_cmd.hostnqn, ctrl->ctrl.opts->host->nqn,
888 		min(FCNVME_ASSOC_HOSTNQN_LEN, NVMF_NQN_SIZE));
889 	strncpy(assoc_rqst->assoc_cmd.subnqn, ctrl->ctrl.opts->subsysnqn,
890 		min(FCNVME_ASSOC_SUBNQN_LEN, NVMF_NQN_SIZE));
891 
892 	lsop->queue = queue;
893 	lsreq->rqstaddr = assoc_rqst;
894 	lsreq->rqstlen = sizeof(*assoc_rqst);
895 	lsreq->rspaddr = assoc_acc;
896 	lsreq->rsplen = sizeof(*assoc_acc);
897 	lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;
898 
899 	ret = nvme_fc_send_ls_req(ctrl->rport, lsop);
900 	if (ret)
901 		goto out_free_buffer;
902 
903 	/* process connect LS completion */
904 
905 	/* validate the ACC response */
906 	if (assoc_acc->hdr.w0.ls_cmd != FCNVME_LS_ACC)
907 		fcret = VERR_LSACC;
908 	else if (assoc_acc->hdr.desc_list_len !=
909 			fcnvme_lsdesc_len(
910 				sizeof(struct fcnvme_ls_cr_assoc_acc)))
911 		fcret = VERR_CR_ASSOC_ACC_LEN;
912 	else if (assoc_acc->hdr.rqst.desc_tag !=
913 			cpu_to_be32(FCNVME_LSDESC_RQST))
914 		fcret = VERR_LSDESC_RQST;
915 	else if (assoc_acc->hdr.rqst.desc_len !=
916 			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst)))
917 		fcret = VERR_LSDESC_RQST_LEN;
918 	else if (assoc_acc->hdr.rqst.w0.ls_cmd != FCNVME_LS_CREATE_ASSOCIATION)
919 		fcret = VERR_CR_ASSOC;
920 	else if (assoc_acc->associd.desc_tag !=
921 			cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
922 		fcret = VERR_ASSOC_ID;
923 	else if (assoc_acc->associd.desc_len !=
924 			fcnvme_lsdesc_len(
925 				sizeof(struct fcnvme_lsdesc_assoc_id)))
926 		fcret = VERR_ASSOC_ID_LEN;
927 	else if (assoc_acc->connectid.desc_tag !=
928 			cpu_to_be32(FCNVME_LSDESC_CONN_ID))
929 		fcret = VERR_CONN_ID;
930 	else if (assoc_acc->connectid.desc_len !=
931 			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_conn_id)))
932 		fcret = VERR_CONN_ID_LEN;
933 
934 	if (fcret) {
935 		ret = -EBADF;
936 		dev_err(ctrl->dev,
937 			"q %d connect failed: %s\n",
938 			queue->qnum, validation_errors[fcret]);
939 	} else {
940 		ctrl->association_id =
941 			be64_to_cpu(assoc_acc->associd.association_id);
942 		queue->connection_id =
943 			be64_to_cpu(assoc_acc->connectid.connection_id);
944 		set_bit(NVME_FC_Q_CONNECTED, &queue->flags);
945 	}
946 
947 out_free_buffer:
948 	kfree(lsop);
949 out_no_memory:
950 	if (ret)
951 		dev_err(ctrl->dev,
952 			"queue %d connect admin queue failed (%d).\n",
953 			queue->qnum, ret);
954 	return ret;
955 }
956 
957 static int
958 nvme_fc_connect_queue(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
959 			u16 qsize, u16 ersp_ratio)
960 {
961 	struct nvmefc_ls_req_op *lsop;
962 	struct nvmefc_ls_req *lsreq;
963 	struct fcnvme_ls_cr_conn_rqst *conn_rqst;
964 	struct fcnvme_ls_cr_conn_acc *conn_acc;
965 	int ret, fcret = 0;
966 
967 	lsop = kzalloc((sizeof(*lsop) +
968 			 ctrl->lport->ops->lsrqst_priv_sz +
969 			 sizeof(*conn_rqst) + sizeof(*conn_acc)), GFP_KERNEL);
970 	if (!lsop) {
971 		ret = -ENOMEM;
972 		goto out_no_memory;
973 	}
974 	lsreq = &lsop->ls_req;
975 
976 	lsreq->private = (void *)&lsop[1];
977 	conn_rqst = (struct fcnvme_ls_cr_conn_rqst *)
978 			(lsreq->private + ctrl->lport->ops->lsrqst_priv_sz);
979 	conn_acc = (struct fcnvme_ls_cr_conn_acc *)&conn_rqst[1];
980 
981 	conn_rqst->w0.ls_cmd = FCNVME_LS_CREATE_CONNECTION;
982 	conn_rqst->desc_list_len = cpu_to_be32(
983 				sizeof(struct fcnvme_lsdesc_assoc_id) +
984 				sizeof(struct fcnvme_lsdesc_cr_conn_cmd));
985 
986 	conn_rqst->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
987 	conn_rqst->associd.desc_len =
988 			fcnvme_lsdesc_len(
989 				sizeof(struct fcnvme_lsdesc_assoc_id));
990 	conn_rqst->associd.association_id = cpu_to_be64(ctrl->association_id);
991 	conn_rqst->connect_cmd.desc_tag =
992 			cpu_to_be32(FCNVME_LSDESC_CREATE_CONN_CMD);
993 	conn_rqst->connect_cmd.desc_len =
994 			fcnvme_lsdesc_len(
995 				sizeof(struct fcnvme_lsdesc_cr_conn_cmd));
996 	conn_rqst->connect_cmd.ersp_ratio = cpu_to_be16(ersp_ratio);
997 	conn_rqst->connect_cmd.qid  = cpu_to_be16(queue->qnum);
998 	conn_rqst->connect_cmd.sqsize = cpu_to_be16(qsize);
999 
1000 	lsop->queue = queue;
1001 	lsreq->rqstaddr = conn_rqst;
1002 	lsreq->rqstlen = sizeof(*conn_rqst);
1003 	lsreq->rspaddr = conn_acc;
1004 	lsreq->rsplen = sizeof(*conn_acc);
1005 	lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;
1006 
1007 	ret = nvme_fc_send_ls_req(ctrl->rport, lsop);
1008 	if (ret)
1009 		goto out_free_buffer;
1010 
1011 	/* process connect LS completion */
1012 
1013 	/* validate the ACC response */
1014 	if (conn_acc->hdr.w0.ls_cmd != FCNVME_LS_ACC)
1015 		fcret = VERR_LSACC;
1016 	else if (conn_acc->hdr.desc_list_len !=
1017 			fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc)))
1018 		fcret = VERR_CR_CONN_ACC_LEN;
1019 	else if (conn_acc->hdr.rqst.desc_tag != cpu_to_be32(FCNVME_LSDESC_RQST))
1020 		fcret = VERR_LSDESC_RQST;
1021 	else if (conn_acc->hdr.rqst.desc_len !=
1022 			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst)))
1023 		fcret = VERR_LSDESC_RQST_LEN;
1024 	else if (conn_acc->hdr.rqst.w0.ls_cmd != FCNVME_LS_CREATE_CONNECTION)
1025 		fcret = VERR_CR_CONN;
1026 	else if (conn_acc->connectid.desc_tag !=
1027 			cpu_to_be32(FCNVME_LSDESC_CONN_ID))
1028 		fcret = VERR_CONN_ID;
1029 	else if (conn_acc->connectid.desc_len !=
1030 			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_conn_id)))
1031 		fcret = VERR_CONN_ID_LEN;
1032 
1033 	if (fcret) {
1034 		ret = -EBADF;
1035 		dev_err(ctrl->dev,
1036 			"q %d connect failed: %s\n",
1037 			queue->qnum, validation_errors[fcret]);
1038 	} else {
1039 		queue->connection_id =
1040 			be64_to_cpu(conn_acc->connectid.connection_id);
1041 		set_bit(NVME_FC_Q_CONNECTED, &queue->flags);
1042 	}
1043 
1044 out_free_buffer:
1045 	kfree(lsop);
1046 out_no_memory:
1047 	if (ret)
1048 		dev_err(ctrl->dev,
1049 			"queue %d connect command failed (%d).\n",
1050 			queue->qnum, ret);
1051 	return ret;
1052 }
1053 
1054 static void
1055 nvme_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status)
1056 {
1057 	struct nvmefc_ls_req_op *lsop = ls_req_to_lsop(lsreq);
1058 
1059 	__nvme_fc_finish_ls_req(lsop);
1060 
1061 	/* fc-nvme initiator doesn't care about success or failure of cmd */
1062 
1063 	kfree(lsop);
1064 }
1065 
1066 /*
1067  * This routine sends a FC-NVME LS to disconnect (aka terminate)
1068  * the FC-NVME Association.  Terminating the association also
1069  * terminates the FC-NVME connections (per queue, both admin and io
1070  * queues) that are part of the association. E.g. things are torn
1071  * down, and the related FC-NVME Association ID and Connection IDs
1072  * become invalid.
1073  *
1074  * The behavior of the fc-nvme initiator is such that its
1075  * understanding of the association and connections will implicitly
1076  * be torn down. The action is implicit as it may be due to a loss of
1077  * connectivity with the fc-nvme target, so you may never get a
1078  * response even if you tried.  As such, the action of this routine
1079  * is to asynchronously send the LS, ignore any results of the LS, and
1080  * continue on with terminating the association. If the fc-nvme target
1081  * is present and receives the LS, it too can tear down.
1082  */
1083 static void
1084 nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl)
1085 {
1086 	struct fcnvme_ls_disconnect_rqst *discon_rqst;
1087 	struct fcnvme_ls_disconnect_acc *discon_acc;
1088 	struct nvmefc_ls_req_op *lsop;
1089 	struct nvmefc_ls_req *lsreq;
1090 	int ret;
1091 
1092 	lsop = kzalloc((sizeof(*lsop) +
1093 			 ctrl->lport->ops->lsrqst_priv_sz +
1094 			 sizeof(*discon_rqst) + sizeof(*discon_acc)),
1095 			GFP_KERNEL);
1096 	if (!lsop)
1097 		/* couldn't send it... too bad */
1098 		return;
1099 
1100 	lsreq = &lsop->ls_req;
1101 
1102 	lsreq->private = (void *)&lsop[1];
1103 	discon_rqst = (struct fcnvme_ls_disconnect_rqst *)
1104 			(lsreq->private + ctrl->lport->ops->lsrqst_priv_sz);
1105 	discon_acc = (struct fcnvme_ls_disconnect_acc *)&discon_rqst[1];
1106 
1107 	discon_rqst->w0.ls_cmd = FCNVME_LS_DISCONNECT;
1108 	discon_rqst->desc_list_len = cpu_to_be32(
1109 				sizeof(struct fcnvme_lsdesc_assoc_id) +
1110 				sizeof(struct fcnvme_lsdesc_disconn_cmd));
1111 
1112 	discon_rqst->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
1113 	discon_rqst->associd.desc_len =
1114 			fcnvme_lsdesc_len(
1115 				sizeof(struct fcnvme_lsdesc_assoc_id));
1116 
1117 	discon_rqst->associd.association_id = cpu_to_be64(ctrl->association_id);
1118 
1119 	discon_rqst->discon_cmd.desc_tag = cpu_to_be32(
1120 						FCNVME_LSDESC_DISCONN_CMD);
1121 	discon_rqst->discon_cmd.desc_len =
1122 			fcnvme_lsdesc_len(
1123 				sizeof(struct fcnvme_lsdesc_disconn_cmd));
1124 	discon_rqst->discon_cmd.scope = FCNVME_DISCONN_ASSOCIATION;
1125 	discon_rqst->discon_cmd.id = cpu_to_be64(ctrl->association_id);
1126 
1127 	lsreq->rqstaddr = discon_rqst;
1128 	lsreq->rqstlen = sizeof(*discon_rqst);
1129 	lsreq->rspaddr = discon_acc;
1130 	lsreq->rsplen = sizeof(*discon_acc);
1131 	lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;
1132 
1133 	ret = nvme_fc_send_ls_req_async(ctrl->rport, lsop,
1134 				nvme_fc_disconnect_assoc_done);
1135 	if (ret)
1136 		kfree(lsop);
1137 
1138 	/* the only meaningful part of terminating the association */
1139 	ctrl->association_id = 0;
1140 }
1141 
1142 
1143 /* *********************** NVME Ctrl Routines **************************** */
1144 
1145 static void __nvme_fc_final_op_cleanup(struct request *rq);
1146 
1147 static int
1148 nvme_fc_reinit_request(void *data, struct request *rq)
1149 {
1150 	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
1151 	struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
1152 
1153 	memset(cmdiu, 0, sizeof(*cmdiu));
1154 	cmdiu->scsi_id = NVME_CMD_SCSI_ID;
1155 	cmdiu->fc_id = NVME_CMD_FC_ID;
1156 	cmdiu->iu_len = cpu_to_be16(sizeof(*cmdiu) / sizeof(u32));
1157 	memset(&op->rsp_iu, 0, sizeof(op->rsp_iu));
1158 
1159 	return 0;
1160 }
1161 
1162 static void
1163 __nvme_fc_exit_request(struct nvme_fc_ctrl *ctrl,
1164 		struct nvme_fc_fcp_op *op)
1165 {
1166 	fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.rspdma,
1167 				sizeof(op->rsp_iu), DMA_FROM_DEVICE);
1168 	fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.cmddma,
1169 				sizeof(op->cmd_iu), DMA_TO_DEVICE);
1170 
1171 	atomic_set(&op->state, FCPOP_STATE_UNINIT);
1172 }
1173 
1174 static void
1175 nvme_fc_exit_request(void *data, struct request *rq,
1176 				unsigned int hctx_idx, unsigned int rq_idx)
1177 {
1178 	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
1179 
1180 	return __nvme_fc_exit_request(data, op);
1181 }
1182 
1183 static int
1184 __nvme_fc_abort_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_fcp_op *op)
1185 {
1186 	int state;
1187 
1188 	state = atomic_xchg(&op->state, FCPOP_STATE_ABORTED);
1189 	if (state != FCPOP_STATE_ACTIVE) {
1190 		atomic_set(&op->state, state);
1191 		return -ECANCELED;
1192 	}
1193 
1194 	ctrl->lport->ops->fcp_abort(&ctrl->lport->localport,
1195 					&ctrl->rport->remoteport,
1196 					op->queue->lldd_handle,
1197 					&op->fcp_req);
1198 
1199 	return 0;
1200 }
1201 
1202 static void
1203 nvme_fc_abort_aen_ops(struct nvme_fc_ctrl *ctrl)
1204 {
1205 	struct nvme_fc_fcp_op *aen_op = ctrl->aen_ops;
1206 	unsigned long flags;
1207 	int i, ret;
1208 
1209 	for (i = 0; i < NVME_FC_NR_AEN_COMMANDS; i++, aen_op++) {
1210 		if (atomic_read(&aen_op->state) != FCPOP_STATE_ACTIVE)
1211 			continue;
1212 
1213 		spin_lock_irqsave(&ctrl->lock, flags);
1214 		if (ctrl->flags & FCCTRL_TERMIO) {
1215 			ctrl->iocnt++;
1216 			aen_op->flags |= FCOP_FLAGS_TERMIO;
1217 		}
1218 		spin_unlock_irqrestore(&ctrl->lock, flags);
1219 
1220 		ret = __nvme_fc_abort_op(ctrl, aen_op);
1221 		if (ret) {
1222 			/*
1223 			 * if __nvme_fc_abort_op failed the io wasn't
1224 			 * active. Thus this call path is running in
1225 			 * parallel to the io complete. Treat as non-error.
1226 			 */
1227 
1228 			/* back out the flags/counters */
1229 			spin_lock_irqsave(&ctrl->lock, flags);
1230 			if (ctrl->flags & FCCTRL_TERMIO)
1231 				ctrl->iocnt--;
1232 			aen_op->flags &= ~FCOP_FLAGS_TERMIO;
1233 			spin_unlock_irqrestore(&ctrl->lock, flags);
1234 			return;
1235 		}
1236 	}
1237 }
1238 
1239 static inline int
1240 __nvme_fc_fcpop_chk_teardowns(struct nvme_fc_ctrl *ctrl,
1241 		struct nvme_fc_fcp_op *op)
1242 {
1243 	unsigned long flags;
1244 	bool complete_rq = false;
1245 
1246 	spin_lock_irqsave(&ctrl->lock, flags);
1247 	if (unlikely(op->flags & FCOP_FLAGS_TERMIO)) {
1248 		if (ctrl->flags & FCCTRL_TERMIO)
1249 			ctrl->iocnt--;
1250 	}
1251 	if (op->flags & FCOP_FLAGS_RELEASED)
1252 		complete_rq = true;
1253 	else
1254 		op->flags |= FCOP_FLAGS_COMPLETE;
1255 	spin_unlock_irqrestore(&ctrl->lock, flags);
1256 
1257 	return complete_rq;
1258 }
1259 
1260 static void
1261 nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
1262 {
1263 	struct nvme_fc_fcp_op *op = fcp_req_to_fcp_op(req);
1264 	struct request *rq = op->rq;
1265 	struct nvmefc_fcp_req *freq = &op->fcp_req;
1266 	struct nvme_fc_ctrl *ctrl = op->ctrl;
1267 	struct nvme_fc_queue *queue = op->queue;
1268 	struct nvme_completion *cqe = &op->rsp_iu.cqe;
1269 	struct nvme_command *sqe = &op->cmd_iu.sqe;
1270 	__le16 status = cpu_to_le16(NVME_SC_SUCCESS << 1);
1271 	union nvme_result result;
1272 	bool complete_rq;
1273 
1274 	/*
1275 	 * WARNING:
1276 	 * The current linux implementation of a nvme controller
1277 	 * allocates a single tag set for all io queues and sizes
1278 	 * the io queues to fully hold all possible tags. Thus, the
1279 	 * implementation does not reference or care about the sqhd
1280 	 * value as it never needs to use the sqhd/sqtail pointers
1281 	 * for submission pacing.
1282 	 *
1283 	 * This affects the FC-NVME implementation in two ways:
1284 	 * 1) As the value doesn't matter, we don't need to waste
1285 	 *    cycles extracting it from ERSPs and stamping it in the
1286 	 *    cases where the transport fabricates CQEs on successful
1287 	 *    completions.
1288 	 * 2) The FC-NVME implementation requires that delivery of
1289 	 *    ERSP completions are to go back to the nvme layer in order
1290 	 *    relative to the rsn, such that the sqhd value will always
1291 	 *    be "in order" for the nvme layer. As the nvme layer in
1292 	 *    linux doesn't care about sqhd, there's no need to return
1293 	 *    them in order.
1294 	 *
1295 	 * Additionally:
1296 	 * As the core nvme layer in linux currently does not look at
1297 	 * every field in the cqe - in cases where the FC transport must
1298 	 * fabricate a CQE, the following fields will not be set as they
1299 	 * are not referenced:
1300 	 *      cqe.sqid,  cqe.sqhd,  cqe.command_id
1301 	 */
1302 
1303 	fc_dma_sync_single_for_cpu(ctrl->lport->dev, op->fcp_req.rspdma,
1304 				sizeof(op->rsp_iu), DMA_FROM_DEVICE);
1305 
1306 	if (atomic_read(&op->state) == FCPOP_STATE_ABORTED)
1307 		status = cpu_to_le16((NVME_SC_ABORT_REQ | NVME_SC_DNR) << 1);
1308 	else if (freq->status)
1309 		status = cpu_to_le16(NVME_SC_FC_TRANSPORT_ERROR << 1);
1310 
1311 	/*
1312 	 * For the linux implementation, if we have an unsuccessful
1313 	 * status, the blk-mq layer can typically be called with the
1314 	 * non-zero status and the content of the cqe isn't important.
1315 	 */
1316 	if (status)
1317 		goto done;
1318 
1319 	/*
1320 	 * command completed successfully relative to the wire
1321 	 * protocol. However, validate anything received and
1322 	 * extract the status and result from the cqe (create it
1323 	 * where necessary).
1324 	 */
1325 
1326 	switch (freq->rcv_rsplen) {
1327 
1328 	case 0:
1329 	case NVME_FC_SIZEOF_ZEROS_RSP:
1330 		/*
1331 		 * No response payload or 12 bytes of payload (which
1332 		 * should all be zeros) are considered successful and
1333 		 * no payload in the CQE by the transport.
1334 		 */
1335 		if (freq->transferred_length !=
1336 			be32_to_cpu(op->cmd_iu.data_len)) {
1337 			status = cpu_to_le16(NVME_SC_FC_TRANSPORT_ERROR << 1);
1338 			goto done;
1339 		}
1340 		result.u64 = 0;
1341 		break;
1342 
1343 	case sizeof(struct nvme_fc_ersp_iu):
1344 		/*
1345 		 * The ERSP IU contains a full completion with CQE.
1346 		 * Validate ERSP IU and look at cqe.
1347 		 */
1348 		if (unlikely(be16_to_cpu(op->rsp_iu.iu_len) !=
1349 					(freq->rcv_rsplen / 4) ||
1350 			     be32_to_cpu(op->rsp_iu.xfrd_len) !=
1351 					freq->transferred_length ||
1352 			     op->rsp_iu.status_code ||
1353 			     sqe->common.command_id != cqe->command_id)) {
1354 			status = cpu_to_le16(NVME_SC_FC_TRANSPORT_ERROR << 1);
1355 			goto done;
1356 		}
1357 		result = cqe->result;
1358 		status = cqe->status;
1359 		break;
1360 
1361 	default:
1362 		status = cpu_to_le16(NVME_SC_FC_TRANSPORT_ERROR << 1);
1363 		goto done;
1364 	}
1365 
1366 done:
1367 	if (op->flags & FCOP_FLAGS_AEN) {
1368 		nvme_complete_async_event(&queue->ctrl->ctrl, status, &result);
1369 		complete_rq = __nvme_fc_fcpop_chk_teardowns(ctrl, op);
1370 		atomic_set(&op->state, FCPOP_STATE_IDLE);
1371 		op->flags = FCOP_FLAGS_AEN;	/* clear other flags */
1372 		nvme_fc_ctrl_put(ctrl);
1373 		return;
1374 	}
1375 
1376 	complete_rq = __nvme_fc_fcpop_chk_teardowns(ctrl, op);
1377 	if (!complete_rq) {
1378 		if (unlikely(op->flags & FCOP_FLAGS_TERMIO)) {
1379 			status = cpu_to_le16(NVME_SC_ABORT_REQ);
1380 			if (blk_queue_dying(rq->q))
1381 				status |= cpu_to_le16(NVME_SC_DNR);
1382 		}
1383 		nvme_end_request(rq, status, result);
1384 	} else
1385 		__nvme_fc_final_op_cleanup(rq);
1386 }
1387 
1388 static int
1389 __nvme_fc_init_request(struct nvme_fc_ctrl *ctrl,
1390 		struct nvme_fc_queue *queue, struct nvme_fc_fcp_op *op,
1391 		struct request *rq, u32 rqno)
1392 {
1393 	struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
1394 	int ret = 0;
1395 
1396 	memset(op, 0, sizeof(*op));
1397 	op->fcp_req.cmdaddr = &op->cmd_iu;
1398 	op->fcp_req.cmdlen = sizeof(op->cmd_iu);
1399 	op->fcp_req.rspaddr = &op->rsp_iu;
1400 	op->fcp_req.rsplen = sizeof(op->rsp_iu);
1401 	op->fcp_req.done = nvme_fc_fcpio_done;
1402 	op->fcp_req.first_sgl = (struct scatterlist *)&op[1];
1403 	op->fcp_req.private = &op->fcp_req.first_sgl[SG_CHUNK_SIZE];
1404 	op->ctrl = ctrl;
1405 	op->queue = queue;
1406 	op->rq = rq;
1407 	op->rqno = rqno;
1408 
1409 	cmdiu->scsi_id = NVME_CMD_SCSI_ID;
1410 	cmdiu->fc_id = NVME_CMD_FC_ID;
1411 	cmdiu->iu_len = cpu_to_be16(sizeof(*cmdiu) / sizeof(u32));
1412 
1413 	op->fcp_req.cmddma = fc_dma_map_single(ctrl->lport->dev,
1414 				&op->cmd_iu, sizeof(op->cmd_iu), DMA_TO_DEVICE);
1415 	if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.cmddma)) {
1416 		dev_err(ctrl->dev,
1417 			"FCP Op failed - cmdiu dma mapping failed.\n");
1418 		ret = -EFAULT;
1419 		goto out_on_error;
1420 	}
1421 
1422 	op->fcp_req.rspdma = fc_dma_map_single(ctrl->lport->dev,
1423 				&op->rsp_iu, sizeof(op->rsp_iu),
1424 				DMA_FROM_DEVICE);
1425 	if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.rspdma)) {
1426 		dev_err(ctrl->dev,
1427 			"FCP Op failed - rspiu dma mapping failed.\n");
1428 		ret = -EFAULT;
1429 	}
1430 
1431 	atomic_set(&op->state, FCPOP_STATE_IDLE);
1432 out_on_error:
1433 	return ret;
1434 }
1435 
1436 static int
1437 nvme_fc_init_request(void *data, struct request *rq,
1438 				unsigned int hctx_idx, unsigned int rq_idx,
1439 				unsigned int numa_node)
1440 {
1441 	struct nvme_fc_ctrl *ctrl = data;
1442 	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
1443 	struct nvme_fc_queue *queue = &ctrl->queues[hctx_idx+1];
1444 
1445 	return __nvme_fc_init_request(ctrl, queue, op, rq, queue->rqcnt++);
1446 }
1447 
1448 static int
1449 nvme_fc_init_admin_request(void *data, struct request *rq,
1450 				unsigned int hctx_idx, unsigned int rq_idx,
1451 				unsigned int numa_node)
1452 {
1453 	struct nvme_fc_ctrl *ctrl = data;
1454 	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
1455 	struct nvme_fc_queue *queue = &ctrl->queues[0];
1456 
1457 	return __nvme_fc_init_request(ctrl, queue, op, rq, queue->rqcnt++);
1458 }
1459 
1460 static int
1461 nvme_fc_init_aen_ops(struct nvme_fc_ctrl *ctrl)
1462 {
1463 	struct nvme_fc_fcp_op *aen_op;
1464 	struct nvme_fc_cmd_iu *cmdiu;
1465 	struct nvme_command *sqe;
1466 	void *private;
1467 	int i, ret;
1468 
1469 	aen_op = ctrl->aen_ops;
1470 	for (i = 0; i < NVME_FC_NR_AEN_COMMANDS; i++, aen_op++) {
1471 		private = kzalloc(ctrl->lport->ops->fcprqst_priv_sz,
1472 						GFP_KERNEL);
1473 		if (!private)
1474 			return -ENOMEM;
1475 
1476 		cmdiu = &aen_op->cmd_iu;
1477 		sqe = &cmdiu->sqe;
1478 		ret = __nvme_fc_init_request(ctrl, &ctrl->queues[0],
1479 				aen_op, (struct request *)NULL,
1480 				(AEN_CMDID_BASE + i));
1481 		if (ret) {
1482 			kfree(private);
1483 			return ret;
1484 		}
1485 
1486 		aen_op->flags = FCOP_FLAGS_AEN;
1487 		aen_op->fcp_req.first_sgl = NULL; /* no sg list */
1488 		aen_op->fcp_req.private = private;
1489 
1490 		memset(sqe, 0, sizeof(*sqe));
1491 		sqe->common.opcode = nvme_admin_async_event;
1492 		/* Note: core layer may overwrite the sqe.command_id value */
1493 		sqe->common.command_id = AEN_CMDID_BASE + i;
1494 	}
1495 	return 0;
1496 }
1497 
1498 static void
1499 nvme_fc_term_aen_ops(struct nvme_fc_ctrl *ctrl)
1500 {
1501 	struct nvme_fc_fcp_op *aen_op;
1502 	int i;
1503 
1504 	aen_op = ctrl->aen_ops;
1505 	for (i = 0; i < NVME_FC_NR_AEN_COMMANDS; i++, aen_op++) {
1506 		if (!aen_op->fcp_req.private)
1507 			continue;
1508 
1509 		__nvme_fc_exit_request(ctrl, aen_op);
1510 
1511 		kfree(aen_op->fcp_req.private);
1512 		aen_op->fcp_req.private = NULL;
1513 	}
1514 }
1515 
1516 static inline void
1517 __nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, struct nvme_fc_ctrl *ctrl,
1518 		unsigned int qidx)
1519 {
1520 	struct nvme_fc_queue *queue = &ctrl->queues[qidx];
1521 
1522 	hctx->driver_data = queue;
1523 	queue->hctx = hctx;
1524 }
1525 
1526 static int
1527 nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
1528 		unsigned int hctx_idx)
1529 {
1530 	struct nvme_fc_ctrl *ctrl = data;
1531 
1532 	__nvme_fc_init_hctx(hctx, ctrl, hctx_idx + 1);
1533 
1534 	return 0;
1535 }
1536 
1537 static int
1538 nvme_fc_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
1539 		unsigned int hctx_idx)
1540 {
1541 	struct nvme_fc_ctrl *ctrl = data;
1542 
1543 	__nvme_fc_init_hctx(hctx, ctrl, hctx_idx);
1544 
1545 	return 0;
1546 }
1547 
1548 static void
1549 nvme_fc_init_queue(struct nvme_fc_ctrl *ctrl, int idx, size_t queue_size)
1550 {
1551 	struct nvme_fc_queue *queue;
1552 
1553 	queue = &ctrl->queues[idx];
1554 	memset(queue, 0, sizeof(*queue));
1555 	queue->ctrl = ctrl;
1556 	queue->qnum = idx;
1557 	atomic_set(&queue->csn, 1);
1558 	queue->dev = ctrl->dev;
1559 
1560 	if (idx > 0)
1561 		queue->cmnd_capsule_len = ctrl->ctrl.ioccsz * 16;
1562 	else
1563 		queue->cmnd_capsule_len = sizeof(struct nvme_command);
1564 
1565 	queue->queue_size = queue_size;
1566 
1567 	/*
1568 	 * We considered whether to allocate buffers for all SQEs
1569 	 * and CQEs and dma map them - mapping their respective entries
1570 	 * into the request structures (kernel vm addr and dma address)
1571 	 * thus the driver could use the buffers/mappings directly.
1572 	 * It only makes sense if the LLDD would use them for its
1573 	 * messaging api. It's very unlikely most adapter api's would use
1574 	 * a native NVME sqe/cqe. More reasonable if FC-NVME IU payload
1575 	 * structures were used instead.
1576 	 */
1577 }
1578 
1579 /*
1580  * This routine terminates a queue at the transport level.
1581  * The transport has already ensured that all outstanding ios on
1582  * the queue have been terminated.
1583  * The transport will send a Disconnect LS request to terminate
1584  * the queue's connection. Termination of the admin queue will also
1585  * terminate the association at the target.
1586  */
1587 static void
1588 nvme_fc_free_queue(struct nvme_fc_queue *queue)
1589 {
1590 	if (!test_and_clear_bit(NVME_FC_Q_CONNECTED, &queue->flags))
1591 		return;
1592 
1593 	/*
1594 	 * Current implementation never disconnects a single queue.
1595 	 * It always terminates a whole association. So there is never
1596 	 * a disconnect(queue) LS sent to the target.
1597 	 */
1598 
1599 	queue->connection_id = 0;
1600 	clear_bit(NVME_FC_Q_CONNECTED, &queue->flags);
1601 }
1602 
1603 static void
1604 __nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *ctrl,
1605 	struct nvme_fc_queue *queue, unsigned int qidx)
1606 {
1607 	if (ctrl->lport->ops->delete_queue)
1608 		ctrl->lport->ops->delete_queue(&ctrl->lport->localport, qidx,
1609 				queue->lldd_handle);
1610 	queue->lldd_handle = NULL;
1611 }
1612 
1613 static void
1614 nvme_fc_free_io_queues(struct nvme_fc_ctrl *ctrl)
1615 {
1616 	int i;
1617 
1618 	for (i = 1; i < ctrl->queue_count; i++)
1619 		nvme_fc_free_queue(&ctrl->queues[i]);
1620 }
1621 
1622 static int
1623 __nvme_fc_create_hw_queue(struct nvme_fc_ctrl *ctrl,
1624 	struct nvme_fc_queue *queue, unsigned int qidx, u16 qsize)
1625 {
1626 	int ret = 0;
1627 
1628 	queue->lldd_handle = NULL;
1629 	if (ctrl->lport->ops->create_queue)
1630 		ret = ctrl->lport->ops->create_queue(&ctrl->lport->localport,
1631 				qidx, qsize, &queue->lldd_handle);
1632 
1633 	return ret;
1634 }
1635 
1636 static void
1637 nvme_fc_delete_hw_io_queues(struct nvme_fc_ctrl *ctrl)
1638 {
1639 	struct nvme_fc_queue *queue = &ctrl->queues[ctrl->queue_count - 1];
1640 	int i;
1641 
1642 	for (i = ctrl->queue_count - 1; i >= 1; i--, queue--)
1643 		__nvme_fc_delete_hw_queue(ctrl, queue, i);
1644 }
1645 
1646 static int
1647 nvme_fc_create_hw_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
1648 {
1649 	struct nvme_fc_queue *queue = &ctrl->queues[1];
1650 	int i, ret;
1651 
1652 	for (i = 1; i < ctrl->queue_count; i++, queue++) {
1653 		ret = __nvme_fc_create_hw_queue(ctrl, queue, i, qsize);
1654 		if (ret)
1655 			goto delete_queues;
1656 	}
1657 
1658 	return 0;
1659 
1660 delete_queues:
1661 	for (; i >= 0; i--)
1662 		__nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[i], i);
1663 	return ret;
1664 }
1665 
1666 static int
1667 nvme_fc_connect_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
1668 {
1669 	int i, ret = 0;
1670 
1671 	for (i = 1; i < ctrl->queue_count; i++) {
1672 		ret = nvme_fc_connect_queue(ctrl, &ctrl->queues[i], qsize,
1673 					(qsize / 5));
1674 		if (ret)
1675 			break;
1676 		ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
1677 		if (ret)
1678 			break;
1679 	}
1680 
1681 	return ret;
1682 }
1683 
1684 static void
1685 nvme_fc_init_io_queues(struct nvme_fc_ctrl *ctrl)
1686 {
1687 	int i;
1688 
1689 	for (i = 1; i < ctrl->queue_count; i++)
1690 		nvme_fc_init_queue(ctrl, i, ctrl->ctrl.sqsize);
1691 }
1692 
1693 static void
1694 nvme_fc_ctrl_free(struct kref *ref)
1695 {
1696 	struct nvme_fc_ctrl *ctrl =
1697 		container_of(ref, struct nvme_fc_ctrl, ref);
1698 	unsigned long flags;
1699 
1700 	if (ctrl->ctrl.tagset) {
1701 		blk_cleanup_queue(ctrl->ctrl.connect_q);
1702 		blk_mq_free_tag_set(&ctrl->tag_set);
1703 	}
1704 
1705 	/* remove from rport list */
1706 	spin_lock_irqsave(&ctrl->rport->lock, flags);
1707 	list_del(&ctrl->ctrl_list);
1708 	spin_unlock_irqrestore(&ctrl->rport->lock, flags);
1709 
1710 	blk_cleanup_queue(ctrl->ctrl.admin_q);
1711 	blk_mq_free_tag_set(&ctrl->admin_tag_set);
1712 
1713 	kfree(ctrl->queues);
1714 
1715 	put_device(ctrl->dev);
1716 	nvme_fc_rport_put(ctrl->rport);
1717 
1718 	ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum);
1719 	if (ctrl->ctrl.opts)
1720 		nvmf_free_options(ctrl->ctrl.opts);
1721 	kfree(ctrl);
1722 }
1723 
1724 static void
1725 nvme_fc_ctrl_put(struct nvme_fc_ctrl *ctrl)
1726 {
1727 	kref_put(&ctrl->ref, nvme_fc_ctrl_free);
1728 }
1729 
1730 static int
1731 nvme_fc_ctrl_get(struct nvme_fc_ctrl *ctrl)
1732 {
1733 	return kref_get_unless_zero(&ctrl->ref);
1734 }
1735 
1736 /*
1737  * All accesses from nvme core layer done - can now free the
1738  * controller. Called after last nvme_put_ctrl() call
1739  */
1740 static void
1741 nvme_fc_nvme_ctrl_freed(struct nvme_ctrl *nctrl)
1742 {
1743 	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
1744 
1745 	WARN_ON(nctrl != &ctrl->ctrl);
1746 
1747 	nvme_fc_ctrl_put(ctrl);
1748 }
1749 
1750 static void
1751 nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
1752 {
1753 	dev_warn(ctrl->ctrl.device,
1754 		"NVME-FC{%d}: transport association error detected: %s\n",
1755 		ctrl->cnum, errmsg);
1756 	dev_info(ctrl->ctrl.device,
1757 		"NVME-FC{%d}: resetting controller\n", ctrl->cnum);
1758 
1759 	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RECONNECTING)) {
1760 		dev_err(ctrl->ctrl.device,
1761 			"NVME-FC{%d}: error_recovery: Couldn't change state "
1762 			"to RECONNECTING\n", ctrl->cnum);
1763 		return;
1764 	}
1765 
1766 	if (!queue_work(nvme_fc_wq, &ctrl->reset_work))
1767 		dev_err(ctrl->ctrl.device,
1768 			"NVME-FC{%d}: error_recovery: Failed to schedule "
1769 			"reset work\n", ctrl->cnum);
1770 }
1771 
1772 static enum blk_eh_timer_return
1773 nvme_fc_timeout(struct request *rq, bool reserved)
1774 {
1775 	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
1776 	struct nvme_fc_ctrl *ctrl = op->ctrl;
1777 	int ret;
1778 
1779 	if (reserved)
1780 		return BLK_EH_RESET_TIMER;
1781 
1782 	ret = __nvme_fc_abort_op(ctrl, op);
1783 	if (ret)
1784 		/* io wasn't active to abort, consider it done */
1785 		return BLK_EH_HANDLED;
1786 
1787 	/*
1788 	 * we can't individually ABTS an io without affecting the queue,
1789 	 * thus killing the queue, and thus the association.
1790 	 * So resolve by performing a controller reset, which will stop
1791 	 * the host/io stack, terminate the association on the link,
1792 	 * and recreate an association on the link.
1793 	 */
1794 	nvme_fc_error_recovery(ctrl, "io timeout error");
1795 
1796 	return BLK_EH_HANDLED;
1797 }
1798 
1799 static int
1800 nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
1801 		struct nvme_fc_fcp_op *op)
1802 {
1803 	struct nvmefc_fcp_req *freq = &op->fcp_req;
1804 	enum dma_data_direction dir;
1805 	int ret;
1806 
1807 	freq->sg_cnt = 0;
1808 
1809 	if (!blk_rq_payload_bytes(rq))
1810 		return 0;
1811 
1812 	freq->sg_table.sgl = freq->first_sgl;
1813 	ret = sg_alloc_table_chained(&freq->sg_table,
1814 			blk_rq_nr_phys_segments(rq), freq->sg_table.sgl);
1815 	if (ret)
1816 		return -ENOMEM;
1817 
1818 	op->nents = blk_rq_map_sg(rq->q, rq, freq->sg_table.sgl);
1819 	WARN_ON(op->nents > blk_rq_nr_phys_segments(rq));
1820 	dir = (rq_data_dir(rq) == WRITE) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
1821 	freq->sg_cnt = fc_dma_map_sg(ctrl->lport->dev, freq->sg_table.sgl,
1822 				op->nents, dir);
1823 	if (unlikely(freq->sg_cnt <= 0)) {
1824 		sg_free_table_chained(&freq->sg_table, true);
1825 		freq->sg_cnt = 0;
1826 		return -EFAULT;
1827 	}
1828 
1829 	/*
1830 	 * TODO: blk_integrity_rq(rq)  for DIF
1831 	 */
1832 	return 0;
1833 }
1834 
1835 static void
1836 nvme_fc_unmap_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
1837 		struct nvme_fc_fcp_op *op)
1838 {
1839 	struct nvmefc_fcp_req *freq = &op->fcp_req;
1840 
1841 	if (!freq->sg_cnt)
1842 		return;
1843 
1844 	fc_dma_unmap_sg(ctrl->lport->dev, freq->sg_table.sgl, op->nents,
1845 				((rq_data_dir(rq) == WRITE) ?
1846 					DMA_TO_DEVICE : DMA_FROM_DEVICE));
1847 
1848 	nvme_cleanup_cmd(rq);
1849 
1850 	sg_free_table_chained(&freq->sg_table, true);
1851 
1852 	freq->sg_cnt = 0;
1853 }
1854 
1855 /*
1856  * In FC, the queue is a logical thing. At transport connect, the target
1857  * creates its "queue" and returns a handle that is to be given to the
1858  * target whenever it posts something to the corresponding SQ.  When an
1859  * SQE is sent on a SQ, FC effectively considers the SQE, or rather the
1860  * command contained within the SQE, an io, and assigns an FC exchange
1861  * to it. The SQE and the associated SQ handle are sent in the initial
1862  * CMD IU sent on the exchange. All transfers relative to the io occur
1863  * as part of the exchange.  The CQE is the last thing for the io,
1864  * which is transferred (explicitly or implicitly) with the RSP IU
1865  * sent on the exchange. After the CQE is received, the FC exchange is
1866  * terminated and the exchange may be used for a different io.
1867  *
1868  * In the transport-to-LLDD API, the transport issues a new fcp io
1869  * request to the LLDD. The LLDD then allocates an FC exchange
1870  * resource and transfers the command. The LLDD will then process all
1871  * steps to complete the io. Upon completion, the transport done routine
1872  * is called.
1873  *
1874  * So - while the operation is outstanding to the LLDD, there is a link
1875  * level FC exchange resource that is also outstanding. This must be
1876  * considered in all cleanup operations.
1877  */
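/*
 * A rough sketch of a single io under this model, in terms of the
 * routines in this file:
 *
 *   nvme_fc_queue_rq()
 *     -> nvme_fc_start_fcp_op()      format the CMD IU, map the data
 *        -> lport->ops->fcp_io()     LLDD allocates the exchange and
 *                                    performs all transfers for the io
 *   ...on completion, the LLDD invokes the transport's done routine,
 *   which completes the request back to the block layer.
 */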
1878 static int
1879 nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
1880 	struct nvme_fc_fcp_op *op, u32 data_len,
1881 	enum nvmefc_fcp_datadir	io_dir)
1882 {
1883 	struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
1884 	struct nvme_command *sqe = &cmdiu->sqe;
1885 	u32 csn;
1886 	int ret;
1887 
1888 	/*
1889 	 * before attempting to send the io, check to see if we believe
1890 	 * the target device is present
1891 	 */
1892 	if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
1893 		return BLK_MQ_RQ_QUEUE_ERROR;
1894 
1895 	if (!nvme_fc_ctrl_get(ctrl))
1896 		return BLK_MQ_RQ_QUEUE_ERROR;
1897 
1898 	/* format the FC-NVME CMD IU and fcp_req */
1899 	cmdiu->connection_id = cpu_to_be64(queue->connection_id);
1900 	csn = atomic_inc_return(&queue->csn);
1901 	cmdiu->csn = cpu_to_be32(csn);
1902 	cmdiu->data_len = cpu_to_be32(data_len);
1903 	switch (io_dir) {
1904 	case NVMEFC_FCP_WRITE:
1905 		cmdiu->flags = FCNVME_CMD_FLAGS_WRITE;
1906 		break;
1907 	case NVMEFC_FCP_READ:
1908 		cmdiu->flags = FCNVME_CMD_FLAGS_READ;
1909 		break;
1910 	case NVMEFC_FCP_NODATA:
1911 		cmdiu->flags = 0;
1912 		break;
1913 	}
1914 	op->fcp_req.payload_length = data_len;
1915 	op->fcp_req.io_dir = io_dir;
1916 	op->fcp_req.transferred_length = 0;
1917 	op->fcp_req.rcv_rsplen = 0;
1918 	op->fcp_req.status = NVME_SC_SUCCESS;
1919 	op->fcp_req.sqid = cpu_to_le16(queue->qnum);
1920 
1921 	/*
1922 	 * validate per fabric rules, set fields mandated by fabric spec
1923 	 * as well as those by FC-NVME spec.
1924 	 */
1925 	WARN_ON_ONCE(sqe->common.metadata);
1926 	WARN_ON_ONCE(sqe->common.dptr.prp1);
1927 	WARN_ON_ONCE(sqe->common.dptr.prp2);
1928 	sqe->common.flags |= NVME_CMD_SGL_METABUF;
1929 
1930 	/*
1931 	 * format SQE DPTR field per FC-NVME rules
1932 	 *    type=data block descr; subtype=offset;
1933 	 *    offset is currently 0.
1934 	 */
1935 	sqe->rw.dptr.sgl.type = NVME_SGL_FMT_OFFSET;
1936 	sqe->rw.dptr.sgl.length = cpu_to_le32(data_len);
1937 	sqe->rw.dptr.sgl.addr = 0;
1938 
1939 	if (!(op->flags & FCOP_FLAGS_AEN)) {
1940 		ret = nvme_fc_map_data(ctrl, op->rq, op);
1941 		if (ret < 0) {
1942 			nvme_cleanup_cmd(op->rq);
1943 			nvme_fc_ctrl_put(ctrl);
1944 			return (ret == -ENOMEM || ret == -EAGAIN) ?
1945 				BLK_MQ_RQ_QUEUE_BUSY : BLK_MQ_RQ_QUEUE_ERROR;
1946 		}
1947 	}
1948 
1949 	fc_dma_sync_single_for_device(ctrl->lport->dev, op->fcp_req.cmddma,
1950 				  sizeof(op->cmd_iu), DMA_TO_DEVICE);
1951 
1952 	atomic_set(&op->state, FCPOP_STATE_ACTIVE);
1953 
1954 	if (!(op->flags & FCOP_FLAGS_AEN))
1955 		blk_mq_start_request(op->rq);
1956 
1957 	ret = ctrl->lport->ops->fcp_io(&ctrl->lport->localport,
1958 					&ctrl->rport->remoteport,
1959 					queue->lldd_handle, &op->fcp_req);
1960 
1961 	if (ret) {
1962 		if (op->rq) {			/* normal request */
1963 			nvme_fc_unmap_data(ctrl, op->rq, op);
1964 			nvme_cleanup_cmd(op->rq);
1965 		}
1966 		/* else - aen. no cleanup needed */
1967 
1968 		nvme_fc_ctrl_put(ctrl);
1969 
1970 		if (ret != -EBUSY)
1971 			return BLK_MQ_RQ_QUEUE_ERROR;
1972 
1973 		if (op->rq) {
1974 			blk_mq_stop_hw_queues(op->rq->q);
1975 			blk_mq_delay_queue(queue->hctx, NVMEFC_QUEUE_DELAY);
1976 		}
1977 		return BLK_MQ_RQ_QUEUE_BUSY;
1978 	}
1979 
1980 	return BLK_MQ_RQ_QUEUE_OK;
1981 }
1982 
1983 static int
1984 nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
1985 			const struct blk_mq_queue_data *bd)
1986 {
1987 	struct nvme_ns *ns = hctx->queue->queuedata;
1988 	struct nvme_fc_queue *queue = hctx->driver_data;
1989 	struct nvme_fc_ctrl *ctrl = queue->ctrl;
1990 	struct request *rq = bd->rq;
1991 	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
1992 	struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
1993 	struct nvme_command *sqe = &cmdiu->sqe;
1994 	enum nvmefc_fcp_datadir	io_dir;
1995 	u32 data_len;
1996 	int ret;
1997 
1998 	ret = nvme_setup_cmd(ns, rq, sqe);
1999 	if (ret)
2000 		return ret;
2001 
2002 	data_len = blk_rq_payload_bytes(rq);
2003 	if (data_len)
2004 		io_dir = ((rq_data_dir(rq) == WRITE) ?
2005 					NVMEFC_FCP_WRITE : NVMEFC_FCP_READ);
2006 	else
2007 		io_dir = NVMEFC_FCP_NODATA;
2008 
2009 	return nvme_fc_start_fcp_op(ctrl, queue, op, data_len, io_dir);
2010 }
2011 
2012 static struct blk_mq_tags *
2013 nvme_fc_tagset(struct nvme_fc_queue *queue)
2014 {
2015 	if (queue->qnum == 0)
2016 		return queue->ctrl->admin_tag_set.tags[queue->qnum];
2017 
2018 	return queue->ctrl->tag_set.tags[queue->qnum - 1];
2019 }
2020 
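/*
 * blk-mq poll callback: if the op tied to 'tag' is still active, give
 * the LLDD (when it provides ->poll_queue) a chance to reap completions
 * for this hw queue, then report whether the op has left the ACTIVE
 * state.
 */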
2021 static int
2022 nvme_fc_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
2023 
2024 {
2025 	struct nvme_fc_queue *queue = hctx->driver_data;
2026 	struct nvme_fc_ctrl *ctrl = queue->ctrl;
2027 	struct request *req;
2028 	struct nvme_fc_fcp_op *op;
2029 
2030 	req = blk_mq_tag_to_rq(nvme_fc_tagset(queue), tag);
2031 	if (!req)
2032 		return 0;
2033 
2034 	op = blk_mq_rq_to_pdu(req);
2035 
2036 	if ((atomic_read(&op->state) == FCPOP_STATE_ACTIVE) &&
2037 		 (ctrl->lport->ops->poll_queue))
2038 		ctrl->lport->ops->poll_queue(&ctrl->lport->localport,
2039 						 queue->lldd_handle);
2040 
2041 	return ((atomic_read(&op->state) != FCPOP_STATE_ACTIVE));
2042 }
2043 
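/*
 * Submit an Async Event Request. AEN ops are transport-private fcp ops
 * with no struct request behind them; they carry FCOP_FLAGS_AEN so
 * nvme_fc_start_fcp_op() skips the data mapping and blk-mq start steps.
 */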
2044 static void
2045 nvme_fc_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
2046 {
2047 	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(arg);
2048 	struct nvme_fc_fcp_op *aen_op;
2049 	unsigned long flags;
2050 	bool terminating = false;
2051 	int ret;
2052 
2053 	if (aer_idx > NVME_FC_NR_AEN_COMMANDS)
2054 		return;
2055 
2056 	spin_lock_irqsave(&ctrl->lock, flags);
2057 	if (ctrl->flags & FCCTRL_TERMIO)
2058 		terminating = true;
2059 	spin_unlock_irqrestore(&ctrl->lock, flags);
2060 
2061 	if (terminating)
2062 		return;
2063 
2064 	aen_op = &ctrl->aen_ops[aer_idx];
2065 
2066 	ret = nvme_fc_start_fcp_op(ctrl, aen_op->queue, aen_op, 0,
2067 					NVMEFC_FCP_NODATA);
2068 	if (ret)
2069 		dev_err(ctrl->ctrl.device,
2070 			"failed async event work [%d]\n", aer_idx);
2071 }
2072 
2073 static void
2074 __nvme_fc_final_op_cleanup(struct request *rq)
2075 {
2076 	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
2077 	struct nvme_fc_ctrl *ctrl = op->ctrl;
2078 
2079 	atomic_set(&op->state, FCPOP_STATE_IDLE);
2080 	op->flags &= ~(FCOP_FLAGS_TERMIO | FCOP_FLAGS_RELEASED |
2081 			FCOP_FLAGS_COMPLETE);
2082 
2083 	nvme_cleanup_cmd(rq);
2084 	nvme_fc_unmap_data(ctrl, rq, op);
2085 	nvme_complete_rq(rq);
2086 	nvme_fc_ctrl_put(ctrl);
2087 
2088 }
2089 
2090 static void
2091 nvme_fc_complete_rq(struct request *rq)
2092 {
2093 	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
2094 	struct nvme_fc_ctrl *ctrl = op->ctrl;
2095 	unsigned long flags;
2096 	bool completed = false;
2097 
2098 	/*
2099 	 * the core layer, on controller resets after calling
2100 	 * nvme_shutdown_ctrl(), calls complete_rq without our
2101 	 * calling blk_mq_complete_request(), thus there may still
2102 	 * be live i/o outstanding with the LLDD. This means the transport has
2103 	 * to track complete calls vs fcpio_done calls to know what
2104 	 * path to take on completes and dones.
2105 	 */
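	/*
	 * Two orderings are therefore possible (a sketch; the done path
	 * is handled elsewhere in this file):
	 *  - the done path already ran and set FCOP_FLAGS_COMPLETE: the
	 *    op can be fully cleaned up here.
	 *  - complete_rq ran first: mark FCOP_FLAGS_RELEASED and leave
	 *    the final cleanup to the done/abort path.
	 */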
2106 	spin_lock_irqsave(&ctrl->lock, flags);
2107 	if (op->flags & FCOP_FLAGS_COMPLETE)
2108 		completed = true;
2109 	else
2110 		op->flags |= FCOP_FLAGS_RELEASED;
2111 	spin_unlock_irqrestore(&ctrl->lock, flags);
2112 
2113 	if (completed)
2114 		__nvme_fc_final_op_cleanup(rq);
2115 }
2116 
2117 /*
2118  * This routine is used by the transport when it needs to find active
2119  * io on a queue that is to be terminated. The transport uses
2120  * blk_mq_tagset_busy_iter() to find the busy requests, which then invokes
2121  * this routine to kill them one by one.
2122  *
2123  * As FC allocates FC exchange for each io, the transport must contact
2124  * the LLDD to terminate the exchange, thus releasing the FC exchange.
2125  * After terminating the exchange the LLDD will call the transport's
2126  * normal io done path for the request, but it will have an aborted
2127  * status. The done path will return the io request back to the block
2128  * layer with an error status.
2129  */
2130 static void
2131 nvme_fc_terminate_exchange(struct request *req, void *data, bool reserved)
2132 {
2133 	struct nvme_ctrl *nctrl = data;
2134 	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
2135 	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(req);
2136 	unsigned long flags;
2137 	int status;
2138 
2139 	if (!blk_mq_request_started(req))
2140 		return;
2141 
2142 	spin_lock_irqsave(&ctrl->lock, flags);
2143 	if (ctrl->flags & FCCTRL_TERMIO) {
2144 		ctrl->iocnt++;
2145 		op->flags |= FCOP_FLAGS_TERMIO;
2146 	}
2147 	spin_unlock_irqrestore(&ctrl->lock, flags);
2148 
2149 	status = __nvme_fc_abort_op(ctrl, op);
2150 	if (status) {
2151 		/*
2152 		 * if __nvme_fc_abort_op failed the io wasn't
2153 		 * active. Thus this call path is running in
2154 		 * parallel to the io complete. Treat as non-error.
2155 		 */
2156 
2157 		/* back out the flags/counters */
2158 		spin_lock_irqsave(&ctrl->lock, flags);
2159 		if (ctrl->flags & FCCTRL_TERMIO)
2160 			ctrl->iocnt--;
2161 		op->flags &= ~FCOP_FLAGS_TERMIO;
2162 		spin_unlock_irqrestore(&ctrl->lock, flags);
2163 		return;
2164 	}
2165 }
2166 
2167 
2168 static const struct blk_mq_ops nvme_fc_mq_ops = {
2169 	.queue_rq	= nvme_fc_queue_rq,
2170 	.complete	= nvme_fc_complete_rq,
2171 	.init_request	= nvme_fc_init_request,
2172 	.exit_request	= nvme_fc_exit_request,
2173 	.reinit_request	= nvme_fc_reinit_request,
2174 	.init_hctx	= nvme_fc_init_hctx,
2175 	.poll		= nvme_fc_poll,
2176 	.timeout	= nvme_fc_timeout,
2177 };
2178 
2179 static int
2180 nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
2181 {
2182 	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
2183 	int ret;
2184 
2185 	ret = nvme_set_queue_count(&ctrl->ctrl, &opts->nr_io_queues);
2186 	if (ret) {
2187 		dev_info(ctrl->ctrl.device,
2188 			"set_queue_count failed: %d\n", ret);
2189 		return ret;
2190 	}
2191 
2192 	ctrl->queue_count = opts->nr_io_queues + 1;
2193 	if (!opts->nr_io_queues)
2194 		return 0;
2195 
2196 	dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n",
2197 			opts->nr_io_queues);
2198 
2199 	nvme_fc_init_io_queues(ctrl);
2200 
2201 	memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
2202 	ctrl->tag_set.ops = &nvme_fc_mq_ops;
2203 	ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
2204 	ctrl->tag_set.reserved_tags = 1; /* fabric connect */
2205 	ctrl->tag_set.numa_node = NUMA_NO_NODE;
2206 	ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
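	/*
	 * cmd_size: the blk-mq per-request private data holds the
	 * transport's fcp op, a scatterlist area of SG_CHUNK_SIZE entries
	 * (presumably backing first_sgl, used by nvme_fc_map_data()), and
	 * the LLDD's private area of fcprqst_priv_sz bytes.
	 */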
2207 	ctrl->tag_set.cmd_size = sizeof(struct nvme_fc_fcp_op) +
2208 					(SG_CHUNK_SIZE *
2209 						sizeof(struct scatterlist)) +
2210 					ctrl->lport->ops->fcprqst_priv_sz;
2211 	ctrl->tag_set.driver_data = ctrl;
2212 	ctrl->tag_set.nr_hw_queues = ctrl->queue_count - 1;
2213 	ctrl->tag_set.timeout = NVME_IO_TIMEOUT;
2214 
2215 	ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
2216 	if (ret)
2217 		return ret;
2218 
2219 	ctrl->ctrl.tagset = &ctrl->tag_set;
2220 
2221 	ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
2222 	if (IS_ERR(ctrl->ctrl.connect_q)) {
2223 		ret = PTR_ERR(ctrl->ctrl.connect_q);
2224 		goto out_free_tag_set;
2225 	}
2226 
2227 	ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.opts->queue_size);
2228 	if (ret)
2229 		goto out_cleanup_blk_queue;
2230 
2231 	ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.opts->queue_size);
2232 	if (ret)
2233 		goto out_delete_hw_queues;
2234 
2235 	return 0;
2236 
2237 out_delete_hw_queues:
2238 	nvme_fc_delete_hw_io_queues(ctrl);
2239 out_cleanup_blk_queue:
2240 	nvme_stop_keep_alive(&ctrl->ctrl);
2241 	blk_cleanup_queue(ctrl->ctrl.connect_q);
2242 out_free_tag_set:
2243 	blk_mq_free_tag_set(&ctrl->tag_set);
2244 	nvme_fc_free_io_queues(ctrl);
2245 
2246 	/* force the free routine (invoked on put) to ignore io queues */
2247 	ctrl->ctrl.tagset = NULL;
2248 
2249 	return ret;
2250 }
2251 
2252 static int
2253 nvme_fc_reinit_io_queues(struct nvme_fc_ctrl *ctrl)
2254 {
2255 	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
2256 	int ret;
2257 
2258 	ret = nvme_set_queue_count(&ctrl->ctrl, &opts->nr_io_queues);
2259 	if (ret) {
2260 		dev_info(ctrl->ctrl.device,
2261 			"set_queue_count failed: %d\n", ret);
2262 		return ret;
2263 	}
2264 
2265 	/* check for io queues existing */
2266 	if (ctrl->queue_count == 1)
2267 		return 0;
2268 
2269 	dev_info(ctrl->ctrl.device, "Recreating %d I/O queues.\n",
2270 			opts->nr_io_queues);
2271 
2272 	nvme_fc_init_io_queues(ctrl);
2273 
2274 	ret = blk_mq_reinit_tagset(&ctrl->tag_set);
2275 	if (ret)
2276 		goto out_free_io_queues;
2277 
2278 	ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.opts->queue_size);
2279 	if (ret)
2280 		goto out_free_io_queues;
2281 
2282 	ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.opts->queue_size);
2283 	if (ret)
2284 		goto out_delete_hw_queues;
2285 
2286 	return 0;
2287 
2288 out_delete_hw_queues:
2289 	nvme_fc_delete_hw_io_queues(ctrl);
2290 out_free_io_queues:
2291 	nvme_fc_free_io_queues(ctrl);
2292 	return ret;
2293 }
2294 
2295 /*
2296  * This routine restarts the controller on the host side, and
2297  * on the link side, recreates the controller association.
2298  */
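/*
 * Roughly: create and connect the admin hw queue, issue the fabrics
 * Connect on it, read CAP, enable the controller, identify, set up the
 * AEN ops, then create (or re-init) the io queues and move the
 * controller to LIVE.
 */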
2299 static int
2300 nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
2301 {
2302 	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
2303 	u32 segs;
2304 	int ret;
2305 	bool changed;
2306 
2307 	ctrl->connect_attempts++;
2308 
2309 	/*
2310 	 * Create the admin queue
2311 	 */
2312 
2313 	nvme_fc_init_queue(ctrl, 0, NVME_FC_AQ_BLKMQ_DEPTH);
2314 
2315 	ret = __nvme_fc_create_hw_queue(ctrl, &ctrl->queues[0], 0,
2316 				NVME_FC_AQ_BLKMQ_DEPTH);
2317 	if (ret)
2318 		goto out_free_queue;
2319 
2320 	ret = nvme_fc_connect_admin_queue(ctrl, &ctrl->queues[0],
2321 				NVME_FC_AQ_BLKMQ_DEPTH,
2322 				(NVME_FC_AQ_BLKMQ_DEPTH / 4));
2323 	if (ret)
2324 		goto out_delete_hw_queue;
2325 
2326 	if (ctrl->ctrl.state != NVME_CTRL_NEW)
2327 		blk_mq_start_stopped_hw_queues(ctrl->ctrl.admin_q, true);
2328 
2329 	ret = nvmf_connect_admin_queue(&ctrl->ctrl);
2330 	if (ret)
2331 		goto out_disconnect_admin_queue;
2332 
2333 	/*
2334 	 * Check controller capabilities
2335 	 *
2336 	 * todo:- add code to check if ctrl attributes changed from
2337 	 * prior connection values
2338 	 */
2339 
2340 	ret = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->cap);
2341 	if (ret) {
2342 		dev_err(ctrl->ctrl.device,
2343 			"prop_get NVME_REG_CAP failed\n");
2344 		goto out_disconnect_admin_queue;
2345 	}
2346 
2347 	ctrl->ctrl.sqsize =
2348 		min_t(int, NVME_CAP_MQES(ctrl->cap) + 1, ctrl->ctrl.sqsize);
2349 
2350 	ret = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap);
2351 	if (ret)
2352 		goto out_disconnect_admin_queue;
2353 
2354 	segs = min_t(u32, NVME_FC_MAX_SEGMENTS,
2355 			ctrl->lport->ops->max_sgl_segments);
2356 	ctrl->ctrl.max_hw_sectors = (segs - 1) << (PAGE_SHIFT - 9);
2357 
2358 	ret = nvme_init_identify(&ctrl->ctrl);
2359 	if (ret)
2360 		goto out_disconnect_admin_queue;
2361 
2362 	/* sanity checks */
2363 
2364 	/* FC-NVME does not have other data in the capsule */
2365 	if (ctrl->ctrl.icdoff) {
2366 		dev_err(ctrl->ctrl.device, "icdoff %d is not supported!\n",
2367 				ctrl->ctrl.icdoff);
2368 		goto out_disconnect_admin_queue;
2369 	}
2370 
2371 	nvme_start_keep_alive(&ctrl->ctrl);
2372 
2373 	/* FC-NVME supports normal SGL Data Block Descriptors */
2374 
2375 	if (opts->queue_size > ctrl->ctrl.maxcmd) {
2376 		/* warn if maxcmd is lower than queue_size */
2377 		dev_warn(ctrl->ctrl.device,
2378 			"queue_size %zu > ctrl maxcmd %u, reducing "
2379 			"to queue_size\n",
2380 			opts->queue_size, ctrl->ctrl.maxcmd);
2381 		opts->queue_size = ctrl->ctrl.maxcmd;
2382 	}
2383 
2384 	ret = nvme_fc_init_aen_ops(ctrl);
2385 	if (ret)
2386 		goto out_term_aen_ops;
2387 
2388 	/*
2389 	 * Create the io queues
2390 	 */
2391 
2392 	if (ctrl->queue_count > 1) {
2393 		if (ctrl->ctrl.state == NVME_CTRL_NEW)
2394 			ret = nvme_fc_create_io_queues(ctrl);
2395 		else
2396 			ret = nvme_fc_reinit_io_queues(ctrl);
2397 		if (ret)
2398 			goto out_term_aen_ops;
2399 	}
2400 
2401 	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
2402 	WARN_ON_ONCE(!changed);
2403 
2404 	ctrl->connect_attempts = 0;
2405 
2406 	kref_get(&ctrl->ctrl.kref);
2407 
2408 	if (ctrl->queue_count > 1) {
2409 		nvme_start_queues(&ctrl->ctrl);
2410 		nvme_queue_scan(&ctrl->ctrl);
2411 		nvme_queue_async_events(&ctrl->ctrl);
2412 	}
2413 
2414 	return 0;	/* Success */
2415 
2416 out_term_aen_ops:
2417 	nvme_fc_term_aen_ops(ctrl);
2418 	nvme_stop_keep_alive(&ctrl->ctrl);
2419 out_disconnect_admin_queue:
2420 	/* send a Disconnect(association) LS to fc-nvme target */
2421 	nvme_fc_xmt_disconnect_assoc(ctrl);
2422 out_delete_hw_queue:
2423 	__nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
2424 out_free_queue:
2425 	nvme_fc_free_queue(&ctrl->queues[0]);
2426 
2427 	return ret;
2428 }
2429 
2430 /*
2431  * This routine stops operation of the controller on the host side.
2432  * On the host os stack side: Admin and IO queues are stopped,
2433  *   outstanding ios on them terminated via FC ABTS.
2434  * On the link side: the association is terminated.
2435  */
2436 static void
2437 nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl)
2438 {
2439 	unsigned long flags;
2440 
2441 	nvme_stop_keep_alive(&ctrl->ctrl);
2442 
2443 	spin_lock_irqsave(&ctrl->lock, flags);
2444 	ctrl->flags |= FCCTRL_TERMIO;
2445 	ctrl->iocnt = 0;
2446 	spin_unlock_irqrestore(&ctrl->lock, flags);
2447 
2448 	/*
2449 	 * If io queues are present, stop them and terminate all outstanding
2450 	 * ios on them. As FC allocates FC exchange for each io, the
2451 	 * transport must contact the LLDD to terminate the exchange,
2452 	 * thus releasing the FC exchange. We use blk_mq_tagset_busy_iter()
2453 	 * to tell us what io's are busy and invoke a transport routine
2454 	 * to kill them with the LLDD.  After terminating the exchange
2455 	 * the LLDD will call the transport's normal io done path, but it
2456 	 * will have an aborted status. The done path will return the
2457 	 * io requests back to the block layer as part of normal completions
2458 	 * (but with error status).
2459 	 */
2460 	if (ctrl->queue_count > 1) {
2461 		nvme_stop_queues(&ctrl->ctrl);
2462 		blk_mq_tagset_busy_iter(&ctrl->tag_set,
2463 				nvme_fc_terminate_exchange, &ctrl->ctrl);
2464 	}
2465 
2466 	/*
2467 	 * Other transports, which don't have link-level contexts bound
2468 	 * to sqe's, would try to gracefully shutdown the controller by
2469 	 * writing the registers for shutdown and polling (call
2470 	 * nvme_shutdown_ctrl()). Given a bunch of i/o was potentially
2471 	 * just aborted and we will wait on those contexts, and given
2472 	 * there was no indication of how live the controller is on the
2473 	 * link, don't send more io to create more contexts for the
2474 	 * shutdown. Let the controller fail via keepalive failure if
2475 	 * it's still present.
2476 	 */
2477 
2478 	/*
2479 	 * clean up the admin queue. Same thing as above.
2480 	 * use blk_mq_tagset_busy_iter() and the transport routine to
2481 	 * terminate the exchanges.
2482 	 */
2483 	blk_mq_stop_hw_queues(ctrl->ctrl.admin_q);
2484 	blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
2485 				nvme_fc_terminate_exchange, &ctrl->ctrl);
2486 
2487 	/* kill the aens as they are a separate path */
2488 	nvme_fc_abort_aen_ops(ctrl);
2489 
2490 	/* wait for all io that had to be aborted */
2491 	spin_lock_irqsave(&ctrl->lock, flags);
2492 	while (ctrl->iocnt) {
2493 		spin_unlock_irqrestore(&ctrl->lock, flags);
2494 		msleep(1000);
2495 		spin_lock_irqsave(&ctrl->lock, flags);
2496 	}
2497 	ctrl->flags &= ~FCCTRL_TERMIO;
2498 	spin_unlock_irqrestore(&ctrl->lock, flags);
2499 
2500 	nvme_fc_term_aen_ops(ctrl);
2501 
2502 	/*
2503 	 * send a Disconnect(association) LS to fc-nvme target
2504 	 * Note: could have been sent at top of process, but
2505 	 * cleaner on link traffic if after the aborts complete.
2506 	 * Note: if association doesn't exist, association_id will be 0
2507 	 */
2508 	if (ctrl->association_id)
2509 		nvme_fc_xmt_disconnect_assoc(ctrl);
2510 
2511 	if (ctrl->ctrl.tagset) {
2512 		nvme_fc_delete_hw_io_queues(ctrl);
2513 		nvme_fc_free_io_queues(ctrl);
2514 	}
2515 
2516 	__nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
2517 	nvme_fc_free_queue(&ctrl->queues[0]);
2518 }
2519 
2520 static void
2521 nvme_fc_delete_ctrl_work(struct work_struct *work)
2522 {
2523 	struct nvme_fc_ctrl *ctrl =
2524 		container_of(work, struct nvme_fc_ctrl, delete_work);
2525 
2526 	cancel_work_sync(&ctrl->reset_work);
2527 	cancel_delayed_work_sync(&ctrl->connect_work);
2528 
2529 	/*
2530 	 * kill the association on the link side.  this will block
2531 	 * waiting for io to terminate
2532 	 */
2533 	nvme_fc_delete_association(ctrl);
2534 
2535 	/*
2536 	 * tear down the controller
2537 	 * This will result in the last reference on the nvme ctrl to
2538 	 * expire, calling the transport nvme_fc_nvme_ctrl_freed() callback.
2539 	 * From there, the transport will tear down its logical queues and
2540 	 * association.
2541 	 */
2542 	nvme_uninit_ctrl(&ctrl->ctrl);
2543 
2544 	nvme_put_ctrl(&ctrl->ctrl);
2545 }
2546 
2547 static int
2548 __nvme_fc_del_ctrl(struct nvme_fc_ctrl *ctrl)
2549 {
2550 	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING))
2551 		return -EBUSY;
2552 
2553 	if (!queue_work(nvme_fc_wq, &ctrl->delete_work))
2554 		return -EBUSY;
2555 
2556 	return 0;
2557 }
2558 
2559 /*
2560  * Request from nvme core layer to delete the controller
2561  */
2562 static int
2563 nvme_fc_del_nvme_ctrl(struct nvme_ctrl *nctrl)
2564 {
2565 	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
2566 	int ret;
2567 
2568 	if (!kref_get_unless_zero(&ctrl->ctrl.kref))
2569 		return -EBUSY;
2570 
2571 	ret = __nvme_fc_del_ctrl(ctrl);
2572 
2573 	if (!ret)
2574 		flush_workqueue(nvme_fc_wq);
2575 
2576 	nvme_put_ctrl(&ctrl->ctrl);
2577 
2578 	return ret;
2579 }
2580 
2581 static void
2582 nvme_fc_reset_ctrl_work(struct work_struct *work)
2583 {
2584 	struct nvme_fc_ctrl *ctrl =
2585 			container_of(work, struct nvme_fc_ctrl, reset_work);
2586 	int ret;
2587 
2588 	/* will block while waiting for io to terminate */
2589 	nvme_fc_delete_association(ctrl);
2590 
2591 	ret = nvme_fc_create_association(ctrl);
2592 	if (ret) {
2593 		dev_warn(ctrl->ctrl.device,
2594 			"NVME-FC{%d}: reset: Reconnect attempt failed (%d)\n",
2595 			ctrl->cnum, ret);
2596 		if (ctrl->connect_attempts >= NVME_FC_MAX_CONNECT_ATTEMPTS) {
2597 			dev_warn(ctrl->ctrl.device,
2598 				"NVME-FC{%d}: Max reconnect attempts (%d) "
2599 				"reached. Removing controller\n",
2600 				ctrl->cnum, ctrl->connect_attempts);
2601 
2602 			if (!nvme_change_ctrl_state(&ctrl->ctrl,
2603 				NVME_CTRL_DELETING)) {
2604 				dev_err(ctrl->ctrl.device,
2605 					"NVME-FC{%d}: failed to change state "
2606 					"to DELETING\n", ctrl->cnum);
2607 				return;
2608 			}
2609 
2610 			WARN_ON(!queue_work(nvme_fc_wq, &ctrl->delete_work));
2611 			return;
2612 		}
2613 
2614 		dev_warn(ctrl->ctrl.device,
2615 			"NVME-FC{%d}: Reconnect attempt in %d seconds.\n",
2616 			ctrl->cnum, ctrl->reconnect_delay);
2617 		queue_delayed_work(nvme_fc_wq, &ctrl->connect_work,
2618 				ctrl->reconnect_delay * HZ);
2619 	} else
2620 		dev_info(ctrl->ctrl.device,
2621 			"NVME-FC{%d}: controller reset complete\n", ctrl->cnum);
2622 }
2623 
2624 /*
2625  * called by the nvme core layer, for sysfs interface that requests
2626  * a reset of the nvme controller
2627  */
2628 static int
2629 nvme_fc_reset_nvme_ctrl(struct nvme_ctrl *nctrl)
2630 {
2631 	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
2632 
2633 	dev_warn(ctrl->ctrl.device,
2634 		"NVME-FC{%d}: admin requested controller reset\n", ctrl->cnum);
2635 
2636 	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING))
2637 		return -EBUSY;
2638 
2639 	if (!queue_work(nvme_fc_wq, &ctrl->reset_work))
2640 		return -EBUSY;
2641 
2642 	flush_work(&ctrl->reset_work);
2643 
2644 	return 0;
2645 }
2646 
2647 static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = {
2648 	.name			= "fc",
2649 	.module			= THIS_MODULE,
2650 	.is_fabrics		= true,
2651 	.reg_read32		= nvmf_reg_read32,
2652 	.reg_read64		= nvmf_reg_read64,
2653 	.reg_write32		= nvmf_reg_write32,
2654 	.reset_ctrl		= nvme_fc_reset_nvme_ctrl,
2655 	.free_ctrl		= nvme_fc_nvme_ctrl_freed,
2656 	.submit_async_event	= nvme_fc_submit_async_event,
2657 	.delete_ctrl		= nvme_fc_del_nvme_ctrl,
2658 	.get_subsysnqn		= nvmf_get_subsysnqn,
2659 	.get_address		= nvmf_get_address,
2660 };
2661 
2662 static void
2663 nvme_fc_connect_ctrl_work(struct work_struct *work)
2664 {
2665 	int ret;
2666 
2667 	struct nvme_fc_ctrl *ctrl =
2668 			container_of(to_delayed_work(work),
2669 				struct nvme_fc_ctrl, connect_work);
2670 
2671 	ret = nvme_fc_create_association(ctrl);
2672 	if (ret) {
2673 		dev_warn(ctrl->ctrl.device,
2674 			"NVME-FC{%d}: Reconnect attempt failed (%d)\n",
2675 			ctrl->cnum, ret);
2676 		if (ctrl->connect_attempts >= NVME_FC_MAX_CONNECT_ATTEMPTS) {
2677 			dev_warn(ctrl->ctrl.device,
2678 				"NVME-FC{%d}: Max reconnect attempts (%d) "
2679 				"reached. Removing controller\n",
2680 				ctrl->cnum, ctrl->connect_attempts);
2681 
2682 			if (!nvme_change_ctrl_state(&ctrl->ctrl,
2683 				NVME_CTRL_DELETING)) {
2684 				dev_err(ctrl->ctrl.device,
2685 					"NVME-FC{%d}: failed to change state "
2686 					"to DELETING\n", ctrl->cnum);
2687 				return;
2688 			}
2689 
2690 			WARN_ON(!queue_work(nvme_fc_wq, &ctrl->delete_work));
2691 			return;
2692 		}
2693 
2694 		dev_warn(ctrl->ctrl.device,
2695 			"NVME-FC{%d}: Reconnect attempt in %d seconds.\n",
2696 			ctrl->cnum, ctrl->reconnect_delay);
2697 		queue_delayed_work(nvme_fc_wq, &ctrl->connect_work,
2698 				ctrl->reconnect_delay * HZ);
2699 	} else
2700 		dev_info(ctrl->ctrl.device,
2701 			"NVME-FC{%d}: controller reconnect complete\n",
2702 			ctrl->cnum);
2703 }
2704 
2705 
2706 static const struct blk_mq_ops nvme_fc_admin_mq_ops = {
2707 	.queue_rq	= nvme_fc_queue_rq,
2708 	.complete	= nvme_fc_complete_rq,
2709 	.init_request	= nvme_fc_init_admin_request,
2710 	.exit_request	= nvme_fc_exit_request,
2711 	.reinit_request	= nvme_fc_reinit_request,
2712 	.init_hctx	= nvme_fc_init_admin_hctx,
2713 	.timeout	= nvme_fc_timeout,
2714 };
2715 
2716 
2717 static struct nvme_ctrl *
2718 nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
2719 	struct nvme_fc_lport *lport, struct nvme_fc_rport *rport)
2720 {
2721 	struct nvme_fc_ctrl *ctrl;
2722 	unsigned long flags;
2723 	int ret, idx;
2724 
2725 	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
2726 	if (!ctrl) {
2727 		ret = -ENOMEM;
2728 		goto out_fail;
2729 	}
2730 
2731 	idx = ida_simple_get(&nvme_fc_ctrl_cnt, 0, 0, GFP_KERNEL);
2732 	if (idx < 0) {
2733 		ret = -ENOSPC;
2734 		goto out_free_ctrl;
2735 	}
2736 
2737 	ctrl->ctrl.opts = opts;
2738 	INIT_LIST_HEAD(&ctrl->ctrl_list);
2739 	ctrl->lport = lport;
2740 	ctrl->rport = rport;
2741 	ctrl->dev = lport->dev;
2742 	ctrl->cnum = idx;
2743 
2744 	get_device(ctrl->dev);
2745 	kref_init(&ctrl->ref);
2746 
2747 	INIT_WORK(&ctrl->delete_work, nvme_fc_delete_ctrl_work);
2748 	INIT_WORK(&ctrl->reset_work, nvme_fc_reset_ctrl_work);
2749 	INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work);
2750 	ctrl->reconnect_delay = opts->reconnect_delay;
2751 	spin_lock_init(&ctrl->lock);
2752 
2753 	/* io queue count */
2754 	ctrl->queue_count = min_t(unsigned int,
2755 				opts->nr_io_queues,
2756 				lport->ops->max_hw_queues);
2757 	opts->nr_io_queues = ctrl->queue_count;	/* so opts has valid value */
2758 	ctrl->queue_count++;	/* +1 for admin queue */
2759 
2760 	ctrl->ctrl.sqsize = opts->queue_size - 1;
2761 	ctrl->ctrl.kato = opts->kato;
2762 
2763 	ret = -ENOMEM;
2764 	ctrl->queues = kcalloc(ctrl->queue_count, sizeof(struct nvme_fc_queue),
2765 				GFP_KERNEL);
2766 	if (!ctrl->queues)
2767 		goto out_free_ida;
2768 
2769 	memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
2770 	ctrl->admin_tag_set.ops = &nvme_fc_admin_mq_ops;
2771 	ctrl->admin_tag_set.queue_depth = NVME_FC_AQ_BLKMQ_DEPTH;
2772 	ctrl->admin_tag_set.reserved_tags = 2; /* fabric connect + Keep-Alive */
2773 	ctrl->admin_tag_set.numa_node = NUMA_NO_NODE;
2774 	ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_fc_fcp_op) +
2775 					(SG_CHUNK_SIZE *
2776 						sizeof(struct scatterlist)) +
2777 					ctrl->lport->ops->fcprqst_priv_sz;
2778 	ctrl->admin_tag_set.driver_data = ctrl;
2779 	ctrl->admin_tag_set.nr_hw_queues = 1;
2780 	ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT;
2781 
2782 	ret = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
2783 	if (ret)
2784 		goto out_free_queues;
2785 
2786 	ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
2787 	if (IS_ERR(ctrl->ctrl.admin_q)) {
2788 		ret = PTR_ERR(ctrl->ctrl.admin_q);
2789 		goto out_free_admin_tag_set;
2790 	}
2791 
2792 	/*
2793 	 * Would have been nice to init io queues tag set as well.
2794 	 * However, we require interaction from the controller
2795 	 * for max io queue count before we can do so.
2796 	 * Defer this to the connect path.
2797 	 */
2798 
2799 	ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_fc_ctrl_ops, 0);
2800 	if (ret)
2801 		goto out_cleanup_admin_q;
2802 
2803 	/* at this point, teardown path changes to ref counting on nvme ctrl */
2804 
2805 	spin_lock_irqsave(&rport->lock, flags);
2806 	list_add_tail(&ctrl->ctrl_list, &rport->ctrl_list);
2807 	spin_unlock_irqrestore(&rport->lock, flags);
2808 
2809 	ret = nvme_fc_create_association(ctrl);
2810 	if (ret) {
2811 		ctrl->ctrl.opts = NULL;
2812 		/* initiate nvme ctrl ref counting teardown */
2813 		nvme_uninit_ctrl(&ctrl->ctrl);
2814 		nvme_put_ctrl(&ctrl->ctrl);
2815 
2816 		/* as we're past the point where we transition to the ref
2817 		 * counting teardown path, if we return a bad pointer here,
2818 		 * the calling routine, thinking it's prior to the
2819 		 * transition, will do an rport put. Since the teardown
2820 		 * path also does an rport put, we do an extra get here
2821 		 * so proper order/teardown happens.
2822 		 */
2823 		nvme_fc_rport_get(rport);
2824 
2825 		if (ret > 0)
2826 			ret = -EIO;
2827 		return ERR_PTR(ret);
2828 	}
2829 
2830 	dev_info(ctrl->ctrl.device,
2831 		"NVME-FC{%d}: new ctrl: NQN \"%s\"\n",
2832 		ctrl->cnum, ctrl->ctrl.opts->subsysnqn);
2833 
2834 	return &ctrl->ctrl;
2835 
2836 out_cleanup_admin_q:
2837 	blk_cleanup_queue(ctrl->ctrl.admin_q);
2838 out_free_admin_tag_set:
2839 	blk_mq_free_tag_set(&ctrl->admin_tag_set);
2840 out_free_queues:
2841 	kfree(ctrl->queues);
2842 out_free_ida:
2843 	put_device(ctrl->dev);
2844 	ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum);
2845 out_free_ctrl:
2846 	kfree(ctrl);
2847 out_fail:
2848 	/* an exit via here doesn't follow the ctrl ref counting teardown */
2849 	return ERR_PTR(ret);
2850 }
2851 
2852 enum {
2853 	FCT_TRADDR_ERR		= 0,
2854 	FCT_TRADDR_WWNN		= 1 << 0,
2855 	FCT_TRADDR_WWPN		= 1 << 1,
2856 };
2857 
2858 struct nvmet_fc_traddr {
2859 	u64	nn;
2860 	u64	pn;
2861 };
2862 
2863 static const match_table_t traddr_opt_tokens = {
2864 	{ FCT_TRADDR_WWNN,	"nn-%s"		},
2865 	{ FCT_TRADDR_WWPN,	"pn-%s"		},
2866 	{ FCT_TRADDR_ERR,	NULL		}
2867 };
2868 
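/*
 * Parse a transport address into its WWNN/WWPN components. The string
 * is a ':'-separated list of "nn-<value>" and "pn-<value>" tokens, e.g.
 * (illustrative) "nn-0x20000090fa942779:pn-0x10000090fa942779".
 */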
2869 static int
2870 nvme_fc_parse_address(struct nvmet_fc_traddr *traddr, char *buf)
2871 {
2872 	substring_t args[MAX_OPT_ARGS];
2873 	char *options, *o, *p;
2874 	int token, ret = 0;
2875 	u64 token64;
2876 
2877 	options = o = kstrdup(buf, GFP_KERNEL);
2878 	if (!options)
2879 		return -ENOMEM;
2880 
2881 	while ((p = strsep(&o, ":\n")) != NULL) {
2882 		if (!*p)
2883 			continue;
2884 
2885 		token = match_token(p, traddr_opt_tokens, args);
2886 		switch (token) {
2887 		case FCT_TRADDR_WWNN:
2888 			if (match_u64(args, &token64)) {
2889 				ret = -EINVAL;
2890 				goto out;
2891 			}
2892 			traddr->nn = token64;
2893 			break;
2894 		case FCT_TRADDR_WWPN:
2895 			if (match_u64(args, &token64)) {
2896 				ret = -EINVAL;
2897 				goto out;
2898 			}
2899 			traddr->pn = token64;
2900 			break;
2901 		default:
2902 			pr_warn("unknown traddr token or missing value '%s'\n",
2903 					p);
2904 			ret = -EINVAL;
2905 			goto out;
2906 		}
2907 	}
2908 
2909 out:
2910 	kfree(options);
2911 	return ret;
2912 }
2913 
2914 static struct nvme_ctrl *
2915 nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts)
2916 {
2917 	struct nvme_fc_lport *lport;
2918 	struct nvme_fc_rport *rport;
2919 	struct nvme_ctrl *ctrl;
2920 	struct nvmet_fc_traddr laddr = { 0L, 0L };
2921 	struct nvmet_fc_traddr raddr = { 0L, 0L };
2922 	unsigned long flags;
2923 	int ret;
2924 
2925 	ret = nvme_fc_parse_address(&raddr, opts->traddr);
2926 	if (ret || !raddr.nn || !raddr.pn)
2927 		return ERR_PTR(-EINVAL);
2928 
2929 	ret = nvme_fc_parse_address(&laddr, opts->host_traddr);
2930 	if (ret || !laddr.nn || !laddr.pn)
2931 		return ERR_PTR(-EINVAL);
2932 
2933 	/* find the host and remote ports to connect together */
2934 	spin_lock_irqsave(&nvme_fc_lock, flags);
2935 	list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
2936 		if (lport->localport.node_name != laddr.nn ||
2937 		    lport->localport.port_name != laddr.pn)
2938 			continue;
2939 
2940 		list_for_each_entry(rport, &lport->endp_list, endp_list) {
2941 			if (rport->remoteport.node_name != raddr.nn ||
2942 			    rport->remoteport.port_name != raddr.pn)
2943 				continue;
2944 
2945 			/* if we fail to get a reference, fall through. Will error. */
2946 			if (!nvme_fc_rport_get(rport))
2947 				break;
2948 
2949 			spin_unlock_irqrestore(&nvme_fc_lock, flags);
2950 
2951 			ctrl = nvme_fc_init_ctrl(dev, opts, lport, rport);
2952 			if (IS_ERR(ctrl))
2953 				nvme_fc_rport_put(rport);
2954 			return ctrl;
2955 		}
2956 	}
2957 	spin_unlock_irqrestore(&nvme_fc_lock, flags);
2958 
2959 	return ERR_PTR(-ENOENT);
2960 }
2961 
2962 
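/*
 * FC fabric transport registration: both the remote port (traddr) and
 * the local port (host_traddr) addresses are required so that the
 * matching lport/rport pair can be located in nvme_fc_create_ctrl().
 */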
2963 static struct nvmf_transport_ops nvme_fc_transport = {
2964 	.name		= "fc",
2965 	.required_opts	= NVMF_OPT_TRADDR | NVMF_OPT_HOST_TRADDR,
2966 	.allowed_opts	= NVMF_OPT_RECONNECT_DELAY,
2967 	.create_ctrl	= nvme_fc_create_ctrl,
2968 };
2969 
2970 static int __init nvme_fc_init_module(void)
2971 {
2972 	int ret;
2973 
2974 	nvme_fc_wq = create_workqueue("nvme_fc_wq");
2975 	if (!nvme_fc_wq)
2976 		return -ENOMEM;
2977 
2978 	ret = nvmf_register_transport(&nvme_fc_transport);
2979 	if (ret)
2980 		goto err;
2981 
2982 	return 0;
2983 err:
2984 	destroy_workqueue(nvme_fc_wq);
2985 	return ret;
2986 }
2987 
2988 static void __exit nvme_fc_exit_module(void)
2989 {
2990 	/* sanity check - all lports should be removed */
2991 	if (!list_empty(&nvme_fc_lport_list))
2992 		pr_warn("%s: localport list not empty\n", __func__);
2993 
2994 	nvmf_unregister_transport(&nvme_fc_transport);
2995 
2996 	destroy_workqueue(nvme_fc_wq);
2997 
2998 	ida_destroy(&nvme_fc_local_port_cnt);
2999 	ida_destroy(&nvme_fc_ctrl_cnt);
3000 }
3001 
3002 module_init(nvme_fc_init_module);
3003 module_exit(nvme_fc_exit_module);
3004 
3005 MODULE_LICENSE("GPL v2");
3006