// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016 Avago Technologies.  All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blk-mq.h>
#include <linux/parser.h>
#include <linux/random.h>
#include <uapi/scsi/fc/fc_fs.h>
#include <uapi/scsi/fc/fc_els.h>

#include "nvmet.h"
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>
#include "../host/fc.h"


/* *************************** Data Structures/Defines ****************** */


#define NVMET_LS_CTX_COUNT		256

struct nvmet_fc_tgtport;
struct nvmet_fc_tgt_assoc;

struct nvmet_fc_ls_iod {		/* for an LS RQST RCV */
	struct nvmefc_ls_rsp		*lsrsp;
	struct nvmefc_tgt_fcp_req	*fcpreq;	/* only if RS */

	struct list_head		ls_rcv_list; /* tgtport->ls_rcv_list */

	struct nvmet_fc_tgtport		*tgtport;
	struct nvmet_fc_tgt_assoc	*assoc;
	void				*hosthandle;

	union nvmefc_ls_requests	*rqstbuf;
	union nvmefc_ls_responses	*rspbuf;
	u16				rqstdatalen;
	dma_addr_t			rspdma;

	struct scatterlist		sg[2];

	struct work_struct		work;
} __aligned(sizeof(unsigned long long));

struct nvmet_fc_ls_req_op {		/* for an LS RQST XMT */
	struct nvmefc_ls_req		ls_req;

	struct nvmet_fc_tgtport		*tgtport;
	void				*hosthandle;

	int				ls_error;
	struct list_head		lsreq_list; /* tgtport->ls_req_list */
	bool				req_queued;
};


/* desired maximum for a single sequence - if sg list allows it */
#define NVMET_FC_MAX_SEQ_LENGTH		(256 * 1024)

enum nvmet_fcp_datadir {
	NVMET_FCP_NODATA,
	NVMET_FCP_WRITE,
	NVMET_FCP_READ,
	NVMET_FCP_ABORTED,
};

struct nvmet_fc_fcp_iod {
	struct nvmefc_tgt_fcp_req	*fcpreq;

	struct nvme_fc_cmd_iu		cmdiubuf;
	struct nvme_fc_ersp_iu		rspiubuf;
	dma_addr_t			rspdma;
	struct scatterlist		*next_sg;
	struct scatterlist		*data_sg;
	int				data_sg_cnt;
	u32				offset;
	enum nvmet_fcp_datadir		io_dir;
	bool				active;
	bool				abort;
	bool				aborted;
	bool				writedataactive;
	spinlock_t			flock;

	struct nvmet_req		req;
	struct work_struct		defer_work;

	struct nvmet_fc_tgtport		*tgtport;
	struct nvmet_fc_tgt_queue	*queue;

	struct list_head		fcp_list;	/* tgtport->fcp_list */
};

struct nvmet_fc_tgtport {
	struct nvmet_fc_target_port	fc_target_port;

	struct list_head		tgt_list; /* nvmet_fc_target_list */
	struct device			*dev;	/* dev for dma mapping */
	struct nvmet_fc_target_template	*ops;

	struct nvmet_fc_ls_iod		*iod;
	spinlock_t			lock;
	struct list_head		ls_rcv_list;
	struct list_head		ls_req_list;
	struct list_head		ls_busylist;
	struct list_head		assoc_list;
	struct list_head		host_list;
	struct ida			assoc_cnt;
	struct nvmet_fc_port_entry	*pe;
	struct kref			ref;
	u32				max_sg_cnt;

	struct work_struct		put_work;
};

struct nvmet_fc_port_entry {
	struct nvmet_fc_tgtport		*tgtport;
	struct nvmet_port		*port;
	u64				node_name;
	u64				port_name;
	struct list_head		pe_list;
};

struct nvmet_fc_defer_fcp_req {
	struct list_head		req_list;
	struct nvmefc_tgt_fcp_req	*fcp_req;
};

struct nvmet_fc_tgt_queue {
	bool				ninetypercent;
	u16				qid;
	u16				sqsize;
	u16				ersp_ratio;
	__le16				sqhd;
	atomic_t			connected;
	atomic_t			sqtail;
	atomic_t			zrspcnt;
	atomic_t			rsn;
	spinlock_t			qlock;
	struct nvmet_cq			nvme_cq;
	struct nvmet_sq			nvme_sq;
	struct nvmet_fc_tgt_assoc	*assoc;
	struct list_head		fod_list;
	struct list_head		pending_cmd_list;
	struct list_head		avail_defer_list;
	struct workqueue_struct		*work_q;
	struct kref			ref;
	struct rcu_head			rcu;
	struct nvmet_fc_fcp_iod		fod[];		/* array of fcp_iods */
} __aligned(sizeof(unsigned long long));

struct nvmet_fc_hostport {
	struct nvmet_fc_tgtport		*tgtport;
	void				*hosthandle;
	struct list_head		host_list;
	struct kref			ref;
	u8				invalid;
};

struct nvmet_fc_tgt_assoc {
	u64				association_id;
	u32				a_id;
	atomic_t			terminating;
	struct nvmet_fc_tgtport		*tgtport;
	struct nvmet_fc_hostport	*hostport;
	struct nvmet_fc_ls_iod		*rcv_disconn;
	struct list_head		a_list;
	struct nvmet_fc_tgt_queue	*queues[NVMET_NR_QUEUES + 1];
	struct kref			ref;
	struct work_struct		del_work;
	struct rcu_head			rcu;
};


static inline int
nvmet_fc_iodnum(struct nvmet_fc_ls_iod *iodptr)
{
	return (iodptr - iodptr->tgtport->iod);
}

static inline int
nvmet_fc_fodnum(struct nvmet_fc_fcp_iod *fodptr)
{
	return (fodptr - fodptr->queue->fod);
}


/*
 * Association and Connection IDs:
 *
 * An Association ID carries a random number in its upper 6 bytes and
 *   zero in its lower 2 bytes.
 *
 * A Connection ID is the Association ID with the QID OR'd into the
 *   lower 2 bytes.
 *
 * note: Association ID = Connection ID for queue 0
 */
#define BYTES_FOR_QID			sizeof(u16)
#define BYTES_FOR_QID_SHIFT		(BYTES_FOR_QID * 8)
#define NVMET_FC_QUEUEID_MASK		((u64)((1 << BYTES_FOR_QID_SHIFT) - 1))

static inline u64
nvmet_fc_makeconnid(struct nvmet_fc_tgt_assoc *assoc, u16 qid)
{
	return (assoc->association_id | qid);
}

static inline u64
nvmet_fc_getassociationid(u64 connectionid)
{
	return connectionid & ~NVMET_FC_QUEUEID_MASK;
}

static inline u16
nvmet_fc_getqueueid(u64 connectionid)
{
	return (u16)(connectionid & NVMET_FC_QUEUEID_MASK);
}
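/*
 * Worked example (illustrative values, not taken from any trace): with
 * an association_id of 0x1122334455660000 and qid 3,
 * nvmet_fc_makeconnid() returns 0x1122334455660003, from which
 * nvmet_fc_getassociationid() recovers 0x1122334455660000 and
 * nvmet_fc_getqueueid() recovers 3.
 */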

static inline struct nvmet_fc_tgtport *
targetport_to_tgtport(struct nvmet_fc_target_port *targetport)
{
	return container_of(targetport, struct nvmet_fc_tgtport,
				 fc_target_port);
}

static inline struct nvmet_fc_fcp_iod *
nvmet_req_to_fod(struct nvmet_req *nvme_req)
{
	return container_of(nvme_req, struct nvmet_fc_fcp_iod, req);
}


/* *************************** Globals **************************** */


static DEFINE_SPINLOCK(nvmet_fc_tgtlock);

static LIST_HEAD(nvmet_fc_target_list);
static DEFINE_IDA(nvmet_fc_tgtport_cnt);
static LIST_HEAD(nvmet_fc_portentry_list);


static void nvmet_fc_handle_ls_rqst_work(struct work_struct *work);
static void nvmet_fc_fcp_rqst_op_defer_work(struct work_struct *work);
static void nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc);
static int nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc);
static void nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue);
static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue);
static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport);
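/*
 * Note: the final tgtport put can trigger teardown, so
 * __nvmet_fc_finish_ls_req() below defers its reference drop to
 * nvmet_wq via put_work rather than calling nvmet_fc_tgtport_put()
 * inline from the LS completion path.
 */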
static void nvmet_fc_put_tgtport_work(struct work_struct *work)
{
	struct nvmet_fc_tgtport *tgtport =
		container_of(work, struct nvmet_fc_tgtport, put_work);

	nvmet_fc_tgtport_put(tgtport);
}
static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport);
static void nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
					struct nvmet_fc_fcp_iod *fod);
static void nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc);
static void nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport,
				struct nvmet_fc_ls_iod *iod);


/* *********************** FC-NVME DMA Handling **************************** */

/*
 * The fcloop device passes in a NULL device pointer. Real LLDDs will
 * pass in a valid device pointer. If NULL is passed to the dma mapping
 * routines, depending on the platform, it may or may not succeed, and
 * may crash.
 *
 * As such:
 * Wrap all the dma routines and check the dev pointer.
 *
 * For simple mappings (those that return just a dma address), we noop
 * them, returning a dma address of 0.
 *
 * On more complex mappings (dma_map_sg), a pseudo routine fills
 * in the scatter list, setting all dma addresses to 0.
 */

static inline dma_addr_t
fc_dma_map_single(struct device *dev, void *ptr, size_t size,
		enum dma_data_direction dir)
{
	return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L;
}

static inline int
fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dev ? dma_mapping_error(dev, dma_addr) : 0;
}

static inline void
fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
	enum dma_data_direction dir)
{
	if (dev)
		dma_unmap_single(dev, addr, size, dir);
}

static inline void
fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	if (dev)
		dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void
fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	if (dev)
		dma_sync_single_for_device(dev, addr, size, dir);
}

/* pseudo dma_map_sg call */
static int
fc_map_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	WARN_ON(nents == 0 || sg[0].length == 0);

	for_each_sg(sg, s, nents, i) {
		s->dma_address = 0L;
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		s->dma_length = s->length;
#endif
	}
	return nents;
}

static inline int
fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	return dev ? dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents);
}

static inline void
fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	if (dev)
		dma_unmap_sg(dev, sg, nents, dir);
}
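
/*
 * Illustrative note (assumption, not taken from this file): an LLDD
 * that registers via nvmet_fc_register_targetport() with its real
 * struct device resolves the wrappers above to the ordinary dma_*
 * calls, while the fcloop test driver registers with a NULL device
 * and takes the noop/fc_map_sg() paths instead.
 */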


/* ********************** FC-NVME LS XMT Handling ************************* */


static void
__nvmet_fc_finish_ls_req(struct nvmet_fc_ls_req_op *lsop)
{
	struct nvmet_fc_tgtport *tgtport = lsop->tgtport;
	struct nvmefc_ls_req *lsreq = &lsop->ls_req;
	unsigned long flags;

	spin_lock_irqsave(&tgtport->lock, flags);

	if (!lsop->req_queued) {
		spin_unlock_irqrestore(&tgtport->lock, flags);
		goto out_putwork;
	}

	list_del(&lsop->lsreq_list);

	lsop->req_queued = false;

	spin_unlock_irqrestore(&tgtport->lock, flags);

	fc_dma_unmap_single(tgtport->dev, lsreq->rqstdma,
				  (lsreq->rqstlen + lsreq->rsplen),
				  DMA_BIDIRECTIONAL);

out_putwork:
	queue_work(nvmet_wq, &tgtport->put_work);
}

static int
__nvmet_fc_send_ls_req(struct nvmet_fc_tgtport *tgtport,
		struct nvmet_fc_ls_req_op *lsop,
		void (*done)(struct nvmefc_ls_req *req, int status))
{
	struct nvmefc_ls_req *lsreq = &lsop->ls_req;
	unsigned long flags;
	int ret = 0;

	if (!tgtport->ops->ls_req)
		return -EOPNOTSUPP;

	if (!nvmet_fc_tgtport_get(tgtport))
		return -ESHUTDOWN;

	lsreq->done = done;
	lsop->req_queued = false;
	INIT_LIST_HEAD(&lsop->lsreq_list);

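	/* rqst and rsp are mapped as one region; rspdma is an offset into it */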
	lsreq->rqstdma = fc_dma_map_single(tgtport->dev, lsreq->rqstaddr,
				  lsreq->rqstlen + lsreq->rsplen,
				  DMA_BIDIRECTIONAL);
	if (fc_dma_mapping_error(tgtport->dev, lsreq->rqstdma)) {
		ret = -EFAULT;
		goto out_puttgtport;
	}
	lsreq->rspdma = lsreq->rqstdma + lsreq->rqstlen;

	spin_lock_irqsave(&tgtport->lock, flags);

	list_add_tail(&lsop->lsreq_list, &tgtport->ls_req_list);

	lsop->req_queued = true;

	spin_unlock_irqrestore(&tgtport->lock, flags);

	ret = tgtport->ops->ls_req(&tgtport->fc_target_port, lsop->hosthandle,
				   lsreq);
	if (ret)
		goto out_unlink;

	return 0;

out_unlink:
	lsop->ls_error = ret;
	spin_lock_irqsave(&tgtport->lock, flags);
	lsop->req_queued = false;
	list_del(&lsop->lsreq_list);
	spin_unlock_irqrestore(&tgtport->lock, flags);
	fc_dma_unmap_single(tgtport->dev, lsreq->rqstdma,
				  (lsreq->rqstlen + lsreq->rsplen),
				  DMA_BIDIRECTIONAL);
out_puttgtport:
	nvmet_fc_tgtport_put(tgtport);

	return ret;
}

static int
nvmet_fc_send_ls_req_async(struct nvmet_fc_tgtport *tgtport,
		struct nvmet_fc_ls_req_op *lsop,
		void (*done)(struct nvmefc_ls_req *req, int status))
{
	/* don't wait for completion */

	return __nvmet_fc_send_ls_req(tgtport, lsop, done);
}

static void
nvmet_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status)
{
	struct nvmet_fc_ls_req_op *lsop =
		container_of(lsreq, struct nvmet_fc_ls_req_op, ls_req);

	__nvmet_fc_finish_ls_req(lsop);

	/* fc-nvme target doesn't care about success or failure of cmd */

	kfree(lsop);
}

/*
 * This routine sends a FC-NVME LS to disconnect (aka terminate)
 * the FC-NVME Association.  Terminating the association also
 * terminates the FC-NVME connections (per queue, both admin and io
 * queues) that are part of the association. E.g. things are torn
 * down, and the related FC-NVME Association ID and Connection IDs
 * become invalid.
 *
 * The behavior of the fc-nvme target is such that its
 * understanding of the association and connections will implicitly
 * be torn down. The action is implicit as it may be due to a loss of
 * connectivity with the fc-nvme host, so the target may never get a
 * response even if it tried.  As such, the action of this routine
 * is to asynchronously send the LS, ignore any results of the LS, and
 * continue on with terminating the association. If the fc-nvme host
 * is present and receives the LS, it too can tear down.
 */
static void
nvmet_fc_xmt_disconnect_assoc(struct nvmet_fc_tgt_assoc *assoc)
{
	struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
	struct fcnvme_ls_disconnect_assoc_rqst *discon_rqst;
	struct fcnvme_ls_disconnect_assoc_acc *discon_acc;
	struct nvmet_fc_ls_req_op *lsop;
	struct nvmefc_ls_req *lsreq;
	int ret;

	/*
	 * If ls_req is NULL or no hosthandle, it's an older lldd and no
	 * message is normal. Otherwise, send unless the hostport has
	 * already been invalidated by the lldd.
	 */
	if (!tgtport->ops->ls_req || !assoc->hostport ||
	    assoc->hostport->invalid)
		return;

	lsop = kzalloc((sizeof(*lsop) +
			sizeof(*discon_rqst) + sizeof(*discon_acc) +
			tgtport->ops->lsrqst_priv_sz), GFP_KERNEL);
	if (!lsop) {
		dev_info(tgtport->dev,
			"{%d:%d} send Disconnect Association failed: ENOMEM\n",
			tgtport->fc_target_port.port_num, assoc->a_id);
		return;
	}

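	/*
	 * Carve the single allocation above into, in order: the lsop
	 * itself, the disconnect request, the disconnect accept, and
	 * (optionally) the LLDD's private LS request area.
	 */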
	discon_rqst = (struct fcnvme_ls_disconnect_assoc_rqst *)&lsop[1];
	discon_acc = (struct fcnvme_ls_disconnect_assoc_acc *)&discon_rqst[1];
	lsreq = &lsop->ls_req;
	if (tgtport->ops->lsrqst_priv_sz)
		lsreq->private = (void *)&discon_acc[1];
	else
		lsreq->private = NULL;

	lsop->tgtport = tgtport;
	lsop->hosthandle = assoc->hostport->hosthandle;

	nvmefc_fmt_lsreq_discon_assoc(lsreq, discon_rqst, discon_acc,
				assoc->association_id);

	ret = nvmet_fc_send_ls_req_async(tgtport, lsop,
				nvmet_fc_disconnect_assoc_done);
	if (ret) {
		dev_info(tgtport->dev,
			"{%d:%d} XMT Disconnect Association failed: %d\n",
			tgtport->fc_target_port.port_num, assoc->a_id, ret);
		kfree(lsop);
	}
}


/* *********************** FC-NVME Port Management ************************ */


static int
nvmet_fc_alloc_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
{
	struct nvmet_fc_ls_iod *iod;
	int i;

	iod = kcalloc(NVMET_LS_CTX_COUNT, sizeof(struct nvmet_fc_ls_iod),
			GFP_KERNEL);
	if (!iod)
		return -ENOMEM;

	tgtport->iod = iod;

	for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) {
		INIT_WORK(&iod->work, nvmet_fc_handle_ls_rqst_work);
		iod->tgtport = tgtport;
		list_add_tail(&iod->ls_rcv_list, &tgtport->ls_rcv_list);

		iod->rqstbuf = kzalloc(sizeof(union nvmefc_ls_requests) +
				       sizeof(union nvmefc_ls_responses),
				       GFP_KERNEL);
		if (!iod->rqstbuf)
			goto out_fail;

		iod->rspbuf = (union nvmefc_ls_responses *)&iod->rqstbuf[1];

		iod->rspdma = fc_dma_map_single(tgtport->dev, iod->rspbuf,
						sizeof(*iod->rspbuf),
						DMA_TO_DEVICE);
		if (fc_dma_mapping_error(tgtport->dev, iod->rspdma))
			goto out_fail;
	}

	return 0;

out_fail:
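	/*
	 * Unwind: free the buffer of the entry that failed, then walk
	 * back over the fully set-up entries, unmapping and freeing each.
	 */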
	kfree(iod->rqstbuf);
	list_del(&iod->ls_rcv_list);
	for (iod--, i--; i >= 0; iod--, i--) {
		fc_dma_unmap_single(tgtport->dev, iod->rspdma,
				sizeof(*iod->rspbuf), DMA_TO_DEVICE);
		kfree(iod->rqstbuf);
		list_del(&iod->ls_rcv_list);
	}

	kfree(iod);

	return -EFAULT;
}

static void
nvmet_fc_free_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
{
	struct nvmet_fc_ls_iod *iod = tgtport->iod;
	int i;

	for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) {
		fc_dma_unmap_single(tgtport->dev,
				iod->rspdma, sizeof(*iod->rspbuf),
				DMA_TO_DEVICE);
		kfree(iod->rqstbuf);
		list_del(&iod->ls_rcv_list);
	}
	kfree(tgtport->iod);
}

static struct nvmet_fc_ls_iod *
nvmet_fc_alloc_ls_iod(struct nvmet_fc_tgtport *tgtport)
{
	struct nvmet_fc_ls_iod *iod;
	unsigned long flags;

	spin_lock_irqsave(&tgtport->lock, flags);
	iod = list_first_entry_or_null(&tgtport->ls_rcv_list,
					struct nvmet_fc_ls_iod, ls_rcv_list);
	if (iod)
		list_move_tail(&iod->ls_rcv_list, &tgtport->ls_busylist);
	spin_unlock_irqrestore(&tgtport->lock, flags);
	return iod;
}


static void
nvmet_fc_free_ls_iod(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_ls_iod *iod)
{
	unsigned long flags;

	spin_lock_irqsave(&tgtport->lock, flags);
	list_move(&iod->ls_rcv_list, &tgtport->ls_rcv_list);
	spin_unlock_irqrestore(&tgtport->lock, flags);
}

static void
nvmet_fc_prep_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
				struct nvmet_fc_tgt_queue *queue)
{
	struct nvmet_fc_fcp_iod *fod = queue->fod;
	int i;

	for (i = 0; i < queue->sqsize; fod++, i++) {
		INIT_WORK(&fod->defer_work, nvmet_fc_fcp_rqst_op_defer_work);
		fod->tgtport = tgtport;
		fod->queue = queue;
		fod->active = false;
		fod->abort = false;
		fod->aborted = false;
		fod->fcpreq = NULL;
		list_add_tail(&fod->fcp_list, &queue->fod_list);
		spin_lock_init(&fod->flock);

		fod->rspdma = fc_dma_map_single(tgtport->dev, &fod->rspiubuf,
					sizeof(fod->rspiubuf), DMA_TO_DEVICE);
		if (fc_dma_mapping_error(tgtport->dev, fod->rspdma)) {
			list_del(&fod->fcp_list);
			for (fod--, i--; i >= 0; fod--, i--) {
				fc_dma_unmap_single(tgtport->dev, fod->rspdma,
						sizeof(fod->rspiubuf),
						DMA_TO_DEVICE);
				fod->rspdma = 0L;
				list_del(&fod->fcp_list);
			}

			return;
		}
	}
}

static void
nvmet_fc_destroy_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
				struct nvmet_fc_tgt_queue *queue)
{
	struct nvmet_fc_fcp_iod *fod = queue->fod;
	int i;

	for (i = 0; i < queue->sqsize; fod++, i++) {
		if (fod->rspdma)
			fc_dma_unmap_single(tgtport->dev, fod->rspdma,
				sizeof(fod->rspiubuf), DMA_TO_DEVICE);
	}
}

static struct nvmet_fc_fcp_iod *
nvmet_fc_alloc_fcp_iod(struct nvmet_fc_tgt_queue *queue)
{
	struct nvmet_fc_fcp_iod *fod;

	lockdep_assert_held(&queue->qlock);

	fod = list_first_entry_or_null(&queue->fod_list,
					struct nvmet_fc_fcp_iod, fcp_list);
	if (fod) {
		list_del(&fod->fcp_list);
		fod->active = true;
		/*
		 * no queue reference is taken, as it was taken by the
		 * queue lookup just prior to the allocation. The iod
		 * will "inherit" that reference.
		 */
	}
	return fod;
}


static void
nvmet_fc_queue_fcp_req(struct nvmet_fc_tgtport *tgtport,
		       struct nvmet_fc_tgt_queue *queue,
		       struct nvmefc_tgt_fcp_req *fcpreq)
{
	struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;

	/*
	 * put all admin cmds on hw queue id 0. All io commands go to
	 * the respective hw queue based on a modulo basis
	 */
	fcpreq->hwqid = queue->qid ?
			((queue->qid - 1) % tgtport->ops->max_hw_queues) : 0;
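	/* e.g. with max_hw_queues == 4: qid 1 -> hwqid 0, qid 4 -> 3, qid 5 -> 0 */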

	nvmet_fc_handle_fcp_rqst(tgtport, fod);
}

static void
nvmet_fc_fcp_rqst_op_defer_work(struct work_struct *work)
{
	struct nvmet_fc_fcp_iod *fod =
		container_of(work, struct nvmet_fc_fcp_iod, defer_work);

	/* Submit deferred IO for processing */
	nvmet_fc_queue_fcp_req(fod->tgtport, fod->queue, fod->fcpreq);

}

static void
nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue,
			struct nvmet_fc_fcp_iod *fod)
{
	struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
	struct nvmet_fc_tgtport *tgtport = fod->tgtport;
	struct nvmet_fc_defer_fcp_req *deferfcp;
	unsigned long flags;

	fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma,
				sizeof(fod->rspiubuf), DMA_TO_DEVICE);

	fcpreq->nvmet_fc_private = NULL;

	fod->active = false;
	fod->abort = false;
	fod->aborted = false;
	fod->writedataactive = false;
	fod->fcpreq = NULL;

	tgtport->ops->fcp_req_release(&tgtport->fc_target_port, fcpreq);

	/* release the queue lookup reference on the completed IO */
	nvmet_fc_tgt_q_put(queue);

	spin_lock_irqsave(&queue->qlock, flags);
	deferfcp = list_first_entry_or_null(&queue->pending_cmd_list,
				struct nvmet_fc_defer_fcp_req, req_list);
	if (!deferfcp) {
		list_add_tail(&fod->fcp_list, &fod->queue->fod_list);
		spin_unlock_irqrestore(&queue->qlock, flags);
		return;
	}

	/* Re-use the fod for the next pending cmd that was deferred */
	list_del(&deferfcp->req_list);

	fcpreq = deferfcp->fcp_req;

	/* deferfcp can be reused for another IO at a later date */
	list_add_tail(&deferfcp->req_list, &queue->avail_defer_list);

	spin_unlock_irqrestore(&queue->qlock, flags);

	/* Save NVME CMD IO in fod */
	memcpy(&fod->cmdiubuf, fcpreq->rspaddr, fcpreq->rsplen);

	/* Setup new fcpreq to be processed */
	fcpreq->rspaddr = NULL;
	fcpreq->rsplen  = 0;
	fcpreq->nvmet_fc_private = fod;
	fod->fcpreq = fcpreq;
	fod->active = true;

	/* inform LLDD IO is now being processed */
	tgtport->ops->defer_rcv(&tgtport->fc_target_port, fcpreq);

	/*
	 * Leave the queue lookup get reference taken when
	 * fod was originally allocated.
	 */

	queue_work(queue->work_q, &fod->defer_work);
}

static struct nvmet_fc_tgt_queue *
nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
			u16 qid, u16 sqsize)
{
	struct nvmet_fc_tgt_queue *queue;
	int ret;

	if (qid > NVMET_NR_QUEUES)
		return NULL;

	queue = kzalloc(struct_size(queue, fod, sqsize), GFP_KERNEL);
	if (!queue)
		return NULL;

	queue->work_q = alloc_workqueue("ntfc%d.%d.%d", 0, 0,
				assoc->tgtport->fc_target_port.port_num,
				assoc->a_id, qid);
	if (!queue->work_q)
		goto out_free_queue;

	queue->qid = qid;
	queue->sqsize = sqsize;
	queue->assoc = assoc;
	INIT_LIST_HEAD(&queue->fod_list);
	INIT_LIST_HEAD(&queue->avail_defer_list);
	INIT_LIST_HEAD(&queue->pending_cmd_list);
	atomic_set(&queue->connected, 0);
	atomic_set(&queue->sqtail, 0);
	atomic_set(&queue->rsn, 1);
	atomic_set(&queue->zrspcnt, 0);
	spin_lock_init(&queue->qlock);
	kref_init(&queue->ref);

	nvmet_fc_prep_fcp_iodlist(assoc->tgtport, queue);

	ret = nvmet_sq_init(&queue->nvme_sq);
	if (ret)
		goto out_fail_iodlist;

	WARN_ON(assoc->queues[qid]);
	assoc->queues[qid] = queue;

	return queue;

out_fail_iodlist:
	nvmet_fc_destroy_fcp_iodlist(assoc->tgtport, queue);
	destroy_workqueue(queue->work_q);
out_free_queue:
	kfree(queue);
	return NULL;
}


static void
nvmet_fc_tgt_queue_free(struct kref *ref)
{
	struct nvmet_fc_tgt_queue *queue =
		container_of(ref, struct nvmet_fc_tgt_queue, ref);

	nvmet_fc_destroy_fcp_iodlist(queue->assoc->tgtport, queue);

	destroy_workqueue(queue->work_q);

	kfree_rcu(queue, rcu);
}

static void
nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue)
{
	kref_put(&queue->ref, nvmet_fc_tgt_queue_free);
}

static int
nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue)
{
	return kref_get_unless_zero(&queue->ref);
}


static void
nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue)
{
	struct nvmet_fc_tgtport *tgtport = queue->assoc->tgtport;
	struct nvmet_fc_fcp_iod *fod = queue->fod;
	struct nvmet_fc_defer_fcp_req *deferfcp, *tempptr;
	unsigned long flags;
	int i;
	bool disconnect;

	disconnect = atomic_xchg(&queue->connected, 0);

	/* if not connected, nothing to do */
	if (!disconnect)
		return;

	spin_lock_irqsave(&queue->qlock, flags);
	/* abort outstanding io's */
	for (i = 0; i < queue->sqsize; fod++, i++) {
		if (fod->active) {
			spin_lock(&fod->flock);
			fod->abort = true;
			/*
			 * only call lldd abort routine if waiting for
			 * writedata. other outstanding ops should finish
			 * on their own.
			 */
			if (fod->writedataactive) {
				fod->aborted = true;
				spin_unlock(&fod->flock);
				tgtport->ops->fcp_abort(
					&tgtport->fc_target_port, fod->fcpreq);
			} else
				spin_unlock(&fod->flock);
		}
	}

	/* Cleanup defer'ed IOs in queue */
	list_for_each_entry_safe(deferfcp, tempptr, &queue->avail_defer_list,
				req_list) {
		list_del(&deferfcp->req_list);
		kfree(deferfcp);
	}

	for (;;) {
		deferfcp = list_first_entry_or_null(&queue->pending_cmd_list,
				struct nvmet_fc_defer_fcp_req, req_list);
		if (!deferfcp)
			break;

		list_del(&deferfcp->req_list);
		spin_unlock_irqrestore(&queue->qlock, flags);

		tgtport->ops->defer_rcv(&tgtport->fc_target_port,
				deferfcp->fcp_req);

		tgtport->ops->fcp_abort(&tgtport->fc_target_port,
				deferfcp->fcp_req);

		tgtport->ops->fcp_req_release(&tgtport->fc_target_port,
				deferfcp->fcp_req);

		/* release the queue lookup reference */
		nvmet_fc_tgt_q_put(queue);

		kfree(deferfcp);

		spin_lock_irqsave(&queue->qlock, flags);
	}
	spin_unlock_irqrestore(&queue->qlock, flags);

	flush_workqueue(queue->work_q);

	nvmet_sq_destroy(&queue->nvme_sq);

	nvmet_fc_tgt_q_put(queue);
}

static struct nvmet_fc_tgt_queue *
nvmet_fc_find_target_queue(struct nvmet_fc_tgtport *tgtport,
				u64 connection_id)
{
	struct nvmet_fc_tgt_assoc *assoc;
	struct nvmet_fc_tgt_queue *queue;
	u64 association_id = nvmet_fc_getassociationid(connection_id);
	u16 qid = nvmet_fc_getqueueid(connection_id);

	if (qid > NVMET_NR_QUEUES)
		return NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
		if (association_id == assoc->association_id) {
			queue = assoc->queues[qid];
			if (queue &&
			    (!atomic_read(&queue->connected) ||
			     !nvmet_fc_tgt_q_get(queue)))
				queue = NULL;
			rcu_read_unlock();
			return queue;
		}
	}
	rcu_read_unlock();
	return NULL;
}

static void
nvmet_fc_hostport_free(struct kref *ref)
{
	struct nvmet_fc_hostport *hostport =
		container_of(ref, struct nvmet_fc_hostport, ref);
	struct nvmet_fc_tgtport *tgtport = hostport->tgtport;
	unsigned long flags;

	spin_lock_irqsave(&tgtport->lock, flags);
	list_del(&hostport->host_list);
	spin_unlock_irqrestore(&tgtport->lock, flags);
	if (tgtport->ops->host_release && hostport->invalid)
		tgtport->ops->host_release(hostport->hosthandle);
	kfree(hostport);
	nvmet_fc_tgtport_put(tgtport);
}

static void
nvmet_fc_hostport_put(struct nvmet_fc_hostport *hostport)
{
	kref_put(&hostport->ref, nvmet_fc_hostport_free);
}

static int
nvmet_fc_hostport_get(struct nvmet_fc_hostport *hostport)
{
	return kref_get_unless_zero(&hostport->ref);
}

static void
nvmet_fc_free_hostport(struct nvmet_fc_hostport *hostport)
{
	/* if LLDD not implemented, leave as NULL */
	if (!hostport || !hostport->hosthandle)
		return;

	nvmet_fc_hostport_put(hostport);
}

static struct nvmet_fc_hostport *
nvmet_fc_match_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
{
	struct nvmet_fc_hostport *host;

	lockdep_assert_held(&tgtport->lock);

	list_for_each_entry(host, &tgtport->host_list, host_list) {
		if (host->hosthandle == hosthandle && !host->invalid) {
			if (nvmet_fc_hostport_get(host))
				return (host);
		}
	}

	return NULL;
}

static struct nvmet_fc_hostport *
nvmet_fc_alloc_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
{
	struct nvmet_fc_hostport *newhost, *match = NULL;
	unsigned long flags;

	/* if LLDD not implemented, leave as NULL */
	if (!hosthandle)
		return NULL;

	/*
	 * take reference for what will be the newly allocated hostport if
	 * we end up using a new allocation
	 */
	if (!nvmet_fc_tgtport_get(tgtport))
		return ERR_PTR(-EINVAL);

	spin_lock_irqsave(&tgtport->lock, flags);
	match = nvmet_fc_match_hostport(tgtport, hosthandle);
	spin_unlock_irqrestore(&tgtport->lock, flags);

	if (match) {
		/* no new allocation - release reference */
		nvmet_fc_tgtport_put(tgtport);
		return match;
	}

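	/*
	 * Allocate outside the lock, then recheck under the lock: another
	 * thread may have added the same hosthandle while the lock was
	 * dropped.
	 */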
10680d8ddeeaSAmit Engel 	newhost = kzalloc(sizeof(*newhost), GFP_KERNEL);
10690d8ddeeaSAmit Engel 	if (!newhost) {
10700d8ddeeaSAmit Engel 		/* no new allocation - release reference */
10710d8ddeeaSAmit Engel 		nvmet_fc_tgtport_put(tgtport);
10720d8ddeeaSAmit Engel 		return ERR_PTR(-ENOMEM);
10730d8ddeeaSAmit Engel 	}
10740d8ddeeaSAmit Engel 
10750d8ddeeaSAmit Engel 	spin_lock_irqsave(&tgtport->lock, flags);
10760d8ddeeaSAmit Engel 	match = nvmet_fc_match_hostport(tgtport, hosthandle);
10770d8ddeeaSAmit Engel 	if (match) {
10780d8ddeeaSAmit Engel 		/* new allocation not needed */
10790d8ddeeaSAmit Engel 		kfree(newhost);
10800d8ddeeaSAmit Engel 		newhost = match;
10810d8ddeeaSAmit Engel 	} else {
108258ab8ff9SJames Smart 		newhost->tgtport = tgtport;
108358ab8ff9SJames Smart 		newhost->hosthandle = hosthandle;
108458ab8ff9SJames Smart 		INIT_LIST_HEAD(&newhost->host_list);
108558ab8ff9SJames Smart 		kref_init(&newhost->ref);
108658ab8ff9SJames Smart 
108758ab8ff9SJames Smart 		list_add_tail(&newhost->host_list, &tgtport->host_list);
10880d8ddeeaSAmit Engel 	}
108958ab8ff9SJames Smart 	spin_unlock_irqrestore(&tgtport->lock, flags);
109058ab8ff9SJames Smart 
10910d8ddeeaSAmit Engel 	return newhost;
109258ab8ff9SJames Smart }
109358ab8ff9SJames Smart 
109458ab8ff9SJames Smart static void
1095*fad689fcSDaniel Wagner nvmet_fc_delete_assoc(struct nvmet_fc_tgt_assoc *assoc)
1096*fad689fcSDaniel Wagner {
1097*fad689fcSDaniel Wagner 	nvmet_fc_delete_target_assoc(assoc);
1098*fad689fcSDaniel Wagner 	nvmet_fc_tgt_a_put(assoc);
1099*fad689fcSDaniel Wagner }
1100*fad689fcSDaniel Wagner 
1101*fad689fcSDaniel Wagner static void
1102*fad689fcSDaniel Wagner nvmet_fc_delete_assoc_work(struct work_struct *work)
1103a96d4bd8SJames Smart {
1104a96d4bd8SJames Smart 	struct nvmet_fc_tgt_assoc *assoc =
1105a96d4bd8SJames Smart 		container_of(work, struct nvmet_fc_tgt_assoc, del_work);
1106*fad689fcSDaniel Wagner 	struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
1107a96d4bd8SJames Smart 
1108*fad689fcSDaniel Wagner 	nvmet_fc_delete_assoc(assoc);
1109*fad689fcSDaniel Wagner 	nvmet_fc_tgtport_put(tgtport);
1110*fad689fcSDaniel Wagner }
1111*fad689fcSDaniel Wagner 
1112*fad689fcSDaniel Wagner static void
1113*fad689fcSDaniel Wagner nvmet_fc_schedule_delete_assoc(struct nvmet_fc_tgt_assoc *assoc)
1114*fad689fcSDaniel Wagner {
1115*fad689fcSDaniel Wagner 	nvmet_fc_tgtport_get(assoc->tgtport);
1116*fad689fcSDaniel Wagner 	queue_work(nvmet_wq, &assoc->del_work);
1117a96d4bd8SJames Smart }
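
/*
 * Illustrative sketch (not part of the driver): the helper above takes
 * a tgtport reference *before* queueing so the work item cannot outlive
 * its parent; the matching put is at the tail of
 * nvmet_fc_delete_assoc_work(). A minimal equivalent, with hypothetical
 * demo_* names:
 */
struct demo_parent;
void demo_parent_get(struct demo_parent *p);
void demo_parent_put(struct demo_parent *p);

struct demo_child {
	struct demo_parent	*parent;
	struct work_struct	del_work;
};

void demo_child_destroy(struct demo_child *c);

static void demo_del_work(struct work_struct *work)
{
	struct demo_child *c = container_of(work, struct demo_child, del_work);
	struct demo_parent *p = c->parent;

	demo_child_destroy(c);		/* may free 'c' */
	demo_parent_put(p);		/* pairs with the get below */
}

static void demo_schedule_delete(struct demo_child *c)
{
	demo_parent_get(c->parent);	/* pin parent until the work runs */
	queue_work(system_wq, &c->del_work);
}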
1118a96d4bd8SJames Smart 
1119c5343203SJames Smart static struct nvmet_fc_tgt_assoc *
112058ab8ff9SJames Smart nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
1121c5343203SJames Smart {
1122c5343203SJames Smart 	struct nvmet_fc_tgt_assoc *assoc, *tmpassoc;
1123c5343203SJames Smart 	unsigned long flags;
1124c5343203SJames Smart 	u64 ran;
1125c5343203SJames Smart 	int idx;
1126c5343203SJames Smart 	bool needrandom = true;
1127c5343203SJames Smart 
1128399b70e8SDaniel Wagner 	if (!tgtport->pe)
1129399b70e8SDaniel Wagner 		return NULL;
1130399b70e8SDaniel Wagner 
1131c5343203SJames Smart 	assoc = kzalloc(sizeof(*assoc), GFP_KERNEL);
1132c5343203SJames Smart 	if (!assoc)
1133c5343203SJames Smart 		return NULL;
1134c5343203SJames Smart 
11356dd0f465SSagi Grimberg 	idx = ida_alloc(&tgtport->assoc_cnt, GFP_KERNEL);
1136c5343203SJames Smart 	if (idx < 0)
1137c5343203SJames Smart 		goto out_free_assoc;
1138c5343203SJames Smart 
1139c5343203SJames Smart 	if (!nvmet_fc_tgtport_get(tgtport))
114058ab8ff9SJames Smart 		goto out_ida;
114158ab8ff9SJames Smart 
114258ab8ff9SJames Smart 	assoc->hostport = nvmet_fc_alloc_hostport(tgtport, hosthandle);
114358ab8ff9SJames Smart 	if (IS_ERR(assoc->hostport))
114458ab8ff9SJames Smart 		goto out_put;
1145c5343203SJames Smart 
1146c5343203SJames Smart 	assoc->tgtport = tgtport;
1147c5343203SJames Smart 	assoc->a_id = idx;
1148c5343203SJames Smart 	INIT_LIST_HEAD(&assoc->a_list);
1149c5343203SJames Smart 	kref_init(&assoc->ref);
1150*fad689fcSDaniel Wagner 	INIT_WORK(&assoc->del_work, nvmet_fc_delete_assoc_work);
115147bf3241SJames Smart 	atomic_set(&assoc->terminating, 0);
1152c5343203SJames Smart 
1153c5343203SJames Smart 	while (needrandom) {
1154c5343203SJames Smart 		get_random_bytes(&ran, sizeof(ran) - BYTES_FOR_QID);
1155c5343203SJames Smart 		ran = ran << BYTES_FOR_QID_SHIFT;
1156c5343203SJames Smart 
1157c5343203SJames Smart 		spin_lock_irqsave(&tgtport->lock, flags);
1158c5343203SJames Smart 		needrandom = false;
1159e4fcc72cSJames Smart 		list_for_each_entry(tmpassoc, &tgtport->assoc_list, a_list) {
1160c5343203SJames Smart 			if (ran == tmpassoc->association_id) {
1161c5343203SJames Smart 				needrandom = true;
1162c5343203SJames Smart 				break;
1163c5343203SJames Smart 			}
1164e4fcc72cSJames Smart 		}
1165c5343203SJames Smart 		if (!needrandom) {
1166c5343203SJames Smart 			assoc->association_id = ran;
11674e2f02bfSLeonid Ravich 			list_add_tail_rcu(&assoc->a_list, &tgtport->assoc_list);
1168c5343203SJames Smart 		}
1169c5343203SJames Smart 		spin_unlock_irqrestore(&tgtport->lock, flags);
1170c5343203SJames Smart 	}
1171c5343203SJames Smart 
1172c5343203SJames Smart 	return assoc;
1173c5343203SJames Smart 
117458ab8ff9SJames Smart out_put:
117558ab8ff9SJames Smart 	nvmet_fc_tgtport_put(tgtport);
117658ab8ff9SJames Smart out_ida:
11776dd0f465SSagi Grimberg 	ida_free(&tgtport->assoc_cnt, idx);
1178c5343203SJames Smart out_free_assoc:
1179c5343203SJames Smart 	kfree(assoc);
1180c5343203SJames Smart 	return NULL;
1181c5343203SJames Smart }
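
/*
 * Illustrative sketch (not part of the driver): the loop above draws a
 * random value and shifts it left so the low-order BYTES_FOR_QID bytes
 * stay zero. Those bytes are reserved for the queue id, which lets
 * nvmet_fc_makeconnid() form a connection id without collisions,
 * assuming this layout:
 */
static inline u64 demo_makeconnid(u64 association_id, u16 qid)
{
	/* low bytes of the association id are guaranteed zero */
	return association_id | qid;
}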
1182c5343203SJames Smart 
1183c5343203SJames Smart static void
1184c5343203SJames Smart nvmet_fc_target_assoc_free(struct kref *ref)
1185c5343203SJames Smart {
1186c5343203SJames Smart 	struct nvmet_fc_tgt_assoc *assoc =
1187c5343203SJames Smart 		container_of(ref, struct nvmet_fc_tgt_assoc, ref);
1188c5343203SJames Smart 	struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
118947bf3241SJames Smart 	struct nvmet_fc_ls_iod	*oldls;
1190c5343203SJames Smart 	unsigned long flags;
1191ccd49addSDaniel Wagner 	int i;
1192ccd49addSDaniel Wagner 
1193ccd49addSDaniel Wagner 	for (i = NVMET_NR_QUEUES; i >= 0; i--) {
1194ccd49addSDaniel Wagner 		if (assoc->queues[i])
1195ccd49addSDaniel Wagner 			nvmet_fc_delete_target_queue(assoc->queues[i]);
1196ccd49addSDaniel Wagner 	}
1197c5343203SJames Smart 
119847bf3241SJames Smart 	/* Send Disconnect now that all i/o has completed */
119947bf3241SJames Smart 	nvmet_fc_xmt_disconnect_assoc(assoc);
120047bf3241SJames Smart 
120158ab8ff9SJames Smart 	nvmet_fc_free_hostport(assoc->hostport);
1202c5343203SJames Smart 	spin_lock_irqsave(&tgtport->lock, flags);
120347bf3241SJames Smart 	oldls = assoc->rcv_disconn;
1204c5343203SJames Smart 	spin_unlock_irqrestore(&tgtport->lock, flags);
120547bf3241SJames Smart 	/* if pending Rcv Disconnect Association LS, send rsp now */
120647bf3241SJames Smart 	if (oldls)
120747bf3241SJames Smart 		nvmet_fc_xmt_ls_rsp(tgtport, oldls);
12086dd0f465SSagi Grimberg 	ida_free(&tgtport->assoc_cnt, assoc->a_id);
120947bf3241SJames Smart 	dev_info(tgtport->dev,
121047bf3241SJames Smart 		"{%d:%d} Association freed\n",
121147bf3241SJames Smart 		tgtport->fc_target_port.port_num, assoc->a_id);
12124e2f02bfSLeonid Ravich 	kfree_rcu(assoc, rcu);
1213c5343203SJames Smart 	nvmet_fc_tgtport_put(tgtport);
1214c5343203SJames Smart }
1215c5343203SJames Smart 
1216c5343203SJames Smart static void
1217c5343203SJames Smart nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc)
1218c5343203SJames Smart {
1219c5343203SJames Smart 	kref_put(&assoc->ref, nvmet_fc_target_assoc_free);
1220c5343203SJames Smart }
1221c5343203SJames Smart 
1222c5343203SJames Smart static int
1223c5343203SJames Smart nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc)
1224c5343203SJames Smart {
1225c5343203SJames Smart 	return kref_get_unless_zero(&assoc->ref);
1226c5343203SJames Smart }
1227c5343203SJames Smart 
1228c5343203SJames Smart static void
1229c5343203SJames Smart nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc)
1230c5343203SJames Smart {
1231c5343203SJames Smart 	struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
1232ccd49addSDaniel Wagner 	unsigned long flags;
123347bf3241SJames Smart 	int i, terminating;
123447bf3241SJames Smart 
123547bf3241SJames Smart 	terminating = atomic_xchg(&assoc->terminating, 1);
123647bf3241SJames Smart 
123747bf3241SJames Smart 	/* if already terminating, do nothing */
123847bf3241SJames Smart 	if (terminating)
123947bf3241SJames Smart 		return;
1240c5343203SJames Smart 
1241ccd49addSDaniel Wagner 	spin_lock_irqsave(&tgtport->lock, flags);
1242ccd49addSDaniel Wagner 	list_del_rcu(&assoc->a_list);
1243ccd49addSDaniel Wagner 	spin_unlock_irqrestore(&tgtport->lock, flags);
12444e2f02bfSLeonid Ravich 
1245ccd49addSDaniel Wagner 	synchronize_rcu();
1246ccd49addSDaniel Wagner 
1247ccd49addSDaniel Wagner 	/* ensure all in-flight I/Os have been processed */
1248deb61742SJames Smart 	for (i = NVMET_NR_QUEUES; i >= 0; i--) {
1249ccd49addSDaniel Wagner 		if (assoc->queues[i])
1250ccd49addSDaniel Wagner 			flush_workqueue(assoc->queues[i]->work_q);
1251c5343203SJames Smart 	}
1252c5343203SJames Smart 
125347bf3241SJames Smart 	dev_info(tgtport->dev,
125447bf3241SJames Smart 		"{%d:%d} Association deleted\n",
125547bf3241SJames Smart 		tgtport->fc_target_port.port_num, assoc->a_id);
1256c5343203SJames Smart }
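
/*
 * Illustrative sketch (not part of the driver): the atomic_xchg() above
 * makes teardown one-shot - every caller swaps 1 into 'terminating',
 * but only the caller that observed the old value 0 proceeds.
 */
static bool demo_claim_teardown(atomic_t *terminating)
{
	/* true for exactly one caller, false for all later ones */
	return atomic_xchg(terminating, 1) == 0;
}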
1257c5343203SJames Smart 
1258c5343203SJames Smart static struct nvmet_fc_tgt_assoc *
1259c5343203SJames Smart nvmet_fc_find_target_assoc(struct nvmet_fc_tgtport *tgtport,
1260c5343203SJames Smart 				u64 association_id)
1261c5343203SJames Smart {
1262c5343203SJames Smart 	struct nvmet_fc_tgt_assoc *assoc;
1263c5343203SJames Smart 	struct nvmet_fc_tgt_assoc *ret = NULL;
1264c5343203SJames Smart 
12654e2f02bfSLeonid Ravich 	rcu_read_lock();
12664e2f02bfSLeonid Ravich 	list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
1267c5343203SJames Smart 		if (association_id == assoc->association_id) {
1268c5343203SJames Smart 			ret = assoc;
126934efa232SJames Smart 			if (!nvmet_fc_tgt_a_get(assoc))
127034efa232SJames Smart 				ret = NULL;
1271c5343203SJames Smart 			break;
1272c5343203SJames Smart 		}
1273c5343203SJames Smart 	}
12744e2f02bfSLeonid Ravich 	rcu_read_unlock();
1275c5343203SJames Smart 
1276c5343203SJames Smart 	return ret;
1277c5343203SJames Smart }
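
/*
 * Illustrative sketch (not part of the driver): the RCU lookup pattern
 * used above. rcu_read_lock() only guarantees the entry's memory stays
 * valid until rcu_read_unlock(); kref_get_unless_zero() is what makes
 * the object safe to use afterwards, refusing entries already headed
 * for kfree_rcu(). The demo_* names are hypothetical.
 */
struct demo_node {
	struct list_head	list;
	u64			id;
	struct kref		ref;
};

static struct demo_node *demo_lookup(struct list_head *head, u64 id)
{
	struct demo_node *e, *found = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(e, head, list) {
		if (e->id == id) {
			if (kref_get_unless_zero(&e->ref))
				found = e;
			break;
		}
	}
	rcu_read_unlock();

	return found;
}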
1278c5343203SJames Smart 
1279ea96d649SJames Smart static void
1280ea96d649SJames Smart nvmet_fc_portentry_bind(struct nvmet_fc_tgtport *tgtport,
1281ea96d649SJames Smart 			struct nvmet_fc_port_entry *pe,
1282ea96d649SJames Smart 			struct nvmet_port *port)
1283ea96d649SJames Smart {
1284ea96d649SJames Smart 	lockdep_assert_held(&nvmet_fc_tgtlock);
1285ea96d649SJames Smart 
1286ea96d649SJames Smart 	pe->tgtport = tgtport;
1287ea96d649SJames Smart 	tgtport->pe = pe;
1288ea96d649SJames Smart 
1289ea96d649SJames Smart 	pe->port = port;
1290ea96d649SJames Smart 	port->priv = pe;
1291ea96d649SJames Smart 
1292ea96d649SJames Smart 	pe->node_name = tgtport->fc_target_port.node_name;
1293ea96d649SJames Smart 	pe->port_name = tgtport->fc_target_port.port_name;
1294ea96d649SJames Smart 	INIT_LIST_HEAD(&pe->pe_list);
1295ea96d649SJames Smart 
1296ea96d649SJames Smart 	list_add_tail(&pe->pe_list, &nvmet_fc_portentry_list);
1297ea96d649SJames Smart }
1298ea96d649SJames Smart 
1299ea96d649SJames Smart static void
1300ea96d649SJames Smart nvmet_fc_portentry_unbind(struct nvmet_fc_port_entry *pe)
1301ea96d649SJames Smart {
1302ea96d649SJames Smart 	unsigned long flags;
1303ea96d649SJames Smart 
1304ea96d649SJames Smart 	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
1305ea96d649SJames Smart 	if (pe->tgtport)
1306ea96d649SJames Smart 		pe->tgtport->pe = NULL;
1307ea96d649SJames Smart 	list_del(&pe->pe_list);
1308ea96d649SJames Smart 	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
1309ea96d649SJames Smart }
1310ea96d649SJames Smart 
1311ea96d649SJames Smart /*
1312ea96d649SJames Smart  * called when a targetport deregisters. Breaks the relationship
1313ea96d649SJames Smart  * with the nvmet port, but leaves the port_entry in place so that
1314ea96d649SJames Smart  * re-registration can resume operation.
1315ea96d649SJames Smart  */
1316ea96d649SJames Smart static void
1317ea96d649SJames Smart nvmet_fc_portentry_unbind_tgt(struct nvmet_fc_tgtport *tgtport)
1318ea96d649SJames Smart {
1319ea96d649SJames Smart 	struct nvmet_fc_port_entry *pe;
1320ea96d649SJames Smart 	unsigned long flags;
1321ea96d649SJames Smart 
1322ea96d649SJames Smart 	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
1323ea96d649SJames Smart 	pe = tgtport->pe;
1324ea96d649SJames Smart 	if (pe)
1325ea96d649SJames Smart 		pe->tgtport = NULL;
1326ea96d649SJames Smart 	tgtport->pe = NULL;
1327ea96d649SJames Smart 	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
1328ea96d649SJames Smart }
1329ea96d649SJames Smart 
1330ea96d649SJames Smart /*
1331ea96d649SJames Smart  * called when a new targetport is registered. Looks in the
1332ea96d649SJames Smart  * existing nvmet port_entries to see if the nvmet layer is
1333ea96d649SJames Smart  * configured for the targetport's WWNs (i.e. the targetport existed,
1334ea96d649SJames Smart  * nvmet was configured, the lldd unregistered the tgtport, and is now
1335ea96d649SJames Smart  * reregistering the same targetport).  If so, set the nvmet port
1336ea96d649SJames Smart  * entry on the targetport.
1337ea96d649SJames Smart  */
1338ea96d649SJames Smart static void
1339ea96d649SJames Smart nvmet_fc_portentry_rebind_tgt(struct nvmet_fc_tgtport *tgtport)
1340ea96d649SJames Smart {
1341ea96d649SJames Smart 	struct nvmet_fc_port_entry *pe;
1342ea96d649SJames Smart 	unsigned long flags;
1343ea96d649SJames Smart 
1344ea96d649SJames Smart 	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
1345ea96d649SJames Smart 	list_for_each_entry(pe, &nvmet_fc_portentry_list, pe_list) {
1346ea96d649SJames Smart 		if (tgtport->fc_target_port.node_name == pe->node_name &&
1347ea96d649SJames Smart 		    tgtport->fc_target_port.port_name == pe->port_name) {
1348ea96d649SJames Smart 			WARN_ON(pe->tgtport);
1349ea96d649SJames Smart 			tgtport->pe = pe;
1350ea96d649SJames Smart 			pe->tgtport = tgtport;
1351ea96d649SJames Smart 			break;
1352ea96d649SJames Smart 		}
1353ea96d649SJames Smart 	}
1354ea96d649SJames Smart 	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
1355ea96d649SJames Smart }
1356c5343203SJames Smart 
1357c5343203SJames Smart /**
1358b2fb99e4SChaitanya Kulkarni  * nvmet_fc_register_targetport - transport entry point called by an
1359c5343203SJames Smart  *                              LLDD to register the existence of a local
1360c5343203SJames Smart  *                              NVME subsystem FC port.
1361c5343203SJames Smart  * @pinfo:     pointer to information about the port to be registered
1362c5343203SJames Smart  * @template:  LLDD entrypoints and operational parameters for the port
1363c5343203SJames Smart  * @dev:       physical hardware device node port corresponds to. Will be
1364c5343203SJames Smart  *             used for DMA mappings
1365c5343203SJames Smart  * @portptr:   pointer to a local port pointer. Upon success, the routine
1366c5343203SJames Smart  *             will allocate a nvme_fc_local_port structure and place its
1367c5343203SJames Smart  *             address in the local port pointer. Upon failure, local port
1368c5343203SJames Smart  *             pointer will be set to NULL.
1369c5343203SJames Smart  *
1370c5343203SJames Smart  * Returns:
1371c5343203SJames Smart  * a completion status. Must be 0 upon success; a negative errno
1372c5343203SJames Smart  * (ex: -ENXIO) upon failure.
1373c5343203SJames Smart  */
1374c5343203SJames Smart int
1375c5343203SJames Smart nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
1376c5343203SJames Smart 			struct nvmet_fc_target_template *template,
1377c5343203SJames Smart 			struct device *dev,
1378c5343203SJames Smart 			struct nvmet_fc_target_port **portptr)
1379c5343203SJames Smart {
1380c5343203SJames Smart 	struct nvmet_fc_tgtport *newrec;
1381c5343203SJames Smart 	unsigned long flags;
1382c5343203SJames Smart 	int ret, idx;
1383c5343203SJames Smart 
1384c5343203SJames Smart 	if (!template->xmt_ls_rsp || !template->fcp_op ||
1385a97ec51bSJames Smart 	    !template->fcp_abort ||
138619b58d94SJames Smart 	    !template->fcp_req_release || !template->targetport_delete ||
1387c5343203SJames Smart 	    !template->max_hw_queues || !template->max_sgl_segments ||
1388c5343203SJames Smart 	    !template->max_dif_sgl_segments || !template->dma_boundary) {
1389c5343203SJames Smart 		ret = -EINVAL;
1390c5343203SJames Smart 		goto out_regtgt_failed;
1391c5343203SJames Smart 	}
1392c5343203SJames Smart 
1393c5343203SJames Smart 	newrec = kzalloc((sizeof(*newrec) + template->target_priv_sz),
1394c5343203SJames Smart 			 GFP_KERNEL);
1395c5343203SJames Smart 	if (!newrec) {
1396c5343203SJames Smart 		ret = -ENOMEM;
1397c5343203SJames Smart 		goto out_regtgt_failed;
1398c5343203SJames Smart 	}
1399c5343203SJames Smart 
14006dd0f465SSagi Grimberg 	idx = ida_alloc(&nvmet_fc_tgtport_cnt, GFP_KERNEL);
1401c5343203SJames Smart 	if (idx < 0) {
1402c5343203SJames Smart 		ret = -ENOSPC;
1403c5343203SJames Smart 		goto out_fail_kfree;
1404c5343203SJames Smart 	}
1405c5343203SJames Smart 
1406c5343203SJames Smart 	if (!get_device(dev) && dev) {
1407c5343203SJames Smart 		ret = -ENODEV;
1408c5343203SJames Smart 		goto out_ida_put;
1409c5343203SJames Smart 	}
1410c5343203SJames Smart 
1411c5343203SJames Smart 	newrec->fc_target_port.node_name = pinfo->node_name;
1412c5343203SJames Smart 	newrec->fc_target_port.port_name = pinfo->port_name;
1413f56bf76fSJames Smart 	if (template->target_priv_sz)
1414c5343203SJames Smart 		newrec->fc_target_port.private = &newrec[1];
1415f56bf76fSJames Smart 	else
1416f56bf76fSJames Smart 		newrec->fc_target_port.private = NULL;
1417c5343203SJames Smart 	newrec->fc_target_port.port_id = pinfo->port_id;
1418c5343203SJames Smart 	newrec->fc_target_port.port_num = idx;
1419c5343203SJames Smart 	INIT_LIST_HEAD(&newrec->tgt_list);
1420c5343203SJames Smart 	newrec->dev = dev;
1421c5343203SJames Smart 	newrec->ops = template;
1422c5343203SJames Smart 	spin_lock_init(&newrec->lock);
1423349c694eSJames Smart 	INIT_LIST_HEAD(&newrec->ls_rcv_list);
142447bf3241SJames Smart 	INIT_LIST_HEAD(&newrec->ls_req_list);
1425c5343203SJames Smart 	INIT_LIST_HEAD(&newrec->ls_busylist);
1426c5343203SJames Smart 	INIT_LIST_HEAD(&newrec->assoc_list);
142758ab8ff9SJames Smart 	INIT_LIST_HEAD(&newrec->host_list);
1428c5343203SJames Smart 	kref_init(&newrec->ref);
1429c5343203SJames Smart 	ida_init(&newrec->assoc_cnt);
1430d082dc15SJames Smart 	newrec->max_sg_cnt = template->max_sgl_segments;
1431eaf0971fSDaniel Wagner 	INIT_WORK(&newrec->put_work, nvmet_fc_put_tgtport_work);
1432c5343203SJames Smart 
1433c5343203SJames Smart 	ret = nvmet_fc_alloc_ls_iodlist(newrec);
1434c5343203SJames Smart 	if (ret) {
1435c5343203SJames Smart 		ret = -ENOMEM;
1436c5343203SJames Smart 		goto out_free_newrec;
1437c5343203SJames Smart 	}
1438c5343203SJames Smart 
1439ea96d649SJames Smart 	nvmet_fc_portentry_rebind_tgt(newrec);
1440ea96d649SJames Smart 
1441c5343203SJames Smart 	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
1442c5343203SJames Smart 	list_add_tail(&newrec->tgt_list, &nvmet_fc_target_list);
1443c5343203SJames Smart 	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
1444c5343203SJames Smart 
1445c5343203SJames Smart 	*portptr = &newrec->fc_target_port;
1446c5343203SJames Smart 	return 0;
1447c5343203SJames Smart 
1448c5343203SJames Smart out_free_newrec:
1449c5343203SJames Smart 	put_device(dev);
1450c5343203SJames Smart out_ida_put:
14516dd0f465SSagi Grimberg 	ida_free(&nvmet_fc_tgtport_cnt, idx);
1452c5343203SJames Smart out_fail_kfree:
1453c5343203SJames Smart 	kfree(newrec);
1454c5343203SJames Smart out_regtgt_failed:
1455c5343203SJames Smart 	*portptr = NULL;
1456c5343203SJames Smart 	return ret;
1457c5343203SJames Smart }
1458c5343203SJames Smart EXPORT_SYMBOL_GPL(nvmet_fc_register_targetport);
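
/*
 * Illustrative sketch (not part of the driver): how an LLDD might
 * register a target port. The mandatory template entries mirror the
 * validation at the top of nvmet_fc_register_targetport(); all lldd_*
 * names and the sizing values are hypothetical.
 */
struct lldd_port_priv { void *hw_ctx; };

void lldd_targetport_delete(struct nvmet_fc_target_port *tgtport);
int lldd_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport,
		    struct nvmefc_ls_rsp *lsrsp);
int lldd_fcp_op(struct nvmet_fc_target_port *tgtport,
		struct nvmefc_tgt_fcp_req *fcpreq);
void lldd_fcp_abort(struct nvmet_fc_target_port *tgtport,
		    struct nvmefc_tgt_fcp_req *fcpreq);
void lldd_fcp_req_release(struct nvmet_fc_target_port *tgtport,
			  struct nvmefc_tgt_fcp_req *fcpreq);

static struct nvmet_fc_target_template lldd_tgt_template = {
	.targetport_delete	= lldd_targetport_delete,
	.xmt_ls_rsp		= lldd_xmt_ls_rsp,
	.fcp_op			= lldd_fcp_op,
	.fcp_abort		= lldd_fcp_abort,
	.fcp_req_release	= lldd_fcp_req_release,
	.max_hw_queues		= 4,
	.max_sgl_segments	= 256,
	.max_dif_sgl_segments	= 256,
	.dma_boundary		= 0xFFFFFFFF,
	.target_priv_sz		= sizeof(struct lldd_port_priv),
};

static int lldd_register(struct device *dev, u64 wwnn, u64 wwpn, u32 port_id)
{
	struct nvmet_fc_port_info pinfo = {
		.node_name	= wwnn,
		.port_name	= wwpn,
		.port_id	= port_id,
	};
	struct nvmet_fc_target_port *tgtport;

	/* on success, tgtport must be kept for later LS/FCP calls */
	return nvmet_fc_register_targetport(&pinfo, &lldd_tgt_template,
					    dev, &tgtport);
}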
1459c5343203SJames Smart 
1460c5343203SJames Smart 
1461c5343203SJames Smart static void
1462c5343203SJames Smart nvmet_fc_free_tgtport(struct kref *ref)
1463c5343203SJames Smart {
1464c5343203SJames Smart 	struct nvmet_fc_tgtport *tgtport =
1465c5343203SJames Smart 		container_of(ref, struct nvmet_fc_tgtport, ref);
1466c5343203SJames Smart 	struct device *dev = tgtport->dev;
1467c5343203SJames Smart 	unsigned long flags;
1468c5343203SJames Smart 
1469c5343203SJames Smart 	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
1470c5343203SJames Smart 	list_del(&tgtport->tgt_list);
1471c5343203SJames Smart 	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
1472c5343203SJames Smart 
1473c5343203SJames Smart 	nvmet_fc_free_ls_iodlist(tgtport);
1474c5343203SJames Smart 
1475c5343203SJames Smart 	/* let the LLDD know we've finished tearing it down */
1476c5343203SJames Smart 	tgtport->ops->targetport_delete(&tgtport->fc_target_port);
1477c5343203SJames Smart 
14786dd0f465SSagi Grimberg 	ida_free(&nvmet_fc_tgtport_cnt,
1479c5343203SJames Smart 			tgtport->fc_target_port.port_num);
1480c5343203SJames Smart 
1481c5343203SJames Smart 	ida_destroy(&tgtport->assoc_cnt);
1482c5343203SJames Smart 
1483c5343203SJames Smart 	kfree(tgtport);
1484c5343203SJames Smart 
1485c5343203SJames Smart 	put_device(dev);
1486c5343203SJames Smart }
1487c5343203SJames Smart 
1488c5343203SJames Smart static void
1489c5343203SJames Smart nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport)
1490c5343203SJames Smart {
1491c5343203SJames Smart 	kref_put(&tgtport->ref, nvmet_fc_free_tgtport);
1492c5343203SJames Smart }
1493c5343203SJames Smart 
1494c5343203SJames Smart static int
1495c5343203SJames Smart nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport)
1496c5343203SJames Smart {
1497c5343203SJames Smart 	return kref_get_unless_zero(&tgtport->ref);
1498c5343203SJames Smart }
1499c5343203SJames Smart 
1500c5343203SJames Smart static void
1501c5343203SJames Smart __nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport)
1502c5343203SJames Smart {
15034e2f02bfSLeonid Ravich 	struct nvmet_fc_tgt_assoc *assoc;
1504c5343203SJames Smart 
15054e2f02bfSLeonid Ravich 	rcu_read_lock();
15064e2f02bfSLeonid Ravich 	list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
1507c5343203SJames Smart 		if (!nvmet_fc_tgt_a_get(assoc))
1508c5343203SJames Smart 			continue;
1509*fad689fcSDaniel Wagner 		nvmet_fc_schedule_delete_assoc(assoc);
151058ab8ff9SJames Smart 		nvmet_fc_tgt_a_put(assoc);
151158ab8ff9SJames Smart 	}
15124e2f02bfSLeonid Ravich 	rcu_read_unlock();
1513c5343203SJames Smart }
1514c5343203SJames Smart 
151572e6329fSJames Smart /**
151672e6329fSJames Smart  * nvmet_fc_invalidate_host - transport entry point called by an LLDD
151772e6329fSJames Smart  *                       to remove references to a hosthandle for LS's.
151872e6329fSJames Smart  *
151972e6329fSJames Smart  * The nvmet-fc layer ensures that any references to the hosthandle
152072e6329fSJames Smart  * on the targetport are forgotten (set to NULL).  The LLDD will
152172e6329fSJames Smart  * typically call this when a login with a remote host port has been
152272e6329fSJames Smart  * lost, thus LS's for the remote host port are no longer possible.
152372e6329fSJames Smart  *
152472e6329fSJames Smart  * If an LS request is outstanding to the targetport/hosthandle (or
152572e6329fSJames Smart  * issued concurrently with the call to invalidate the host), the
152672e6329fSJames Smart  * LLDD is responsible for terminating/aborting the LS and completing
152772e6329fSJames Smart  * the LS request. It is recommended that these terminations/aborts
152872e6329fSJames Smart  * occur after calling to invalidate the host handle to avoid additional
152972e6329fSJames Smart  * retries by the nvmet-fc transport. The nvmet-fc transport may
153072e6329fSJames Smart  * continue to reference the host handle while it cleans up outstanding
153172e6329fSJames Smart  * NVME associations. The nvmet-fc transport will call the
153272e6329fSJames Smart  * ops->host_release() callback to notify the LLDD that all references
153372e6329fSJames Smart  * are complete and the related host handle can be recovered.
153472e6329fSJames Smart  * Note: if there are no references, the callback may be called before
153572e6329fSJames Smart  * the invalidate host call returns.
153672e6329fSJames Smart  *
153772e6329fSJames Smart  * @target_port: pointer to the (registered) target port that a prior
153872e6329fSJames Smart  *              LS was received on and which supplied the transport the
153972e6329fSJames Smart  *              hosthandle.
154072e6329fSJames Smart  * @hosthandle: the handle (pointer) that represents the host port
154172e6329fSJames Smart  *              that no longer has connectivity and that LS's should
154272e6329fSJames Smart  *              no longer be directed to.
154372e6329fSJames Smart  */
154472e6329fSJames Smart void
154572e6329fSJames Smart nvmet_fc_invalidate_host(struct nvmet_fc_target_port *target_port,
154672e6329fSJames Smart 			void *hosthandle)
154772e6329fSJames Smart {
154858ab8ff9SJames Smart 	struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
154958ab8ff9SJames Smart 	struct nvmet_fc_tgt_assoc *assoc, *next;
155058ab8ff9SJames Smart 	unsigned long flags;
155158ab8ff9SJames Smart 	bool noassoc = true;
155258ab8ff9SJames Smart 
155358ab8ff9SJames Smart 	spin_lock_irqsave(&tgtport->lock, flags);
155458ab8ff9SJames Smart 	list_for_each_entry_safe(assoc, next,
155558ab8ff9SJames Smart 				&tgtport->assoc_list, a_list) {
155658ab8ff9SJames Smart 		if (!assoc->hostport ||
155758ab8ff9SJames Smart 		    assoc->hostport->hosthandle != hosthandle)
155858ab8ff9SJames Smart 			continue;
155958ab8ff9SJames Smart 		if (!nvmet_fc_tgt_a_get(assoc))
156058ab8ff9SJames Smart 			continue;
156158ab8ff9SJames Smart 		assoc->hostport->invalid = 1;
156258ab8ff9SJames Smart 		noassoc = false;
1563*fad689fcSDaniel Wagner 		nvmet_fc_schedule_delete_assoc(assoc);
156458ab8ff9SJames Smart 		nvmet_fc_tgt_a_put(assoc);
156558ab8ff9SJames Smart 	}
156658ab8ff9SJames Smart 	spin_unlock_irqrestore(&tgtport->lock, flags);
156758ab8ff9SJames Smart 
156858ab8ff9SJames Smart 	/* if there's nothing to wait for - call the callback */
156958ab8ff9SJames Smart 	if (noassoc && tgtport->ops->host_release)
157058ab8ff9SJames Smart 		tgtport->ops->host_release(hosthandle);
157172e6329fSJames Smart }
157272e6329fSJames Smart EXPORT_SYMBOL_GPL(nvmet_fc_invalidate_host);
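
/*
 * Illustrative sketch (not part of the driver): the LLDD-side sequence
 * the comment above recommends when a remote port login is lost. All
 * lldd_* names are hypothetical.
 */
struct lldd_rport {
	struct nvmet_fc_target_port	*tgtport;
	/* ... login state, exchange lists, etc ... */
};

void lldd_abort_outstanding_ls(struct lldd_rport *rport);

static void lldd_rport_logout(struct lldd_rport *rport)
{
	/* 1) tell nvmet-fc to forget the handle ... */
	nvmet_fc_invalidate_host(rport->tgtport, rport);

	/*
	 * 2) ... then terminate any LS's still in flight so nvmet-fc
	 *    does not retry against the dead login. The handle itself
	 *    is recycled only from ops->host_release(), once all
	 *    references are gone.
	 */
	lldd_abort_outstanding_ls(rport);
}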
157372e6329fSJames Smart 
1574c5343203SJames Smart /*
1575c5343203SJames Smart  * nvmet layer has called to terminate an association
1576c5343203SJames Smart  */
1577c5343203SJames Smart static void
1578c5343203SJames Smart nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
1579c5343203SJames Smart {
1580c5343203SJames Smart 	struct nvmet_fc_tgtport *tgtport, *next;
1581c5343203SJames Smart 	struct nvmet_fc_tgt_assoc *assoc;
1582c5343203SJames Smart 	struct nvmet_fc_tgt_queue *queue;
1583c5343203SJames Smart 	unsigned long flags;
1584c5343203SJames Smart 	bool found_ctrl = false;
1585c5343203SJames Smart 
1586c5343203SJames Smart 	/* this is a bit ugly, but don't want to make locks layered */
1587c5343203SJames Smart 	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
1588c5343203SJames Smart 	list_for_each_entry_safe(tgtport, next, &nvmet_fc_target_list,
1589c5343203SJames Smart 			tgt_list) {
1590c5343203SJames Smart 		if (!nvmet_fc_tgtport_get(tgtport))
1591c5343203SJames Smart 			continue;
1592c5343203SJames Smart 		spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
1593c5343203SJames Smart 
15944e2f02bfSLeonid Ravich 		rcu_read_lock();
15954e2f02bfSLeonid Ravich 		list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
1596ccd49addSDaniel Wagner 			queue = assoc->queues[0];
1597c5343203SJames Smart 			if (queue && queue->nvme_sq.ctrl == ctrl) {
1598c5343203SJames Smart 				if (nvmet_fc_tgt_a_get(assoc))
1599c5343203SJames Smart 					found_ctrl = true;
1600c5343203SJames Smart 				break;
1601c5343203SJames Smart 			}
1602c5343203SJames Smart 		}
16034e2f02bfSLeonid Ravich 		rcu_read_unlock();
1604c5343203SJames Smart 
1605c5343203SJames Smart 		nvmet_fc_tgtport_put(tgtport);
1606c5343203SJames Smart 
1607c5343203SJames Smart 		if (found_ctrl) {
1608*fad689fcSDaniel Wagner 			nvmet_fc_schedule_delete_assoc(assoc);
160958ab8ff9SJames Smart 			nvmet_fc_tgt_a_put(assoc);
1610c5343203SJames Smart 			return;
1611c5343203SJames Smart 		}
1612c5343203SJames Smart 
1613c5343203SJames Smart 		spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
1614c5343203SJames Smart 	}
1615c5343203SJames Smart 	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
1616c5343203SJames Smart }
1617c5343203SJames Smart 
1618c5343203SJames Smart /**
16190acb8231SChaitanya Kulkarni  * nvmet_fc_unregister_targetport - transport entry point called by an
1620c5343203SJames Smart  *                              LLDD to deregister/remove a previously
1621c5343203SJames Smart  *                              registered local NVME subsystem FC port.
16221c466527SBart Van Assche  * @target_port: pointer to the (registered) target port that is to be
1623c5343203SJames Smart  *               deregistered.
1624c5343203SJames Smart  *
1625c5343203SJames Smart  * Returns:
1626c5343203SJames Smart  * a completion status. Must be 0 upon success; a negative errno
1627c5343203SJames Smart  * (ex: -ENXIO) upon failure.
1628c5343203SJames Smart  */
1629c5343203SJames Smart int
1630c5343203SJames Smart nvmet_fc_unregister_targetport(struct nvmet_fc_target_port *target_port)
1631c5343203SJames Smart {
1632c5343203SJames Smart 	struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
1633c5343203SJames Smart 
1634ea96d649SJames Smart 	nvmet_fc_portentry_unbind_tgt(tgtport);
1635ea96d649SJames Smart 
1636c5343203SJames Smart 	/* terminate any outstanding associations */
1637c5343203SJames Smart 	__nvmet_fc_free_assocs(tgtport);
1638c5343203SJames Smart 
1639ccd49addSDaniel Wagner 	flush_workqueue(nvmet_wq);
1640ccd49addSDaniel Wagner 
164147bf3241SJames Smart 	/*
164247bf3241SJames Smart 	 * should terminate LS's as well. However, LS's will be generated
164347bf3241SJames Smart 	 * at the tail end of association termination, so they likely don't
164447bf3241SJames Smart 	 * exist yet. And even if they did, it's worthwhile to just let
164547bf3241SJames Smart 	 * them finish and targetport ref counting will clean things up.
164647bf3241SJames Smart 	 */
164747bf3241SJames Smart 
1648c5343203SJames Smart 	nvmet_fc_tgtport_put(tgtport);
1649c5343203SJames Smart 
1650c5343203SJames Smart 	return 0;
1651c5343203SJames Smart }
1652c5343203SJames Smart EXPORT_SYMBOL_GPL(nvmet_fc_unregister_targetport);
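
/*
 * Illustrative sketch (not part of the driver): LLDD-side teardown.
 * nvmet_fc_unregister_targetport() can return while references remain;
 * final cleanup is signalled via ops->targetport_delete(). One common
 * (hypothetical) way to sequence this:
 */
struct lldd_port {
	struct nvmet_fc_target_port	*tgtport;
	struct completion		delete_done;
};

/* the ops->targetport_delete() callback does: complete(&port->delete_done); */

static void lldd_unregister(struct lldd_port *port)
{
	nvmet_fc_unregister_targetport(port->tgtport);
	/* wait for the last reference before freeing port resources */
	wait_for_completion(&port->delete_done);
}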
1653c5343203SJames Smart 
1654c5343203SJames Smart 
165547bf3241SJames Smart /* ********************** FC-NVME LS RCV Handling ************************* */
1656c5343203SJames Smart 
1657c5343203SJames Smart 
1658c5343203SJames Smart static void
1659c5343203SJames Smart nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport,
1660c5343203SJames Smart 			struct nvmet_fc_ls_iod *iod)
1661c5343203SJames Smart {
16623b8281b0SJames Smart 	struct fcnvme_ls_cr_assoc_rqst *rqst = &iod->rqstbuf->rq_cr_assoc;
16633b8281b0SJames Smart 	struct fcnvme_ls_cr_assoc_acc *acc = &iod->rspbuf->rsp_cr_assoc;
1664c5343203SJames Smart 	struct nvmet_fc_tgt_queue *queue;
1665c5343203SJames Smart 	int ret = 0;
1666c5343203SJames Smart 
1667c5343203SJames Smart 	memset(acc, 0, sizeof(*acc));
1668c5343203SJames Smart 
16694cb7ca80SJames Smart 	/*
16704cb7ca80SJames Smart 	 * FC-NVME spec changes. Initiators send differing padding
16714cb7ca80SJames Smart 	 * lengths because the Create Association Cmd descriptor size
16724cb7ca80SJames Smart 	 * in early specs was incorrect.
16734cb7ca80SJames Smart 	 * Accept anything of "minimum" length. Assume format per 1.15
16744cb7ca80SJames Smart 	 * spec (with HOSTID reduced to 16 bytes), ignore how long the
16754cb7ca80SJames Smart 	 * trailing pad length is.
16764cb7ca80SJames Smart 	 */
16774cb7ca80SJames Smart 	if (iod->rqstdatalen < FCNVME_LSDESC_CRA_RQST_MINLEN)
1678c5343203SJames Smart 		ret = VERR_CR_ASSOC_LEN;
16797722ecdcSChristoph Hellwig 	else if (be32_to_cpu(rqst->desc_list_len) <
16807722ecdcSChristoph Hellwig 			FCNVME_LSDESC_CRA_RQST_MIN_LISTLEN)
1681c5343203SJames Smart 		ret = VERR_CR_ASSOC_RQST_LEN;
1682c5343203SJames Smart 	else if (rqst->assoc_cmd.desc_tag !=
1683c5343203SJames Smart 			cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD))
1684c5343203SJames Smart 		ret = VERR_CR_ASSOC_CMD;
16857722ecdcSChristoph Hellwig 	else if (be32_to_cpu(rqst->assoc_cmd.desc_len) <
16867722ecdcSChristoph Hellwig 			FCNVME_LSDESC_CRA_CMD_DESC_MIN_DESCLEN)
1687c5343203SJames Smart 		ret = VERR_CR_ASSOC_CMD_LEN;
1688c5343203SJames Smart 	else if (!rqst->assoc_cmd.ersp_ratio ||
1689c5343203SJames Smart 		 (be16_to_cpu(rqst->assoc_cmd.ersp_ratio) >=
1690c5343203SJames Smart 				be16_to_cpu(rqst->assoc_cmd.sqsize)))
1691c5343203SJames Smart 		ret = VERR_ERSP_RATIO;
1692c5343203SJames Smart 
1693c5343203SJames Smart 	else {
1694c5343203SJames Smart 		/* new association w/ admin queue */
169558ab8ff9SJames Smart 		iod->assoc = nvmet_fc_alloc_target_assoc(
169658ab8ff9SJames Smart 						tgtport, iod->hosthandle);
1697c5343203SJames Smart 		if (!iod->assoc)
1698c5343203SJames Smart 			ret = VERR_ASSOC_ALLOC_FAIL;
1699c5343203SJames Smart 		else {
1700c5343203SJames Smart 			queue = nvmet_fc_alloc_target_queue(iod->assoc, 0,
1701c5343203SJames Smart 					be16_to_cpu(rqst->assoc_cmd.sqsize));
17020cab4404SAmit Engel 			if (!queue) {
1703c5343203SJames Smart 				ret = VERR_QUEUE_ALLOC_FAIL;
17040cab4404SAmit Engel 				nvmet_fc_tgt_a_put(iod->assoc);
17050cab4404SAmit Engel 			}
1706c5343203SJames Smart 		}
1707c5343203SJames Smart 	}
1708c5343203SJames Smart 
1709c5343203SJames Smart 	if (ret) {
1710c5343203SJames Smart 		dev_err(tgtport->dev,
1711c5343203SJames Smart 			"Create Association LS failed: %s\n",
1712c5343203SJames Smart 			validation_errors[ret]);
1713ca19bcd0SJames Smart 		iod->lsrsp->rsplen = nvme_fc_format_rjt(acc,
17143b8281b0SJames Smart 				sizeof(*acc), rqst->w0.ls_cmd,
17154083aa98SJames Smart 				FCNVME_RJT_RC_LOGIC,
17164083aa98SJames Smart 				FCNVME_RJT_EXP_NONE, 0);
1717c5343203SJames Smart 		return;
1718c5343203SJames Smart 	}
1719c5343203SJames Smart 
1720c5343203SJames Smart 	queue->ersp_ratio = be16_to_cpu(rqst->assoc_cmd.ersp_ratio);
1721c5343203SJames Smart 	atomic_set(&queue->connected, 1);
1722c5343203SJames Smart 	queue->sqhd = 0;	/* best place to init value */
1723c5343203SJames Smart 
172447bf3241SJames Smart 	dev_info(tgtport->dev,
172547bf3241SJames Smart 		"{%d:%d} Association created\n",
172647bf3241SJames Smart 		tgtport->fc_target_port.port_num, iod->assoc->a_id);
172747bf3241SJames Smart 
1728c5343203SJames Smart 	/* format a response */
1729c5343203SJames Smart 
173072e6329fSJames Smart 	iod->lsrsp->rsplen = sizeof(*acc);
1731c5343203SJames Smart 
1732ca19bcd0SJames Smart 	nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
1733c5343203SJames Smart 			fcnvme_lsdesc_len(
1734c5343203SJames Smart 				sizeof(struct fcnvme_ls_cr_assoc_acc)),
1735c5343203SJames Smart 			FCNVME_LS_CREATE_ASSOCIATION);
1736c5343203SJames Smart 	acc->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
1737c5343203SJames Smart 	acc->associd.desc_len =
1738c5343203SJames Smart 			fcnvme_lsdesc_len(
1739c5343203SJames Smart 				sizeof(struct fcnvme_lsdesc_assoc_id));
1740c5343203SJames Smart 	acc->associd.association_id =
1741c5343203SJames Smart 			cpu_to_be64(nvmet_fc_makeconnid(iod->assoc, 0));
1742c5343203SJames Smart 	acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID);
1743c5343203SJames Smart 	acc->connectid.desc_len =
1744c5343203SJames Smart 			fcnvme_lsdesc_len(
1745c5343203SJames Smart 				sizeof(struct fcnvme_lsdesc_conn_id));
1746c5343203SJames Smart 	acc->connectid.connection_id = acc->associd.association_id;
1747c5343203SJames Smart }
1748c5343203SJames Smart 
1749c5343203SJames Smart static void
1750c5343203SJames Smart nvmet_fc_ls_create_connection(struct nvmet_fc_tgtport *tgtport,
1751c5343203SJames Smart 			struct nvmet_fc_ls_iod *iod)
1752c5343203SJames Smart {
17533b8281b0SJames Smart 	struct fcnvme_ls_cr_conn_rqst *rqst = &iod->rqstbuf->rq_cr_conn;
17543b8281b0SJames Smart 	struct fcnvme_ls_cr_conn_acc *acc = &iod->rspbuf->rsp_cr_conn;
1755c5343203SJames Smart 	struct nvmet_fc_tgt_queue *queue;
1756c5343203SJames Smart 	int ret = 0;
1757c5343203SJames Smart 
1758c5343203SJames Smart 	memset(acc, 0, sizeof(*acc));
1759c5343203SJames Smart 
1760c5343203SJames Smart 	if (iod->rqstdatalen < sizeof(struct fcnvme_ls_cr_conn_rqst))
1761c5343203SJames Smart 		ret = VERR_CR_CONN_LEN;
1762c5343203SJames Smart 	else if (rqst->desc_list_len !=
1763c5343203SJames Smart 			fcnvme_lsdesc_len(
1764c5343203SJames Smart 				sizeof(struct fcnvme_ls_cr_conn_rqst)))
1765c5343203SJames Smart 		ret = VERR_CR_CONN_RQST_LEN;
1766c5343203SJames Smart 	else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
1767c5343203SJames Smart 		ret = VERR_ASSOC_ID;
1768c5343203SJames Smart 	else if (rqst->associd.desc_len !=
1769c5343203SJames Smart 			fcnvme_lsdesc_len(
1770c5343203SJames Smart 				sizeof(struct fcnvme_lsdesc_assoc_id)))
1771c5343203SJames Smart 		ret = VERR_ASSOC_ID_LEN;
1772c5343203SJames Smart 	else if (rqst->connect_cmd.desc_tag !=
1773c5343203SJames Smart 			cpu_to_be32(FCNVME_LSDESC_CREATE_CONN_CMD))
1774c5343203SJames Smart 		ret = VERR_CR_CONN_CMD;
1775c5343203SJames Smart 	else if (rqst->connect_cmd.desc_len !=
1776c5343203SJames Smart 			fcnvme_lsdesc_len(
1777c5343203SJames Smart 				sizeof(struct fcnvme_lsdesc_cr_conn_cmd)))
1778c5343203SJames Smart 		ret = VERR_CR_CONN_CMD_LEN;
1779c5343203SJames Smart 	else if (!rqst->connect_cmd.ersp_ratio ||
1780c5343203SJames Smart 		 (be16_to_cpu(rqst->connect_cmd.ersp_ratio) >=
1781c5343203SJames Smart 				be16_to_cpu(rqst->connect_cmd.sqsize)))
1782c5343203SJames Smart 		ret = VERR_ERSP_RATIO;
1783c5343203SJames Smart 
1784c5343203SJames Smart 	else {
1785c5343203SJames Smart 		/* new io queue */
1786c5343203SJames Smart 		iod->assoc = nvmet_fc_find_target_assoc(tgtport,
1787c5343203SJames Smart 				be64_to_cpu(rqst->associd.association_id));
1788c5343203SJames Smart 		if (!iod->assoc)
1789c5343203SJames Smart 			ret = VERR_NO_ASSOC;
1790c5343203SJames Smart 		else {
1791c5343203SJames Smart 			queue = nvmet_fc_alloc_target_queue(iod->assoc,
1792c5343203SJames Smart 					be16_to_cpu(rqst->connect_cmd.qid),
1793c5343203SJames Smart 					be16_to_cpu(rqst->connect_cmd.sqsize));
1794c5343203SJames Smart 			if (!queue)
1795c5343203SJames Smart 				ret = VERR_QUEUE_ALLOC_FAIL;
1796c5343203SJames Smart 
1797c5343203SJames Smart 			/* release get taken in nvmet_fc_find_target_assoc */
1798c5343203SJames Smart 			nvmet_fc_tgt_a_put(iod->assoc);
1799c5343203SJames Smart 		}
1800c5343203SJames Smart 	}
1801c5343203SJames Smart 
1802c5343203SJames Smart 	if (ret) {
1803c5343203SJames Smart 		dev_err(tgtport->dev,
1804c5343203SJames Smart 			"Create Connection LS failed: %s\n",
1805c5343203SJames Smart 			validation_errors[ret]);
1806ca19bcd0SJames Smart 		iod->lsrsp->rsplen = nvme_fc_format_rjt(acc,
18073b8281b0SJames Smart 				sizeof(*acc), rqst->w0.ls_cmd,
1808c5343203SJames Smart 				(ret == VERR_NO_ASSOC) ?
18094083aa98SJames Smart 					FCNVME_RJT_RC_INV_ASSOC :
18104083aa98SJames Smart 					FCNVME_RJT_RC_LOGIC,
18114083aa98SJames Smart 				FCNVME_RJT_EXP_NONE, 0);
1812c5343203SJames Smart 		return;
1813c5343203SJames Smart 	}
1814c5343203SJames Smart 
1815c5343203SJames Smart 	queue->ersp_ratio = be16_to_cpu(rqst->connect_cmd.ersp_ratio);
1816c5343203SJames Smart 	atomic_set(&queue->connected, 1);
1817c5343203SJames Smart 	queue->sqhd = 0;	/* best place to init value */
1818c5343203SJames Smart 
1819c5343203SJames Smart 	/* format a response */
1820c5343203SJames Smart 
182172e6329fSJames Smart 	iod->lsrsp->rsplen = sizeof(*acc);
1822c5343203SJames Smart 
1823ca19bcd0SJames Smart 	nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
1824c5343203SJames Smart 			fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc)),
1825c5343203SJames Smart 			FCNVME_LS_CREATE_CONNECTION);
1826c5343203SJames Smart 	acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID);
1827c5343203SJames Smart 	acc->connectid.desc_len =
1828c5343203SJames Smart 			fcnvme_lsdesc_len(
1829c5343203SJames Smart 				sizeof(struct fcnvme_lsdesc_conn_id));
1830c5343203SJames Smart 	acc->connectid.connection_id =
1831c5343203SJames Smart 			cpu_to_be64(nvmet_fc_makeconnid(iod->assoc,
1832c5343203SJames Smart 				be16_to_cpu(rqst->connect_cmd.qid)));
1833c5343203SJames Smart }
1834c5343203SJames Smart 
183547bf3241SJames Smart /*
183647bf3241SJames Smart  * Returns true if the LS response is to be transmitted
183747bf3241SJames Smart  * Returns false if the LS response is to be delayed
183847bf3241SJames Smart  */
183947bf3241SJames Smart static int
1840c5343203SJames Smart nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
1841c5343203SJames Smart 			struct nvmet_fc_ls_iod *iod)
1842c5343203SJames Smart {
184353b2b2f5SJames Smart 	struct fcnvme_ls_disconnect_assoc_rqst *rqst =
18443b8281b0SJames Smart 						&iod->rqstbuf->rq_dis_assoc;
184553b2b2f5SJames Smart 	struct fcnvme_ls_disconnect_assoc_acc *acc =
18463b8281b0SJames Smart 						&iod->rspbuf->rsp_dis_assoc;
1847e4fcc72cSJames Smart 	struct nvmet_fc_tgt_assoc *assoc = NULL;
184847bf3241SJames Smart 	struct nvmet_fc_ls_iod *oldls = NULL;
184947bf3241SJames Smart 	unsigned long flags;
1850c5343203SJames Smart 	int ret = 0;
1851c5343203SJames Smart 
1852c5343203SJames Smart 	memset(acc, 0, sizeof(*acc));
1853c5343203SJames Smart 
1854ec3b0e3cSJames Smart 	ret = nvmefc_vldt_lsreq_discon_assoc(iod->rqstdatalen, rqst);
1855ec3b0e3cSJames Smart 	if (!ret) {
185647bf3241SJames Smart 		/* match an active association - takes an assoc ref if !NULL */
1857c5343203SJames Smart 		assoc = nvmet_fc_find_target_assoc(tgtport,
1858c5343203SJames Smart 				be64_to_cpu(rqst->associd.association_id));
1859c5343203SJames Smart 		iod->assoc = assoc;
1860404ec31dSJames Smart 		if (!assoc)
1861c5343203SJames Smart 			ret = VERR_NO_ASSOC;
1862c5343203SJames Smart 	}
1863c5343203SJames Smart 
1864e4fcc72cSJames Smart 	if (ret || !assoc) {
1865c5343203SJames Smart 		dev_err(tgtport->dev,
1866c5343203SJames Smart 			"Disconnect LS failed: %s\n",
1867c5343203SJames Smart 			validation_errors[ret]);
1868ca19bcd0SJames Smart 		iod->lsrsp->rsplen = nvme_fc_format_rjt(acc,
18693b8281b0SJames Smart 				sizeof(*acc), rqst->w0.ls_cmd,
18704083aa98SJames Smart 				(ret == VERR_NO_ASSOC) ?
18714083aa98SJames Smart 					FCNVME_RJT_RC_INV_ASSOC :
18724083aa98SJames Smart 					FCNVME_RJT_RC_LOGIC,
18734083aa98SJames Smart 				FCNVME_RJT_EXP_NONE, 0);
187447bf3241SJames Smart 		return true;
1875c5343203SJames Smart 	}
1876c5343203SJames Smart 
1877c5343203SJames Smart 	/* format a response */
1878c5343203SJames Smart 
187972e6329fSJames Smart 	iod->lsrsp->rsplen = sizeof(*acc);
1880c5343203SJames Smart 
1881ca19bcd0SJames Smart 	nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
1882c5343203SJames Smart 			fcnvme_lsdesc_len(
188353b2b2f5SJames Smart 				sizeof(struct fcnvme_ls_disconnect_assoc_acc)),
188453b2b2f5SJames Smart 			FCNVME_LS_DISCONNECT_ASSOC);
1885c5343203SJames Smart 
188647bf3241SJames Smart 	/*
188747bf3241SJames Smart 	 * The rules for LS response says the response cannot
188847bf3241SJames Smart 	 * go back until ABTS's have been sent for all outstanding
188947bf3241SJames Smart 	 * I/O and a Disconnect Association LS has been sent.
189047bf3241SJames Smart 	 * So... save off the Disconnect LS to send the response
189147bf3241SJames Smart 	 * later. If there was a prior LS already saved, replace
189247bf3241SJames Smart 	 * it with the newer one and send a can't perform reject
189347bf3241SJames Smart 	 * on the older one.
189447bf3241SJames Smart 	 */
189547bf3241SJames Smart 	spin_lock_irqsave(&tgtport->lock, flags);
189647bf3241SJames Smart 	oldls = assoc->rcv_disconn;
189747bf3241SJames Smart 	assoc->rcv_disconn = iod;
189847bf3241SJames Smart 	spin_unlock_irqrestore(&tgtport->lock, flags);
189947bf3241SJames Smart 
190047bf3241SJames Smart 	if (oldls) {
190147bf3241SJames Smart 		dev_info(tgtport->dev,
190247bf3241SJames Smart 			"{%d:%d} Multiple Disconnect Association LS's "
190347bf3241SJames Smart 			"received\n",
190447bf3241SJames Smart 			tgtport->fc_target_port.port_num, assoc->a_id);
190547bf3241SJames Smart 		/* overwrite good response with bogus failure */
190647bf3241SJames Smart 		oldls->lsrsp->rsplen = nvme_fc_format_rjt(oldls->rspbuf,
190747bf3241SJames Smart 						sizeof(*iod->rspbuf),
190847bf3241SJames Smart 						/* ok to use rqst, LS is same */
190947bf3241SJames Smart 						rqst->w0.ls_cmd,
191047bf3241SJames Smart 						FCNVME_RJT_RC_UNAB,
191147bf3241SJames Smart 						FCNVME_RJT_EXP_NONE, 0);
191247bf3241SJames Smart 		nvmet_fc_xmt_ls_rsp(tgtport, oldls);
191347bf3241SJames Smart 	}
191447bf3241SJames Smart 
1915*fad689fcSDaniel Wagner 	nvmet_fc_schedule_delete_assoc(assoc);
1916ccd49addSDaniel Wagner 	nvmet_fc_tgt_a_put(assoc);
1917ccd49addSDaniel Wagner 
191847bf3241SJames Smart 	return false;
1919c5343203SJames Smart }
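
/*
 * Illustrative sketch (not part of the driver): the rcv_disconn swap
 * above in condensed form - at most one Disconnect LS response is
 * parked per association, and a newer LS evicts (and rejects) the
 * older one. demo_park_disconnect() is a hypothetical helper.
 */
static struct nvmet_fc_ls_iod *
demo_park_disconnect(struct nvmet_fc_tgt_assoc *assoc, spinlock_t *lock,
		     struct nvmet_fc_ls_iod *iod)
{
	struct nvmet_fc_ls_iod *oldls;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	oldls = assoc->rcv_disconn;	/* previously parked LS, if any */
	assoc->rcv_disconn = iod;	/* park the newest one */
	spin_unlock_irqrestore(lock, flags);

	return oldls;			/* caller rejects this one now */
}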
1920c5343203SJames Smart 
1921c5343203SJames Smart 
1922c5343203SJames Smart /* *********************** NVME Ctrl Routines **************************** */
1923c5343203SJames Smart 
1924c5343203SJames Smart 
1925c5343203SJames Smart static void nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req);
1926c5343203SJames Smart 
1927e929f06dSChristoph Hellwig static const struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops;
1928c5343203SJames Smart 
1929c5343203SJames Smart static void
193072e6329fSJames Smart nvmet_fc_xmt_ls_rsp_done(struct nvmefc_ls_rsp *lsrsp)
1931c5343203SJames Smart {
193272e6329fSJames Smart 	struct nvmet_fc_ls_iod *iod = lsrsp->nvme_fc_private;
1933c5343203SJames Smart 	struct nvmet_fc_tgtport *tgtport = iod->tgtport;
1934c5343203SJames Smart 
1935c5343203SJames Smart 	fc_dma_sync_single_for_cpu(tgtport->dev, iod->rspdma,
19363b8281b0SJames Smart 				sizeof(*iod->rspbuf), DMA_TO_DEVICE);
1937c5343203SJames Smart 	nvmet_fc_free_ls_iod(tgtport, iod);
1938c5343203SJames Smart 	nvmet_fc_tgtport_put(tgtport);
1939c5343203SJames Smart }
1940c5343203SJames Smart 
1941c5343203SJames Smart static void
1942c5343203SJames Smart nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport,
1943c5343203SJames Smart 				struct nvmet_fc_ls_iod *iod)
1944c5343203SJames Smart {
1945c5343203SJames Smart 	int ret;
1946c5343203SJames Smart 
1947c5343203SJames Smart 	fc_dma_sync_single_for_device(tgtport->dev, iod->rspdma,
19483b8281b0SJames Smart 				  sizeof(*iod->rspbuf), DMA_TO_DEVICE);
1949c5343203SJames Smart 
195072e6329fSJames Smart 	ret = tgtport->ops->xmt_ls_rsp(&tgtport->fc_target_port, iod->lsrsp);
1951c5343203SJames Smart 	if (ret)
195272e6329fSJames Smart 		nvmet_fc_xmt_ls_rsp_done(iod->lsrsp);
1953c5343203SJames Smart }
1954c5343203SJames Smart 
1955c5343203SJames Smart /*
1956c5343203SJames Smart  * Actual processing routine for received FC-NVME LS Requests from the LLD
1957c5343203SJames Smart  */
1958c5343203SJames Smart static void
1959c5343203SJames Smart nvmet_fc_handle_ls_rqst(struct nvmet_fc_tgtport *tgtport,
1960c5343203SJames Smart 			struct nvmet_fc_ls_iod *iod)
1961c5343203SJames Smart {
19623b8281b0SJames Smart 	struct fcnvme_ls_rqst_w0 *w0 = &iod->rqstbuf->rq_cr_assoc.w0;
196347bf3241SJames Smart 	bool sendrsp = true;
1964c5343203SJames Smart 
196572e6329fSJames Smart 	iod->lsrsp->nvme_fc_private = iod;
196672e6329fSJames Smart 	iod->lsrsp->rspbuf = iod->rspbuf;
196772e6329fSJames Smart 	iod->lsrsp->rspdma = iod->rspdma;
196872e6329fSJames Smart 	iod->lsrsp->done = nvmet_fc_xmt_ls_rsp_done;
1969c5343203SJames Smart 	/* Be preventative; handlers will later set to a valid length */
197072e6329fSJames Smart 	iod->lsrsp->rsplen = 0;
1971c5343203SJames Smart 
1972c5343203SJames Smart 	iod->assoc = NULL;
1973c5343203SJames Smart 
1974c5343203SJames Smart 	/*
1975c5343203SJames Smart 	 * handlers:
1976c5343203SJames Smart 	 *   parse request input, execute the request, and format the
1977c5343203SJames Smart 	 *   LS response
1978c5343203SJames Smart 	 */
1979c5343203SJames Smart 	switch (w0->ls_cmd) {
1980c5343203SJames Smart 	case FCNVME_LS_CREATE_ASSOCIATION:
1981c5343203SJames Smart 		/* Creates Association and initial Admin Queue/Connection */
1982c5343203SJames Smart 		nvmet_fc_ls_create_association(tgtport, iod);
1983c5343203SJames Smart 		break;
1984c5343203SJames Smart 	case FCNVME_LS_CREATE_CONNECTION:
1985c5343203SJames Smart 		/* Creates an IO Queue/Connection */
1986c5343203SJames Smart 		nvmet_fc_ls_create_connection(tgtport, iod);
1987c5343203SJames Smart 		break;
198853b2b2f5SJames Smart 	case FCNVME_LS_DISCONNECT_ASSOC:
1989c5343203SJames Smart 		/* Terminate a Queue/Connection or the Association */
199047bf3241SJames Smart 		sendrsp = nvmet_fc_ls_disconnect(tgtport, iod);
1991c5343203SJames Smart 		break;
1992c5343203SJames Smart 	default:
1993ca19bcd0SJames Smart 		iod->lsrsp->rsplen = nvme_fc_format_rjt(iod->rspbuf,
19943b8281b0SJames Smart 				sizeof(*iod->rspbuf), w0->ls_cmd,
19954083aa98SJames Smart 				FCNVME_RJT_RC_INVAL, FCNVME_RJT_EXP_NONE, 0);
1996c5343203SJames Smart 	}
1997c5343203SJames Smart 
199847bf3241SJames Smart 	if (sendrsp)
1999c5343203SJames Smart 		nvmet_fc_xmt_ls_rsp(tgtport, iod);
2000c5343203SJames Smart }
2001c5343203SJames Smart 
2002c5343203SJames Smart /*
2003c5343203SJames Smart  * Actual processing routine for received FC-NVME LS Requests from the LLD
2004c5343203SJames Smart  */
2005c5343203SJames Smart static void
2006c5343203SJames Smart nvmet_fc_handle_ls_rqst_work(struct work_struct *work)
2007c5343203SJames Smart {
2008c5343203SJames Smart 	struct nvmet_fc_ls_iod *iod =
2009c5343203SJames Smart 		container_of(work, struct nvmet_fc_ls_iod, work);
2010c5343203SJames Smart 	struct nvmet_fc_tgtport *tgtport = iod->tgtport;
2011c5343203SJames Smart 
2012c5343203SJames Smart 	nvmet_fc_handle_ls_rqst(tgtport, iod);
2013c5343203SJames Smart }
2014c5343203SJames Smart 
2015c5343203SJames Smart 
2016c5343203SJames Smart /**
2017c5343203SJames Smart  * nvmet_fc_rcv_ls_req - transport entry point called by an LLDD
2018c5343203SJames Smart  *                       upon the reception of a NVME LS request.
2019c5343203SJames Smart  *
2020c5343203SJames Smart  * The nvmet-fc layer will copy payload to an internal structure for
2021c5343203SJames Smart  * processing.  As such, upon completion of the routine, the LLDD may
2022c5343203SJames Smart  * immediately free/reuse the LS request buffer passed in the call.
2023c5343203SJames Smart  *
2024c5343203SJames Smart  * If this routine returns error, the LLDD should abort the exchange.
2025c5343203SJames Smart  *
20261c466527SBart Van Assche  * @target_port: pointer to the (registered) target port the LS was
2027c5343203SJames Smart  *              received on.
2028b53d4741SChaitanya Kulkarni  * @hosthandle: pointer to the host specific data, gets stored in iod.
202972e6329fSJames Smart  * @lsrsp:      pointer to a lsrsp structure to be used to reference
2030c5343203SJames Smart  *              the exchange corresponding to the LS.
2031c5343203SJames Smart  * @lsreqbuf:   pointer to the buffer containing the LS Request
2032c5343203SJames Smart  * @lsreqbuf_len: length, in bytes, of the received LS request
2033c5343203SJames Smart  */
2034c5343203SJames Smart int
2035c5343203SJames Smart nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *target_port,
203672e6329fSJames Smart 			void *hosthandle,
203772e6329fSJames Smart 			struct nvmefc_ls_rsp *lsrsp,
2038c5343203SJames Smart 			void *lsreqbuf, u32 lsreqbuf_len)
2039c5343203SJames Smart {
2040c5343203SJames Smart 	struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
2041c5343203SJames Smart 	struct nvmet_fc_ls_iod *iod;
2042a5c2b4f6SJames Smart 	struct fcnvme_ls_rqst_w0 *w0 = (struct fcnvme_ls_rqst_w0 *)lsreqbuf;
2043c5343203SJames Smart 
2044a5c2b4f6SJames Smart 	if (lsreqbuf_len > sizeof(union nvmefc_ls_requests)) {
2045a5c2b4f6SJames Smart 		dev_info(tgtport->dev,
2046a5c2b4f6SJames Smart 			"RCV %s LS failed: payload too large (%d)\n",
2047a5c2b4f6SJames Smart 			(w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
2048a5c2b4f6SJames Smart 				nvmefc_ls_names[w0->ls_cmd] : "",
2049a5c2b4f6SJames Smart 			lsreqbuf_len);
2050c5343203SJames Smart 		return -E2BIG;
2051a5c2b4f6SJames Smart 	}
2052c5343203SJames Smart 
2053a5c2b4f6SJames Smart 	if (!nvmet_fc_tgtport_get(tgtport)) {
2054a5c2b4f6SJames Smart 		dev_info(tgtport->dev,
2055a5c2b4f6SJames Smart 			"RCV %s LS failed: target deleting\n",
2056a5c2b4f6SJames Smart 			(w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
2057a5c2b4f6SJames Smart 				nvmefc_ls_names[w0->ls_cmd] : "");
2058c5343203SJames Smart 		return -ESHUTDOWN;
2059a5c2b4f6SJames Smart 	}
2060c5343203SJames Smart 
2061c5343203SJames Smart 	iod = nvmet_fc_alloc_ls_iod(tgtport);
2062c5343203SJames Smart 	if (!iod) {
2063a5c2b4f6SJames Smart 		dev_info(tgtport->dev,
2064a5c2b4f6SJames Smart 			"RCV %s LS failed: context allocation failed\n",
2065a5c2b4f6SJames Smart 			(w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
2066a5c2b4f6SJames Smart 				nvmefc_ls_names[w0->ls_cmd] : "");
2067c5343203SJames Smart 		nvmet_fc_tgtport_put(tgtport);
2068c5343203SJames Smart 		return -ENOENT;
2069c5343203SJames Smart 	}
2070c5343203SJames Smart 
207172e6329fSJames Smart 	iod->lsrsp = lsrsp;
2072c5343203SJames Smart 	iod->fcpreq = NULL;
2073c5343203SJames Smart 	memcpy(iod->rqstbuf, lsreqbuf, lsreqbuf_len);
2074c5343203SJames Smart 	iod->rqstdatalen = lsreqbuf_len;
207558ab8ff9SJames Smart 	iod->hosthandle = hosthandle;
2076c5343203SJames Smart 
20778832cf92SSagi Grimberg 	queue_work(nvmet_wq, &iod->work);
2078c5343203SJames Smart 
2079c5343203SJames Smart 	return 0;
2080c5343203SJames Smart }
2081c5343203SJames Smart EXPORT_SYMBOL_GPL(nvmet_fc_rcv_ls_req);
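
/*
 * Illustrative sketch (not part of the driver): how an LLDD might hand
 * a received LS frame to nvmet-fc. The payload is copied internally,
 * so the LLDD's buffer may be reused as soon as the call returns. All
 * lldd_* names are hypothetical.
 */
struct lldd_exchange {
	struct nvmefc_ls_rsp	lsrsp;
	/* ... hw exchange context ... */
};

void lldd_abort_exchange(struct lldd_exchange *xchg);

static void lldd_handle_nvme_ls(struct nvmet_fc_target_port *tgtport,
				void *hosthandle, struct lldd_exchange *xchg,
				void *buf, u32 len)
{
	if (nvmet_fc_rcv_ls_req(tgtport, hosthandle, &xchg->lsrsp, buf, len))
		lldd_abort_exchange(xchg);	/* per the comment above */
}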
2082c5343203SJames Smart 
2083c5343203SJames Smart 
2084c5343203SJames Smart /*
2085c5343203SJames Smart  * **********************
2086c5343203SJames Smart  * Start of FCP handling
2087c5343203SJames Smart  * **********************
2088c5343203SJames Smart  */
2089c5343203SJames Smart 
2090c5343203SJames Smart static int
2091c5343203SJames Smart nvmet_fc_alloc_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
2092c5343203SJames Smart {
2093c5343203SJames Smart 	struct scatterlist *sg;
2094c5343203SJames Smart 	unsigned int nent;
2095c5343203SJames Smart 
20964442b56fSBart Van Assche 	sg = sgl_alloc(fod->req.transfer_len, GFP_KERNEL, &nent);
2097c5343203SJames Smart 	if (!sg)
2098c5343203SJames Smart 		goto out;
2099c5343203SJames Smart 
2100c5343203SJames Smart 	fod->data_sg = sg;
2101c5343203SJames Smart 	fod->data_sg_cnt = nent;
2102c5343203SJames Smart 	fod->data_sg_cnt = fc_dma_map_sg(fod->tgtport->dev, sg, nent,
2103c5343203SJames Smart 				((fod->io_dir == NVMET_FCP_WRITE) ?
2104c5343203SJames Smart 					DMA_FROM_DEVICE : DMA_TO_DEVICE));
2105c5343203SJames Smart 				/* note: write from initiator perspective */
2106d082dc15SJames Smart 	fod->next_sg = fod->data_sg;
2107c5343203SJames Smart 
2108c5343203SJames Smart 	return 0;
2109c5343203SJames Smart 
2110c5343203SJames Smart out:
2111c5343203SJames Smart 	return NVME_SC_INTERNAL;
2112c5343203SJames Smart }
2113c5343203SJames Smart 
2114c5343203SJames Smart static void
2115c5343203SJames Smart nvmet_fc_free_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
2116c5343203SJames Smart {
2117c5343203SJames Smart 	if (!fod->data_sg || !fod->data_sg_cnt)
2118c5343203SJames Smart 		return;
2119c5343203SJames Smart 
2120c5343203SJames Smart 	fc_dma_unmap_sg(fod->tgtport->dev, fod->data_sg, fod->data_sg_cnt,
2121c5343203SJames Smart 				((fod->io_dir == NVMET_FCP_WRITE) ?
2122c5343203SJames Smart 					DMA_FROM_DEVICE : DMA_TO_DEVICE));
21234442b56fSBart Van Assche 	sgl_free(fod->data_sg);
2124c820ad4cSJames Smart 	fod->data_sg = NULL;
2125c820ad4cSJames Smart 	fod->data_sg_cnt = 0;
2126c5343203SJames Smart }
2127c5343203SJames Smart 
2128c5343203SJames Smart 
2129c5343203SJames Smart static bool
2130c5343203SJames Smart queue_90percent_full(struct nvmet_fc_tgt_queue *q, u32 sqhd)
2131c5343203SJames Smart {
2132c5343203SJames Smart 	u32 sqtail, used;
2133c5343203SJames Smart 
2134c5343203SJames Smart 	/* egad, this is ugly. And sqtail is just a best guess */
2135c5343203SJames Smart 	sqtail = atomic_read(&q->sqtail) % q->sqsize;
2136c5343203SJames Smart 
2137c5343203SJames Smart 	used = (sqtail < sqhd) ? (sqtail + q->sqsize - sqhd) : (sqtail - sqhd);
2138c5343203SJames Smart 	return ((used * 10) >= (((u32)(q->sqsize - 1) * 9)));
2139c5343203SJames Smart }
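/*
 * Worked example of the check above: with sqsize = 32 the threshold is
 * (32 - 1) * 9 = 279.  If sqhd = 30 and the best-guess sqtail is 7, the
 * queue has wrapped, so used = 7 + 32 - 30 = 9 and 9 * 10 = 90 < 279:
 * not yet 90% full.  If sqhd = 2 and sqtail = 31, used = 31 - 2 = 29
 * and 29 * 10 = 290 >= 279: full enough to force an ersp carrying SQHD.
 */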
2140c5343203SJames Smart 
2141c5343203SJames Smart /*
2142c5343203SJames Smart  * Prep RSP payload.
2143c5343203SJames Smart  * May be a NVMET_FCOP_RSP or NVMET_FCOP_READDATA_RSP op
2144c5343203SJames Smart  */
2145c5343203SJames Smart static void
2146c5343203SJames Smart nvmet_fc_prep_fcp_rsp(struct nvmet_fc_tgtport *tgtport,
2147c5343203SJames Smart 				struct nvmet_fc_fcp_iod *fod)
2148c5343203SJames Smart {
2149c5343203SJames Smart 	struct nvme_fc_ersp_iu *ersp = &fod->rspiubuf;
2150c5343203SJames Smart 	struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common;
2151c5343203SJames Smart 	struct nvme_completion *cqe = &ersp->cqe;
2152c5343203SJames Smart 	u32 *cqewd = (u32 *)cqe;
2153c5343203SJames Smart 	bool send_ersp = false;
2154c5343203SJames Smart 	u32 rsn, rspcnt, xfr_length;
2155c5343203SJames Smart 
2156c5343203SJames Smart 	if (fod->fcpreq->op == NVMET_FCOP_READDATA_RSP)
21575e62d5c9SChristoph Hellwig 		xfr_length = fod->req.transfer_len;
2158c5343203SJames Smart 	else
2159c5343203SJames Smart 		xfr_length = fod->offset;
2160c5343203SJames Smart 
2161c5343203SJames Smart 	/*
2162c5343203SJames Smart 	 * check to see if we can send a 0's rsp.
2163c5343203SJames Smart 	 *   Note: to send a 0's response, the NVME-FC host transport will
2164c5343203SJames Smart 	 *   recreate the CQE. The host transport knows: sq id, SQHD (last
2165c5343203SJames Smart 	 *   seen in an ersp), and command_id. Thus it will create a
2166c5343203SJames Smart 	 *   zero-filled CQE with those known fields filled in. Transport
2167c5343203SJames Smart 	 *   must send an ersp for any condition where the cqe won't match
2168c5343203SJames Smart 	 *   this.
2169c5343203SJames Smart 	 *
2170c5343203SJames Smart 	 * Here are the FC-NVME mandated cases where we must send an ersp:
2171c5343203SJames Smart 	 *  every N responses, where N=ersp_ratio
2172c5343203SJames Smart 	 *  force fabric commands to send ersp's (not in FC-NVME but good
2173c5343203SJames Smart 	 *    practice)
2174c5343203SJames Smart 	 *  normal cmds: any time status is non-zero, or status is zero
2175c5343203SJames Smart 	 *     but words 0 or 1 are non-zero.
2176c5343203SJames Smart 	 *  the SQ is 90% or more full
2177c5343203SJames Smart 	 *  the cmd is a fused command
2178c5343203SJames Smart 	 *  transferred data length not equal to cmd iu length
2179c5343203SJames Smart 	 */
2180c5343203SJames Smart 	rspcnt = atomic_inc_return(&fod->queue->zrspcnt);
2181c5343203SJames Smart 	if (!(rspcnt % fod->queue->ersp_ratio) ||
21827a1f46e3SMinwoo Im 	    nvme_is_fabrics((struct nvme_command *) sqe) ||
21835e62d5c9SChristoph Hellwig 	    xfr_length != fod->req.transfer_len ||
2184c5343203SJames Smart 	    (le16_to_cpu(cqe->status) & 0xFFFE) || cqewd[0] || cqewd[1] ||
2185c5343203SJames Smart 	    (sqe->flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND)) ||
21868ad76cf1SChristoph Hellwig 	    queue_90percent_full(fod->queue, le16_to_cpu(cqe->sq_head)))
2187c5343203SJames Smart 		send_ersp = true;
2188c5343203SJames Smart 
2189c5343203SJames Smart 	/* re-set the fields */
2190c5343203SJames Smart 	fod->fcpreq->rspaddr = ersp;
2191c5343203SJames Smart 	fod->fcpreq->rspdma = fod->rspdma;
2192c5343203SJames Smart 
2193c5343203SJames Smart 	if (!send_ersp) {
2194c5343203SJames Smart 		memset(ersp, 0, NVME_FC_SIZEOF_ZEROS_RSP);
2195c5343203SJames Smart 		fod->fcpreq->rsplen = NVME_FC_SIZEOF_ZEROS_RSP;
2196c5343203SJames Smart 	} else {
2197c5343203SJames Smart 		ersp->iu_len = cpu_to_be16(sizeof(*ersp)/sizeof(u32));
2198c5343203SJames Smart 		rsn = atomic_inc_return(&fod->queue->rsn);
2199c5343203SJames Smart 		ersp->rsn = cpu_to_be32(rsn);
2200c5343203SJames Smart 		ersp->xfrd_len = cpu_to_be32(xfr_length);
2201c5343203SJames Smart 		fod->fcpreq->rsplen = sizeof(*ersp);
2202c5343203SJames Smart 	}
2203c5343203SJames Smart 
2204c5343203SJames Smart 	fc_dma_sync_single_for_device(tgtport->dev, fod->rspdma,
2205c5343203SJames Smart 				  sizeof(fod->rspiubuf), DMA_TO_DEVICE);
2206c5343203SJames Smart }
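/*
 * Example of the two response forms built above: a successful non-fused
 * read that moved exactly transfer_len bytes, with CQE status and both
 * result words zero, the SQ under 90% full, and zrspcnt not a multiple
 * of ersp_ratio, takes the !send_ersp path: NVME_FC_SIZEOF_ZEROS_RSP
 * zeroed bytes go on the wire and the host transport reconstructs the
 * CQE.  Violating any one condition (e.g. a fabrics Connect command)
 * forces the full ersp with a fresh RSN and the transferred length.
 */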
2207c5343203SJames Smart 
2208c5343203SJames Smart static void nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq);
2209c5343203SJames Smart 
2210c5343203SJames Smart static void
2211a97ec51bSJames Smart nvmet_fc_abort_op(struct nvmet_fc_tgtport *tgtport,
2212a97ec51bSJames Smart 				struct nvmet_fc_fcp_iod *fod)
2213a97ec51bSJames Smart {
2214a97ec51bSJames Smart 	struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
2215a97ec51bSJames Smart 
2216a97ec51bSJames Smart 	/* data no longer needed */
2217a97ec51bSJames Smart 	nvmet_fc_free_tgt_pgs(fod);
2218a97ec51bSJames Smart 
2219a97ec51bSJames Smart 	/*
2220a97ec51bSJames Smart 	 * if an ABTS was received or we issued the fcp_abort early,
2221a97ec51bSJames Smart 	 * don't call the abort routine again.
2222a97ec51bSJames Smart 	 */
2223a97ec51bSJames Smart 	/* no need to take lock - lock was taken earlier to get here */
2224a97ec51bSJames Smart 	if (!fod->aborted)
2225a97ec51bSJames Smart 		tgtport->ops->fcp_abort(&tgtport->fc_target_port, fcpreq);
2226a97ec51bSJames Smart 
2227a97ec51bSJames Smart 	nvmet_fc_free_fcp_iod(fod->queue, fod);
2228a97ec51bSJames Smart }
2229a97ec51bSJames Smart 
2230a97ec51bSJames Smart static void
2231c5343203SJames Smart nvmet_fc_xmt_fcp_rsp(struct nvmet_fc_tgtport *tgtport,
2232c5343203SJames Smart 				struct nvmet_fc_fcp_iod *fod)
2233c5343203SJames Smart {
2234c5343203SJames Smart 	int ret;
2235c5343203SJames Smart 
2236c5343203SJames Smart 	fod->fcpreq->op = NVMET_FCOP_RSP;
2237c5343203SJames Smart 	fod->fcpreq->timeout = 0;
2238c5343203SJames Smart 
2239c5343203SJames Smart 	nvmet_fc_prep_fcp_rsp(tgtport, fod);
2240c5343203SJames Smart 
2241c5343203SJames Smart 	ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq);
2242c5343203SJames Smart 	if (ret)
2243a97ec51bSJames Smart 		nvmet_fc_abort_op(tgtport, fod);
2244c5343203SJames Smart }
2245c5343203SJames Smart 
2246c5343203SJames Smart static void
2247c5343203SJames Smart nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport,
2248c5343203SJames Smart 				struct nvmet_fc_fcp_iod *fod, u8 op)
2249c5343203SJames Smart {
2250c5343203SJames Smart 	struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
2251d082dc15SJames Smart 	struct scatterlist *sg = fod->next_sg;
2252a97ec51bSJames Smart 	unsigned long flags;
2253d082dc15SJames Smart 	u32 remaininglen = fod->req.transfer_len - fod->offset;
2254d082dc15SJames Smart 	u32 tlen = 0;
2255c5343203SJames Smart 	int ret;
2256c5343203SJames Smart 
2257c5343203SJames Smart 	fcpreq->op = op;
2258c5343203SJames Smart 	fcpreq->offset = fod->offset;
2259c5343203SJames Smart 	fcpreq->timeout = NVME_FC_TGTOP_TIMEOUT_SEC;
226048fa362bSJames Smart 
2261d082dc15SJames Smart 	/*
2262d082dc15SJames Smart 	 * for next sequence:
2263d082dc15SJames Smart 	 *  break at a sg element boundary
2264d082dc15SJames Smart 	 *  attempt to keep sequence length capped at
2265d082dc15SJames Smart 	 *    NVMET_FC_MAX_SEQ_LENGTH but allow sequence to
2266d082dc15SJames Smart 	 *    be longer if a single sg element is larger
2267d082dc15SJames Smart 	 *    than that amount. This is done to avoid creating
2268d082dc15SJames Smart 	 *    a new sg list to use for the tgtport api.
2269d082dc15SJames Smart 	 */
2270d082dc15SJames Smart 	fcpreq->sg = sg;
2271d082dc15SJames Smart 	fcpreq->sg_cnt = 0;
2272d082dc15SJames Smart 	while (tlen < remaininglen &&
2273d082dc15SJames Smart 	       fcpreq->sg_cnt < tgtport->max_sg_cnt &&
2274d082dc15SJames Smart 	       tlen + sg_dma_len(sg) < NVMET_FC_MAX_SEQ_LENGTH) {
2275d082dc15SJames Smart 		fcpreq->sg_cnt++;
2276d082dc15SJames Smart 		tlen += sg_dma_len(sg);
2277d082dc15SJames Smart 		sg = sg_next(sg);
2278d082dc15SJames Smart 	}
2279d082dc15SJames Smart 	if (tlen < remaininglen && fcpreq->sg_cnt == 0) {
2280d082dc15SJames Smart 		fcpreq->sg_cnt++;
2281d082dc15SJames Smart 		tlen += min_t(u32, sg_dma_len(sg), remaininglen);
2282d082dc15SJames Smart 		sg = sg_next(sg);
2283d082dc15SJames Smart 	}
2284d082dc15SJames Smart 	if (tlen < remaininglen)
2285d082dc15SJames Smart 		fod->next_sg = sg;
2286d082dc15SJames Smart 	else
2287d082dc15SJames Smart 		fod->next_sg = NULL;
2288d082dc15SJames Smart 
2289c5343203SJames Smart 	fcpreq->transfer_length = tlen;
2290c5343203SJames Smart 	fcpreq->transferred_length = 0;
2291c5343203SJames Smart 	fcpreq->fcp_error = 0;
2292c5343203SJames Smart 	fcpreq->rsplen = 0;
2293c5343203SJames Smart 
2294c5343203SJames Smart 	/*
2295c5343203SJames Smart 	 * If the last READDATA request: check if LLDD supports
2296c5343203SJames Smart 	 * combined xfr with response.
2297c5343203SJames Smart 	 */
2298c5343203SJames Smart 	if ((op == NVMET_FCOP_READDATA) &&
22995e62d5c9SChristoph Hellwig 	    ((fod->offset + fcpreq->transfer_length) == fod->req.transfer_len) &&
2300c5343203SJames Smart 	    (tgtport->ops->target_features & NVMET_FCTGTFEAT_READDATA_RSP)) {
2301c5343203SJames Smart 		fcpreq->op = NVMET_FCOP_READDATA_RSP;
2302c5343203SJames Smart 		nvmet_fc_prep_fcp_rsp(tgtport, fod);
2303c5343203SJames Smart 	}
2304c5343203SJames Smart 
2305c5343203SJames Smart 	ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq);
2306c5343203SJames Smart 	if (ret) {
2307c5343203SJames Smart 		/*
2308c5343203SJames Smart 		 * should be ok to set w/o lock as it's in the thread of
2309c5343203SJames Smart 		 * execution (not an async timer routine) and doesn't
2310c5343203SJames Smart 		 * contend with any clearing action
2311c5343203SJames Smart 		 */
2312c5343203SJames Smart 		fod->abort = true;
2313c5343203SJames Smart 
2314a97ec51bSJames Smart 		if (op == NVMET_FCOP_WRITEDATA) {
2315a97ec51bSJames Smart 			spin_lock_irqsave(&fod->flock, flags);
2316a97ec51bSJames Smart 			fod->writedataactive = false;
2317a97ec51bSJames Smart 			spin_unlock_irqrestore(&fod->flock, flags);
231829b3d26eSJames Smart 			nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
2319a97ec51bSJames Smart 		} else /* NVMET_FCOP_READDATA or NVMET_FCOP_READDATA_RSP */ {
2320c5343203SJames Smart 			fcpreq->fcp_error = ret;
2321c5343203SJames Smart 			fcpreq->transferred_length = 0;
2322c5343203SJames Smart 			nvmet_fc_xmt_fcp_op_done(fod->fcpreq);
2323c5343203SJames Smart 		}
2324c5343203SJames Smart 	}
2325c5343203SJames Smart }
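/*
 * Worked example of the sequence carving above: a 1 MB transfer mapped
 * as 64 KB sg elements goes out 192 KB (3 elements) at a time, since a
 * 4th element would hit the 256 KB NVMET_FC_MAX_SEQ_LENGTH cap and the
 * loop requires staying strictly below it.  A single 512 KB element
 * never enters the loop; the follow-on check then sends the oversized
 * element as one sequence rather than rebuilding a split sg list.
 */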
2326c5343203SJames Smart 
2327a97ec51bSJames Smart static inline bool
2328a97ec51bSJames Smart __nvmet_fc_fod_op_abort(struct nvmet_fc_fcp_iod *fod, bool abort)
2329a97ec51bSJames Smart {
2330a97ec51bSJames Smart 	struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
2331a97ec51bSJames Smart 	struct nvmet_fc_tgtport *tgtport = fod->tgtport;
2332a97ec51bSJames Smart 
2333a97ec51bSJames Smart 	/* if in the middle of an io and we need to tear down */
2334a97ec51bSJames Smart 	if (abort) {
2335a97ec51bSJames Smart 		if (fcpreq->op == NVMET_FCOP_WRITEDATA) {
233629b3d26eSJames Smart 			nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
2337a97ec51bSJames Smart 			return true;
2338a97ec51bSJames Smart 		}
2339a97ec51bSJames Smart 
2340a97ec51bSJames Smart 		nvmet_fc_abort_op(tgtport, fod);
2341a97ec51bSJames Smart 		return true;
2342a97ec51bSJames Smart 	}
2343a97ec51bSJames Smart 
2344a97ec51bSJames Smart 	return false;
2345a97ec51bSJames Smart }
2346a97ec51bSJames Smart 
234739498faeSJames Smart /*
234839498faeSJames Smart  * actual done handler for FCP operations when completed by the lldd
234939498faeSJames Smart  */
2350c5343203SJames Smart static void
235139498faeSJames Smart nvmet_fc_fod_op_done(struct nvmet_fc_fcp_iod *fod)
2352c5343203SJames Smart {
235339498faeSJames Smart 	struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
2354c5343203SJames Smart 	struct nvmet_fc_tgtport *tgtport = fod->tgtport;
2355c5343203SJames Smart 	unsigned long flags;
2356c5343203SJames Smart 	bool abort;
2357c5343203SJames Smart 
2358c5343203SJames Smart 	spin_lock_irqsave(&fod->flock, flags);
2359c5343203SJames Smart 	abort = fod->abort;
2360a97ec51bSJames Smart 	fod->writedataactive = false;
2361c5343203SJames Smart 	spin_unlock_irqrestore(&fod->flock, flags);
2362c5343203SJames Smart 
2363c5343203SJames Smart 	switch (fcpreq->op) {
2364c5343203SJames Smart 
2365c5343203SJames Smart 	case NVMET_FCOP_WRITEDATA:
2366a97ec51bSJames Smart 		if (__nvmet_fc_fod_op_abort(fod, abort))
2367a97ec51bSJames Smart 			return;
2368f64935abSJames Smart 		if (fcpreq->fcp_error ||
2369c5343203SJames Smart 		    fcpreq->transferred_length != fcpreq->transfer_length) {
237070e37988SChristophe JAILLET 			spin_lock_irqsave(&fod->flock, flags);
2371a97ec51bSJames Smart 			fod->abort = true;
237270e37988SChristophe JAILLET 			spin_unlock_irqrestore(&fod->flock, flags);
2373a97ec51bSJames Smart 
237429b3d26eSJames Smart 			nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
2375c5343203SJames Smart 			return;
2376c5343203SJames Smart 		}
2377c5343203SJames Smart 
2378c5343203SJames Smart 		fod->offset += fcpreq->transferred_length;
23795e62d5c9SChristoph Hellwig 		if (fod->offset != fod->req.transfer_len) {
2380a97ec51bSJames Smart 			spin_lock_irqsave(&fod->flock, flags);
2381a97ec51bSJames Smart 			fod->writedataactive = true;
2382a97ec51bSJames Smart 			spin_unlock_irqrestore(&fod->flock, flags);
2383a97ec51bSJames Smart 
2384c5343203SJames Smart 			/* transfer the next chunk */
2385c5343203SJames Smart 			nvmet_fc_transfer_fcp_data(tgtport, fod,
2386c5343203SJames Smart 						NVMET_FCOP_WRITEDATA);
2387c5343203SJames Smart 			return;
2388c5343203SJames Smart 		}
2389c5343203SJames Smart 
2390c5343203SJames Smart 		/* data transfer complete, resume with nvmet layer */
2391be3f3114SChristoph Hellwig 		fod->req.execute(&fod->req);
2392c5343203SJames Smart 		break;
2393c5343203SJames Smart 
2394c5343203SJames Smart 	case NVMET_FCOP_READDATA:
2395c5343203SJames Smart 	case NVMET_FCOP_READDATA_RSP:
2396a97ec51bSJames Smart 		if (__nvmet_fc_fod_op_abort(fod, abort))
2397a97ec51bSJames Smart 			return;
2398f64935abSJames Smart 		if (fcpreq->fcp_error ||
2399c5343203SJames Smart 		    fcpreq->transferred_length != fcpreq->transfer_length) {
2400a97ec51bSJames Smart 			nvmet_fc_abort_op(tgtport, fod);
2401c5343203SJames Smart 			return;
2402c5343203SJames Smart 		}
2403c5343203SJames Smart 
2404c5343203SJames Smart 		/* success */
2405c5343203SJames Smart 
2406c5343203SJames Smart 		if (fcpreq->op == NVMET_FCOP_READDATA_RSP) {
2407c5343203SJames Smart 			/* data no longer needed */
2408c5343203SJames Smart 			nvmet_fc_free_tgt_pgs(fod);
2409c5343203SJames Smart 			nvmet_fc_free_fcp_iod(fod->queue, fod);
2410c5343203SJames Smart 			return;
2411c5343203SJames Smart 		}
2412c5343203SJames Smart 
2413c5343203SJames Smart 		fod->offset += fcpreq->transferred_length;
24145e62d5c9SChristoph Hellwig 		if (fod->offset != fod->req.transfer_len) {
2415c5343203SJames Smart 			/* transfer the next chunk */
2416c5343203SJames Smart 			nvmet_fc_transfer_fcp_data(tgtport, fod,
2417c5343203SJames Smart 						NVMET_FCOP_READDATA);
2418c5343203SJames Smart 			return;
2419c5343203SJames Smart 		}
2420c5343203SJames Smart 
2421c5343203SJames Smart 		/* data transfer complete, send response */
2422c5343203SJames Smart 
2423c5343203SJames Smart 		/* data no longer needed */
2424c5343203SJames Smart 		nvmet_fc_free_tgt_pgs(fod);
2425c5343203SJames Smart 
2426c5343203SJames Smart 		nvmet_fc_xmt_fcp_rsp(tgtport, fod);
2427c5343203SJames Smart 
2428c5343203SJames Smart 		break;
2429c5343203SJames Smart 
2430c5343203SJames Smart 	case NVMET_FCOP_RSP:
2431a97ec51bSJames Smart 		if (__nvmet_fc_fod_op_abort(fod, abort))
2432a97ec51bSJames Smart 			return;
2433c5343203SJames Smart 		nvmet_fc_free_fcp_iod(fod->queue, fod);
2434c5343203SJames Smart 		break;
2435c5343203SJames Smart 
2436c5343203SJames Smart 	default:
2437c5343203SJames Smart 		break;
2438c5343203SJames Smart 	}
2439c5343203SJames Smart }
2440c5343203SJames Smart 
244139498faeSJames Smart static void
244239498faeSJames Smart nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq)
244339498faeSJames Smart {
244439498faeSJames Smart 	struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
244539498faeSJames Smart 
244639498faeSJames Smart 	nvmet_fc_fod_op_done(fod);
244739498faeSJames Smart }
244839498faeSJames Smart 
2449c5343203SJames Smart /*
2450c5343203SJames Smart  * actual completion handler after execution by the nvmet layer
2451c5343203SJames Smart  */
2452c5343203SJames Smart static void
2453c5343203SJames Smart __nvmet_fc_fcp_nvme_cmd_done(struct nvmet_fc_tgtport *tgtport,
2454c5343203SJames Smart 			struct nvmet_fc_fcp_iod *fod, int status)
2455c5343203SJames Smart {
2456c5343203SJames Smart 	struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common;
2457c5343203SJames Smart 	struct nvme_completion *cqe = &fod->rspiubuf.cqe;
2458c5343203SJames Smart 	unsigned long flags;
2459c5343203SJames Smart 	bool abort;
2460c5343203SJames Smart 
2461c5343203SJames Smart 	spin_lock_irqsave(&fod->flock, flags);
2462c5343203SJames Smart 	abort = fod->abort;
2463c5343203SJames Smart 	spin_unlock_irqrestore(&fod->flock, flags);
2464c5343203SJames Smart 
2465c5343203SJames Smart 	/* if we have a CQE, snoop the last sq_head value */
2466c5343203SJames Smart 	if (!status)
2467c5343203SJames Smart 		fod->queue->sqhd = cqe->sq_head;
2468c5343203SJames Smart 
2469c5343203SJames Smart 	if (abort) {
2470a97ec51bSJames Smart 		nvmet_fc_abort_op(tgtport, fod);
2471c5343203SJames Smart 		return;
2472c5343203SJames Smart 	}
2473c5343203SJames Smart 
2474c5343203SJames Smart 	/* if an error occurred handling the cmd after initial parsing */
2475c5343203SJames Smart 	if (status) {
2476c5343203SJames Smart 		/* fudge up a failed CQE status for our transport error */
2477c5343203SJames Smart 		memset(cqe, 0, sizeof(*cqe));
2478c5343203SJames Smart 		cqe->sq_head = fod->queue->sqhd;	/* echo last cqe sqhd */
2479c5343203SJames Smart 		cqe->sq_id = cpu_to_le16(fod->queue->qid);
2480c5343203SJames Smart 		cqe->command_id = sqe->command_id;
2481c5343203SJames Smart 		cqe->status = cpu_to_le16(status);
2482c5343203SJames Smart 	} else {
2483c5343203SJames Smart 
2484c5343203SJames Smart 		/*
2485c5343203SJames Smart 		 * try to push the data even if the SQE status is non-zero.
2486c5343203SJames Smart 		 * There may be a status where data still was intended to
2487c5343203SJames Smart 		 * be moved
2488c5343203SJames Smart 		 */
2489c5343203SJames Smart 		if ((fod->io_dir == NVMET_FCP_READ) && (fod->data_sg_cnt)) {
2490c5343203SJames Smart 			/* push the data over before sending rsp */
2491c5343203SJames Smart 			nvmet_fc_transfer_fcp_data(tgtport, fod,
2492c5343203SJames Smart 						NVMET_FCOP_READDATA);
2493c5343203SJames Smart 			return;
2494c5343203SJames Smart 		}
2495c5343203SJames Smart 
2496c5343203SJames Smart 		/* writes & no data - fall thru */
2497c5343203SJames Smart 	}
2498c5343203SJames Smart 
2499c5343203SJames Smart 	/* data no longer needed */
2500c5343203SJames Smart 	nvmet_fc_free_tgt_pgs(fod);
2501c5343203SJames Smart 
2502c5343203SJames Smart 	nvmet_fc_xmt_fcp_rsp(tgtport, fod);
2503c5343203SJames Smart }
2504c5343203SJames Smart 
2505c5343203SJames Smart 
2506c5343203SJames Smart static void
2507c5343203SJames Smart nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req)
2508c5343203SJames Smart {
2509c5343203SJames Smart 	struct nvmet_fc_fcp_iod *fod = nvmet_req_to_fod(nvme_req);
2510c5343203SJames Smart 	struct nvmet_fc_tgtport *tgtport = fod->tgtport;
2511c5343203SJames Smart 
2512c5343203SJames Smart 	__nvmet_fc_fcp_nvme_cmd_done(tgtport, fod, 0);
2513c5343203SJames Smart }
2514c5343203SJames Smart 
2515c5343203SJames Smart 
2516c5343203SJames Smart /*
2517ea96d649SJames Smart  * Actual processing routine for received FC-NVME I/O requests from the LLDD
2518c5343203SJames Smart  */
2519edba98ddSChristoph Hellwig static void
2520c5343203SJames Smart nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
2521c5343203SJames Smart 			struct nvmet_fc_fcp_iod *fod)
2522c5343203SJames Smart {
2523c5343203SJames Smart 	struct nvme_fc_cmd_iu *cmdiu = &fod->cmdiubuf;
2524cce75291SJames Smart 	u32 xfrlen = be32_to_cpu(cmdiu->data_len);
2525c5343203SJames Smart 	int ret;
2526c5343203SJames Smart 
2527c5343203SJames Smart 	/*
2528c5343203SJames Smart 	 * Fused commands are currently not supported in the linux
2529c5343203SJames Smart 	 * implementation.
2530c5343203SJames Smart 	 *
2531c5343203SJames Smart 	 * As such, the FC transport implementation does not look at
2532c5343203SJames Smart 	 * the fused command flags, nor does it hold and order delivery
2533c5343203SJames Smart 	 * to the upper layer until both commands (by csn) have arrived.
2534c5343203SJames Smart 	 */
2535c5343203SJames Smart 
2536c5343203SJames Smart 	fod->fcpreq->done = nvmet_fc_xmt_fcp_op_done;
2537c5343203SJames Smart 
2538c5343203SJames Smart 	if (cmdiu->flags & FCNVME_CMD_FLAGS_WRITE) {
2539c5343203SJames Smart 		fod->io_dir = NVMET_FCP_WRITE;
2540c5343203SJames Smart 		if (!nvme_is_write(&cmdiu->sqe))
2541c5343203SJames Smart 			goto transport_error;
2542c5343203SJames Smart 	} else if (cmdiu->flags & FCNVME_CMD_FLAGS_READ) {
2543c5343203SJames Smart 		fod->io_dir = NVMET_FCP_READ;
2544c5343203SJames Smart 		if (nvme_is_write(&cmdiu->sqe))
2545c5343203SJames Smart 			goto transport_error;
2546c5343203SJames Smart 	} else {
2547c5343203SJames Smart 		fod->io_dir = NVMET_FCP_NODATA;
2548cce75291SJames Smart 		if (xfrlen)
2549c5343203SJames Smart 			goto transport_error;
2550c5343203SJames Smart 	}
2551c5343203SJames Smart 
2552c5343203SJames Smart 	fod->req.cmd = &fod->cmdiubuf.sqe;
2553fc6c9730SMax Gurtovoy 	fod->req.cqe = &fod->rspiubuf.cqe;
2554399b70e8SDaniel Wagner 	if (!tgtport->pe)
2555399b70e8SDaniel Wagner 		goto transport_error;
2556ea96d649SJames Smart 	fod->req.port = tgtport->pe->port;
2557c5343203SJames Smart 
2558c5343203SJames Smart 	/* clear any response payload */
2559c5343203SJames Smart 	memset(&fod->rspiubuf, 0, sizeof(fod->rspiubuf));
2560c5343203SJames Smart 
2561188f7e8aSJames Smart 	fod->data_sg = NULL;
2562188f7e8aSJames Smart 	fod->data_sg_cnt = 0;
2563188f7e8aSJames Smart 
2564c5343203SJames Smart 	ret = nvmet_req_init(&fod->req,
2565c5343203SJames Smart 				&fod->queue->nvme_cq,
2566c5343203SJames Smart 				&fod->queue->nvme_sq,
2567c5343203SJames Smart 				&nvmet_fc_tgt_fcp_ops);
2568188f7e8aSJames Smart 	if (!ret) {
2569188f7e8aSJames Smart 		/* bad SQE content or invalid ctrl state */
2570188f7e8aSJames Smart 		/* nvmet layer has already called op done to send rsp. */
2571c5343203SJames Smart 		return;
2572c5343203SJames Smart 	}
2573c5343203SJames Smart 
2574cce75291SJames Smart 	fod->req.transfer_len = xfrlen;
2575cce75291SJames Smart 
2576c5343203SJames Smart 	/* keep a running counter of tail position */
2577c5343203SJames Smart 	atomic_inc(&fod->queue->sqtail);
2578c5343203SJames Smart 
25795e62d5c9SChristoph Hellwig 	if (fod->req.transfer_len) {
2580c5343203SJames Smart 		ret = nvmet_fc_alloc_tgt_pgs(fod);
2581c5343203SJames Smart 		if (ret) {
2582c5343203SJames Smart 			nvmet_req_complete(&fod->req, ret);
2583c5343203SJames Smart 			return;
2584c5343203SJames Smart 		}
2585c5343203SJames Smart 	}
2586c5343203SJames Smart 	fod->req.sg = fod->data_sg;
2587c5343203SJames Smart 	fod->req.sg_cnt = fod->data_sg_cnt;
2588c5343203SJames Smart 	fod->offset = 0;
2589c5343203SJames Smart 
2590c5343203SJames Smart 	if (fod->io_dir == NVMET_FCP_WRITE) {
2591c5343203SJames Smart 		/* pull the data over before invoking nvmet layer */
2592c5343203SJames Smart 		nvmet_fc_transfer_fcp_data(tgtport, fod, NVMET_FCOP_WRITEDATA);
2593c5343203SJames Smart 		return;
2594c5343203SJames Smart 	}
2595c5343203SJames Smart 
2596c5343203SJames Smart 	/*
2597c5343203SJames Smart 	 * Reads or no data:
2598c5343203SJames Smart 	 *
2599c5343203SJames Smart 	 * can invoke the nvmet_layer now. If read data, cmd completion will
2600c5343203SJames Smart 	 * push the data
2601c5343203SJames Smart 	 */
2602be3f3114SChristoph Hellwig 	fod->req.execute(&fod->req);
2603c5343203SJames Smart 	return;
2604c5343203SJames Smart 
2605c5343203SJames Smart transport_error:
2606a97ec51bSJames Smart 	nvmet_fc_abort_op(tgtport, fod);
2607c5343203SJames Smart }
2608c5343203SJames Smart 
2609c5343203SJames Smart /**
2610c5343203SJames Smart  * nvmet_fc_rcv_fcp_req - transport entry point called by an LLDD
2611c5343203SJames Smart  *                       upon the reception of an NVME FCP CMD IU.
2612c5343203SJames Smart  *
2613c5343203SJames Smart  * Pass a FC-NVME FCP CMD IU received from the FC link to the nvmet-fc
2614c5343203SJames Smart  * layer for processing.
2615c5343203SJames Smart  *
26160fb228d3SJames Smart  * The nvmet_fc layer allocates a local job structure (struct
26170fb228d3SJames Smart  * nvmet_fc_fcp_iod) from the queue for the io and copies the
26180fb228d3SJames Smart  * CMD IU buffer to the job structure. As such, on a successful
26190fb228d3SJames Smart  * completion (returns 0), the LLDD may immediately free/reuse
26200fb228d3SJames Smart  * the CMD IU buffer passed in the call.
2621c5343203SJames Smart  *
26220fb228d3SJames Smart  * However, in some circumstances, due to the packetized nature of FC
26230fb228d3SJames Smart  * and the api of the FC LLDD - which may issue a hw command to send
26240fb228d3SJames Smart  * the response but not see the hw completion for that command before
26250fb228d3SJames Smart  * a new command is asynchronously received - it's possible for a
26260fb228d3SJames Smart  * command to be received before the LLDD and nvmet_fc have recycled
26270fb228d3SJames Smart  * the job structure. This gives the appearance of more commands
26280fb228d3SJames Smart  * received than fit in the sq.
26290fb228d3SJames Smart  * To alleviate this scenario, a temporary queue is maintained in the
26300fb228d3SJames Smart  * transport for pending LLDD requests waiting for a queue job structure.
26310fb228d3SJames Smart  * In these "overrun" cases, a temporary queue element is allocated,
26320fb228d3SJames Smart  * the LLDD request and CMD IU buffer information are remembered, and
26330fb228d3SJames Smart  * the routine returns a -EOVERFLOW status. Subsequently, when a queue job
26340fb228d3SJames Smart  * structure is freed, it is immediately reallocated for anything on the
26350fb228d3SJames Smart  * pending request list. The LLDD's defer_rcv() callback is called,
26360fb228d3SJames Smart  * informing the LLDD that it may reuse the CMD IU buffer, and the io
26370fb228d3SJames Smart  * is then started normally with the transport.
26380fb228d3SJames Smart  *
26390fb228d3SJames Smart  * The LLDD, when receiving an -EOVERFLOW completion status, is to treat
26400fb228d3SJames Smart  * the completion as successful but must not reuse the CMD IU buffer
26410fb228d3SJames Smart  * until the LLDD's defer_rcv() callback has been called for the
26420fb228d3SJames Smart  * corresponding struct nvmefc_tgt_fcp_req pointer.
26430fb228d3SJames Smart  *
26440fb228d3SJames Smart  * If there is any other condition in which an error occurs, the
26450fb228d3SJames Smart  * transport will return a non-zero status indicating the error.
26460fb228d3SJames Smart  * In all cases other than -EOVERFLOW, the transport has not accepted the
26470fb228d3SJames Smart  * request and the LLDD should abort the exchange.
2648c5343203SJames Smart  *
2649c5343203SJames Smart  * @target_port: pointer to the (registered) target port the FCP CMD IU
265019b58d94SJames Smart  *              was received on.
2651c5343203SJames Smart  * @fcpreq:     pointer to a fcpreq request structure to be used to reference
2652c5343203SJames Smart  *              the exchange corresponding to the FCP Exchange.
2653c5343203SJames Smart  * @cmdiubuf:   pointer to the buffer containing the FCP CMD IU
2654c5343203SJames Smart  * @cmdiubuf_len: length, in bytes, of the received FCP CMD IU
2655c5343203SJames Smart  */
2656c5343203SJames Smart int
2657c5343203SJames Smart nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *target_port,
2658c5343203SJames Smart 			struct nvmefc_tgt_fcp_req *fcpreq,
2659c5343203SJames Smart 			void *cmdiubuf, u32 cmdiubuf_len)
2660c5343203SJames Smart {
2661c5343203SJames Smart 	struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
2662c5343203SJames Smart 	struct nvme_fc_cmd_iu *cmdiu = cmdiubuf;
2663c5343203SJames Smart 	struct nvmet_fc_tgt_queue *queue;
2664c5343203SJames Smart 	struct nvmet_fc_fcp_iod *fod;
26650fb228d3SJames Smart 	struct nvmet_fc_defer_fcp_req *deferfcp;
26660fb228d3SJames Smart 	unsigned long flags;
2667c5343203SJames Smart 
2668c5343203SJames Smart 	/* validate iu, so the connection id can be used to find the queue */
2669c5343203SJames Smart 	if ((cmdiubuf_len != sizeof(*cmdiu)) ||
267053b2b2f5SJames Smart 			(cmdiu->format_id != NVME_CMD_FORMAT_ID) ||
2671c5343203SJames Smart 			(cmdiu->fc_id != NVME_CMD_FC_ID) ||
2672c5343203SJames Smart 			(be16_to_cpu(cmdiu->iu_len) != (sizeof(*cmdiu)/4)))
2673c5343203SJames Smart 		return -EIO;
2674c5343203SJames Smart 
2675c5343203SJames Smart 	queue = nvmet_fc_find_target_queue(tgtport,
2676c5343203SJames Smart 				be64_to_cpu(cmdiu->connection_id));
2677c5343203SJames Smart 	if (!queue)
2678c5343203SJames Smart 		return -ENOTCONN;
2679c5343203SJames Smart 
2680c5343203SJames Smart 	/*
2681c5343203SJames Smart 	 * note: reference taken by find_target_queue
2682c5343203SJames Smart 	 * After successful fod allocation, the fod will inherit the
2683c5343203SJames Smart 	 * ownership of that reference and will remove the reference
2684c5343203SJames Smart 	 * when the fod is freed.
2685c5343203SJames Smart 	 */
2686c5343203SJames Smart 
26870fb228d3SJames Smart 	spin_lock_irqsave(&queue->qlock, flags);
26880fb228d3SJames Smart 
2689c5343203SJames Smart 	fod = nvmet_fc_alloc_fcp_iod(queue);
26900fb228d3SJames Smart 	if (fod) {
26910fb228d3SJames Smart 		spin_unlock_irqrestore(&queue->qlock, flags);
26920fb228d3SJames Smart 
26930fb228d3SJames Smart 		fcpreq->nvmet_fc_private = fod;
26940fb228d3SJames Smart 		fod->fcpreq = fcpreq;
26950fb228d3SJames Smart 
26960fb228d3SJames Smart 		memcpy(&fod->cmdiubuf, cmdiubuf, cmdiubuf_len);
26970fb228d3SJames Smart 
26980fb228d3SJames Smart 		nvmet_fc_queue_fcp_req(tgtport, queue, fcpreq);
26990fb228d3SJames Smart 
27000fb228d3SJames Smart 		return 0;
27010fb228d3SJames Smart 	}
27020fb228d3SJames Smart 
27030fb228d3SJames Smart 	if (!tgtport->ops->defer_rcv) {
27040fb228d3SJames Smart 		spin_unlock_irqrestore(&queue->qlock, flags);
2705c5343203SJames Smart 		/* release the queue lookup reference */
2706c5343203SJames Smart 		nvmet_fc_tgt_q_put(queue);
2707c5343203SJames Smart 		return -ENOENT;
2708c5343203SJames Smart 	}
2709c5343203SJames Smart 
27100fb228d3SJames Smart 	deferfcp = list_first_entry_or_null(&queue->avail_defer_list,
27110fb228d3SJames Smart 			struct nvmet_fc_defer_fcp_req, req_list);
27120fb228d3SJames Smart 	if (deferfcp) {
27130fb228d3SJames Smart 		/* Just re-use one that was previously allocated */
27140fb228d3SJames Smart 		list_del(&deferfcp->req_list);
27150fb228d3SJames Smart 	} else {
27160fb228d3SJames Smart 		spin_unlock_irqrestore(&queue->qlock, flags);
2717c5343203SJames Smart 
27180fb228d3SJames Smart 		/* Now we need to dynamically allocate one */
27190fb228d3SJames Smart 		deferfcp = kmalloc(sizeof(*deferfcp), GFP_KERNEL);
27200fb228d3SJames Smart 		if (!deferfcp) {
27210fb228d3SJames Smart 			/* release the queue lookup reference */
27220fb228d3SJames Smart 			nvmet_fc_tgt_q_put(queue);
27230fb228d3SJames Smart 			return -ENOMEM;
27240fb228d3SJames Smart 		}
27250fb228d3SJames Smart 		spin_lock_irqsave(&queue->qlock, flags);
27260fb228d3SJames Smart 	}
2727c5343203SJames Smart 
27280fb228d3SJames Smart 	/* For now, use rspaddr / rsplen to save payload information */
27290fb228d3SJames Smart 	fcpreq->rspaddr = cmdiubuf;
27300fb228d3SJames Smart 	fcpreq->rsplen  = cmdiubuf_len;
27310fb228d3SJames Smart 	deferfcp->fcp_req = fcpreq;
27320fb228d3SJames Smart 
27330fb228d3SJames Smart 	/* defer processing till a fod becomes available */
27340fb228d3SJames Smart 	list_add_tail(&deferfcp->req_list, &queue->pending_cmd_list);
27350fb228d3SJames Smart 
27360fb228d3SJames Smart 	/* NOTE: the queue lookup reference is still valid */
27370fb228d3SJames Smart 
27380fb228d3SJames Smart 	spin_unlock_irqrestore(&queue->qlock, flags);
27390fb228d3SJames Smart 
27400fb228d3SJames Smart 	return -EOVERFLOW;
2741c5343203SJames Smart }
2742c5343203SJames Smart EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_req);
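/*
 * Hypothetical LLDD-side sketch (illustrative only, not part of this
 * file) of the calling convention documented above.  The my_rport /
 * my_exchange types and the my_abort_exchange() helper are assumed
 * names; only nvmet_fc_rcv_fcp_req() and the -EOVERFLOW semantics come
 * from the API above.
 */
#if 0
static void my_lldd_fcp_cmd_rcv(struct my_rport *rport,
				struct my_exchange *xchg)
{
	int ret;

	ret = nvmet_fc_rcv_fcp_req(rport->targetport, &xchg->tgt_fcp_req,
				   xchg->cmd_iu_buf, xchg->cmd_iu_len);
	switch (ret) {
	case 0:
		/* CMD IU was copied; the buffer may be reused immediately */
		break;
	case -EOVERFLOW:
		/*
		 * Accepted but deferred: treat as success, but keep the
		 * CMD IU buffer untouched until ->defer_rcv() is called
		 * for this tgt_fcp_req.
		 */
		break;
	default:
		/* not accepted: the LLDD should abort the exchange */
		my_abort_exchange(xchg);
		break;
	}
}
#endif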
2743c5343203SJames Smart 
2744a97ec51bSJames Smart /**
2745a97ec51bSJames Smart  * nvmet_fc_rcv_fcp_abort - transport entry point called by an LLDD
2746a97ec51bSJames Smart  *                       upon the reception of an ABTS for a FCP command
2747a97ec51bSJames Smart  *
2748a97ec51bSJames Smart  * Notify the transport that an ABTS has been received for a FCP command
2749a97ec51bSJames Smart  * that had been given to the transport via nvmet_fc_rcv_fcp_req(). The
2750a97ec51bSJames Smart  * LLDD believes the command is still being worked on
2751a97ec51bSJames Smart  * (template_ops->fcp_req_release() has not been called).
2752a97ec51bSJames Smart  *
2753a97ec51bSJames Smart  * The transport will wait for any outstanding work (an op to the LLDD,
2754a97ec51bSJames Smart  * which the lldd should complete with error due to the ABTS; or the
2755a97ec51bSJames Smart  * completion from the nvmet layer of the nvme command), then will
2756a97ec51bSJames Smart  * stop processing and call the LLDD's fcp_req_release() callback to
2757a97ec51bSJames Smart  * return the i/o context to the LLDD.  The LLDD may send the BA_ACC
2758a97ec51bSJames Smart  * to the ABTS either after return from this function (assuming any
2759a97ec51bSJames Smart  * outstanding op work has been terminated) or upon the callback being
2760a97ec51bSJames Smart  * called.
2761a97ec51bSJames Smart  *
2762a97ec51bSJames Smart  * @target_port: pointer to the (registered) target port the FCP CMD IU
2763a97ec51bSJames Smart  *              was received on.
2764a97ec51bSJames Smart  * @fcpreq:     pointer to the fcpreq request structure that corresponds
2765a97ec51bSJames Smart  *              to the exchange that received the ABTS.
2766a97ec51bSJames Smart  */
2767a97ec51bSJames Smart void
2768a97ec51bSJames Smart nvmet_fc_rcv_fcp_abort(struct nvmet_fc_target_port *target_port,
2769a97ec51bSJames Smart 			struct nvmefc_tgt_fcp_req *fcpreq)
2770a97ec51bSJames Smart {
2771a97ec51bSJames Smart 	struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
2772a97ec51bSJames Smart 	struct nvmet_fc_tgt_queue *queue;
2773a97ec51bSJames Smart 	unsigned long flags;
2774a97ec51bSJames Smart 
2775a97ec51bSJames Smart 	if (!fod || fod->fcpreq != fcpreq)
2776a97ec51bSJames Smart 		/* job appears to have already completed, ignore abort */
2777a97ec51bSJames Smart 		return;
2778a97ec51bSJames Smart 
2779a97ec51bSJames Smart 	queue = fod->queue;
2780a97ec51bSJames Smart 
2781a97ec51bSJames Smart 	spin_lock_irqsave(&queue->qlock, flags);
2782a97ec51bSJames Smart 	if (fod->active) {
2783a97ec51bSJames Smart 		/*
2784a97ec51bSJames Smart 		 * mark as abort. The abort handler, invoked upon completion
2785a97ec51bSJames Smart 		 * of any work, will detect the aborted status and do the
2786a97ec51bSJames Smart 		 * callback.
2787a97ec51bSJames Smart 		 */
2788a97ec51bSJames Smart 		spin_lock(&fod->flock);
2789a97ec51bSJames Smart 		fod->abort = true;
2790a97ec51bSJames Smart 		fod->aborted = true;
2791a97ec51bSJames Smart 		spin_unlock(&fod->flock);
2792a97ec51bSJames Smart 	}
2793a97ec51bSJames Smart 	spin_unlock_irqrestore(&queue->qlock, flags);
2794a97ec51bSJames Smart }
2795a97ec51bSJames Smart EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_abort);
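/*
 * Hypothetical LLDD-side sketch of the ABTS flow documented above; the
 * my_rport / my_exchange types and the my_send_ba_acc() helper are
 * assumed names.
 */
#if 0
static void my_lldd_abts_rcv(struct my_rport *rport, struct my_exchange *xchg)
{
	/* mark the command aborted; any outstanding op completes in error */
	nvmet_fc_rcv_fcp_abort(rport->targetport, &xchg->tgt_fcp_req);

	/*
	 * The BA_ACC may be sent now (once outstanding op work has been
	 * terminated) or deferred until ->fcp_req_release() returns the
	 * io context.
	 */
	my_send_ba_acc(xchg);
}
#endif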
2796a97ec51bSJames Smart 
2797c5343203SJames Smart 
2798c5343203SJames Smart struct nvmet_fc_traddr {
2799c5343203SJames Smart 	u64	nn;
2800c5343203SJames Smart 	u64	pn;
2801c5343203SJames Smart };
2802c5343203SJames Smart 
2803c5343203SJames Smart static int
28049c5358e1SJames Smart __nvme_fc_parse_u64(substring_t *sstr, u64 *val)
2805c5343203SJames Smart {
2806c5343203SJames Smart 	u64 token64;
2807c5343203SJames Smart 
28089c5358e1SJames Smart 	if (match_u64(sstr, &token64))
28099c5358e1SJames Smart 		return -EINVAL;
28109c5358e1SJames Smart 	*val = token64;
2811c5343203SJames Smart 
28129c5358e1SJames Smart 	return 0;
2813c5343203SJames Smart }
2814c5343203SJames Smart 
28159c5358e1SJames Smart /*
28169c5358e1SJames Smart  * This routine validates and extracts the WWN's from the TRADDR string.
28179c5358e1SJames Smart  * As the kernel parsers need a 0x prefix to determine the number base,
28189c5358e1SJames Smart  * the string to parse is always built with "0x" prepended to the names.
28199c5358e1SJames Smart  */
28209c5358e1SJames Smart static int
28219c5358e1SJames Smart nvme_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf, size_t blen)
28229c5358e1SJames Smart {
28239c5358e1SJames Smart 	char name[2 + NVME_FC_TRADDR_HEXNAMELEN + 1];
28249c5358e1SJames Smart 	substring_t wwn = { name, &name[sizeof(name)-1] };
28259c5358e1SJames Smart 	int nnoffset, pnoffset;
28269c5358e1SJames Smart 
2827d4e4230cSMilan P. Gandhi 	/* validate that the string is one of the 2 allowed formats */
28289c5358e1SJames Smart 	if (strnlen(buf, blen) == NVME_FC_TRADDR_MAXLENGTH &&
28299c5358e1SJames Smart 			!strncmp(buf, "nn-0x", NVME_FC_TRADDR_OXNNLEN) &&
28309c5358e1SJames Smart 			!strncmp(&buf[NVME_FC_TRADDR_MAX_PN_OFFSET],
28319c5358e1SJames Smart 				"pn-0x", NVME_FC_TRADDR_OXNNLEN)) {
28329c5358e1SJames Smart 		nnoffset = NVME_FC_TRADDR_OXNNLEN;
28339c5358e1SJames Smart 		pnoffset = NVME_FC_TRADDR_MAX_PN_OFFSET +
28349c5358e1SJames Smart 						NVME_FC_TRADDR_OXNNLEN;
28359c5358e1SJames Smart 	} else if ((strnlen(buf, blen) == NVME_FC_TRADDR_MINLENGTH &&
28369c5358e1SJames Smart 			!strncmp(buf, "nn-", NVME_FC_TRADDR_NNLEN) &&
28379c5358e1SJames Smart 			!strncmp(&buf[NVME_FC_TRADDR_MIN_PN_OFFSET],
28389c5358e1SJames Smart 				"pn-", NVME_FC_TRADDR_NNLEN))) {
28399c5358e1SJames Smart 		nnoffset = NVME_FC_TRADDR_NNLEN;
28409c5358e1SJames Smart 		pnoffset = NVME_FC_TRADDR_MIN_PN_OFFSET + NVME_FC_TRADDR_NNLEN;
28419c5358e1SJames Smart 	} else
28429c5358e1SJames Smart 		goto out_einval;
28439c5358e1SJames Smart 
28449c5358e1SJames Smart 	name[0] = '0';
28459c5358e1SJames Smart 	name[1] = 'x';
28469c5358e1SJames Smart 	name[2 + NVME_FC_TRADDR_HEXNAMELEN] = 0;
28479c5358e1SJames Smart 
28489c5358e1SJames Smart 	memcpy(&name[2], &buf[nnoffset], NVME_FC_TRADDR_HEXNAMELEN);
28499c5358e1SJames Smart 	if (__nvme_fc_parse_u64(&wwn, &traddr->nn))
28509c5358e1SJames Smart 		goto out_einval;
28519c5358e1SJames Smart 
28529c5358e1SJames Smart 	memcpy(&name[2], &buf[pnoffset], NVME_FC_TRADDR_HEXNAMELEN);
28539c5358e1SJames Smart 	if (__nvme_fc_parse_u64(&wwn, &traddr->pn))
28549c5358e1SJames Smart 		goto out_einval;
28559c5358e1SJames Smart 
28569c5358e1SJames Smart 	return 0;
28579c5358e1SJames Smart 
28589c5358e1SJames Smart out_einval:
28599c5358e1SJames Smart 	pr_warn("%s: bad traddr string\n", __func__);
28609c5358e1SJames Smart 	return -EINVAL;
2861c5343203SJames Smart }
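/*
 * Example (illustrative WWNs): the long and short forms below parse
 * identically, yielding traddr->nn = 0x200000109b579d5f and
 * traddr->pn = 0x100000109b579d5f:
 *   "nn-0x200000109b579d5f:pn-0x100000109b579d5f"
 *   "nn-200000109b579d5f:pn-100000109b579d5f"
 */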
2862c5343203SJames Smart 
2863c5343203SJames Smart static int
2864c5343203SJames Smart nvmet_fc_add_port(struct nvmet_port *port)
2865c5343203SJames Smart {
2866c5343203SJames Smart 	struct nvmet_fc_tgtport *tgtport;
2867ea96d649SJames Smart 	struct nvmet_fc_port_entry *pe;
2868c5343203SJames Smart 	struct nvmet_fc_traddr traddr = { 0L, 0L };
2869c5343203SJames Smart 	unsigned long flags;
2870c5343203SJames Smart 	int ret;
2871c5343203SJames Smart 
2872c5343203SJames Smart 	/* validate the address info */
2873c5343203SJames Smart 	if ((port->disc_addr.trtype != NVMF_TRTYPE_FC) ||
2874c5343203SJames Smart 	    (port->disc_addr.adrfam != NVMF_ADDR_FAMILY_FC))
2875c5343203SJames Smart 		return -EINVAL;
2876c5343203SJames Smart 
2877c5343203SJames Smart 	/* map the traddr address info to a target port */
2878c5343203SJames Smart 
28799c5358e1SJames Smart 	ret = nvme_fc_parse_traddr(&traddr, port->disc_addr.traddr,
28809c5358e1SJames Smart 			sizeof(port->disc_addr.traddr));
2881c5343203SJames Smart 	if (ret)
2882c5343203SJames Smart 		return ret;
2883c5343203SJames Smart 
2884ea96d649SJames Smart 	pe = kzalloc(sizeof(*pe), GFP_KERNEL);
2885ea96d649SJames Smart 	if (!pe)
2886ea96d649SJames Smart 		return -ENOMEM;
2887ea96d649SJames Smart 
2888c5343203SJames Smart 	ret = -ENXIO;
2889c5343203SJames Smart 	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
2890c5343203SJames Smart 	list_for_each_entry(tgtport, &nvmet_fc_target_list, tgt_list) {
2891c5343203SJames Smart 		if ((tgtport->fc_target_port.node_name == traddr.nn) &&
2892c5343203SJames Smart 		    (tgtport->fc_target_port.port_name == traddr.pn)) {
2893ea96d649SJames Smart 			/* an FC port can be bound to only 1 nvmet port id */
2894ea96d649SJames Smart 			if (!tgtport->pe) {
2895ea96d649SJames Smart 				nvmet_fc_portentry_bind(tgtport, pe, port);
2896c5343203SJames Smart 				ret = 0;
2897ea96d649SJames Smart 			} else
2898ea96d649SJames Smart 				ret = -EALREADY;
2899c5343203SJames Smart 			break;
2900c5343203SJames Smart 		}
2901c5343203SJames Smart 	}
2902c5343203SJames Smart 	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
2903ea96d649SJames Smart 
2904ea96d649SJames Smart 	if (ret)
2905ea96d649SJames Smart 		kfree(pe);
2906ea96d649SJames Smart 
2907c5343203SJames Smart 	return ret;
2908c5343203SJames Smart }
2909c5343203SJames Smart 
2910c5343203SJames Smart static void
2911c5343203SJames Smart nvmet_fc_remove_port(struct nvmet_port *port)
2912c5343203SJames Smart {
2913ea96d649SJames Smart 	struct nvmet_fc_port_entry *pe = port->priv;
2914ea96d649SJames Smart 
2915ea96d649SJames Smart 	nvmet_fc_portentry_unbind(pe);
2916ea96d649SJames Smart 
2917ccd49addSDaniel Wagner 	/* terminate any outstanding associations */
2918ccd49addSDaniel Wagner 	__nvmet_fc_free_assocs(pe->tgtport);
2919ccd49addSDaniel Wagner 
2920ea96d649SJames Smart 	kfree(pe);
2921c5343203SJames Smart }
2922c5343203SJames Smart 
2923150d71f7SJames Smart static void
2924150d71f7SJames Smart nvmet_fc_discovery_chg(struct nvmet_port *port)
2925150d71f7SJames Smart {
2926150d71f7SJames Smart 	struct nvmet_fc_port_entry *pe = port->priv;
2927150d71f7SJames Smart 	struct nvmet_fc_tgtport *tgtport = pe->tgtport;
2928150d71f7SJames Smart 
2929150d71f7SJames Smart 	if (tgtport && tgtport->ops->discovery_event)
2930150d71f7SJames Smart 		tgtport->ops->discovery_event(&tgtport->fc_target_port);
2931150d71f7SJames Smart }
2932150d71f7SJames Smart 
2933e929f06dSChristoph Hellwig static const struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops = {
2934c5343203SJames Smart 	.owner			= THIS_MODULE,
2935c5343203SJames Smart 	.type			= NVMF_TRTYPE_FC,
2936c5343203SJames Smart 	.msdbd			= 1,
2937c5343203SJames Smart 	.add_port		= nvmet_fc_add_port,
2938c5343203SJames Smart 	.remove_port		= nvmet_fc_remove_port,
2939c5343203SJames Smart 	.queue_response		= nvmet_fc_fcp_nvme_cmd_done,
2940c5343203SJames Smart 	.delete_ctrl		= nvmet_fc_delete_ctrl,
2941150d71f7SJames Smart 	.discovery_chg		= nvmet_fc_discovery_chg,
2942c5343203SJames Smart };
2943c5343203SJames Smart 
2944c5343203SJames Smart static int __init nvmet_fc_init_module(void)
2945c5343203SJames Smart {
2946c5343203SJames Smart 	return nvmet_register_transport(&nvmet_fc_tgt_fcp_ops);
2947c5343203SJames Smart }
2948c5343203SJames Smart 
2949c5343203SJames Smart static void __exit nvmet_fc_exit_module(void)
2950c5343203SJames Smart {
2951ccd49addSDaniel Wagner 	/* ensure any shutdown operation, e.g. delete ctrls have finished */
2952ccd49addSDaniel Wagner 	flush_workqueue(nvmet_wq);
2953ccd49addSDaniel Wagner 
2954c5343203SJames Smart 	/* sanity check - all lports should be removed */
2955c5343203SJames Smart 	if (!list_empty(&nvmet_fc_target_list))
2956c5343203SJames Smart 		pr_warn("%s: targetport list not empty\n", __func__);
2957c5343203SJames Smart 
2958c5343203SJames Smart 	nvmet_unregister_transport(&nvmet_fc_tgt_fcp_ops);
2959c5343203SJames Smart 
2960c5343203SJames Smart 	ida_destroy(&nvmet_fc_tgtport_cnt);
2961c5343203SJames Smart }
2962c5343203SJames Smart 
2963c5343203SJames Smart module_init(nvmet_fc_init_module);
2964c5343203SJames Smart module_exit(nvmet_fc_exit_module);
2965c5343203SJames Smart 
2966c5343203SJames Smart MODULE_LICENSE("GPL v2");