1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (c) 2016 Avago Technologies. All rights reserved.
4 */
5 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
6 #include <linux/module.h>
7 #include <linux/slab.h>
8 #include <linux/blk-mq.h>
9 #include <linux/parser.h>
10 #include <linux/random.h>
11 #include <uapi/scsi/fc/fc_fs.h>
12 #include <uapi/scsi/fc/fc_els.h>
13
14 #include "nvmet.h"
15 #include <linux/nvme-fc-driver.h>
16 #include <linux/nvme-fc.h>
17 #include "../host/fc.h"
18
19
20 /* *************************** Data Structures/Defines ****************** */
21
22
23 #define NVMET_LS_CTX_COUNT 256
24
25 struct nvmet_fc_tgtport;
26 struct nvmet_fc_tgt_assoc;
27
28 struct nvmet_fc_ls_iod { /* for an LS RQST RCV */
29 struct nvmefc_ls_rsp *lsrsp;
30 struct nvmefc_tgt_fcp_req *fcpreq; /* only if RS */
31
32 struct list_head ls_rcv_list; /* tgtport->ls_rcv_list */
33
34 struct nvmet_fc_tgtport *tgtport;
35 struct nvmet_fc_tgt_assoc *assoc;
36 void *hosthandle;
37
38 union nvmefc_ls_requests *rqstbuf;
39 union nvmefc_ls_responses *rspbuf;
40 u16 rqstdatalen;
41 dma_addr_t rspdma;
42
43 struct scatterlist sg[2];
44
45 struct work_struct work;
46 } __aligned(sizeof(unsigned long long));
47
48 struct nvmet_fc_ls_req_op { /* for an LS RQST XMT */
49 struct nvmefc_ls_req ls_req;
50
51 struct nvmet_fc_tgtport *tgtport;
52 void *hosthandle;
53
54 int ls_error;
55 struct list_head lsreq_list; /* tgtport->ls_req_list */
56 bool req_queued;
57 };
58
59
60 /* desired maximum for a single sequence - if sg list allows it */
61 #define NVMET_FC_MAX_SEQ_LENGTH (256 * 1024)
62
63 enum nvmet_fcp_datadir {
64 NVMET_FCP_NODATA,
65 NVMET_FCP_WRITE,
66 NVMET_FCP_READ,
67 NVMET_FCP_ABORTED,
68 };
69
70 struct nvmet_fc_fcp_iod {
71 struct nvmefc_tgt_fcp_req *fcpreq;
72
73 struct nvme_fc_cmd_iu cmdiubuf;
74 struct nvme_fc_ersp_iu rspiubuf;
75 dma_addr_t rspdma;
76 struct scatterlist *next_sg;
77 struct scatterlist *data_sg;
78 int data_sg_cnt;
79 u32 offset;
80 enum nvmet_fcp_datadir io_dir;
81 bool active;
82 bool abort;
83 bool aborted;
84 bool writedataactive;
85 spinlock_t flock;
86
87 struct nvmet_req req;
88 struct work_struct defer_work;
89
90 struct nvmet_fc_tgtport *tgtport;
91 struct nvmet_fc_tgt_queue *queue;
92
93 struct list_head fcp_list; /* tgtport->fcp_list */
94 };
95
96 struct nvmet_fc_tgtport {
97 struct nvmet_fc_target_port fc_target_port;
98
99 struct list_head tgt_list; /* nvmet_fc_target_list */
100 struct device *dev; /* dev for dma mapping */
101 struct nvmet_fc_target_template *ops;
102
103 struct nvmet_fc_ls_iod *iod;
104 spinlock_t lock;
105 struct list_head ls_rcv_list;
106 struct list_head ls_req_list;
107 struct list_head ls_busylist;
108 struct list_head assoc_list;
109 struct list_head host_list;
110 struct ida assoc_cnt;
111 struct nvmet_fc_port_entry *pe;
112 struct kref ref;
113 u32 max_sg_cnt;
114
115 struct work_struct put_work;
116 };
117
118 struct nvmet_fc_port_entry {
119 struct nvmet_fc_tgtport *tgtport;
120 struct nvmet_port *port;
121 u64 node_name;
122 u64 port_name;
123 struct list_head pe_list;
124 };
125
126 struct nvmet_fc_defer_fcp_req {
127 struct list_head req_list;
128 struct nvmefc_tgt_fcp_req *fcp_req;
129 };
130
131 struct nvmet_fc_tgt_queue {
132 bool ninetypercent;
133 u16 qid;
134 u16 sqsize;
135 u16 ersp_ratio;
136 __le16 sqhd;
137 atomic_t connected;
138 atomic_t sqtail;
139 atomic_t zrspcnt;
140 atomic_t rsn;
141 spinlock_t qlock;
142 struct nvmet_cq nvme_cq;
143 struct nvmet_sq nvme_sq;
144 struct nvmet_fc_tgt_assoc *assoc;
145 struct list_head fod_list;
146 struct list_head pending_cmd_list;
147 struct list_head avail_defer_list;
148 struct workqueue_struct *work_q;
149 struct kref ref;
150 struct rcu_head rcu;
151 struct nvmet_fc_fcp_iod fod[]; /* array of fcp_iods */
152 } __aligned(sizeof(unsigned long long));
153
154 struct nvmet_fc_hostport {
155 struct nvmet_fc_tgtport *tgtport;
156 void *hosthandle;
157 struct list_head host_list;
158 struct kref ref;
159 u8 invalid;
160 };
161
162 struct nvmet_fc_tgt_assoc {
163 u64 association_id;
164 u32 a_id;
165 atomic_t terminating;
166 struct nvmet_fc_tgtport *tgtport;
167 struct nvmet_fc_hostport *hostport;
168 struct nvmet_fc_ls_iod *rcv_disconn;
169 struct list_head a_list;
170 struct nvmet_fc_tgt_queue *queues[NVMET_NR_QUEUES + 1];
171 struct kref ref;
172 struct work_struct del_work;
173 struct rcu_head rcu;
174 };
175
176 /*
177 * Association and Connection IDs:
178 *
179 * Association ID will have random number in upper 6 bytes and zero
180 * in lower 2 bytes
181 *
182 * Connection IDs will be Association ID with QID or'd in lower 2 bytes
183 *
184 * note: Association ID = Connection ID for queue 0
185 */
186 #define BYTES_FOR_QID sizeof(u16)
187 #define BYTES_FOR_QID_SHIFT (BYTES_FOR_QID * 8)
188 #define NVMET_FC_QUEUEID_MASK ((u64)((1 << BYTES_FOR_QID_SHIFT) - 1))
189
190 static inline u64
nvmet_fc_makeconnid(struct nvmet_fc_tgt_assoc * assoc,u16 qid)191 nvmet_fc_makeconnid(struct nvmet_fc_tgt_assoc *assoc, u16 qid)
192 {
193 return (assoc->association_id | qid);
194 }
195
196 static inline u64
nvmet_fc_getassociationid(u64 connectionid)197 nvmet_fc_getassociationid(u64 connectionid)
198 {
199 return connectionid & ~NVMET_FC_QUEUEID_MASK;
200 }
201
202 static inline u16
nvmet_fc_getqueueid(u64 connectionid)203 nvmet_fc_getqueueid(u64 connectionid)
204 {
205 return (u16)(connectionid & NVMET_FC_QUEUEID_MASK);
206 }
207
208 static inline struct nvmet_fc_tgtport *
targetport_to_tgtport(struct nvmet_fc_target_port * targetport)209 targetport_to_tgtport(struct nvmet_fc_target_port *targetport)
210 {
211 return container_of(targetport, struct nvmet_fc_tgtport,
212 fc_target_port);
213 }
214
215 static inline struct nvmet_fc_fcp_iod *
nvmet_req_to_fod(struct nvmet_req * nvme_req)216 nvmet_req_to_fod(struct nvmet_req *nvme_req)
217 {
218 return container_of(nvme_req, struct nvmet_fc_fcp_iod, req);
219 }
220
221
222 /* *************************** Globals **************************** */
223
224
225 static DEFINE_SPINLOCK(nvmet_fc_tgtlock);
226
227 static LIST_HEAD(nvmet_fc_target_list);
228 static DEFINE_IDA(nvmet_fc_tgtport_cnt);
229 static LIST_HEAD(nvmet_fc_portentry_list);
230
231
232 static void nvmet_fc_handle_ls_rqst_work(struct work_struct *work);
233 static void nvmet_fc_fcp_rqst_op_defer_work(struct work_struct *work);
234 static void nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc);
235 static int nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc);
236 static void nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue);
237 static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue);
238 static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport);
nvmet_fc_put_tgtport_work(struct work_struct * work)239 static void nvmet_fc_put_tgtport_work(struct work_struct *work)
240 {
241 struct nvmet_fc_tgtport *tgtport =
242 container_of(work, struct nvmet_fc_tgtport, put_work);
243
244 nvmet_fc_tgtport_put(tgtport);
245 }
246 static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport);
247 static void nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
248 struct nvmet_fc_fcp_iod *fod);
249 static void nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc);
250 static void nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport,
251 struct nvmet_fc_ls_iod *iod);
252
253
254 /* *********************** FC-NVME DMA Handling **************************** */
255
256 /*
257 * The fcloop device passes in a NULL device pointer. Real LLD's will
258 * pass in a valid device pointer. If NULL is passed to the dma mapping
259 * routines, depending on the platform, it may or may not succeed, and
260 * may crash.
261 *
262 * As such:
263 * Wrapper all the dma routines and check the dev pointer.
264 *
265 * If simple mappings (return just a dma address, we'll noop them,
266 * returning a dma address of 0.
267 *
268 * On more complex mappings (dma_map_sg), a pseudo routine fills
269 * in the scatter list, setting all dma addresses to 0.
270 */
271
272 static inline dma_addr_t
fc_dma_map_single(struct device * dev,void * ptr,size_t size,enum dma_data_direction dir)273 fc_dma_map_single(struct device *dev, void *ptr, size_t size,
274 enum dma_data_direction dir)
275 {
276 return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L;
277 }
278
279 static inline int
fc_dma_mapping_error(struct device * dev,dma_addr_t dma_addr)280 fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
281 {
282 return dev ? dma_mapping_error(dev, dma_addr) : 0;
283 }
284
285 static inline void
fc_dma_unmap_single(struct device * dev,dma_addr_t addr,size_t size,enum dma_data_direction dir)286 fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
287 enum dma_data_direction dir)
288 {
289 if (dev)
290 dma_unmap_single(dev, addr, size, dir);
291 }
292
293 static inline void
fc_dma_sync_single_for_cpu(struct device * dev,dma_addr_t addr,size_t size,enum dma_data_direction dir)294 fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
295 enum dma_data_direction dir)
296 {
297 if (dev)
298 dma_sync_single_for_cpu(dev, addr, size, dir);
299 }
300
301 static inline void
fc_dma_sync_single_for_device(struct device * dev,dma_addr_t addr,size_t size,enum dma_data_direction dir)302 fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size,
303 enum dma_data_direction dir)
304 {
305 if (dev)
306 dma_sync_single_for_device(dev, addr, size, dir);
307 }
308
309 /* pseudo dma_map_sg call */
310 static int
fc_map_sg(struct scatterlist * sg,int nents)311 fc_map_sg(struct scatterlist *sg, int nents)
312 {
313 struct scatterlist *s;
314 int i;
315
316 WARN_ON(nents == 0 || sg[0].length == 0);
317
318 for_each_sg(sg, s, nents, i) {
319 s->dma_address = 0L;
320 #ifdef CONFIG_NEED_SG_DMA_LENGTH
321 s->dma_length = s->length;
322 #endif
323 }
324 return nents;
325 }
326
327 static inline int
fc_dma_map_sg(struct device * dev,struct scatterlist * sg,int nents,enum dma_data_direction dir)328 fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
329 enum dma_data_direction dir)
330 {
331 return dev ? dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents);
332 }
333
334 static inline void
fc_dma_unmap_sg(struct device * dev,struct scatterlist * sg,int nents,enum dma_data_direction dir)335 fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
336 enum dma_data_direction dir)
337 {
338 if (dev)
339 dma_unmap_sg(dev, sg, nents, dir);
340 }
341
342
343 /* ********************** FC-NVME LS XMT Handling ************************* */
344
345
346 static void
__nvmet_fc_finish_ls_req(struct nvmet_fc_ls_req_op * lsop)347 __nvmet_fc_finish_ls_req(struct nvmet_fc_ls_req_op *lsop)
348 {
349 struct nvmet_fc_tgtport *tgtport = lsop->tgtport;
350 struct nvmefc_ls_req *lsreq = &lsop->ls_req;
351 unsigned long flags;
352
353 spin_lock_irqsave(&tgtport->lock, flags);
354
355 if (!lsop->req_queued) {
356 spin_unlock_irqrestore(&tgtport->lock, flags);
357 goto out_putwork;
358 }
359
360 list_del(&lsop->lsreq_list);
361
362 lsop->req_queued = false;
363
364 spin_unlock_irqrestore(&tgtport->lock, flags);
365
366 fc_dma_unmap_single(tgtport->dev, lsreq->rqstdma,
367 (lsreq->rqstlen + lsreq->rsplen),
368 DMA_BIDIRECTIONAL);
369
370 out_putwork:
371 queue_work(nvmet_wq, &tgtport->put_work);
372 }
373
374 static int
__nvmet_fc_send_ls_req(struct nvmet_fc_tgtport * tgtport,struct nvmet_fc_ls_req_op * lsop,void (* done)(struct nvmefc_ls_req * req,int status))375 __nvmet_fc_send_ls_req(struct nvmet_fc_tgtport *tgtport,
376 struct nvmet_fc_ls_req_op *lsop,
377 void (*done)(struct nvmefc_ls_req *req, int status))
378 {
379 struct nvmefc_ls_req *lsreq = &lsop->ls_req;
380 unsigned long flags;
381 int ret = 0;
382
383 if (!tgtport->ops->ls_req)
384 return -EOPNOTSUPP;
385
386 if (!nvmet_fc_tgtport_get(tgtport))
387 return -ESHUTDOWN;
388
389 lsreq->done = done;
390 lsop->req_queued = false;
391 INIT_LIST_HEAD(&lsop->lsreq_list);
392
393 lsreq->rqstdma = fc_dma_map_single(tgtport->dev, lsreq->rqstaddr,
394 lsreq->rqstlen + lsreq->rsplen,
395 DMA_BIDIRECTIONAL);
396 if (fc_dma_mapping_error(tgtport->dev, lsreq->rqstdma)) {
397 ret = -EFAULT;
398 goto out_puttgtport;
399 }
400 lsreq->rspdma = lsreq->rqstdma + lsreq->rqstlen;
401
402 spin_lock_irqsave(&tgtport->lock, flags);
403
404 list_add_tail(&lsop->lsreq_list, &tgtport->ls_req_list);
405
406 lsop->req_queued = true;
407
408 spin_unlock_irqrestore(&tgtport->lock, flags);
409
410 ret = tgtport->ops->ls_req(&tgtport->fc_target_port, lsop->hosthandle,
411 lsreq);
412 if (ret)
413 goto out_unlink;
414
415 return 0;
416
417 out_unlink:
418 lsop->ls_error = ret;
419 spin_lock_irqsave(&tgtport->lock, flags);
420 lsop->req_queued = false;
421 list_del(&lsop->lsreq_list);
422 spin_unlock_irqrestore(&tgtport->lock, flags);
423 fc_dma_unmap_single(tgtport->dev, lsreq->rqstdma,
424 (lsreq->rqstlen + lsreq->rsplen),
425 DMA_BIDIRECTIONAL);
426 out_puttgtport:
427 nvmet_fc_tgtport_put(tgtport);
428
429 return ret;
430 }
431
432 static int
nvmet_fc_send_ls_req_async(struct nvmet_fc_tgtport * tgtport,struct nvmet_fc_ls_req_op * lsop,void (* done)(struct nvmefc_ls_req * req,int status))433 nvmet_fc_send_ls_req_async(struct nvmet_fc_tgtport *tgtport,
434 struct nvmet_fc_ls_req_op *lsop,
435 void (*done)(struct nvmefc_ls_req *req, int status))
436 {
437 /* don't wait for completion */
438
439 return __nvmet_fc_send_ls_req(tgtport, lsop, done);
440 }
441
442 static void
nvmet_fc_disconnect_assoc_done(struct nvmefc_ls_req * lsreq,int status)443 nvmet_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status)
444 {
445 struct nvmet_fc_ls_req_op *lsop =
446 container_of(lsreq, struct nvmet_fc_ls_req_op, ls_req);
447
448 __nvmet_fc_finish_ls_req(lsop);
449
450 /* fc-nvme target doesn't care about success or failure of cmd */
451
452 kfree(lsop);
453 }
454
455 /*
456 * This routine sends a FC-NVME LS to disconnect (aka terminate)
457 * the FC-NVME Association. Terminating the association also
458 * terminates the FC-NVME connections (per queue, both admin and io
459 * queues) that are part of the association. E.g. things are torn
460 * down, and the related FC-NVME Association ID and Connection IDs
461 * become invalid.
462 *
463 * The behavior of the fc-nvme target is such that it's
464 * understanding of the association and connections will implicitly
465 * be torn down. The action is implicit as it may be due to a loss of
466 * connectivity with the fc-nvme host, so the target may never get a
467 * response even if it tried. As such, the action of this routine
468 * is to asynchronously send the LS, ignore any results of the LS, and
469 * continue on with terminating the association. If the fc-nvme host
470 * is present and receives the LS, it too can tear down.
471 */
472 static void
nvmet_fc_xmt_disconnect_assoc(struct nvmet_fc_tgt_assoc * assoc)473 nvmet_fc_xmt_disconnect_assoc(struct nvmet_fc_tgt_assoc *assoc)
474 {
475 struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
476 struct fcnvme_ls_disconnect_assoc_rqst *discon_rqst;
477 struct fcnvme_ls_disconnect_assoc_acc *discon_acc;
478 struct nvmet_fc_ls_req_op *lsop;
479 struct nvmefc_ls_req *lsreq;
480 int ret;
481
482 /*
483 * If ls_req is NULL or no hosthandle, it's an older lldd and no
484 * message is normal. Otherwise, send unless the hostport has
485 * already been invalidated by the lldd.
486 */
487 if (!tgtport->ops->ls_req || !assoc->hostport ||
488 assoc->hostport->invalid)
489 return;
490
491 lsop = kzalloc((sizeof(*lsop) +
492 sizeof(*discon_rqst) + sizeof(*discon_acc) +
493 tgtport->ops->lsrqst_priv_sz), GFP_KERNEL);
494 if (!lsop) {
495 dev_info(tgtport->dev,
496 "{%d:%d} send Disconnect Association failed: ENOMEM\n",
497 tgtport->fc_target_port.port_num, assoc->a_id);
498 return;
499 }
500
501 discon_rqst = (struct fcnvme_ls_disconnect_assoc_rqst *)&lsop[1];
502 discon_acc = (struct fcnvme_ls_disconnect_assoc_acc *)&discon_rqst[1];
503 lsreq = &lsop->ls_req;
504 if (tgtport->ops->lsrqst_priv_sz)
505 lsreq->private = (void *)&discon_acc[1];
506 else
507 lsreq->private = NULL;
508
509 lsop->tgtport = tgtport;
510 lsop->hosthandle = assoc->hostport->hosthandle;
511
512 nvmefc_fmt_lsreq_discon_assoc(lsreq, discon_rqst, discon_acc,
513 assoc->association_id);
514
515 ret = nvmet_fc_send_ls_req_async(tgtport, lsop,
516 nvmet_fc_disconnect_assoc_done);
517 if (ret) {
518 dev_info(tgtport->dev,
519 "{%d:%d} XMT Disconnect Association failed: %d\n",
520 tgtport->fc_target_port.port_num, assoc->a_id, ret);
521 kfree(lsop);
522 }
523 }
524
525
526 /* *********************** FC-NVME Port Management ************************ */
527
528
529 static int
nvmet_fc_alloc_ls_iodlist(struct nvmet_fc_tgtport * tgtport)530 nvmet_fc_alloc_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
531 {
532 struct nvmet_fc_ls_iod *iod;
533 int i;
534
535 iod = kcalloc(NVMET_LS_CTX_COUNT, sizeof(struct nvmet_fc_ls_iod),
536 GFP_KERNEL);
537 if (!iod)
538 return -ENOMEM;
539
540 tgtport->iod = iod;
541
542 for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) {
543 INIT_WORK(&iod->work, nvmet_fc_handle_ls_rqst_work);
544 iod->tgtport = tgtport;
545 list_add_tail(&iod->ls_rcv_list, &tgtport->ls_rcv_list);
546
547 iod->rqstbuf = kzalloc(sizeof(union nvmefc_ls_requests) +
548 sizeof(union nvmefc_ls_responses),
549 GFP_KERNEL);
550 if (!iod->rqstbuf)
551 goto out_fail;
552
553 iod->rspbuf = (union nvmefc_ls_responses *)&iod->rqstbuf[1];
554
555 iod->rspdma = fc_dma_map_single(tgtport->dev, iod->rspbuf,
556 sizeof(*iod->rspbuf),
557 DMA_TO_DEVICE);
558 if (fc_dma_mapping_error(tgtport->dev, iod->rspdma))
559 goto out_fail;
560 }
561
562 return 0;
563
564 out_fail:
565 kfree(iod->rqstbuf);
566 list_del(&iod->ls_rcv_list);
567 for (iod--, i--; i >= 0; iod--, i--) {
568 fc_dma_unmap_single(tgtport->dev, iod->rspdma,
569 sizeof(*iod->rspbuf), DMA_TO_DEVICE);
570 kfree(iod->rqstbuf);
571 list_del(&iod->ls_rcv_list);
572 }
573
574 kfree(iod);
575
576 return -EFAULT;
577 }
578
579 static void
nvmet_fc_free_ls_iodlist(struct nvmet_fc_tgtport * tgtport)580 nvmet_fc_free_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
581 {
582 struct nvmet_fc_ls_iod *iod = tgtport->iod;
583 int i;
584
585 for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) {
586 fc_dma_unmap_single(tgtport->dev,
587 iod->rspdma, sizeof(*iod->rspbuf),
588 DMA_TO_DEVICE);
589 kfree(iod->rqstbuf);
590 list_del(&iod->ls_rcv_list);
591 }
592 kfree(tgtport->iod);
593 }
594
595 static struct nvmet_fc_ls_iod *
nvmet_fc_alloc_ls_iod(struct nvmet_fc_tgtport * tgtport)596 nvmet_fc_alloc_ls_iod(struct nvmet_fc_tgtport *tgtport)
597 {
598 struct nvmet_fc_ls_iod *iod;
599 unsigned long flags;
600
601 spin_lock_irqsave(&tgtport->lock, flags);
602 iod = list_first_entry_or_null(&tgtport->ls_rcv_list,
603 struct nvmet_fc_ls_iod, ls_rcv_list);
604 if (iod)
605 list_move_tail(&iod->ls_rcv_list, &tgtport->ls_busylist);
606 spin_unlock_irqrestore(&tgtport->lock, flags);
607 return iod;
608 }
609
610
611 static void
nvmet_fc_free_ls_iod(struct nvmet_fc_tgtport * tgtport,struct nvmet_fc_ls_iod * iod)612 nvmet_fc_free_ls_iod(struct nvmet_fc_tgtport *tgtport,
613 struct nvmet_fc_ls_iod *iod)
614 {
615 unsigned long flags;
616
617 spin_lock_irqsave(&tgtport->lock, flags);
618 list_move(&iod->ls_rcv_list, &tgtport->ls_rcv_list);
619 spin_unlock_irqrestore(&tgtport->lock, flags);
620 }
621
622 static void
nvmet_fc_prep_fcp_iodlist(struct nvmet_fc_tgtport * tgtport,struct nvmet_fc_tgt_queue * queue)623 nvmet_fc_prep_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
624 struct nvmet_fc_tgt_queue *queue)
625 {
626 struct nvmet_fc_fcp_iod *fod = queue->fod;
627 int i;
628
629 for (i = 0; i < queue->sqsize; fod++, i++) {
630 INIT_WORK(&fod->defer_work, nvmet_fc_fcp_rqst_op_defer_work);
631 fod->tgtport = tgtport;
632 fod->queue = queue;
633 fod->active = false;
634 fod->abort = false;
635 fod->aborted = false;
636 fod->fcpreq = NULL;
637 list_add_tail(&fod->fcp_list, &queue->fod_list);
638 spin_lock_init(&fod->flock);
639
640 fod->rspdma = fc_dma_map_single(tgtport->dev, &fod->rspiubuf,
641 sizeof(fod->rspiubuf), DMA_TO_DEVICE);
642 if (fc_dma_mapping_error(tgtport->dev, fod->rspdma)) {
643 list_del(&fod->fcp_list);
644 for (fod--, i--; i >= 0; fod--, i--) {
645 fc_dma_unmap_single(tgtport->dev, fod->rspdma,
646 sizeof(fod->rspiubuf),
647 DMA_TO_DEVICE);
648 fod->rspdma = 0L;
649 list_del(&fod->fcp_list);
650 }
651
652 return;
653 }
654 }
655 }
656
657 static void
nvmet_fc_destroy_fcp_iodlist(struct nvmet_fc_tgtport * tgtport,struct nvmet_fc_tgt_queue * queue)658 nvmet_fc_destroy_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
659 struct nvmet_fc_tgt_queue *queue)
660 {
661 struct nvmet_fc_fcp_iod *fod = queue->fod;
662 int i;
663
664 for (i = 0; i < queue->sqsize; fod++, i++) {
665 if (fod->rspdma)
666 fc_dma_unmap_single(tgtport->dev, fod->rspdma,
667 sizeof(fod->rspiubuf), DMA_TO_DEVICE);
668 }
669 }
670
671 static struct nvmet_fc_fcp_iod *
nvmet_fc_alloc_fcp_iod(struct nvmet_fc_tgt_queue * queue)672 nvmet_fc_alloc_fcp_iod(struct nvmet_fc_tgt_queue *queue)
673 {
674 struct nvmet_fc_fcp_iod *fod;
675
676 lockdep_assert_held(&queue->qlock);
677
678 fod = list_first_entry_or_null(&queue->fod_list,
679 struct nvmet_fc_fcp_iod, fcp_list);
680 if (fod) {
681 list_del(&fod->fcp_list);
682 fod->active = true;
683 /*
684 * no queue reference is taken, as it was taken by the
685 * queue lookup just prior to the allocation. The iod
686 * will "inherit" that reference.
687 */
688 }
689 return fod;
690 }
691
692
693 static void
nvmet_fc_queue_fcp_req(struct nvmet_fc_tgtport * tgtport,struct nvmet_fc_tgt_queue * queue,struct nvmefc_tgt_fcp_req * fcpreq)694 nvmet_fc_queue_fcp_req(struct nvmet_fc_tgtport *tgtport,
695 struct nvmet_fc_tgt_queue *queue,
696 struct nvmefc_tgt_fcp_req *fcpreq)
697 {
698 struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
699
700 /*
701 * put all admin cmds on hw queue id 0. All io commands go to
702 * the respective hw queue based on a modulo basis
703 */
704 fcpreq->hwqid = queue->qid ?
705 ((queue->qid - 1) % tgtport->ops->max_hw_queues) : 0;
706
707 nvmet_fc_handle_fcp_rqst(tgtport, fod);
708 }
709
710 static void
nvmet_fc_fcp_rqst_op_defer_work(struct work_struct * work)711 nvmet_fc_fcp_rqst_op_defer_work(struct work_struct *work)
712 {
713 struct nvmet_fc_fcp_iod *fod =
714 container_of(work, struct nvmet_fc_fcp_iod, defer_work);
715
716 /* Submit deferred IO for processing */
717 nvmet_fc_queue_fcp_req(fod->tgtport, fod->queue, fod->fcpreq);
718
719 }
720
721 static void
nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue * queue,struct nvmet_fc_fcp_iod * fod)722 nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue,
723 struct nvmet_fc_fcp_iod *fod)
724 {
725 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
726 struct nvmet_fc_tgtport *tgtport = fod->tgtport;
727 struct nvmet_fc_defer_fcp_req *deferfcp;
728 unsigned long flags;
729
730 fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma,
731 sizeof(fod->rspiubuf), DMA_TO_DEVICE);
732
733 fcpreq->nvmet_fc_private = NULL;
734
735 fod->active = false;
736 fod->abort = false;
737 fod->aborted = false;
738 fod->writedataactive = false;
739 fod->fcpreq = NULL;
740
741 tgtport->ops->fcp_req_release(&tgtport->fc_target_port, fcpreq);
742
743 /* release the queue lookup reference on the completed IO */
744 nvmet_fc_tgt_q_put(queue);
745
746 spin_lock_irqsave(&queue->qlock, flags);
747 deferfcp = list_first_entry_or_null(&queue->pending_cmd_list,
748 struct nvmet_fc_defer_fcp_req, req_list);
749 if (!deferfcp) {
750 list_add_tail(&fod->fcp_list, &fod->queue->fod_list);
751 spin_unlock_irqrestore(&queue->qlock, flags);
752 return;
753 }
754
755 /* Re-use the fod for the next pending cmd that was deferred */
756 list_del(&deferfcp->req_list);
757
758 fcpreq = deferfcp->fcp_req;
759
760 /* deferfcp can be reused for another IO at a later date */
761 list_add_tail(&deferfcp->req_list, &queue->avail_defer_list);
762
763 spin_unlock_irqrestore(&queue->qlock, flags);
764
765 /* Save NVME CMD IO in fod */
766 memcpy(&fod->cmdiubuf, fcpreq->rspaddr, fcpreq->rsplen);
767
768 /* Setup new fcpreq to be processed */
769 fcpreq->rspaddr = NULL;
770 fcpreq->rsplen = 0;
771 fcpreq->nvmet_fc_private = fod;
772 fod->fcpreq = fcpreq;
773 fod->active = true;
774
775 /* inform LLDD IO is now being processed */
776 tgtport->ops->defer_rcv(&tgtport->fc_target_port, fcpreq);
777
778 /*
779 * Leave the queue lookup get reference taken when
780 * fod was originally allocated.
781 */
782
783 queue_work(queue->work_q, &fod->defer_work);
784 }
785
786 static struct nvmet_fc_tgt_queue *
nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc * assoc,u16 qid,u16 sqsize)787 nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
788 u16 qid, u16 sqsize)
789 {
790 struct nvmet_fc_tgt_queue *queue;
791 int ret;
792
793 if (qid > NVMET_NR_QUEUES)
794 return NULL;
795
796 queue = kzalloc(struct_size(queue, fod, sqsize), GFP_KERNEL);
797 if (!queue)
798 return NULL;
799
800 queue->work_q = alloc_workqueue("ntfc%d.%d.%d", 0, 0,
801 assoc->tgtport->fc_target_port.port_num,
802 assoc->a_id, qid);
803 if (!queue->work_q)
804 goto out_free_queue;
805
806 queue->qid = qid;
807 queue->sqsize = sqsize;
808 queue->assoc = assoc;
809 INIT_LIST_HEAD(&queue->fod_list);
810 INIT_LIST_HEAD(&queue->avail_defer_list);
811 INIT_LIST_HEAD(&queue->pending_cmd_list);
812 atomic_set(&queue->connected, 0);
813 atomic_set(&queue->sqtail, 0);
814 atomic_set(&queue->rsn, 1);
815 atomic_set(&queue->zrspcnt, 0);
816 spin_lock_init(&queue->qlock);
817 kref_init(&queue->ref);
818
819 nvmet_fc_prep_fcp_iodlist(assoc->tgtport, queue);
820
821 ret = nvmet_sq_init(&queue->nvme_sq);
822 if (ret)
823 goto out_fail_iodlist;
824
825 WARN_ON(assoc->queues[qid]);
826 assoc->queues[qid] = queue;
827
828 return queue;
829
830 out_fail_iodlist:
831 nvmet_fc_destroy_fcp_iodlist(assoc->tgtport, queue);
832 destroy_workqueue(queue->work_q);
833 out_free_queue:
834 kfree(queue);
835 return NULL;
836 }
837
838
839 static void
nvmet_fc_tgt_queue_free(struct kref * ref)840 nvmet_fc_tgt_queue_free(struct kref *ref)
841 {
842 struct nvmet_fc_tgt_queue *queue =
843 container_of(ref, struct nvmet_fc_tgt_queue, ref);
844
845 nvmet_fc_destroy_fcp_iodlist(queue->assoc->tgtport, queue);
846
847 destroy_workqueue(queue->work_q);
848
849 kfree_rcu(queue, rcu);
850 }
851
852 static void
nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue * queue)853 nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue)
854 {
855 kref_put(&queue->ref, nvmet_fc_tgt_queue_free);
856 }
857
858 static int
nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue * queue)859 nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue)
860 {
861 return kref_get_unless_zero(&queue->ref);
862 }
863
864
865 static void
nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue * queue)866 nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue)
867 {
868 struct nvmet_fc_tgtport *tgtport = queue->assoc->tgtport;
869 struct nvmet_fc_fcp_iod *fod = queue->fod;
870 struct nvmet_fc_defer_fcp_req *deferfcp, *tempptr;
871 unsigned long flags;
872 int i;
873 bool disconnect;
874
875 disconnect = atomic_xchg(&queue->connected, 0);
876
877 /* if not connected, nothing to do */
878 if (!disconnect)
879 return;
880
881 spin_lock_irqsave(&queue->qlock, flags);
882 /* abort outstanding io's */
883 for (i = 0; i < queue->sqsize; fod++, i++) {
884 if (fod->active) {
885 spin_lock(&fod->flock);
886 fod->abort = true;
887 /*
888 * only call lldd abort routine if waiting for
889 * writedata. other outstanding ops should finish
890 * on their own.
891 */
892 if (fod->writedataactive) {
893 fod->aborted = true;
894 spin_unlock(&fod->flock);
895 tgtport->ops->fcp_abort(
896 &tgtport->fc_target_port, fod->fcpreq);
897 } else
898 spin_unlock(&fod->flock);
899 }
900 }
901
902 /* Cleanup defer'ed IOs in queue */
903 list_for_each_entry_safe(deferfcp, tempptr, &queue->avail_defer_list,
904 req_list) {
905 list_del(&deferfcp->req_list);
906 kfree(deferfcp);
907 }
908
909 for (;;) {
910 deferfcp = list_first_entry_or_null(&queue->pending_cmd_list,
911 struct nvmet_fc_defer_fcp_req, req_list);
912 if (!deferfcp)
913 break;
914
915 list_del(&deferfcp->req_list);
916 spin_unlock_irqrestore(&queue->qlock, flags);
917
918 tgtport->ops->defer_rcv(&tgtport->fc_target_port,
919 deferfcp->fcp_req);
920
921 tgtport->ops->fcp_abort(&tgtport->fc_target_port,
922 deferfcp->fcp_req);
923
924 tgtport->ops->fcp_req_release(&tgtport->fc_target_port,
925 deferfcp->fcp_req);
926
927 /* release the queue lookup reference */
928 nvmet_fc_tgt_q_put(queue);
929
930 kfree(deferfcp);
931
932 spin_lock_irqsave(&queue->qlock, flags);
933 }
934 spin_unlock_irqrestore(&queue->qlock, flags);
935
936 flush_workqueue(queue->work_q);
937
938 nvmet_sq_destroy(&queue->nvme_sq);
939
940 nvmet_fc_tgt_q_put(queue);
941 }
942
943 static struct nvmet_fc_tgt_queue *
nvmet_fc_find_target_queue(struct nvmet_fc_tgtport * tgtport,u64 connection_id)944 nvmet_fc_find_target_queue(struct nvmet_fc_tgtport *tgtport,
945 u64 connection_id)
946 {
947 struct nvmet_fc_tgt_assoc *assoc;
948 struct nvmet_fc_tgt_queue *queue;
949 u64 association_id = nvmet_fc_getassociationid(connection_id);
950 u16 qid = nvmet_fc_getqueueid(connection_id);
951
952 if (qid > NVMET_NR_QUEUES)
953 return NULL;
954
955 rcu_read_lock();
956 list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
957 if (association_id == assoc->association_id) {
958 queue = assoc->queues[qid];
959 if (queue &&
960 (!atomic_read(&queue->connected) ||
961 !nvmet_fc_tgt_q_get(queue)))
962 queue = NULL;
963 rcu_read_unlock();
964 return queue;
965 }
966 }
967 rcu_read_unlock();
968 return NULL;
969 }
970
971 static void
nvmet_fc_hostport_free(struct kref * ref)972 nvmet_fc_hostport_free(struct kref *ref)
973 {
974 struct nvmet_fc_hostport *hostport =
975 container_of(ref, struct nvmet_fc_hostport, ref);
976 struct nvmet_fc_tgtport *tgtport = hostport->tgtport;
977 unsigned long flags;
978
979 spin_lock_irqsave(&tgtport->lock, flags);
980 list_del(&hostport->host_list);
981 spin_unlock_irqrestore(&tgtport->lock, flags);
982 if (tgtport->ops->host_release && hostport->invalid)
983 tgtport->ops->host_release(hostport->hosthandle);
984 kfree(hostport);
985 nvmet_fc_tgtport_put(tgtport);
986 }
987
988 static void
nvmet_fc_hostport_put(struct nvmet_fc_hostport * hostport)989 nvmet_fc_hostport_put(struct nvmet_fc_hostport *hostport)
990 {
991 kref_put(&hostport->ref, nvmet_fc_hostport_free);
992 }
993
994 static int
nvmet_fc_hostport_get(struct nvmet_fc_hostport * hostport)995 nvmet_fc_hostport_get(struct nvmet_fc_hostport *hostport)
996 {
997 return kref_get_unless_zero(&hostport->ref);
998 }
999
1000 static void
nvmet_fc_free_hostport(struct nvmet_fc_hostport * hostport)1001 nvmet_fc_free_hostport(struct nvmet_fc_hostport *hostport)
1002 {
1003 /* if LLDD not implemented, leave as NULL */
1004 if (!hostport || !hostport->hosthandle)
1005 return;
1006
1007 nvmet_fc_hostport_put(hostport);
1008 }
1009
1010 static struct nvmet_fc_hostport *
nvmet_fc_match_hostport(struct nvmet_fc_tgtport * tgtport,void * hosthandle)1011 nvmet_fc_match_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
1012 {
1013 struct nvmet_fc_hostport *host;
1014
1015 lockdep_assert_held(&tgtport->lock);
1016
1017 list_for_each_entry(host, &tgtport->host_list, host_list) {
1018 if (host->hosthandle == hosthandle && !host->invalid) {
1019 if (nvmet_fc_hostport_get(host))
1020 return (host);
1021 }
1022 }
1023
1024 return NULL;
1025 }
1026
1027 static struct nvmet_fc_hostport *
nvmet_fc_alloc_hostport(struct nvmet_fc_tgtport * tgtport,void * hosthandle)1028 nvmet_fc_alloc_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
1029 {
1030 struct nvmet_fc_hostport *newhost, *match = NULL;
1031 unsigned long flags;
1032
1033 /*
1034 * Caller holds a reference on tgtport.
1035 */
1036
1037 /* if LLDD not implemented, leave as NULL */
1038 if (!hosthandle)
1039 return NULL;
1040
1041 spin_lock_irqsave(&tgtport->lock, flags);
1042 match = nvmet_fc_match_hostport(tgtport, hosthandle);
1043 spin_unlock_irqrestore(&tgtport->lock, flags);
1044
1045 if (match)
1046 return match;
1047
1048 newhost = kzalloc(sizeof(*newhost), GFP_KERNEL);
1049 if (!newhost)
1050 return ERR_PTR(-ENOMEM);
1051
1052 spin_lock_irqsave(&tgtport->lock, flags);
1053 match = nvmet_fc_match_hostport(tgtport, hosthandle);
1054 if (match) {
1055 /* new allocation not needed */
1056 kfree(newhost);
1057 newhost = match;
1058 } else {
1059 nvmet_fc_tgtport_get(tgtport);
1060 newhost->tgtport = tgtport;
1061 newhost->hosthandle = hosthandle;
1062 INIT_LIST_HEAD(&newhost->host_list);
1063 kref_init(&newhost->ref);
1064
1065 list_add_tail(&newhost->host_list, &tgtport->host_list);
1066 }
1067 spin_unlock_irqrestore(&tgtport->lock, flags);
1068
1069 return newhost;
1070 }
1071
1072 static void
nvmet_fc_delete_assoc(struct nvmet_fc_tgt_assoc * assoc)1073 nvmet_fc_delete_assoc(struct nvmet_fc_tgt_assoc *assoc)
1074 {
1075 nvmet_fc_delete_target_assoc(assoc);
1076 nvmet_fc_tgt_a_put(assoc);
1077 }
1078
1079 static void
nvmet_fc_delete_assoc_work(struct work_struct * work)1080 nvmet_fc_delete_assoc_work(struct work_struct *work)
1081 {
1082 struct nvmet_fc_tgt_assoc *assoc =
1083 container_of(work, struct nvmet_fc_tgt_assoc, del_work);
1084 struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
1085
1086 nvmet_fc_delete_assoc(assoc);
1087 nvmet_fc_tgtport_put(tgtport);
1088 }
1089
1090 static void
nvmet_fc_schedule_delete_assoc(struct nvmet_fc_tgt_assoc * assoc)1091 nvmet_fc_schedule_delete_assoc(struct nvmet_fc_tgt_assoc *assoc)
1092 {
1093 nvmet_fc_tgtport_get(assoc->tgtport);
1094 if (!queue_work(nvmet_wq, &assoc->del_work))
1095 nvmet_fc_tgtport_put(assoc->tgtport);
1096 }
1097
1098 static struct nvmet_fc_tgt_assoc *
nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport * tgtport,void * hosthandle)1099 nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
1100 {
1101 struct nvmet_fc_tgt_assoc *assoc, *tmpassoc;
1102 unsigned long flags;
1103 u64 ran;
1104 int idx;
1105 bool needrandom = true;
1106
1107 if (!tgtport->pe)
1108 return NULL;
1109
1110 assoc = kzalloc(sizeof(*assoc), GFP_KERNEL);
1111 if (!assoc)
1112 return NULL;
1113
1114 idx = ida_alloc(&tgtport->assoc_cnt, GFP_KERNEL);
1115 if (idx < 0)
1116 goto out_free_assoc;
1117
1118 if (!nvmet_fc_tgtport_get(tgtport))
1119 goto out_ida;
1120
1121 assoc->hostport = nvmet_fc_alloc_hostport(tgtport, hosthandle);
1122 if (IS_ERR(assoc->hostport))
1123 goto out_put;
1124
1125 assoc->tgtport = tgtport;
1126 assoc->a_id = idx;
1127 INIT_LIST_HEAD(&assoc->a_list);
1128 kref_init(&assoc->ref);
1129 INIT_WORK(&assoc->del_work, nvmet_fc_delete_assoc_work);
1130 atomic_set(&assoc->terminating, 0);
1131
1132 while (needrandom) {
1133 get_random_bytes(&ran, sizeof(ran) - BYTES_FOR_QID);
1134 ran = ran << BYTES_FOR_QID_SHIFT;
1135
1136 spin_lock_irqsave(&tgtport->lock, flags);
1137 needrandom = false;
1138 list_for_each_entry(tmpassoc, &tgtport->assoc_list, a_list) {
1139 if (ran == tmpassoc->association_id) {
1140 needrandom = true;
1141 break;
1142 }
1143 }
1144 if (!needrandom) {
1145 assoc->association_id = ran;
1146 list_add_tail_rcu(&assoc->a_list, &tgtport->assoc_list);
1147 }
1148 spin_unlock_irqrestore(&tgtport->lock, flags);
1149 }
1150
1151 return assoc;
1152
1153 out_put:
1154 nvmet_fc_tgtport_put(tgtport);
1155 out_ida:
1156 ida_free(&tgtport->assoc_cnt, idx);
1157 out_free_assoc:
1158 kfree(assoc);
1159 return NULL;
1160 }
1161
1162 static void
nvmet_fc_target_assoc_free(struct kref * ref)1163 nvmet_fc_target_assoc_free(struct kref *ref)
1164 {
1165 struct nvmet_fc_tgt_assoc *assoc =
1166 container_of(ref, struct nvmet_fc_tgt_assoc, ref);
1167 struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
1168 struct nvmet_fc_ls_iod *oldls;
1169 unsigned long flags;
1170 int i;
1171
1172 for (i = NVMET_NR_QUEUES; i >= 0; i--) {
1173 if (assoc->queues[i])
1174 nvmet_fc_delete_target_queue(assoc->queues[i]);
1175 }
1176
1177 /* Send Disconnect now that all i/o has completed */
1178 nvmet_fc_xmt_disconnect_assoc(assoc);
1179
1180 nvmet_fc_free_hostport(assoc->hostport);
1181 spin_lock_irqsave(&tgtport->lock, flags);
1182 oldls = assoc->rcv_disconn;
1183 spin_unlock_irqrestore(&tgtport->lock, flags);
1184 /* if pending Rcv Disconnect Association LS, send rsp now */
1185 if (oldls)
1186 nvmet_fc_xmt_ls_rsp(tgtport, oldls);
1187 ida_free(&tgtport->assoc_cnt, assoc->a_id);
1188 dev_info(tgtport->dev,
1189 "{%d:%d} Association freed\n",
1190 tgtport->fc_target_port.port_num, assoc->a_id);
1191 kfree_rcu(assoc, rcu);
1192 nvmet_fc_tgtport_put(tgtport);
1193 }
1194
1195 static void
nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc * assoc)1196 nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc)
1197 {
1198 kref_put(&assoc->ref, nvmet_fc_target_assoc_free);
1199 }
1200
1201 static int
nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc * assoc)1202 nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc)
1203 {
1204 return kref_get_unless_zero(&assoc->ref);
1205 }
1206
1207 static void
nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc * assoc)1208 nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc)
1209 {
1210 struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
1211 unsigned long flags;
1212 int i, terminating;
1213
1214 terminating = atomic_xchg(&assoc->terminating, 1);
1215
1216 /* if already terminating, do nothing */
1217 if (terminating)
1218 return;
1219
1220 spin_lock_irqsave(&tgtport->lock, flags);
1221 list_del_rcu(&assoc->a_list);
1222 spin_unlock_irqrestore(&tgtport->lock, flags);
1223
1224 synchronize_rcu();
1225
1226 /* ensure all in-flight I/Os have been processed */
1227 for (i = NVMET_NR_QUEUES; i >= 0; i--) {
1228 if (assoc->queues[i])
1229 flush_workqueue(assoc->queues[i]->work_q);
1230 }
1231
1232 dev_info(tgtport->dev,
1233 "{%d:%d} Association deleted\n",
1234 tgtport->fc_target_port.port_num, assoc->a_id);
1235 }
1236
1237 static struct nvmet_fc_tgt_assoc *
nvmet_fc_find_target_assoc(struct nvmet_fc_tgtport * tgtport,u64 association_id)1238 nvmet_fc_find_target_assoc(struct nvmet_fc_tgtport *tgtport,
1239 u64 association_id)
1240 {
1241 struct nvmet_fc_tgt_assoc *assoc;
1242 struct nvmet_fc_tgt_assoc *ret = NULL;
1243
1244 rcu_read_lock();
1245 list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
1246 if (association_id == assoc->association_id) {
1247 ret = assoc;
1248 if (!nvmet_fc_tgt_a_get(assoc))
1249 ret = NULL;
1250 break;
1251 }
1252 }
1253 rcu_read_unlock();
1254
1255 return ret;
1256 }
1257
1258 static void
nvmet_fc_portentry_bind(struct nvmet_fc_tgtport * tgtport,struct nvmet_fc_port_entry * pe,struct nvmet_port * port)1259 nvmet_fc_portentry_bind(struct nvmet_fc_tgtport *tgtport,
1260 struct nvmet_fc_port_entry *pe,
1261 struct nvmet_port *port)
1262 {
1263 lockdep_assert_held(&nvmet_fc_tgtlock);
1264
1265 pe->tgtport = tgtport;
1266 tgtport->pe = pe;
1267
1268 pe->port = port;
1269 port->priv = pe;
1270
1271 pe->node_name = tgtport->fc_target_port.node_name;
1272 pe->port_name = tgtport->fc_target_port.port_name;
1273 INIT_LIST_HEAD(&pe->pe_list);
1274
1275 list_add_tail(&pe->pe_list, &nvmet_fc_portentry_list);
1276 }
1277
1278 static void
nvmet_fc_portentry_unbind(struct nvmet_fc_port_entry * pe)1279 nvmet_fc_portentry_unbind(struct nvmet_fc_port_entry *pe)
1280 {
1281 unsigned long flags;
1282
1283 spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
1284 if (pe->tgtport)
1285 pe->tgtport->pe = NULL;
1286 list_del(&pe->pe_list);
1287 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
1288 }
1289
1290 /*
1291 * called when a targetport deregisters. Breaks the relationship
1292 * with the nvmet port, but leaves the port_entry in place so that
1293 * re-registration can resume operation.
1294 */
1295 static void
nvmet_fc_portentry_unbind_tgt(struct nvmet_fc_tgtport * tgtport)1296 nvmet_fc_portentry_unbind_tgt(struct nvmet_fc_tgtport *tgtport)
1297 {
1298 struct nvmet_fc_port_entry *pe;
1299 unsigned long flags;
1300
1301 spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
1302 pe = tgtport->pe;
1303 if (pe)
1304 pe->tgtport = NULL;
1305 tgtport->pe = NULL;
1306 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
1307 }
1308
1309 /*
1310 * called when a new targetport is registered. Looks in the
1311 * existing nvmet port_entries to see if the nvmet layer is
1312 * configured for the targetport's wwn's. (the targetport existed,
1313 * nvmet configured, the lldd unregistered the tgtport, and is now
1314 * reregistering the same targetport). If so, set the nvmet port
1315 * port entry on the targetport.
1316 */
1317 static void
nvmet_fc_portentry_rebind_tgt(struct nvmet_fc_tgtport * tgtport)1318 nvmet_fc_portentry_rebind_tgt(struct nvmet_fc_tgtport *tgtport)
1319 {
1320 struct nvmet_fc_port_entry *pe;
1321 unsigned long flags;
1322
1323 spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
1324 list_for_each_entry(pe, &nvmet_fc_portentry_list, pe_list) {
1325 if (tgtport->fc_target_port.node_name == pe->node_name &&
1326 tgtport->fc_target_port.port_name == pe->port_name) {
1327 WARN_ON(pe->tgtport);
1328 tgtport->pe = pe;
1329 pe->tgtport = tgtport;
1330 break;
1331 }
1332 }
1333 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
1334 }
1335
1336 /**
1337 * nvmet_fc_register_targetport - transport entry point called by an
1338 * LLDD to register the existence of a local
1339 * NVME subystem FC port.
1340 * @pinfo: pointer to information about the port to be registered
1341 * @template: LLDD entrypoints and operational parameters for the port
1342 * @dev: physical hardware device node port corresponds to. Will be
1343 * used for DMA mappings
1344 * @portptr: pointer to a local port pointer. Upon success, the routine
1345 * will allocate a nvme_fc_local_port structure and place its
1346 * address in the local port pointer. Upon failure, local port
1347 * pointer will be set to NULL.
1348 *
1349 * Returns:
1350 * a completion status. Must be 0 upon success; a negative errno
1351 * (ex: -ENXIO) upon failure.
1352 */
1353 int
nvmet_fc_register_targetport(struct nvmet_fc_port_info * pinfo,struct nvmet_fc_target_template * template,struct device * dev,struct nvmet_fc_target_port ** portptr)1354 nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
1355 struct nvmet_fc_target_template *template,
1356 struct device *dev,
1357 struct nvmet_fc_target_port **portptr)
1358 {
1359 struct nvmet_fc_tgtport *newrec;
1360 unsigned long flags;
1361 int ret, idx;
1362
1363 if (!template->xmt_ls_rsp || !template->fcp_op ||
1364 !template->fcp_abort ||
1365 !template->fcp_req_release || !template->targetport_delete ||
1366 !template->max_hw_queues || !template->max_sgl_segments ||
1367 !template->max_dif_sgl_segments || !template->dma_boundary) {
1368 ret = -EINVAL;
1369 goto out_regtgt_failed;
1370 }
1371
1372 newrec = kzalloc((sizeof(*newrec) + template->target_priv_sz),
1373 GFP_KERNEL);
1374 if (!newrec) {
1375 ret = -ENOMEM;
1376 goto out_regtgt_failed;
1377 }
1378
1379 idx = ida_alloc(&nvmet_fc_tgtport_cnt, GFP_KERNEL);
1380 if (idx < 0) {
1381 ret = -ENOSPC;
1382 goto out_fail_kfree;
1383 }
1384
1385 if (!get_device(dev) && dev) {
1386 ret = -ENODEV;
1387 goto out_ida_put;
1388 }
1389
1390 newrec->fc_target_port.node_name = pinfo->node_name;
1391 newrec->fc_target_port.port_name = pinfo->port_name;
1392 if (template->target_priv_sz)
1393 newrec->fc_target_port.private = &newrec[1];
1394 else
1395 newrec->fc_target_port.private = NULL;
1396 newrec->fc_target_port.port_id = pinfo->port_id;
1397 newrec->fc_target_port.port_num = idx;
1398 INIT_LIST_HEAD(&newrec->tgt_list);
1399 newrec->dev = dev;
1400 newrec->ops = template;
1401 spin_lock_init(&newrec->lock);
1402 INIT_LIST_HEAD(&newrec->ls_rcv_list);
1403 INIT_LIST_HEAD(&newrec->ls_req_list);
1404 INIT_LIST_HEAD(&newrec->ls_busylist);
1405 INIT_LIST_HEAD(&newrec->assoc_list);
1406 INIT_LIST_HEAD(&newrec->host_list);
1407 kref_init(&newrec->ref);
1408 ida_init(&newrec->assoc_cnt);
1409 newrec->max_sg_cnt = template->max_sgl_segments;
1410 INIT_WORK(&newrec->put_work, nvmet_fc_put_tgtport_work);
1411
1412 ret = nvmet_fc_alloc_ls_iodlist(newrec);
1413 if (ret) {
1414 ret = -ENOMEM;
1415 goto out_free_newrec;
1416 }
1417
1418 nvmet_fc_portentry_rebind_tgt(newrec);
1419
1420 spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
1421 list_add_tail(&newrec->tgt_list, &nvmet_fc_target_list);
1422 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
1423
1424 *portptr = &newrec->fc_target_port;
1425 return 0;
1426
1427 out_free_newrec:
1428 put_device(dev);
1429 out_ida_put:
1430 ida_free(&nvmet_fc_tgtport_cnt, idx);
1431 out_fail_kfree:
1432 kfree(newrec);
1433 out_regtgt_failed:
1434 *portptr = NULL;
1435 return ret;
1436 }
1437 EXPORT_SYMBOL_GPL(nvmet_fc_register_targetport);
1438
1439
1440 static void
nvmet_fc_free_tgtport(struct kref * ref)1441 nvmet_fc_free_tgtport(struct kref *ref)
1442 {
1443 struct nvmet_fc_tgtport *tgtport =
1444 container_of(ref, struct nvmet_fc_tgtport, ref);
1445 struct device *dev = tgtport->dev;
1446 unsigned long flags;
1447
1448 spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
1449 list_del(&tgtport->tgt_list);
1450 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
1451
1452 nvmet_fc_free_ls_iodlist(tgtport);
1453
1454 /* let the LLDD know we've finished tearing it down */
1455 tgtport->ops->targetport_delete(&tgtport->fc_target_port);
1456
1457 ida_free(&nvmet_fc_tgtport_cnt,
1458 tgtport->fc_target_port.port_num);
1459
1460 ida_destroy(&tgtport->assoc_cnt);
1461
1462 kfree(tgtport);
1463
1464 put_device(dev);
1465 }
1466
1467 static void
nvmet_fc_tgtport_put(struct nvmet_fc_tgtport * tgtport)1468 nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport)
1469 {
1470 kref_put(&tgtport->ref, nvmet_fc_free_tgtport);
1471 }
1472
1473 static int
nvmet_fc_tgtport_get(struct nvmet_fc_tgtport * tgtport)1474 nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport)
1475 {
1476 return kref_get_unless_zero(&tgtport->ref);
1477 }
1478
1479 static void
__nvmet_fc_free_assocs(struct nvmet_fc_tgtport * tgtport)1480 __nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport)
1481 {
1482 struct nvmet_fc_tgt_assoc *assoc;
1483
1484 rcu_read_lock();
1485 list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
1486 if (!nvmet_fc_tgt_a_get(assoc))
1487 continue;
1488 nvmet_fc_schedule_delete_assoc(assoc);
1489 nvmet_fc_tgt_a_put(assoc);
1490 }
1491 rcu_read_unlock();
1492 }
1493
1494 /**
1495 * nvmet_fc_invalidate_host - transport entry point called by an LLDD
1496 * to remove references to a hosthandle for LS's.
1497 *
1498 * The nvmet-fc layer ensures that any references to the hosthandle
1499 * on the targetport are forgotten (set to NULL). The LLDD will
1500 * typically call this when a login with a remote host port has been
1501 * lost, thus LS's for the remote host port are no longer possible.
1502 *
1503 * If an LS request is outstanding to the targetport/hosthandle (or
1504 * issued concurrently with the call to invalidate the host), the
1505 * LLDD is responsible for terminating/aborting the LS and completing
1506 * the LS request. It is recommended that these terminations/aborts
1507 * occur after calling to invalidate the host handle to avoid additional
1508 * retries by the nvmet-fc transport. The nvmet-fc transport may
1509 * continue to reference host handle while it cleans up outstanding
1510 * NVME associations. The nvmet-fc transport will call the
1511 * ops->host_release() callback to notify the LLDD that all references
1512 * are complete and the related host handle can be recovered.
1513 * Note: if there are no references, the callback may be called before
1514 * the invalidate host call returns.
1515 *
1516 * @target_port: pointer to the (registered) target port that a prior
1517 * LS was received on and which supplied the transport the
1518 * hosthandle.
1519 * @hosthandle: the handle (pointer) that represents the host port
1520 * that no longer has connectivity and that LS's should
1521 * no longer be directed to.
1522 */
1523 void
nvmet_fc_invalidate_host(struct nvmet_fc_target_port * target_port,void * hosthandle)1524 nvmet_fc_invalidate_host(struct nvmet_fc_target_port *target_port,
1525 void *hosthandle)
1526 {
1527 struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
1528 struct nvmet_fc_tgt_assoc *assoc, *next;
1529 unsigned long flags;
1530 bool noassoc = true;
1531
1532 spin_lock_irqsave(&tgtport->lock, flags);
1533 list_for_each_entry_safe(assoc, next,
1534 &tgtport->assoc_list, a_list) {
1535 if (!assoc->hostport ||
1536 assoc->hostport->hosthandle != hosthandle)
1537 continue;
1538 if (!nvmet_fc_tgt_a_get(assoc))
1539 continue;
1540 assoc->hostport->invalid = 1;
1541 noassoc = false;
1542 nvmet_fc_schedule_delete_assoc(assoc);
1543 nvmet_fc_tgt_a_put(assoc);
1544 }
1545 spin_unlock_irqrestore(&tgtport->lock, flags);
1546
1547 /* if there's nothing to wait for - call the callback */
1548 if (noassoc && tgtport->ops->host_release)
1549 tgtport->ops->host_release(hosthandle);
1550 }
1551 EXPORT_SYMBOL_GPL(nvmet_fc_invalidate_host);
1552
1553 /*
1554 * nvmet layer has called to terminate an association
1555 */
1556 static void
nvmet_fc_delete_ctrl(struct nvmet_ctrl * ctrl)1557 nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
1558 {
1559 struct nvmet_fc_tgtport *tgtport, *next;
1560 struct nvmet_fc_tgt_assoc *assoc;
1561 struct nvmet_fc_tgt_queue *queue;
1562 unsigned long flags;
1563 bool found_ctrl = false;
1564
1565 /* this is a bit ugly, but don't want to make locks layered */
1566 spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
1567 list_for_each_entry_safe(tgtport, next, &nvmet_fc_target_list,
1568 tgt_list) {
1569 if (!nvmet_fc_tgtport_get(tgtport))
1570 continue;
1571 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
1572
1573 rcu_read_lock();
1574 list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
1575 queue = assoc->queues[0];
1576 if (queue && queue->nvme_sq.ctrl == ctrl) {
1577 if (nvmet_fc_tgt_a_get(assoc))
1578 found_ctrl = true;
1579 break;
1580 }
1581 }
1582 rcu_read_unlock();
1583
1584 nvmet_fc_tgtport_put(tgtport);
1585
1586 if (found_ctrl) {
1587 nvmet_fc_schedule_delete_assoc(assoc);
1588 nvmet_fc_tgt_a_put(assoc);
1589 return;
1590 }
1591
1592 spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
1593 }
1594 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
1595 }
1596
1597 /**
1598 * nvmet_fc_unregister_targetport - transport entry point called by an
1599 * LLDD to deregister/remove a previously
1600 * registered a local NVME subsystem FC port.
1601 * @target_port: pointer to the (registered) target port that is to be
1602 * deregistered.
1603 *
1604 * Returns:
1605 * a completion status. Must be 0 upon success; a negative errno
1606 * (ex: -ENXIO) upon failure.
1607 */
1608 int
nvmet_fc_unregister_targetport(struct nvmet_fc_target_port * target_port)1609 nvmet_fc_unregister_targetport(struct nvmet_fc_target_port *target_port)
1610 {
1611 struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
1612
1613 nvmet_fc_portentry_unbind_tgt(tgtport);
1614
1615 /* terminate any outstanding associations */
1616 __nvmet_fc_free_assocs(tgtport);
1617
1618 flush_workqueue(nvmet_wq);
1619
1620 /*
1621 * should terminate LS's as well. However, LS's will be generated
1622 * at the tail end of association termination, so they likely don't
1623 * exist yet. And even if they did, it's worthwhile to just let
1624 * them finish and targetport ref counting will clean things up.
1625 */
1626
1627 nvmet_fc_tgtport_put(tgtport);
1628
1629 return 0;
1630 }
1631 EXPORT_SYMBOL_GPL(nvmet_fc_unregister_targetport);
1632
1633
1634 /* ********************** FC-NVME LS RCV Handling ************************* */
1635
1636
1637 static void
nvmet_fc_ls_create_association(struct nvmet_fc_tgtport * tgtport,struct nvmet_fc_ls_iod * iod)1638 nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport,
1639 struct nvmet_fc_ls_iod *iod)
1640 {
1641 struct fcnvme_ls_cr_assoc_rqst *rqst = &iod->rqstbuf->rq_cr_assoc;
1642 struct fcnvme_ls_cr_assoc_acc *acc = &iod->rspbuf->rsp_cr_assoc;
1643 struct nvmet_fc_tgt_queue *queue;
1644 int ret = 0;
1645
1646 memset(acc, 0, sizeof(*acc));
1647
1648 /*
1649 * FC-NVME spec changes. There are initiators sending different
1650 * lengths as padding sizes for Create Association Cmd descriptor
1651 * was incorrect.
1652 * Accept anything of "minimum" length. Assume format per 1.15
1653 * spec (with HOSTID reduced to 16 bytes), ignore how long the
1654 * trailing pad length is.
1655 */
1656 if (iod->rqstdatalen < FCNVME_LSDESC_CRA_RQST_MINLEN)
1657 ret = VERR_CR_ASSOC_LEN;
1658 else if (be32_to_cpu(rqst->desc_list_len) <
1659 FCNVME_LSDESC_CRA_RQST_MIN_LISTLEN)
1660 ret = VERR_CR_ASSOC_RQST_LEN;
1661 else if (rqst->assoc_cmd.desc_tag !=
1662 cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD))
1663 ret = VERR_CR_ASSOC_CMD;
1664 else if (be32_to_cpu(rqst->assoc_cmd.desc_len) <
1665 FCNVME_LSDESC_CRA_CMD_DESC_MIN_DESCLEN)
1666 ret = VERR_CR_ASSOC_CMD_LEN;
1667 else if (!rqst->assoc_cmd.ersp_ratio ||
1668 (be16_to_cpu(rqst->assoc_cmd.ersp_ratio) >=
1669 be16_to_cpu(rqst->assoc_cmd.sqsize)))
1670 ret = VERR_ERSP_RATIO;
1671
1672 else {
1673 /* new association w/ admin queue */
1674 iod->assoc = nvmet_fc_alloc_target_assoc(
1675 tgtport, iod->hosthandle);
1676 if (!iod->assoc)
1677 ret = VERR_ASSOC_ALLOC_FAIL;
1678 else {
1679 queue = nvmet_fc_alloc_target_queue(iod->assoc, 0,
1680 be16_to_cpu(rqst->assoc_cmd.sqsize));
1681 if (!queue) {
1682 ret = VERR_QUEUE_ALLOC_FAIL;
1683 nvmet_fc_tgt_a_put(iod->assoc);
1684 }
1685 }
1686 }
1687
1688 if (ret) {
1689 dev_err(tgtport->dev,
1690 "Create Association LS failed: %s\n",
1691 validation_errors[ret]);
1692 iod->lsrsp->rsplen = nvme_fc_format_rjt(acc,
1693 sizeof(*acc), rqst->w0.ls_cmd,
1694 FCNVME_RJT_RC_LOGIC,
1695 FCNVME_RJT_EXP_NONE, 0);
1696 return;
1697 }
1698
1699 queue->ersp_ratio = be16_to_cpu(rqst->assoc_cmd.ersp_ratio);
1700 atomic_set(&queue->connected, 1);
1701 queue->sqhd = 0; /* best place to init value */
1702
1703 dev_info(tgtport->dev,
1704 "{%d:%d} Association created\n",
1705 tgtport->fc_target_port.port_num, iod->assoc->a_id);
1706
1707 /* format a response */
1708
1709 iod->lsrsp->rsplen = sizeof(*acc);
1710
1711 nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
1712 fcnvme_lsdesc_len(
1713 sizeof(struct fcnvme_ls_cr_assoc_acc)),
1714 FCNVME_LS_CREATE_ASSOCIATION);
1715 acc->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
1716 acc->associd.desc_len =
1717 fcnvme_lsdesc_len(
1718 sizeof(struct fcnvme_lsdesc_assoc_id));
1719 acc->associd.association_id =
1720 cpu_to_be64(nvmet_fc_makeconnid(iod->assoc, 0));
1721 acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID);
1722 acc->connectid.desc_len =
1723 fcnvme_lsdesc_len(
1724 sizeof(struct fcnvme_lsdesc_conn_id));
1725 acc->connectid.connection_id = acc->associd.association_id;
1726 }
1727
1728 static void
nvmet_fc_ls_create_connection(struct nvmet_fc_tgtport * tgtport,struct nvmet_fc_ls_iod * iod)1729 nvmet_fc_ls_create_connection(struct nvmet_fc_tgtport *tgtport,
1730 struct nvmet_fc_ls_iod *iod)
1731 {
1732 struct fcnvme_ls_cr_conn_rqst *rqst = &iod->rqstbuf->rq_cr_conn;
1733 struct fcnvme_ls_cr_conn_acc *acc = &iod->rspbuf->rsp_cr_conn;
1734 struct nvmet_fc_tgt_queue *queue;
1735 int ret = 0;
1736
1737 memset(acc, 0, sizeof(*acc));
1738
1739 if (iod->rqstdatalen < sizeof(struct fcnvme_ls_cr_conn_rqst))
1740 ret = VERR_CR_CONN_LEN;
1741 else if (rqst->desc_list_len !=
1742 fcnvme_lsdesc_len(
1743 sizeof(struct fcnvme_ls_cr_conn_rqst)))
1744 ret = VERR_CR_CONN_RQST_LEN;
1745 else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
1746 ret = VERR_ASSOC_ID;
1747 else if (rqst->associd.desc_len !=
1748 fcnvme_lsdesc_len(
1749 sizeof(struct fcnvme_lsdesc_assoc_id)))
1750 ret = VERR_ASSOC_ID_LEN;
1751 else if (rqst->connect_cmd.desc_tag !=
1752 cpu_to_be32(FCNVME_LSDESC_CREATE_CONN_CMD))
1753 ret = VERR_CR_CONN_CMD;
1754 else if (rqst->connect_cmd.desc_len !=
1755 fcnvme_lsdesc_len(
1756 sizeof(struct fcnvme_lsdesc_cr_conn_cmd)))
1757 ret = VERR_CR_CONN_CMD_LEN;
1758 else if (!rqst->connect_cmd.ersp_ratio ||
1759 (be16_to_cpu(rqst->connect_cmd.ersp_ratio) >=
1760 be16_to_cpu(rqst->connect_cmd.sqsize)))
1761 ret = VERR_ERSP_RATIO;
1762
1763 else {
1764 /* new io queue */
1765 iod->assoc = nvmet_fc_find_target_assoc(tgtport,
1766 be64_to_cpu(rqst->associd.association_id));
1767 if (!iod->assoc)
1768 ret = VERR_NO_ASSOC;
1769 else {
1770 queue = nvmet_fc_alloc_target_queue(iod->assoc,
1771 be16_to_cpu(rqst->connect_cmd.qid),
1772 be16_to_cpu(rqst->connect_cmd.sqsize));
1773 if (!queue)
1774 ret = VERR_QUEUE_ALLOC_FAIL;
1775
1776 /* release get taken in nvmet_fc_find_target_assoc */
1777 nvmet_fc_tgt_a_put(iod->assoc);
1778 }
1779 }
1780
1781 if (ret) {
1782 dev_err(tgtport->dev,
1783 "Create Connection LS failed: %s\n",
1784 validation_errors[ret]);
1785 iod->lsrsp->rsplen = nvme_fc_format_rjt(acc,
1786 sizeof(*acc), rqst->w0.ls_cmd,
1787 (ret == VERR_NO_ASSOC) ?
1788 FCNVME_RJT_RC_INV_ASSOC :
1789 FCNVME_RJT_RC_LOGIC,
1790 FCNVME_RJT_EXP_NONE, 0);
1791 return;
1792 }
1793
1794 queue->ersp_ratio = be16_to_cpu(rqst->connect_cmd.ersp_ratio);
1795 atomic_set(&queue->connected, 1);
1796 queue->sqhd = 0; /* best place to init value */
1797
1798 /* format a response */
1799
1800 iod->lsrsp->rsplen = sizeof(*acc);
1801
1802 nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
1803 fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc)),
1804 FCNVME_LS_CREATE_CONNECTION);
1805 acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID);
1806 acc->connectid.desc_len =
1807 fcnvme_lsdesc_len(
1808 sizeof(struct fcnvme_lsdesc_conn_id));
1809 acc->connectid.connection_id =
1810 cpu_to_be64(nvmet_fc_makeconnid(iod->assoc,
1811 be16_to_cpu(rqst->connect_cmd.qid)));
1812 }
1813
1814 /*
1815  * Returns true if the LS response is to be transmitted
1816 * Returns false if the LS response is to be delayed
1817 */
1818 static int
1819 nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
1820 struct nvmet_fc_ls_iod *iod)
1821 {
1822 struct fcnvme_ls_disconnect_assoc_rqst *rqst =
1823 &iod->rqstbuf->rq_dis_assoc;
1824 struct fcnvme_ls_disconnect_assoc_acc *acc =
1825 &iod->rspbuf->rsp_dis_assoc;
1826 struct nvmet_fc_tgt_assoc *assoc = NULL;
1827 struct nvmet_fc_ls_iod *oldls = NULL;
1828 unsigned long flags;
1829 int ret = 0;
1830
1831 memset(acc, 0, sizeof(*acc));
1832
1833 ret = nvmefc_vldt_lsreq_discon_assoc(iod->rqstdatalen, rqst);
1834 if (!ret) {
1835 /* match an active association - takes an assoc ref if !NULL */
1836 assoc = nvmet_fc_find_target_assoc(tgtport,
1837 be64_to_cpu(rqst->associd.association_id));
1838 iod->assoc = assoc;
1839 if (!assoc)
1840 ret = VERR_NO_ASSOC;
1841 }
1842
1843 if (ret || !assoc) {
1844 dev_err(tgtport->dev,
1845 "Disconnect LS failed: %s\n",
1846 validation_errors[ret]);
1847 iod->lsrsp->rsplen = nvme_fc_format_rjt(acc,
1848 sizeof(*acc), rqst->w0.ls_cmd,
1849 (ret == VERR_NO_ASSOC) ?
1850 FCNVME_RJT_RC_INV_ASSOC :
1851 FCNVME_RJT_RC_LOGIC,
1852 FCNVME_RJT_EXP_NONE, 0);
1853 return true;
1854 }
1855
1856 /* format a response */
1857
1858 iod->lsrsp->rsplen = sizeof(*acc);
1859
1860 nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
1861 fcnvme_lsdesc_len(
1862 sizeof(struct fcnvme_ls_disconnect_assoc_acc)),
1863 FCNVME_LS_DISCONNECT_ASSOC);
1864
1865 /*
1866  * The rules for an LS response say the response cannot
1867 * go back until ABTS's have been sent for all outstanding
1868 * I/O and a Disconnect Association LS has been sent.
1869 * So... save off the Disconnect LS to send the response
1870 * later. If there was a prior LS already saved, replace
1871 * it with the newer one and send a can't perform reject
1872 * on the older one.
1873 */
1874 spin_lock_irqsave(&tgtport->lock, flags);
1875 oldls = assoc->rcv_disconn;
1876 assoc->rcv_disconn = iod;
1877 spin_unlock_irqrestore(&tgtport->lock, flags);
1878
1879 if (oldls) {
1880 dev_info(tgtport->dev,
1881 "{%d:%d} Multiple Disconnect Association LS's "
1882 "received\n",
1883 tgtport->fc_target_port.port_num, assoc->a_id);
1884 /* overwrite good response with bogus failure */
1885 oldls->lsrsp->rsplen = nvme_fc_format_rjt(oldls->rspbuf,
1886 sizeof(*iod->rspbuf),
1887 /* ok to use rqst, LS is same */
1888 rqst->w0.ls_cmd,
1889 FCNVME_RJT_RC_UNAB,
1890 FCNVME_RJT_EXP_NONE, 0);
1891 nvmet_fc_xmt_ls_rsp(tgtport, oldls);
1892 }
1893
1894 nvmet_fc_schedule_delete_assoc(assoc);
1895 nvmet_fc_tgt_a_put(assoc);
1896
1897 return false;
1898 }
1899
1900
1901 /* *********************** NVME Ctrl Routines **************************** */
1902
1903
1904 static void nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req);
1905
1906 static const struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops;
1907
1908 static void
1909 nvmet_fc_xmt_ls_rsp_done(struct nvmefc_ls_rsp *lsrsp)
1910 {
1911 struct nvmet_fc_ls_iod *iod = lsrsp->nvme_fc_private;
1912 struct nvmet_fc_tgtport *tgtport = iod->tgtport;
1913
1914 fc_dma_sync_single_for_cpu(tgtport->dev, iod->rspdma,
1915 sizeof(*iod->rspbuf), DMA_TO_DEVICE);
1916 nvmet_fc_free_ls_iod(tgtport, iod);
1917 nvmet_fc_tgtport_put(tgtport);
1918 }
1919
1920 static void
1921 nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport,
1922 struct nvmet_fc_ls_iod *iod)
1923 {
1924 int ret;
1925
1926 fc_dma_sync_single_for_device(tgtport->dev, iod->rspdma,
1927 sizeof(*iod->rspbuf), DMA_TO_DEVICE);
1928
1929 ret = tgtport->ops->xmt_ls_rsp(&tgtport->fc_target_port, iod->lsrsp);
1930 if (ret)
1931 nvmet_fc_xmt_ls_rsp_done(iod->lsrsp);
1932 }
1933
1934 /*
1935 * Actual processing routine for received FC-NVME LS Requests from the LLD
1936 */
1937 static void
1938 nvmet_fc_handle_ls_rqst(struct nvmet_fc_tgtport *tgtport,
1939 struct nvmet_fc_ls_iod *iod)
1940 {
1941 struct fcnvme_ls_rqst_w0 *w0 = &iod->rqstbuf->rq_cr_assoc.w0;
1942 bool sendrsp = true;
1943
1944 iod->lsrsp->nvme_fc_private = iod;
1945 iod->lsrsp->rspbuf = iod->rspbuf;
1946 iod->lsrsp->rspdma = iod->rspdma;
1947 iod->lsrsp->done = nvmet_fc_xmt_ls_rsp_done;
1948 	/* Be preventive; handlers will later set to a valid length */
1949 iod->lsrsp->rsplen = 0;
1950
1951 iod->assoc = NULL;
1952
1953 /*
1954 * handlers:
1955 * parse request input, execute the request, and format the
1956 * LS response
1957 */
1958 switch (w0->ls_cmd) {
1959 case FCNVME_LS_CREATE_ASSOCIATION:
1960 /* Creates Association and initial Admin Queue/Connection */
1961 nvmet_fc_ls_create_association(tgtport, iod);
1962 break;
1963 case FCNVME_LS_CREATE_CONNECTION:
1964 /* Creates an IO Queue/Connection */
1965 nvmet_fc_ls_create_connection(tgtport, iod);
1966 break;
1967 case FCNVME_LS_DISCONNECT_ASSOC:
1968 /* Terminate a Queue/Connection or the Association */
1969 sendrsp = nvmet_fc_ls_disconnect(tgtport, iod);
1970 break;
1971 default:
1972 iod->lsrsp->rsplen = nvme_fc_format_rjt(iod->rspbuf,
1973 sizeof(*iod->rspbuf), w0->ls_cmd,
1974 FCNVME_RJT_RC_INVAL, FCNVME_RJT_EXP_NONE, 0);
1975 }
1976
1977 if (sendrsp)
1978 nvmet_fc_xmt_ls_rsp(tgtport, iod);
1979 }
1980
1981 /*
1982  * Work-context wrapper that invokes the LS request handler above
1983 */
1984 static void
1985 nvmet_fc_handle_ls_rqst_work(struct work_struct *work)
1986 {
1987 struct nvmet_fc_ls_iod *iod =
1988 container_of(work, struct nvmet_fc_ls_iod, work);
1989 struct nvmet_fc_tgtport *tgtport = iod->tgtport;
1990
1991 nvmet_fc_handle_ls_rqst(tgtport, iod);
1992 }
1993
1994
1995 /**
1996 * nvmet_fc_rcv_ls_req - transport entry point called by an LLDD
1997  * upon the reception of an NVME LS request.
1998 *
1999 * The nvmet-fc layer will copy payload to an internal structure for
2000 * processing. As such, upon completion of the routine, the LLDD may
2001 * immediately free/reuse the LS request buffer passed in the call.
2002 *
2003 * If this routine returns error, the LLDD should abort the exchange.
2004 *
2005 * @target_port: pointer to the (registered) target port the LS was
2006 * received on.
2007 * @hosthandle: pointer to the host specific data, gets stored in iod.
2008 * @lsrsp: pointer to a lsrsp structure to be used to reference
2009 * the exchange corresponding to the LS.
2010 * @lsreqbuf: pointer to the buffer containing the LS Request
2011 * @lsreqbuf_len: length, in bytes, of the received LS request
2012 */
2013 int
2014 nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *target_port,
2015 void *hosthandle,
2016 struct nvmefc_ls_rsp *lsrsp,
2017 void *lsreqbuf, u32 lsreqbuf_len)
2018 {
2019 struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
2020 struct nvmet_fc_ls_iod *iod;
2021 struct fcnvme_ls_rqst_w0 *w0 = (struct fcnvme_ls_rqst_w0 *)lsreqbuf;
2022
2023 if (lsreqbuf_len > sizeof(union nvmefc_ls_requests)) {
2024 dev_info(tgtport->dev,
2025 "RCV %s LS failed: payload too large (%d)\n",
2026 (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
2027 nvmefc_ls_names[w0->ls_cmd] : "",
2028 lsreqbuf_len);
2029 return -E2BIG;
2030 }
2031
2032 if (!nvmet_fc_tgtport_get(tgtport)) {
2033 dev_info(tgtport->dev,
2034 "RCV %s LS failed: target deleting\n",
2035 (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
2036 nvmefc_ls_names[w0->ls_cmd] : "");
2037 return -ESHUTDOWN;
2038 }
2039
2040 iod = nvmet_fc_alloc_ls_iod(tgtport);
2041 if (!iod) {
2042 dev_info(tgtport->dev,
2043 "RCV %s LS failed: context allocation failed\n",
2044 (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
2045 nvmefc_ls_names[w0->ls_cmd] : "");
2046 nvmet_fc_tgtport_put(tgtport);
2047 return -ENOENT;
2048 }
2049
2050 iod->lsrsp = lsrsp;
2051 iod->fcpreq = NULL;
2052 memcpy(iod->rqstbuf, lsreqbuf, lsreqbuf_len);
2053 iod->rqstdatalen = lsreqbuf_len;
2054 iod->hosthandle = hosthandle;
2055
2056 queue_work(nvmet_wq, &iod->work);
2057
2058 return 0;
2059 }
2060 EXPORT_SYMBOL_GPL(nvmet_fc_rcv_ls_req);
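/*
 * Illustrative sketch, not part of the driver: one way an FC LLDD might
 * hand a received NVME LS frame to nvmet-fc from its unsolicited receive
 * path. The lldd_* names and the per-exchange context are hypothetical;
 * only nvmet_fc_rcv_ls_req() and struct nvmefc_ls_rsp come from the API
 * documented above.
 *
 *	static void lldd_recv_nvme_ls(struct lldd_rport *rport,
 *				      void *lsbuf, u32 lsbuf_len,
 *				      struct lldd_ls_exchange *xchg)
 *	{
 *		// the payload is copied internally, so lsbuf may be
 *		// reused as soon as this call returns
 *		if (nvmet_fc_rcv_ls_req(rport->targetport, rport,
 *					&xchg->lsrsp, lsbuf, lsbuf_len))
 *			lldd_abort_exchange(xchg);	// per the contract above
 *	}
 */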
2061
2062
2063 /*
2064 * **********************
2065 * Start of FCP handling
2066 * **********************
2067 */
2068
2069 static int
2070 nvmet_fc_alloc_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
2071 {
2072 struct scatterlist *sg;
2073 unsigned int nent;
2074
2075 sg = sgl_alloc(fod->req.transfer_len, GFP_KERNEL, &nent);
2076 if (!sg)
2077 goto out;
2078
2079 fod->data_sg = sg;
2080 fod->data_sg_cnt = nent;
2081 fod->data_sg_cnt = fc_dma_map_sg(fod->tgtport->dev, sg, nent,
2082 ((fod->io_dir == NVMET_FCP_WRITE) ?
2083 DMA_FROM_DEVICE : DMA_TO_DEVICE));
2084 /* note: write from initiator perspective */
2085 fod->next_sg = fod->data_sg;
2086
2087 return 0;
2088
2089 out:
2090 return NVME_SC_INTERNAL;
2091 }
2092
2093 static void
2094 nvmet_fc_free_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
2095 {
2096 if (!fod->data_sg || !fod->data_sg_cnt)
2097 return;
2098
2099 fc_dma_unmap_sg(fod->tgtport->dev, fod->data_sg, fod->data_sg_cnt,
2100 ((fod->io_dir == NVMET_FCP_WRITE) ?
2101 DMA_FROM_DEVICE : DMA_TO_DEVICE));
2102 sgl_free(fod->data_sg);
2103 fod->data_sg = NULL;
2104 fod->data_sg_cnt = 0;
2105 }
2106
2107
2108 static bool
2109 queue_90percent_full(struct nvmet_fc_tgt_queue *q, u32 sqhd)
2110 {
2111 u32 sqtail, used;
2112
2113 /* egad, this is ugly. And sqtail is just a best guess */
2114 sqtail = atomic_read(&q->sqtail) % q->sqsize;
2115
2116 used = (sqtail < sqhd) ? (sqtail + q->sqsize - sqhd) : (sqtail - sqhd);
2117 return ((used * 10) >= (((u32)(q->sqsize - 1) * 9)));
2118 }
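/*
 * Worked example for the check above (illustrative numbers): with
 * sqsize = 32, sqhd = 2 and a sampled sqtail of 30, used = 28 and
 * 28 * 10 = 280 >= (32 - 1) * 9 = 279, so the queue is treated as 90%
 * full; with sqtail = 29, used = 27 and the check is false.
 */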
2119
2120 /*
2121 * Prep RSP payload.
2122 * May be a NVMET_FCOP_RSP or NVMET_FCOP_READDATA_RSP op
2123 */
2124 static void
2125 nvmet_fc_prep_fcp_rsp(struct nvmet_fc_tgtport *tgtport,
2126 struct nvmet_fc_fcp_iod *fod)
2127 {
2128 struct nvme_fc_ersp_iu *ersp = &fod->rspiubuf;
2129 struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common;
2130 struct nvme_completion *cqe = &ersp->cqe;
2131 u32 *cqewd = (u32 *)cqe;
2132 bool send_ersp = false;
2133 u32 rsn, rspcnt, xfr_length;
2134
2135 if (fod->fcpreq->op == NVMET_FCOP_READDATA_RSP)
2136 xfr_length = fod->req.transfer_len;
2137 else
2138 xfr_length = fod->offset;
2139
2140 /*
2141 * check to see if we can send a 0's rsp.
2142 * Note: to send a 0's response, the NVME-FC host transport will
2143 * recreate the CQE. The host transport knows: sq id, SQHD (last
2144 * seen in an ersp), and command_id. Thus it will create a
2145 * zero-filled CQE with those known fields filled in. Transport
2146 * must send an ersp for any condition where the cqe won't match
2147 * this.
2148 *
2149 * Here are the FC-NVME mandated cases where we must send an ersp:
2150 * every N responses, where N=ersp_ratio
2151 * force fabric commands to send ersp's (not in FC-NVME but good
2152 * practice)
2153 * normal cmds: any time status is non-zero, or status is zero
2154 * but words 0 or 1 are non-zero.
2155 * the SQ is 90% or more full
2156 * the cmd is a fused command
2157 * transferred data length not equal to cmd iu length
2158 */
2159 rspcnt = atomic_inc_return(&fod->queue->zrspcnt);
2160 if (!(rspcnt % fod->queue->ersp_ratio) ||
2161 nvme_is_fabrics((struct nvme_command *) sqe) ||
2162 xfr_length != fod->req.transfer_len ||
2163 (le16_to_cpu(cqe->status) & 0xFFFE) || cqewd[0] || cqewd[1] ||
2164 (sqe->flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND)) ||
2165 queue_90percent_full(fod->queue, le16_to_cpu(cqe->sq_head)))
2166 send_ersp = true;
2167
2168 /* re-set the fields */
2169 fod->fcpreq->rspaddr = ersp;
2170 fod->fcpreq->rspdma = fod->rspdma;
2171
2172 if (!send_ersp) {
2173 memset(ersp, 0, NVME_FC_SIZEOF_ZEROS_RSP);
2174 fod->fcpreq->rsplen = NVME_FC_SIZEOF_ZEROS_RSP;
2175 } else {
2176 ersp->iu_len = cpu_to_be16(sizeof(*ersp)/sizeof(u32));
2177 rsn = atomic_inc_return(&fod->queue->rsn);
2178 ersp->rsn = cpu_to_be32(rsn);
2179 ersp->xfrd_len = cpu_to_be32(xfr_length);
2180 fod->fcpreq->rsplen = sizeof(*ersp);
2181 }
2182
2183 fc_dma_sync_single_for_device(tgtport->dev, fod->rspdma,
2184 sizeof(fod->rspiubuf), DMA_TO_DEVICE);
2185 }
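/*
 * Illustrative note on the ersp_ratio rule above (the ratio value is
 * hypothetical): zrspcnt is bumped for every response prepared on the
 * queue, so with an ersp_ratio of 16 every 16th completion carries a
 * full ERSP even if the CQE would otherwise qualify for the zero-filled
 * response.
 */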
2186
2187 static void nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq);
2188
2189 static void
2190 nvmet_fc_abort_op(struct nvmet_fc_tgtport *tgtport,
2191 struct nvmet_fc_fcp_iod *fod)
2192 {
2193 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
2194
2195 /* data no longer needed */
2196 nvmet_fc_free_tgt_pgs(fod);
2197
2198 /*
2199 * if an ABTS was received or we issued the fcp_abort early
2200 * don't call abort routine again.
2201 */
2202 /* no need to take lock - lock was taken earlier to get here */
2203 if (!fod->aborted)
2204 tgtport->ops->fcp_abort(&tgtport->fc_target_port, fcpreq);
2205
2206 nvmet_fc_free_fcp_iod(fod->queue, fod);
2207 }
2208
2209 static void
2210 nvmet_fc_xmt_fcp_rsp(struct nvmet_fc_tgtport *tgtport,
2211 struct nvmet_fc_fcp_iod *fod)
2212 {
2213 int ret;
2214
2215 fod->fcpreq->op = NVMET_FCOP_RSP;
2216 fod->fcpreq->timeout = 0;
2217
2218 nvmet_fc_prep_fcp_rsp(tgtport, fod);
2219
2220 ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq);
2221 if (ret)
2222 nvmet_fc_abort_op(tgtport, fod);
2223 }
2224
2225 static void
2226 nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport,
2227 struct nvmet_fc_fcp_iod *fod, u8 op)
2228 {
2229 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
2230 struct scatterlist *sg = fod->next_sg;
2231 unsigned long flags;
2232 u32 remaininglen = fod->req.transfer_len - fod->offset;
2233 u32 tlen = 0;
2234 int ret;
2235
2236 fcpreq->op = op;
2237 fcpreq->offset = fod->offset;
2238 fcpreq->timeout = NVME_FC_TGTOP_TIMEOUT_SEC;
2239
2240 /*
2241 * for next sequence:
2242 * break at a sg element boundary
2243 * attempt to keep sequence length capped at
2244 * NVMET_FC_MAX_SEQ_LENGTH but allow sequence to
2245 * be longer if a single sg element is larger
2246 * than that amount. This is done to avoid creating
2247 * a new sg list to use for the tgtport api.
2248 */
2249 fcpreq->sg = sg;
2250 fcpreq->sg_cnt = 0;
2251 while (tlen < remaininglen &&
2252 fcpreq->sg_cnt < tgtport->max_sg_cnt &&
2253 tlen + sg_dma_len(sg) < NVMET_FC_MAX_SEQ_LENGTH) {
2254 fcpreq->sg_cnt++;
2255 tlen += sg_dma_len(sg);
2256 sg = sg_next(sg);
2257 }
2258 if (tlen < remaininglen && fcpreq->sg_cnt == 0) {
2259 fcpreq->sg_cnt++;
2260 tlen += min_t(u32, sg_dma_len(sg), remaininglen);
2261 sg = sg_next(sg);
2262 }
2263 if (tlen < remaininglen)
2264 fod->next_sg = sg;
2265 else
2266 fod->next_sg = NULL;
2267
2268 fcpreq->transfer_length = tlen;
2269 fcpreq->transferred_length = 0;
2270 fcpreq->fcp_error = 0;
2271 fcpreq->rsplen = 0;
2272
2273 /*
2274 * If the last READDATA request: check if LLDD supports
2275 * combined xfr with response.
2276 */
2277 if ((op == NVMET_FCOP_READDATA) &&
2278 ((fod->offset + fcpreq->transfer_length) == fod->req.transfer_len) &&
2279 (tgtport->ops->target_features & NVMET_FCTGTFEAT_READDATA_RSP)) {
2280 fcpreq->op = NVMET_FCOP_READDATA_RSP;
2281 nvmet_fc_prep_fcp_rsp(tgtport, fod);
2282 }
2283
2284 ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq);
2285 if (ret) {
2286 /*
2287 * should be ok to set w/o lock as its in the thread of
2288 * execution (not an async timer routine) and doesn't
2289 * contend with any clearing action
2290 */
2291 fod->abort = true;
2292
2293 if (op == NVMET_FCOP_WRITEDATA) {
2294 spin_lock_irqsave(&fod->flock, flags);
2295 fod->writedataactive = false;
2296 spin_unlock_irqrestore(&fod->flock, flags);
2297 nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
2298 } else /* NVMET_FCOP_READDATA or NVMET_FCOP_READDATA_RSP */ {
2299 fcpreq->fcp_error = ret;
2300 fcpreq->transferred_length = 0;
2301 nvmet_fc_xmt_fcp_op_done(fod->fcpreq);
2302 }
2303 }
2304 }
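/*
 * Illustrative arithmetic for the chunking loop above (hypothetical
 * element sizes, assuming max_sg_cnt allows it): if every mapped sg
 * element is 64K and NVMET_FC_MAX_SEQ_LENGTH is 256K, the while loop
 * adds an element only while tlen plus that element stays below the
 * cap, so one sequence carries three elements (192K) and the remainder
 * goes out in follow-on WRITEDATA/READDATA operations via next_sg. A
 * single element larger than the cap is still sent whole through the
 * sg_cnt == 0 case.
 */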
2305
2306 static inline bool
2307 __nvmet_fc_fod_op_abort(struct nvmet_fc_fcp_iod *fod, bool abort)
2308 {
2309 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
2310 struct nvmet_fc_tgtport *tgtport = fod->tgtport;
2311
2312 /* if in the middle of an io and we need to tear down */
2313 if (abort) {
2314 if (fcpreq->op == NVMET_FCOP_WRITEDATA) {
2315 nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
2316 return true;
2317 }
2318
2319 nvmet_fc_abort_op(tgtport, fod);
2320 return true;
2321 }
2322
2323 return false;
2324 }
2325
2326 /*
2327 * actual done handler for FCP operations when completed by the lldd
2328 */
2329 static void
2330 nvmet_fc_fod_op_done(struct nvmet_fc_fcp_iod *fod)
2331 {
2332 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
2333 struct nvmet_fc_tgtport *tgtport = fod->tgtport;
2334 unsigned long flags;
2335 bool abort;
2336
2337 spin_lock_irqsave(&fod->flock, flags);
2338 abort = fod->abort;
2339 fod->writedataactive = false;
2340 spin_unlock_irqrestore(&fod->flock, flags);
2341
2342 switch (fcpreq->op) {
2343
2344 case NVMET_FCOP_WRITEDATA:
2345 if (__nvmet_fc_fod_op_abort(fod, abort))
2346 return;
2347 if (fcpreq->fcp_error ||
2348 fcpreq->transferred_length != fcpreq->transfer_length) {
2349 spin_lock_irqsave(&fod->flock, flags);
2350 fod->abort = true;
2351 spin_unlock_irqrestore(&fod->flock, flags);
2352
2353 nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
2354 return;
2355 }
2356
2357 fod->offset += fcpreq->transferred_length;
2358 if (fod->offset != fod->req.transfer_len) {
2359 spin_lock_irqsave(&fod->flock, flags);
2360 fod->writedataactive = true;
2361 spin_unlock_irqrestore(&fod->flock, flags);
2362
2363 /* transfer the next chunk */
2364 nvmet_fc_transfer_fcp_data(tgtport, fod,
2365 NVMET_FCOP_WRITEDATA);
2366 return;
2367 }
2368
2369 /* data transfer complete, resume with nvmet layer */
2370 fod->req.execute(&fod->req);
2371 break;
2372
2373 case NVMET_FCOP_READDATA:
2374 case NVMET_FCOP_READDATA_RSP:
2375 if (__nvmet_fc_fod_op_abort(fod, abort))
2376 return;
2377 if (fcpreq->fcp_error ||
2378 fcpreq->transferred_length != fcpreq->transfer_length) {
2379 nvmet_fc_abort_op(tgtport, fod);
2380 return;
2381 }
2382
2383 /* success */
2384
2385 if (fcpreq->op == NVMET_FCOP_READDATA_RSP) {
2386 /* data no longer needed */
2387 nvmet_fc_free_tgt_pgs(fod);
2388 nvmet_fc_free_fcp_iod(fod->queue, fod);
2389 return;
2390 }
2391
2392 fod->offset += fcpreq->transferred_length;
2393 if (fod->offset != fod->req.transfer_len) {
2394 /* transfer the next chunk */
2395 nvmet_fc_transfer_fcp_data(tgtport, fod,
2396 NVMET_FCOP_READDATA);
2397 return;
2398 }
2399
2400 /* data transfer complete, send response */
2401
2402 /* data no longer needed */
2403 nvmet_fc_free_tgt_pgs(fod);
2404
2405 nvmet_fc_xmt_fcp_rsp(tgtport, fod);
2406
2407 break;
2408
2409 case NVMET_FCOP_RSP:
2410 if (__nvmet_fc_fod_op_abort(fod, abort))
2411 return;
2412 nvmet_fc_free_fcp_iod(fod->queue, fod);
2413 break;
2414
2415 default:
2416 break;
2417 }
2418 }
2419
2420 static void
2421 nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq)
2422 {
2423 struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
2424
2425 nvmet_fc_fod_op_done(fod);
2426 }
2427
2428 /*
2429 * actual completion handler after execution by the nvmet layer
2430 */
2431 static void
2432 __nvmet_fc_fcp_nvme_cmd_done(struct nvmet_fc_tgtport *tgtport,
2433 struct nvmet_fc_fcp_iod *fod, int status)
2434 {
2435 struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common;
2436 struct nvme_completion *cqe = &fod->rspiubuf.cqe;
2437 unsigned long flags;
2438 bool abort;
2439
2440 spin_lock_irqsave(&fod->flock, flags);
2441 abort = fod->abort;
2442 spin_unlock_irqrestore(&fod->flock, flags);
2443
2444 /* if we have a CQE, snoop the last sq_head value */
2445 if (!status)
2446 fod->queue->sqhd = cqe->sq_head;
2447
2448 if (abort) {
2449 nvmet_fc_abort_op(tgtport, fod);
2450 return;
2451 }
2452
2453 /* if an error handling the cmd post initial parsing */
2454 if (status) {
2455 /* fudge up a failed CQE status for our transport error */
2456 memset(cqe, 0, sizeof(*cqe));
2457 cqe->sq_head = fod->queue->sqhd; /* echo last cqe sqhd */
2458 cqe->sq_id = cpu_to_le16(fod->queue->qid);
2459 cqe->command_id = sqe->command_id;
2460 cqe->status = cpu_to_le16(status);
2461 } else {
2462
2463 /*
2464 * try to push the data even if the SQE status is non-zero.
2465 * There may be a status where data still was intended to
2466 * be moved
2467 */
2468 if ((fod->io_dir == NVMET_FCP_READ) && (fod->data_sg_cnt)) {
2469 /* push the data over before sending rsp */
2470 nvmet_fc_transfer_fcp_data(tgtport, fod,
2471 NVMET_FCOP_READDATA);
2472 return;
2473 }
2474
2475 /* writes & no data - fall thru */
2476 }
2477
2478 /* data no longer needed */
2479 nvmet_fc_free_tgt_pgs(fod);
2480
2481 nvmet_fc_xmt_fcp_rsp(tgtport, fod);
2482 }
2483
2484
2485 static void
2486 nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req)
2487 {
2488 struct nvmet_fc_fcp_iod *fod = nvmet_req_to_fod(nvme_req);
2489 struct nvmet_fc_tgtport *tgtport = fod->tgtport;
2490
2491 __nvmet_fc_fcp_nvme_cmd_done(tgtport, fod, 0);
2492 }
2493
2494
2495 /*
2496 * Actual processing routine for received FC-NVME I/O Requests from the LLD
2497 */
2498 static void
2499 nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
2500 struct nvmet_fc_fcp_iod *fod)
2501 {
2502 struct nvme_fc_cmd_iu *cmdiu = &fod->cmdiubuf;
2503 u32 xfrlen = be32_to_cpu(cmdiu->data_len);
2504 int ret;
2505
2506 /*
2507 * Fused commands are currently not supported in the linux
2508 * implementation.
2509 *
2510 * As such, the implementation of the FC transport does not
2511 * look at the fused commands and order delivery to the upper
2512 * layer until we have both based on csn.
2513 */
2514
2515 fod->fcpreq->done = nvmet_fc_xmt_fcp_op_done;
2516
2517 if (cmdiu->flags & FCNVME_CMD_FLAGS_WRITE) {
2518 fod->io_dir = NVMET_FCP_WRITE;
2519 if (!nvme_is_write(&cmdiu->sqe))
2520 goto transport_error;
2521 } else if (cmdiu->flags & FCNVME_CMD_FLAGS_READ) {
2522 fod->io_dir = NVMET_FCP_READ;
2523 if (nvme_is_write(&cmdiu->sqe))
2524 goto transport_error;
2525 } else {
2526 fod->io_dir = NVMET_FCP_NODATA;
2527 if (xfrlen)
2528 goto transport_error;
2529 }
2530
2531 fod->req.cmd = &fod->cmdiubuf.sqe;
2532 fod->req.cqe = &fod->rspiubuf.cqe;
2533 if (!tgtport->pe)
2534 goto transport_error;
2535 fod->req.port = tgtport->pe->port;
2536
2537 /* clear any response payload */
2538 memset(&fod->rspiubuf, 0, sizeof(fod->rspiubuf));
2539
2540 fod->data_sg = NULL;
2541 fod->data_sg_cnt = 0;
2542
2543 ret = nvmet_req_init(&fod->req,
2544 &fod->queue->nvme_cq,
2545 &fod->queue->nvme_sq,
2546 &nvmet_fc_tgt_fcp_ops);
2547 if (!ret) {
2548 /* bad SQE content or invalid ctrl state */
2549 /* nvmet layer has already called op done to send rsp. */
2550 return;
2551 }
2552
2553 fod->req.transfer_len = xfrlen;
2554
2555 /* keep a running counter of tail position */
2556 atomic_inc(&fod->queue->sqtail);
2557
2558 if (fod->req.transfer_len) {
2559 ret = nvmet_fc_alloc_tgt_pgs(fod);
2560 if (ret) {
2561 nvmet_req_complete(&fod->req, ret);
2562 return;
2563 }
2564 }
2565 fod->req.sg = fod->data_sg;
2566 fod->req.sg_cnt = fod->data_sg_cnt;
2567 fod->offset = 0;
2568
2569 if (fod->io_dir == NVMET_FCP_WRITE) {
2570 /* pull the data over before invoking nvmet layer */
2571 nvmet_fc_transfer_fcp_data(tgtport, fod, NVMET_FCOP_WRITEDATA);
2572 return;
2573 }
2574
2575 /*
2576 * Reads or no data:
2577 *
2578 * can invoke the nvmet_layer now. If read data, cmd completion will
2579 * push the data
2580 */
2581 fod->req.execute(&fod->req);
2582 return;
2583
2584 transport_error:
2585 nvmet_fc_abort_op(tgtport, fod);
2586 }
2587
2588 /**
2589 * nvmet_fc_rcv_fcp_req - transport entry point called by an LLDD
2590  * upon the reception of an NVME FCP CMD IU.
2591 *
2592 * Pass a FC-NVME FCP CMD IU received from the FC link to the nvmet-fc
2593 * layer for processing.
2594 *
2595 * The nvmet_fc layer allocates a local job structure (struct
2596 * nvmet_fc_fcp_iod) from the queue for the io and copies the
2597 * CMD IU buffer to the job structure. As such, on a successful
2598 * completion (returns 0), the LLDD may immediately free/reuse
2599 * the CMD IU buffer passed in the call.
2600 *
2601  * However, because FC is packetized and the FC LLDD api may issue a hw
2602  * command to send the response, the LLDD may not receive the hw
2603  * completion for that command (and upcall the nvmet_fc layer) before a
2604  * new command is asynchronously received. It is therefore possible for
2605  * a command to be received before the LLDD and nvmet_fc have recycled
2606  * the job structure, which gives the appearance of more commands
2607  * received than fit in the sq.
2608  * To alleviate this scenario, a temporary queue is maintained in the
2609  * transport for pending LLDD requests waiting for a queue job structure.
2610  * In these "overrun" cases, a temporary queue element is allocated, the
2611  * LLDD request and CMD IU buffer information are remembered, and the
2612  * routine returns an -EOVERFLOW status. Subsequently, when a queue job
2613  * structure is freed, it is immediately reallocated for anything on the
2614  * pending request list. The LLDD's defer_rcv() callback is called,
2615  * informing the LLDD that it may reuse the CMD IU buffer, and the io
2616  * is then started normally with the transport.
2617 *
2618 * The LLDD, when receiving an -EOVERFLOW completion status, is to treat
2619 * the completion as successful but must not reuse the CMD IU buffer
2620 * until the LLDD's defer_rcv() callback has been called for the
2621 * corresponding struct nvmefc_tgt_fcp_req pointer.
2622 *
2623 * If there is any other condition in which an error occurs, the
2624 * transport will return a non-zero status indicating the error.
2625 * In all cases other than -EOVERFLOW, the transport has not accepted the
2626 * request and the LLDD should abort the exchange.
2627 *
2628 * @target_port: pointer to the (registered) target port the FCP CMD IU
2629 * was received on.
2630 * @fcpreq: pointer to a fcpreq request structure to be used to reference
2631 * the exchange corresponding to the FCP Exchange.
2632 * @cmdiubuf: pointer to the buffer containing the FCP CMD IU
2633 * @cmdiubuf_len: length, in bytes, of the received FCP CMD IU
2634 */
2635 int
2636 nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *target_port,
2637 struct nvmefc_tgt_fcp_req *fcpreq,
2638 void *cmdiubuf, u32 cmdiubuf_len)
2639 {
2640 struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
2641 struct nvme_fc_cmd_iu *cmdiu = cmdiubuf;
2642 struct nvmet_fc_tgt_queue *queue;
2643 struct nvmet_fc_fcp_iod *fod;
2644 struct nvmet_fc_defer_fcp_req *deferfcp;
2645 unsigned long flags;
2646
2647 /* validate iu, so the connection id can be used to find the queue */
2648 if ((cmdiubuf_len != sizeof(*cmdiu)) ||
2649 (cmdiu->format_id != NVME_CMD_FORMAT_ID) ||
2650 (cmdiu->fc_id != NVME_CMD_FC_ID) ||
2651 (be16_to_cpu(cmdiu->iu_len) != (sizeof(*cmdiu)/4)))
2652 return -EIO;
2653
2654 queue = nvmet_fc_find_target_queue(tgtport,
2655 be64_to_cpu(cmdiu->connection_id));
2656 if (!queue)
2657 return -ENOTCONN;
2658
2659 /*
2660 * note: reference taken by find_target_queue
2661 * After successful fod allocation, the fod will inherit the
2662 * ownership of that reference and will remove the reference
2663 * when the fod is freed.
2664 */
2665
2666 spin_lock_irqsave(&queue->qlock, flags);
2667
2668 fod = nvmet_fc_alloc_fcp_iod(queue);
2669 if (fod) {
2670 spin_unlock_irqrestore(&queue->qlock, flags);
2671
2672 fcpreq->nvmet_fc_private = fod;
2673 fod->fcpreq = fcpreq;
2674
2675 memcpy(&fod->cmdiubuf, cmdiubuf, cmdiubuf_len);
2676
2677 nvmet_fc_queue_fcp_req(tgtport, queue, fcpreq);
2678
2679 return 0;
2680 }
2681
2682 if (!tgtport->ops->defer_rcv) {
2683 spin_unlock_irqrestore(&queue->qlock, flags);
2684 /* release the queue lookup reference */
2685 nvmet_fc_tgt_q_put(queue);
2686 return -ENOENT;
2687 }
2688
2689 deferfcp = list_first_entry_or_null(&queue->avail_defer_list,
2690 struct nvmet_fc_defer_fcp_req, req_list);
2691 if (deferfcp) {
2692 /* Just re-use one that was previously allocated */
2693 list_del(&deferfcp->req_list);
2694 } else {
2695 spin_unlock_irqrestore(&queue->qlock, flags);
2696
2697 /* Now we need to dynamically allocate one */
2698 deferfcp = kmalloc(sizeof(*deferfcp), GFP_KERNEL);
2699 if (!deferfcp) {
2700 /* release the queue lookup reference */
2701 nvmet_fc_tgt_q_put(queue);
2702 return -ENOMEM;
2703 }
2704 spin_lock_irqsave(&queue->qlock, flags);
2705 }
2706
2707 /* For now, use rspaddr / rsplen to save payload information */
2708 fcpreq->rspaddr = cmdiubuf;
2709 fcpreq->rsplen = cmdiubuf_len;
2710 deferfcp->fcp_req = fcpreq;
2711
2712 /* defer processing till a fod becomes available */
2713 list_add_tail(&deferfcp->req_list, &queue->pending_cmd_list);
2714
2715 /* NOTE: the queue lookup reference is still valid */
2716
2717 spin_unlock_irqrestore(&queue->qlock, flags);
2718
2719 return -EOVERFLOW;
2720 }
2721 EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_req);
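/*
 * Illustrative sketch, not part of the driver: how an LLDD might feed a
 * received FCP CMD IU into nvmet-fc and honor the -EOVERFLOW contract
 * described above. The lldd_* names are hypothetical; only
 * nvmet_fc_rcv_fcp_req() and the defer_rcv() template callback come
 * from the API.
 *
 *	static void lldd_recv_fcp_cmd(struct lldd_rport *rport,
 *				      struct nvmefc_tgt_fcp_req *fcpreq,
 *				      void *cmdiu, u32 cmdiu_len)
 *	{
 *		int ret;
 *
 *		ret = nvmet_fc_rcv_fcp_req(rport->targetport, fcpreq,
 *					   cmdiu, cmdiu_len);
 *		if (!ret)
 *			return;		// CMD IU buffer may be reused now
 *		if (ret == -EOVERFLOW)
 *			return;		// hold CMD IU until defer_rcv() fires
 *		lldd_abort_exchange(fcpreq);	// any other error: abort
 *	}
 */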
2722
2723 /**
2724 * nvmet_fc_rcv_fcp_abort - transport entry point called by an LLDD
2725  * upon the reception of an ABTS for an FCP command
2726  *
2727  * Notify the transport that an ABTS has been received for an FCP command
2728 * that had been given to the transport via nvmet_fc_rcv_fcp_req(). The
2729 * LLDD believes the command is still being worked on
2730 * (template_ops->fcp_req_release() has not been called).
2731 *
2732 * The transport will wait for any outstanding work (an op to the LLDD,
2733 * which the lldd should complete with error due to the ABTS; or the
2734 * completion from the nvmet layer of the nvme command), then will
2735  * stop processing and call the LLDD's fcp_req_release() callback to
2736 * return the i/o context to the LLDD. The LLDD may send the BA_ACC
2737 * to the ABTS either after return from this function (assuming any
2738 * outstanding op work has been terminated) or upon the callback being
2739 * called.
2740 *
2741 * @target_port: pointer to the (registered) target port the FCP CMD IU
2742 * was received on.
2743 * @fcpreq: pointer to the fcpreq request structure that corresponds
2744 * to the exchange that received the ABTS.
2745 */
2746 void
2747 nvmet_fc_rcv_fcp_abort(struct nvmet_fc_target_port *target_port,
2748 struct nvmefc_tgt_fcp_req *fcpreq)
2749 {
2750 struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
2751 struct nvmet_fc_tgt_queue *queue;
2752 unsigned long flags;
2753
2754 if (!fod || fod->fcpreq != fcpreq)
2755 /* job appears to have already completed, ignore abort */
2756 return;
2757
2758 queue = fod->queue;
2759
2760 spin_lock_irqsave(&queue->qlock, flags);
2761 if (fod->active) {
2762 /*
2763 * mark as abort. The abort handler, invoked upon completion
2764 * of any work, will detect the aborted status and do the
2765 * callback.
2766 */
2767 spin_lock(&fod->flock);
2768 fod->abort = true;
2769 fod->aborted = true;
2770 spin_unlock(&fod->flock);
2771 }
2772 spin_unlock_irqrestore(&queue->qlock, flags);
2773 }
2774 EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_abort);
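/*
 * Illustrative sketch, not part of the driver: an LLDD ABTS handler
 * simply notifies nvmet-fc; per the comment above, one valid choice is
 * to defer the BA_ACC until fcp_req_release() returns the i/o context.
 * The lldd_* names are hypothetical.
 *
 *	static void lldd_handle_abts(struct lldd_rport *rport,
 *				     struct nvmefc_tgt_fcp_req *fcpreq)
 *	{
 *		nvmet_fc_rcv_fcp_abort(rport->targetport, fcpreq);
 *		// BA_ACC sent from the fcp_req_release() callback for
 *		// this exchange
 *	}
 */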
2775
2776
2777 struct nvmet_fc_traddr {
2778 u64 nn;
2779 u64 pn;
2780 };
2781
2782 static int
2783 __nvme_fc_parse_u64(substring_t *sstr, u64 *val)
2784 {
2785 u64 token64;
2786
2787 if (match_u64(sstr, &token64))
2788 return -EINVAL;
2789 *val = token64;
2790
2791 return 0;
2792 }
2793
2794 /*
2795  * This routine validates and extracts the WWNs from the TRADDR string.
2796  * The kernel parser needs a 0x prefix to determine the number base, so the
2797  * string to parse is always built with a 0x prefix before the names are parsed.
2798 */
2799 static int
2800 nvme_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf, size_t blen)
2801 {
2802 char name[2 + NVME_FC_TRADDR_HEXNAMELEN + 1];
2803 substring_t wwn = { name, &name[sizeof(name)-1] };
2804 int nnoffset, pnoffset;
2805
2806 /* validate if string is one of the 2 allowed formats */
2807 if (strnlen(buf, blen) == NVME_FC_TRADDR_MAXLENGTH &&
2808 !strncmp(buf, "nn-0x", NVME_FC_TRADDR_OXNNLEN) &&
2809 !strncmp(&buf[NVME_FC_TRADDR_MAX_PN_OFFSET],
2810 "pn-0x", NVME_FC_TRADDR_OXNNLEN)) {
2811 nnoffset = NVME_FC_TRADDR_OXNNLEN;
2812 pnoffset = NVME_FC_TRADDR_MAX_PN_OFFSET +
2813 NVME_FC_TRADDR_OXNNLEN;
2814 } else if ((strnlen(buf, blen) == NVME_FC_TRADDR_MINLENGTH &&
2815 !strncmp(buf, "nn-", NVME_FC_TRADDR_NNLEN) &&
2816 !strncmp(&buf[NVME_FC_TRADDR_MIN_PN_OFFSET],
2817 "pn-", NVME_FC_TRADDR_NNLEN))) {
2818 nnoffset = NVME_FC_TRADDR_NNLEN;
2819 pnoffset = NVME_FC_TRADDR_MIN_PN_OFFSET + NVME_FC_TRADDR_NNLEN;
2820 } else
2821 goto out_einval;
2822
2823 name[0] = '0';
2824 name[1] = 'x';
2825 name[2 + NVME_FC_TRADDR_HEXNAMELEN] = 0;
2826
2827 memcpy(&name[2], &buf[nnoffset], NVME_FC_TRADDR_HEXNAMELEN);
2828 if (__nvme_fc_parse_u64(&wwn, &traddr->nn))
2829 goto out_einval;
2830
2831 memcpy(&name[2], &buf[pnoffset], NVME_FC_TRADDR_HEXNAMELEN);
2832 if (__nvme_fc_parse_u64(&wwn, &traddr->pn))
2833 goto out_einval;
2834
2835 return 0;
2836
2837 out_einval:
2838 pr_warn("%s: bad traddr string\n", __func__);
2839 return -EINVAL;
2840 }
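/*
 * Accepted traddr forms for the parser above (WWN values are
 * hypothetical):
 *	"nn-0x20000090fa942779:pn-0x10000090fa942779"	(with 0x prefixes)
 *	"nn-20000090fa942779:pn-10000090fa942779"	(without 0x prefixes)
 * Each name carries NVME_FC_TRADDR_HEXNAMELEN hex digits; the routine
 * prepends the "0x" itself before handing each name to match_u64().
 */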
2841
2842 static int
2843 nvmet_fc_add_port(struct nvmet_port *port)
2844 {
2845 struct nvmet_fc_tgtport *tgtport;
2846 struct nvmet_fc_port_entry *pe;
2847 struct nvmet_fc_traddr traddr = { 0L, 0L };
2848 unsigned long flags;
2849 int ret;
2850
2851 /* validate the address info */
2852 if ((port->disc_addr.trtype != NVMF_TRTYPE_FC) ||
2853 (port->disc_addr.adrfam != NVMF_ADDR_FAMILY_FC))
2854 return -EINVAL;
2855
2856 /* map the traddr address info to a target port */
2857
2858 ret = nvme_fc_parse_traddr(&traddr, port->disc_addr.traddr,
2859 sizeof(port->disc_addr.traddr));
2860 if (ret)
2861 return ret;
2862
2863 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
2864 if (!pe)
2865 return -ENOMEM;
2866
2867 ret = -ENXIO;
2868 spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
2869 list_for_each_entry(tgtport, &nvmet_fc_target_list, tgt_list) {
2870 if ((tgtport->fc_target_port.node_name == traddr.nn) &&
2871 (tgtport->fc_target_port.port_name == traddr.pn)) {
2872 			/* an FC port can only map to one nvmet port id */
2873 if (!tgtport->pe) {
2874 nvmet_fc_portentry_bind(tgtport, pe, port);
2875 ret = 0;
2876 } else
2877 ret = -EALREADY;
2878 break;
2879 }
2880 }
2881 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
2882
2883 if (ret)
2884 kfree(pe);
2885
2886 return ret;
2887 }
2888
2889 static void
2890 nvmet_fc_remove_port(struct nvmet_port *port)
2891 {
2892 struct nvmet_fc_port_entry *pe = port->priv;
2893
2894 nvmet_fc_portentry_unbind(pe);
2895
2896 /* terminate any outstanding associations */
2897 __nvmet_fc_free_assocs(pe->tgtport);
2898
2899 kfree(pe);
2900 }
2901
2902 static void
2903 nvmet_fc_discovery_chg(struct nvmet_port *port)
2904 {
2905 struct nvmet_fc_port_entry *pe = port->priv;
2906 struct nvmet_fc_tgtport *tgtport = pe->tgtport;
2907
2908 if (tgtport && tgtport->ops->discovery_event)
2909 tgtport->ops->discovery_event(&tgtport->fc_target_port);
2910 }
2911
2912 static const struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops = {
2913 .owner = THIS_MODULE,
2914 .type = NVMF_TRTYPE_FC,
2915 .msdbd = 1,
2916 .add_port = nvmet_fc_add_port,
2917 .remove_port = nvmet_fc_remove_port,
2918 .queue_response = nvmet_fc_fcp_nvme_cmd_done,
2919 .delete_ctrl = nvmet_fc_delete_ctrl,
2920 .discovery_chg = nvmet_fc_discovery_chg,
2921 };
2922
2923 static int __init nvmet_fc_init_module(void)
2924 {
2925 return nvmet_register_transport(&nvmet_fc_tgt_fcp_ops);
2926 }
2927
2928 static void __exit nvmet_fc_exit_module(void)
2929 {
2930 /* ensure any shutdown operation, e.g. delete ctrls have finished */
2931 flush_workqueue(nvmet_wq);
2932
2933 	/* sanity check - all targetports should be removed */
2934 if (!list_empty(&nvmet_fc_target_list))
2935 pr_warn("%s: targetport list not empty\n", __func__);
2936
2937 nvmet_unregister_transport(&nvmet_fc_tgt_fcp_ops);
2938
2939 ida_destroy(&nvmet_fc_tgtport_cnt);
2940 }
2941
2942 module_init(nvmet_fc_init_module);
2943 module_exit(nvmet_fc_exit_module);
2944
2945 MODULE_LICENSE("GPL v2");
2946