// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016 Avago Technologies. All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blk-mq.h>
#include <linux/parser.h>
#include <linux/random.h>
#include <uapi/scsi/fc/fc_fs.h>
#include <uapi/scsi/fc/fc_els.h>

#include "nvmet.h"
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>
#include "../host/fc.h"


/* *************************** Data Structures/Defines ****************** */


#define NVMET_LS_CTX_COUNT		256

struct nvmet_fc_tgtport;
struct nvmet_fc_tgt_assoc;

struct nvmet_fc_ls_iod {		/* for an LS RQST RCV */
	struct nvmefc_ls_rsp		*lsrsp;
	struct nvmefc_tgt_fcp_req	*fcpreq;	/* only if RS */

	struct list_head		ls_rcv_list; /* tgtport->ls_rcv_list */

	struct nvmet_fc_tgtport		*tgtport;
	struct nvmet_fc_tgt_assoc	*assoc;
	void				*hosthandle;

	union nvmefc_ls_requests	*rqstbuf;
	union nvmefc_ls_responses	*rspbuf;
	u16				rqstdatalen;
	dma_addr_t			rspdma;

	struct scatterlist		sg[2];

	struct work_struct		work;
} __aligned(sizeof(unsigned long long));

struct nvmet_fc_ls_req_op {		/* for an LS RQST XMT */
	struct nvmefc_ls_req		ls_req;

	struct nvmet_fc_tgtport		*tgtport;
	void				*hosthandle;

	int				ls_error;
	struct list_head		lsreq_list; /* tgtport->ls_req_list */
	bool				req_queued;
};


/* desired maximum for a single sequence - if sg list allows it */
#define NVMET_FC_MAX_SEQ_LENGTH		(256 * 1024)

enum nvmet_fcp_datadir {
	NVMET_FCP_NODATA,
	NVMET_FCP_WRITE,
	NVMET_FCP_READ,
	NVMET_FCP_ABORTED,
};

struct nvmet_fc_fcp_iod {
	struct nvmefc_tgt_fcp_req	*fcpreq;

	struct nvme_fc_cmd_iu		cmdiubuf;
	struct nvme_fc_ersp_iu		rspiubuf;
	dma_addr_t			rspdma;
	struct scatterlist		*next_sg;
	struct scatterlist		*data_sg;
	int				data_sg_cnt;
	u32				offset;
	enum nvmet_fcp_datadir		io_dir;
	bool				active;
	bool				abort;
	bool				aborted;
	bool				writedataactive;
	spinlock_t			flock;

	struct nvmet_req		req;
	struct work_struct		defer_work;

	struct nvmet_fc_tgtport		*tgtport;
	struct nvmet_fc_tgt_queue	*queue;

	struct list_head		fcp_list;	/* tgtport->fcp_list */
};

struct nvmet_fc_tgtport {
	struct nvmet_fc_target_port	fc_target_port;

	struct list_head		tgt_list; /* nvmet_fc_target_list */
	struct device			*dev;	/* dev for dma mapping */
	struct nvmet_fc_target_template	*ops;

	struct nvmet_fc_ls_iod		*iod;
	spinlock_t			lock;
	struct list_head		ls_rcv_list;
	struct list_head		ls_req_list;
	struct list_head		ls_busylist;
	struct list_head		assoc_list;
	struct list_head		host_list;
	struct ida			assoc_cnt;
	struct nvmet_fc_port_entry	*pe;
	struct kref			ref;
	u32				max_sg_cnt;
};

struct nvmet_fc_port_entry {
	struct nvmet_fc_tgtport		*tgtport;
	struct nvmet_port		*port;
	u64				node_name;
	u64				port_name;
	struct list_head		pe_list;
};

struct nvmet_fc_defer_fcp_req {
	struct list_head		req_list;
	struct nvmefc_tgt_fcp_req	*fcp_req;
};

struct nvmet_fc_tgt_queue {
	bool				ninetypercent;
	u16				qid;
	u16				sqsize;
	u16				ersp_ratio;
	__le16				sqhd;
	atomic_t			connected;
	atomic_t			sqtail;
	atomic_t			zrspcnt;
	atomic_t			rsn;
	spinlock_t			qlock;
	struct nvmet_cq			nvme_cq;
	struct nvmet_sq			nvme_sq;
	struct nvmet_fc_tgt_assoc	*assoc;
	struct list_head		fod_list;
	struct list_head		pending_cmd_list;
	struct list_head		avail_defer_list;
	struct workqueue_struct		*work_q;
	struct kref			ref;
	struct nvmet_fc_fcp_iod		fod[];		/* array of fcp_iods */
} __aligned(sizeof(unsigned long long));

struct nvmet_fc_hostport {
	struct nvmet_fc_tgtport		*tgtport;
	void				*hosthandle;
	struct list_head		host_list;
	struct kref			ref;
	u8				invalid;
};

struct nvmet_fc_tgt_assoc {
	u64				association_id;
	u32				a_id;
	atomic_t			terminating;
	struct nvmet_fc_tgtport		*tgtport;
	struct nvmet_fc_hostport	*hostport;
	struct nvmet_fc_ls_iod		*rcv_disconn;
	struct list_head		a_list;
	struct nvmet_fc_tgt_queue	*queues[NVMET_NR_QUEUES + 1];
	struct kref			ref;
	struct work_struct		del_work;
	atomic_t			del_work_active;
};


static inline int
nvmet_fc_iodnum(struct nvmet_fc_ls_iod *iodptr)
{
	return (iodptr - iodptr->tgtport->iod);
}

static inline int
nvmet_fc_fodnum(struct nvmet_fc_fcp_iod *fodptr)
{
	return (fodptr - fodptr->queue->fod);
}


/*
 * Association and Connection IDs:
 *
 * Association ID will have random number in upper 6 bytes and zero
 * in lower 2 bytes
 *
 * Connection IDs will be Association ID with QID or'd in lower 2 bytes
 *
 * note: Association ID = Connection ID for queue 0
 */
#define BYTES_FOR_QID			sizeof(u16)
#define BYTES_FOR_QID_SHIFT		(BYTES_FOR_QID * 8)
#define NVMET_FC_QUEUEID_MASK		((u64)((1 << BYTES_FOR_QID_SHIFT) - 1))

static inline u64
nvmet_fc_makeconnid(struct nvmet_fc_tgt_assoc *assoc, u16 qid)
{
	return (assoc->association_id | qid);
}

static inline u64
nvmet_fc_getassociationid(u64 connectionid)
{
	return connectionid & ~NVMET_FC_QUEUEID_MASK;
}

static inline u16
nvmet_fc_getqueueid(u64 connectionid)
{
	return (u16)(connectionid & NVMET_FC_QUEUEID_MASK);
}
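
/*
 * Worked example of the ID layout above (illustrative values only):
 * an association that draws the random ID 0x1a2b3c4d5e6f0000 yields
 * connection ID 0x1a2b3c4d5e6f0000 for the admin queue (qid 0) and
 * 0x1a2b3c4d5e6f0003 for io queue 3. nvmet_fc_getassociationid() masks
 * the low 2 bytes back off, and nvmet_fc_getqueueid() returns the qid.
 */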

static inline struct nvmet_fc_tgtport *
targetport_to_tgtport(struct nvmet_fc_target_port *targetport)
{
	return container_of(targetport, struct nvmet_fc_tgtport,
			fc_target_port);
}

static inline struct nvmet_fc_fcp_iod *
nvmet_req_to_fod(struct nvmet_req *nvme_req)
{
	return container_of(nvme_req, struct nvmet_fc_fcp_iod, req);
}


/* *************************** Globals **************************** */


static DEFINE_SPINLOCK(nvmet_fc_tgtlock);

static LIST_HEAD(nvmet_fc_target_list);
static DEFINE_IDA(nvmet_fc_tgtport_cnt);
static LIST_HEAD(nvmet_fc_portentry_list);


static void nvmet_fc_handle_ls_rqst_work(struct work_struct *work);
static void nvmet_fc_fcp_rqst_op_defer_work(struct work_struct *work);
static void nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc);
static int nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc);
static void nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue);
static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue);
static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport);
static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport);
static void nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
					struct nvmet_fc_fcp_iod *fod);
static void nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc);
static void nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport,
				struct nvmet_fc_ls_iod *iod);


/* *********************** FC-NVME DMA Handling **************************** */

/*
 * The fcloop device passes in a NULL device pointer. Real LLD's will
 * pass in a valid device pointer. If NULL is passed to the dma mapping
 * routines, depending on the platform, it may or may not succeed, and
 * may crash.
 *
 * As such:
 * Wrap all the dma routines and check the dev pointer.
 *
 * If simple mappings (return just a dma address), we'll noop them,
 * returning a dma address of 0.
 *
 * On more complex mappings (dma_map_sg), a pseudo routine fills
 * in the scatter list, setting all dma addresses to 0.
 */
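
/*
 * For example (illustrating the fcloop case): fc_dma_map_single(NULL, buf,
 * len, DMA_TO_DEVICE) below simply returns 0, fc_dma_mapping_error(NULL, 0)
 * reports no error, and the matching fc_dma_unmap_single(NULL, ...) is a
 * no-op, so the buffer is "mapped" without ever touching the DMA API.
 */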

static inline dma_addr_t
fc_dma_map_single(struct device *dev, void *ptr, size_t size,
		enum dma_data_direction dir)
{
	return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L;
}

static inline int
fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dev ? dma_mapping_error(dev, dma_addr) : 0;
}

static inline void
fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
	enum dma_data_direction dir)
{
	if (dev)
		dma_unmap_single(dev, addr, size, dir);
}

static inline void
fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	if (dev)
		dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void
fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	if (dev)
		dma_sync_single_for_device(dev, addr, size, dir);
}

/* pseudo dma_map_sg call */
static int
fc_map_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	WARN_ON(nents == 0 || sg[0].length == 0);

	for_each_sg(sg, s, nents, i) {
		s->dma_address = 0L;
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		s->dma_length = s->length;
#endif
	}
	return nents;
}

static inline int
fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	return dev ?
dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents); 336 } 337 338 static inline void 339 fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, 340 enum dma_data_direction dir) 341 { 342 if (dev) 343 dma_unmap_sg(dev, sg, nents, dir); 344 } 345 346 347 /* ********************** FC-NVME LS XMT Handling ************************* */ 348 349 350 static void 351 __nvmet_fc_finish_ls_req(struct nvmet_fc_ls_req_op *lsop) 352 { 353 struct nvmet_fc_tgtport *tgtport = lsop->tgtport; 354 struct nvmefc_ls_req *lsreq = &lsop->ls_req; 355 unsigned long flags; 356 357 spin_lock_irqsave(&tgtport->lock, flags); 358 359 if (!lsop->req_queued) { 360 spin_unlock_irqrestore(&tgtport->lock, flags); 361 return; 362 } 363 364 list_del(&lsop->lsreq_list); 365 366 lsop->req_queued = false; 367 368 spin_unlock_irqrestore(&tgtport->lock, flags); 369 370 fc_dma_unmap_single(tgtport->dev, lsreq->rqstdma, 371 (lsreq->rqstlen + lsreq->rsplen), 372 DMA_BIDIRECTIONAL); 373 374 nvmet_fc_tgtport_put(tgtport); 375 } 376 377 static int 378 __nvmet_fc_send_ls_req(struct nvmet_fc_tgtport *tgtport, 379 struct nvmet_fc_ls_req_op *lsop, 380 void (*done)(struct nvmefc_ls_req *req, int status)) 381 { 382 struct nvmefc_ls_req *lsreq = &lsop->ls_req; 383 unsigned long flags; 384 int ret = 0; 385 386 if (!tgtport->ops->ls_req) 387 return -EOPNOTSUPP; 388 389 if (!nvmet_fc_tgtport_get(tgtport)) 390 return -ESHUTDOWN; 391 392 lsreq->done = done; 393 lsop->req_queued = false; 394 INIT_LIST_HEAD(&lsop->lsreq_list); 395 396 lsreq->rqstdma = fc_dma_map_single(tgtport->dev, lsreq->rqstaddr, 397 lsreq->rqstlen + lsreq->rsplen, 398 DMA_BIDIRECTIONAL); 399 if (fc_dma_mapping_error(tgtport->dev, lsreq->rqstdma)) { 400 ret = -EFAULT; 401 goto out_puttgtport; 402 } 403 lsreq->rspdma = lsreq->rqstdma + lsreq->rqstlen; 404 405 spin_lock_irqsave(&tgtport->lock, flags); 406 407 list_add_tail(&lsop->lsreq_list, &tgtport->ls_req_list); 408 409 lsop->req_queued = true; 410 411 spin_unlock_irqrestore(&tgtport->lock, flags); 412 413 ret = tgtport->ops->ls_req(&tgtport->fc_target_port, lsop->hosthandle, 414 lsreq); 415 if (ret) 416 goto out_unlink; 417 418 return 0; 419 420 out_unlink: 421 lsop->ls_error = ret; 422 spin_lock_irqsave(&tgtport->lock, flags); 423 lsop->req_queued = false; 424 list_del(&lsop->lsreq_list); 425 spin_unlock_irqrestore(&tgtport->lock, flags); 426 fc_dma_unmap_single(tgtport->dev, lsreq->rqstdma, 427 (lsreq->rqstlen + lsreq->rsplen), 428 DMA_BIDIRECTIONAL); 429 out_puttgtport: 430 nvmet_fc_tgtport_put(tgtport); 431 432 return ret; 433 } 434 435 static int 436 nvmet_fc_send_ls_req_async(struct nvmet_fc_tgtport *tgtport, 437 struct nvmet_fc_ls_req_op *lsop, 438 void (*done)(struct nvmefc_ls_req *req, int status)) 439 { 440 /* don't wait for completion */ 441 442 return __nvmet_fc_send_ls_req(tgtport, lsop, done); 443 } 444 445 static void 446 nvmet_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status) 447 { 448 struct nvmet_fc_ls_req_op *lsop = 449 container_of(lsreq, struct nvmet_fc_ls_req_op, ls_req); 450 451 __nvmet_fc_finish_ls_req(lsop); 452 453 /* fc-nvme target doesn't care about success or failure of cmd */ 454 455 kfree(lsop); 456 } 457 458 /* 459 * This routine sends a FC-NVME LS to disconnect (aka terminate) 460 * the FC-NVME Association. Terminating the association also 461 * terminates the FC-NVME connections (per queue, both admin and io 462 * queues) that are part of the association. E.g. 
things are torn 463 * down, and the related FC-NVME Association ID and Connection IDs 464 * become invalid. 465 * 466 * The behavior of the fc-nvme target is such that it's 467 * understanding of the association and connections will implicitly 468 * be torn down. The action is implicit as it may be due to a loss of 469 * connectivity with the fc-nvme host, so the target may never get a 470 * response even if it tried. As such, the action of this routine 471 * is to asynchronously send the LS, ignore any results of the LS, and 472 * continue on with terminating the association. If the fc-nvme host 473 * is present and receives the LS, it too can tear down. 474 */ 475 static void 476 nvmet_fc_xmt_disconnect_assoc(struct nvmet_fc_tgt_assoc *assoc) 477 { 478 struct nvmet_fc_tgtport *tgtport = assoc->tgtport; 479 struct fcnvme_ls_disconnect_assoc_rqst *discon_rqst; 480 struct fcnvme_ls_disconnect_assoc_acc *discon_acc; 481 struct nvmet_fc_ls_req_op *lsop; 482 struct nvmefc_ls_req *lsreq; 483 int ret; 484 485 /* 486 * If ls_req is NULL or no hosthandle, it's an older lldd and no 487 * message is normal. Otherwise, send unless the hostport has 488 * already been invalidated by the lldd. 489 */ 490 if (!tgtport->ops->ls_req || !assoc->hostport || 491 assoc->hostport->invalid) 492 return; 493 494 lsop = kzalloc((sizeof(*lsop) + 495 sizeof(*discon_rqst) + sizeof(*discon_acc) + 496 tgtport->ops->lsrqst_priv_sz), GFP_KERNEL); 497 if (!lsop) { 498 dev_info(tgtport->dev, 499 "{%d:%d} send Disconnect Association failed: ENOMEM\n", 500 tgtport->fc_target_port.port_num, assoc->a_id); 501 return; 502 } 503 504 discon_rqst = (struct fcnvme_ls_disconnect_assoc_rqst *)&lsop[1]; 505 discon_acc = (struct fcnvme_ls_disconnect_assoc_acc *)&discon_rqst[1]; 506 lsreq = &lsop->ls_req; 507 if (tgtport->ops->lsrqst_priv_sz) 508 lsreq->private = (void *)&discon_acc[1]; 509 else 510 lsreq->private = NULL; 511 512 lsop->tgtport = tgtport; 513 lsop->hosthandle = assoc->hostport->hosthandle; 514 515 nvmefc_fmt_lsreq_discon_assoc(lsreq, discon_rqst, discon_acc, 516 assoc->association_id); 517 518 ret = nvmet_fc_send_ls_req_async(tgtport, lsop, 519 nvmet_fc_disconnect_assoc_done); 520 if (ret) { 521 dev_info(tgtport->dev, 522 "{%d:%d} XMT Disconnect Association failed: %d\n", 523 tgtport->fc_target_port.port_num, assoc->a_id, ret); 524 kfree(lsop); 525 } 526 } 527 528 529 /* *********************** FC-NVME Port Management ************************ */ 530 531 532 static int 533 nvmet_fc_alloc_ls_iodlist(struct nvmet_fc_tgtport *tgtport) 534 { 535 struct nvmet_fc_ls_iod *iod; 536 int i; 537 538 iod = kcalloc(NVMET_LS_CTX_COUNT, sizeof(struct nvmet_fc_ls_iod), 539 GFP_KERNEL); 540 if (!iod) 541 return -ENOMEM; 542 543 tgtport->iod = iod; 544 545 for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) { 546 INIT_WORK(&iod->work, nvmet_fc_handle_ls_rqst_work); 547 iod->tgtport = tgtport; 548 list_add_tail(&iod->ls_rcv_list, &tgtport->ls_rcv_list); 549 550 iod->rqstbuf = kzalloc(sizeof(union nvmefc_ls_requests) + 551 sizeof(union nvmefc_ls_responses), 552 GFP_KERNEL); 553 if (!iod->rqstbuf) 554 goto out_fail; 555 556 iod->rspbuf = (union nvmefc_ls_responses *)&iod->rqstbuf[1]; 557 558 iod->rspdma = fc_dma_map_single(tgtport->dev, iod->rspbuf, 559 sizeof(*iod->rspbuf), 560 DMA_TO_DEVICE); 561 if (fc_dma_mapping_error(tgtport->dev, iod->rspdma)) 562 goto out_fail; 563 } 564 565 return 0; 566 567 out_fail: 568 kfree(iod->rqstbuf); 569 list_del(&iod->ls_rcv_list); 570 for (iod--, i--; i >= 0; iod--, i--) { 571 fc_dma_unmap_single(tgtport->dev, 
iod->rspdma, 572 sizeof(*iod->rspbuf), DMA_TO_DEVICE); 573 kfree(iod->rqstbuf); 574 list_del(&iod->ls_rcv_list); 575 } 576 577 kfree(iod); 578 579 return -EFAULT; 580 } 581 582 static void 583 nvmet_fc_free_ls_iodlist(struct nvmet_fc_tgtport *tgtport) 584 { 585 struct nvmet_fc_ls_iod *iod = tgtport->iod; 586 int i; 587 588 for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) { 589 fc_dma_unmap_single(tgtport->dev, 590 iod->rspdma, sizeof(*iod->rspbuf), 591 DMA_TO_DEVICE); 592 kfree(iod->rqstbuf); 593 list_del(&iod->ls_rcv_list); 594 } 595 kfree(tgtport->iod); 596 } 597 598 static struct nvmet_fc_ls_iod * 599 nvmet_fc_alloc_ls_iod(struct nvmet_fc_tgtport *tgtport) 600 { 601 struct nvmet_fc_ls_iod *iod; 602 unsigned long flags; 603 604 spin_lock_irqsave(&tgtport->lock, flags); 605 iod = list_first_entry_or_null(&tgtport->ls_rcv_list, 606 struct nvmet_fc_ls_iod, ls_rcv_list); 607 if (iod) 608 list_move_tail(&iod->ls_rcv_list, &tgtport->ls_busylist); 609 spin_unlock_irqrestore(&tgtport->lock, flags); 610 return iod; 611 } 612 613 614 static void 615 nvmet_fc_free_ls_iod(struct nvmet_fc_tgtport *tgtport, 616 struct nvmet_fc_ls_iod *iod) 617 { 618 unsigned long flags; 619 620 spin_lock_irqsave(&tgtport->lock, flags); 621 list_move(&iod->ls_rcv_list, &tgtport->ls_rcv_list); 622 spin_unlock_irqrestore(&tgtport->lock, flags); 623 } 624 625 static void 626 nvmet_fc_prep_fcp_iodlist(struct nvmet_fc_tgtport *tgtport, 627 struct nvmet_fc_tgt_queue *queue) 628 { 629 struct nvmet_fc_fcp_iod *fod = queue->fod; 630 int i; 631 632 for (i = 0; i < queue->sqsize; fod++, i++) { 633 INIT_WORK(&fod->defer_work, nvmet_fc_fcp_rqst_op_defer_work); 634 fod->tgtport = tgtport; 635 fod->queue = queue; 636 fod->active = false; 637 fod->abort = false; 638 fod->aborted = false; 639 fod->fcpreq = NULL; 640 list_add_tail(&fod->fcp_list, &queue->fod_list); 641 spin_lock_init(&fod->flock); 642 643 fod->rspdma = fc_dma_map_single(tgtport->dev, &fod->rspiubuf, 644 sizeof(fod->rspiubuf), DMA_TO_DEVICE); 645 if (fc_dma_mapping_error(tgtport->dev, fod->rspdma)) { 646 list_del(&fod->fcp_list); 647 for (fod--, i--; i >= 0; fod--, i--) { 648 fc_dma_unmap_single(tgtport->dev, fod->rspdma, 649 sizeof(fod->rspiubuf), 650 DMA_TO_DEVICE); 651 fod->rspdma = 0L; 652 list_del(&fod->fcp_list); 653 } 654 655 return; 656 } 657 } 658 } 659 660 static void 661 nvmet_fc_destroy_fcp_iodlist(struct nvmet_fc_tgtport *tgtport, 662 struct nvmet_fc_tgt_queue *queue) 663 { 664 struct nvmet_fc_fcp_iod *fod = queue->fod; 665 int i; 666 667 for (i = 0; i < queue->sqsize; fod++, i++) { 668 if (fod->rspdma) 669 fc_dma_unmap_single(tgtport->dev, fod->rspdma, 670 sizeof(fod->rspiubuf), DMA_TO_DEVICE); 671 } 672 } 673 674 static struct nvmet_fc_fcp_iod * 675 nvmet_fc_alloc_fcp_iod(struct nvmet_fc_tgt_queue *queue) 676 { 677 struct nvmet_fc_fcp_iod *fod; 678 679 lockdep_assert_held(&queue->qlock); 680 681 fod = list_first_entry_or_null(&queue->fod_list, 682 struct nvmet_fc_fcp_iod, fcp_list); 683 if (fod) { 684 list_del(&fod->fcp_list); 685 fod->active = true; 686 /* 687 * no queue reference is taken, as it was taken by the 688 * queue lookup just prior to the allocation. The iod 689 * will "inherit" that reference. 690 */ 691 } 692 return fod; 693 } 694 695 696 static void 697 nvmet_fc_queue_fcp_req(struct nvmet_fc_tgtport *tgtport, 698 struct nvmet_fc_tgt_queue *queue, 699 struct nvmefc_tgt_fcp_req *fcpreq) 700 { 701 struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private; 702 703 /* 704 * put all admin cmds on hw queue id 0. 
All io commands go to 705 * the respective hw queue based on a modulo basis 706 */ 707 fcpreq->hwqid = queue->qid ? 708 ((queue->qid - 1) % tgtport->ops->max_hw_queues) : 0; 709 710 nvmet_fc_handle_fcp_rqst(tgtport, fod); 711 } 712 713 static void 714 nvmet_fc_fcp_rqst_op_defer_work(struct work_struct *work) 715 { 716 struct nvmet_fc_fcp_iod *fod = 717 container_of(work, struct nvmet_fc_fcp_iod, defer_work); 718 719 /* Submit deferred IO for processing */ 720 nvmet_fc_queue_fcp_req(fod->tgtport, fod->queue, fod->fcpreq); 721 722 } 723 724 static void 725 nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue, 726 struct nvmet_fc_fcp_iod *fod) 727 { 728 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; 729 struct nvmet_fc_tgtport *tgtport = fod->tgtport; 730 struct nvmet_fc_defer_fcp_req *deferfcp; 731 unsigned long flags; 732 733 fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma, 734 sizeof(fod->rspiubuf), DMA_TO_DEVICE); 735 736 fcpreq->nvmet_fc_private = NULL; 737 738 fod->active = false; 739 fod->abort = false; 740 fod->aborted = false; 741 fod->writedataactive = false; 742 fod->fcpreq = NULL; 743 744 tgtport->ops->fcp_req_release(&tgtport->fc_target_port, fcpreq); 745 746 /* release the queue lookup reference on the completed IO */ 747 nvmet_fc_tgt_q_put(queue); 748 749 spin_lock_irqsave(&queue->qlock, flags); 750 deferfcp = list_first_entry_or_null(&queue->pending_cmd_list, 751 struct nvmet_fc_defer_fcp_req, req_list); 752 if (!deferfcp) { 753 list_add_tail(&fod->fcp_list, &fod->queue->fod_list); 754 spin_unlock_irqrestore(&queue->qlock, flags); 755 return; 756 } 757 758 /* Re-use the fod for the next pending cmd that was deferred */ 759 list_del(&deferfcp->req_list); 760 761 fcpreq = deferfcp->fcp_req; 762 763 /* deferfcp can be reused for another IO at a later date */ 764 list_add_tail(&deferfcp->req_list, &queue->avail_defer_list); 765 766 spin_unlock_irqrestore(&queue->qlock, flags); 767 768 /* Save NVME CMD IO in fod */ 769 memcpy(&fod->cmdiubuf, fcpreq->rspaddr, fcpreq->rsplen); 770 771 /* Setup new fcpreq to be processed */ 772 fcpreq->rspaddr = NULL; 773 fcpreq->rsplen = 0; 774 fcpreq->nvmet_fc_private = fod; 775 fod->fcpreq = fcpreq; 776 fod->active = true; 777 778 /* inform LLDD IO is now being processed */ 779 tgtport->ops->defer_rcv(&tgtport->fc_target_port, fcpreq); 780 781 /* 782 * Leave the queue lookup get reference taken when 783 * fod was originally allocated. 
784 */ 785 786 queue_work(queue->work_q, &fod->defer_work); 787 } 788 789 static struct nvmet_fc_tgt_queue * 790 nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc, 791 u16 qid, u16 sqsize) 792 { 793 struct nvmet_fc_tgt_queue *queue; 794 unsigned long flags; 795 int ret; 796 797 if (qid > NVMET_NR_QUEUES) 798 return NULL; 799 800 queue = kzalloc(struct_size(queue, fod, sqsize), GFP_KERNEL); 801 if (!queue) 802 return NULL; 803 804 if (!nvmet_fc_tgt_a_get(assoc)) 805 goto out_free_queue; 806 807 queue->work_q = alloc_workqueue("ntfc%d.%d.%d", 0, 0, 808 assoc->tgtport->fc_target_port.port_num, 809 assoc->a_id, qid); 810 if (!queue->work_q) 811 goto out_a_put; 812 813 queue->qid = qid; 814 queue->sqsize = sqsize; 815 queue->assoc = assoc; 816 INIT_LIST_HEAD(&queue->fod_list); 817 INIT_LIST_HEAD(&queue->avail_defer_list); 818 INIT_LIST_HEAD(&queue->pending_cmd_list); 819 atomic_set(&queue->connected, 0); 820 atomic_set(&queue->sqtail, 0); 821 atomic_set(&queue->rsn, 1); 822 atomic_set(&queue->zrspcnt, 0); 823 spin_lock_init(&queue->qlock); 824 kref_init(&queue->ref); 825 826 nvmet_fc_prep_fcp_iodlist(assoc->tgtport, queue); 827 828 ret = nvmet_sq_init(&queue->nvme_sq); 829 if (ret) 830 goto out_fail_iodlist; 831 832 WARN_ON(assoc->queues[qid]); 833 spin_lock_irqsave(&assoc->tgtport->lock, flags); 834 assoc->queues[qid] = queue; 835 spin_unlock_irqrestore(&assoc->tgtport->lock, flags); 836 837 return queue; 838 839 out_fail_iodlist: 840 nvmet_fc_destroy_fcp_iodlist(assoc->tgtport, queue); 841 destroy_workqueue(queue->work_q); 842 out_a_put: 843 nvmet_fc_tgt_a_put(assoc); 844 out_free_queue: 845 kfree(queue); 846 return NULL; 847 } 848 849 850 static void 851 nvmet_fc_tgt_queue_free(struct kref *ref) 852 { 853 struct nvmet_fc_tgt_queue *queue = 854 container_of(ref, struct nvmet_fc_tgt_queue, ref); 855 unsigned long flags; 856 857 spin_lock_irqsave(&queue->assoc->tgtport->lock, flags); 858 queue->assoc->queues[queue->qid] = NULL; 859 spin_unlock_irqrestore(&queue->assoc->tgtport->lock, flags); 860 861 nvmet_fc_destroy_fcp_iodlist(queue->assoc->tgtport, queue); 862 863 nvmet_fc_tgt_a_put(queue->assoc); 864 865 destroy_workqueue(queue->work_q); 866 867 kfree(queue); 868 } 869 870 static void 871 nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue) 872 { 873 kref_put(&queue->ref, nvmet_fc_tgt_queue_free); 874 } 875 876 static int 877 nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue) 878 { 879 return kref_get_unless_zero(&queue->ref); 880 } 881 882 883 static void 884 nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue) 885 { 886 struct nvmet_fc_tgtport *tgtport = queue->assoc->tgtport; 887 struct nvmet_fc_fcp_iod *fod = queue->fod; 888 struct nvmet_fc_defer_fcp_req *deferfcp, *tempptr; 889 unsigned long flags; 890 int i; 891 bool disconnect; 892 893 disconnect = atomic_xchg(&queue->connected, 0); 894 895 /* if not connected, nothing to do */ 896 if (!disconnect) 897 return; 898 899 spin_lock_irqsave(&queue->qlock, flags); 900 /* abort outstanding io's */ 901 for (i = 0; i < queue->sqsize; fod++, i++) { 902 if (fod->active) { 903 spin_lock(&fod->flock); 904 fod->abort = true; 905 /* 906 * only call lldd abort routine if waiting for 907 * writedata. other outstanding ops should finish 908 * on their own. 
909 */ 910 if (fod->writedataactive) { 911 fod->aborted = true; 912 spin_unlock(&fod->flock); 913 tgtport->ops->fcp_abort( 914 &tgtport->fc_target_port, fod->fcpreq); 915 } else 916 spin_unlock(&fod->flock); 917 } 918 } 919 920 /* Cleanup defer'ed IOs in queue */ 921 list_for_each_entry_safe(deferfcp, tempptr, &queue->avail_defer_list, 922 req_list) { 923 list_del(&deferfcp->req_list); 924 kfree(deferfcp); 925 } 926 927 for (;;) { 928 deferfcp = list_first_entry_or_null(&queue->pending_cmd_list, 929 struct nvmet_fc_defer_fcp_req, req_list); 930 if (!deferfcp) 931 break; 932 933 list_del(&deferfcp->req_list); 934 spin_unlock_irqrestore(&queue->qlock, flags); 935 936 tgtport->ops->defer_rcv(&tgtport->fc_target_port, 937 deferfcp->fcp_req); 938 939 tgtport->ops->fcp_abort(&tgtport->fc_target_port, 940 deferfcp->fcp_req); 941 942 tgtport->ops->fcp_req_release(&tgtport->fc_target_port, 943 deferfcp->fcp_req); 944 945 /* release the queue lookup reference */ 946 nvmet_fc_tgt_q_put(queue); 947 948 kfree(deferfcp); 949 950 spin_lock_irqsave(&queue->qlock, flags); 951 } 952 spin_unlock_irqrestore(&queue->qlock, flags); 953 954 flush_workqueue(queue->work_q); 955 956 nvmet_sq_destroy(&queue->nvme_sq); 957 958 nvmet_fc_tgt_q_put(queue); 959 } 960 961 static struct nvmet_fc_tgt_queue * 962 nvmet_fc_find_target_queue(struct nvmet_fc_tgtport *tgtport, 963 u64 connection_id) 964 { 965 struct nvmet_fc_tgt_assoc *assoc; 966 struct nvmet_fc_tgt_queue *queue; 967 u64 association_id = nvmet_fc_getassociationid(connection_id); 968 u16 qid = nvmet_fc_getqueueid(connection_id); 969 unsigned long flags; 970 971 if (qid > NVMET_NR_QUEUES) 972 return NULL; 973 974 spin_lock_irqsave(&tgtport->lock, flags); 975 list_for_each_entry(assoc, &tgtport->assoc_list, a_list) { 976 if (association_id == assoc->association_id) { 977 queue = assoc->queues[qid]; 978 if (queue && 979 (!atomic_read(&queue->connected) || 980 !nvmet_fc_tgt_q_get(queue))) 981 queue = NULL; 982 spin_unlock_irqrestore(&tgtport->lock, flags); 983 return queue; 984 } 985 } 986 spin_unlock_irqrestore(&tgtport->lock, flags); 987 return NULL; 988 } 989 990 static void 991 nvmet_fc_hostport_free(struct kref *ref) 992 { 993 struct nvmet_fc_hostport *hostport = 994 container_of(ref, struct nvmet_fc_hostport, ref); 995 struct nvmet_fc_tgtport *tgtport = hostport->tgtport; 996 unsigned long flags; 997 998 spin_lock_irqsave(&tgtport->lock, flags); 999 list_del(&hostport->host_list); 1000 spin_unlock_irqrestore(&tgtport->lock, flags); 1001 if (tgtport->ops->host_release && hostport->invalid) 1002 tgtport->ops->host_release(hostport->hosthandle); 1003 kfree(hostport); 1004 nvmet_fc_tgtport_put(tgtport); 1005 } 1006 1007 static void 1008 nvmet_fc_hostport_put(struct nvmet_fc_hostport *hostport) 1009 { 1010 kref_put(&hostport->ref, nvmet_fc_hostport_free); 1011 } 1012 1013 static int 1014 nvmet_fc_hostport_get(struct nvmet_fc_hostport *hostport) 1015 { 1016 return kref_get_unless_zero(&hostport->ref); 1017 } 1018 1019 static void 1020 nvmet_fc_free_hostport(struct nvmet_fc_hostport *hostport) 1021 { 1022 /* if LLDD not implemented, leave as NULL */ 1023 if (!hostport->hosthandle) 1024 return; 1025 1026 nvmet_fc_hostport_put(hostport); 1027 } 1028 1029 static struct nvmet_fc_hostport * 1030 nvmet_fc_alloc_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle) 1031 { 1032 struct nvmet_fc_hostport *newhost, *host, *match = NULL; 1033 unsigned long flags; 1034 1035 /* if LLDD not implemented, leave as NULL */ 1036 if (!hosthandle) 1037 return NULL; 1038 1039 /* 
take reference for what will be the newly allocated hostport */ 1040 if (!nvmet_fc_tgtport_get(tgtport)) 1041 return ERR_PTR(-EINVAL); 1042 1043 newhost = kzalloc(sizeof(*newhost), GFP_KERNEL); 1044 if (!newhost) { 1045 spin_lock_irqsave(&tgtport->lock, flags); 1046 list_for_each_entry(host, &tgtport->host_list, host_list) { 1047 if (host->hosthandle == hosthandle && !host->invalid) { 1048 if (nvmet_fc_hostport_get(host)) { 1049 match = host; 1050 break; 1051 } 1052 } 1053 } 1054 spin_unlock_irqrestore(&tgtport->lock, flags); 1055 /* no allocation - release reference */ 1056 nvmet_fc_tgtport_put(tgtport); 1057 return (match) ? match : ERR_PTR(-ENOMEM); 1058 } 1059 1060 newhost->tgtport = tgtport; 1061 newhost->hosthandle = hosthandle; 1062 INIT_LIST_HEAD(&newhost->host_list); 1063 kref_init(&newhost->ref); 1064 1065 spin_lock_irqsave(&tgtport->lock, flags); 1066 list_for_each_entry(host, &tgtport->host_list, host_list) { 1067 if (host->hosthandle == hosthandle && !host->invalid) { 1068 if (nvmet_fc_hostport_get(host)) { 1069 match = host; 1070 break; 1071 } 1072 } 1073 } 1074 if (match) { 1075 kfree(newhost); 1076 newhost = NULL; 1077 /* releasing allocation - release reference */ 1078 nvmet_fc_tgtport_put(tgtport); 1079 } else 1080 list_add_tail(&newhost->host_list, &tgtport->host_list); 1081 spin_unlock_irqrestore(&tgtport->lock, flags); 1082 1083 return (match) ? match : newhost; 1084 } 1085 1086 static void 1087 nvmet_fc_delete_assoc(struct work_struct *work) 1088 { 1089 struct nvmet_fc_tgt_assoc *assoc = 1090 container_of(work, struct nvmet_fc_tgt_assoc, del_work); 1091 1092 nvmet_fc_delete_target_assoc(assoc); 1093 atomic_set(&assoc->del_work_active, 0); 1094 nvmet_fc_tgt_a_put(assoc); 1095 } 1096 1097 static struct nvmet_fc_tgt_assoc * 1098 nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport, void *hosthandle) 1099 { 1100 struct nvmet_fc_tgt_assoc *assoc, *tmpassoc; 1101 unsigned long flags; 1102 u64 ran; 1103 int idx; 1104 bool needrandom = true; 1105 1106 assoc = kzalloc(sizeof(*assoc), GFP_KERNEL); 1107 if (!assoc) 1108 return NULL; 1109 1110 idx = ida_simple_get(&tgtport->assoc_cnt, 0, 0, GFP_KERNEL); 1111 if (idx < 0) 1112 goto out_free_assoc; 1113 1114 if (!nvmet_fc_tgtport_get(tgtport)) 1115 goto out_ida; 1116 1117 assoc->hostport = nvmet_fc_alloc_hostport(tgtport, hosthandle); 1118 if (IS_ERR(assoc->hostport)) 1119 goto out_put; 1120 1121 assoc->tgtport = tgtport; 1122 assoc->a_id = idx; 1123 INIT_LIST_HEAD(&assoc->a_list); 1124 kref_init(&assoc->ref); 1125 INIT_WORK(&assoc->del_work, nvmet_fc_delete_assoc); 1126 atomic_set(&assoc->del_work_active, 0); 1127 atomic_set(&assoc->terminating, 0); 1128 1129 while (needrandom) { 1130 get_random_bytes(&ran, sizeof(ran) - BYTES_FOR_QID); 1131 ran = ran << BYTES_FOR_QID_SHIFT; 1132 1133 spin_lock_irqsave(&tgtport->lock, flags); 1134 needrandom = false; 1135 list_for_each_entry(tmpassoc, &tgtport->assoc_list, a_list) { 1136 if (ran == tmpassoc->association_id) { 1137 needrandom = true; 1138 break; 1139 } 1140 } 1141 if (!needrandom) { 1142 assoc->association_id = ran; 1143 list_add_tail(&assoc->a_list, &tgtport->assoc_list); 1144 } 1145 spin_unlock_irqrestore(&tgtport->lock, flags); 1146 } 1147 1148 return assoc; 1149 1150 out_put: 1151 nvmet_fc_tgtport_put(tgtport); 1152 out_ida: 1153 ida_simple_remove(&tgtport->assoc_cnt, idx); 1154 out_free_assoc: 1155 kfree(assoc); 1156 return NULL; 1157 } 1158 1159 static void 1160 nvmet_fc_target_assoc_free(struct kref *ref) 1161 { 1162 struct nvmet_fc_tgt_assoc *assoc = 1163 
container_of(ref, struct nvmet_fc_tgt_assoc, ref); 1164 struct nvmet_fc_tgtport *tgtport = assoc->tgtport; 1165 struct nvmet_fc_ls_iod *oldls; 1166 unsigned long flags; 1167 1168 /* Send Disconnect now that all i/o has completed */ 1169 nvmet_fc_xmt_disconnect_assoc(assoc); 1170 1171 nvmet_fc_free_hostport(assoc->hostport); 1172 spin_lock_irqsave(&tgtport->lock, flags); 1173 list_del(&assoc->a_list); 1174 oldls = assoc->rcv_disconn; 1175 spin_unlock_irqrestore(&tgtport->lock, flags); 1176 /* if pending Rcv Disconnect Association LS, send rsp now */ 1177 if (oldls) 1178 nvmet_fc_xmt_ls_rsp(tgtport, oldls); 1179 ida_simple_remove(&tgtport->assoc_cnt, assoc->a_id); 1180 dev_info(tgtport->dev, 1181 "{%d:%d} Association freed\n", 1182 tgtport->fc_target_port.port_num, assoc->a_id); 1183 kfree(assoc); 1184 nvmet_fc_tgtport_put(tgtport); 1185 } 1186 1187 static void 1188 nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc) 1189 { 1190 kref_put(&assoc->ref, nvmet_fc_target_assoc_free); 1191 } 1192 1193 static int 1194 nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc) 1195 { 1196 return kref_get_unless_zero(&assoc->ref); 1197 } 1198 1199 static void 1200 nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc) 1201 { 1202 struct nvmet_fc_tgtport *tgtport = assoc->tgtport; 1203 struct nvmet_fc_tgt_queue *queue; 1204 unsigned long flags; 1205 int i, terminating; 1206 1207 terminating = atomic_xchg(&assoc->terminating, 1); 1208 1209 /* if already terminating, do nothing */ 1210 if (terminating) 1211 return; 1212 1213 spin_lock_irqsave(&tgtport->lock, flags); 1214 for (i = NVMET_NR_QUEUES; i >= 0; i--) { 1215 queue = assoc->queues[i]; 1216 if (queue) { 1217 if (!nvmet_fc_tgt_q_get(queue)) 1218 continue; 1219 spin_unlock_irqrestore(&tgtport->lock, flags); 1220 nvmet_fc_delete_target_queue(queue); 1221 nvmet_fc_tgt_q_put(queue); 1222 spin_lock_irqsave(&tgtport->lock, flags); 1223 } 1224 } 1225 spin_unlock_irqrestore(&tgtport->lock, flags); 1226 1227 dev_info(tgtport->dev, 1228 "{%d:%d} Association deleted\n", 1229 tgtport->fc_target_port.port_num, assoc->a_id); 1230 1231 nvmet_fc_tgt_a_put(assoc); 1232 } 1233 1234 static struct nvmet_fc_tgt_assoc * 1235 nvmet_fc_find_target_assoc(struct nvmet_fc_tgtport *tgtport, 1236 u64 association_id) 1237 { 1238 struct nvmet_fc_tgt_assoc *assoc; 1239 struct nvmet_fc_tgt_assoc *ret = NULL; 1240 unsigned long flags; 1241 1242 spin_lock_irqsave(&tgtport->lock, flags); 1243 list_for_each_entry(assoc, &tgtport->assoc_list, a_list) { 1244 if (association_id == assoc->association_id) { 1245 ret = assoc; 1246 nvmet_fc_tgt_a_get(assoc); 1247 break; 1248 } 1249 } 1250 spin_unlock_irqrestore(&tgtport->lock, flags); 1251 1252 return ret; 1253 } 1254 1255 static void 1256 nvmet_fc_portentry_bind(struct nvmet_fc_tgtport *tgtport, 1257 struct nvmet_fc_port_entry *pe, 1258 struct nvmet_port *port) 1259 { 1260 lockdep_assert_held(&nvmet_fc_tgtlock); 1261 1262 pe->tgtport = tgtport; 1263 tgtport->pe = pe; 1264 1265 pe->port = port; 1266 port->priv = pe; 1267 1268 pe->node_name = tgtport->fc_target_port.node_name; 1269 pe->port_name = tgtport->fc_target_port.port_name; 1270 INIT_LIST_HEAD(&pe->pe_list); 1271 1272 list_add_tail(&pe->pe_list, &nvmet_fc_portentry_list); 1273 } 1274 1275 static void 1276 nvmet_fc_portentry_unbind(struct nvmet_fc_port_entry *pe) 1277 { 1278 unsigned long flags; 1279 1280 spin_lock_irqsave(&nvmet_fc_tgtlock, flags); 1281 if (pe->tgtport) 1282 pe->tgtport->pe = NULL; 1283 list_del(&pe->pe_list); 1284 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); 
}

/*
 * called when a targetport deregisters. Breaks the relationship
 * with the nvmet port, but leaves the port_entry in place so that
 * re-registration can resume operation.
 */
static void
nvmet_fc_portentry_unbind_tgt(struct nvmet_fc_tgtport *tgtport)
{
	struct nvmet_fc_port_entry *pe;
	unsigned long flags;

	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	pe = tgtport->pe;
	if (pe)
		pe->tgtport = NULL;
	tgtport->pe = NULL;
	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
}

/*
 * called when a new targetport is registered. Looks in the
 * existing nvmet port_entries to see if the nvmet layer is
 * configured for the targetport's wwn's. (the targetport existed,
 * nvmet configured, the lldd unregistered the tgtport, and is now
 * reregistering the same targetport). If so, set the nvmet port
 * entry on the targetport.
 */
static void
nvmet_fc_portentry_rebind_tgt(struct nvmet_fc_tgtport *tgtport)
{
	struct nvmet_fc_port_entry *pe;
	unsigned long flags;

	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	list_for_each_entry(pe, &nvmet_fc_portentry_list, pe_list) {
		if (tgtport->fc_target_port.node_name == pe->node_name &&
		    tgtport->fc_target_port.port_name == pe->port_name) {
			WARN_ON(pe->tgtport);
			tgtport->pe = pe;
			pe->tgtport = tgtport;
			break;
		}
	}
	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
}

/**
 * nvmet_fc_register_targetport - transport entry point called by an
 *                              LLDD to register the existence of a local
 *                              NVME subsystem FC port.
 * @pinfo:     pointer to information about the port to be registered
 * @template:  LLDD entrypoints and operational parameters for the port
 * @dev:       physical hardware device node port corresponds to. Will be
 *             used for DMA mappings
 * @portptr:   pointer to a local port pointer. Upon success, the routine
 *             will allocate a nvmet_fc_target_port structure and place its
 *             address in the local port pointer. Upon failure, local port
 *             pointer will be set to NULL.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
			struct nvmet_fc_target_template *template,
			struct device *dev,
			struct nvmet_fc_target_port **portptr)
{
	struct nvmet_fc_tgtport *newrec;
	unsigned long flags;
	int ret, idx;

	if (!template->xmt_ls_rsp || !template->fcp_op ||
	    !template->fcp_abort ||
	    !template->fcp_req_release || !template->targetport_delete ||
	    !template->max_hw_queues || !template->max_sgl_segments ||
	    !template->max_dif_sgl_segments || !template->dma_boundary) {
		ret = -EINVAL;
		goto out_regtgt_failed;
	}

	newrec = kzalloc((sizeof(*newrec) + template->target_priv_sz),
			 GFP_KERNEL);
	if (!newrec) {
		ret = -ENOMEM;
		goto out_regtgt_failed;
	}

	idx = ida_simple_get(&nvmet_fc_tgtport_cnt, 0, 0, GFP_KERNEL);
	if (idx < 0) {
		ret = -ENOSPC;
		goto out_fail_kfree;
	}

	if (!get_device(dev) && dev) {
		ret = -ENODEV;
		goto out_ida_put;
	}

	newrec->fc_target_port.node_name = pinfo->node_name;
	newrec->fc_target_port.port_name = pinfo->port_name;
	if (template->target_priv_sz)
		newrec->fc_target_port.private = &newrec[1];
	else
		newrec->fc_target_port.private = NULL;
	newrec->fc_target_port.port_id = pinfo->port_id;
	newrec->fc_target_port.port_num = idx;
	INIT_LIST_HEAD(&newrec->tgt_list);
	newrec->dev = dev;
	newrec->ops = template;
	spin_lock_init(&newrec->lock);
	INIT_LIST_HEAD(&newrec->ls_rcv_list);
	INIT_LIST_HEAD(&newrec->ls_req_list);
	INIT_LIST_HEAD(&newrec->ls_busylist);
	INIT_LIST_HEAD(&newrec->assoc_list);
	INIT_LIST_HEAD(&newrec->host_list);
	kref_init(&newrec->ref);
	ida_init(&newrec->assoc_cnt);
	newrec->max_sg_cnt = template->max_sgl_segments;

	ret = nvmet_fc_alloc_ls_iodlist(newrec);
	if (ret) {
		ret = -ENOMEM;
		goto out_free_newrec;
	}

	nvmet_fc_portentry_rebind_tgt(newrec);

	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	list_add_tail(&newrec->tgt_list, &nvmet_fc_target_list);
	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);

	*portptr = &newrec->fc_target_port;
	return 0;

out_free_newrec:
	put_device(dev);
out_ida_put:
	ida_simple_remove(&nvmet_fc_tgtport_cnt, idx);
out_fail_kfree:
	kfree(newrec);
out_regtgt_failed:
	*portptr = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(nvmet_fc_register_targetport);
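
/*
 * Illustrative (hypothetical) LLDD usage of the registration entry point,
 * assuming an LLDD-defined "lport" that already holds a filled-in
 * nvmet_fc_port_info, nvmet_fc_target_template, and backing pci_dev:
 *
 *	struct nvmet_fc_target_port *tgt_port;
 *	int err;
 *
 *	err = nvmet_fc_register_targetport(&lport->port_info,
 *					   &lport->tgt_template,
 *					   &lport->pdev->dev, &tgt_port);
 *	if (err)
 *		return err;	// tgt_port was set to NULL on failure
 *
 * The LLDD would typically hold the returned nvmet_fc_target_port until it
 * calls nvmet_fc_unregister_targetport() and the targetport_delete()
 * callback signals that teardown has completed.
 */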


static void
nvmet_fc_free_tgtport(struct kref *ref)
{
	struct nvmet_fc_tgtport *tgtport =
		container_of(ref, struct nvmet_fc_tgtport, ref);
	struct device *dev = tgtport->dev;
	unsigned long flags;

	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	list_del(&tgtport->tgt_list);
	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);

	nvmet_fc_free_ls_iodlist(tgtport);

	/* let the LLDD know we've finished tearing it down */
	tgtport->ops->targetport_delete(&tgtport->fc_target_port);

	ida_simple_remove(&nvmet_fc_tgtport_cnt,
			tgtport->fc_target_port.port_num);

	ida_destroy(&tgtport->assoc_cnt);

	kfree(tgtport);

	put_device(dev);
}

static void
nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport)
{
	kref_put(&tgtport->ref, nvmet_fc_free_tgtport);
}

static int
nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport)
{
	return
kref_get_unless_zero(&tgtport->ref); 1473 } 1474 1475 static void 1476 __nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport) 1477 { 1478 struct nvmet_fc_tgt_assoc *assoc, *next; 1479 unsigned long flags; 1480 int ret; 1481 1482 spin_lock_irqsave(&tgtport->lock, flags); 1483 list_for_each_entry_safe(assoc, next, 1484 &tgtport->assoc_list, a_list) { 1485 if (!nvmet_fc_tgt_a_get(assoc)) 1486 continue; 1487 ret = atomic_cmpxchg(&assoc->del_work_active, 0, 1); 1488 if (ret == 0) { 1489 if (!schedule_work(&assoc->del_work)) 1490 nvmet_fc_tgt_a_put(assoc); 1491 } else { 1492 /* already deleting - release local reference */ 1493 nvmet_fc_tgt_a_put(assoc); 1494 } 1495 } 1496 spin_unlock_irqrestore(&tgtport->lock, flags); 1497 } 1498 1499 /** 1500 * nvmet_fc_invalidate_host - transport entry point called by an LLDD 1501 * to remove references to a hosthandle for LS's. 1502 * 1503 * The nvmet-fc layer ensures that any references to the hosthandle 1504 * on the targetport are forgotten (set to NULL). The LLDD will 1505 * typically call this when a login with a remote host port has been 1506 * lost, thus LS's for the remote host port are no longer possible. 1507 * 1508 * If an LS request is outstanding to the targetport/hosthandle (or 1509 * issued concurrently with the call to invalidate the host), the 1510 * LLDD is responsible for terminating/aborting the LS and completing 1511 * the LS request. It is recommended that these terminations/aborts 1512 * occur after calling to invalidate the host handle to avoid additional 1513 * retries by the nvmet-fc transport. The nvmet-fc transport may 1514 * continue to reference host handle while it cleans up outstanding 1515 * NVME associations. The nvmet-fc transport will call the 1516 * ops->host_release() callback to notify the LLDD that all references 1517 * are complete and the related host handle can be recovered. 1518 * Note: if there are no references, the callback may be called before 1519 * the invalidate host call returns. 1520 * 1521 * @target_port: pointer to the (registered) target port that a prior 1522 * LS was received on and which supplied the transport the 1523 * hosthandle. 1524 * @hosthandle: the handle (pointer) that represents the host port 1525 * that no longer has connectivity and that LS's should 1526 * no longer be directed to. 
1527 */ 1528 void 1529 nvmet_fc_invalidate_host(struct nvmet_fc_target_port *target_port, 1530 void *hosthandle) 1531 { 1532 struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port); 1533 struct nvmet_fc_tgt_assoc *assoc, *next; 1534 unsigned long flags; 1535 bool noassoc = true; 1536 int ret; 1537 1538 spin_lock_irqsave(&tgtport->lock, flags); 1539 list_for_each_entry_safe(assoc, next, 1540 &tgtport->assoc_list, a_list) { 1541 if (!assoc->hostport || 1542 assoc->hostport->hosthandle != hosthandle) 1543 continue; 1544 if (!nvmet_fc_tgt_a_get(assoc)) 1545 continue; 1546 assoc->hostport->invalid = 1; 1547 noassoc = false; 1548 ret = atomic_cmpxchg(&assoc->del_work_active, 0, 1); 1549 if (ret == 0) { 1550 if (!schedule_work(&assoc->del_work)) 1551 nvmet_fc_tgt_a_put(assoc); 1552 } else { 1553 /* already deleting - release local reference */ 1554 nvmet_fc_tgt_a_put(assoc); 1555 } 1556 } 1557 spin_unlock_irqrestore(&tgtport->lock, flags); 1558 1559 /* if there's nothing to wait for - call the callback */ 1560 if (noassoc && tgtport->ops->host_release) 1561 tgtport->ops->host_release(hosthandle); 1562 } 1563 EXPORT_SYMBOL_GPL(nvmet_fc_invalidate_host); 1564 1565 /* 1566 * nvmet layer has called to terminate an association 1567 */ 1568 static void 1569 nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl) 1570 { 1571 struct nvmet_fc_tgtport *tgtport, *next; 1572 struct nvmet_fc_tgt_assoc *assoc; 1573 struct nvmet_fc_tgt_queue *queue; 1574 unsigned long flags; 1575 bool found_ctrl = false; 1576 int ret; 1577 1578 /* this is a bit ugly, but don't want to make locks layered */ 1579 spin_lock_irqsave(&nvmet_fc_tgtlock, flags); 1580 list_for_each_entry_safe(tgtport, next, &nvmet_fc_target_list, 1581 tgt_list) { 1582 if (!nvmet_fc_tgtport_get(tgtport)) 1583 continue; 1584 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); 1585 1586 spin_lock_irqsave(&tgtport->lock, flags); 1587 list_for_each_entry(assoc, &tgtport->assoc_list, a_list) { 1588 queue = assoc->queues[0]; 1589 if (queue && queue->nvme_sq.ctrl == ctrl) { 1590 if (nvmet_fc_tgt_a_get(assoc)) 1591 found_ctrl = true; 1592 break; 1593 } 1594 } 1595 spin_unlock_irqrestore(&tgtport->lock, flags); 1596 1597 nvmet_fc_tgtport_put(tgtport); 1598 1599 if (found_ctrl) { 1600 ret = atomic_cmpxchg(&assoc->del_work_active, 0, 1); 1601 if (ret == 0) { 1602 if (!schedule_work(&assoc->del_work)) 1603 nvmet_fc_tgt_a_put(assoc); 1604 } else { 1605 /* already deleting - release local reference */ 1606 nvmet_fc_tgt_a_put(assoc); 1607 } 1608 return; 1609 } 1610 1611 spin_lock_irqsave(&nvmet_fc_tgtlock, flags); 1612 } 1613 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); 1614 } 1615 1616 /** 1617 * nvme_fc_unregister_targetport - transport entry point called by an 1618 * LLDD to deregister/remove a previously 1619 * registered a local NVME subsystem FC port. 1620 * @target_port: pointer to the (registered) target port that is to be 1621 * deregistered. 1622 * 1623 * Returns: 1624 * a completion status. Must be 0 upon success; a negative errno 1625 * (ex: -ENXIO) upon failure. 1626 */ 1627 int 1628 nvmet_fc_unregister_targetport(struct nvmet_fc_target_port *target_port) 1629 { 1630 struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port); 1631 1632 nvmet_fc_portentry_unbind_tgt(tgtport); 1633 1634 /* terminate any outstanding associations */ 1635 __nvmet_fc_free_assocs(tgtport); 1636 1637 /* 1638 * should terminate LS's as well. However, LS's will be generated 1639 * at the tail end of association termination, so they likely don't 1640 * exist yet. 
And even if they did, it's worthwhile to just let 1641 * them finish and targetport ref counting will clean things up. 1642 */ 1643 1644 nvmet_fc_tgtport_put(tgtport); 1645 1646 return 0; 1647 } 1648 EXPORT_SYMBOL_GPL(nvmet_fc_unregister_targetport); 1649 1650 1651 /* ********************** FC-NVME LS RCV Handling ************************* */ 1652 1653 1654 static void 1655 nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport, 1656 struct nvmet_fc_ls_iod *iod) 1657 { 1658 struct fcnvme_ls_cr_assoc_rqst *rqst = &iod->rqstbuf->rq_cr_assoc; 1659 struct fcnvme_ls_cr_assoc_acc *acc = &iod->rspbuf->rsp_cr_assoc; 1660 struct nvmet_fc_tgt_queue *queue; 1661 int ret = 0; 1662 1663 memset(acc, 0, sizeof(*acc)); 1664 1665 /* 1666 * FC-NVME spec changes. There are initiators sending different 1667 * lengths as padding sizes for Create Association Cmd descriptor 1668 * was incorrect. 1669 * Accept anything of "minimum" length. Assume format per 1.15 1670 * spec (with HOSTID reduced to 16 bytes), ignore how long the 1671 * trailing pad length is. 1672 */ 1673 if (iod->rqstdatalen < FCNVME_LSDESC_CRA_RQST_MINLEN) 1674 ret = VERR_CR_ASSOC_LEN; 1675 else if (be32_to_cpu(rqst->desc_list_len) < 1676 FCNVME_LSDESC_CRA_RQST_MIN_LISTLEN) 1677 ret = VERR_CR_ASSOC_RQST_LEN; 1678 else if (rqst->assoc_cmd.desc_tag != 1679 cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD)) 1680 ret = VERR_CR_ASSOC_CMD; 1681 else if (be32_to_cpu(rqst->assoc_cmd.desc_len) < 1682 FCNVME_LSDESC_CRA_CMD_DESC_MIN_DESCLEN) 1683 ret = VERR_CR_ASSOC_CMD_LEN; 1684 else if (!rqst->assoc_cmd.ersp_ratio || 1685 (be16_to_cpu(rqst->assoc_cmd.ersp_ratio) >= 1686 be16_to_cpu(rqst->assoc_cmd.sqsize))) 1687 ret = VERR_ERSP_RATIO; 1688 1689 else { 1690 /* new association w/ admin queue */ 1691 iod->assoc = nvmet_fc_alloc_target_assoc( 1692 tgtport, iod->hosthandle); 1693 if (!iod->assoc) 1694 ret = VERR_ASSOC_ALLOC_FAIL; 1695 else { 1696 queue = nvmet_fc_alloc_target_queue(iod->assoc, 0, 1697 be16_to_cpu(rqst->assoc_cmd.sqsize)); 1698 if (!queue) 1699 ret = VERR_QUEUE_ALLOC_FAIL; 1700 } 1701 } 1702 1703 if (ret) { 1704 dev_err(tgtport->dev, 1705 "Create Association LS failed: %s\n", 1706 validation_errors[ret]); 1707 iod->lsrsp->rsplen = nvme_fc_format_rjt(acc, 1708 sizeof(*acc), rqst->w0.ls_cmd, 1709 FCNVME_RJT_RC_LOGIC, 1710 FCNVME_RJT_EXP_NONE, 0); 1711 return; 1712 } 1713 1714 queue->ersp_ratio = be16_to_cpu(rqst->assoc_cmd.ersp_ratio); 1715 atomic_set(&queue->connected, 1); 1716 queue->sqhd = 0; /* best place to init value */ 1717 1718 dev_info(tgtport->dev, 1719 "{%d:%d} Association created\n", 1720 tgtport->fc_target_port.port_num, iod->assoc->a_id); 1721 1722 /* format a response */ 1723 1724 iod->lsrsp->rsplen = sizeof(*acc); 1725 1726 nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC, 1727 fcnvme_lsdesc_len( 1728 sizeof(struct fcnvme_ls_cr_assoc_acc)), 1729 FCNVME_LS_CREATE_ASSOCIATION); 1730 acc->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID); 1731 acc->associd.desc_len = 1732 fcnvme_lsdesc_len( 1733 sizeof(struct fcnvme_lsdesc_assoc_id)); 1734 acc->associd.association_id = 1735 cpu_to_be64(nvmet_fc_makeconnid(iod->assoc, 0)); 1736 acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID); 1737 acc->connectid.desc_len = 1738 fcnvme_lsdesc_len( 1739 sizeof(struct fcnvme_lsdesc_conn_id)); 1740 acc->connectid.connection_id = acc->associd.association_id; 1741 } 1742 1743 static void 1744 nvmet_fc_ls_create_connection(struct nvmet_fc_tgtport *tgtport, 1745 struct nvmet_fc_ls_iod *iod) 1746 { 1747 struct fcnvme_ls_cr_conn_rqst *rqst = 
&iod->rqstbuf->rq_cr_conn; 1748 struct fcnvme_ls_cr_conn_acc *acc = &iod->rspbuf->rsp_cr_conn; 1749 struct nvmet_fc_tgt_queue *queue; 1750 int ret = 0; 1751 1752 memset(acc, 0, sizeof(*acc)); 1753 1754 if (iod->rqstdatalen < sizeof(struct fcnvme_ls_cr_conn_rqst)) 1755 ret = VERR_CR_CONN_LEN; 1756 else if (rqst->desc_list_len != 1757 fcnvme_lsdesc_len( 1758 sizeof(struct fcnvme_ls_cr_conn_rqst))) 1759 ret = VERR_CR_CONN_RQST_LEN; 1760 else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID)) 1761 ret = VERR_ASSOC_ID; 1762 else if (rqst->associd.desc_len != 1763 fcnvme_lsdesc_len( 1764 sizeof(struct fcnvme_lsdesc_assoc_id))) 1765 ret = VERR_ASSOC_ID_LEN; 1766 else if (rqst->connect_cmd.desc_tag != 1767 cpu_to_be32(FCNVME_LSDESC_CREATE_CONN_CMD)) 1768 ret = VERR_CR_CONN_CMD; 1769 else if (rqst->connect_cmd.desc_len != 1770 fcnvme_lsdesc_len( 1771 sizeof(struct fcnvme_lsdesc_cr_conn_cmd))) 1772 ret = VERR_CR_CONN_CMD_LEN; 1773 else if (!rqst->connect_cmd.ersp_ratio || 1774 (be16_to_cpu(rqst->connect_cmd.ersp_ratio) >= 1775 be16_to_cpu(rqst->connect_cmd.sqsize))) 1776 ret = VERR_ERSP_RATIO; 1777 1778 else { 1779 /* new io queue */ 1780 iod->assoc = nvmet_fc_find_target_assoc(tgtport, 1781 be64_to_cpu(rqst->associd.association_id)); 1782 if (!iod->assoc) 1783 ret = VERR_NO_ASSOC; 1784 else { 1785 queue = nvmet_fc_alloc_target_queue(iod->assoc, 1786 be16_to_cpu(rqst->connect_cmd.qid), 1787 be16_to_cpu(rqst->connect_cmd.sqsize)); 1788 if (!queue) 1789 ret = VERR_QUEUE_ALLOC_FAIL; 1790 1791 /* release get taken in nvmet_fc_find_target_assoc */ 1792 nvmet_fc_tgt_a_put(iod->assoc); 1793 } 1794 } 1795 1796 if (ret) { 1797 dev_err(tgtport->dev, 1798 "Create Connection LS failed: %s\n", 1799 validation_errors[ret]); 1800 iod->lsrsp->rsplen = nvme_fc_format_rjt(acc, 1801 sizeof(*acc), rqst->w0.ls_cmd, 1802 (ret == VERR_NO_ASSOC) ? 
1803 FCNVME_RJT_RC_INV_ASSOC : 1804 FCNVME_RJT_RC_LOGIC, 1805 FCNVME_RJT_EXP_NONE, 0); 1806 return; 1807 } 1808 1809 queue->ersp_ratio = be16_to_cpu(rqst->connect_cmd.ersp_ratio); 1810 atomic_set(&queue->connected, 1); 1811 queue->sqhd = 0; /* best place to init value */ 1812 1813 /* format a response */ 1814 1815 iod->lsrsp->rsplen = sizeof(*acc); 1816 1817 nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC, 1818 fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc)), 1819 FCNVME_LS_CREATE_CONNECTION); 1820 acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID); 1821 acc->connectid.desc_len = 1822 fcnvme_lsdesc_len( 1823 sizeof(struct fcnvme_lsdesc_conn_id)); 1824 acc->connectid.connection_id = 1825 cpu_to_be64(nvmet_fc_makeconnid(iod->assoc, 1826 be16_to_cpu(rqst->connect_cmd.qid))); 1827 } 1828 1829 /* 1830 * Returns true if the LS response is to be transmit 1831 * Returns false if the LS response is to be delayed 1832 */ 1833 static int 1834 nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport, 1835 struct nvmet_fc_ls_iod *iod) 1836 { 1837 struct fcnvme_ls_disconnect_assoc_rqst *rqst = 1838 &iod->rqstbuf->rq_dis_assoc; 1839 struct fcnvme_ls_disconnect_assoc_acc *acc = 1840 &iod->rspbuf->rsp_dis_assoc; 1841 struct nvmet_fc_tgt_assoc *assoc = NULL; 1842 struct nvmet_fc_ls_iod *oldls = NULL; 1843 unsigned long flags; 1844 int ret = 0; 1845 1846 memset(acc, 0, sizeof(*acc)); 1847 1848 ret = nvmefc_vldt_lsreq_discon_assoc(iod->rqstdatalen, rqst); 1849 if (!ret) { 1850 /* match an active association - takes an assoc ref if !NULL */ 1851 assoc = nvmet_fc_find_target_assoc(tgtport, 1852 be64_to_cpu(rqst->associd.association_id)); 1853 iod->assoc = assoc; 1854 if (!assoc) 1855 ret = VERR_NO_ASSOC; 1856 } 1857 1858 if (ret || !assoc) { 1859 dev_err(tgtport->dev, 1860 "Disconnect LS failed: %s\n", 1861 validation_errors[ret]); 1862 iod->lsrsp->rsplen = nvme_fc_format_rjt(acc, 1863 sizeof(*acc), rqst->w0.ls_cmd, 1864 (ret == VERR_NO_ASSOC) ? 1865 FCNVME_RJT_RC_INV_ASSOC : 1866 FCNVME_RJT_RC_LOGIC, 1867 FCNVME_RJT_EXP_NONE, 0); 1868 return true; 1869 } 1870 1871 /* format a response */ 1872 1873 iod->lsrsp->rsplen = sizeof(*acc); 1874 1875 nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC, 1876 fcnvme_lsdesc_len( 1877 sizeof(struct fcnvme_ls_disconnect_assoc_acc)), 1878 FCNVME_LS_DISCONNECT_ASSOC); 1879 1880 /* release get taken in nvmet_fc_find_target_assoc */ 1881 nvmet_fc_tgt_a_put(assoc); 1882 1883 /* 1884 * The rules for LS response says the response cannot 1885 * go back until ABTS's have been sent for all outstanding 1886 * I/O and a Disconnect Association LS has been sent. 1887 * So... save off the Disconnect LS to send the response 1888 * later. If there was a prior LS already saved, replace 1889 * it with the newer one and send a can't perform reject 1890 * on the older one. 
1891 */ 1892 spin_lock_irqsave(&tgtport->lock, flags); 1893 oldls = assoc->rcv_disconn; 1894 assoc->rcv_disconn = iod; 1895 spin_unlock_irqrestore(&tgtport->lock, flags); 1896 1897 nvmet_fc_delete_target_assoc(assoc); 1898 1899 if (oldls) { 1900 dev_info(tgtport->dev, 1901 "{%d:%d} Multiple Disconnect Association LS's " 1902 "received\n", 1903 tgtport->fc_target_port.port_num, assoc->a_id); 1904 /* overwrite good response with bogus failure */ 1905 oldls->lsrsp->rsplen = nvme_fc_format_rjt(oldls->rspbuf, 1906 sizeof(*iod->rspbuf), 1907 /* ok to use rqst, LS is same */ 1908 rqst->w0.ls_cmd, 1909 FCNVME_RJT_RC_UNAB, 1910 FCNVME_RJT_EXP_NONE, 0); 1911 nvmet_fc_xmt_ls_rsp(tgtport, oldls); 1912 } 1913 1914 return false; 1915 } 1916 1917 1918 /* *********************** NVME Ctrl Routines **************************** */ 1919 1920 1921 static void nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req); 1922 1923 static const struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops; 1924 1925 static void 1926 nvmet_fc_xmt_ls_rsp_done(struct nvmefc_ls_rsp *lsrsp) 1927 { 1928 struct nvmet_fc_ls_iod *iod = lsrsp->nvme_fc_private; 1929 struct nvmet_fc_tgtport *tgtport = iod->tgtport; 1930 1931 fc_dma_sync_single_for_cpu(tgtport->dev, iod->rspdma, 1932 sizeof(*iod->rspbuf), DMA_TO_DEVICE); 1933 nvmet_fc_free_ls_iod(tgtport, iod); 1934 nvmet_fc_tgtport_put(tgtport); 1935 } 1936 1937 static void 1938 nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport, 1939 struct nvmet_fc_ls_iod *iod) 1940 { 1941 int ret; 1942 1943 fc_dma_sync_single_for_device(tgtport->dev, iod->rspdma, 1944 sizeof(*iod->rspbuf), DMA_TO_DEVICE); 1945 1946 ret = tgtport->ops->xmt_ls_rsp(&tgtport->fc_target_port, iod->lsrsp); 1947 if (ret) 1948 nvmet_fc_xmt_ls_rsp_done(iod->lsrsp); 1949 } 1950 1951 /* 1952 * Actual processing routine for received FC-NVME LS Requests from the LLD 1953 */ 1954 static void 1955 nvmet_fc_handle_ls_rqst(struct nvmet_fc_tgtport *tgtport, 1956 struct nvmet_fc_ls_iod *iod) 1957 { 1958 struct fcnvme_ls_rqst_w0 *w0 = &iod->rqstbuf->rq_cr_assoc.w0; 1959 bool sendrsp = true; 1960 1961 iod->lsrsp->nvme_fc_private = iod; 1962 iod->lsrsp->rspbuf = iod->rspbuf; 1963 iod->lsrsp->rspdma = iod->rspdma; 1964 iod->lsrsp->done = nvmet_fc_xmt_ls_rsp_done; 1965 /* Be preventative. 
handlers will later set to valid length */ 1966 iod->lsrsp->rsplen = 0; 1967 1968 iod->assoc = NULL; 1969 1970 /* 1971 * handlers: 1972 * parse request input, execute the request, and format the 1973 * LS response 1974 */ 1975 switch (w0->ls_cmd) { 1976 case FCNVME_LS_CREATE_ASSOCIATION: 1977 /* Creates Association and initial Admin Queue/Connection */ 1978 nvmet_fc_ls_create_association(tgtport, iod); 1979 break; 1980 case FCNVME_LS_CREATE_CONNECTION: 1981 /* Creates an IO Queue/Connection */ 1982 nvmet_fc_ls_create_connection(tgtport, iod); 1983 break; 1984 case FCNVME_LS_DISCONNECT_ASSOC: 1985 /* Terminate a Queue/Connection or the Association */ 1986 sendrsp = nvmet_fc_ls_disconnect(tgtport, iod); 1987 break; 1988 default: 1989 iod->lsrsp->rsplen = nvme_fc_format_rjt(iod->rspbuf, 1990 sizeof(*iod->rspbuf), w0->ls_cmd, 1991 FCNVME_RJT_RC_INVAL, FCNVME_RJT_EXP_NONE, 0); 1992 } 1993 1994 if (sendrsp) 1995 nvmet_fc_xmt_ls_rsp(tgtport, iod); 1996 } 1997 1998 /* 1999 * Actual processing routine for received FC-NVME LS Requests from the LLD 2000 */ 2001 static void 2002 nvmet_fc_handle_ls_rqst_work(struct work_struct *work) 2003 { 2004 struct nvmet_fc_ls_iod *iod = 2005 container_of(work, struct nvmet_fc_ls_iod, work); 2006 struct nvmet_fc_tgtport *tgtport = iod->tgtport; 2007 2008 nvmet_fc_handle_ls_rqst(tgtport, iod); 2009 } 2010 2011 2012 /** 2013 * nvmet_fc_rcv_ls_req - transport entry point called by an LLDD 2014 * upon the reception of a NVME LS request. 2015 * 2016 * The nvmet-fc layer will copy payload to an internal structure for 2017 * processing. As such, upon completion of the routine, the LLDD may 2018 * immediately free/reuse the LS request buffer passed in the call. 2019 * 2020 * If this routine returns error, the LLDD should abort the exchange. 2021 * 2022 * @target_port: pointer to the (registered) target port the LS was 2023 * received on. 2024 * @lsrsp: pointer to a lsrsp structure to be used to reference 2025 * the exchange corresponding to the LS. 2026 * @lsreqbuf: pointer to the buffer containing the LS Request 2027 * @lsreqbuf_len: length, in bytes, of the received LS request 2028 */ 2029 int 2030 nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *target_port, 2031 void *hosthandle, 2032 struct nvmefc_ls_rsp *lsrsp, 2033 void *lsreqbuf, u32 lsreqbuf_len) 2034 { 2035 struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port); 2036 struct nvmet_fc_ls_iod *iod; 2037 struct fcnvme_ls_rqst_w0 *w0 = (struct fcnvme_ls_rqst_w0 *)lsreqbuf; 2038 2039 if (lsreqbuf_len > sizeof(union nvmefc_ls_requests)) { 2040 dev_info(tgtport->dev, 2041 "RCV %s LS failed: payload too large (%d)\n", 2042 (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ? 2043 nvmefc_ls_names[w0->ls_cmd] : "", 2044 lsreqbuf_len); 2045 return -E2BIG; 2046 } 2047 2048 if (!nvmet_fc_tgtport_get(tgtport)) { 2049 dev_info(tgtport->dev, 2050 "RCV %s LS failed: target deleting\n", 2051 (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ? 2052 nvmefc_ls_names[w0->ls_cmd] : ""); 2053 return -ESHUTDOWN; 2054 } 2055 2056 iod = nvmet_fc_alloc_ls_iod(tgtport); 2057 if (!iod) { 2058 dev_info(tgtport->dev, 2059 "RCV %s LS failed: context allocation failed\n", 2060 (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ? 
2061 nvmefc_ls_names[w0->ls_cmd] : ""); 2062 nvmet_fc_tgtport_put(tgtport); 2063 return -ENOENT; 2064 } 2065 2066 iod->lsrsp = lsrsp; 2067 iod->fcpreq = NULL; 2068 memcpy(iod->rqstbuf, lsreqbuf, lsreqbuf_len); 2069 iod->rqstdatalen = lsreqbuf_len; 2070 iod->hosthandle = hosthandle; 2071 2072 schedule_work(&iod->work); 2073 2074 return 0; 2075 } 2076 EXPORT_SYMBOL_GPL(nvmet_fc_rcv_ls_req); 2077 2078 2079 /* 2080 * ********************** 2081 * Start of FCP handling 2082 * ********************** 2083 */ 2084 2085 static int 2086 nvmet_fc_alloc_tgt_pgs(struct nvmet_fc_fcp_iod *fod) 2087 { 2088 struct scatterlist *sg; 2089 unsigned int nent; 2090 2091 sg = sgl_alloc(fod->req.transfer_len, GFP_KERNEL, &nent); 2092 if (!sg) 2093 goto out; 2094 2095 fod->data_sg = sg; 2096 fod->data_sg_cnt = nent; 2097 fod->data_sg_cnt = fc_dma_map_sg(fod->tgtport->dev, sg, nent, 2098 ((fod->io_dir == NVMET_FCP_WRITE) ? 2099 DMA_FROM_DEVICE : DMA_TO_DEVICE)); 2100 /* note: write from initiator perspective */ 2101 fod->next_sg = fod->data_sg; 2102 2103 return 0; 2104 2105 out: 2106 return NVME_SC_INTERNAL; 2107 } 2108 2109 static void 2110 nvmet_fc_free_tgt_pgs(struct nvmet_fc_fcp_iod *fod) 2111 { 2112 if (!fod->data_sg || !fod->data_sg_cnt) 2113 return; 2114 2115 fc_dma_unmap_sg(fod->tgtport->dev, fod->data_sg, fod->data_sg_cnt, 2116 ((fod->io_dir == NVMET_FCP_WRITE) ? 2117 DMA_FROM_DEVICE : DMA_TO_DEVICE)); 2118 sgl_free(fod->data_sg); 2119 fod->data_sg = NULL; 2120 fod->data_sg_cnt = 0; 2121 } 2122 2123 2124 static bool 2125 queue_90percent_full(struct nvmet_fc_tgt_queue *q, u32 sqhd) 2126 { 2127 u32 sqtail, used; 2128 2129 /* egad, this is ugly. And sqtail is just a best guess */ 2130 sqtail = atomic_read(&q->sqtail) % q->sqsize; 2131 2132 used = (sqtail < sqhd) ? (sqtail + q->sqsize - sqhd) : (sqtail - sqhd); 2133 return ((used * 10) >= (((u32)(q->sqsize - 1) * 9))); 2134 } 2135 2136 /* 2137 * Prep RSP payload. 2138 * May be a NVMET_FCOP_RSP or NVMET_FCOP_READDATA_RSP op 2139 */ 2140 static void 2141 nvmet_fc_prep_fcp_rsp(struct nvmet_fc_tgtport *tgtport, 2142 struct nvmet_fc_fcp_iod *fod) 2143 { 2144 struct nvme_fc_ersp_iu *ersp = &fod->rspiubuf; 2145 struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common; 2146 struct nvme_completion *cqe = &ersp->cqe; 2147 u32 *cqewd = (u32 *)cqe; 2148 bool send_ersp = false; 2149 u32 rsn, rspcnt, xfr_length; 2150 2151 if (fod->fcpreq->op == NVMET_FCOP_READDATA_RSP) 2152 xfr_length = fod->req.transfer_len; 2153 else 2154 xfr_length = fod->offset; 2155 2156 /* 2157 * check to see if we can send a 0's rsp. 2158 * Note: to send a 0's response, the NVME-FC host transport will 2159 * recreate the CQE. The host transport knows: sq id, SQHD (last 2160 * seen in an ersp), and command_id. Thus it will create a 2161 * zero-filled CQE with those known fields filled in. Transport 2162 * must send an ersp for any condition where the cqe won't match 2163 * this. 2164 * 2165 * Here are the FC-NVME mandated cases where we must send an ersp: 2166 * every N responses, where N=ersp_ratio 2167 * force fabric commands to send ersp's (not in FC-NVME but good 2168 * practice) 2169 * normal cmds: any time status is non-zero, or status is zero 2170 * but words 0 or 1 are non-zero. 
2171 * the SQ is 90% or more full 2172 * the cmd is a fused command 2173 * transferred data length not equal to cmd iu length 2174 */ 2175 rspcnt = atomic_inc_return(&fod->queue->zrspcnt); 2176 if (!(rspcnt % fod->queue->ersp_ratio) || 2177 nvme_is_fabrics((struct nvme_command *) sqe) || 2178 xfr_length != fod->req.transfer_len || 2179 (le16_to_cpu(cqe->status) & 0xFFFE) || cqewd[0] || cqewd[1] || 2180 (sqe->flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND)) || 2181 queue_90percent_full(fod->queue, le16_to_cpu(cqe->sq_head))) 2182 send_ersp = true; 2183 2184 /* re-set the fields */ 2185 fod->fcpreq->rspaddr = ersp; 2186 fod->fcpreq->rspdma = fod->rspdma; 2187 2188 if (!send_ersp) { 2189 memset(ersp, 0, NVME_FC_SIZEOF_ZEROS_RSP); 2190 fod->fcpreq->rsplen = NVME_FC_SIZEOF_ZEROS_RSP; 2191 } else { 2192 ersp->iu_len = cpu_to_be16(sizeof(*ersp)/sizeof(u32)); 2193 rsn = atomic_inc_return(&fod->queue->rsn); 2194 ersp->rsn = cpu_to_be32(rsn); 2195 ersp->xfrd_len = cpu_to_be32(xfr_length); 2196 fod->fcpreq->rsplen = sizeof(*ersp); 2197 } 2198 2199 fc_dma_sync_single_for_device(tgtport->dev, fod->rspdma, 2200 sizeof(fod->rspiubuf), DMA_TO_DEVICE); 2201 } 2202 2203 static void nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq); 2204 2205 static void 2206 nvmet_fc_abort_op(struct nvmet_fc_tgtport *tgtport, 2207 struct nvmet_fc_fcp_iod *fod) 2208 { 2209 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; 2210 2211 /* data no longer needed */ 2212 nvmet_fc_free_tgt_pgs(fod); 2213 2214 /* 2215 * if an ABTS was received or we issued the fcp_abort early 2216 * don't call abort routine again. 2217 */ 2218 /* no need to take lock - lock was taken earlier to get here */ 2219 if (!fod->aborted) 2220 tgtport->ops->fcp_abort(&tgtport->fc_target_port, fcpreq); 2221 2222 nvmet_fc_free_fcp_iod(fod->queue, fod); 2223 } 2224 2225 static void 2226 nvmet_fc_xmt_fcp_rsp(struct nvmet_fc_tgtport *tgtport, 2227 struct nvmet_fc_fcp_iod *fod) 2228 { 2229 int ret; 2230 2231 fod->fcpreq->op = NVMET_FCOP_RSP; 2232 fod->fcpreq->timeout = 0; 2233 2234 nvmet_fc_prep_fcp_rsp(tgtport, fod); 2235 2236 ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq); 2237 if (ret) 2238 nvmet_fc_abort_op(tgtport, fod); 2239 } 2240 2241 static void 2242 nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport, 2243 struct nvmet_fc_fcp_iod *fod, u8 op) 2244 { 2245 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; 2246 struct scatterlist *sg = fod->next_sg; 2247 unsigned long flags; 2248 u32 remaininglen = fod->req.transfer_len - fod->offset; 2249 u32 tlen = 0; 2250 int ret; 2251 2252 fcpreq->op = op; 2253 fcpreq->offset = fod->offset; 2254 fcpreq->timeout = NVME_FC_TGTOP_TIMEOUT_SEC; 2255 2256 /* 2257 * for next sequence: 2258 * break at a sg element boundary 2259 * attempt to keep sequence length capped at 2260 * NVMET_FC_MAX_SEQ_LENGTH but allow sequence to 2261 * be longer if a single sg element is larger 2262 * than that amount. This is done to avoid creating 2263 * a new sg list to use for the tgtport api. 
2264 */ 2265 fcpreq->sg = sg; 2266 fcpreq->sg_cnt = 0; 2267 while (tlen < remaininglen && 2268 fcpreq->sg_cnt < tgtport->max_sg_cnt && 2269 tlen + sg_dma_len(sg) < NVMET_FC_MAX_SEQ_LENGTH) { 2270 fcpreq->sg_cnt++; 2271 tlen += sg_dma_len(sg); 2272 sg = sg_next(sg); 2273 } 2274 if (tlen < remaininglen && fcpreq->sg_cnt == 0) { 2275 fcpreq->sg_cnt++; 2276 tlen += min_t(u32, sg_dma_len(sg), remaininglen); 2277 sg = sg_next(sg); 2278 } 2279 if (tlen < remaininglen) 2280 fod->next_sg = sg; 2281 else 2282 fod->next_sg = NULL; 2283 2284 fcpreq->transfer_length = tlen; 2285 fcpreq->transferred_length = 0; 2286 fcpreq->fcp_error = 0; 2287 fcpreq->rsplen = 0; 2288 2289 /* 2290 * If the last READDATA request: check if LLDD supports 2291 * combined xfr with response. 2292 */ 2293 if ((op == NVMET_FCOP_READDATA) && 2294 ((fod->offset + fcpreq->transfer_length) == fod->req.transfer_len) && 2295 (tgtport->ops->target_features & NVMET_FCTGTFEAT_READDATA_RSP)) { 2296 fcpreq->op = NVMET_FCOP_READDATA_RSP; 2297 nvmet_fc_prep_fcp_rsp(tgtport, fod); 2298 } 2299 2300 ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq); 2301 if (ret) { 2302 /* 2303 * should be ok to set w/o lock as its in the thread of 2304 * execution (not an async timer routine) and doesn't 2305 * contend with any clearing action 2306 */ 2307 fod->abort = true; 2308 2309 if (op == NVMET_FCOP_WRITEDATA) { 2310 spin_lock_irqsave(&fod->flock, flags); 2311 fod->writedataactive = false; 2312 spin_unlock_irqrestore(&fod->flock, flags); 2313 nvmet_req_complete(&fod->req, NVME_SC_INTERNAL); 2314 } else /* NVMET_FCOP_READDATA or NVMET_FCOP_READDATA_RSP */ { 2315 fcpreq->fcp_error = ret; 2316 fcpreq->transferred_length = 0; 2317 nvmet_fc_xmt_fcp_op_done(fod->fcpreq); 2318 } 2319 } 2320 } 2321 2322 static inline bool 2323 __nvmet_fc_fod_op_abort(struct nvmet_fc_fcp_iod *fod, bool abort) 2324 { 2325 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; 2326 struct nvmet_fc_tgtport *tgtport = fod->tgtport; 2327 2328 /* if in the middle of an io and we need to tear down */ 2329 if (abort) { 2330 if (fcpreq->op == NVMET_FCOP_WRITEDATA) { 2331 nvmet_req_complete(&fod->req, NVME_SC_INTERNAL); 2332 return true; 2333 } 2334 2335 nvmet_fc_abort_op(tgtport, fod); 2336 return true; 2337 } 2338 2339 return false; 2340 } 2341 2342 /* 2343 * actual done handler for FCP operations when completed by the lldd 2344 */ 2345 static void 2346 nvmet_fc_fod_op_done(struct nvmet_fc_fcp_iod *fod) 2347 { 2348 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; 2349 struct nvmet_fc_tgtport *tgtport = fod->tgtport; 2350 unsigned long flags; 2351 bool abort; 2352 2353 spin_lock_irqsave(&fod->flock, flags); 2354 abort = fod->abort; 2355 fod->writedataactive = false; 2356 spin_unlock_irqrestore(&fod->flock, flags); 2357 2358 switch (fcpreq->op) { 2359 2360 case NVMET_FCOP_WRITEDATA: 2361 if (__nvmet_fc_fod_op_abort(fod, abort)) 2362 return; 2363 if (fcpreq->fcp_error || 2364 fcpreq->transferred_length != fcpreq->transfer_length) { 2365 spin_lock(&fod->flock); 2366 fod->abort = true; 2367 spin_unlock(&fod->flock); 2368 2369 nvmet_req_complete(&fod->req, NVME_SC_INTERNAL); 2370 return; 2371 } 2372 2373 fod->offset += fcpreq->transferred_length; 2374 if (fod->offset != fod->req.transfer_len) { 2375 spin_lock_irqsave(&fod->flock, flags); 2376 fod->writedataactive = true; 2377 spin_unlock_irqrestore(&fod->flock, flags); 2378 2379 /* transfer the next chunk */ 2380 nvmet_fc_transfer_fcp_data(tgtport, fod, 2381 NVMET_FCOP_WRITEDATA); 2382 return; 2383 } 2384 2385 /* data transfer 
complete, resume with nvmet layer */ 2386 fod->req.execute(&fod->req); 2387 break; 2388 2389 case NVMET_FCOP_READDATA: 2390 case NVMET_FCOP_READDATA_RSP: 2391 if (__nvmet_fc_fod_op_abort(fod, abort)) 2392 return; 2393 if (fcpreq->fcp_error || 2394 fcpreq->transferred_length != fcpreq->transfer_length) { 2395 nvmet_fc_abort_op(tgtport, fod); 2396 return; 2397 } 2398 2399 /* success */ 2400 2401 if (fcpreq->op == NVMET_FCOP_READDATA_RSP) { 2402 /* data no longer needed */ 2403 nvmet_fc_free_tgt_pgs(fod); 2404 nvmet_fc_free_fcp_iod(fod->queue, fod); 2405 return; 2406 } 2407 2408 fod->offset += fcpreq->transferred_length; 2409 if (fod->offset != fod->req.transfer_len) { 2410 /* transfer the next chunk */ 2411 nvmet_fc_transfer_fcp_data(tgtport, fod, 2412 NVMET_FCOP_READDATA); 2413 return; 2414 } 2415 2416 /* data transfer complete, send response */ 2417 2418 /* data no longer needed */ 2419 nvmet_fc_free_tgt_pgs(fod); 2420 2421 nvmet_fc_xmt_fcp_rsp(tgtport, fod); 2422 2423 break; 2424 2425 case NVMET_FCOP_RSP: 2426 if (__nvmet_fc_fod_op_abort(fod, abort)) 2427 return; 2428 nvmet_fc_free_fcp_iod(fod->queue, fod); 2429 break; 2430 2431 default: 2432 break; 2433 } 2434 } 2435 2436 static void 2437 nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq) 2438 { 2439 struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private; 2440 2441 nvmet_fc_fod_op_done(fod); 2442 } 2443 2444 /* 2445 * actual completion handler after execution by the nvmet layer 2446 */ 2447 static void 2448 __nvmet_fc_fcp_nvme_cmd_done(struct nvmet_fc_tgtport *tgtport, 2449 struct nvmet_fc_fcp_iod *fod, int status) 2450 { 2451 struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common; 2452 struct nvme_completion *cqe = &fod->rspiubuf.cqe; 2453 unsigned long flags; 2454 bool abort; 2455 2456 spin_lock_irqsave(&fod->flock, flags); 2457 abort = fod->abort; 2458 spin_unlock_irqrestore(&fod->flock, flags); 2459 2460 /* if we have a CQE, snoop the last sq_head value */ 2461 if (!status) 2462 fod->queue->sqhd = cqe->sq_head; 2463 2464 if (abort) { 2465 nvmet_fc_abort_op(tgtport, fod); 2466 return; 2467 } 2468 2469 /* if an error handling the cmd post initial parsing */ 2470 if (status) { 2471 /* fudge up a failed CQE status for our transport error */ 2472 memset(cqe, 0, sizeof(*cqe)); 2473 cqe->sq_head = fod->queue->sqhd; /* echo last cqe sqhd */ 2474 cqe->sq_id = cpu_to_le16(fod->queue->qid); 2475 cqe->command_id = sqe->command_id; 2476 cqe->status = cpu_to_le16(status); 2477 } else { 2478 2479 /* 2480 * try to push the data even if the SQE status is non-zero. 
2481 * There may be a status where data still was intended to 2482 * be moved 2483 */ 2484 if ((fod->io_dir == NVMET_FCP_READ) && (fod->data_sg_cnt)) { 2485 /* push the data over before sending rsp */ 2486 nvmet_fc_transfer_fcp_data(tgtport, fod, 2487 NVMET_FCOP_READDATA); 2488 return; 2489 } 2490 2491 /* writes & no data - fall thru */ 2492 } 2493 2494 /* data no longer needed */ 2495 nvmet_fc_free_tgt_pgs(fod); 2496 2497 nvmet_fc_xmt_fcp_rsp(tgtport, fod); 2498 } 2499 2500 2501 static void 2502 nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req) 2503 { 2504 struct nvmet_fc_fcp_iod *fod = nvmet_req_to_fod(nvme_req); 2505 struct nvmet_fc_tgtport *tgtport = fod->tgtport; 2506 2507 __nvmet_fc_fcp_nvme_cmd_done(tgtport, fod, 0); 2508 } 2509 2510 2511 /* 2512 * Actual processing routine for received FC-NVME I/O Requests from the LLD 2513 */ 2514 static void 2515 nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport, 2516 struct nvmet_fc_fcp_iod *fod) 2517 { 2518 struct nvme_fc_cmd_iu *cmdiu = &fod->cmdiubuf; 2519 u32 xfrlen = be32_to_cpu(cmdiu->data_len); 2520 int ret; 2521 2522 /* 2523 * if there is no nvmet mapping to the targetport there 2524 * shouldn't be requests. just terminate them. 2525 */ 2526 if (!tgtport->pe) 2527 goto transport_error; 2528 2529 /* 2530 * Fused commands are currently not supported in the linux 2531 * implementation. 2532 * 2533 * As such, the implementation of the FC transport does not 2534 * look at the fused commands and order delivery to the upper 2535 * layer until we have both based on csn. 2536 */ 2537 2538 fod->fcpreq->done = nvmet_fc_xmt_fcp_op_done; 2539 2540 if (cmdiu->flags & FCNVME_CMD_FLAGS_WRITE) { 2541 fod->io_dir = NVMET_FCP_WRITE; 2542 if (!nvme_is_write(&cmdiu->sqe)) 2543 goto transport_error; 2544 } else if (cmdiu->flags & FCNVME_CMD_FLAGS_READ) { 2545 fod->io_dir = NVMET_FCP_READ; 2546 if (nvme_is_write(&cmdiu->sqe)) 2547 goto transport_error; 2548 } else { 2549 fod->io_dir = NVMET_FCP_NODATA; 2550 if (xfrlen) 2551 goto transport_error; 2552 } 2553 2554 fod->req.cmd = &fod->cmdiubuf.sqe; 2555 fod->req.cqe = &fod->rspiubuf.cqe; 2556 fod->req.port = tgtport->pe->port; 2557 2558 /* clear any response payload */ 2559 memset(&fod->rspiubuf, 0, sizeof(fod->rspiubuf)); 2560 2561 fod->data_sg = NULL; 2562 fod->data_sg_cnt = 0; 2563 2564 ret = nvmet_req_init(&fod->req, 2565 &fod->queue->nvme_cq, 2566 &fod->queue->nvme_sq, 2567 &nvmet_fc_tgt_fcp_ops); 2568 if (!ret) { 2569 /* bad SQE content or invalid ctrl state */ 2570 /* nvmet layer has already called op done to send rsp. */ 2571 return; 2572 } 2573 2574 fod->req.transfer_len = xfrlen; 2575 2576 /* keep a running counter of tail position */ 2577 atomic_inc(&fod->queue->sqtail); 2578 2579 if (fod->req.transfer_len) { 2580 ret = nvmet_fc_alloc_tgt_pgs(fod); 2581 if (ret) { 2582 nvmet_req_complete(&fod->req, ret); 2583 return; 2584 } 2585 } 2586 fod->req.sg = fod->data_sg; 2587 fod->req.sg_cnt = fod->data_sg_cnt; 2588 fod->offset = 0; 2589 2590 if (fod->io_dir == NVMET_FCP_WRITE) { 2591 /* pull the data over before invoking nvmet layer */ 2592 nvmet_fc_transfer_fcp_data(tgtport, fod, NVMET_FCOP_WRITEDATA); 2593 return; 2594 } 2595 2596 /* 2597 * Reads or no data: 2598 * 2599 * can invoke the nvmet_layer now. 
If read data, cmd completion will
2600  * push the data
2601  */
2602	fod->req.execute(&fod->req);
2603	return;
2604
2605 transport_error:
2606	nvmet_fc_abort_op(tgtport, fod);
2607 }
2608
2609 /**
2610  * nvmet_fc_rcv_fcp_req - transport entry point called by an LLDD
2611  *                        upon the reception of a NVME FCP CMD IU.
2612  *
2613  * Pass a FC-NVME FCP CMD IU received from the FC link to the nvmet-fc
2614  * layer for processing.
2615  *
2616  * The nvmet_fc layer allocates a local job structure (struct
2617  * nvmet_fc_fcp_iod) from the queue for the io and copies the
2618  * CMD IU buffer to the job structure. As such, on a successful
2619  * completion (returns 0), the LLDD may immediately free/reuse
2620  * the CMD IU buffer passed in the call.
2621  *
2622  * However, due to the packetized nature of FC and the api of the
2623  * FC LLDD - which may issue a hw command to send the response but
2624  * not see the hw completion for that command before a new command
2625  * is asynchronously received - it's possible for a command to be
2626  * received before the LLDD and nvmet_fc have recycled the job
2627  * structure. This gives the appearance of more commands received
2628  * than fit in the sq.
2629  * To alleviate this scenario, a temporary queue is maintained in the
2630  * transport for pending LLDD requests waiting for a queue job structure.
2631  * In these "overrun" cases, a temporary queue element is allocated,
2632  * the LLDD request and CMD IU buffer information are remembered, and
2633  * the routine returns a -EOVERFLOW status. Subsequently, when a queue
2634  * job structure is freed, it is immediately reallocated for anything on
2635  * the pending request list. The LLDD's defer_rcv() callback is called,
2636  * informing the LLDD that it may reuse the CMD IU buffer, and the io
2637  * is then started normally with the transport.
2638  *
2639  * The LLDD, when receiving an -EOVERFLOW completion status, is to treat
2640  * the completion as successful but must not reuse the CMD IU buffer
2641  * until the LLDD's defer_rcv() callback has been called for the
2642  * corresponding struct nvmefc_tgt_fcp_req pointer.
2643  *
2644  * If there is any other condition in which an error occurs, the
2645  * transport will return a non-zero status indicating the error.
2646  * In all cases other than -EOVERFLOW, the transport has not accepted the
2647  * request and the LLDD should abort the exchange.
2648  *
2649  * @target_port: pointer to the (registered) target port the FCP CMD IU
2650  *               was received on.
2651  * @fcpreq: pointer to a fcpreq request structure to be used to reference
2652  *          the exchange corresponding to the FCP Exchange.
2653 * @cmdiubuf: pointer to the buffer containing the FCP CMD IU 2654 * @cmdiubuf_len: length, in bytes, of the received FCP CMD IU 2655 */ 2656 int 2657 nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *target_port, 2658 struct nvmefc_tgt_fcp_req *fcpreq, 2659 void *cmdiubuf, u32 cmdiubuf_len) 2660 { 2661 struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port); 2662 struct nvme_fc_cmd_iu *cmdiu = cmdiubuf; 2663 struct nvmet_fc_tgt_queue *queue; 2664 struct nvmet_fc_fcp_iod *fod; 2665 struct nvmet_fc_defer_fcp_req *deferfcp; 2666 unsigned long flags; 2667 2668 /* validate iu, so the connection id can be used to find the queue */ 2669 if ((cmdiubuf_len != sizeof(*cmdiu)) || 2670 (cmdiu->format_id != NVME_CMD_FORMAT_ID) || 2671 (cmdiu->fc_id != NVME_CMD_FC_ID) || 2672 (be16_to_cpu(cmdiu->iu_len) != (sizeof(*cmdiu)/4))) 2673 return -EIO; 2674 2675 queue = nvmet_fc_find_target_queue(tgtport, 2676 be64_to_cpu(cmdiu->connection_id)); 2677 if (!queue) 2678 return -ENOTCONN; 2679 2680 /* 2681 * note: reference taken by find_target_queue 2682 * After successful fod allocation, the fod will inherit the 2683 * ownership of that reference and will remove the reference 2684 * when the fod is freed. 2685 */ 2686 2687 spin_lock_irqsave(&queue->qlock, flags); 2688 2689 fod = nvmet_fc_alloc_fcp_iod(queue); 2690 if (fod) { 2691 spin_unlock_irqrestore(&queue->qlock, flags); 2692 2693 fcpreq->nvmet_fc_private = fod; 2694 fod->fcpreq = fcpreq; 2695 2696 memcpy(&fod->cmdiubuf, cmdiubuf, cmdiubuf_len); 2697 2698 nvmet_fc_queue_fcp_req(tgtport, queue, fcpreq); 2699 2700 return 0; 2701 } 2702 2703 if (!tgtport->ops->defer_rcv) { 2704 spin_unlock_irqrestore(&queue->qlock, flags); 2705 /* release the queue lookup reference */ 2706 nvmet_fc_tgt_q_put(queue); 2707 return -ENOENT; 2708 } 2709 2710 deferfcp = list_first_entry_or_null(&queue->avail_defer_list, 2711 struct nvmet_fc_defer_fcp_req, req_list); 2712 if (deferfcp) { 2713 /* Just re-use one that was previously allocated */ 2714 list_del(&deferfcp->req_list); 2715 } else { 2716 spin_unlock_irqrestore(&queue->qlock, flags); 2717 2718 /* Now we need to dynamically allocate one */ 2719 deferfcp = kmalloc(sizeof(*deferfcp), GFP_KERNEL); 2720 if (!deferfcp) { 2721 /* release the queue lookup reference */ 2722 nvmet_fc_tgt_q_put(queue); 2723 return -ENOMEM; 2724 } 2725 spin_lock_irqsave(&queue->qlock, flags); 2726 } 2727 2728 /* For now, use rspaddr / rsplen to save payload information */ 2729 fcpreq->rspaddr = cmdiubuf; 2730 fcpreq->rsplen = cmdiubuf_len; 2731 deferfcp->fcp_req = fcpreq; 2732 2733 /* defer processing till a fod becomes available */ 2734 list_add_tail(&deferfcp->req_list, &queue->pending_cmd_list); 2735 2736 /* NOTE: the queue lookup reference is still valid */ 2737 2738 spin_unlock_irqrestore(&queue->qlock, flags); 2739 2740 return -EOVERFLOW; 2741 } 2742 EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_req); 2743 2744 /** 2745 * nvmet_fc_rcv_fcp_abort - transport entry point called by an LLDD 2746 * upon the reception of an ABTS for a FCP command 2747 * 2748 * Notify the transport that an ABTS has been received for a FCP command 2749 * that had been given to the transport via nvmet_fc_rcv_fcp_req(). The 2750 * LLDD believes the command is still being worked on 2751 * (template_ops->fcp_req_release() has not been called). 
2752  *
2753  * The transport will wait for any outstanding work (an op to the LLDD,
2754  * which the lldd should complete with error due to the ABTS; or the
2755  * completion from the nvmet layer of the nvme command), then will
2756  * stop processing and call the LLDD's fcp_req_release() template
2757  * callback to return the i/o context to the LLDD. The LLDD may send
2758  * the BA_ACC to the ABTS either after return from this function
2759  * (assuming any outstanding op work has been terminated) or upon the
2760  * callback being called.
2761  *
2762  * @target_port: pointer to the (registered) target port the FCP CMD IU
2763  *               was received on.
2764  * @fcpreq: pointer to the fcpreq request structure that corresponds
2765  *          to the exchange that received the ABTS.
2766  */
2767 void
2768 nvmet_fc_rcv_fcp_abort(struct nvmet_fc_target_port *target_port,
2769			struct nvmefc_tgt_fcp_req *fcpreq)
2770 {
2771	struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
2772	struct nvmet_fc_tgt_queue *queue;
2773	unsigned long flags;
2774
2775	if (!fod || fod->fcpreq != fcpreq)
2776		/* job appears to have already completed, ignore abort */
2777		return;
2778
2779	queue = fod->queue;
2780
2781	spin_lock_irqsave(&queue->qlock, flags);
2782	if (fod->active) {
2783		/*
2784		 * mark as abort. The abort handler, invoked upon completion
2785		 * of any work, will detect the aborted status and do the
2786		 * callback.
2787		 */
2788		spin_lock(&fod->flock);
2789		fod->abort = true;
2790		fod->aborted = true;
2791		spin_unlock(&fod->flock);
2792	}
2793	spin_unlock_irqrestore(&queue->qlock, flags);
2794 }
2795 EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_abort);
2796
2797
2798 struct nvmet_fc_traddr {
2799	u64	nn;
2800	u64	pn;
2801 };
2802
2803 static int
2804 __nvme_fc_parse_u64(substring_t *sstr, u64 *val)
2805 {
2806	u64 token64;
2807
2808	if (match_u64(sstr, &token64))
2809		return -EINVAL;
2810	*val = token64;
2811
2812	return 0;
2813 }
2814
2815 /*
2816  * This routine validates and extracts the WWNs from the TRADDR string.
2817  * As kernel parsers need the 0x to determine number base, universally
2818  * build string to parse with 0x prefix before parsing name strings.
2819 */ 2820 static int 2821 nvme_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf, size_t blen) 2822 { 2823 char name[2 + NVME_FC_TRADDR_HEXNAMELEN + 1]; 2824 substring_t wwn = { name, &name[sizeof(name)-1] }; 2825 int nnoffset, pnoffset; 2826 2827 /* validate if string is one of the 2 allowed formats */ 2828 if (strnlen(buf, blen) == NVME_FC_TRADDR_MAXLENGTH && 2829 !strncmp(buf, "nn-0x", NVME_FC_TRADDR_OXNNLEN) && 2830 !strncmp(&buf[NVME_FC_TRADDR_MAX_PN_OFFSET], 2831 "pn-0x", NVME_FC_TRADDR_OXNNLEN)) { 2832 nnoffset = NVME_FC_TRADDR_OXNNLEN; 2833 pnoffset = NVME_FC_TRADDR_MAX_PN_OFFSET + 2834 NVME_FC_TRADDR_OXNNLEN; 2835 } else if ((strnlen(buf, blen) == NVME_FC_TRADDR_MINLENGTH && 2836 !strncmp(buf, "nn-", NVME_FC_TRADDR_NNLEN) && 2837 !strncmp(&buf[NVME_FC_TRADDR_MIN_PN_OFFSET], 2838 "pn-", NVME_FC_TRADDR_NNLEN))) { 2839 nnoffset = NVME_FC_TRADDR_NNLEN; 2840 pnoffset = NVME_FC_TRADDR_MIN_PN_OFFSET + NVME_FC_TRADDR_NNLEN; 2841 } else 2842 goto out_einval; 2843 2844 name[0] = '0'; 2845 name[1] = 'x'; 2846 name[2 + NVME_FC_TRADDR_HEXNAMELEN] = 0; 2847 2848 memcpy(&name[2], &buf[nnoffset], NVME_FC_TRADDR_HEXNAMELEN); 2849 if (__nvme_fc_parse_u64(&wwn, &traddr->nn)) 2850 goto out_einval; 2851 2852 memcpy(&name[2], &buf[pnoffset], NVME_FC_TRADDR_HEXNAMELEN); 2853 if (__nvme_fc_parse_u64(&wwn, &traddr->pn)) 2854 goto out_einval; 2855 2856 return 0; 2857 2858 out_einval: 2859 pr_warn("%s: bad traddr string\n", __func__); 2860 return -EINVAL; 2861 } 2862 2863 static int 2864 nvmet_fc_add_port(struct nvmet_port *port) 2865 { 2866 struct nvmet_fc_tgtport *tgtport; 2867 struct nvmet_fc_port_entry *pe; 2868 struct nvmet_fc_traddr traddr = { 0L, 0L }; 2869 unsigned long flags; 2870 int ret; 2871 2872 /* validate the address info */ 2873 if ((port->disc_addr.trtype != NVMF_TRTYPE_FC) || 2874 (port->disc_addr.adrfam != NVMF_ADDR_FAMILY_FC)) 2875 return -EINVAL; 2876 2877 /* map the traddr address info to a target port */ 2878 2879 ret = nvme_fc_parse_traddr(&traddr, port->disc_addr.traddr, 2880 sizeof(port->disc_addr.traddr)); 2881 if (ret) 2882 return ret; 2883 2884 pe = kzalloc(sizeof(*pe), GFP_KERNEL); 2885 if (!pe) 2886 return -ENOMEM; 2887 2888 ret = -ENXIO; 2889 spin_lock_irqsave(&nvmet_fc_tgtlock, flags); 2890 list_for_each_entry(tgtport, &nvmet_fc_target_list, tgt_list) { 2891 if ((tgtport->fc_target_port.node_name == traddr.nn) && 2892 (tgtport->fc_target_port.port_name == traddr.pn)) { 2893 /* a FC port can only be 1 nvmet port id */ 2894 if (!tgtport->pe) { 2895 nvmet_fc_portentry_bind(tgtport, pe, port); 2896 ret = 0; 2897 } else 2898 ret = -EALREADY; 2899 break; 2900 } 2901 } 2902 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); 2903 2904 if (ret) 2905 kfree(pe); 2906 2907 return ret; 2908 } 2909 2910 static void 2911 nvmet_fc_remove_port(struct nvmet_port *port) 2912 { 2913 struct nvmet_fc_port_entry *pe = port->priv; 2914 2915 nvmet_fc_portentry_unbind(pe); 2916 2917 kfree(pe); 2918 } 2919 2920 static void 2921 nvmet_fc_discovery_chg(struct nvmet_port *port) 2922 { 2923 struct nvmet_fc_port_entry *pe = port->priv; 2924 struct nvmet_fc_tgtport *tgtport = pe->tgtport; 2925 2926 if (tgtport && tgtport->ops->discovery_event) 2927 tgtport->ops->discovery_event(&tgtport->fc_target_port); 2928 } 2929 2930 static const struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops = { 2931 .owner = THIS_MODULE, 2932 .type = NVMF_TRTYPE_FC, 2933 .msdbd = 1, 2934 .add_port = nvmet_fc_add_port, 2935 .remove_port = nvmet_fc_remove_port, 2936 .queue_response = nvmet_fc_fcp_nvme_cmd_done, 2937 
.delete_ctrl		= nvmet_fc_delete_ctrl,
2938	.discovery_chg		= nvmet_fc_discovery_chg,
2939 };
2940
2941 static int __init nvmet_fc_init_module(void)
2942 {
2943	return nvmet_register_transport(&nvmet_fc_tgt_fcp_ops);
2944 }
2945
2946 static void __exit nvmet_fc_exit_module(void)
2947 {
2948	/* sanity check - all targetports should be removed */
2949	if (!list_empty(&nvmet_fc_target_list))
2950		pr_warn("%s: targetport list not empty\n", __func__);
2951
2952	nvmet_unregister_transport(&nvmet_fc_tgt_fcp_ops);
2953
2954	ida_destroy(&nvmet_fc_tgtport_cnt);
2955 }
2956
2957 module_init(nvmet_fc_init_module);
2958 module_exit(nvmet_fc_exit_module);
2959
2960 MODULE_LICENSE("GPL v2");
2961
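/*
 * Illustrative sketch (editor's addition, not part of the driver): a
 * minimal example of how a hypothetical LLDD receive path might hand
 * incoming FC-NVME traffic to the transport entry points documented
 * above and honor their return codes. Everything named "example_*"
 * (the struct, its fields, and the functions) is assumed purely for
 * illustration; only nvmet_fc_rcv_ls_req(), nvmet_fc_rcv_fcp_req(),
 * and their documented semantics come from this file.
 */

struct example_lldd_rport {
	struct nvmet_fc_target_port	*tgt_port;	/* from targetport registration */
	void				*hosthandle;	/* LLDD's per-host context */
};

/* Hand a received LS request to nvmet-fc; the payload is copied internally. */
static void
example_lldd_recv_ls(struct example_lldd_rport *rport,
		     struct nvmefc_ls_rsp *lsrsp, void *lsbuf, u32 lslen)
{
	if (nvmet_fc_rcv_ls_req(rport->tgt_port, rport->hosthandle,
				lsrsp, lsbuf, lslen)) {
		/* transport did not accept the LS: abort the exchange here */
	}
}

/* Hand a received FCP CMD IU to nvmet-fc and act on the return code. */
static void
example_lldd_recv_fcp_cmd(struct example_lldd_rport *rport,
			  struct nvmefc_tgt_fcp_req *fcpreq,
			  void *cmdiu, u32 cmdiu_len)
{
	int ret;

	ret = nvmet_fc_rcv_fcp_req(rport->tgt_port, fcpreq, cmdiu, cmdiu_len);
	switch (ret) {
	case 0:
		/* CMD IU was copied; the buffer may be freed/reused now */
		break;
	case -EOVERFLOW:
		/*
		 * Accepted but deferred: treat as success, but keep the
		 * CMD IU buffer untouched until the LLDD's defer_rcv()
		 * template callback runs for this fcpreq.
		 */
		break;
	default:
		/* not accepted: the LLDD should abort the FC exchange */
		break;
	}
}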