1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (c) 2016 Avago Technologies. All rights reserved. 4 */ 5 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 6 #include <linux/module.h> 7 #include <linux/slab.h> 8 #include <linux/blk-mq.h> 9 #include <linux/parser.h> 10 #include <linux/random.h> 11 #include <uapi/scsi/fc/fc_fs.h> 12 #include <uapi/scsi/fc/fc_els.h> 13 14 #include "nvmet.h" 15 #include <linux/nvme-fc-driver.h> 16 #include <linux/nvme-fc.h> 17 #include "../host/fc.h" 18 19 20 /* *************************** Data Structures/Defines ****************** */ 21 22 23 #define NVMET_LS_CTX_COUNT 256 24 25 struct nvmet_fc_tgtport; 26 struct nvmet_fc_tgt_assoc; 27 28 struct nvmet_fc_ls_iod { /* for an LS RQST RCV */ 29 struct nvmefc_ls_rsp *lsrsp; 30 struct nvmefc_tgt_fcp_req *fcpreq; /* only if RS */ 31 32 struct list_head ls_rcv_list; /* tgtport->ls_rcv_list */ 33 34 struct nvmet_fc_tgtport *tgtport; 35 struct nvmet_fc_tgt_assoc *assoc; 36 void *hosthandle; 37 38 union nvmefc_ls_requests *rqstbuf; 39 union nvmefc_ls_responses *rspbuf; 40 u16 rqstdatalen; 41 dma_addr_t rspdma; 42 43 struct scatterlist sg[2]; 44 45 struct work_struct work; 46 } __aligned(sizeof(unsigned long long)); 47 48 struct nvmet_fc_ls_req_op { /* for an LS RQST XMT */ 49 struct nvmefc_ls_req ls_req; 50 51 struct nvmet_fc_tgtport *tgtport; 52 void *hosthandle; 53 54 int ls_error; 55 struct list_head lsreq_list; /* tgtport->ls_req_list */ 56 bool req_queued; 57 }; 58 59 60 /* desired maximum for a single sequence - if sg list allows it */ 61 #define NVMET_FC_MAX_SEQ_LENGTH (256 * 1024) 62 63 enum nvmet_fcp_datadir { 64 NVMET_FCP_NODATA, 65 NVMET_FCP_WRITE, 66 NVMET_FCP_READ, 67 NVMET_FCP_ABORTED, 68 }; 69 70 struct nvmet_fc_fcp_iod { 71 struct nvmefc_tgt_fcp_req *fcpreq; 72 73 struct nvme_fc_cmd_iu cmdiubuf; 74 struct nvme_fc_ersp_iu rspiubuf; 75 dma_addr_t rspdma; 76 struct scatterlist *next_sg; 77 struct scatterlist *data_sg; 78 int data_sg_cnt; 79 u32 offset; 80 enum nvmet_fcp_datadir io_dir; 81 bool active; 82 bool abort; 83 bool aborted; 84 bool writedataactive; 85 spinlock_t flock; 86 87 struct nvmet_req req; 88 struct work_struct defer_work; 89 90 struct nvmet_fc_tgtport *tgtport; 91 struct nvmet_fc_tgt_queue *queue; 92 93 struct list_head fcp_list; /* tgtport->fcp_list */ 94 }; 95 96 struct nvmet_fc_tgtport { 97 struct nvmet_fc_target_port fc_target_port; 98 99 struct list_head tgt_list; /* nvmet_fc_target_list */ 100 struct device *dev; /* dev for dma mapping */ 101 struct nvmet_fc_target_template *ops; 102 103 struct nvmet_fc_ls_iod *iod; 104 spinlock_t lock; 105 struct list_head ls_rcv_list; 106 struct list_head ls_req_list; 107 struct list_head ls_busylist; 108 struct list_head assoc_list; 109 struct list_head host_list; 110 struct ida assoc_cnt; 111 struct nvmet_fc_port_entry *pe; 112 struct kref ref; 113 u32 max_sg_cnt; 114 115 struct work_struct put_work; 116 }; 117 118 struct nvmet_fc_port_entry { 119 struct nvmet_fc_tgtport *tgtport; 120 struct nvmet_port *port; 121 u64 node_name; 122 u64 port_name; 123 struct list_head pe_list; 124 }; 125 126 struct nvmet_fc_defer_fcp_req { 127 struct list_head req_list; 128 struct nvmefc_tgt_fcp_req *fcp_req; 129 }; 130 131 struct nvmet_fc_tgt_queue { 132 bool ninetypercent; 133 u16 qid; 134 u16 sqsize; 135 u16 ersp_ratio; 136 __le16 sqhd; 137 atomic_t connected; 138 atomic_t sqtail; 139 atomic_t zrspcnt; 140 atomic_t rsn; 141 spinlock_t qlock; 142 struct nvmet_cq nvme_cq; 143 struct nvmet_sq nvme_sq; 144 struct nvmet_fc_tgt_assoc *assoc; 145 struct list_head 
fod_list; 146 struct list_head pending_cmd_list; 147 struct list_head avail_defer_list; 148 struct workqueue_struct *work_q; 149 struct kref ref; 150 struct rcu_head rcu; 151 struct nvmet_fc_fcp_iod fod[]; /* array of fcp_iods */ 152 } __aligned(sizeof(unsigned long long)); 153 154 struct nvmet_fc_hostport { 155 struct nvmet_fc_tgtport *tgtport; 156 void *hosthandle; 157 struct list_head host_list; 158 struct kref ref; 159 u8 invalid; 160 }; 161 162 struct nvmet_fc_tgt_assoc { 163 u64 association_id; 164 u32 a_id; 165 atomic_t terminating; 166 struct nvmet_fc_tgtport *tgtport; 167 struct nvmet_fc_hostport *hostport; 168 struct nvmet_fc_ls_iod *rcv_disconn; 169 struct list_head a_list; 170 struct nvmet_fc_tgt_queue *queues[NVMET_NR_QUEUES + 1]; 171 struct kref ref; 172 struct work_struct del_work; 173 struct rcu_head rcu; 174 }; 175 176 177 static inline int 178 nvmet_fc_iodnum(struct nvmet_fc_ls_iod *iodptr) 179 { 180 return (iodptr - iodptr->tgtport->iod); 181 } 182 183 static inline int 184 nvmet_fc_fodnum(struct nvmet_fc_fcp_iod *fodptr) 185 { 186 return (fodptr - fodptr->queue->fod); 187 } 188 189 190 /* 191 * Association and Connection IDs: 192 * 193 * Association ID will have random number in upper 6 bytes and zero 194 * in lower 2 bytes 195 * 196 * Connection IDs will be Association ID with QID or'd in lower 2 bytes 197 * 198 * note: Association ID = Connection ID for queue 0 199 */ 200 #define BYTES_FOR_QID sizeof(u16) 201 #define BYTES_FOR_QID_SHIFT (BYTES_FOR_QID * 8) 202 #define NVMET_FC_QUEUEID_MASK ((u64)((1 << BYTES_FOR_QID_SHIFT) - 1)) 203 204 static inline u64 205 nvmet_fc_makeconnid(struct nvmet_fc_tgt_assoc *assoc, u16 qid) 206 { 207 return (assoc->association_id | qid); 208 } 209 210 static inline u64 211 nvmet_fc_getassociationid(u64 connectionid) 212 { 213 return connectionid & ~NVMET_FC_QUEUEID_MASK; 214 } 215 216 static inline u16 217 nvmet_fc_getqueueid(u64 connectionid) 218 { 219 return (u16)(connectionid & NVMET_FC_QUEUEID_MASK); 220 } 221 222 static inline struct nvmet_fc_tgtport * 223 targetport_to_tgtport(struct nvmet_fc_target_port *targetport) 224 { 225 return container_of(targetport, struct nvmet_fc_tgtport, 226 fc_target_port); 227 } 228 229 static inline struct nvmet_fc_fcp_iod * 230 nvmet_req_to_fod(struct nvmet_req *nvme_req) 231 { 232 return container_of(nvme_req, struct nvmet_fc_fcp_iod, req); 233 } 234 235 236 /* *************************** Globals **************************** */ 237 238 239 static DEFINE_SPINLOCK(nvmet_fc_tgtlock); 240 241 static LIST_HEAD(nvmet_fc_target_list); 242 static DEFINE_IDA(nvmet_fc_tgtport_cnt); 243 static LIST_HEAD(nvmet_fc_portentry_list); 244 245 246 static void nvmet_fc_handle_ls_rqst_work(struct work_struct *work); 247 static void nvmet_fc_fcp_rqst_op_defer_work(struct work_struct *work); 248 static void nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc); 249 static int nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc); 250 static void nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue); 251 static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue); 252 static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport); 253 static void nvmet_fc_put_tgtport_work(struct work_struct *work) 254 { 255 struct nvmet_fc_tgtport *tgtport = 256 container_of(work, struct nvmet_fc_tgtport, put_work); 257 258 nvmet_fc_tgtport_put(tgtport); 259 } 260 static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport); 261 static void nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport, 262 struct nvmet_fc_fcp_iod 
*fod); 263 static void nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc); 264 static void nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport, 265 struct nvmet_fc_ls_iod *iod); 266 267 268 /* *********************** FC-NVME DMA Handling **************************** */ 269 270 /* 271 * The fcloop device passes in a NULL device pointer. Real LLD's will 272 * pass in a valid device pointer. If NULL is passed to the dma mapping 273 * routines, depending on the platform, it may or may not succeed, and 274 * may crash. 275 * 276 * As such: 277 * Wrapper all the dma routines and check the dev pointer. 278 * 279 * If simple mappings (return just a dma address, we'll noop them, 280 * returning a dma address of 0. 281 * 282 * On more complex mappings (dma_map_sg), a pseudo routine fills 283 * in the scatter list, setting all dma addresses to 0. 284 */ 285 286 static inline dma_addr_t 287 fc_dma_map_single(struct device *dev, void *ptr, size_t size, 288 enum dma_data_direction dir) 289 { 290 return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L; 291 } 292 293 static inline int 294 fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) 295 { 296 return dev ? dma_mapping_error(dev, dma_addr) : 0; 297 } 298 299 static inline void 300 fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size, 301 enum dma_data_direction dir) 302 { 303 if (dev) 304 dma_unmap_single(dev, addr, size, dir); 305 } 306 307 static inline void 308 fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size, 309 enum dma_data_direction dir) 310 { 311 if (dev) 312 dma_sync_single_for_cpu(dev, addr, size, dir); 313 } 314 315 static inline void 316 fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size, 317 enum dma_data_direction dir) 318 { 319 if (dev) 320 dma_sync_single_for_device(dev, addr, size, dir); 321 } 322 323 /* pseudo dma_map_sg call */ 324 static int 325 fc_map_sg(struct scatterlist *sg, int nents) 326 { 327 struct scatterlist *s; 328 int i; 329 330 WARN_ON(nents == 0 || sg[0].length == 0); 331 332 for_each_sg(sg, s, nents, i) { 333 s->dma_address = 0L; 334 #ifdef CONFIG_NEED_SG_DMA_LENGTH 335 s->dma_length = s->length; 336 #endif 337 } 338 return nents; 339 } 340 341 static inline int 342 fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, 343 enum dma_data_direction dir) 344 { 345 return dev ? 
dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents); 346 } 347 348 static inline void 349 fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, 350 enum dma_data_direction dir) 351 { 352 if (dev) 353 dma_unmap_sg(dev, sg, nents, dir); 354 } 355 356 357 /* ********************** FC-NVME LS XMT Handling ************************* */ 358 359 360 static void 361 __nvmet_fc_finish_ls_req(struct nvmet_fc_ls_req_op *lsop) 362 { 363 struct nvmet_fc_tgtport *tgtport = lsop->tgtport; 364 struct nvmefc_ls_req *lsreq = &lsop->ls_req; 365 unsigned long flags; 366 367 spin_lock_irqsave(&tgtport->lock, flags); 368 369 if (!lsop->req_queued) { 370 spin_unlock_irqrestore(&tgtport->lock, flags); 371 goto out_putwork; 372 } 373 374 list_del(&lsop->lsreq_list); 375 376 lsop->req_queued = false; 377 378 spin_unlock_irqrestore(&tgtport->lock, flags); 379 380 fc_dma_unmap_single(tgtport->dev, lsreq->rqstdma, 381 (lsreq->rqstlen + lsreq->rsplen), 382 DMA_BIDIRECTIONAL); 383 384 out_putwork: 385 queue_work(nvmet_wq, &tgtport->put_work); 386 } 387 388 static int 389 __nvmet_fc_send_ls_req(struct nvmet_fc_tgtport *tgtport, 390 struct nvmet_fc_ls_req_op *lsop, 391 void (*done)(struct nvmefc_ls_req *req, int status)) 392 { 393 struct nvmefc_ls_req *lsreq = &lsop->ls_req; 394 unsigned long flags; 395 int ret = 0; 396 397 if (!tgtport->ops->ls_req) 398 return -EOPNOTSUPP; 399 400 if (!nvmet_fc_tgtport_get(tgtport)) 401 return -ESHUTDOWN; 402 403 lsreq->done = done; 404 lsop->req_queued = false; 405 INIT_LIST_HEAD(&lsop->lsreq_list); 406 407 lsreq->rqstdma = fc_dma_map_single(tgtport->dev, lsreq->rqstaddr, 408 lsreq->rqstlen + lsreq->rsplen, 409 DMA_BIDIRECTIONAL); 410 if (fc_dma_mapping_error(tgtport->dev, lsreq->rqstdma)) { 411 ret = -EFAULT; 412 goto out_puttgtport; 413 } 414 lsreq->rspdma = lsreq->rqstdma + lsreq->rqstlen; 415 416 spin_lock_irqsave(&tgtport->lock, flags); 417 418 list_add_tail(&lsop->lsreq_list, &tgtport->ls_req_list); 419 420 lsop->req_queued = true; 421 422 spin_unlock_irqrestore(&tgtport->lock, flags); 423 424 ret = tgtport->ops->ls_req(&tgtport->fc_target_port, lsop->hosthandle, 425 lsreq); 426 if (ret) 427 goto out_unlink; 428 429 return 0; 430 431 out_unlink: 432 lsop->ls_error = ret; 433 spin_lock_irqsave(&tgtport->lock, flags); 434 lsop->req_queued = false; 435 list_del(&lsop->lsreq_list); 436 spin_unlock_irqrestore(&tgtport->lock, flags); 437 fc_dma_unmap_single(tgtport->dev, lsreq->rqstdma, 438 (lsreq->rqstlen + lsreq->rsplen), 439 DMA_BIDIRECTIONAL); 440 out_puttgtport: 441 nvmet_fc_tgtport_put(tgtport); 442 443 return ret; 444 } 445 446 static int 447 nvmet_fc_send_ls_req_async(struct nvmet_fc_tgtport *tgtport, 448 struct nvmet_fc_ls_req_op *lsop, 449 void (*done)(struct nvmefc_ls_req *req, int status)) 450 { 451 /* don't wait for completion */ 452 453 return __nvmet_fc_send_ls_req(tgtport, lsop, done); 454 } 455 456 static void 457 nvmet_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status) 458 { 459 struct nvmet_fc_ls_req_op *lsop = 460 container_of(lsreq, struct nvmet_fc_ls_req_op, ls_req); 461 462 __nvmet_fc_finish_ls_req(lsop); 463 464 /* fc-nvme target doesn't care about success or failure of cmd */ 465 466 kfree(lsop); 467 } 468 469 /* 470 * This routine sends a FC-NVME LS to disconnect (aka terminate) 471 * the FC-NVME Association. Terminating the association also 472 * terminates the FC-NVME connections (per queue, both admin and io 473 * queues) that are part of the association. E.g. 
things are torn 474 * down, and the related FC-NVME Association ID and Connection IDs 475 * become invalid. 476 * 477 * The behavior of the fc-nvme target is such that it's 478 * understanding of the association and connections will implicitly 479 * be torn down. The action is implicit as it may be due to a loss of 480 * connectivity with the fc-nvme host, so the target may never get a 481 * response even if it tried. As such, the action of this routine 482 * is to asynchronously send the LS, ignore any results of the LS, and 483 * continue on with terminating the association. If the fc-nvme host 484 * is present and receives the LS, it too can tear down. 485 */ 486 static void 487 nvmet_fc_xmt_disconnect_assoc(struct nvmet_fc_tgt_assoc *assoc) 488 { 489 struct nvmet_fc_tgtport *tgtport = assoc->tgtport; 490 struct fcnvme_ls_disconnect_assoc_rqst *discon_rqst; 491 struct fcnvme_ls_disconnect_assoc_acc *discon_acc; 492 struct nvmet_fc_ls_req_op *lsop; 493 struct nvmefc_ls_req *lsreq; 494 int ret; 495 496 /* 497 * If ls_req is NULL or no hosthandle, it's an older lldd and no 498 * message is normal. Otherwise, send unless the hostport has 499 * already been invalidated by the lldd. 500 */ 501 if (!tgtport->ops->ls_req || !assoc->hostport || 502 assoc->hostport->invalid) 503 return; 504 505 lsop = kzalloc((sizeof(*lsop) + 506 sizeof(*discon_rqst) + sizeof(*discon_acc) + 507 tgtport->ops->lsrqst_priv_sz), GFP_KERNEL); 508 if (!lsop) { 509 dev_info(tgtport->dev, 510 "{%d:%d} send Disconnect Association failed: ENOMEM\n", 511 tgtport->fc_target_port.port_num, assoc->a_id); 512 return; 513 } 514 515 discon_rqst = (struct fcnvme_ls_disconnect_assoc_rqst *)&lsop[1]; 516 discon_acc = (struct fcnvme_ls_disconnect_assoc_acc *)&discon_rqst[1]; 517 lsreq = &lsop->ls_req; 518 if (tgtport->ops->lsrqst_priv_sz) 519 lsreq->private = (void *)&discon_acc[1]; 520 else 521 lsreq->private = NULL; 522 523 lsop->tgtport = tgtport; 524 lsop->hosthandle = assoc->hostport->hosthandle; 525 526 nvmefc_fmt_lsreq_discon_assoc(lsreq, discon_rqst, discon_acc, 527 assoc->association_id); 528 529 ret = nvmet_fc_send_ls_req_async(tgtport, lsop, 530 nvmet_fc_disconnect_assoc_done); 531 if (ret) { 532 dev_info(tgtport->dev, 533 "{%d:%d} XMT Disconnect Association failed: %d\n", 534 tgtport->fc_target_port.port_num, assoc->a_id, ret); 535 kfree(lsop); 536 } 537 } 538 539 540 /* *********************** FC-NVME Port Management ************************ */ 541 542 543 static int 544 nvmet_fc_alloc_ls_iodlist(struct nvmet_fc_tgtport *tgtport) 545 { 546 struct nvmet_fc_ls_iod *iod; 547 int i; 548 549 iod = kcalloc(NVMET_LS_CTX_COUNT, sizeof(struct nvmet_fc_ls_iod), 550 GFP_KERNEL); 551 if (!iod) 552 return -ENOMEM; 553 554 tgtport->iod = iod; 555 556 for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) { 557 INIT_WORK(&iod->work, nvmet_fc_handle_ls_rqst_work); 558 iod->tgtport = tgtport; 559 list_add_tail(&iod->ls_rcv_list, &tgtport->ls_rcv_list); 560 561 iod->rqstbuf = kzalloc(sizeof(union nvmefc_ls_requests) + 562 sizeof(union nvmefc_ls_responses), 563 GFP_KERNEL); 564 if (!iod->rqstbuf) 565 goto out_fail; 566 567 iod->rspbuf = (union nvmefc_ls_responses *)&iod->rqstbuf[1]; 568 569 iod->rspdma = fc_dma_map_single(tgtport->dev, iod->rspbuf, 570 sizeof(*iod->rspbuf), 571 DMA_TO_DEVICE); 572 if (fc_dma_mapping_error(tgtport->dev, iod->rspdma)) 573 goto out_fail; 574 } 575 576 return 0; 577 578 out_fail: 579 kfree(iod->rqstbuf); 580 list_del(&iod->ls_rcv_list); 581 for (iod--, i--; i >= 0; iod--, i--) { 582 fc_dma_unmap_single(tgtport->dev, 
iod->rspdma, 583 sizeof(*iod->rspbuf), DMA_TO_DEVICE); 584 kfree(iod->rqstbuf); 585 list_del(&iod->ls_rcv_list); 586 } 587 588 kfree(iod); 589 590 return -EFAULT; 591 } 592 593 static void 594 nvmet_fc_free_ls_iodlist(struct nvmet_fc_tgtport *tgtport) 595 { 596 struct nvmet_fc_ls_iod *iod = tgtport->iod; 597 int i; 598 599 for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) { 600 fc_dma_unmap_single(tgtport->dev, 601 iod->rspdma, sizeof(*iod->rspbuf), 602 DMA_TO_DEVICE); 603 kfree(iod->rqstbuf); 604 list_del(&iod->ls_rcv_list); 605 } 606 kfree(tgtport->iod); 607 } 608 609 static struct nvmet_fc_ls_iod * 610 nvmet_fc_alloc_ls_iod(struct nvmet_fc_tgtport *tgtport) 611 { 612 struct nvmet_fc_ls_iod *iod; 613 unsigned long flags; 614 615 spin_lock_irqsave(&tgtport->lock, flags); 616 iod = list_first_entry_or_null(&tgtport->ls_rcv_list, 617 struct nvmet_fc_ls_iod, ls_rcv_list); 618 if (iod) 619 list_move_tail(&iod->ls_rcv_list, &tgtport->ls_busylist); 620 spin_unlock_irqrestore(&tgtport->lock, flags); 621 return iod; 622 } 623 624 625 static void 626 nvmet_fc_free_ls_iod(struct nvmet_fc_tgtport *tgtport, 627 struct nvmet_fc_ls_iod *iod) 628 { 629 unsigned long flags; 630 631 spin_lock_irqsave(&tgtport->lock, flags); 632 list_move(&iod->ls_rcv_list, &tgtport->ls_rcv_list); 633 spin_unlock_irqrestore(&tgtport->lock, flags); 634 } 635 636 static void 637 nvmet_fc_prep_fcp_iodlist(struct nvmet_fc_tgtport *tgtport, 638 struct nvmet_fc_tgt_queue *queue) 639 { 640 struct nvmet_fc_fcp_iod *fod = queue->fod; 641 int i; 642 643 for (i = 0; i < queue->sqsize; fod++, i++) { 644 INIT_WORK(&fod->defer_work, nvmet_fc_fcp_rqst_op_defer_work); 645 fod->tgtport = tgtport; 646 fod->queue = queue; 647 fod->active = false; 648 fod->abort = false; 649 fod->aborted = false; 650 fod->fcpreq = NULL; 651 list_add_tail(&fod->fcp_list, &queue->fod_list); 652 spin_lock_init(&fod->flock); 653 654 fod->rspdma = fc_dma_map_single(tgtport->dev, &fod->rspiubuf, 655 sizeof(fod->rspiubuf), DMA_TO_DEVICE); 656 if (fc_dma_mapping_error(tgtport->dev, fod->rspdma)) { 657 list_del(&fod->fcp_list); 658 for (fod--, i--; i >= 0; fod--, i--) { 659 fc_dma_unmap_single(tgtport->dev, fod->rspdma, 660 sizeof(fod->rspiubuf), 661 DMA_TO_DEVICE); 662 fod->rspdma = 0L; 663 list_del(&fod->fcp_list); 664 } 665 666 return; 667 } 668 } 669 } 670 671 static void 672 nvmet_fc_destroy_fcp_iodlist(struct nvmet_fc_tgtport *tgtport, 673 struct nvmet_fc_tgt_queue *queue) 674 { 675 struct nvmet_fc_fcp_iod *fod = queue->fod; 676 int i; 677 678 for (i = 0; i < queue->sqsize; fod++, i++) { 679 if (fod->rspdma) 680 fc_dma_unmap_single(tgtport->dev, fod->rspdma, 681 sizeof(fod->rspiubuf), DMA_TO_DEVICE); 682 } 683 } 684 685 static struct nvmet_fc_fcp_iod * 686 nvmet_fc_alloc_fcp_iod(struct nvmet_fc_tgt_queue *queue) 687 { 688 struct nvmet_fc_fcp_iod *fod; 689 690 lockdep_assert_held(&queue->qlock); 691 692 fod = list_first_entry_or_null(&queue->fod_list, 693 struct nvmet_fc_fcp_iod, fcp_list); 694 if (fod) { 695 list_del(&fod->fcp_list); 696 fod->active = true; 697 /* 698 * no queue reference is taken, as it was taken by the 699 * queue lookup just prior to the allocation. The iod 700 * will "inherit" that reference. 701 */ 702 } 703 return fod; 704 } 705 706 707 static void 708 nvmet_fc_queue_fcp_req(struct nvmet_fc_tgtport *tgtport, 709 struct nvmet_fc_tgt_queue *queue, 710 struct nvmefc_tgt_fcp_req *fcpreq) 711 { 712 struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private; 713 714 /* 715 * put all admin cmds on hw queue id 0. 
All io commands go to 716 * the respective hw queue based on a modulo basis 717 */ 718 fcpreq->hwqid = queue->qid ? 719 ((queue->qid - 1) % tgtport->ops->max_hw_queues) : 0; 720 721 nvmet_fc_handle_fcp_rqst(tgtport, fod); 722 } 723 724 static void 725 nvmet_fc_fcp_rqst_op_defer_work(struct work_struct *work) 726 { 727 struct nvmet_fc_fcp_iod *fod = 728 container_of(work, struct nvmet_fc_fcp_iod, defer_work); 729 730 /* Submit deferred IO for processing */ 731 nvmet_fc_queue_fcp_req(fod->tgtport, fod->queue, fod->fcpreq); 732 733 } 734 735 static void 736 nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue, 737 struct nvmet_fc_fcp_iod *fod) 738 { 739 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; 740 struct nvmet_fc_tgtport *tgtport = fod->tgtport; 741 struct nvmet_fc_defer_fcp_req *deferfcp; 742 unsigned long flags; 743 744 fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma, 745 sizeof(fod->rspiubuf), DMA_TO_DEVICE); 746 747 fcpreq->nvmet_fc_private = NULL; 748 749 fod->active = false; 750 fod->abort = false; 751 fod->aborted = false; 752 fod->writedataactive = false; 753 fod->fcpreq = NULL; 754 755 tgtport->ops->fcp_req_release(&tgtport->fc_target_port, fcpreq); 756 757 /* release the queue lookup reference on the completed IO */ 758 nvmet_fc_tgt_q_put(queue); 759 760 spin_lock_irqsave(&queue->qlock, flags); 761 deferfcp = list_first_entry_or_null(&queue->pending_cmd_list, 762 struct nvmet_fc_defer_fcp_req, req_list); 763 if (!deferfcp) { 764 list_add_tail(&fod->fcp_list, &fod->queue->fod_list); 765 spin_unlock_irqrestore(&queue->qlock, flags); 766 return; 767 } 768 769 /* Re-use the fod for the next pending cmd that was deferred */ 770 list_del(&deferfcp->req_list); 771 772 fcpreq = deferfcp->fcp_req; 773 774 /* deferfcp can be reused for another IO at a later date */ 775 list_add_tail(&deferfcp->req_list, &queue->avail_defer_list); 776 777 spin_unlock_irqrestore(&queue->qlock, flags); 778 779 /* Save NVME CMD IO in fod */ 780 memcpy(&fod->cmdiubuf, fcpreq->rspaddr, fcpreq->rsplen); 781 782 /* Setup new fcpreq to be processed */ 783 fcpreq->rspaddr = NULL; 784 fcpreq->rsplen = 0; 785 fcpreq->nvmet_fc_private = fod; 786 fod->fcpreq = fcpreq; 787 fod->active = true; 788 789 /* inform LLDD IO is now being processed */ 790 tgtport->ops->defer_rcv(&tgtport->fc_target_port, fcpreq); 791 792 /* 793 * Leave the queue lookup get reference taken when 794 * fod was originally allocated. 
795 */ 796 797 queue_work(queue->work_q, &fod->defer_work); 798 } 799 800 static struct nvmet_fc_tgt_queue * 801 nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc, 802 u16 qid, u16 sqsize) 803 { 804 struct nvmet_fc_tgt_queue *queue; 805 int ret; 806 807 if (qid > NVMET_NR_QUEUES) 808 return NULL; 809 810 queue = kzalloc(struct_size(queue, fod, sqsize), GFP_KERNEL); 811 if (!queue) 812 return NULL; 813 814 queue->work_q = alloc_workqueue("ntfc%d.%d.%d", 0, 0, 815 assoc->tgtport->fc_target_port.port_num, 816 assoc->a_id, qid); 817 if (!queue->work_q) 818 goto out_free_queue; 819 820 queue->qid = qid; 821 queue->sqsize = sqsize; 822 queue->assoc = assoc; 823 INIT_LIST_HEAD(&queue->fod_list); 824 INIT_LIST_HEAD(&queue->avail_defer_list); 825 INIT_LIST_HEAD(&queue->pending_cmd_list); 826 atomic_set(&queue->connected, 0); 827 atomic_set(&queue->sqtail, 0); 828 atomic_set(&queue->rsn, 1); 829 atomic_set(&queue->zrspcnt, 0); 830 spin_lock_init(&queue->qlock); 831 kref_init(&queue->ref); 832 833 nvmet_fc_prep_fcp_iodlist(assoc->tgtport, queue); 834 835 ret = nvmet_sq_init(&queue->nvme_sq); 836 if (ret) 837 goto out_fail_iodlist; 838 839 WARN_ON(assoc->queues[qid]); 840 assoc->queues[qid] = queue; 841 842 return queue; 843 844 out_fail_iodlist: 845 nvmet_fc_destroy_fcp_iodlist(assoc->tgtport, queue); 846 destroy_workqueue(queue->work_q); 847 out_free_queue: 848 kfree(queue); 849 return NULL; 850 } 851 852 853 static void 854 nvmet_fc_tgt_queue_free(struct kref *ref) 855 { 856 struct nvmet_fc_tgt_queue *queue = 857 container_of(ref, struct nvmet_fc_tgt_queue, ref); 858 859 nvmet_fc_destroy_fcp_iodlist(queue->assoc->tgtport, queue); 860 861 destroy_workqueue(queue->work_q); 862 863 kfree_rcu(queue, rcu); 864 } 865 866 static void 867 nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue) 868 { 869 kref_put(&queue->ref, nvmet_fc_tgt_queue_free); 870 } 871 872 static int 873 nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue) 874 { 875 return kref_get_unless_zero(&queue->ref); 876 } 877 878 879 static void 880 nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue) 881 { 882 struct nvmet_fc_tgtport *tgtport = queue->assoc->tgtport; 883 struct nvmet_fc_fcp_iod *fod = queue->fod; 884 struct nvmet_fc_defer_fcp_req *deferfcp, *tempptr; 885 unsigned long flags; 886 int i; 887 bool disconnect; 888 889 disconnect = atomic_xchg(&queue->connected, 0); 890 891 /* if not connected, nothing to do */ 892 if (!disconnect) 893 return; 894 895 spin_lock_irqsave(&queue->qlock, flags); 896 /* abort outstanding io's */ 897 for (i = 0; i < queue->sqsize; fod++, i++) { 898 if (fod->active) { 899 spin_lock(&fod->flock); 900 fod->abort = true; 901 /* 902 * only call lldd abort routine if waiting for 903 * writedata. other outstanding ops should finish 904 * on their own. 
905 */ 906 if (fod->writedataactive) { 907 fod->aborted = true; 908 spin_unlock(&fod->flock); 909 tgtport->ops->fcp_abort( 910 &tgtport->fc_target_port, fod->fcpreq); 911 } else 912 spin_unlock(&fod->flock); 913 } 914 } 915 916 /* Cleanup defer'ed IOs in queue */ 917 list_for_each_entry_safe(deferfcp, tempptr, &queue->avail_defer_list, 918 req_list) { 919 list_del(&deferfcp->req_list); 920 kfree(deferfcp); 921 } 922 923 for (;;) { 924 deferfcp = list_first_entry_or_null(&queue->pending_cmd_list, 925 struct nvmet_fc_defer_fcp_req, req_list); 926 if (!deferfcp) 927 break; 928 929 list_del(&deferfcp->req_list); 930 spin_unlock_irqrestore(&queue->qlock, flags); 931 932 tgtport->ops->defer_rcv(&tgtport->fc_target_port, 933 deferfcp->fcp_req); 934 935 tgtport->ops->fcp_abort(&tgtport->fc_target_port, 936 deferfcp->fcp_req); 937 938 tgtport->ops->fcp_req_release(&tgtport->fc_target_port, 939 deferfcp->fcp_req); 940 941 /* release the queue lookup reference */ 942 nvmet_fc_tgt_q_put(queue); 943 944 kfree(deferfcp); 945 946 spin_lock_irqsave(&queue->qlock, flags); 947 } 948 spin_unlock_irqrestore(&queue->qlock, flags); 949 950 flush_workqueue(queue->work_q); 951 952 nvmet_sq_destroy(&queue->nvme_sq); 953 954 nvmet_fc_tgt_q_put(queue); 955 } 956 957 static struct nvmet_fc_tgt_queue * 958 nvmet_fc_find_target_queue(struct nvmet_fc_tgtport *tgtport, 959 u64 connection_id) 960 { 961 struct nvmet_fc_tgt_assoc *assoc; 962 struct nvmet_fc_tgt_queue *queue; 963 u64 association_id = nvmet_fc_getassociationid(connection_id); 964 u16 qid = nvmet_fc_getqueueid(connection_id); 965 966 if (qid > NVMET_NR_QUEUES) 967 return NULL; 968 969 rcu_read_lock(); 970 list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) { 971 if (association_id == assoc->association_id) { 972 queue = assoc->queues[qid]; 973 if (queue && 974 (!atomic_read(&queue->connected) || 975 !nvmet_fc_tgt_q_get(queue))) 976 queue = NULL; 977 rcu_read_unlock(); 978 return queue; 979 } 980 } 981 rcu_read_unlock(); 982 return NULL; 983 } 984 985 static void 986 nvmet_fc_hostport_free(struct kref *ref) 987 { 988 struct nvmet_fc_hostport *hostport = 989 container_of(ref, struct nvmet_fc_hostport, ref); 990 struct nvmet_fc_tgtport *tgtport = hostport->tgtport; 991 unsigned long flags; 992 993 spin_lock_irqsave(&tgtport->lock, flags); 994 list_del(&hostport->host_list); 995 spin_unlock_irqrestore(&tgtport->lock, flags); 996 if (tgtport->ops->host_release && hostport->invalid) 997 tgtport->ops->host_release(hostport->hosthandle); 998 kfree(hostport); 999 nvmet_fc_tgtport_put(tgtport); 1000 } 1001 1002 static void 1003 nvmet_fc_hostport_put(struct nvmet_fc_hostport *hostport) 1004 { 1005 kref_put(&hostport->ref, nvmet_fc_hostport_free); 1006 } 1007 1008 static int 1009 nvmet_fc_hostport_get(struct nvmet_fc_hostport *hostport) 1010 { 1011 return kref_get_unless_zero(&hostport->ref); 1012 } 1013 1014 static void 1015 nvmet_fc_free_hostport(struct nvmet_fc_hostport *hostport) 1016 { 1017 /* if LLDD not implemented, leave as NULL */ 1018 if (!hostport || !hostport->hosthandle) 1019 return; 1020 1021 nvmet_fc_hostport_put(hostport); 1022 } 1023 1024 static struct nvmet_fc_hostport * 1025 nvmet_fc_match_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle) 1026 { 1027 struct nvmet_fc_hostport *host; 1028 1029 lockdep_assert_held(&tgtport->lock); 1030 1031 list_for_each_entry(host, &tgtport->host_list, host_list) { 1032 if (host->hosthandle == hosthandle && !host->invalid) { 1033 if (nvmet_fc_hostport_get(host)) 1034 return (host); 1035 } 1036 } 1037 
1038 return NULL; 1039 } 1040 1041 static struct nvmet_fc_hostport * 1042 nvmet_fc_alloc_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle) 1043 { 1044 struct nvmet_fc_hostport *newhost, *match = NULL; 1045 unsigned long flags; 1046 1047 /* if LLDD not implemented, leave as NULL */ 1048 if (!hosthandle) 1049 return NULL; 1050 1051 /* 1052 * take reference for what will be the newly allocated hostport if 1053 * we end up using a new allocation 1054 */ 1055 if (!nvmet_fc_tgtport_get(tgtport)) 1056 return ERR_PTR(-EINVAL); 1057 1058 spin_lock_irqsave(&tgtport->lock, flags); 1059 match = nvmet_fc_match_hostport(tgtport, hosthandle); 1060 spin_unlock_irqrestore(&tgtport->lock, flags); 1061 1062 if (match) { 1063 /* no new allocation - release reference */ 1064 nvmet_fc_tgtport_put(tgtport); 1065 return match; 1066 } 1067 1068 newhost = kzalloc(sizeof(*newhost), GFP_KERNEL); 1069 if (!newhost) { 1070 /* no new allocation - release reference */ 1071 nvmet_fc_tgtport_put(tgtport); 1072 return ERR_PTR(-ENOMEM); 1073 } 1074 1075 spin_lock_irqsave(&tgtport->lock, flags); 1076 match = nvmet_fc_match_hostport(tgtport, hosthandle); 1077 if (match) { 1078 /* new allocation not needed */ 1079 kfree(newhost); 1080 newhost = match; 1081 } else { 1082 newhost->tgtport = tgtport; 1083 newhost->hosthandle = hosthandle; 1084 INIT_LIST_HEAD(&newhost->host_list); 1085 kref_init(&newhost->ref); 1086 1087 list_add_tail(&newhost->host_list, &tgtport->host_list); 1088 } 1089 spin_unlock_irqrestore(&tgtport->lock, flags); 1090 1091 return newhost; 1092 } 1093 1094 static void 1095 nvmet_fc_delete_assoc(struct nvmet_fc_tgt_assoc *assoc) 1096 { 1097 nvmet_fc_delete_target_assoc(assoc); 1098 nvmet_fc_tgt_a_put(assoc); 1099 } 1100 1101 static void 1102 nvmet_fc_delete_assoc_work(struct work_struct *work) 1103 { 1104 struct nvmet_fc_tgt_assoc *assoc = 1105 container_of(work, struct nvmet_fc_tgt_assoc, del_work); 1106 struct nvmet_fc_tgtport *tgtport = assoc->tgtport; 1107 1108 nvmet_fc_delete_assoc(assoc); 1109 nvmet_fc_tgtport_put(tgtport); 1110 } 1111 1112 static void 1113 nvmet_fc_schedule_delete_assoc(struct nvmet_fc_tgt_assoc *assoc) 1114 { 1115 nvmet_fc_tgtport_get(assoc->tgtport); 1116 queue_work(nvmet_wq, &assoc->del_work); 1117 } 1118 1119 static struct nvmet_fc_tgt_assoc * 1120 nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport, void *hosthandle) 1121 { 1122 struct nvmet_fc_tgt_assoc *assoc, *tmpassoc; 1123 unsigned long flags; 1124 u64 ran; 1125 int idx; 1126 bool needrandom = true; 1127 1128 if (!tgtport->pe) 1129 return NULL; 1130 1131 assoc = kzalloc(sizeof(*assoc), GFP_KERNEL); 1132 if (!assoc) 1133 return NULL; 1134 1135 idx = ida_alloc(&tgtport->assoc_cnt, GFP_KERNEL); 1136 if (idx < 0) 1137 goto out_free_assoc; 1138 1139 if (!nvmet_fc_tgtport_get(tgtport)) 1140 goto out_ida; 1141 1142 assoc->hostport = nvmet_fc_alloc_hostport(tgtport, hosthandle); 1143 if (IS_ERR(assoc->hostport)) 1144 goto out_put; 1145 1146 assoc->tgtport = tgtport; 1147 assoc->a_id = idx; 1148 INIT_LIST_HEAD(&assoc->a_list); 1149 kref_init(&assoc->ref); 1150 INIT_WORK(&assoc->del_work, nvmet_fc_delete_assoc_work); 1151 atomic_set(&assoc->terminating, 0); 1152 1153 while (needrandom) { 1154 get_random_bytes(&ran, sizeof(ran) - BYTES_FOR_QID); 1155 ran = ran << BYTES_FOR_QID_SHIFT; 1156 1157 spin_lock_irqsave(&tgtport->lock, flags); 1158 needrandom = false; 1159 list_for_each_entry(tmpassoc, &tgtport->assoc_list, a_list) { 1160 if (ran == tmpassoc->association_id) { 1161 needrandom = true; 1162 break; 1163 } 1164 } 
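		/*
		 * Editorial, illustrative sketch (not driver logic): with
		 * BYTES_FOR_QID == sizeof(u16), the shift above leaves the
		 * low 16 bits of the candidate ID zero, so for an example
		 * value ran = 0xA1B2C3D4E5F60000 and qid 3:
		 *
		 *   nvmet_fc_makeconnid(assoc, 3)     -> 0xA1B2C3D4E5F60003
		 *   nvmet_fc_getassociationid(connid) -> 0xA1B2C3D4E5F60000
		 *   nvmet_fc_getqueueid(connid)       -> 3
		 *
		 * and, per the note above those helpers, the qid 0 (admin)
		 * connection ID equals the association ID itself.
		 */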
1165 if (!needrandom) { 1166 assoc->association_id = ran; 1167 list_add_tail_rcu(&assoc->a_list, &tgtport->assoc_list); 1168 } 1169 spin_unlock_irqrestore(&tgtport->lock, flags); 1170 } 1171 1172 return assoc; 1173 1174 out_put: 1175 nvmet_fc_tgtport_put(tgtport); 1176 out_ida: 1177 ida_free(&tgtport->assoc_cnt, idx); 1178 out_free_assoc: 1179 kfree(assoc); 1180 return NULL; 1181 } 1182 1183 static void 1184 nvmet_fc_target_assoc_free(struct kref *ref) 1185 { 1186 struct nvmet_fc_tgt_assoc *assoc = 1187 container_of(ref, struct nvmet_fc_tgt_assoc, ref); 1188 struct nvmet_fc_tgtport *tgtport = assoc->tgtport; 1189 struct nvmet_fc_ls_iod *oldls; 1190 unsigned long flags; 1191 int i; 1192 1193 for (i = NVMET_NR_QUEUES; i >= 0; i--) { 1194 if (assoc->queues[i]) 1195 nvmet_fc_delete_target_queue(assoc->queues[i]); 1196 } 1197 1198 /* Send Disconnect now that all i/o has completed */ 1199 nvmet_fc_xmt_disconnect_assoc(assoc); 1200 1201 nvmet_fc_free_hostport(assoc->hostport); 1202 spin_lock_irqsave(&tgtport->lock, flags); 1203 oldls = assoc->rcv_disconn; 1204 spin_unlock_irqrestore(&tgtport->lock, flags); 1205 /* if pending Rcv Disconnect Association LS, send rsp now */ 1206 if (oldls) 1207 nvmet_fc_xmt_ls_rsp(tgtport, oldls); 1208 ida_free(&tgtport->assoc_cnt, assoc->a_id); 1209 dev_info(tgtport->dev, 1210 "{%d:%d} Association freed\n", 1211 tgtport->fc_target_port.port_num, assoc->a_id); 1212 kfree_rcu(assoc, rcu); 1213 nvmet_fc_tgtport_put(tgtport); 1214 } 1215 1216 static void 1217 nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc) 1218 { 1219 kref_put(&assoc->ref, nvmet_fc_target_assoc_free); 1220 } 1221 1222 static int 1223 nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc) 1224 { 1225 return kref_get_unless_zero(&assoc->ref); 1226 } 1227 1228 static void 1229 nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc) 1230 { 1231 struct nvmet_fc_tgtport *tgtport = assoc->tgtport; 1232 unsigned long flags; 1233 int i, terminating; 1234 1235 terminating = atomic_xchg(&assoc->terminating, 1); 1236 1237 /* if already terminating, do nothing */ 1238 if (terminating) 1239 return; 1240 1241 spin_lock_irqsave(&tgtport->lock, flags); 1242 list_del_rcu(&assoc->a_list); 1243 spin_unlock_irqrestore(&tgtport->lock, flags); 1244 1245 synchronize_rcu(); 1246 1247 /* ensure all in-flight I/Os have been processed */ 1248 for (i = NVMET_NR_QUEUES; i >= 0; i--) { 1249 if (assoc->queues[i]) 1250 flush_workqueue(assoc->queues[i]->work_q); 1251 } 1252 1253 dev_info(tgtport->dev, 1254 "{%d:%d} Association deleted\n", 1255 tgtport->fc_target_port.port_num, assoc->a_id); 1256 } 1257 1258 static struct nvmet_fc_tgt_assoc * 1259 nvmet_fc_find_target_assoc(struct nvmet_fc_tgtport *tgtport, 1260 u64 association_id) 1261 { 1262 struct nvmet_fc_tgt_assoc *assoc; 1263 struct nvmet_fc_tgt_assoc *ret = NULL; 1264 1265 rcu_read_lock(); 1266 list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) { 1267 if (association_id == assoc->association_id) { 1268 ret = assoc; 1269 if (!nvmet_fc_tgt_a_get(assoc)) 1270 ret = NULL; 1271 break; 1272 } 1273 } 1274 rcu_read_unlock(); 1275 1276 return ret; 1277 } 1278 1279 static void 1280 nvmet_fc_portentry_bind(struct nvmet_fc_tgtport *tgtport, 1281 struct nvmet_fc_port_entry *pe, 1282 struct nvmet_port *port) 1283 { 1284 lockdep_assert_held(&nvmet_fc_tgtlock); 1285 1286 pe->tgtport = tgtport; 1287 tgtport->pe = pe; 1288 1289 pe->port = port; 1290 port->priv = pe; 1291 1292 pe->node_name = tgtport->fc_target_port.node_name; 1293 pe->port_name = 
tgtport->fc_target_port.port_name; 1294 INIT_LIST_HEAD(&pe->pe_list); 1295 1296 list_add_tail(&pe->pe_list, &nvmet_fc_portentry_list); 1297 } 1298 1299 static void 1300 nvmet_fc_portentry_unbind(struct nvmet_fc_port_entry *pe) 1301 { 1302 unsigned long flags; 1303 1304 spin_lock_irqsave(&nvmet_fc_tgtlock, flags); 1305 if (pe->tgtport) 1306 pe->tgtport->pe = NULL; 1307 list_del(&pe->pe_list); 1308 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); 1309 } 1310 1311 /* 1312 * called when a targetport deregisters. Breaks the relationship 1313 * with the nvmet port, but leaves the port_entry in place so that 1314 * re-registration can resume operation. 1315 */ 1316 static void 1317 nvmet_fc_portentry_unbind_tgt(struct nvmet_fc_tgtport *tgtport) 1318 { 1319 struct nvmet_fc_port_entry *pe; 1320 unsigned long flags; 1321 1322 spin_lock_irqsave(&nvmet_fc_tgtlock, flags); 1323 pe = tgtport->pe; 1324 if (pe) 1325 pe->tgtport = NULL; 1326 tgtport->pe = NULL; 1327 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); 1328 } 1329 1330 /* 1331 * called when a new targetport is registered. Looks in the 1332 * existing nvmet port_entries to see if the nvmet layer is 1333 * configured for the targetport's wwn's. (the targetport existed, 1334 * nvmet configured, the lldd unregistered the tgtport, and is now 1335 * reregistering the same targetport). If so, set the nvmet port 1336 * port entry on the targetport. 1337 */ 1338 static void 1339 nvmet_fc_portentry_rebind_tgt(struct nvmet_fc_tgtport *tgtport) 1340 { 1341 struct nvmet_fc_port_entry *pe; 1342 unsigned long flags; 1343 1344 spin_lock_irqsave(&nvmet_fc_tgtlock, flags); 1345 list_for_each_entry(pe, &nvmet_fc_portentry_list, pe_list) { 1346 if (tgtport->fc_target_port.node_name == pe->node_name && 1347 tgtport->fc_target_port.port_name == pe->port_name) { 1348 WARN_ON(pe->tgtport); 1349 tgtport->pe = pe; 1350 pe->tgtport = tgtport; 1351 break; 1352 } 1353 } 1354 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); 1355 } 1356 1357 /** 1358 * nvmet_fc_register_targetport - transport entry point called by an 1359 * LLDD to register the existence of a local 1360 * NVME subystem FC port. 1361 * @pinfo: pointer to information about the port to be registered 1362 * @template: LLDD entrypoints and operational parameters for the port 1363 * @dev: physical hardware device node port corresponds to. Will be 1364 * used for DMA mappings 1365 * @portptr: pointer to a local port pointer. Upon success, the routine 1366 * will allocate a nvme_fc_local_port structure and place its 1367 * address in the local port pointer. Upon failure, local port 1368 * pointer will be set to NULL. 1369 * 1370 * Returns: 1371 * a completion status. Must be 0 upon success; a negative errno 1372 * (ex: -ENXIO) upon failure. 
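 *
 * Illustrative usage sketch (editorial; the "example_lldd_*" names, the
 * wwnn/wwpn/d_id values and the numeric limits are hypothetical, not part
 * of this driver). The template must supply every callback and limit
 * validated at the top of this routine:
 *
 *	static struct nvmet_fc_target_template example_lldd_tgt_template = {
 *		.targetport_delete	= example_lldd_targetport_delete,
 *		.xmt_ls_rsp		= example_lldd_xmt_ls_rsp,
 *		.fcp_op			= example_lldd_fcp_op,
 *		.fcp_abort		= example_lldd_fcp_abort,
 *		.fcp_req_release	= example_lldd_fcp_req_release,
 *		.max_hw_queues		= 4,
 *		.max_sgl_segments	= 256,
 *		.max_dif_sgl_segments	= 256,
 *		.dma_boundary		= 0xFFFFFFFF,
 *	};
 *
 *	struct nvmet_fc_port_info pinfo = {
 *		.node_name	= wwnn,
 *		.port_name	= wwpn,
 *		.port_id	= d_id,
 *	};
 *	struct nvmet_fc_target_port *targetport;
 *	int ret;
 *
 *	ret = nvmet_fc_register_targetport(&pinfo,
 *			&example_lldd_tgt_template, &pdev->dev, &targetport);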
1373 */ 1374 int 1375 nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo, 1376 struct nvmet_fc_target_template *template, 1377 struct device *dev, 1378 struct nvmet_fc_target_port **portptr) 1379 { 1380 struct nvmet_fc_tgtport *newrec; 1381 unsigned long flags; 1382 int ret, idx; 1383 1384 if (!template->xmt_ls_rsp || !template->fcp_op || 1385 !template->fcp_abort || 1386 !template->fcp_req_release || !template->targetport_delete || 1387 !template->max_hw_queues || !template->max_sgl_segments || 1388 !template->max_dif_sgl_segments || !template->dma_boundary) { 1389 ret = -EINVAL; 1390 goto out_regtgt_failed; 1391 } 1392 1393 newrec = kzalloc((sizeof(*newrec) + template->target_priv_sz), 1394 GFP_KERNEL); 1395 if (!newrec) { 1396 ret = -ENOMEM; 1397 goto out_regtgt_failed; 1398 } 1399 1400 idx = ida_alloc(&nvmet_fc_tgtport_cnt, GFP_KERNEL); 1401 if (idx < 0) { 1402 ret = -ENOSPC; 1403 goto out_fail_kfree; 1404 } 1405 1406 if (!get_device(dev) && dev) { 1407 ret = -ENODEV; 1408 goto out_ida_put; 1409 } 1410 1411 newrec->fc_target_port.node_name = pinfo->node_name; 1412 newrec->fc_target_port.port_name = pinfo->port_name; 1413 if (template->target_priv_sz) 1414 newrec->fc_target_port.private = &newrec[1]; 1415 else 1416 newrec->fc_target_port.private = NULL; 1417 newrec->fc_target_port.port_id = pinfo->port_id; 1418 newrec->fc_target_port.port_num = idx; 1419 INIT_LIST_HEAD(&newrec->tgt_list); 1420 newrec->dev = dev; 1421 newrec->ops = template; 1422 spin_lock_init(&newrec->lock); 1423 INIT_LIST_HEAD(&newrec->ls_rcv_list); 1424 INIT_LIST_HEAD(&newrec->ls_req_list); 1425 INIT_LIST_HEAD(&newrec->ls_busylist); 1426 INIT_LIST_HEAD(&newrec->assoc_list); 1427 INIT_LIST_HEAD(&newrec->host_list); 1428 kref_init(&newrec->ref); 1429 ida_init(&newrec->assoc_cnt); 1430 newrec->max_sg_cnt = template->max_sgl_segments; 1431 INIT_WORK(&newrec->put_work, nvmet_fc_put_tgtport_work); 1432 1433 ret = nvmet_fc_alloc_ls_iodlist(newrec); 1434 if (ret) { 1435 ret = -ENOMEM; 1436 goto out_free_newrec; 1437 } 1438 1439 nvmet_fc_portentry_rebind_tgt(newrec); 1440 1441 spin_lock_irqsave(&nvmet_fc_tgtlock, flags); 1442 list_add_tail(&newrec->tgt_list, &nvmet_fc_target_list); 1443 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); 1444 1445 *portptr = &newrec->fc_target_port; 1446 return 0; 1447 1448 out_free_newrec: 1449 put_device(dev); 1450 out_ida_put: 1451 ida_free(&nvmet_fc_tgtport_cnt, idx); 1452 out_fail_kfree: 1453 kfree(newrec); 1454 out_regtgt_failed: 1455 *portptr = NULL; 1456 return ret; 1457 } 1458 EXPORT_SYMBOL_GPL(nvmet_fc_register_targetport); 1459 1460 1461 static void 1462 nvmet_fc_free_tgtport(struct kref *ref) 1463 { 1464 struct nvmet_fc_tgtport *tgtport = 1465 container_of(ref, struct nvmet_fc_tgtport, ref); 1466 struct device *dev = tgtport->dev; 1467 unsigned long flags; 1468 1469 spin_lock_irqsave(&nvmet_fc_tgtlock, flags); 1470 list_del(&tgtport->tgt_list); 1471 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); 1472 1473 nvmet_fc_free_ls_iodlist(tgtport); 1474 1475 /* let the LLDD know we've finished tearing it down */ 1476 tgtport->ops->targetport_delete(&tgtport->fc_target_port); 1477 1478 ida_free(&nvmet_fc_tgtport_cnt, 1479 tgtport->fc_target_port.port_num); 1480 1481 ida_destroy(&tgtport->assoc_cnt); 1482 1483 kfree(tgtport); 1484 1485 put_device(dev); 1486 } 1487 1488 static void 1489 nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport) 1490 { 1491 kref_put(&tgtport->ref, nvmet_fc_free_tgtport); 1492 } 1493 1494 static int 1495 nvmet_fc_tgtport_get(struct nvmet_fc_tgtport 
*tgtport) 1496 { 1497 return kref_get_unless_zero(&tgtport->ref); 1498 } 1499 1500 static void 1501 __nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport) 1502 { 1503 struct nvmet_fc_tgt_assoc *assoc; 1504 1505 rcu_read_lock(); 1506 list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) { 1507 if (!nvmet_fc_tgt_a_get(assoc)) 1508 continue; 1509 nvmet_fc_schedule_delete_assoc(assoc); 1510 nvmet_fc_tgt_a_put(assoc); 1511 } 1512 rcu_read_unlock(); 1513 } 1514 1515 /** 1516 * nvmet_fc_invalidate_host - transport entry point called by an LLDD 1517 * to remove references to a hosthandle for LS's. 1518 * 1519 * The nvmet-fc layer ensures that any references to the hosthandle 1520 * on the targetport are forgotten (set to NULL). The LLDD will 1521 * typically call this when a login with a remote host port has been 1522 * lost, thus LS's for the remote host port are no longer possible. 1523 * 1524 * If an LS request is outstanding to the targetport/hosthandle (or 1525 * issued concurrently with the call to invalidate the host), the 1526 * LLDD is responsible for terminating/aborting the LS and completing 1527 * the LS request. It is recommended that these terminations/aborts 1528 * occur after calling to invalidate the host handle to avoid additional 1529 * retries by the nvmet-fc transport. The nvmet-fc transport may 1530 * continue to reference host handle while it cleans up outstanding 1531 * NVME associations. The nvmet-fc transport will call the 1532 * ops->host_release() callback to notify the LLDD that all references 1533 * are complete and the related host handle can be recovered. 1534 * Note: if there are no references, the callback may be called before 1535 * the invalidate host call returns. 1536 * 1537 * @target_port: pointer to the (registered) target port that a prior 1538 * LS was received on and which supplied the transport the 1539 * hosthandle. 1540 * @hosthandle: the handle (pointer) that represents the host port 1541 * that no longer has connectivity and that LS's should 1542 * no longer be directed to. 
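 *
 * Illustrative call flow (editorial sketch restating the rules above, not
 * an additional requirement):
 *
 *	1. LLDD loses the login/connectivity for a remote host port.
 *	2. LLDD calls nvmet_fc_invalidate_host(targetport, hosthandle).
 *	3. LLDD then aborts/completes any LS requests it still holds that
 *	   reference that hosthandle.
 *	4. Once the last transport reference to the hosthandle is gone,
 *	   nvmet-fc calls ops->host_release(hosthandle) - possibly even
 *	   before the call in step 2 returns, if nothing was outstanding.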
1543 */ 1544 void 1545 nvmet_fc_invalidate_host(struct nvmet_fc_target_port *target_port, 1546 void *hosthandle) 1547 { 1548 struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port); 1549 struct nvmet_fc_tgt_assoc *assoc, *next; 1550 unsigned long flags; 1551 bool noassoc = true; 1552 1553 spin_lock_irqsave(&tgtport->lock, flags); 1554 list_for_each_entry_safe(assoc, next, 1555 &tgtport->assoc_list, a_list) { 1556 if (!assoc->hostport || 1557 assoc->hostport->hosthandle != hosthandle) 1558 continue; 1559 if (!nvmet_fc_tgt_a_get(assoc)) 1560 continue; 1561 assoc->hostport->invalid = 1; 1562 noassoc = false; 1563 nvmet_fc_schedule_delete_assoc(assoc); 1564 nvmet_fc_tgt_a_put(assoc); 1565 } 1566 spin_unlock_irqrestore(&tgtport->lock, flags); 1567 1568 /* if there's nothing to wait for - call the callback */ 1569 if (noassoc && tgtport->ops->host_release) 1570 tgtport->ops->host_release(hosthandle); 1571 } 1572 EXPORT_SYMBOL_GPL(nvmet_fc_invalidate_host); 1573 1574 /* 1575 * nvmet layer has called to terminate an association 1576 */ 1577 static void 1578 nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl) 1579 { 1580 struct nvmet_fc_tgtport *tgtport, *next; 1581 struct nvmet_fc_tgt_assoc *assoc; 1582 struct nvmet_fc_tgt_queue *queue; 1583 unsigned long flags; 1584 bool found_ctrl = false; 1585 1586 /* this is a bit ugly, but don't want to make locks layered */ 1587 spin_lock_irqsave(&nvmet_fc_tgtlock, flags); 1588 list_for_each_entry_safe(tgtport, next, &nvmet_fc_target_list, 1589 tgt_list) { 1590 if (!nvmet_fc_tgtport_get(tgtport)) 1591 continue; 1592 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); 1593 1594 rcu_read_lock(); 1595 list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) { 1596 queue = assoc->queues[0]; 1597 if (queue && queue->nvme_sq.ctrl == ctrl) { 1598 if (nvmet_fc_tgt_a_get(assoc)) 1599 found_ctrl = true; 1600 break; 1601 } 1602 } 1603 rcu_read_unlock(); 1604 1605 nvmet_fc_tgtport_put(tgtport); 1606 1607 if (found_ctrl) { 1608 nvmet_fc_schedule_delete_assoc(assoc); 1609 nvmet_fc_tgt_a_put(assoc); 1610 return; 1611 } 1612 1613 spin_lock_irqsave(&nvmet_fc_tgtlock, flags); 1614 } 1615 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); 1616 } 1617 1618 /** 1619 * nvmet_fc_unregister_targetport - transport entry point called by an 1620 * LLDD to deregister/remove a previously 1621 * registered a local NVME subsystem FC port. 1622 * @target_port: pointer to the (registered) target port that is to be 1623 * deregistered. 1624 * 1625 * Returns: 1626 * a completion status. Must be 0 upon success; a negative errno 1627 * (ex: -ENXIO) upon failure. 1628 */ 1629 int 1630 nvmet_fc_unregister_targetport(struct nvmet_fc_target_port *target_port) 1631 { 1632 struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port); 1633 1634 nvmet_fc_portentry_unbind_tgt(tgtport); 1635 1636 /* terminate any outstanding associations */ 1637 __nvmet_fc_free_assocs(tgtport); 1638 1639 flush_workqueue(nvmet_wq); 1640 1641 /* 1642 * should terminate LS's as well. However, LS's will be generated 1643 * at the tail end of association termination, so they likely don't 1644 * exist yet. And even if they did, it's worthwhile to just let 1645 * them finish and targetport ref counting will clean things up. 
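 *
 * Illustrative LLDD teardown order (editorial sketch): the LLDD stops
 * posting new LS/FCP work, calls nvmet_fc_unregister_targetport(), and
 * treats the subsequent ops->targetport_delete() callback (made when the
 * final targetport reference is dropped in nvmet_fc_free_tgtport()) as
 * the point at which its port context may be freed.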
1646 */ 1647 1648 nvmet_fc_tgtport_put(tgtport); 1649 1650 return 0; 1651 } 1652 EXPORT_SYMBOL_GPL(nvmet_fc_unregister_targetport); 1653 1654 1655 /* ********************** FC-NVME LS RCV Handling ************************* */ 1656 1657 1658 static void 1659 nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport, 1660 struct nvmet_fc_ls_iod *iod) 1661 { 1662 struct fcnvme_ls_cr_assoc_rqst *rqst = &iod->rqstbuf->rq_cr_assoc; 1663 struct fcnvme_ls_cr_assoc_acc *acc = &iod->rspbuf->rsp_cr_assoc; 1664 struct nvmet_fc_tgt_queue *queue; 1665 int ret = 0; 1666 1667 memset(acc, 0, sizeof(*acc)); 1668 1669 /* 1670 * FC-NVME spec changes. There are initiators sending different 1671 * lengths as padding sizes for Create Association Cmd descriptor 1672 * was incorrect. 1673 * Accept anything of "minimum" length. Assume format per 1.15 1674 * spec (with HOSTID reduced to 16 bytes), ignore how long the 1675 * trailing pad length is. 1676 */ 1677 if (iod->rqstdatalen < FCNVME_LSDESC_CRA_RQST_MINLEN) 1678 ret = VERR_CR_ASSOC_LEN; 1679 else if (be32_to_cpu(rqst->desc_list_len) < 1680 FCNVME_LSDESC_CRA_RQST_MIN_LISTLEN) 1681 ret = VERR_CR_ASSOC_RQST_LEN; 1682 else if (rqst->assoc_cmd.desc_tag != 1683 cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD)) 1684 ret = VERR_CR_ASSOC_CMD; 1685 else if (be32_to_cpu(rqst->assoc_cmd.desc_len) < 1686 FCNVME_LSDESC_CRA_CMD_DESC_MIN_DESCLEN) 1687 ret = VERR_CR_ASSOC_CMD_LEN; 1688 else if (!rqst->assoc_cmd.ersp_ratio || 1689 (be16_to_cpu(rqst->assoc_cmd.ersp_ratio) >= 1690 be16_to_cpu(rqst->assoc_cmd.sqsize))) 1691 ret = VERR_ERSP_RATIO; 1692 1693 else { 1694 /* new association w/ admin queue */ 1695 iod->assoc = nvmet_fc_alloc_target_assoc( 1696 tgtport, iod->hosthandle); 1697 if (!iod->assoc) 1698 ret = VERR_ASSOC_ALLOC_FAIL; 1699 else { 1700 queue = nvmet_fc_alloc_target_queue(iod->assoc, 0, 1701 be16_to_cpu(rqst->assoc_cmd.sqsize)); 1702 if (!queue) { 1703 ret = VERR_QUEUE_ALLOC_FAIL; 1704 nvmet_fc_tgt_a_put(iod->assoc); 1705 } 1706 } 1707 } 1708 1709 if (ret) { 1710 dev_err(tgtport->dev, 1711 "Create Association LS failed: %s\n", 1712 validation_errors[ret]); 1713 iod->lsrsp->rsplen = nvme_fc_format_rjt(acc, 1714 sizeof(*acc), rqst->w0.ls_cmd, 1715 FCNVME_RJT_RC_LOGIC, 1716 FCNVME_RJT_EXP_NONE, 0); 1717 return; 1718 } 1719 1720 queue->ersp_ratio = be16_to_cpu(rqst->assoc_cmd.ersp_ratio); 1721 atomic_set(&queue->connected, 1); 1722 queue->sqhd = 0; /* best place to init value */ 1723 1724 dev_info(tgtport->dev, 1725 "{%d:%d} Association created\n", 1726 tgtport->fc_target_port.port_num, iod->assoc->a_id); 1727 1728 /* format a response */ 1729 1730 iod->lsrsp->rsplen = sizeof(*acc); 1731 1732 nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC, 1733 fcnvme_lsdesc_len( 1734 sizeof(struct fcnvme_ls_cr_assoc_acc)), 1735 FCNVME_LS_CREATE_ASSOCIATION); 1736 acc->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID); 1737 acc->associd.desc_len = 1738 fcnvme_lsdesc_len( 1739 sizeof(struct fcnvme_lsdesc_assoc_id)); 1740 acc->associd.association_id = 1741 cpu_to_be64(nvmet_fc_makeconnid(iod->assoc, 0)); 1742 acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID); 1743 acc->connectid.desc_len = 1744 fcnvme_lsdesc_len( 1745 sizeof(struct fcnvme_lsdesc_conn_id)); 1746 acc->connectid.connection_id = acc->associd.association_id; 1747 } 1748 1749 static void 1750 nvmet_fc_ls_create_connection(struct nvmet_fc_tgtport *tgtport, 1751 struct nvmet_fc_ls_iod *iod) 1752 { 1753 struct fcnvme_ls_cr_conn_rqst *rqst = &iod->rqstbuf->rq_cr_conn; 1754 struct fcnvme_ls_cr_conn_acc *acc = 
&iod->rspbuf->rsp_cr_conn; 1755 struct nvmet_fc_tgt_queue *queue; 1756 int ret = 0; 1757 1758 memset(acc, 0, sizeof(*acc)); 1759 1760 if (iod->rqstdatalen < sizeof(struct fcnvme_ls_cr_conn_rqst)) 1761 ret = VERR_CR_CONN_LEN; 1762 else if (rqst->desc_list_len != 1763 fcnvme_lsdesc_len( 1764 sizeof(struct fcnvme_ls_cr_conn_rqst))) 1765 ret = VERR_CR_CONN_RQST_LEN; 1766 else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID)) 1767 ret = VERR_ASSOC_ID; 1768 else if (rqst->associd.desc_len != 1769 fcnvme_lsdesc_len( 1770 sizeof(struct fcnvme_lsdesc_assoc_id))) 1771 ret = VERR_ASSOC_ID_LEN; 1772 else if (rqst->connect_cmd.desc_tag != 1773 cpu_to_be32(FCNVME_LSDESC_CREATE_CONN_CMD)) 1774 ret = VERR_CR_CONN_CMD; 1775 else if (rqst->connect_cmd.desc_len != 1776 fcnvme_lsdesc_len( 1777 sizeof(struct fcnvme_lsdesc_cr_conn_cmd))) 1778 ret = VERR_CR_CONN_CMD_LEN; 1779 else if (!rqst->connect_cmd.ersp_ratio || 1780 (be16_to_cpu(rqst->connect_cmd.ersp_ratio) >= 1781 be16_to_cpu(rqst->connect_cmd.sqsize))) 1782 ret = VERR_ERSP_RATIO; 1783 1784 else { 1785 /* new io queue */ 1786 iod->assoc = nvmet_fc_find_target_assoc(tgtport, 1787 be64_to_cpu(rqst->associd.association_id)); 1788 if (!iod->assoc) 1789 ret = VERR_NO_ASSOC; 1790 else { 1791 queue = nvmet_fc_alloc_target_queue(iod->assoc, 1792 be16_to_cpu(rqst->connect_cmd.qid), 1793 be16_to_cpu(rqst->connect_cmd.sqsize)); 1794 if (!queue) 1795 ret = VERR_QUEUE_ALLOC_FAIL; 1796 1797 /* release get taken in nvmet_fc_find_target_assoc */ 1798 nvmet_fc_tgt_a_put(iod->assoc); 1799 } 1800 } 1801 1802 if (ret) { 1803 dev_err(tgtport->dev, 1804 "Create Connection LS failed: %s\n", 1805 validation_errors[ret]); 1806 iod->lsrsp->rsplen = nvme_fc_format_rjt(acc, 1807 sizeof(*acc), rqst->w0.ls_cmd, 1808 (ret == VERR_NO_ASSOC) ? 
1809 FCNVME_RJT_RC_INV_ASSOC : 1810 FCNVME_RJT_RC_LOGIC, 1811 FCNVME_RJT_EXP_NONE, 0); 1812 return; 1813 } 1814 1815 queue->ersp_ratio = be16_to_cpu(rqst->connect_cmd.ersp_ratio); 1816 atomic_set(&queue->connected, 1); 1817 queue->sqhd = 0; /* best place to init value */ 1818 1819 /* format a response */ 1820 1821 iod->lsrsp->rsplen = sizeof(*acc); 1822 1823 nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC, 1824 fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc)), 1825 FCNVME_LS_CREATE_CONNECTION); 1826 acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID); 1827 acc->connectid.desc_len = 1828 fcnvme_lsdesc_len( 1829 sizeof(struct fcnvme_lsdesc_conn_id)); 1830 acc->connectid.connection_id = 1831 cpu_to_be64(nvmet_fc_makeconnid(iod->assoc, 1832 be16_to_cpu(rqst->connect_cmd.qid))); 1833 } 1834 1835 /* 1836 * Returns true if the LS response is to be transmit 1837 * Returns false if the LS response is to be delayed 1838 */ 1839 static int 1840 nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport, 1841 struct nvmet_fc_ls_iod *iod) 1842 { 1843 struct fcnvme_ls_disconnect_assoc_rqst *rqst = 1844 &iod->rqstbuf->rq_dis_assoc; 1845 struct fcnvme_ls_disconnect_assoc_acc *acc = 1846 &iod->rspbuf->rsp_dis_assoc; 1847 struct nvmet_fc_tgt_assoc *assoc = NULL; 1848 struct nvmet_fc_ls_iod *oldls = NULL; 1849 unsigned long flags; 1850 int ret = 0; 1851 1852 memset(acc, 0, sizeof(*acc)); 1853 1854 ret = nvmefc_vldt_lsreq_discon_assoc(iod->rqstdatalen, rqst); 1855 if (!ret) { 1856 /* match an active association - takes an assoc ref if !NULL */ 1857 assoc = nvmet_fc_find_target_assoc(tgtport, 1858 be64_to_cpu(rqst->associd.association_id)); 1859 iod->assoc = assoc; 1860 if (!assoc) 1861 ret = VERR_NO_ASSOC; 1862 } 1863 1864 if (ret || !assoc) { 1865 dev_err(tgtport->dev, 1866 "Disconnect LS failed: %s\n", 1867 validation_errors[ret]); 1868 iod->lsrsp->rsplen = nvme_fc_format_rjt(acc, 1869 sizeof(*acc), rqst->w0.ls_cmd, 1870 (ret == VERR_NO_ASSOC) ? 1871 FCNVME_RJT_RC_INV_ASSOC : 1872 FCNVME_RJT_RC_LOGIC, 1873 FCNVME_RJT_EXP_NONE, 0); 1874 return true; 1875 } 1876 1877 /* format a response */ 1878 1879 iod->lsrsp->rsplen = sizeof(*acc); 1880 1881 nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC, 1882 fcnvme_lsdesc_len( 1883 sizeof(struct fcnvme_ls_disconnect_assoc_acc)), 1884 FCNVME_LS_DISCONNECT_ASSOC); 1885 1886 /* 1887 * The rules for LS response says the response cannot 1888 * go back until ABTS's have been sent for all outstanding 1889 * I/O and a Disconnect Association LS has been sent. 1890 * So... save off the Disconnect LS to send the response 1891 * later. If there was a prior LS already saved, replace 1892 * it with the newer one and send a can't perform reject 1893 * on the older one. 
1894 */ 1895 spin_lock_irqsave(&tgtport->lock, flags); 1896 oldls = assoc->rcv_disconn; 1897 assoc->rcv_disconn = iod; 1898 spin_unlock_irqrestore(&tgtport->lock, flags); 1899 1900 if (oldls) { 1901 dev_info(tgtport->dev, 1902 "{%d:%d} Multiple Disconnect Association LS's " 1903 "received\n", 1904 tgtport->fc_target_port.port_num, assoc->a_id); 1905 /* overwrite good response with bogus failure */ 1906 oldls->lsrsp->rsplen = nvme_fc_format_rjt(oldls->rspbuf, 1907 sizeof(*iod->rspbuf), 1908 /* ok to use rqst, LS is same */ 1909 rqst->w0.ls_cmd, 1910 FCNVME_RJT_RC_UNAB, 1911 FCNVME_RJT_EXP_NONE, 0); 1912 nvmet_fc_xmt_ls_rsp(tgtport, oldls); 1913 } 1914 1915 nvmet_fc_schedule_delete_assoc(assoc); 1916 nvmet_fc_tgt_a_put(assoc); 1917 1918 return false; 1919 } 1920 1921 1922 /* *********************** NVME Ctrl Routines **************************** */ 1923 1924 1925 static void nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req); 1926 1927 static const struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops; 1928 1929 static void 1930 nvmet_fc_xmt_ls_rsp_done(struct nvmefc_ls_rsp *lsrsp) 1931 { 1932 struct nvmet_fc_ls_iod *iod = lsrsp->nvme_fc_private; 1933 struct nvmet_fc_tgtport *tgtport = iod->tgtport; 1934 1935 fc_dma_sync_single_for_cpu(tgtport->dev, iod->rspdma, 1936 sizeof(*iod->rspbuf), DMA_TO_DEVICE); 1937 nvmet_fc_free_ls_iod(tgtport, iod); 1938 nvmet_fc_tgtport_put(tgtport); 1939 } 1940 1941 static void 1942 nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport, 1943 struct nvmet_fc_ls_iod *iod) 1944 { 1945 int ret; 1946 1947 fc_dma_sync_single_for_device(tgtport->dev, iod->rspdma, 1948 sizeof(*iod->rspbuf), DMA_TO_DEVICE); 1949 1950 ret = tgtport->ops->xmt_ls_rsp(&tgtport->fc_target_port, iod->lsrsp); 1951 if (ret) 1952 nvmet_fc_xmt_ls_rsp_done(iod->lsrsp); 1953 } 1954 1955 /* 1956 * Actual processing routine for received FC-NVME LS Requests from the LLD 1957 */ 1958 static void 1959 nvmet_fc_handle_ls_rqst(struct nvmet_fc_tgtport *tgtport, 1960 struct nvmet_fc_ls_iod *iod) 1961 { 1962 struct fcnvme_ls_rqst_w0 *w0 = &iod->rqstbuf->rq_cr_assoc.w0; 1963 bool sendrsp = true; 1964 1965 iod->lsrsp->nvme_fc_private = iod; 1966 iod->lsrsp->rspbuf = iod->rspbuf; 1967 iod->lsrsp->rspdma = iod->rspdma; 1968 iod->lsrsp->done = nvmet_fc_xmt_ls_rsp_done; 1969 /* Be preventative. 
handlers will later set to valid length */ 1970 iod->lsrsp->rsplen = 0; 1971 1972 iod->assoc = NULL; 1973 1974 /* 1975 * handlers: 1976 * parse request input, execute the request, and format the 1977 * LS response 1978 */ 1979 switch (w0->ls_cmd) { 1980 case FCNVME_LS_CREATE_ASSOCIATION: 1981 /* Creates Association and initial Admin Queue/Connection */ 1982 nvmet_fc_ls_create_association(tgtport, iod); 1983 break; 1984 case FCNVME_LS_CREATE_CONNECTION: 1985 /* Creates an IO Queue/Connection */ 1986 nvmet_fc_ls_create_connection(tgtport, iod); 1987 break; 1988 case FCNVME_LS_DISCONNECT_ASSOC: 1989 /* Terminate a Queue/Connection or the Association */ 1990 sendrsp = nvmet_fc_ls_disconnect(tgtport, iod); 1991 break; 1992 default: 1993 iod->lsrsp->rsplen = nvme_fc_format_rjt(iod->rspbuf, 1994 sizeof(*iod->rspbuf), w0->ls_cmd, 1995 FCNVME_RJT_RC_INVAL, FCNVME_RJT_EXP_NONE, 0); 1996 } 1997 1998 if (sendrsp) 1999 nvmet_fc_xmt_ls_rsp(tgtport, iod); 2000 } 2001 2002 /* 2003 * Actual processing routine for received FC-NVME LS Requests from the LLD 2004 */ 2005 static void 2006 nvmet_fc_handle_ls_rqst_work(struct work_struct *work) 2007 { 2008 struct nvmet_fc_ls_iod *iod = 2009 container_of(work, struct nvmet_fc_ls_iod, work); 2010 struct nvmet_fc_tgtport *tgtport = iod->tgtport; 2011 2012 nvmet_fc_handle_ls_rqst(tgtport, iod); 2013 } 2014 2015 2016 /** 2017 * nvmet_fc_rcv_ls_req - transport entry point called by an LLDD 2018 * upon the reception of a NVME LS request. 2019 * 2020 * The nvmet-fc layer will copy payload to an internal structure for 2021 * processing. As such, upon completion of the routine, the LLDD may 2022 * immediately free/reuse the LS request buffer passed in the call. 2023 * 2024 * If this routine returns error, the LLDD should abort the exchange. 2025 * 2026 * @target_port: pointer to the (registered) target port the LS was 2027 * received on. 2028 * @hosthandle: pointer to the host specific data, gets stored in iod. 2029 * @lsrsp: pointer to a lsrsp structure to be used to reference 2030 * the exchange corresponding to the LS. 2031 * @lsreqbuf: pointer to the buffer containing the LS Request 2032 * @lsreqbuf_len: length, in bytes, of the received LS request 2033 */ 2034 int 2035 nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *target_port, 2036 void *hosthandle, 2037 struct nvmefc_ls_rsp *lsrsp, 2038 void *lsreqbuf, u32 lsreqbuf_len) 2039 { 2040 struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port); 2041 struct nvmet_fc_ls_iod *iod; 2042 struct fcnvme_ls_rqst_w0 *w0 = (struct fcnvme_ls_rqst_w0 *)lsreqbuf; 2043 2044 if (lsreqbuf_len > sizeof(union nvmefc_ls_requests)) { 2045 dev_info(tgtport->dev, 2046 "RCV %s LS failed: payload too large (%d)\n", 2047 (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ? 2048 nvmefc_ls_names[w0->ls_cmd] : "", 2049 lsreqbuf_len); 2050 return -E2BIG; 2051 } 2052 2053 if (!nvmet_fc_tgtport_get(tgtport)) { 2054 dev_info(tgtport->dev, 2055 "RCV %s LS failed: target deleting\n", 2056 (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ? 2057 nvmefc_ls_names[w0->ls_cmd] : ""); 2058 return -ESHUTDOWN; 2059 } 2060 2061 iod = nvmet_fc_alloc_ls_iod(tgtport); 2062 if (!iod) { 2063 dev_info(tgtport->dev, 2064 "RCV %s LS failed: context allocation failed\n", 2065 (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ? 
2066 nvmefc_ls_names[w0->ls_cmd] : ""); 2067 nvmet_fc_tgtport_put(tgtport); 2068 return -ENOENT; 2069 } 2070 2071 iod->lsrsp = lsrsp; 2072 iod->fcpreq = NULL; 2073 memcpy(iod->rqstbuf, lsreqbuf, lsreqbuf_len); 2074 iod->rqstdatalen = lsreqbuf_len; 2075 iod->hosthandle = hosthandle; 2076 2077 queue_work(nvmet_wq, &iod->work); 2078 2079 return 0; 2080 } 2081 EXPORT_SYMBOL_GPL(nvmet_fc_rcv_ls_req); 2082 2083 2084 /* 2085 * ********************** 2086 * Start of FCP handling 2087 * ********************** 2088 */ 2089 2090 static int 2091 nvmet_fc_alloc_tgt_pgs(struct nvmet_fc_fcp_iod *fod) 2092 { 2093 struct scatterlist *sg; 2094 unsigned int nent; 2095 2096 sg = sgl_alloc(fod->req.transfer_len, GFP_KERNEL, &nent); 2097 if (!sg) 2098 goto out; 2099 2100 fod->data_sg = sg; 2101 fod->data_sg_cnt = nent; 2102 fod->data_sg_cnt = fc_dma_map_sg(fod->tgtport->dev, sg, nent, 2103 ((fod->io_dir == NVMET_FCP_WRITE) ? 2104 DMA_FROM_DEVICE : DMA_TO_DEVICE)); 2105 /* note: write from initiator perspective */ 2106 fod->next_sg = fod->data_sg; 2107 2108 return 0; 2109 2110 out: 2111 return NVME_SC_INTERNAL; 2112 } 2113 2114 static void 2115 nvmet_fc_free_tgt_pgs(struct nvmet_fc_fcp_iod *fod) 2116 { 2117 if (!fod->data_sg || !fod->data_sg_cnt) 2118 return; 2119 2120 fc_dma_unmap_sg(fod->tgtport->dev, fod->data_sg, fod->data_sg_cnt, 2121 ((fod->io_dir == NVMET_FCP_WRITE) ? 2122 DMA_FROM_DEVICE : DMA_TO_DEVICE)); 2123 sgl_free(fod->data_sg); 2124 fod->data_sg = NULL; 2125 fod->data_sg_cnt = 0; 2126 } 2127 2128 2129 static bool 2130 queue_90percent_full(struct nvmet_fc_tgt_queue *q, u32 sqhd) 2131 { 2132 u32 sqtail, used; 2133 2134 /* egad, this is ugly. And sqtail is just a best guess */ 2135 sqtail = atomic_read(&q->sqtail) % q->sqsize; 2136 2137 used = (sqtail < sqhd) ? (sqtail + q->sqsize - sqhd) : (sqtail - sqhd); 2138 return ((used * 10) >= (((u32)(q->sqsize - 1) * 9))); 2139 } 2140 2141 /* 2142 * Prep RSP payload. 2143 * May be a NVMET_FCOP_RSP or NVMET_FCOP_READDATA_RSP op 2144 */ 2145 static void 2146 nvmet_fc_prep_fcp_rsp(struct nvmet_fc_tgtport *tgtport, 2147 struct nvmet_fc_fcp_iod *fod) 2148 { 2149 struct nvme_fc_ersp_iu *ersp = &fod->rspiubuf; 2150 struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common; 2151 struct nvme_completion *cqe = &ersp->cqe; 2152 u32 *cqewd = (u32 *)cqe; 2153 bool send_ersp = false; 2154 u32 rsn, rspcnt, xfr_length; 2155 2156 if (fod->fcpreq->op == NVMET_FCOP_READDATA_RSP) 2157 xfr_length = fod->req.transfer_len; 2158 else 2159 xfr_length = fod->offset; 2160 2161 /* 2162 * check to see if we can send a 0's rsp. 2163 * Note: to send a 0's response, the NVME-FC host transport will 2164 * recreate the CQE. The host transport knows: sq id, SQHD (last 2165 * seen in an ersp), and command_id. Thus it will create a 2166 * zero-filled CQE with those known fields filled in. Transport 2167 * must send an ersp for any condition where the cqe won't match 2168 * this. 2169 * 2170 * Here are the FC-NVME mandated cases where we must send an ersp: 2171 * every N responses, where N=ersp_ratio 2172 * force fabric commands to send ersp's (not in FC-NVME but good 2173 * practice) 2174 * normal cmds: any time status is non-zero, or status is zero 2175 * but words 0 or 1 are non-zero. 
2176 * the SQ is 90% or more full 2177 * the cmd is a fused command 2178 * transferred data length not equal to cmd iu length 2179 */ 2180 rspcnt = atomic_inc_return(&fod->queue->zrspcnt); 2181 if (!(rspcnt % fod->queue->ersp_ratio) || 2182 nvme_is_fabrics((struct nvme_command *) sqe) || 2183 xfr_length != fod->req.transfer_len || 2184 (le16_to_cpu(cqe->status) & 0xFFFE) || cqewd[0] || cqewd[1] || 2185 (sqe->flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND)) || 2186 queue_90percent_full(fod->queue, le16_to_cpu(cqe->sq_head))) 2187 send_ersp = true; 2188 2189 /* re-set the fields */ 2190 fod->fcpreq->rspaddr = ersp; 2191 fod->fcpreq->rspdma = fod->rspdma; 2192 2193 if (!send_ersp) { 2194 memset(ersp, 0, NVME_FC_SIZEOF_ZEROS_RSP); 2195 fod->fcpreq->rsplen = NVME_FC_SIZEOF_ZEROS_RSP; 2196 } else { 2197 ersp->iu_len = cpu_to_be16(sizeof(*ersp)/sizeof(u32)); 2198 rsn = atomic_inc_return(&fod->queue->rsn); 2199 ersp->rsn = cpu_to_be32(rsn); 2200 ersp->xfrd_len = cpu_to_be32(xfr_length); 2201 fod->fcpreq->rsplen = sizeof(*ersp); 2202 } 2203 2204 fc_dma_sync_single_for_device(tgtport->dev, fod->rspdma, 2205 sizeof(fod->rspiubuf), DMA_TO_DEVICE); 2206 } 2207 2208 static void nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq); 2209 2210 static void 2211 nvmet_fc_abort_op(struct nvmet_fc_tgtport *tgtport, 2212 struct nvmet_fc_fcp_iod *fod) 2213 { 2214 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; 2215 2216 /* data no longer needed */ 2217 nvmet_fc_free_tgt_pgs(fod); 2218 2219 /* 2220 * if an ABTS was received or we issued the fcp_abort early 2221 * don't call abort routine again. 2222 */ 2223 /* no need to take lock - lock was taken earlier to get here */ 2224 if (!fod->aborted) 2225 tgtport->ops->fcp_abort(&tgtport->fc_target_port, fcpreq); 2226 2227 nvmet_fc_free_fcp_iod(fod->queue, fod); 2228 } 2229 2230 static void 2231 nvmet_fc_xmt_fcp_rsp(struct nvmet_fc_tgtport *tgtport, 2232 struct nvmet_fc_fcp_iod *fod) 2233 { 2234 int ret; 2235 2236 fod->fcpreq->op = NVMET_FCOP_RSP; 2237 fod->fcpreq->timeout = 0; 2238 2239 nvmet_fc_prep_fcp_rsp(tgtport, fod); 2240 2241 ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq); 2242 if (ret) 2243 nvmet_fc_abort_op(tgtport, fod); 2244 } 2245 2246 static void 2247 nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport, 2248 struct nvmet_fc_fcp_iod *fod, u8 op) 2249 { 2250 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; 2251 struct scatterlist *sg = fod->next_sg; 2252 unsigned long flags; 2253 u32 remaininglen = fod->req.transfer_len - fod->offset; 2254 u32 tlen = 0; 2255 int ret; 2256 2257 fcpreq->op = op; 2258 fcpreq->offset = fod->offset; 2259 fcpreq->timeout = NVME_FC_TGTOP_TIMEOUT_SEC; 2260 2261 /* 2262 * for next sequence: 2263 * break at a sg element boundary 2264 * attempt to keep sequence length capped at 2265 * NVMET_FC_MAX_SEQ_LENGTH but allow sequence to 2266 * be longer if a single sg element is larger 2267 * than that amount. This is done to avoid creating 2268 * a new sg list to use for the tgtport api. 
2269 */ 2270 fcpreq->sg = sg; 2271 fcpreq->sg_cnt = 0; 2272 while (tlen < remaininglen && 2273 fcpreq->sg_cnt < tgtport->max_sg_cnt && 2274 tlen + sg_dma_len(sg) < NVMET_FC_MAX_SEQ_LENGTH) { 2275 fcpreq->sg_cnt++; 2276 tlen += sg_dma_len(sg); 2277 sg = sg_next(sg); 2278 } 2279 if (tlen < remaininglen && fcpreq->sg_cnt == 0) { 2280 fcpreq->sg_cnt++; 2281 tlen += min_t(u32, sg_dma_len(sg), remaininglen); 2282 sg = sg_next(sg); 2283 } 2284 if (tlen < remaininglen) 2285 fod->next_sg = sg; 2286 else 2287 fod->next_sg = NULL; 2288 2289 fcpreq->transfer_length = tlen; 2290 fcpreq->transferred_length = 0; 2291 fcpreq->fcp_error = 0; 2292 fcpreq->rsplen = 0; 2293 2294 /* 2295 * If the last READDATA request: check if LLDD supports 2296 * combined xfr with response. 2297 */ 2298 if ((op == NVMET_FCOP_READDATA) && 2299 ((fod->offset + fcpreq->transfer_length) == fod->req.transfer_len) && 2300 (tgtport->ops->target_features & NVMET_FCTGTFEAT_READDATA_RSP)) { 2301 fcpreq->op = NVMET_FCOP_READDATA_RSP; 2302 nvmet_fc_prep_fcp_rsp(tgtport, fod); 2303 } 2304 2305 ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq); 2306 if (ret) { 2307 /* 2308 * should be ok to set w/o lock as its in the thread of 2309 * execution (not an async timer routine) and doesn't 2310 * contend with any clearing action 2311 */ 2312 fod->abort = true; 2313 2314 if (op == NVMET_FCOP_WRITEDATA) { 2315 spin_lock_irqsave(&fod->flock, flags); 2316 fod->writedataactive = false; 2317 spin_unlock_irqrestore(&fod->flock, flags); 2318 nvmet_req_complete(&fod->req, NVME_SC_INTERNAL); 2319 } else /* NVMET_FCOP_READDATA or NVMET_FCOP_READDATA_RSP */ { 2320 fcpreq->fcp_error = ret; 2321 fcpreq->transferred_length = 0; 2322 nvmet_fc_xmt_fcp_op_done(fod->fcpreq); 2323 } 2324 } 2325 } 2326 2327 static inline bool 2328 __nvmet_fc_fod_op_abort(struct nvmet_fc_fcp_iod *fod, bool abort) 2329 { 2330 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; 2331 struct nvmet_fc_tgtport *tgtport = fod->tgtport; 2332 2333 /* if in the middle of an io and we need to tear down */ 2334 if (abort) { 2335 if (fcpreq->op == NVMET_FCOP_WRITEDATA) { 2336 nvmet_req_complete(&fod->req, NVME_SC_INTERNAL); 2337 return true; 2338 } 2339 2340 nvmet_fc_abort_op(tgtport, fod); 2341 return true; 2342 } 2343 2344 return false; 2345 } 2346 2347 /* 2348 * actual done handler for FCP operations when completed by the lldd 2349 */ 2350 static void 2351 nvmet_fc_fod_op_done(struct nvmet_fc_fcp_iod *fod) 2352 { 2353 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; 2354 struct nvmet_fc_tgtport *tgtport = fod->tgtport; 2355 unsigned long flags; 2356 bool abort; 2357 2358 spin_lock_irqsave(&fod->flock, flags); 2359 abort = fod->abort; 2360 fod->writedataactive = false; 2361 spin_unlock_irqrestore(&fod->flock, flags); 2362 2363 switch (fcpreq->op) { 2364 2365 case NVMET_FCOP_WRITEDATA: 2366 if (__nvmet_fc_fod_op_abort(fod, abort)) 2367 return; 2368 if (fcpreq->fcp_error || 2369 fcpreq->transferred_length != fcpreq->transfer_length) { 2370 spin_lock_irqsave(&fod->flock, flags); 2371 fod->abort = true; 2372 spin_unlock_irqrestore(&fod->flock, flags); 2373 2374 nvmet_req_complete(&fod->req, NVME_SC_INTERNAL); 2375 return; 2376 } 2377 2378 fod->offset += fcpreq->transferred_length; 2379 if (fod->offset != fod->req.transfer_len) { 2380 spin_lock_irqsave(&fod->flock, flags); 2381 fod->writedataactive = true; 2382 spin_unlock_irqrestore(&fod->flock, flags); 2383 2384 /* transfer the next chunk */ 2385 nvmet_fc_transfer_fcp_data(tgtport, fod, 2386 NVMET_FCOP_WRITEDATA); 2387 return; 2388 } 
2389 2390 /* data transfer complete, resume with nvmet layer */ 2391 fod->req.execute(&fod->req); 2392 break; 2393 2394 case NVMET_FCOP_READDATA: 2395 case NVMET_FCOP_READDATA_RSP: 2396 if (__nvmet_fc_fod_op_abort(fod, abort)) 2397 return; 2398 if (fcpreq->fcp_error || 2399 fcpreq->transferred_length != fcpreq->transfer_length) { 2400 nvmet_fc_abort_op(tgtport, fod); 2401 return; 2402 } 2403 2404 /* success */ 2405 2406 if (fcpreq->op == NVMET_FCOP_READDATA_RSP) { 2407 /* data no longer needed */ 2408 nvmet_fc_free_tgt_pgs(fod); 2409 nvmet_fc_free_fcp_iod(fod->queue, fod); 2410 return; 2411 } 2412 2413 fod->offset += fcpreq->transferred_length; 2414 if (fod->offset != fod->req.transfer_len) { 2415 /* transfer the next chunk */ 2416 nvmet_fc_transfer_fcp_data(tgtport, fod, 2417 NVMET_FCOP_READDATA); 2418 return; 2419 } 2420 2421 /* data transfer complete, send response */ 2422 2423 /* data no longer needed */ 2424 nvmet_fc_free_tgt_pgs(fod); 2425 2426 nvmet_fc_xmt_fcp_rsp(tgtport, fod); 2427 2428 break; 2429 2430 case NVMET_FCOP_RSP: 2431 if (__nvmet_fc_fod_op_abort(fod, abort)) 2432 return; 2433 nvmet_fc_free_fcp_iod(fod->queue, fod); 2434 break; 2435 2436 default: 2437 break; 2438 } 2439 } 2440 2441 static void 2442 nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq) 2443 { 2444 struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private; 2445 2446 nvmet_fc_fod_op_done(fod); 2447 } 2448 2449 /* 2450 * actual completion handler after execution by the nvmet layer 2451 */ 2452 static void 2453 __nvmet_fc_fcp_nvme_cmd_done(struct nvmet_fc_tgtport *tgtport, 2454 struct nvmet_fc_fcp_iod *fod, int status) 2455 { 2456 struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common; 2457 struct nvme_completion *cqe = &fod->rspiubuf.cqe; 2458 unsigned long flags; 2459 bool abort; 2460 2461 spin_lock_irqsave(&fod->flock, flags); 2462 abort = fod->abort; 2463 spin_unlock_irqrestore(&fod->flock, flags); 2464 2465 /* if we have a CQE, snoop the last sq_head value */ 2466 if (!status) 2467 fod->queue->sqhd = cqe->sq_head; 2468 2469 if (abort) { 2470 nvmet_fc_abort_op(tgtport, fod); 2471 return; 2472 } 2473 2474 /* if an error handling the cmd post initial parsing */ 2475 if (status) { 2476 /* fudge up a failed CQE status for our transport error */ 2477 memset(cqe, 0, sizeof(*cqe)); 2478 cqe->sq_head = fod->queue->sqhd; /* echo last cqe sqhd */ 2479 cqe->sq_id = cpu_to_le16(fod->queue->qid); 2480 cqe->command_id = sqe->command_id; 2481 cqe->status = cpu_to_le16(status); 2482 } else { 2483 2484 /* 2485 * try to push the data even if the SQE status is non-zero. 
2486 * There may be a status where data still was intended to 2487 * be moved 2488 */ 2489 if ((fod->io_dir == NVMET_FCP_READ) && (fod->data_sg_cnt)) { 2490 /* push the data over before sending rsp */ 2491 nvmet_fc_transfer_fcp_data(tgtport, fod, 2492 NVMET_FCOP_READDATA); 2493 return; 2494 } 2495 2496 /* writes & no data - fall thru */ 2497 } 2498 2499 /* data no longer needed */ 2500 nvmet_fc_free_tgt_pgs(fod); 2501 2502 nvmet_fc_xmt_fcp_rsp(tgtport, fod); 2503 } 2504 2505 2506 static void 2507 nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req) 2508 { 2509 struct nvmet_fc_fcp_iod *fod = nvmet_req_to_fod(nvme_req); 2510 struct nvmet_fc_tgtport *tgtport = fod->tgtport; 2511 2512 __nvmet_fc_fcp_nvme_cmd_done(tgtport, fod, 0); 2513 } 2514 2515 2516 /* 2517 * Actual processing routine for received FC-NVME I/O Requests from the LLD 2518 */ 2519 static void 2520 nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport, 2521 struct nvmet_fc_fcp_iod *fod) 2522 { 2523 struct nvme_fc_cmd_iu *cmdiu = &fod->cmdiubuf; 2524 u32 xfrlen = be32_to_cpu(cmdiu->data_len); 2525 int ret; 2526 2527 /* 2528 * Fused commands are currently not supported in the linux 2529 * implementation. 2530 * 2531 * As such, the implementation of the FC transport does not 2532 * look at the fused commands and order delivery to the upper 2533 * layer until we have both based on csn. 2534 */ 2535 2536 fod->fcpreq->done = nvmet_fc_xmt_fcp_op_done; 2537 2538 if (cmdiu->flags & FCNVME_CMD_FLAGS_WRITE) { 2539 fod->io_dir = NVMET_FCP_WRITE; 2540 if (!nvme_is_write(&cmdiu->sqe)) 2541 goto transport_error; 2542 } else if (cmdiu->flags & FCNVME_CMD_FLAGS_READ) { 2543 fod->io_dir = NVMET_FCP_READ; 2544 if (nvme_is_write(&cmdiu->sqe)) 2545 goto transport_error; 2546 } else { 2547 fod->io_dir = NVMET_FCP_NODATA; 2548 if (xfrlen) 2549 goto transport_error; 2550 } 2551 2552 fod->req.cmd = &fod->cmdiubuf.sqe; 2553 fod->req.cqe = &fod->rspiubuf.cqe; 2554 if (!tgtport->pe) 2555 goto transport_error; 2556 fod->req.port = tgtport->pe->port; 2557 2558 /* clear any response payload */ 2559 memset(&fod->rspiubuf, 0, sizeof(fod->rspiubuf)); 2560 2561 fod->data_sg = NULL; 2562 fod->data_sg_cnt = 0; 2563 2564 ret = nvmet_req_init(&fod->req, 2565 &fod->queue->nvme_cq, 2566 &fod->queue->nvme_sq, 2567 &nvmet_fc_tgt_fcp_ops); 2568 if (!ret) { 2569 /* bad SQE content or invalid ctrl state */ 2570 /* nvmet layer has already called op done to send rsp. */ 2571 return; 2572 } 2573 2574 fod->req.transfer_len = xfrlen; 2575 2576 /* keep a running counter of tail position */ 2577 atomic_inc(&fod->queue->sqtail); 2578 2579 if (fod->req.transfer_len) { 2580 ret = nvmet_fc_alloc_tgt_pgs(fod); 2581 if (ret) { 2582 nvmet_req_complete(&fod->req, ret); 2583 return; 2584 } 2585 } 2586 fod->req.sg = fod->data_sg; 2587 fod->req.sg_cnt = fod->data_sg_cnt; 2588 fod->offset = 0; 2589 2590 if (fod->io_dir == NVMET_FCP_WRITE) { 2591 /* pull the data over before invoking nvmet layer */ 2592 nvmet_fc_transfer_fcp_data(tgtport, fod, NVMET_FCOP_WRITEDATA); 2593 return; 2594 } 2595 2596 /* 2597 * Reads or no data: 2598 * 2599 * can invoke the nvmet_layer now. If read data, cmd completion will 2600 * push the data 2601 */ 2602 fod->req.execute(&fod->req); 2603 return; 2604 2605 transport_error: 2606 nvmet_fc_abort_op(tgtport, fod); 2607 } 2608 2609 /** 2610 * nvmet_fc_rcv_fcp_req - transport entry point called by an LLDD 2611 * upon the reception of a NVME FCP CMD IU. 2612 * 2613 * Pass a FC-NVME FCP CMD IU received from the FC link to the nvmet-fc 2614 * layer for processing. 
2615 * 2616 * The nvmet_fc layer allocates a local job structure (struct 2617 * nvmet_fc_fcp_iod) from the queue for the io and copies the 2618 * CMD IU buffer to the job structure. As such, on a successful 2619 * completion (returns 0), the LLDD may immediately free/reuse 2620 * the CMD IU buffer passed in the call. 2621 * 2622 * However, due to the packetized nature of FC and the API of the FC 2623 * LLDD (which may issue a hw command to send the response but not 2624 * receive the hw completion for that command before a new command is 2625 * asynchronously received and upcalled to the nvmet_fc layer), it is 2626 * possible for a command to be received 2627 * before the LLDD and nvmet_fc have recycled the job structure. This gives 2628 * the appearance of more commands received than fit in the sq. 2629 * To alleviate this scenario, a temporary queue is maintained in the 2630 * transport for pending LLDD requests waiting for a queue job structure. 2631 * In these "overrun" cases, a temporary queue element is allocated, 2632 * the LLDD request and CMD IU buffer information are remembered, and the 2633 * routine returns a -EOVERFLOW status. Subsequently, when a queue job 2634 * structure is freed, it is immediately reallocated for anything on the 2635 * pending request list. The LLDD's defer_rcv() callback is called, 2636 * informing the LLDD that it may reuse the CMD IU buffer, and the io 2637 * is then started normally with the transport. 2638 * 2639 * The LLDD, when receiving an -EOVERFLOW completion status, is to treat 2640 * the completion as successful but must not reuse the CMD IU buffer 2641 * until the LLDD's defer_rcv() callback has been called for the 2642 * corresponding struct nvmefc_tgt_fcp_req pointer. 2643 * 2644 * If there is any other condition in which an error occurs, the 2645 * transport will return a non-zero status indicating the error. 2646 * In all cases other than -EOVERFLOW, the transport has not accepted the 2647 * request and the LLDD should abort the exchange. 2648 * 2649 * @target_port: pointer to the (registered) target port the FCP CMD IU 2650 * was received on. 2651 * @fcpreq: pointer to a fcpreq request structure to be used to reference 2652 * the exchange corresponding to the FCP Exchange. 2653 * @cmdiubuf: pointer to the buffer containing the FCP CMD IU 2654 * @cmdiubuf_len: length, in bytes, of the received FCP CMD IU 2655 */ 2656 int 2657 nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *target_port, 2658 struct nvmefc_tgt_fcp_req *fcpreq, 2659 void *cmdiubuf, u32 cmdiubuf_len) 2660 { 2661 struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port); 2662 struct nvme_fc_cmd_iu *cmdiu = cmdiubuf; 2663 struct nvmet_fc_tgt_queue *queue; 2664 struct nvmet_fc_fcp_iod *fod; 2665 struct nvmet_fc_defer_fcp_req *deferfcp; 2666 unsigned long flags; 2667 2668 /* validate iu, so the connection id can be used to find the queue */ 2669 if ((cmdiubuf_len != sizeof(*cmdiu)) || 2670 (cmdiu->format_id != NVME_CMD_FORMAT_ID) || 2671 (cmdiu->fc_id != NVME_CMD_FC_ID) || 2672 (be16_to_cpu(cmdiu->iu_len) != (sizeof(*cmdiu)/4))) 2673 return -EIO; 2674 2675 queue = nvmet_fc_find_target_queue(tgtport, 2676 be64_to_cpu(cmdiu->connection_id)); 2677 if (!queue) 2678 return -ENOTCONN; 2679 2680 /* 2681 * note: reference taken by find_target_queue 2682 * After successful fod allocation, the fod will inherit the 2683 * ownership of that reference and will remove the reference 2684 * when the fod is freed.
2685 */ 2686 2687 spin_lock_irqsave(&queue->qlock, flags); 2688 2689 fod = nvmet_fc_alloc_fcp_iod(queue); 2690 if (fod) { 2691 spin_unlock_irqrestore(&queue->qlock, flags); 2692 2693 fcpreq->nvmet_fc_private = fod; 2694 fod->fcpreq = fcpreq; 2695 2696 memcpy(&fod->cmdiubuf, cmdiubuf, cmdiubuf_len); 2697 2698 nvmet_fc_queue_fcp_req(tgtport, queue, fcpreq); 2699 2700 return 0; 2701 } 2702 2703 if (!tgtport->ops->defer_rcv) { 2704 spin_unlock_irqrestore(&queue->qlock, flags); 2705 /* release the queue lookup reference */ 2706 nvmet_fc_tgt_q_put(queue); 2707 return -ENOENT; 2708 } 2709 2710 deferfcp = list_first_entry_or_null(&queue->avail_defer_list, 2711 struct nvmet_fc_defer_fcp_req, req_list); 2712 if (deferfcp) { 2713 /* Just re-use one that was previously allocated */ 2714 list_del(&deferfcp->req_list); 2715 } else { 2716 spin_unlock_irqrestore(&queue->qlock, flags); 2717 2718 /* Now we need to dynamically allocate one */ 2719 deferfcp = kmalloc(sizeof(*deferfcp), GFP_KERNEL); 2720 if (!deferfcp) { 2721 /* release the queue lookup reference */ 2722 nvmet_fc_tgt_q_put(queue); 2723 return -ENOMEM; 2724 } 2725 spin_lock_irqsave(&queue->qlock, flags); 2726 } 2727 2728 /* For now, use rspaddr / rsplen to save payload information */ 2729 fcpreq->rspaddr = cmdiubuf; 2730 fcpreq->rsplen = cmdiubuf_len; 2731 deferfcp->fcp_req = fcpreq; 2732 2733 /* defer processing till a fod becomes available */ 2734 list_add_tail(&deferfcp->req_list, &queue->pending_cmd_list); 2735 2736 /* NOTE: the queue lookup reference is still valid */ 2737 2738 spin_unlock_irqrestore(&queue->qlock, flags); 2739 2740 return -EOVERFLOW; 2741 } 2742 EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_req); 2743 2744 /** 2745 * nvmet_fc_rcv_fcp_abort - transport entry point called by an LLDD 2746 * upon the reception of an ABTS for a FCP command 2747 * 2748 * Notify the transport that an ABTS has been received for a FCP command 2749 * that had been given to the transport via nvmet_fc_rcv_fcp_req(). The 2750 * LLDD believes the command is still being worked on 2751 * (template_ops->fcp_req_release() has not been called). 2752 * 2753 * The transport will wait for any outstanding work (an op to the LLDD, 2754 * which the lldd should complete with error due to the ABTS; or the 2755 * completion from the nvmet layer of the nvme command), then will 2756 * stop processing and call the LLDD's fcp_req_release() callback to 2757 * return the i/o context to the LLDD. The LLDD may send the BA_ACC 2758 * to the ABTS either after return from this function (assuming any 2759 * outstanding op work has been terminated) or upon the callback being 2760 * called. 2761 * 2762 * @target_port: pointer to the (registered) target port the FCP CMD IU 2763 * was received on. 2764 * @fcpreq: pointer to the fcpreq request structure that corresponds 2765 * to the exchange that received the ABTS. 2766 */ 2767 void 2768 nvmet_fc_rcv_fcp_abort(struct nvmet_fc_target_port *target_port, 2769 struct nvmefc_tgt_fcp_req *fcpreq) 2770 { 2771 struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private; 2772 struct nvmet_fc_tgt_queue *queue; 2773 unsigned long flags; 2774 2775 if (!fod || fod->fcpreq != fcpreq) 2776 /* job appears to have already completed, ignore abort */ 2777 return; 2778 2779 queue = fod->queue; 2780 2781 spin_lock_irqsave(&queue->qlock, flags); 2782 if (fod->active) { 2783 /* 2784 * mark as abort. The abort handler, invoked upon completion 2785 * of any work, will detect the aborted status and do the 2786 * callback.
2787 */ 2788 spin_lock(&fod->flock); 2789 fod->abort = true; 2790 fod->aborted = true; 2791 spin_unlock(&fod->flock); 2792 } 2793 spin_unlock_irqrestore(&queue->qlock, flags); 2794 } 2795 EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_abort); 2796 2797 2798 struct nvmet_fc_traddr { 2799 u64 nn; 2800 u64 pn; 2801 }; 2802 2803 static int 2804 __nvme_fc_parse_u64(substring_t *sstr, u64 *val) 2805 { 2806 u64 token64; 2807 2808 if (match_u64(sstr, &token64)) 2809 return -EINVAL; 2810 *val = token64; 2811 2812 return 0; 2813 } 2814 2815 /* 2816 * This routine validates and extracts the WWN's from the TRADDR string. 2817 * As kernel parsers need the 0x to determine number base, universally 2818 * build string to parse with 0x prefix before parsing name strings. 2819 */ 2820 static int 2821 nvme_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf, size_t blen) 2822 { 2823 char name[2 + NVME_FC_TRADDR_HEXNAMELEN + 1]; 2824 substring_t wwn = { name, &name[sizeof(name)-1] }; 2825 int nnoffset, pnoffset; 2826 2827 /* validate if string is one of the 2 allowed formats */ 2828 if (strnlen(buf, blen) == NVME_FC_TRADDR_MAXLENGTH && 2829 !strncmp(buf, "nn-0x", NVME_FC_TRADDR_OXNNLEN) && 2830 !strncmp(&buf[NVME_FC_TRADDR_MAX_PN_OFFSET], 2831 "pn-0x", NVME_FC_TRADDR_OXNNLEN)) { 2832 nnoffset = NVME_FC_TRADDR_OXNNLEN; 2833 pnoffset = NVME_FC_TRADDR_MAX_PN_OFFSET + 2834 NVME_FC_TRADDR_OXNNLEN; 2835 } else if ((strnlen(buf, blen) == NVME_FC_TRADDR_MINLENGTH && 2836 !strncmp(buf, "nn-", NVME_FC_TRADDR_NNLEN) && 2837 !strncmp(&buf[NVME_FC_TRADDR_MIN_PN_OFFSET], 2838 "pn-", NVME_FC_TRADDR_NNLEN))) { 2839 nnoffset = NVME_FC_TRADDR_NNLEN; 2840 pnoffset = NVME_FC_TRADDR_MIN_PN_OFFSET + NVME_FC_TRADDR_NNLEN; 2841 } else 2842 goto out_einval; 2843 2844 name[0] = '0'; 2845 name[1] = 'x'; 2846 name[2 + NVME_FC_TRADDR_HEXNAMELEN] = 0; 2847 2848 memcpy(&name[2], &buf[nnoffset], NVME_FC_TRADDR_HEXNAMELEN); 2849 if (__nvme_fc_parse_u64(&wwn, &traddr->nn)) 2850 goto out_einval; 2851 2852 memcpy(&name[2], &buf[pnoffset], NVME_FC_TRADDR_HEXNAMELEN); 2853 if (__nvme_fc_parse_u64(&wwn, &traddr->pn)) 2854 goto out_einval; 2855 2856 return 0; 2857 2858 out_einval: 2859 pr_warn("%s: bad traddr string\n", __func__); 2860 return -EINVAL; 2861 } 2862 2863 static int 2864 nvmet_fc_add_port(struct nvmet_port *port) 2865 { 2866 struct nvmet_fc_tgtport *tgtport; 2867 struct nvmet_fc_port_entry *pe; 2868 struct nvmet_fc_traddr traddr = { 0L, 0L }; 2869 unsigned long flags; 2870 int ret; 2871 2872 /* validate the address info */ 2873 if ((port->disc_addr.trtype != NVMF_TRTYPE_FC) || 2874 (port->disc_addr.adrfam != NVMF_ADDR_FAMILY_FC)) 2875 return -EINVAL; 2876 2877 /* map the traddr address info to a target port */ 2878 2879 ret = nvme_fc_parse_traddr(&traddr, port->disc_addr.traddr, 2880 sizeof(port->disc_addr.traddr)); 2881 if (ret) 2882 return ret; 2883 2884 pe = kzalloc(sizeof(*pe), GFP_KERNEL); 2885 if (!pe) 2886 return -ENOMEM; 2887 2888 ret = -ENXIO; 2889 spin_lock_irqsave(&nvmet_fc_tgtlock, flags); 2890 list_for_each_entry(tgtport, &nvmet_fc_target_list, tgt_list) { 2891 if ((tgtport->fc_target_port.node_name == traddr.nn) && 2892 (tgtport->fc_target_port.port_name == traddr.pn)) { 2893 /* a FC port can only be 1 nvmet port id */ 2894 if (!tgtport->pe) { 2895 nvmet_fc_portentry_bind(tgtport, pe, port); 2896 ret = 0; 2897 } else 2898 ret = -EALREADY; 2899 break; 2900 } 2901 } 2902 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); 2903 2904 if (ret) 2905 kfree(pe); 2906 2907 return ret; 2908 } 2909 2910 static void 2911 
nvmet_fc_remove_port(struct nvmet_port *port) 2912 { 2913 struct nvmet_fc_port_entry *pe = port->priv; 2914 2915 nvmet_fc_portentry_unbind(pe); 2916 2917 /* terminate any outstanding associations */ 2918 __nvmet_fc_free_assocs(pe->tgtport); 2919 2920 kfree(pe); 2921 } 2922 2923 static void 2924 nvmet_fc_discovery_chg(struct nvmet_port *port) 2925 { 2926 struct nvmet_fc_port_entry *pe = port->priv; 2927 struct nvmet_fc_tgtport *tgtport = pe->tgtport; 2928 2929 if (tgtport && tgtport->ops->discovery_event) 2930 tgtport->ops->discovery_event(&tgtport->fc_target_port); 2931 } 2932 2933 static const struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops = { 2934 .owner = THIS_MODULE, 2935 .type = NVMF_TRTYPE_FC, 2936 .msdbd = 1, 2937 .add_port = nvmet_fc_add_port, 2938 .remove_port = nvmet_fc_remove_port, 2939 .queue_response = nvmet_fc_fcp_nvme_cmd_done, 2940 .delete_ctrl = nvmet_fc_delete_ctrl, 2941 .discovery_chg = nvmet_fc_discovery_chg, 2942 }; 2943 2944 static int __init nvmet_fc_init_module(void) 2945 { 2946 return nvmet_register_transport(&nvmet_fc_tgt_fcp_ops); 2947 } 2948 2949 static void __exit nvmet_fc_exit_module(void) 2950 { 2951 /* ensure any shutdown operations, e.g. delete ctrls, have finished */ 2952 flush_workqueue(nvmet_wq); 2953 2954 /* sanity check - all targetports should be removed */ 2955 if (!list_empty(&nvmet_fc_target_list)) 2956 pr_warn("%s: targetport list not empty\n", __func__); 2957 2958 nvmet_unregister_transport(&nvmet_fc_tgt_fcp_ops); 2959 2960 ida_destroy(&nvmet_fc_tgtport_cnt); 2961 } 2962 2963 module_init(nvmet_fc_init_module); 2964 module_exit(nvmet_fc_exit_module); 2965 2966 MODULE_LICENSE("GPL v2"); 2967
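
/*
 * Illustrative sketch only (not part of the transport, guarded out of the
 * build): how a hypothetical LLDD might hand a received FC-NVME LS frame
 * to nvmet-fc per the nvmet_fc_rcv_ls_req() contract documented above.
 * The example_lport structure and example_abort_ls_exchange() helper are
 * assumptions for illustration, not existing APIs.
 */
#if 0
struct example_lport {
	struct nvmet_fc_target_port *tport;	/* from targetport registration */
	void *hosthandle;			/* per-remote-port handle */
};

static void
example_deliver_ls(struct example_lport *lport, struct nvmefc_ls_rsp *lsrsp,
		void *lsbuf, u32 lslen)
{
	int ret;

	/*
	 * nvmet-fc copies the LS payload into its own iod, so lsbuf may be
	 * freed/reused as soon as this call returns, regardless of status.
	 */
	ret = nvmet_fc_rcv_ls_req(lport->tport, lport->hosthandle,
				  lsrsp, lsbuf, lslen);
	if (ret)
		/* LS not accepted (too large, port deleting, no iod) */
		example_abort_ls_exchange(lport, lsrsp);
}
#endif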
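
/*
 * Illustrative sketch only (not part of the transport, guarded out of the
 * build): how a hypothetical LLDD might pass a received FCP CMD IU to
 * nvmet_fc_rcv_fcp_req() and honor the -EOVERFLOW deferral contract
 * described in that routine's comment. The example_exchange structure and
 * the example_hold_cmd_buffer()/example_abort_exchange() helpers are
 * assumptions for illustration, not existing APIs.
 */
#if 0
struct example_exchange {
	struct nvmefc_tgt_fcp_req tgt_fcp_req;	/* handed to nvmet-fc */
	void *cmd_iu_buf;			/* received FCP CMD IU */
	u32 cmd_iu_len;
};

static void
example_deliver_fcp_cmd(struct nvmet_fc_target_port *tport,
		struct example_exchange *exch)
{
	int ret;

	ret = nvmet_fc_rcv_fcp_req(tport, &exch->tgt_fcp_req,
				   exch->cmd_iu_buf, exch->cmd_iu_len);
	switch (ret) {
	case 0:
		/* CMD IU was copied; the buffer may be reused right away */
		break;
	case -EOVERFLOW:
		/*
		 * Command accepted but no free job context: keep the CMD IU
		 * buffer untouched until the defer_rcv() template callback
		 * is invoked for this tgt_fcp_req.
		 */
		example_hold_cmd_buffer(exch);
		break;
	default:
		/* not accepted (bad IU, unknown connection, ...) */
		example_abort_exchange(exch);
		break;
	}
}
#endif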