1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (c) 2016 Avago Technologies. All rights reserved. 4 */ 5 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 6 #include <linux/module.h> 7 #include <linux/slab.h> 8 #include <linux/blk-mq.h> 9 #include <linux/parser.h> 10 #include <linux/random.h> 11 #include <uapi/scsi/fc/fc_fs.h> 12 #include <uapi/scsi/fc/fc_els.h> 13 14 #include "nvmet.h" 15 #include <linux/nvme-fc-driver.h> 16 #include <linux/nvme-fc.h> 17 #include "../host/fc.h" 18 19 20 /* *************************** Data Structures/Defines ****************** */ 21 22 23 #define NVMET_LS_CTX_COUNT 256 24 25 struct nvmet_fc_tgtport; 26 struct nvmet_fc_tgt_assoc; 27 28 struct nvmet_fc_ls_iod { /* for an LS RQST RCV */ 29 struct nvmefc_ls_rsp *lsrsp; 30 struct nvmefc_tgt_fcp_req *fcpreq; /* only if RS */ 31 32 struct list_head ls_rcv_list; /* tgtport->ls_rcv_list */ 33 34 struct nvmet_fc_tgtport *tgtport; 35 struct nvmet_fc_tgt_assoc *assoc; 36 void *hosthandle; 37 38 union nvmefc_ls_requests *rqstbuf; 39 union nvmefc_ls_responses *rspbuf; 40 u16 rqstdatalen; 41 dma_addr_t rspdma; 42 43 struct scatterlist sg[2]; 44 45 struct work_struct work; 46 } __aligned(sizeof(unsigned long long)); 47 48 struct nvmet_fc_ls_req_op { /* for an LS RQST XMT */ 49 struct nvmefc_ls_req ls_req; 50 51 struct nvmet_fc_tgtport *tgtport; 52 void *hosthandle; 53 54 int ls_error; 55 struct list_head lsreq_list; /* tgtport->ls_req_list */ 56 bool req_queued; 57 }; 58 59 60 /* desired maximum for a single sequence - if sg list allows it */ 61 #define NVMET_FC_MAX_SEQ_LENGTH (256 * 1024) 62 63 enum nvmet_fcp_datadir { 64 NVMET_FCP_NODATA, 65 NVMET_FCP_WRITE, 66 NVMET_FCP_READ, 67 NVMET_FCP_ABORTED, 68 }; 69 70 struct nvmet_fc_fcp_iod { 71 struct nvmefc_tgt_fcp_req *fcpreq; 72 73 struct nvme_fc_cmd_iu cmdiubuf; 74 struct nvme_fc_ersp_iu rspiubuf; 75 dma_addr_t rspdma; 76 struct scatterlist *next_sg; 77 struct scatterlist *data_sg; 78 int data_sg_cnt; 79 u32 offset; 80 enum nvmet_fcp_datadir io_dir; 81 bool active; 82 bool abort; 83 bool aborted; 84 bool writedataactive; 85 spinlock_t flock; 86 87 struct nvmet_req req; 88 struct work_struct defer_work; 89 90 struct nvmet_fc_tgtport *tgtport; 91 struct nvmet_fc_tgt_queue *queue; 92 93 struct list_head fcp_list; /* tgtport->fcp_list */ 94 }; 95 96 struct nvmet_fc_tgtport { 97 struct nvmet_fc_target_port fc_target_port; 98 99 struct list_head tgt_list; /* nvmet_fc_target_list */ 100 struct device *dev; /* dev for dma mapping */ 101 struct nvmet_fc_target_template *ops; 102 103 struct nvmet_fc_ls_iod *iod; 104 spinlock_t lock; 105 struct list_head ls_rcv_list; 106 struct list_head ls_req_list; 107 struct list_head ls_busylist; 108 struct list_head assoc_list; 109 struct list_head host_list; 110 struct ida assoc_cnt; 111 struct nvmet_fc_port_entry *pe; 112 struct kref ref; 113 u32 max_sg_cnt; 114 }; 115 116 struct nvmet_fc_port_entry { 117 struct nvmet_fc_tgtport *tgtport; 118 struct nvmet_port *port; 119 u64 node_name; 120 u64 port_name; 121 struct list_head pe_list; 122 }; 123 124 struct nvmet_fc_defer_fcp_req { 125 struct list_head req_list; 126 struct nvmefc_tgt_fcp_req *fcp_req; 127 }; 128 129 struct nvmet_fc_tgt_queue { 130 bool ninetypercent; 131 u16 qid; 132 u16 sqsize; 133 u16 ersp_ratio; 134 __le16 sqhd; 135 atomic_t connected; 136 atomic_t sqtail; 137 atomic_t zrspcnt; 138 atomic_t rsn; 139 spinlock_t qlock; 140 struct nvmet_cq nvme_cq; 141 struct nvmet_sq nvme_sq; 142 struct nvmet_fc_tgt_assoc *assoc; 143 struct list_head fod_list; 144 struct list_head 
pending_cmd_list; 145 struct list_head avail_defer_list; 146 struct workqueue_struct *work_q; 147 struct kref ref; 148 struct rcu_head rcu; 149 struct nvmet_fc_fcp_iod fod[]; /* array of fcp_iods */ 150 } __aligned(sizeof(unsigned long long)); 151 152 struct nvmet_fc_hostport { 153 struct nvmet_fc_tgtport *tgtport; 154 void *hosthandle; 155 struct list_head host_list; 156 struct kref ref; 157 u8 invalid; 158 }; 159 160 struct nvmet_fc_tgt_assoc { 161 u64 association_id; 162 u32 a_id; 163 atomic_t terminating; 164 struct nvmet_fc_tgtport *tgtport; 165 struct nvmet_fc_hostport *hostport; 166 struct nvmet_fc_ls_iod *rcv_disconn; 167 struct list_head a_list; 168 struct nvmet_fc_tgt_queue __rcu *queues[NVMET_NR_QUEUES + 1]; 169 struct kref ref; 170 struct work_struct del_work; 171 struct rcu_head rcu; 172 }; 173 174 175 static inline int 176 nvmet_fc_iodnum(struct nvmet_fc_ls_iod *iodptr) 177 { 178 return (iodptr - iodptr->tgtport->iod); 179 } 180 181 static inline int 182 nvmet_fc_fodnum(struct nvmet_fc_fcp_iod *fodptr) 183 { 184 return (fodptr - fodptr->queue->fod); 185 } 186 187 188 /* 189 * Association and Connection IDs: 190 * 191 * Association ID will have random number in upper 6 bytes and zero 192 * in lower 2 bytes 193 * 194 * Connection IDs will be Association ID with QID or'd in lower 2 bytes 195 * 196 * note: Association ID = Connection ID for queue 0 197 */ 198 #define BYTES_FOR_QID sizeof(u16) 199 #define BYTES_FOR_QID_SHIFT (BYTES_FOR_QID * 8) 200 #define NVMET_FC_QUEUEID_MASK ((u64)((1 << BYTES_FOR_QID_SHIFT) - 1)) 201 202 static inline u64 203 nvmet_fc_makeconnid(struct nvmet_fc_tgt_assoc *assoc, u16 qid) 204 { 205 return (assoc->association_id | qid); 206 } 207 208 static inline u64 209 nvmet_fc_getassociationid(u64 connectionid) 210 { 211 return connectionid & ~NVMET_FC_QUEUEID_MASK; 212 } 213 214 static inline u16 215 nvmet_fc_getqueueid(u64 connectionid) 216 { 217 return (u16)(connectionid & NVMET_FC_QUEUEID_MASK); 218 } 219 220 static inline struct nvmet_fc_tgtport * 221 targetport_to_tgtport(struct nvmet_fc_target_port *targetport) 222 { 223 return container_of(targetport, struct nvmet_fc_tgtport, 224 fc_target_port); 225 } 226 227 static inline struct nvmet_fc_fcp_iod * 228 nvmet_req_to_fod(struct nvmet_req *nvme_req) 229 { 230 return container_of(nvme_req, struct nvmet_fc_fcp_iod, req); 231 } 232 233 234 /* *************************** Globals **************************** */ 235 236 237 static DEFINE_SPINLOCK(nvmet_fc_tgtlock); 238 239 static LIST_HEAD(nvmet_fc_target_list); 240 static DEFINE_IDA(nvmet_fc_tgtport_cnt); 241 static LIST_HEAD(nvmet_fc_portentry_list); 242 243 244 static void nvmet_fc_handle_ls_rqst_work(struct work_struct *work); 245 static void nvmet_fc_fcp_rqst_op_defer_work(struct work_struct *work); 246 static void nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc); 247 static int nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc); 248 static void nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue); 249 static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue); 250 static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport); 251 static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport); 252 static void nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport, 253 struct nvmet_fc_fcp_iod *fod); 254 static void nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc); 255 static void nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport, 256 struct nvmet_fc_ls_iod *iod); 257 258 259 /* *********************** FC-NVME DMA 
Handling **************************** */

/*
 * The fcloop device passes in a NULL device pointer. Real LLD's will
 * pass in a valid device pointer. If NULL is passed to the dma mapping
 * routines, depending on the platform, it may or may not succeed, and
 * may crash.
 *
 * As such:
 * Wrap all the dma routines and check the dev pointer.
 *
 * For simple mappings (those that return just a dma address), we'll
 * noop them, returning a dma address of 0.
 *
 * For more complex mappings (dma_map_sg), a pseudo routine fills
 * in the scatter list, setting all dma addresses to 0.
 */

static inline dma_addr_t
fc_dma_map_single(struct device *dev, void *ptr, size_t size,
		enum dma_data_direction dir)
{
	return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L;
}

static inline int
fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dev ? dma_mapping_error(dev, dma_addr) : 0;
}

static inline void
fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
	enum dma_data_direction dir)
{
	if (dev)
		dma_unmap_single(dev, addr, size, dir);
}

static inline void
fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	if (dev)
		dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void
fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	if (dev)
		dma_sync_single_for_device(dev, addr, size, dir);
}

/* pseudo dma_map_sg call */
static int
fc_map_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	WARN_ON(nents == 0 || sg[0].length == 0);

	for_each_sg(sg, s, nents, i) {
		s->dma_address = 0L;
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		s->dma_length = s->length;
#endif
	}
	return nents;
}

static inline int
fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	return dev ?
dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents); 337 } 338 339 static inline void 340 fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, 341 enum dma_data_direction dir) 342 { 343 if (dev) 344 dma_unmap_sg(dev, sg, nents, dir); 345 } 346 347 348 /* ********************** FC-NVME LS XMT Handling ************************* */ 349 350 351 static void 352 __nvmet_fc_finish_ls_req(struct nvmet_fc_ls_req_op *lsop) 353 { 354 struct nvmet_fc_tgtport *tgtport = lsop->tgtport; 355 struct nvmefc_ls_req *lsreq = &lsop->ls_req; 356 unsigned long flags; 357 358 spin_lock_irqsave(&tgtport->lock, flags); 359 360 if (!lsop->req_queued) { 361 spin_unlock_irqrestore(&tgtport->lock, flags); 362 return; 363 } 364 365 list_del(&lsop->lsreq_list); 366 367 lsop->req_queued = false; 368 369 spin_unlock_irqrestore(&tgtport->lock, flags); 370 371 fc_dma_unmap_single(tgtport->dev, lsreq->rqstdma, 372 (lsreq->rqstlen + lsreq->rsplen), 373 DMA_BIDIRECTIONAL); 374 375 nvmet_fc_tgtport_put(tgtport); 376 } 377 378 static int 379 __nvmet_fc_send_ls_req(struct nvmet_fc_tgtport *tgtport, 380 struct nvmet_fc_ls_req_op *lsop, 381 void (*done)(struct nvmefc_ls_req *req, int status)) 382 { 383 struct nvmefc_ls_req *lsreq = &lsop->ls_req; 384 unsigned long flags; 385 int ret = 0; 386 387 if (!tgtport->ops->ls_req) 388 return -EOPNOTSUPP; 389 390 if (!nvmet_fc_tgtport_get(tgtport)) 391 return -ESHUTDOWN; 392 393 lsreq->done = done; 394 lsop->req_queued = false; 395 INIT_LIST_HEAD(&lsop->lsreq_list); 396 397 lsreq->rqstdma = fc_dma_map_single(tgtport->dev, lsreq->rqstaddr, 398 lsreq->rqstlen + lsreq->rsplen, 399 DMA_BIDIRECTIONAL); 400 if (fc_dma_mapping_error(tgtport->dev, lsreq->rqstdma)) { 401 ret = -EFAULT; 402 goto out_puttgtport; 403 } 404 lsreq->rspdma = lsreq->rqstdma + lsreq->rqstlen; 405 406 spin_lock_irqsave(&tgtport->lock, flags); 407 408 list_add_tail(&lsop->lsreq_list, &tgtport->ls_req_list); 409 410 lsop->req_queued = true; 411 412 spin_unlock_irqrestore(&tgtport->lock, flags); 413 414 ret = tgtport->ops->ls_req(&tgtport->fc_target_port, lsop->hosthandle, 415 lsreq); 416 if (ret) 417 goto out_unlink; 418 419 return 0; 420 421 out_unlink: 422 lsop->ls_error = ret; 423 spin_lock_irqsave(&tgtport->lock, flags); 424 lsop->req_queued = false; 425 list_del(&lsop->lsreq_list); 426 spin_unlock_irqrestore(&tgtport->lock, flags); 427 fc_dma_unmap_single(tgtport->dev, lsreq->rqstdma, 428 (lsreq->rqstlen + lsreq->rsplen), 429 DMA_BIDIRECTIONAL); 430 out_puttgtport: 431 nvmet_fc_tgtport_put(tgtport); 432 433 return ret; 434 } 435 436 static int 437 nvmet_fc_send_ls_req_async(struct nvmet_fc_tgtport *tgtport, 438 struct nvmet_fc_ls_req_op *lsop, 439 void (*done)(struct nvmefc_ls_req *req, int status)) 440 { 441 /* don't wait for completion */ 442 443 return __nvmet_fc_send_ls_req(tgtport, lsop, done); 444 } 445 446 static void 447 nvmet_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status) 448 { 449 struct nvmet_fc_ls_req_op *lsop = 450 container_of(lsreq, struct nvmet_fc_ls_req_op, ls_req); 451 452 __nvmet_fc_finish_ls_req(lsop); 453 454 /* fc-nvme target doesn't care about success or failure of cmd */ 455 456 kfree(lsop); 457 } 458 459 /* 460 * This routine sends a FC-NVME LS to disconnect (aka terminate) 461 * the FC-NVME Association. Terminating the association also 462 * terminates the FC-NVME connections (per queue, both admin and io 463 * queues) that are part of the association. E.g. 
things are torn 464 * down, and the related FC-NVME Association ID and Connection IDs 465 * become invalid. 466 * 467 * The behavior of the fc-nvme target is such that it's 468 * understanding of the association and connections will implicitly 469 * be torn down. The action is implicit as it may be due to a loss of 470 * connectivity with the fc-nvme host, so the target may never get a 471 * response even if it tried. As such, the action of this routine 472 * is to asynchronously send the LS, ignore any results of the LS, and 473 * continue on with terminating the association. If the fc-nvme host 474 * is present and receives the LS, it too can tear down. 475 */ 476 static void 477 nvmet_fc_xmt_disconnect_assoc(struct nvmet_fc_tgt_assoc *assoc) 478 { 479 struct nvmet_fc_tgtport *tgtport = assoc->tgtport; 480 struct fcnvme_ls_disconnect_assoc_rqst *discon_rqst; 481 struct fcnvme_ls_disconnect_assoc_acc *discon_acc; 482 struct nvmet_fc_ls_req_op *lsop; 483 struct nvmefc_ls_req *lsreq; 484 int ret; 485 486 /* 487 * If ls_req is NULL or no hosthandle, it's an older lldd and no 488 * message is normal. Otherwise, send unless the hostport has 489 * already been invalidated by the lldd. 490 */ 491 if (!tgtport->ops->ls_req || !assoc->hostport || 492 assoc->hostport->invalid) 493 return; 494 495 lsop = kzalloc((sizeof(*lsop) + 496 sizeof(*discon_rqst) + sizeof(*discon_acc) + 497 tgtport->ops->lsrqst_priv_sz), GFP_KERNEL); 498 if (!lsop) { 499 dev_info(tgtport->dev, 500 "{%d:%d} send Disconnect Association failed: ENOMEM\n", 501 tgtport->fc_target_port.port_num, assoc->a_id); 502 return; 503 } 504 505 discon_rqst = (struct fcnvme_ls_disconnect_assoc_rqst *)&lsop[1]; 506 discon_acc = (struct fcnvme_ls_disconnect_assoc_acc *)&discon_rqst[1]; 507 lsreq = &lsop->ls_req; 508 if (tgtport->ops->lsrqst_priv_sz) 509 lsreq->private = (void *)&discon_acc[1]; 510 else 511 lsreq->private = NULL; 512 513 lsop->tgtport = tgtport; 514 lsop->hosthandle = assoc->hostport->hosthandle; 515 516 nvmefc_fmt_lsreq_discon_assoc(lsreq, discon_rqst, discon_acc, 517 assoc->association_id); 518 519 ret = nvmet_fc_send_ls_req_async(tgtport, lsop, 520 nvmet_fc_disconnect_assoc_done); 521 if (ret) { 522 dev_info(tgtport->dev, 523 "{%d:%d} XMT Disconnect Association failed: %d\n", 524 tgtport->fc_target_port.port_num, assoc->a_id, ret); 525 kfree(lsop); 526 } 527 } 528 529 530 /* *********************** FC-NVME Port Management ************************ */ 531 532 533 static int 534 nvmet_fc_alloc_ls_iodlist(struct nvmet_fc_tgtport *tgtport) 535 { 536 struct nvmet_fc_ls_iod *iod; 537 int i; 538 539 iod = kcalloc(NVMET_LS_CTX_COUNT, sizeof(struct nvmet_fc_ls_iod), 540 GFP_KERNEL); 541 if (!iod) 542 return -ENOMEM; 543 544 tgtport->iod = iod; 545 546 for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) { 547 INIT_WORK(&iod->work, nvmet_fc_handle_ls_rqst_work); 548 iod->tgtport = tgtport; 549 list_add_tail(&iod->ls_rcv_list, &tgtport->ls_rcv_list); 550 551 iod->rqstbuf = kzalloc(sizeof(union nvmefc_ls_requests) + 552 sizeof(union nvmefc_ls_responses), 553 GFP_KERNEL); 554 if (!iod->rqstbuf) 555 goto out_fail; 556 557 iod->rspbuf = (union nvmefc_ls_responses *)&iod->rqstbuf[1]; 558 559 iod->rspdma = fc_dma_map_single(tgtport->dev, iod->rspbuf, 560 sizeof(*iod->rspbuf), 561 DMA_TO_DEVICE); 562 if (fc_dma_mapping_error(tgtport->dev, iod->rspdma)) 563 goto out_fail; 564 } 565 566 return 0; 567 568 out_fail: 569 kfree(iod->rqstbuf); 570 list_del(&iod->ls_rcv_list); 571 for (iod--, i--; i >= 0; iod--, i--) { 572 fc_dma_unmap_single(tgtport->dev, 
iod->rspdma, 573 sizeof(*iod->rspbuf), DMA_TO_DEVICE); 574 kfree(iod->rqstbuf); 575 list_del(&iod->ls_rcv_list); 576 } 577 578 kfree(iod); 579 580 return -EFAULT; 581 } 582 583 static void 584 nvmet_fc_free_ls_iodlist(struct nvmet_fc_tgtport *tgtport) 585 { 586 struct nvmet_fc_ls_iod *iod = tgtport->iod; 587 int i; 588 589 for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) { 590 fc_dma_unmap_single(tgtport->dev, 591 iod->rspdma, sizeof(*iod->rspbuf), 592 DMA_TO_DEVICE); 593 kfree(iod->rqstbuf); 594 list_del(&iod->ls_rcv_list); 595 } 596 kfree(tgtport->iod); 597 } 598 599 static struct nvmet_fc_ls_iod * 600 nvmet_fc_alloc_ls_iod(struct nvmet_fc_tgtport *tgtport) 601 { 602 struct nvmet_fc_ls_iod *iod; 603 unsigned long flags; 604 605 spin_lock_irqsave(&tgtport->lock, flags); 606 iod = list_first_entry_or_null(&tgtport->ls_rcv_list, 607 struct nvmet_fc_ls_iod, ls_rcv_list); 608 if (iod) 609 list_move_tail(&iod->ls_rcv_list, &tgtport->ls_busylist); 610 spin_unlock_irqrestore(&tgtport->lock, flags); 611 return iod; 612 } 613 614 615 static void 616 nvmet_fc_free_ls_iod(struct nvmet_fc_tgtport *tgtport, 617 struct nvmet_fc_ls_iod *iod) 618 { 619 unsigned long flags; 620 621 spin_lock_irqsave(&tgtport->lock, flags); 622 list_move(&iod->ls_rcv_list, &tgtport->ls_rcv_list); 623 spin_unlock_irqrestore(&tgtport->lock, flags); 624 } 625 626 static void 627 nvmet_fc_prep_fcp_iodlist(struct nvmet_fc_tgtport *tgtport, 628 struct nvmet_fc_tgt_queue *queue) 629 { 630 struct nvmet_fc_fcp_iod *fod = queue->fod; 631 int i; 632 633 for (i = 0; i < queue->sqsize; fod++, i++) { 634 INIT_WORK(&fod->defer_work, nvmet_fc_fcp_rqst_op_defer_work); 635 fod->tgtport = tgtport; 636 fod->queue = queue; 637 fod->active = false; 638 fod->abort = false; 639 fod->aborted = false; 640 fod->fcpreq = NULL; 641 list_add_tail(&fod->fcp_list, &queue->fod_list); 642 spin_lock_init(&fod->flock); 643 644 fod->rspdma = fc_dma_map_single(tgtport->dev, &fod->rspiubuf, 645 sizeof(fod->rspiubuf), DMA_TO_DEVICE); 646 if (fc_dma_mapping_error(tgtport->dev, fod->rspdma)) { 647 list_del(&fod->fcp_list); 648 for (fod--, i--; i >= 0; fod--, i--) { 649 fc_dma_unmap_single(tgtport->dev, fod->rspdma, 650 sizeof(fod->rspiubuf), 651 DMA_TO_DEVICE); 652 fod->rspdma = 0L; 653 list_del(&fod->fcp_list); 654 } 655 656 return; 657 } 658 } 659 } 660 661 static void 662 nvmet_fc_destroy_fcp_iodlist(struct nvmet_fc_tgtport *tgtport, 663 struct nvmet_fc_tgt_queue *queue) 664 { 665 struct nvmet_fc_fcp_iod *fod = queue->fod; 666 int i; 667 668 for (i = 0; i < queue->sqsize; fod++, i++) { 669 if (fod->rspdma) 670 fc_dma_unmap_single(tgtport->dev, fod->rspdma, 671 sizeof(fod->rspiubuf), DMA_TO_DEVICE); 672 } 673 } 674 675 static struct nvmet_fc_fcp_iod * 676 nvmet_fc_alloc_fcp_iod(struct nvmet_fc_tgt_queue *queue) 677 { 678 struct nvmet_fc_fcp_iod *fod; 679 680 lockdep_assert_held(&queue->qlock); 681 682 fod = list_first_entry_or_null(&queue->fod_list, 683 struct nvmet_fc_fcp_iod, fcp_list); 684 if (fod) { 685 list_del(&fod->fcp_list); 686 fod->active = true; 687 /* 688 * no queue reference is taken, as it was taken by the 689 * queue lookup just prior to the allocation. The iod 690 * will "inherit" that reference. 691 */ 692 } 693 return fod; 694 } 695 696 697 static void 698 nvmet_fc_queue_fcp_req(struct nvmet_fc_tgtport *tgtport, 699 struct nvmet_fc_tgt_queue *queue, 700 struct nvmefc_tgt_fcp_req *fcpreq) 701 { 702 struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private; 703 704 /* 705 * put all admin cmds on hw queue id 0. 
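 * (Illustrative note, not taken from the FC-NVME spec: with an LLDD
 * reporting max_hw_queues == 4, io queue qids 1..8 would land on hw
 * queues 0,1,2,3,0,1,2,3 via the (qid - 1) % max_hw_queues math below.)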
All io commands go to 706 * the respective hw queue based on a modulo basis 707 */ 708 fcpreq->hwqid = queue->qid ? 709 ((queue->qid - 1) % tgtport->ops->max_hw_queues) : 0; 710 711 nvmet_fc_handle_fcp_rqst(tgtport, fod); 712 } 713 714 static void 715 nvmet_fc_fcp_rqst_op_defer_work(struct work_struct *work) 716 { 717 struct nvmet_fc_fcp_iod *fod = 718 container_of(work, struct nvmet_fc_fcp_iod, defer_work); 719 720 /* Submit deferred IO for processing */ 721 nvmet_fc_queue_fcp_req(fod->tgtport, fod->queue, fod->fcpreq); 722 723 } 724 725 static void 726 nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue, 727 struct nvmet_fc_fcp_iod *fod) 728 { 729 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; 730 struct nvmet_fc_tgtport *tgtport = fod->tgtport; 731 struct nvmet_fc_defer_fcp_req *deferfcp; 732 unsigned long flags; 733 734 fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma, 735 sizeof(fod->rspiubuf), DMA_TO_DEVICE); 736 737 fcpreq->nvmet_fc_private = NULL; 738 739 fod->active = false; 740 fod->abort = false; 741 fod->aborted = false; 742 fod->writedataactive = false; 743 fod->fcpreq = NULL; 744 745 tgtport->ops->fcp_req_release(&tgtport->fc_target_port, fcpreq); 746 747 /* release the queue lookup reference on the completed IO */ 748 nvmet_fc_tgt_q_put(queue); 749 750 spin_lock_irqsave(&queue->qlock, flags); 751 deferfcp = list_first_entry_or_null(&queue->pending_cmd_list, 752 struct nvmet_fc_defer_fcp_req, req_list); 753 if (!deferfcp) { 754 list_add_tail(&fod->fcp_list, &fod->queue->fod_list); 755 spin_unlock_irqrestore(&queue->qlock, flags); 756 return; 757 } 758 759 /* Re-use the fod for the next pending cmd that was deferred */ 760 list_del(&deferfcp->req_list); 761 762 fcpreq = deferfcp->fcp_req; 763 764 /* deferfcp can be reused for another IO at a later date */ 765 list_add_tail(&deferfcp->req_list, &queue->avail_defer_list); 766 767 spin_unlock_irqrestore(&queue->qlock, flags); 768 769 /* Save NVME CMD IO in fod */ 770 memcpy(&fod->cmdiubuf, fcpreq->rspaddr, fcpreq->rsplen); 771 772 /* Setup new fcpreq to be processed */ 773 fcpreq->rspaddr = NULL; 774 fcpreq->rsplen = 0; 775 fcpreq->nvmet_fc_private = fod; 776 fod->fcpreq = fcpreq; 777 fod->active = true; 778 779 /* inform LLDD IO is now being processed */ 780 tgtport->ops->defer_rcv(&tgtport->fc_target_port, fcpreq); 781 782 /* 783 * Leave the queue lookup get reference taken when 784 * fod was originally allocated. 
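 * The deferred command now owns this fod: defer_work will feed it back
 * through nvmet_fc_queue_fcp_req() so it is processed like a freshly
 * received command.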
785 */ 786 787 queue_work(queue->work_q, &fod->defer_work); 788 } 789 790 static struct nvmet_fc_tgt_queue * 791 nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc, 792 u16 qid, u16 sqsize) 793 { 794 struct nvmet_fc_tgt_queue *queue; 795 int ret; 796 797 if (qid > NVMET_NR_QUEUES) 798 return NULL; 799 800 queue = kzalloc(struct_size(queue, fod, sqsize), GFP_KERNEL); 801 if (!queue) 802 return NULL; 803 804 if (!nvmet_fc_tgt_a_get(assoc)) 805 goto out_free_queue; 806 807 queue->work_q = alloc_workqueue("ntfc%d.%d.%d", 0, 0, 808 assoc->tgtport->fc_target_port.port_num, 809 assoc->a_id, qid); 810 if (!queue->work_q) 811 goto out_a_put; 812 813 queue->qid = qid; 814 queue->sqsize = sqsize; 815 queue->assoc = assoc; 816 INIT_LIST_HEAD(&queue->fod_list); 817 INIT_LIST_HEAD(&queue->avail_defer_list); 818 INIT_LIST_HEAD(&queue->pending_cmd_list); 819 atomic_set(&queue->connected, 0); 820 atomic_set(&queue->sqtail, 0); 821 atomic_set(&queue->rsn, 1); 822 atomic_set(&queue->zrspcnt, 0); 823 spin_lock_init(&queue->qlock); 824 kref_init(&queue->ref); 825 826 nvmet_fc_prep_fcp_iodlist(assoc->tgtport, queue); 827 828 ret = nvmet_sq_init(&queue->nvme_sq); 829 if (ret) 830 goto out_fail_iodlist; 831 832 WARN_ON(assoc->queues[qid]); 833 rcu_assign_pointer(assoc->queues[qid], queue); 834 835 return queue; 836 837 out_fail_iodlist: 838 nvmet_fc_destroy_fcp_iodlist(assoc->tgtport, queue); 839 destroy_workqueue(queue->work_q); 840 out_a_put: 841 nvmet_fc_tgt_a_put(assoc); 842 out_free_queue: 843 kfree(queue); 844 return NULL; 845 } 846 847 848 static void 849 nvmet_fc_tgt_queue_free(struct kref *ref) 850 { 851 struct nvmet_fc_tgt_queue *queue = 852 container_of(ref, struct nvmet_fc_tgt_queue, ref); 853 854 rcu_assign_pointer(queue->assoc->queues[queue->qid], NULL); 855 856 nvmet_fc_destroy_fcp_iodlist(queue->assoc->tgtport, queue); 857 858 nvmet_fc_tgt_a_put(queue->assoc); 859 860 destroy_workqueue(queue->work_q); 861 862 kfree_rcu(queue, rcu); 863 } 864 865 static void 866 nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue) 867 { 868 kref_put(&queue->ref, nvmet_fc_tgt_queue_free); 869 } 870 871 static int 872 nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue) 873 { 874 return kref_get_unless_zero(&queue->ref); 875 } 876 877 878 static void 879 nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue) 880 { 881 struct nvmet_fc_tgtport *tgtport = queue->assoc->tgtport; 882 struct nvmet_fc_fcp_iod *fod = queue->fod; 883 struct nvmet_fc_defer_fcp_req *deferfcp, *tempptr; 884 unsigned long flags; 885 int i; 886 bool disconnect; 887 888 disconnect = atomic_xchg(&queue->connected, 0); 889 890 /* if not connected, nothing to do */ 891 if (!disconnect) 892 return; 893 894 spin_lock_irqsave(&queue->qlock, flags); 895 /* abort outstanding io's */ 896 for (i = 0; i < queue->sqsize; fod++, i++) { 897 if (fod->active) { 898 spin_lock(&fod->flock); 899 fod->abort = true; 900 /* 901 * only call lldd abort routine if waiting for 902 * writedata. other outstanding ops should finish 903 * on their own. 
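 * (Reads and responses already handed to the LLDD will complete
 * through their normal done callbacks; only a host-to-target data
 * transfer that is still outstanding needs an explicit abort.)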
904 */ 905 if (fod->writedataactive) { 906 fod->aborted = true; 907 spin_unlock(&fod->flock); 908 tgtport->ops->fcp_abort( 909 &tgtport->fc_target_port, fod->fcpreq); 910 } else 911 spin_unlock(&fod->flock); 912 } 913 } 914 915 /* Cleanup defer'ed IOs in queue */ 916 list_for_each_entry_safe(deferfcp, tempptr, &queue->avail_defer_list, 917 req_list) { 918 list_del(&deferfcp->req_list); 919 kfree(deferfcp); 920 } 921 922 for (;;) { 923 deferfcp = list_first_entry_or_null(&queue->pending_cmd_list, 924 struct nvmet_fc_defer_fcp_req, req_list); 925 if (!deferfcp) 926 break; 927 928 list_del(&deferfcp->req_list); 929 spin_unlock_irqrestore(&queue->qlock, flags); 930 931 tgtport->ops->defer_rcv(&tgtport->fc_target_port, 932 deferfcp->fcp_req); 933 934 tgtport->ops->fcp_abort(&tgtport->fc_target_port, 935 deferfcp->fcp_req); 936 937 tgtport->ops->fcp_req_release(&tgtport->fc_target_port, 938 deferfcp->fcp_req); 939 940 /* release the queue lookup reference */ 941 nvmet_fc_tgt_q_put(queue); 942 943 kfree(deferfcp); 944 945 spin_lock_irqsave(&queue->qlock, flags); 946 } 947 spin_unlock_irqrestore(&queue->qlock, flags); 948 949 flush_workqueue(queue->work_q); 950 951 nvmet_sq_destroy(&queue->nvme_sq); 952 953 nvmet_fc_tgt_q_put(queue); 954 } 955 956 static struct nvmet_fc_tgt_queue * 957 nvmet_fc_find_target_queue(struct nvmet_fc_tgtport *tgtport, 958 u64 connection_id) 959 { 960 struct nvmet_fc_tgt_assoc *assoc; 961 struct nvmet_fc_tgt_queue *queue; 962 u64 association_id = nvmet_fc_getassociationid(connection_id); 963 u16 qid = nvmet_fc_getqueueid(connection_id); 964 965 if (qid > NVMET_NR_QUEUES) 966 return NULL; 967 968 rcu_read_lock(); 969 list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) { 970 if (association_id == assoc->association_id) { 971 queue = rcu_dereference(assoc->queues[qid]); 972 if (queue && 973 (!atomic_read(&queue->connected) || 974 !nvmet_fc_tgt_q_get(queue))) 975 queue = NULL; 976 rcu_read_unlock(); 977 return queue; 978 } 979 } 980 rcu_read_unlock(); 981 return NULL; 982 } 983 984 static void 985 nvmet_fc_hostport_free(struct kref *ref) 986 { 987 struct nvmet_fc_hostport *hostport = 988 container_of(ref, struct nvmet_fc_hostport, ref); 989 struct nvmet_fc_tgtport *tgtport = hostport->tgtport; 990 unsigned long flags; 991 992 spin_lock_irqsave(&tgtport->lock, flags); 993 list_del(&hostport->host_list); 994 spin_unlock_irqrestore(&tgtport->lock, flags); 995 if (tgtport->ops->host_release && hostport->invalid) 996 tgtport->ops->host_release(hostport->hosthandle); 997 kfree(hostport); 998 nvmet_fc_tgtport_put(tgtport); 999 } 1000 1001 static void 1002 nvmet_fc_hostport_put(struct nvmet_fc_hostport *hostport) 1003 { 1004 kref_put(&hostport->ref, nvmet_fc_hostport_free); 1005 } 1006 1007 static int 1008 nvmet_fc_hostport_get(struct nvmet_fc_hostport *hostport) 1009 { 1010 return kref_get_unless_zero(&hostport->ref); 1011 } 1012 1013 static void 1014 nvmet_fc_free_hostport(struct nvmet_fc_hostport *hostport) 1015 { 1016 /* if LLDD not implemented, leave as NULL */ 1017 if (!hostport || !hostport->hosthandle) 1018 return; 1019 1020 nvmet_fc_hostport_put(hostport); 1021 } 1022 1023 static struct nvmet_fc_hostport * 1024 nvmet_fc_match_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle) 1025 { 1026 struct nvmet_fc_hostport *host; 1027 1028 lockdep_assert_held(&tgtport->lock); 1029 1030 list_for_each_entry(host, &tgtport->host_list, host_list) { 1031 if (host->hosthandle == hosthandle && !host->invalid) { 1032 if (nvmet_fc_hostport_get(host)) 1033 return (host); 1034 
} 1035 } 1036 1037 return NULL; 1038 } 1039 1040 static struct nvmet_fc_hostport * 1041 nvmet_fc_alloc_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle) 1042 { 1043 struct nvmet_fc_hostport *newhost, *match = NULL; 1044 unsigned long flags; 1045 1046 /* if LLDD not implemented, leave as NULL */ 1047 if (!hosthandle) 1048 return NULL; 1049 1050 /* 1051 * take reference for what will be the newly allocated hostport if 1052 * we end up using a new allocation 1053 */ 1054 if (!nvmet_fc_tgtport_get(tgtport)) 1055 return ERR_PTR(-EINVAL); 1056 1057 spin_lock_irqsave(&tgtport->lock, flags); 1058 match = nvmet_fc_match_hostport(tgtport, hosthandle); 1059 spin_unlock_irqrestore(&tgtport->lock, flags); 1060 1061 if (match) { 1062 /* no new allocation - release reference */ 1063 nvmet_fc_tgtport_put(tgtport); 1064 return match; 1065 } 1066 1067 newhost = kzalloc(sizeof(*newhost), GFP_KERNEL); 1068 if (!newhost) { 1069 /* no new allocation - release reference */ 1070 nvmet_fc_tgtport_put(tgtport); 1071 return ERR_PTR(-ENOMEM); 1072 } 1073 1074 spin_lock_irqsave(&tgtport->lock, flags); 1075 match = nvmet_fc_match_hostport(tgtport, hosthandle); 1076 if (match) { 1077 /* new allocation not needed */ 1078 kfree(newhost); 1079 newhost = match; 1080 /* no new allocation - release reference */ 1081 nvmet_fc_tgtport_put(tgtport); 1082 } else { 1083 newhost->tgtport = tgtport; 1084 newhost->hosthandle = hosthandle; 1085 INIT_LIST_HEAD(&newhost->host_list); 1086 kref_init(&newhost->ref); 1087 1088 list_add_tail(&newhost->host_list, &tgtport->host_list); 1089 } 1090 spin_unlock_irqrestore(&tgtport->lock, flags); 1091 1092 return newhost; 1093 } 1094 1095 static void 1096 nvmet_fc_delete_assoc(struct work_struct *work) 1097 { 1098 struct nvmet_fc_tgt_assoc *assoc = 1099 container_of(work, struct nvmet_fc_tgt_assoc, del_work); 1100 1101 nvmet_fc_delete_target_assoc(assoc); 1102 nvmet_fc_tgt_a_put(assoc); 1103 } 1104 1105 static struct nvmet_fc_tgt_assoc * 1106 nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport, void *hosthandle) 1107 { 1108 struct nvmet_fc_tgt_assoc *assoc, *tmpassoc; 1109 unsigned long flags; 1110 u64 ran; 1111 int idx; 1112 bool needrandom = true; 1113 1114 assoc = kzalloc(sizeof(*assoc), GFP_KERNEL); 1115 if (!assoc) 1116 return NULL; 1117 1118 idx = ida_alloc(&tgtport->assoc_cnt, GFP_KERNEL); 1119 if (idx < 0) 1120 goto out_free_assoc; 1121 1122 if (!nvmet_fc_tgtport_get(tgtport)) 1123 goto out_ida; 1124 1125 assoc->hostport = nvmet_fc_alloc_hostport(tgtport, hosthandle); 1126 if (IS_ERR(assoc->hostport)) 1127 goto out_put; 1128 1129 assoc->tgtport = tgtport; 1130 assoc->a_id = idx; 1131 INIT_LIST_HEAD(&assoc->a_list); 1132 kref_init(&assoc->ref); 1133 INIT_WORK(&assoc->del_work, nvmet_fc_delete_assoc); 1134 atomic_set(&assoc->terminating, 0); 1135 1136 while (needrandom) { 1137 get_random_bytes(&ran, sizeof(ran) - BYTES_FOR_QID); 1138 ran = ran << BYTES_FOR_QID_SHIFT; 1139 1140 spin_lock_irqsave(&tgtport->lock, flags); 1141 needrandom = false; 1142 list_for_each_entry(tmpassoc, &tgtport->assoc_list, a_list) { 1143 if (ran == tmpassoc->association_id) { 1144 needrandom = true; 1145 break; 1146 } 1147 } 1148 if (!needrandom) { 1149 assoc->association_id = ran; 1150 list_add_tail_rcu(&assoc->a_list, &tgtport->assoc_list); 1151 } 1152 spin_unlock_irqrestore(&tgtport->lock, flags); 1153 } 1154 1155 return assoc; 1156 1157 out_put: 1158 nvmet_fc_tgtport_put(tgtport); 1159 out_ida: 1160 ida_free(&tgtport->assoc_cnt, idx); 1161 out_free_assoc: 1162 kfree(assoc); 1163 return NULL; 
1164 } 1165 1166 static void 1167 nvmet_fc_target_assoc_free(struct kref *ref) 1168 { 1169 struct nvmet_fc_tgt_assoc *assoc = 1170 container_of(ref, struct nvmet_fc_tgt_assoc, ref); 1171 struct nvmet_fc_tgtport *tgtport = assoc->tgtport; 1172 struct nvmet_fc_ls_iod *oldls; 1173 unsigned long flags; 1174 1175 /* Send Disconnect now that all i/o has completed */ 1176 nvmet_fc_xmt_disconnect_assoc(assoc); 1177 1178 nvmet_fc_free_hostport(assoc->hostport); 1179 spin_lock_irqsave(&tgtport->lock, flags); 1180 list_del_rcu(&assoc->a_list); 1181 oldls = assoc->rcv_disconn; 1182 spin_unlock_irqrestore(&tgtport->lock, flags); 1183 /* if pending Rcv Disconnect Association LS, send rsp now */ 1184 if (oldls) 1185 nvmet_fc_xmt_ls_rsp(tgtport, oldls); 1186 ida_free(&tgtport->assoc_cnt, assoc->a_id); 1187 dev_info(tgtport->dev, 1188 "{%d:%d} Association freed\n", 1189 tgtport->fc_target_port.port_num, assoc->a_id); 1190 kfree_rcu(assoc, rcu); 1191 nvmet_fc_tgtport_put(tgtport); 1192 } 1193 1194 static void 1195 nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc) 1196 { 1197 kref_put(&assoc->ref, nvmet_fc_target_assoc_free); 1198 } 1199 1200 static int 1201 nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc) 1202 { 1203 return kref_get_unless_zero(&assoc->ref); 1204 } 1205 1206 static void 1207 nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc) 1208 { 1209 struct nvmet_fc_tgtport *tgtport = assoc->tgtport; 1210 struct nvmet_fc_tgt_queue *queue; 1211 int i, terminating; 1212 1213 terminating = atomic_xchg(&assoc->terminating, 1); 1214 1215 /* if already terminating, do nothing */ 1216 if (terminating) 1217 return; 1218 1219 1220 for (i = NVMET_NR_QUEUES; i >= 0; i--) { 1221 rcu_read_lock(); 1222 queue = rcu_dereference(assoc->queues[i]); 1223 if (!queue) { 1224 rcu_read_unlock(); 1225 continue; 1226 } 1227 1228 if (!nvmet_fc_tgt_q_get(queue)) { 1229 rcu_read_unlock(); 1230 continue; 1231 } 1232 rcu_read_unlock(); 1233 nvmet_fc_delete_target_queue(queue); 1234 nvmet_fc_tgt_q_put(queue); 1235 } 1236 1237 dev_info(tgtport->dev, 1238 "{%d:%d} Association deleted\n", 1239 tgtport->fc_target_port.port_num, assoc->a_id); 1240 1241 nvmet_fc_tgt_a_put(assoc); 1242 } 1243 1244 static struct nvmet_fc_tgt_assoc * 1245 nvmet_fc_find_target_assoc(struct nvmet_fc_tgtport *tgtport, 1246 u64 association_id) 1247 { 1248 struct nvmet_fc_tgt_assoc *assoc; 1249 struct nvmet_fc_tgt_assoc *ret = NULL; 1250 1251 rcu_read_lock(); 1252 list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) { 1253 if (association_id == assoc->association_id) { 1254 ret = assoc; 1255 if (!nvmet_fc_tgt_a_get(assoc)) 1256 ret = NULL; 1257 break; 1258 } 1259 } 1260 rcu_read_unlock(); 1261 1262 return ret; 1263 } 1264 1265 static void 1266 nvmet_fc_portentry_bind(struct nvmet_fc_tgtport *tgtport, 1267 struct nvmet_fc_port_entry *pe, 1268 struct nvmet_port *port) 1269 { 1270 lockdep_assert_held(&nvmet_fc_tgtlock); 1271 1272 pe->tgtport = tgtport; 1273 tgtport->pe = pe; 1274 1275 pe->port = port; 1276 port->priv = pe; 1277 1278 pe->node_name = tgtport->fc_target_port.node_name; 1279 pe->port_name = tgtport->fc_target_port.port_name; 1280 INIT_LIST_HEAD(&pe->pe_list); 1281 1282 list_add_tail(&pe->pe_list, &nvmet_fc_portentry_list); 1283 } 1284 1285 static void 1286 nvmet_fc_portentry_unbind(struct nvmet_fc_port_entry *pe) 1287 { 1288 unsigned long flags; 1289 1290 spin_lock_irqsave(&nvmet_fc_tgtlock, flags); 1291 if (pe->tgtport) 1292 pe->tgtport->pe = NULL; 1293 list_del(&pe->pe_list); 1294 spin_unlock_irqrestore(&nvmet_fc_tgtlock, 
			flags);
}

/*
 * called when a targetport deregisters. Breaks the relationship
 * with the nvmet port, but leaves the port_entry in place so that
 * re-registration can resume operation.
 */
static void
nvmet_fc_portentry_unbind_tgt(struct nvmet_fc_tgtport *tgtport)
{
	struct nvmet_fc_port_entry *pe;
	unsigned long flags;

	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	pe = tgtport->pe;
	if (pe)
		pe->tgtport = NULL;
	tgtport->pe = NULL;
	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
}

/*
 * called when a new targetport is registered. Looks in the
 * existing nvmet port_entries to see if the nvmet layer is
 * configured for the targetport's wwn's. (the targetport existed,
 * nvmet configured, the lldd unregistered the tgtport, and is now
 * reregistering the same targetport). If so, set the nvmet port
 * port entry on the targetport.
 */
static void
nvmet_fc_portentry_rebind_tgt(struct nvmet_fc_tgtport *tgtport)
{
	struct nvmet_fc_port_entry *pe;
	unsigned long flags;

	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	list_for_each_entry(pe, &nvmet_fc_portentry_list, pe_list) {
		if (tgtport->fc_target_port.node_name == pe->node_name &&
		    tgtport->fc_target_port.port_name == pe->port_name) {
			WARN_ON(pe->tgtport);
			tgtport->pe = pe;
			pe->tgtport = tgtport;
			break;
		}
	}
	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
}

/**
 * nvmet_fc_register_targetport - transport entry point called by an
 *                        LLDD to register the existence of a local
 *                        NVME subsystem FC port.
 * @pinfo:     pointer to information about the port to be registered
 * @template:  LLDD entrypoints and operational parameters for the port
 * @dev:       physical hardware device node port corresponds to. Will be
 *             used for DMA mappings
 * @portptr:   pointer to a target port pointer. Upon success, the routine
 *             will allocate a nvmet_fc_target_port structure and place its
 *             address in the target port pointer. Upon failure, the target
 *             port pointer will be set to NULL.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
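 *
 * A minimal registration sketch (illustrative only; the lldd_* names
 * and the numeric values are hypothetical placeholders, not part of
 * this API):
 *
 *	static struct nvmet_fc_target_template lldd_tgt_template = {
 *		.targetport_delete	= lldd_targetport_delete,
 *		.xmt_ls_rsp		= lldd_xmt_ls_rsp,
 *		.fcp_op			= lldd_fcp_op,
 *		.fcp_abort		= lldd_fcp_abort,
 *		.fcp_req_release	= lldd_fcp_req_release,
 *		.max_hw_queues		= 4,
 *		.max_sgl_segments	= 256,
 *		.max_dif_sgl_segments	= 256,
 *		.dma_boundary		= 0xFFFFFFFF,
 *	};
 *
 *	struct nvmet_fc_port_info pinfo = {
 *		.node_name	= lldd_wwnn,
 *		.port_name	= lldd_wwpn,
 *		.port_id	= lldd_d_id,
 *	};
 *	struct nvmet_fc_target_port *targetport;
 *	int ret;
 *
 *	ret = nvmet_fc_register_targetport(&pinfo, &lldd_tgt_template,
 *					   lldd_dev, &targetport);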
1359 */ 1360 int 1361 nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo, 1362 struct nvmet_fc_target_template *template, 1363 struct device *dev, 1364 struct nvmet_fc_target_port **portptr) 1365 { 1366 struct nvmet_fc_tgtport *newrec; 1367 unsigned long flags; 1368 int ret, idx; 1369 1370 if (!template->xmt_ls_rsp || !template->fcp_op || 1371 !template->fcp_abort || 1372 !template->fcp_req_release || !template->targetport_delete || 1373 !template->max_hw_queues || !template->max_sgl_segments || 1374 !template->max_dif_sgl_segments || !template->dma_boundary) { 1375 ret = -EINVAL; 1376 goto out_regtgt_failed; 1377 } 1378 1379 newrec = kzalloc((sizeof(*newrec) + template->target_priv_sz), 1380 GFP_KERNEL); 1381 if (!newrec) { 1382 ret = -ENOMEM; 1383 goto out_regtgt_failed; 1384 } 1385 1386 idx = ida_alloc(&nvmet_fc_tgtport_cnt, GFP_KERNEL); 1387 if (idx < 0) { 1388 ret = -ENOSPC; 1389 goto out_fail_kfree; 1390 } 1391 1392 if (!get_device(dev) && dev) { 1393 ret = -ENODEV; 1394 goto out_ida_put; 1395 } 1396 1397 newrec->fc_target_port.node_name = pinfo->node_name; 1398 newrec->fc_target_port.port_name = pinfo->port_name; 1399 if (template->target_priv_sz) 1400 newrec->fc_target_port.private = &newrec[1]; 1401 else 1402 newrec->fc_target_port.private = NULL; 1403 newrec->fc_target_port.port_id = pinfo->port_id; 1404 newrec->fc_target_port.port_num = idx; 1405 INIT_LIST_HEAD(&newrec->tgt_list); 1406 newrec->dev = dev; 1407 newrec->ops = template; 1408 spin_lock_init(&newrec->lock); 1409 INIT_LIST_HEAD(&newrec->ls_rcv_list); 1410 INIT_LIST_HEAD(&newrec->ls_req_list); 1411 INIT_LIST_HEAD(&newrec->ls_busylist); 1412 INIT_LIST_HEAD(&newrec->assoc_list); 1413 INIT_LIST_HEAD(&newrec->host_list); 1414 kref_init(&newrec->ref); 1415 ida_init(&newrec->assoc_cnt); 1416 newrec->max_sg_cnt = template->max_sgl_segments; 1417 1418 ret = nvmet_fc_alloc_ls_iodlist(newrec); 1419 if (ret) { 1420 ret = -ENOMEM; 1421 goto out_free_newrec; 1422 } 1423 1424 nvmet_fc_portentry_rebind_tgt(newrec); 1425 1426 spin_lock_irqsave(&nvmet_fc_tgtlock, flags); 1427 list_add_tail(&newrec->tgt_list, &nvmet_fc_target_list); 1428 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); 1429 1430 *portptr = &newrec->fc_target_port; 1431 return 0; 1432 1433 out_free_newrec: 1434 put_device(dev); 1435 out_ida_put: 1436 ida_free(&nvmet_fc_tgtport_cnt, idx); 1437 out_fail_kfree: 1438 kfree(newrec); 1439 out_regtgt_failed: 1440 *portptr = NULL; 1441 return ret; 1442 } 1443 EXPORT_SYMBOL_GPL(nvmet_fc_register_targetport); 1444 1445 1446 static void 1447 nvmet_fc_free_tgtport(struct kref *ref) 1448 { 1449 struct nvmet_fc_tgtport *tgtport = 1450 container_of(ref, struct nvmet_fc_tgtport, ref); 1451 struct device *dev = tgtport->dev; 1452 unsigned long flags; 1453 1454 spin_lock_irqsave(&nvmet_fc_tgtlock, flags); 1455 list_del(&tgtport->tgt_list); 1456 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); 1457 1458 nvmet_fc_free_ls_iodlist(tgtport); 1459 1460 /* let the LLDD know we've finished tearing it down */ 1461 tgtport->ops->targetport_delete(&tgtport->fc_target_port); 1462 1463 ida_free(&nvmet_fc_tgtport_cnt, 1464 tgtport->fc_target_port.port_num); 1465 1466 ida_destroy(&tgtport->assoc_cnt); 1467 1468 kfree(tgtport); 1469 1470 put_device(dev); 1471 } 1472 1473 static void 1474 nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport) 1475 { 1476 kref_put(&tgtport->ref, nvmet_fc_free_tgtport); 1477 } 1478 1479 static int 1480 nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport) 1481 { 1482 return kref_get_unless_zero(&tgtport->ref); 
1483 } 1484 1485 static void 1486 __nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport) 1487 { 1488 struct nvmet_fc_tgt_assoc *assoc; 1489 1490 rcu_read_lock(); 1491 list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) { 1492 if (!nvmet_fc_tgt_a_get(assoc)) 1493 continue; 1494 if (!queue_work(nvmet_wq, &assoc->del_work)) 1495 /* already deleting - release local reference */ 1496 nvmet_fc_tgt_a_put(assoc); 1497 } 1498 rcu_read_unlock(); 1499 } 1500 1501 /** 1502 * nvmet_fc_invalidate_host - transport entry point called by an LLDD 1503 * to remove references to a hosthandle for LS's. 1504 * 1505 * The nvmet-fc layer ensures that any references to the hosthandle 1506 * on the targetport are forgotten (set to NULL). The LLDD will 1507 * typically call this when a login with a remote host port has been 1508 * lost, thus LS's for the remote host port are no longer possible. 1509 * 1510 * If an LS request is outstanding to the targetport/hosthandle (or 1511 * issued concurrently with the call to invalidate the host), the 1512 * LLDD is responsible for terminating/aborting the LS and completing 1513 * the LS request. It is recommended that these terminations/aborts 1514 * occur after calling to invalidate the host handle to avoid additional 1515 * retries by the nvmet-fc transport. The nvmet-fc transport may 1516 * continue to reference host handle while it cleans up outstanding 1517 * NVME associations. The nvmet-fc transport will call the 1518 * ops->host_release() callback to notify the LLDD that all references 1519 * are complete and the related host handle can be recovered. 1520 * Note: if there are no references, the callback may be called before 1521 * the invalidate host call returns. 1522 * 1523 * @target_port: pointer to the (registered) target port that a prior 1524 * LS was received on and which supplied the transport the 1525 * hosthandle. 1526 * @hosthandle: the handle (pointer) that represents the host port 1527 * that no longer has connectivity and that LS's should 1528 * no longer be directed to. 
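 *
 * A typical invocation from an LLDD that has lost its login with a
 * remote host port might look like (the lldd_* and rport names are
 * hypothetical LLDD-side variables, not part of this API):
 *
 *	nvmet_fc_invalidate_host(lldd->targetport, rport->nvmet_hosthandle);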
1529 */ 1530 void 1531 nvmet_fc_invalidate_host(struct nvmet_fc_target_port *target_port, 1532 void *hosthandle) 1533 { 1534 struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port); 1535 struct nvmet_fc_tgt_assoc *assoc, *next; 1536 unsigned long flags; 1537 bool noassoc = true; 1538 1539 spin_lock_irqsave(&tgtport->lock, flags); 1540 list_for_each_entry_safe(assoc, next, 1541 &tgtport->assoc_list, a_list) { 1542 if (!assoc->hostport || 1543 assoc->hostport->hosthandle != hosthandle) 1544 continue; 1545 if (!nvmet_fc_tgt_a_get(assoc)) 1546 continue; 1547 assoc->hostport->invalid = 1; 1548 noassoc = false; 1549 if (!queue_work(nvmet_wq, &assoc->del_work)) 1550 /* already deleting - release local reference */ 1551 nvmet_fc_tgt_a_put(assoc); 1552 } 1553 spin_unlock_irqrestore(&tgtport->lock, flags); 1554 1555 /* if there's nothing to wait for - call the callback */ 1556 if (noassoc && tgtport->ops->host_release) 1557 tgtport->ops->host_release(hosthandle); 1558 } 1559 EXPORT_SYMBOL_GPL(nvmet_fc_invalidate_host); 1560 1561 /* 1562 * nvmet layer has called to terminate an association 1563 */ 1564 static void 1565 nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl) 1566 { 1567 struct nvmet_fc_tgtport *tgtport, *next; 1568 struct nvmet_fc_tgt_assoc *assoc; 1569 struct nvmet_fc_tgt_queue *queue; 1570 unsigned long flags; 1571 bool found_ctrl = false; 1572 1573 /* this is a bit ugly, but don't want to make locks layered */ 1574 spin_lock_irqsave(&nvmet_fc_tgtlock, flags); 1575 list_for_each_entry_safe(tgtport, next, &nvmet_fc_target_list, 1576 tgt_list) { 1577 if (!nvmet_fc_tgtport_get(tgtport)) 1578 continue; 1579 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); 1580 1581 rcu_read_lock(); 1582 list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) { 1583 queue = rcu_dereference(assoc->queues[0]); 1584 if (queue && queue->nvme_sq.ctrl == ctrl) { 1585 if (nvmet_fc_tgt_a_get(assoc)) 1586 found_ctrl = true; 1587 break; 1588 } 1589 } 1590 rcu_read_unlock(); 1591 1592 nvmet_fc_tgtport_put(tgtport); 1593 1594 if (found_ctrl) { 1595 if (!queue_work(nvmet_wq, &assoc->del_work)) 1596 /* already deleting - release local reference */ 1597 nvmet_fc_tgt_a_put(assoc); 1598 return; 1599 } 1600 1601 spin_lock_irqsave(&nvmet_fc_tgtlock, flags); 1602 } 1603 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); 1604 } 1605 1606 /** 1607 * nvmet_fc_unregister_targetport - transport entry point called by an 1608 * LLDD to deregister/remove a previously 1609 * registered a local NVME subsystem FC port. 1610 * @target_port: pointer to the (registered) target port that is to be 1611 * deregistered. 1612 * 1613 * Returns: 1614 * a completion status. Must be 0 upon success; a negative errno 1615 * (ex: -ENXIO) upon failure. 1616 */ 1617 int 1618 nvmet_fc_unregister_targetport(struct nvmet_fc_target_port *target_port) 1619 { 1620 struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port); 1621 1622 nvmet_fc_portentry_unbind_tgt(tgtport); 1623 1624 /* terminate any outstanding associations */ 1625 __nvmet_fc_free_assocs(tgtport); 1626 1627 /* 1628 * should terminate LS's as well. However, LS's will be generated 1629 * at the tail end of association termination, so they likely don't 1630 * exist yet. And even if they did, it's worthwhile to just let 1631 * them finish and targetport ref counting will clean things up. 
1632 */ 1633 1634 nvmet_fc_tgtport_put(tgtport); 1635 1636 return 0; 1637 } 1638 EXPORT_SYMBOL_GPL(nvmet_fc_unregister_targetport); 1639 1640 1641 /* ********************** FC-NVME LS RCV Handling ************************* */ 1642 1643 1644 static void 1645 nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport, 1646 struct nvmet_fc_ls_iod *iod) 1647 { 1648 struct fcnvme_ls_cr_assoc_rqst *rqst = &iod->rqstbuf->rq_cr_assoc; 1649 struct fcnvme_ls_cr_assoc_acc *acc = &iod->rspbuf->rsp_cr_assoc; 1650 struct nvmet_fc_tgt_queue *queue; 1651 int ret = 0; 1652 1653 memset(acc, 0, sizeof(*acc)); 1654 1655 /* 1656 * FC-NVME spec changes. There are initiators sending different 1657 * lengths as padding sizes for Create Association Cmd descriptor 1658 * was incorrect. 1659 * Accept anything of "minimum" length. Assume format per 1.15 1660 * spec (with HOSTID reduced to 16 bytes), ignore how long the 1661 * trailing pad length is. 1662 */ 1663 if (iod->rqstdatalen < FCNVME_LSDESC_CRA_RQST_MINLEN) 1664 ret = VERR_CR_ASSOC_LEN; 1665 else if (be32_to_cpu(rqst->desc_list_len) < 1666 FCNVME_LSDESC_CRA_RQST_MIN_LISTLEN) 1667 ret = VERR_CR_ASSOC_RQST_LEN; 1668 else if (rqst->assoc_cmd.desc_tag != 1669 cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD)) 1670 ret = VERR_CR_ASSOC_CMD; 1671 else if (be32_to_cpu(rqst->assoc_cmd.desc_len) < 1672 FCNVME_LSDESC_CRA_CMD_DESC_MIN_DESCLEN) 1673 ret = VERR_CR_ASSOC_CMD_LEN; 1674 else if (!rqst->assoc_cmd.ersp_ratio || 1675 (be16_to_cpu(rqst->assoc_cmd.ersp_ratio) >= 1676 be16_to_cpu(rqst->assoc_cmd.sqsize))) 1677 ret = VERR_ERSP_RATIO; 1678 1679 else { 1680 /* new association w/ admin queue */ 1681 iod->assoc = nvmet_fc_alloc_target_assoc( 1682 tgtport, iod->hosthandle); 1683 if (!iod->assoc) 1684 ret = VERR_ASSOC_ALLOC_FAIL; 1685 else { 1686 queue = nvmet_fc_alloc_target_queue(iod->assoc, 0, 1687 be16_to_cpu(rqst->assoc_cmd.sqsize)); 1688 if (!queue) { 1689 ret = VERR_QUEUE_ALLOC_FAIL; 1690 nvmet_fc_tgt_a_put(iod->assoc); 1691 } 1692 } 1693 } 1694 1695 if (ret) { 1696 dev_err(tgtport->dev, 1697 "Create Association LS failed: %s\n", 1698 validation_errors[ret]); 1699 iod->lsrsp->rsplen = nvme_fc_format_rjt(acc, 1700 sizeof(*acc), rqst->w0.ls_cmd, 1701 FCNVME_RJT_RC_LOGIC, 1702 FCNVME_RJT_EXP_NONE, 0); 1703 return; 1704 } 1705 1706 queue->ersp_ratio = be16_to_cpu(rqst->assoc_cmd.ersp_ratio); 1707 atomic_set(&queue->connected, 1); 1708 queue->sqhd = 0; /* best place to init value */ 1709 1710 dev_info(tgtport->dev, 1711 "{%d:%d} Association created\n", 1712 tgtport->fc_target_port.port_num, iod->assoc->a_id); 1713 1714 /* format a response */ 1715 1716 iod->lsrsp->rsplen = sizeof(*acc); 1717 1718 nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC, 1719 fcnvme_lsdesc_len( 1720 sizeof(struct fcnvme_ls_cr_assoc_acc)), 1721 FCNVME_LS_CREATE_ASSOCIATION); 1722 acc->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID); 1723 acc->associd.desc_len = 1724 fcnvme_lsdesc_len( 1725 sizeof(struct fcnvme_lsdesc_assoc_id)); 1726 acc->associd.association_id = 1727 cpu_to_be64(nvmet_fc_makeconnid(iod->assoc, 0)); 1728 acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID); 1729 acc->connectid.desc_len = 1730 fcnvme_lsdesc_len( 1731 sizeof(struct fcnvme_lsdesc_conn_id)); 1732 acc->connectid.connection_id = acc->associd.association_id; 1733 } 1734 1735 static void 1736 nvmet_fc_ls_create_connection(struct nvmet_fc_tgtport *tgtport, 1737 struct nvmet_fc_ls_iod *iod) 1738 { 1739 struct fcnvme_ls_cr_conn_rqst *rqst = &iod->rqstbuf->rq_cr_conn; 1740 struct fcnvme_ls_cr_conn_acc *acc = 
&iod->rspbuf->rsp_cr_conn; 1741 struct nvmet_fc_tgt_queue *queue; 1742 int ret = 0; 1743 1744 memset(acc, 0, sizeof(*acc)); 1745 1746 if (iod->rqstdatalen < sizeof(struct fcnvme_ls_cr_conn_rqst)) 1747 ret = VERR_CR_CONN_LEN; 1748 else if (rqst->desc_list_len != 1749 fcnvme_lsdesc_len( 1750 sizeof(struct fcnvme_ls_cr_conn_rqst))) 1751 ret = VERR_CR_CONN_RQST_LEN; 1752 else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID)) 1753 ret = VERR_ASSOC_ID; 1754 else if (rqst->associd.desc_len != 1755 fcnvme_lsdesc_len( 1756 sizeof(struct fcnvme_lsdesc_assoc_id))) 1757 ret = VERR_ASSOC_ID_LEN; 1758 else if (rqst->connect_cmd.desc_tag != 1759 cpu_to_be32(FCNVME_LSDESC_CREATE_CONN_CMD)) 1760 ret = VERR_CR_CONN_CMD; 1761 else if (rqst->connect_cmd.desc_len != 1762 fcnvme_lsdesc_len( 1763 sizeof(struct fcnvme_lsdesc_cr_conn_cmd))) 1764 ret = VERR_CR_CONN_CMD_LEN; 1765 else if (!rqst->connect_cmd.ersp_ratio || 1766 (be16_to_cpu(rqst->connect_cmd.ersp_ratio) >= 1767 be16_to_cpu(rqst->connect_cmd.sqsize))) 1768 ret = VERR_ERSP_RATIO; 1769 1770 else { 1771 /* new io queue */ 1772 iod->assoc = nvmet_fc_find_target_assoc(tgtport, 1773 be64_to_cpu(rqst->associd.association_id)); 1774 if (!iod->assoc) 1775 ret = VERR_NO_ASSOC; 1776 else { 1777 queue = nvmet_fc_alloc_target_queue(iod->assoc, 1778 be16_to_cpu(rqst->connect_cmd.qid), 1779 be16_to_cpu(rqst->connect_cmd.sqsize)); 1780 if (!queue) 1781 ret = VERR_QUEUE_ALLOC_FAIL; 1782 1783 /* release get taken in nvmet_fc_find_target_assoc */ 1784 nvmet_fc_tgt_a_put(iod->assoc); 1785 } 1786 } 1787 1788 if (ret) { 1789 dev_err(tgtport->dev, 1790 "Create Connection LS failed: %s\n", 1791 validation_errors[ret]); 1792 iod->lsrsp->rsplen = nvme_fc_format_rjt(acc, 1793 sizeof(*acc), rqst->w0.ls_cmd, 1794 (ret == VERR_NO_ASSOC) ? 
1795 FCNVME_RJT_RC_INV_ASSOC : 1796 FCNVME_RJT_RC_LOGIC, 1797 FCNVME_RJT_EXP_NONE, 0); 1798 return; 1799 } 1800 1801 queue->ersp_ratio = be16_to_cpu(rqst->connect_cmd.ersp_ratio); 1802 atomic_set(&queue->connected, 1); 1803 queue->sqhd = 0; /* best place to init value */ 1804 1805 /* format a response */ 1806 1807 iod->lsrsp->rsplen = sizeof(*acc); 1808 1809 nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC, 1810 fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc)), 1811 FCNVME_LS_CREATE_CONNECTION); 1812 acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID); 1813 acc->connectid.desc_len = 1814 fcnvme_lsdesc_len( 1815 sizeof(struct fcnvme_lsdesc_conn_id)); 1816 acc->connectid.connection_id = 1817 cpu_to_be64(nvmet_fc_makeconnid(iod->assoc, 1818 be16_to_cpu(rqst->connect_cmd.qid))); 1819 } 1820 1821 /* 1822 * Returns true if the LS response is to be transmit 1823 * Returns false if the LS response is to be delayed 1824 */ 1825 static int 1826 nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport, 1827 struct nvmet_fc_ls_iod *iod) 1828 { 1829 struct fcnvme_ls_disconnect_assoc_rqst *rqst = 1830 &iod->rqstbuf->rq_dis_assoc; 1831 struct fcnvme_ls_disconnect_assoc_acc *acc = 1832 &iod->rspbuf->rsp_dis_assoc; 1833 struct nvmet_fc_tgt_assoc *assoc = NULL; 1834 struct nvmet_fc_ls_iod *oldls = NULL; 1835 unsigned long flags; 1836 int ret = 0; 1837 1838 memset(acc, 0, sizeof(*acc)); 1839 1840 ret = nvmefc_vldt_lsreq_discon_assoc(iod->rqstdatalen, rqst); 1841 if (!ret) { 1842 /* match an active association - takes an assoc ref if !NULL */ 1843 assoc = nvmet_fc_find_target_assoc(tgtport, 1844 be64_to_cpu(rqst->associd.association_id)); 1845 iod->assoc = assoc; 1846 if (!assoc) 1847 ret = VERR_NO_ASSOC; 1848 } 1849 1850 if (ret || !assoc) { 1851 dev_err(tgtport->dev, 1852 "Disconnect LS failed: %s\n", 1853 validation_errors[ret]); 1854 iod->lsrsp->rsplen = nvme_fc_format_rjt(acc, 1855 sizeof(*acc), rqst->w0.ls_cmd, 1856 (ret == VERR_NO_ASSOC) ? 1857 FCNVME_RJT_RC_INV_ASSOC : 1858 FCNVME_RJT_RC_LOGIC, 1859 FCNVME_RJT_EXP_NONE, 0); 1860 return true; 1861 } 1862 1863 /* format a response */ 1864 1865 iod->lsrsp->rsplen = sizeof(*acc); 1866 1867 nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC, 1868 fcnvme_lsdesc_len( 1869 sizeof(struct fcnvme_ls_disconnect_assoc_acc)), 1870 FCNVME_LS_DISCONNECT_ASSOC); 1871 1872 /* release get taken in nvmet_fc_find_target_assoc */ 1873 nvmet_fc_tgt_a_put(assoc); 1874 1875 /* 1876 * The rules for LS response says the response cannot 1877 * go back until ABTS's have been sent for all outstanding 1878 * I/O and a Disconnect Association LS has been sent. 1879 * So... save off the Disconnect LS to send the response 1880 * later. If there was a prior LS already saved, replace 1881 * it with the newer one and send a can't perform reject 1882 * on the older one. 
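 * (The saved iod is eventually completed from nvmet_fc_target_assoc_free(),
 * i.e. only after the association and all of its queues have been torn
 * down and the last reference has been dropped.)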
1883 */ 1884 spin_lock_irqsave(&tgtport->lock, flags); 1885 oldls = assoc->rcv_disconn; 1886 assoc->rcv_disconn = iod; 1887 spin_unlock_irqrestore(&tgtport->lock, flags); 1888 1889 nvmet_fc_delete_target_assoc(assoc); 1890 1891 if (oldls) { 1892 dev_info(tgtport->dev, 1893 "{%d:%d} Multiple Disconnect Association LS's " 1894 "received\n", 1895 tgtport->fc_target_port.port_num, assoc->a_id); 1896 /* overwrite good response with bogus failure */ 1897 oldls->lsrsp->rsplen = nvme_fc_format_rjt(oldls->rspbuf, 1898 sizeof(*iod->rspbuf), 1899 /* ok to use rqst, LS is same */ 1900 rqst->w0.ls_cmd, 1901 FCNVME_RJT_RC_UNAB, 1902 FCNVME_RJT_EXP_NONE, 0); 1903 nvmet_fc_xmt_ls_rsp(tgtport, oldls); 1904 } 1905 1906 return false; 1907 } 1908 1909 1910 /* *********************** NVME Ctrl Routines **************************** */ 1911 1912 1913 static void nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req); 1914 1915 static const struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops; 1916 1917 static void 1918 nvmet_fc_xmt_ls_rsp_done(struct nvmefc_ls_rsp *lsrsp) 1919 { 1920 struct nvmet_fc_ls_iod *iod = lsrsp->nvme_fc_private; 1921 struct nvmet_fc_tgtport *tgtport = iod->tgtport; 1922 1923 fc_dma_sync_single_for_cpu(tgtport->dev, iod->rspdma, 1924 sizeof(*iod->rspbuf), DMA_TO_DEVICE); 1925 nvmet_fc_free_ls_iod(tgtport, iod); 1926 nvmet_fc_tgtport_put(tgtport); 1927 } 1928 1929 static void 1930 nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport, 1931 struct nvmet_fc_ls_iod *iod) 1932 { 1933 int ret; 1934 1935 fc_dma_sync_single_for_device(tgtport->dev, iod->rspdma, 1936 sizeof(*iod->rspbuf), DMA_TO_DEVICE); 1937 1938 ret = tgtport->ops->xmt_ls_rsp(&tgtport->fc_target_port, iod->lsrsp); 1939 if (ret) 1940 nvmet_fc_xmt_ls_rsp_done(iod->lsrsp); 1941 } 1942 1943 /* 1944 * Actual processing routine for received FC-NVME LS Requests from the LLD 1945 */ 1946 static void 1947 nvmet_fc_handle_ls_rqst(struct nvmet_fc_tgtport *tgtport, 1948 struct nvmet_fc_ls_iod *iod) 1949 { 1950 struct fcnvme_ls_rqst_w0 *w0 = &iod->rqstbuf->rq_cr_assoc.w0; 1951 bool sendrsp = true; 1952 1953 iod->lsrsp->nvme_fc_private = iod; 1954 iod->lsrsp->rspbuf = iod->rspbuf; 1955 iod->lsrsp->rspdma = iod->rspdma; 1956 iod->lsrsp->done = nvmet_fc_xmt_ls_rsp_done; 1957 /* Be preventative. 
handlers will later set to valid length */ 1958 iod->lsrsp->rsplen = 0; 1959 1960 iod->assoc = NULL; 1961 1962 /* 1963 * handlers: 1964 * parse request input, execute the request, and format the 1965 * LS response 1966 */ 1967 switch (w0->ls_cmd) { 1968 case FCNVME_LS_CREATE_ASSOCIATION: 1969 /* Creates Association and initial Admin Queue/Connection */ 1970 nvmet_fc_ls_create_association(tgtport, iod); 1971 break; 1972 case FCNVME_LS_CREATE_CONNECTION: 1973 /* Creates an IO Queue/Connection */ 1974 nvmet_fc_ls_create_connection(tgtport, iod); 1975 break; 1976 case FCNVME_LS_DISCONNECT_ASSOC: 1977 /* Terminate a Queue/Connection or the Association */ 1978 sendrsp = nvmet_fc_ls_disconnect(tgtport, iod); 1979 break; 1980 default: 1981 iod->lsrsp->rsplen = nvme_fc_format_rjt(iod->rspbuf, 1982 sizeof(*iod->rspbuf), w0->ls_cmd, 1983 FCNVME_RJT_RC_INVAL, FCNVME_RJT_EXP_NONE, 0); 1984 } 1985 1986 if (sendrsp) 1987 nvmet_fc_xmt_ls_rsp(tgtport, iod); 1988 } 1989 1990 /* 1991 * Actual processing routine for received FC-NVME LS Requests from the LLD 1992 */ 1993 static void 1994 nvmet_fc_handle_ls_rqst_work(struct work_struct *work) 1995 { 1996 struct nvmet_fc_ls_iod *iod = 1997 container_of(work, struct nvmet_fc_ls_iod, work); 1998 struct nvmet_fc_tgtport *tgtport = iod->tgtport; 1999 2000 nvmet_fc_handle_ls_rqst(tgtport, iod); 2001 } 2002 2003 2004 /** 2005 * nvmet_fc_rcv_ls_req - transport entry point called by an LLDD 2006 * upon the reception of a NVME LS request. 2007 * 2008 * The nvmet-fc layer will copy payload to an internal structure for 2009 * processing. As such, upon completion of the routine, the LLDD may 2010 * immediately free/reuse the LS request buffer passed in the call. 2011 * 2012 * If this routine returns error, the LLDD should abort the exchange. 2013 * 2014 * @target_port: pointer to the (registered) target port the LS was 2015 * received on. 2016 * @hosthandle: pointer to the host specific data, gets stored in iod. 2017 * @lsrsp: pointer to a lsrsp structure to be used to reference 2018 * the exchange corresponding to the LS. 2019 * @lsreqbuf: pointer to the buffer containing the LS Request 2020 * @lsreqbuf_len: length, in bytes, of the received LS request 2021 */ 2022 int 2023 nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *target_port, 2024 void *hosthandle, 2025 struct nvmefc_ls_rsp *lsrsp, 2026 void *lsreqbuf, u32 lsreqbuf_len) 2027 { 2028 struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port); 2029 struct nvmet_fc_ls_iod *iod; 2030 struct fcnvme_ls_rqst_w0 *w0 = (struct fcnvme_ls_rqst_w0 *)lsreqbuf; 2031 2032 if (lsreqbuf_len > sizeof(union nvmefc_ls_requests)) { 2033 dev_info(tgtport->dev, 2034 "RCV %s LS failed: payload too large (%d)\n", 2035 (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ? 2036 nvmefc_ls_names[w0->ls_cmd] : "", 2037 lsreqbuf_len); 2038 return -E2BIG; 2039 } 2040 2041 if (!nvmet_fc_tgtport_get(tgtport)) { 2042 dev_info(tgtport->dev, 2043 "RCV %s LS failed: target deleting\n", 2044 (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ? 2045 nvmefc_ls_names[w0->ls_cmd] : ""); 2046 return -ESHUTDOWN; 2047 } 2048 2049 iod = nvmet_fc_alloc_ls_iod(tgtport); 2050 if (!iod) { 2051 dev_info(tgtport->dev, 2052 "RCV %s LS failed: context allocation failed\n", 2053 (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ? 
2054 nvmefc_ls_names[w0->ls_cmd] : ""); 2055 nvmet_fc_tgtport_put(tgtport); 2056 return -ENOENT; 2057 } 2058 2059 iod->lsrsp = lsrsp; 2060 iod->fcpreq = NULL; 2061 memcpy(iod->rqstbuf, lsreqbuf, lsreqbuf_len); 2062 iod->rqstdatalen = lsreqbuf_len; 2063 iod->hosthandle = hosthandle; 2064 2065 queue_work(nvmet_wq, &iod->work); 2066 2067 return 0; 2068 } 2069 EXPORT_SYMBOL_GPL(nvmet_fc_rcv_ls_req); 2070 2071 2072 /* 2073 * ********************** 2074 * Start of FCP handling 2075 * ********************** 2076 */ 2077 2078 static int 2079 nvmet_fc_alloc_tgt_pgs(struct nvmet_fc_fcp_iod *fod) 2080 { 2081 struct scatterlist *sg; 2082 unsigned int nent; 2083 2084 sg = sgl_alloc(fod->req.transfer_len, GFP_KERNEL, &nent); 2085 if (!sg) 2086 goto out; 2087 2088 fod->data_sg = sg; 2089 fod->data_sg_cnt = nent; 2090 fod->data_sg_cnt = fc_dma_map_sg(fod->tgtport->dev, sg, nent, 2091 ((fod->io_dir == NVMET_FCP_WRITE) ? 2092 DMA_FROM_DEVICE : DMA_TO_DEVICE)); 2093 /* note: write from initiator perspective */ 2094 fod->next_sg = fod->data_sg; 2095 2096 return 0; 2097 2098 out: 2099 return NVME_SC_INTERNAL; 2100 } 2101 2102 static void 2103 nvmet_fc_free_tgt_pgs(struct nvmet_fc_fcp_iod *fod) 2104 { 2105 if (!fod->data_sg || !fod->data_sg_cnt) 2106 return; 2107 2108 fc_dma_unmap_sg(fod->tgtport->dev, fod->data_sg, fod->data_sg_cnt, 2109 ((fod->io_dir == NVMET_FCP_WRITE) ? 2110 DMA_FROM_DEVICE : DMA_TO_DEVICE)); 2111 sgl_free(fod->data_sg); 2112 fod->data_sg = NULL; 2113 fod->data_sg_cnt = 0; 2114 } 2115 2116 2117 static bool 2118 queue_90percent_full(struct nvmet_fc_tgt_queue *q, u32 sqhd) 2119 { 2120 u32 sqtail, used; 2121 2122 /* egad, this is ugly. And sqtail is just a best guess */ 2123 sqtail = atomic_read(&q->sqtail) % q->sqsize; 2124 2125 used = (sqtail < sqhd) ? (sqtail + q->sqsize - sqhd) : (sqtail - sqhd); 2126 return ((used * 10) >= (((u32)(q->sqsize - 1) * 9))); 2127 } 2128 2129 /* 2130 * Prep RSP payload. 2131 * May be a NVMET_FCOP_RSP or NVMET_FCOP_READDATA_RSP op 2132 */ 2133 static void 2134 nvmet_fc_prep_fcp_rsp(struct nvmet_fc_tgtport *tgtport, 2135 struct nvmet_fc_fcp_iod *fod) 2136 { 2137 struct nvme_fc_ersp_iu *ersp = &fod->rspiubuf; 2138 struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common; 2139 struct nvme_completion *cqe = &ersp->cqe; 2140 u32 *cqewd = (u32 *)cqe; 2141 bool send_ersp = false; 2142 u32 rsn, rspcnt, xfr_length; 2143 2144 if (fod->fcpreq->op == NVMET_FCOP_READDATA_RSP) 2145 xfr_length = fod->req.transfer_len; 2146 else 2147 xfr_length = fod->offset; 2148 2149 /* 2150 * check to see if we can send a 0's rsp. 2151 * Note: to send a 0's response, the NVME-FC host transport will 2152 * recreate the CQE. The host transport knows: sq id, SQHD (last 2153 * seen in an ersp), and command_id. Thus it will create a 2154 * zero-filled CQE with those known fields filled in. Transport 2155 * must send an ersp for any condition where the cqe won't match 2156 * this. 2157 * 2158 * Here are the FC-NVME mandated cases where we must send an ersp: 2159 * every N responses, where N=ersp_ratio 2160 * force fabric commands to send ersp's (not in FC-NVME but good 2161 * practice) 2162 * normal cmds: any time status is non-zero, or status is zero 2163 * but words 0 or 1 are non-zero. 
2164 * the SQ is 90% or more full 2165 * the cmd is a fused command 2166 * transferred data length not equal to cmd iu length 2167 */ 2168 rspcnt = atomic_inc_return(&fod->queue->zrspcnt); 2169 if (!(rspcnt % fod->queue->ersp_ratio) || 2170 nvme_is_fabrics((struct nvme_command *) sqe) || 2171 xfr_length != fod->req.transfer_len || 2172 (le16_to_cpu(cqe->status) & 0xFFFE) || cqewd[0] || cqewd[1] || 2173 (sqe->flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND)) || 2174 queue_90percent_full(fod->queue, le16_to_cpu(cqe->sq_head))) 2175 send_ersp = true; 2176 2177 /* re-set the fields */ 2178 fod->fcpreq->rspaddr = ersp; 2179 fod->fcpreq->rspdma = fod->rspdma; 2180 2181 if (!send_ersp) { 2182 memset(ersp, 0, NVME_FC_SIZEOF_ZEROS_RSP); 2183 fod->fcpreq->rsplen = NVME_FC_SIZEOF_ZEROS_RSP; 2184 } else { 2185 ersp->iu_len = cpu_to_be16(sizeof(*ersp)/sizeof(u32)); 2186 rsn = atomic_inc_return(&fod->queue->rsn); 2187 ersp->rsn = cpu_to_be32(rsn); 2188 ersp->xfrd_len = cpu_to_be32(xfr_length); 2189 fod->fcpreq->rsplen = sizeof(*ersp); 2190 } 2191 2192 fc_dma_sync_single_for_device(tgtport->dev, fod->rspdma, 2193 sizeof(fod->rspiubuf), DMA_TO_DEVICE); 2194 } 2195 2196 static void nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq); 2197 2198 static void 2199 nvmet_fc_abort_op(struct nvmet_fc_tgtport *tgtport, 2200 struct nvmet_fc_fcp_iod *fod) 2201 { 2202 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; 2203 2204 /* data no longer needed */ 2205 nvmet_fc_free_tgt_pgs(fod); 2206 2207 /* 2208 * if an ABTS was received or we issued the fcp_abort early 2209 * don't call abort routine again. 2210 */ 2211 /* no need to take lock - lock was taken earlier to get here */ 2212 if (!fod->aborted) 2213 tgtport->ops->fcp_abort(&tgtport->fc_target_port, fcpreq); 2214 2215 nvmet_fc_free_fcp_iod(fod->queue, fod); 2216 } 2217 2218 static void 2219 nvmet_fc_xmt_fcp_rsp(struct nvmet_fc_tgtport *tgtport, 2220 struct nvmet_fc_fcp_iod *fod) 2221 { 2222 int ret; 2223 2224 fod->fcpreq->op = NVMET_FCOP_RSP; 2225 fod->fcpreq->timeout = 0; 2226 2227 nvmet_fc_prep_fcp_rsp(tgtport, fod); 2228 2229 ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq); 2230 if (ret) 2231 nvmet_fc_abort_op(tgtport, fod); 2232 } 2233 2234 static void 2235 nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport, 2236 struct nvmet_fc_fcp_iod *fod, u8 op) 2237 { 2238 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; 2239 struct scatterlist *sg = fod->next_sg; 2240 unsigned long flags; 2241 u32 remaininglen = fod->req.transfer_len - fod->offset; 2242 u32 tlen = 0; 2243 int ret; 2244 2245 fcpreq->op = op; 2246 fcpreq->offset = fod->offset; 2247 fcpreq->timeout = NVME_FC_TGTOP_TIMEOUT_SEC; 2248 2249 /* 2250 * for next sequence: 2251 * break at a sg element boundary 2252 * attempt to keep sequence length capped at 2253 * NVMET_FC_MAX_SEQ_LENGTH but allow sequence to 2254 * be longer if a single sg element is larger 2255 * than that amount. This is done to avoid creating 2256 * a new sg list to use for the tgtport api. 
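 *
 * For example (illustrative numbers only, assuming the port's
 * max_sg_cnt allows it): a 1 MB transfer mapped as 4 KB DMA segments
 * is sent as sequences of 63 segments (252 KB) each, since adding a
 * 64th segment would reach the 256 KB cap; a transfer mapped as a
 * single 1 MB segment is instead sent as one oversized sequence by
 * the fallback below, so an sg element is never split.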
2257 */ 2258 fcpreq->sg = sg; 2259 fcpreq->sg_cnt = 0; 2260 while (tlen < remaininglen && 2261 fcpreq->sg_cnt < tgtport->max_sg_cnt && 2262 tlen + sg_dma_len(sg) < NVMET_FC_MAX_SEQ_LENGTH) { 2263 fcpreq->sg_cnt++; 2264 tlen += sg_dma_len(sg); 2265 sg = sg_next(sg); 2266 } 2267 if (tlen < remaininglen && fcpreq->sg_cnt == 0) { 2268 fcpreq->sg_cnt++; 2269 tlen += min_t(u32, sg_dma_len(sg), remaininglen); 2270 sg = sg_next(sg); 2271 } 2272 if (tlen < remaininglen) 2273 fod->next_sg = sg; 2274 else 2275 fod->next_sg = NULL; 2276 2277 fcpreq->transfer_length = tlen; 2278 fcpreq->transferred_length = 0; 2279 fcpreq->fcp_error = 0; 2280 fcpreq->rsplen = 0; 2281 2282 /* 2283 * If the last READDATA request: check if LLDD supports 2284 * combined xfr with response. 2285 */ 2286 if ((op == NVMET_FCOP_READDATA) && 2287 ((fod->offset + fcpreq->transfer_length) == fod->req.transfer_len) && 2288 (tgtport->ops->target_features & NVMET_FCTGTFEAT_READDATA_RSP)) { 2289 fcpreq->op = NVMET_FCOP_READDATA_RSP; 2290 nvmet_fc_prep_fcp_rsp(tgtport, fod); 2291 } 2292 2293 ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq); 2294 if (ret) { 2295 /* 2296 * should be ok to set w/o lock as its in the thread of 2297 * execution (not an async timer routine) and doesn't 2298 * contend with any clearing action 2299 */ 2300 fod->abort = true; 2301 2302 if (op == NVMET_FCOP_WRITEDATA) { 2303 spin_lock_irqsave(&fod->flock, flags); 2304 fod->writedataactive = false; 2305 spin_unlock_irqrestore(&fod->flock, flags); 2306 nvmet_req_complete(&fod->req, NVME_SC_INTERNAL); 2307 } else /* NVMET_FCOP_READDATA or NVMET_FCOP_READDATA_RSP */ { 2308 fcpreq->fcp_error = ret; 2309 fcpreq->transferred_length = 0; 2310 nvmet_fc_xmt_fcp_op_done(fod->fcpreq); 2311 } 2312 } 2313 } 2314 2315 static inline bool 2316 __nvmet_fc_fod_op_abort(struct nvmet_fc_fcp_iod *fod, bool abort) 2317 { 2318 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; 2319 struct nvmet_fc_tgtport *tgtport = fod->tgtport; 2320 2321 /* if in the middle of an io and we need to tear down */ 2322 if (abort) { 2323 if (fcpreq->op == NVMET_FCOP_WRITEDATA) { 2324 nvmet_req_complete(&fod->req, NVME_SC_INTERNAL); 2325 return true; 2326 } 2327 2328 nvmet_fc_abort_op(tgtport, fod); 2329 return true; 2330 } 2331 2332 return false; 2333 } 2334 2335 /* 2336 * actual done handler for FCP operations when completed by the lldd 2337 */ 2338 static void 2339 nvmet_fc_fod_op_done(struct nvmet_fc_fcp_iod *fod) 2340 { 2341 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; 2342 struct nvmet_fc_tgtport *tgtport = fod->tgtport; 2343 unsigned long flags; 2344 bool abort; 2345 2346 spin_lock_irqsave(&fod->flock, flags); 2347 abort = fod->abort; 2348 fod->writedataactive = false; 2349 spin_unlock_irqrestore(&fod->flock, flags); 2350 2351 switch (fcpreq->op) { 2352 2353 case NVMET_FCOP_WRITEDATA: 2354 if (__nvmet_fc_fod_op_abort(fod, abort)) 2355 return; 2356 if (fcpreq->fcp_error || 2357 fcpreq->transferred_length != fcpreq->transfer_length) { 2358 spin_lock_irqsave(&fod->flock, flags); 2359 fod->abort = true; 2360 spin_unlock_irqrestore(&fod->flock, flags); 2361 2362 nvmet_req_complete(&fod->req, NVME_SC_INTERNAL); 2363 return; 2364 } 2365 2366 fod->offset += fcpreq->transferred_length; 2367 if (fod->offset != fod->req.transfer_len) { 2368 spin_lock_irqsave(&fod->flock, flags); 2369 fod->writedataactive = true; 2370 spin_unlock_irqrestore(&fod->flock, flags); 2371 2372 /* transfer the next chunk */ 2373 nvmet_fc_transfer_fcp_data(tgtport, fod, 2374 NVMET_FCOP_WRITEDATA); 2375 return; 2376 } 
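/*
 * With all WRITEDATA chunks received, ->execute() hands the command to
 * the nvmet core; its completion returns through queue_response
 * (nvmet_fc_fcp_nvme_cmd_done), which builds and transmits the FCP
 * response.
 */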
2377 2378 /* data transfer complete, resume with nvmet layer */ 2379 fod->req.execute(&fod->req); 2380 break; 2381 2382 case NVMET_FCOP_READDATA: 2383 case NVMET_FCOP_READDATA_RSP: 2384 if (__nvmet_fc_fod_op_abort(fod, abort)) 2385 return; 2386 if (fcpreq->fcp_error || 2387 fcpreq->transferred_length != fcpreq->transfer_length) { 2388 nvmet_fc_abort_op(tgtport, fod); 2389 return; 2390 } 2391 2392 /* success */ 2393 2394 if (fcpreq->op == NVMET_FCOP_READDATA_RSP) { 2395 /* data no longer needed */ 2396 nvmet_fc_free_tgt_pgs(fod); 2397 nvmet_fc_free_fcp_iod(fod->queue, fod); 2398 return; 2399 } 2400 2401 fod->offset += fcpreq->transferred_length; 2402 if (fod->offset != fod->req.transfer_len) { 2403 /* transfer the next chunk */ 2404 nvmet_fc_transfer_fcp_data(tgtport, fod, 2405 NVMET_FCOP_READDATA); 2406 return; 2407 } 2408 2409 /* data transfer complete, send response */ 2410 2411 /* data no longer needed */ 2412 nvmet_fc_free_tgt_pgs(fod); 2413 2414 nvmet_fc_xmt_fcp_rsp(tgtport, fod); 2415 2416 break; 2417 2418 case NVMET_FCOP_RSP: 2419 if (__nvmet_fc_fod_op_abort(fod, abort)) 2420 return; 2421 nvmet_fc_free_fcp_iod(fod->queue, fod); 2422 break; 2423 2424 default: 2425 break; 2426 } 2427 } 2428 2429 static void 2430 nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq) 2431 { 2432 struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private; 2433 2434 nvmet_fc_fod_op_done(fod); 2435 } 2436 2437 /* 2438 * actual completion handler after execution by the nvmet layer 2439 */ 2440 static void 2441 __nvmet_fc_fcp_nvme_cmd_done(struct nvmet_fc_tgtport *tgtport, 2442 struct nvmet_fc_fcp_iod *fod, int status) 2443 { 2444 struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common; 2445 struct nvme_completion *cqe = &fod->rspiubuf.cqe; 2446 unsigned long flags; 2447 bool abort; 2448 2449 spin_lock_irqsave(&fod->flock, flags); 2450 abort = fod->abort; 2451 spin_unlock_irqrestore(&fod->flock, flags); 2452 2453 /* if we have a CQE, snoop the last sq_head value */ 2454 if (!status) 2455 fod->queue->sqhd = cqe->sq_head; 2456 2457 if (abort) { 2458 nvmet_fc_abort_op(tgtport, fod); 2459 return; 2460 } 2461 2462 /* if an error handling the cmd post initial parsing */ 2463 if (status) { 2464 /* fudge up a failed CQE status for our transport error */ 2465 memset(cqe, 0, sizeof(*cqe)); 2466 cqe->sq_head = fod->queue->sqhd; /* echo last cqe sqhd */ 2467 cqe->sq_id = cpu_to_le16(fod->queue->qid); 2468 cqe->command_id = sqe->command_id; 2469 cqe->status = cpu_to_le16(status); 2470 } else { 2471 2472 /* 2473 * try to push the data even if the SQE status is non-zero. 
2474 * There may be a status where data still was intended to 2475 * be moved 2476 */ 2477 if ((fod->io_dir == NVMET_FCP_READ) && (fod->data_sg_cnt)) { 2478 /* push the data over before sending rsp */ 2479 nvmet_fc_transfer_fcp_data(tgtport, fod, 2480 NVMET_FCOP_READDATA); 2481 return; 2482 } 2483 2484 /* writes & no data - fall thru */ 2485 } 2486 2487 /* data no longer needed */ 2488 nvmet_fc_free_tgt_pgs(fod); 2489 2490 nvmet_fc_xmt_fcp_rsp(tgtport, fod); 2491 } 2492 2493 2494 static void 2495 nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req) 2496 { 2497 struct nvmet_fc_fcp_iod *fod = nvmet_req_to_fod(nvme_req); 2498 struct nvmet_fc_tgtport *tgtport = fod->tgtport; 2499 2500 __nvmet_fc_fcp_nvme_cmd_done(tgtport, fod, 0); 2501 } 2502 2503 2504 /* 2505 * Actual processing routine for received FC-NVME I/O Requests from the LLD 2506 */ 2507 static void 2508 nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport, 2509 struct nvmet_fc_fcp_iod *fod) 2510 { 2511 struct nvme_fc_cmd_iu *cmdiu = &fod->cmdiubuf; 2512 u32 xfrlen = be32_to_cpu(cmdiu->data_len); 2513 int ret; 2514 2515 /* 2516 * Fused commands are currently not supported in the linux 2517 * implementation. 2518 * 2519 * As such, the implementation of the FC transport does not 2520 * look at the fused commands and order delivery to the upper 2521 * layer until we have both based on csn. 2522 */ 2523 2524 fod->fcpreq->done = nvmet_fc_xmt_fcp_op_done; 2525 2526 if (cmdiu->flags & FCNVME_CMD_FLAGS_WRITE) { 2527 fod->io_dir = NVMET_FCP_WRITE; 2528 if (!nvme_is_write(&cmdiu->sqe)) 2529 goto transport_error; 2530 } else if (cmdiu->flags & FCNVME_CMD_FLAGS_READ) { 2531 fod->io_dir = NVMET_FCP_READ; 2532 if (nvme_is_write(&cmdiu->sqe)) 2533 goto transport_error; 2534 } else { 2535 fod->io_dir = NVMET_FCP_NODATA; 2536 if (xfrlen) 2537 goto transport_error; 2538 } 2539 2540 fod->req.cmd = &fod->cmdiubuf.sqe; 2541 fod->req.cqe = &fod->rspiubuf.cqe; 2542 if (tgtport->pe) 2543 fod->req.port = tgtport->pe->port; 2544 2545 /* clear any response payload */ 2546 memset(&fod->rspiubuf, 0, sizeof(fod->rspiubuf)); 2547 2548 fod->data_sg = NULL; 2549 fod->data_sg_cnt = 0; 2550 2551 ret = nvmet_req_init(&fod->req, 2552 &fod->queue->nvme_cq, 2553 &fod->queue->nvme_sq, 2554 &nvmet_fc_tgt_fcp_ops); 2555 if (!ret) { 2556 /* bad SQE content or invalid ctrl state */ 2557 /* nvmet layer has already called op done to send rsp. */ 2558 return; 2559 } 2560 2561 fod->req.transfer_len = xfrlen; 2562 2563 /* keep a running counter of tail position */ 2564 atomic_inc(&fod->queue->sqtail); 2565 2566 if (fod->req.transfer_len) { 2567 ret = nvmet_fc_alloc_tgt_pgs(fod); 2568 if (ret) { 2569 nvmet_req_complete(&fod->req, ret); 2570 return; 2571 } 2572 } 2573 fod->req.sg = fod->data_sg; 2574 fod->req.sg_cnt = fod->data_sg_cnt; 2575 fod->offset = 0; 2576 2577 if (fod->io_dir == NVMET_FCP_WRITE) { 2578 /* pull the data over before invoking nvmet layer */ 2579 nvmet_fc_transfer_fcp_data(tgtport, fod, NVMET_FCOP_WRITEDATA); 2580 return; 2581 } 2582 2583 /* 2584 * Reads or no data: 2585 * 2586 * can invoke the nvmet_layer now. If read data, cmd completion will 2587 * push the data 2588 */ 2589 fod->req.execute(&fod->req); 2590 return; 2591 2592 transport_error: 2593 nvmet_fc_abort_op(tgtport, fod); 2594 } 2595 2596 /** 2597 * nvmet_fc_rcv_fcp_req - transport entry point called by an LLDD 2598 * upon the reception of a NVME FCP CMD IU. 2599 * 2600 * Pass a FC-NVME FCP CMD IU received from the FC link to the nvmet-fc 2601 * layer for processing. 
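 * (An illustrative sketch of how an LLDD receive path might feed this
 * routine, and its LS/abort counterparts, is appended as a comment at
 * the end of this file.)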
2602 * 2603 * The nvmet_fc layer allocates a local job structure (struct 2604 * nvmet_fc_fcp_iod) from the queue for the io and copies the 2605 * CMD IU buffer to the job structure. As such, on a successful 2606 * completion (returns 0), the LLDD may immediately free/reuse 2607 * the CMD IU buffer passed in the call. 2608 * 2609 * However, in some circumstances, because FC is packetized and the 2610 * FC LLDD api may issue a hw command to send the response yet not see 2611 * the hw completion for that command before a new command is 2612 * asynchronously received, it is possible for a command to arrive 2613 * before the LLDD and nvmet_fc have recycled the job structure. 2614 * This gives the appearance of more commands received than fit 2615 * in the sq. 2616 * To alleviate this scenario, a temporary queue is maintained in the 2617 * transport for pending LLDD requests waiting for a queue job structure. 2618 * In these "overrun" cases, a temporary queue element is allocated, 2619 * the LLDD request and CMD IU buffer information are remembered, and 2620 * the routine returns a -EOVERFLOW status. Subsequently, when a queue job 2621 * structure is freed, it is immediately reallocated for anything on the 2622 * pending request list. The LLDD's defer_rcv() callback is called, 2623 * informing the LLDD that it may reuse the CMD IU buffer, and the io 2624 * is then started normally with the transport. 2625 * 2626 * The LLDD, when receiving an -EOVERFLOW completion status, is to treat 2627 * the completion as successful but must not reuse the CMD IU buffer 2628 * until the LLDD's defer_rcv() callback has been called for the 2629 * corresponding struct nvmefc_tgt_fcp_req pointer. 2630 * 2631 * If there is any other condition in which an error occurs, the 2632 * transport will return a non-zero status indicating the error. 2633 * In all cases other than -EOVERFLOW, the transport has not accepted the 2634 * request and the LLDD should abort the exchange. 2635 * 2636 * @target_port: pointer to the (registered) target port the FCP CMD IU 2637 * was received on. 2638 * @fcpreq: pointer to a fcpreq request structure to be used to reference 2639 * the exchange corresponding to the FCP Exchange. 2640 * @cmdiubuf: pointer to the buffer containing the FCP CMD IU 2641 * @cmdiubuf_len: length, in bytes, of the received FCP CMD IU 2642 */ 2643 int 2644 nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *target_port, 2645 struct nvmefc_tgt_fcp_req *fcpreq, 2646 void *cmdiubuf, u32 cmdiubuf_len) 2647 { 2648 struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port); 2649 struct nvme_fc_cmd_iu *cmdiu = cmdiubuf; 2650 struct nvmet_fc_tgt_queue *queue; 2651 struct nvmet_fc_fcp_iod *fod; 2652 struct nvmet_fc_defer_fcp_req *deferfcp; 2653 unsigned long flags; 2654 2655 /* validate iu, so the connection id can be used to find the queue */ 2656 if ((cmdiubuf_len != sizeof(*cmdiu)) || 2657 (cmdiu->format_id != NVME_CMD_FORMAT_ID) || 2658 (cmdiu->fc_id != NVME_CMD_FC_ID) || 2659 (be16_to_cpu(cmdiu->iu_len) != (sizeof(*cmdiu)/4))) 2660 return -EIO; 2661 2662 queue = nvmet_fc_find_target_queue(tgtport, 2663 be64_to_cpu(cmdiu->connection_id)); 2664 if (!queue) 2665 return -ENOTCONN; 2666 2667 /* 2668 * note: reference taken by find_target_queue 2669 * After successful fod allocation, the fod will inherit the 2670 * ownership of that reference and will remove the reference 2671 * when the fod is freed.
2672 */ 2673 2674 spin_lock_irqsave(&queue->qlock, flags); 2675 2676 fod = nvmet_fc_alloc_fcp_iod(queue); 2677 if (fod) { 2678 spin_unlock_irqrestore(&queue->qlock, flags); 2679 2680 fcpreq->nvmet_fc_private = fod; 2681 fod->fcpreq = fcpreq; 2682 2683 memcpy(&fod->cmdiubuf, cmdiubuf, cmdiubuf_len); 2684 2685 nvmet_fc_queue_fcp_req(tgtport, queue, fcpreq); 2686 2687 return 0; 2688 } 2689 2690 if (!tgtport->ops->defer_rcv) { 2691 spin_unlock_irqrestore(&queue->qlock, flags); 2692 /* release the queue lookup reference */ 2693 nvmet_fc_tgt_q_put(queue); 2694 return -ENOENT; 2695 } 2696 2697 deferfcp = list_first_entry_or_null(&queue->avail_defer_list, 2698 struct nvmet_fc_defer_fcp_req, req_list); 2699 if (deferfcp) { 2700 /* Just re-use one that was previously allocated */ 2701 list_del(&deferfcp->req_list); 2702 } else { 2703 spin_unlock_irqrestore(&queue->qlock, flags); 2704 2705 /* Now we need to dynamically allocate one */ 2706 deferfcp = kmalloc(sizeof(*deferfcp), GFP_KERNEL); 2707 if (!deferfcp) { 2708 /* release the queue lookup reference */ 2709 nvmet_fc_tgt_q_put(queue); 2710 return -ENOMEM; 2711 } 2712 spin_lock_irqsave(&queue->qlock, flags); 2713 } 2714 2715 /* For now, use rspaddr / rsplen to save payload information */ 2716 fcpreq->rspaddr = cmdiubuf; 2717 fcpreq->rsplen = cmdiubuf_len; 2718 deferfcp->fcp_req = fcpreq; 2719 2720 /* defer processing till a fod becomes available */ 2721 list_add_tail(&deferfcp->req_list, &queue->pending_cmd_list); 2722 2723 /* NOTE: the queue lookup reference is still valid */ 2724 2725 spin_unlock_irqrestore(&queue->qlock, flags); 2726 2727 return -EOVERFLOW; 2728 } 2729 EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_req); 2730 2731 /** 2732 * nvmet_fc_rcv_fcp_abort - transport entry point called by an LLDD 2733 * upon the reception of an ABTS for a FCP command 2734 * 2735 * Notify the transport that an ABTS has been received for a FCP command 2736 * that had been given to the transport via nvmet_fc_rcv_fcp_req(). The 2737 * LLDD believes the command is still being worked on 2738 * (template_ops->fcp_req_release() has not been called). 2739 * 2740 * The transport will wait for any outstanding work (an op to the LLDD, 2741 * which the LLDD should complete with error due to the ABTS; or the 2742 * completion from the nvmet layer of the nvme command), then will 2743 * stop processing and call the LLDD's fcp_req_release() callback to 2744 * return the i/o context to the LLDD. The LLDD may send the BA_ACC 2745 * to the ABTS either after return from this function (assuming any 2746 * outstanding op work has been terminated) or upon the callback being 2747 * called. 2748 * 2749 * @target_port: pointer to the (registered) target port the FCP CMD IU 2750 * was received on. 2751 * @fcpreq: pointer to the fcpreq request structure that corresponds 2752 * to the exchange that received the ABTS. 2753 */ 2754 void 2755 nvmet_fc_rcv_fcp_abort(struct nvmet_fc_target_port *target_port, 2756 struct nvmefc_tgt_fcp_req *fcpreq) 2757 { 2758 struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private; 2759 struct nvmet_fc_tgt_queue *queue; 2760 unsigned long flags; 2761 2762 if (!fod || fod->fcpreq != fcpreq) 2763 /* job appears to have already completed, ignore abort */ 2764 return; 2765 2766 queue = fod->queue; 2767 2768 spin_lock_irqsave(&queue->qlock, flags); 2769 if (fod->active) { 2770 /* 2771 * mark as abort. The abort handler, invoked upon completion 2772 * of any work, will detect the aborted status and do the 2773 * callback.
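 * Because fod->aborted is set here as well, nvmet_fc_abort_op()
 * will not call the LLDD's fcp_abort() back for this exchange;
 * the LLDD already knows about the ABTS.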
2774 */ 2775 spin_lock(&fod->flock); 2776 fod->abort = true; 2777 fod->aborted = true; 2778 spin_unlock(&fod->flock); 2779 } 2780 spin_unlock_irqrestore(&queue->qlock, flags); 2781 } 2782 EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_abort); 2783 2784 2785 struct nvmet_fc_traddr { 2786 u64 nn; 2787 u64 pn; 2788 }; 2789 2790 static int 2791 __nvme_fc_parse_u64(substring_t *sstr, u64 *val) 2792 { 2793 u64 token64; 2794 2795 if (match_u64(sstr, &token64)) 2796 return -EINVAL; 2797 *val = token64; 2798 2799 return 0; 2800 } 2801 2802 /* 2803 * This routine validates and extracts the WWN's from the TRADDR string. 2804 * As kernel parsers need the 0x to determine number base, universally 2805 * build string to parse with 0x prefix before parsing name strings. 2806 */ 2807 static int 2808 nvme_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf, size_t blen) 2809 { 2810 char name[2 + NVME_FC_TRADDR_HEXNAMELEN + 1]; 2811 substring_t wwn = { name, &name[sizeof(name)-1] }; 2812 int nnoffset, pnoffset; 2813 2814 /* validate if string is one of the 2 allowed formats */ 2815 if (strnlen(buf, blen) == NVME_FC_TRADDR_MAXLENGTH && 2816 !strncmp(buf, "nn-0x", NVME_FC_TRADDR_OXNNLEN) && 2817 !strncmp(&buf[NVME_FC_TRADDR_MAX_PN_OFFSET], 2818 "pn-0x", NVME_FC_TRADDR_OXNNLEN)) { 2819 nnoffset = NVME_FC_TRADDR_OXNNLEN; 2820 pnoffset = NVME_FC_TRADDR_MAX_PN_OFFSET + 2821 NVME_FC_TRADDR_OXNNLEN; 2822 } else if ((strnlen(buf, blen) == NVME_FC_TRADDR_MINLENGTH && 2823 !strncmp(buf, "nn-", NVME_FC_TRADDR_NNLEN) && 2824 !strncmp(&buf[NVME_FC_TRADDR_MIN_PN_OFFSET], 2825 "pn-", NVME_FC_TRADDR_NNLEN))) { 2826 nnoffset = NVME_FC_TRADDR_NNLEN; 2827 pnoffset = NVME_FC_TRADDR_MIN_PN_OFFSET + NVME_FC_TRADDR_NNLEN; 2828 } else 2829 goto out_einval; 2830 2831 name[0] = '0'; 2832 name[1] = 'x'; 2833 name[2 + NVME_FC_TRADDR_HEXNAMELEN] = 0; 2834 2835 memcpy(&name[2], &buf[nnoffset], NVME_FC_TRADDR_HEXNAMELEN); 2836 if (__nvme_fc_parse_u64(&wwn, &traddr->nn)) 2837 goto out_einval; 2838 2839 memcpy(&name[2], &buf[pnoffset], NVME_FC_TRADDR_HEXNAMELEN); 2840 if (__nvme_fc_parse_u64(&wwn, &traddr->pn)) 2841 goto out_einval; 2842 2843 return 0; 2844 2845 out_einval: 2846 pr_warn("%s: bad traddr string\n", __func__); 2847 return -EINVAL; 2848 } 2849 2850 static int 2851 nvmet_fc_add_port(struct nvmet_port *port) 2852 { 2853 struct nvmet_fc_tgtport *tgtport; 2854 struct nvmet_fc_port_entry *pe; 2855 struct nvmet_fc_traddr traddr = { 0L, 0L }; 2856 unsigned long flags; 2857 int ret; 2858 2859 /* validate the address info */ 2860 if ((port->disc_addr.trtype != NVMF_TRTYPE_FC) || 2861 (port->disc_addr.adrfam != NVMF_ADDR_FAMILY_FC)) 2862 return -EINVAL; 2863 2864 /* map the traddr address info to a target port */ 2865 2866 ret = nvme_fc_parse_traddr(&traddr, port->disc_addr.traddr, 2867 sizeof(port->disc_addr.traddr)); 2868 if (ret) 2869 return ret; 2870 2871 pe = kzalloc(sizeof(*pe), GFP_KERNEL); 2872 if (!pe) 2873 return -ENOMEM; 2874 2875 ret = -ENXIO; 2876 spin_lock_irqsave(&nvmet_fc_tgtlock, flags); 2877 list_for_each_entry(tgtport, &nvmet_fc_target_list, tgt_list) { 2878 if ((tgtport->fc_target_port.node_name == traddr.nn) && 2879 (tgtport->fc_target_port.port_name == traddr.pn)) { 2880 /* a FC port can only be 1 nvmet port id */ 2881 if (!tgtport->pe) { 2882 nvmet_fc_portentry_bind(tgtport, pe, port); 2883 ret = 0; 2884 } else 2885 ret = -EALREADY; 2886 break; 2887 } 2888 } 2889 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); 2890 2891 if (ret) 2892 kfree(pe); 2893 2894 return ret; 2895 } 2896 2897 static void 2898 
nvmet_fc_remove_port(struct nvmet_port *port) 2899 { 2900 struct nvmet_fc_port_entry *pe = port->priv; 2901 2902 nvmet_fc_portentry_unbind(pe); 2903 2904 kfree(pe); 2905 } 2906 2907 static void 2908 nvmet_fc_discovery_chg(struct nvmet_port *port) 2909 { 2910 struct nvmet_fc_port_entry *pe = port->priv; 2911 struct nvmet_fc_tgtport *tgtport = pe->tgtport; 2912 2913 if (tgtport && tgtport->ops->discovery_event) 2914 tgtport->ops->discovery_event(&tgtport->fc_target_port); 2915 } 2916 2917 static const struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops = { 2918 .owner = THIS_MODULE, 2919 .type = NVMF_TRTYPE_FC, 2920 .msdbd = 1, 2921 .add_port = nvmet_fc_add_port, 2922 .remove_port = nvmet_fc_remove_port, 2923 .queue_response = nvmet_fc_fcp_nvme_cmd_done, 2924 .delete_ctrl = nvmet_fc_delete_ctrl, 2925 .discovery_chg = nvmet_fc_discovery_chg, 2926 }; 2927 2928 static int __init nvmet_fc_init_module(void) 2929 { 2930 return nvmet_register_transport(&nvmet_fc_tgt_fcp_ops); 2931 } 2932 2933 static void __exit nvmet_fc_exit_module(void) 2934 { 2935 /* sanity check - all lports should be removed */ 2936 if (!list_empty(&nvmet_fc_target_list)) 2937 pr_warn("%s: targetport list not empty\n", __func__); 2938 2939 nvmet_unregister_transport(&nvmet_fc_tgt_fcp_ops); 2940 2941 ida_destroy(&nvmet_fc_tgtport_cnt); 2942 } 2943 2944 module_init(nvmet_fc_init_module); 2945 module_exit(nvmet_fc_exit_module); 2946 2947 MODULE_LICENSE("GPL v2"); 2948
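/*
 * Illustrative usage sketch (editorial addition, not part of the driver):
 * a rough outline of how a hypothetical LLDD receive path might feed the
 * transport entry points exported above. Only nvmet_fc_rcv_ls_req(),
 * nvmet_fc_rcv_fcp_req() and nvmet_fc_rcv_fcp_abort() are real interfaces
 * from this file; struct example_exchange, its members, and the
 * example_lldd_*() helpers are assumptions made up for the sketch.
 *
 *	// An LS frame arrived from the wire.
 *	static void example_lldd_recv_ls(struct example_exchange *xchg)
 *	{
 *		// The transport copies the payload, so the request buffer
 *		// may be reused as soon as this call returns.
 *		if (nvmet_fc_rcv_ls_req(xchg->tgtport, xchg->hosthandle,
 *					&xchg->ls_rsp, xchg->buf, xchg->len))
 *			example_lldd_abort_exchange(xchg);
 *	}
 *
 *	// An FCP CMD IU arrived from the wire.
 *	static void example_lldd_recv_fcp_cmd(struct example_exchange *xchg)
 *	{
 *		int ret;
 *
 *		ret = nvmet_fc_rcv_fcp_req(xchg->tgtport, &xchg->tgt_fcp_req,
 *					   xchg->buf, xchg->len);
 *		if (ret == -EOVERFLOW) {
 *			// Accepted, but the CMD IU buffer must be held until
 *			// the defer_rcv() template callback is invoked.
 *			example_lldd_hold_cmd_buffer(xchg);
 *			return;
 *		}
 *		if (ret)
 *			// Not accepted: abort the exchange.
 *			example_lldd_abort_exchange(xchg);
 *	}
 *
 *	// An ABTS arrived for an exchange already given to the transport.
 *	static void example_lldd_recv_abts(struct example_exchange *xchg)
 *	{
 *		nvmet_fc_rcv_fcp_abort(xchg->tgtport, &xchg->tgt_fcp_req);
 *		// The BA_ACC may be sent now, or once the exchange context
 *		// is returned via the fcp_req_release() template callback.
 *	}
 */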