1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (c) 2016 Avago Technologies. All rights reserved. 4 */ 5 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 6 #include <linux/module.h> 7 #include <linux/slab.h> 8 #include <linux/blk-mq.h> 9 #include <linux/parser.h> 10 #include <linux/random.h> 11 #include <uapi/scsi/fc/fc_fs.h> 12 #include <uapi/scsi/fc/fc_els.h> 13 14 #include "nvmet.h" 15 #include <linux/nvme-fc-driver.h> 16 #include <linux/nvme-fc.h> 17 #include "../host/fc.h" 18 19 20 /* *************************** Data Structures/Defines ****************** */ 21 22 23 #define NVMET_LS_CTX_COUNT 256 24 25 struct nvmet_fc_tgtport; 26 struct nvmet_fc_tgt_assoc; 27 28 struct nvmet_fc_ls_iod { /* for an LS RQST RCV */ 29 struct nvmefc_ls_rsp *lsrsp; 30 struct nvmefc_tgt_fcp_req *fcpreq; /* only if RS */ 31 32 struct list_head ls_rcv_list; /* tgtport->ls_rcv_list */ 33 34 struct nvmet_fc_tgtport *tgtport; 35 struct nvmet_fc_tgt_assoc *assoc; 36 void *hosthandle; 37 38 union nvmefc_ls_requests *rqstbuf; 39 union nvmefc_ls_responses *rspbuf; 40 u16 rqstdatalen; 41 dma_addr_t rspdma; 42 43 struct scatterlist sg[2]; 44 45 struct work_struct work; 46 } __aligned(sizeof(unsigned long long)); 47 48 struct nvmet_fc_ls_req_op { /* for an LS RQST XMT */ 49 struct nvmefc_ls_req ls_req; 50 51 struct nvmet_fc_tgtport *tgtport; 52 void *hosthandle; 53 54 int ls_error; 55 struct list_head lsreq_list; /* tgtport->ls_req_list */ 56 bool req_queued; 57 }; 58 59 60 /* desired maximum for a single sequence - if sg list allows it */ 61 #define NVMET_FC_MAX_SEQ_LENGTH (256 * 1024) 62 63 enum nvmet_fcp_datadir { 64 NVMET_FCP_NODATA, 65 NVMET_FCP_WRITE, 66 NVMET_FCP_READ, 67 NVMET_FCP_ABORTED, 68 }; 69 70 struct nvmet_fc_fcp_iod { 71 struct nvmefc_tgt_fcp_req *fcpreq; 72 73 struct nvme_fc_cmd_iu cmdiubuf; 74 struct nvme_fc_ersp_iu rspiubuf; 75 dma_addr_t rspdma; 76 struct scatterlist *next_sg; 77 struct scatterlist *data_sg; 78 int data_sg_cnt; 79 u32 offset; 80 enum nvmet_fcp_datadir io_dir; 81 bool active; 82 bool abort; 83 bool aborted; 84 bool writedataactive; 85 spinlock_t flock; 86 87 struct nvmet_req req; 88 struct work_struct defer_work; 89 90 struct nvmet_fc_tgtport *tgtport; 91 struct nvmet_fc_tgt_queue *queue; 92 93 struct list_head fcp_list; /* tgtport->fcp_list */ 94 }; 95 96 struct nvmet_fc_tgtport { 97 struct nvmet_fc_target_port fc_target_port; 98 99 struct list_head tgt_list; /* nvmet_fc_target_list */ 100 struct device *dev; /* dev for dma mapping */ 101 struct nvmet_fc_target_template *ops; 102 103 struct nvmet_fc_ls_iod *iod; 104 spinlock_t lock; 105 struct list_head ls_rcv_list; 106 struct list_head ls_req_list; 107 struct list_head ls_busylist; 108 struct list_head assoc_list; 109 struct list_head host_list; 110 struct ida assoc_cnt; 111 struct nvmet_fc_port_entry *pe; 112 struct kref ref; 113 u32 max_sg_cnt; 114 }; 115 116 struct nvmet_fc_port_entry { 117 struct nvmet_fc_tgtport *tgtport; 118 struct nvmet_port *port; 119 u64 node_name; 120 u64 port_name; 121 struct list_head pe_list; 122 }; 123 124 struct nvmet_fc_defer_fcp_req { 125 struct list_head req_list; 126 struct nvmefc_tgt_fcp_req *fcp_req; 127 }; 128 129 struct nvmet_fc_tgt_queue { 130 bool ninetypercent; 131 u16 qid; 132 u16 sqsize; 133 u16 ersp_ratio; 134 __le16 sqhd; 135 atomic_t connected; 136 atomic_t sqtail; 137 atomic_t zrspcnt; 138 atomic_t rsn; 139 spinlock_t qlock; 140 struct nvmet_cq nvme_cq; 141 struct nvmet_sq nvme_sq; 142 struct nvmet_fc_tgt_assoc *assoc; 143 struct list_head fod_list; 144 struct list_head pending_cmd_list; 145 struct list_head avail_defer_list; 146 struct workqueue_struct *work_q; 147 struct kref ref; 148 struct rcu_head rcu; 149 struct nvmet_fc_fcp_iod fod[]; /* array of fcp_iods */ 150 } __aligned(sizeof(unsigned long long)); 151 152 struct nvmet_fc_hostport { 153 struct nvmet_fc_tgtport *tgtport; 154 void *hosthandle; 155 struct list_head host_list; 156 struct kref ref; 157 u8 invalid; 158 }; 159 160 struct nvmet_fc_tgt_assoc { 161 u64 association_id; 162 u32 a_id; 163 atomic_t terminating; 164 struct nvmet_fc_tgtport *tgtport; 165 struct nvmet_fc_hostport *hostport; 166 struct nvmet_fc_ls_iod *rcv_disconn; 167 struct list_head a_list; 168 struct nvmet_fc_tgt_queue *queues[NVMET_NR_QUEUES + 1]; 169 struct kref ref; 170 struct work_struct del_work; 171 struct rcu_head rcu; 172 }; 173 174 175 static inline int 176 nvmet_fc_iodnum(struct nvmet_fc_ls_iod *iodptr) 177 { 178 return (iodptr - iodptr->tgtport->iod); 179 } 180 181 static inline int 182 nvmet_fc_fodnum(struct nvmet_fc_fcp_iod *fodptr) 183 { 184 return (fodptr - fodptr->queue->fod); 185 } 186 187 188 /* 189 * Association and Connection IDs: 190 * 191 * Association ID will have random number in upper 6 bytes and zero 192 * in lower 2 bytes 193 * 194 * Connection IDs will be Association ID with QID or'd in lower 2 bytes 195 * 196 * note: Association ID = Connection ID for queue 0 197 */ 198 #define BYTES_FOR_QID sizeof(u16) 199 #define BYTES_FOR_QID_SHIFT (BYTES_FOR_QID * 8) 200 #define NVMET_FC_QUEUEID_MASK ((u64)((1 << BYTES_FOR_QID_SHIFT) - 1)) 201 202 static inline u64 203 nvmet_fc_makeconnid(struct nvmet_fc_tgt_assoc *assoc, u16 qid) 204 { 205 return (assoc->association_id | qid); 206 } 207 208 static inline u64 209 nvmet_fc_getassociationid(u64 connectionid) 210 { 211 return connectionid & ~NVMET_FC_QUEUEID_MASK; 212 } 213 214 static inline u16 215 nvmet_fc_getqueueid(u64 connectionid) 216 { 217 return (u16)(connectionid & NVMET_FC_QUEUEID_MASK); 218 } 219 220 static inline struct nvmet_fc_tgtport * 221 targetport_to_tgtport(struct nvmet_fc_target_port *targetport) 222 { 223 return container_of(targetport, struct nvmet_fc_tgtport, 224 fc_target_port); 225 } 226 227 static inline struct nvmet_fc_fcp_iod * 228 nvmet_req_to_fod(struct nvmet_req *nvme_req) 229 { 230 return container_of(nvme_req, struct nvmet_fc_fcp_iod, req); 231 } 232 233 234 /* *************************** Globals **************************** */ 235 236 237 static DEFINE_SPINLOCK(nvmet_fc_tgtlock); 238 239 static LIST_HEAD(nvmet_fc_target_list); 240 static DEFINE_IDA(nvmet_fc_tgtport_cnt); 241 static LIST_HEAD(nvmet_fc_portentry_list); 242 243 244 static void nvmet_fc_handle_ls_rqst_work(struct work_struct *work); 245 static void nvmet_fc_fcp_rqst_op_defer_work(struct work_struct *work); 246 static void nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc); 247 static int nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc); 248 static void nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue); 249 static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue); 250 static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport); 251 static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport); 252 static void nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport, 253 struct nvmet_fc_fcp_iod *fod); 254 static void nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc); 255 static void nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport, 256 struct nvmet_fc_ls_iod *iod); 257 258 259 /* *********************** FC-NVME DMA Handling **************************** */ 260 261 /* 262 * The fcloop device passes in a NULL device pointer. Real LLD's will 263 * pass in a valid device pointer. If NULL is passed to the dma mapping 264 * routines, depending on the platform, it may or may not succeed, and 265 * may crash. 266 * 267 * As such: 268 * Wrapper all the dma routines and check the dev pointer. 269 * 270 * If simple mappings (return just a dma address, we'll noop them, 271 * returning a dma address of 0. 272 * 273 * On more complex mappings (dma_map_sg), a pseudo routine fills 274 * in the scatter list, setting all dma addresses to 0. 275 */ 276 277 static inline dma_addr_t 278 fc_dma_map_single(struct device *dev, void *ptr, size_t size, 279 enum dma_data_direction dir) 280 { 281 return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L; 282 } 283 284 static inline int 285 fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) 286 { 287 return dev ? dma_mapping_error(dev, dma_addr) : 0; 288 } 289 290 static inline void 291 fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size, 292 enum dma_data_direction dir) 293 { 294 if (dev) 295 dma_unmap_single(dev, addr, size, dir); 296 } 297 298 static inline void 299 fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size, 300 enum dma_data_direction dir) 301 { 302 if (dev) 303 dma_sync_single_for_cpu(dev, addr, size, dir); 304 } 305 306 static inline void 307 fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size, 308 enum dma_data_direction dir) 309 { 310 if (dev) 311 dma_sync_single_for_device(dev, addr, size, dir); 312 } 313 314 /* pseudo dma_map_sg call */ 315 static int 316 fc_map_sg(struct scatterlist *sg, int nents) 317 { 318 struct scatterlist *s; 319 int i; 320 321 WARN_ON(nents == 0 || sg[0].length == 0); 322 323 for_each_sg(sg, s, nents, i) { 324 s->dma_address = 0L; 325 #ifdef CONFIG_NEED_SG_DMA_LENGTH 326 s->dma_length = s->length; 327 #endif 328 } 329 return nents; 330 } 331 332 static inline int 333 fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, 334 enum dma_data_direction dir) 335 { 336 return dev ? dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents); 337 } 338 339 static inline void 340 fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, 341 enum dma_data_direction dir) 342 { 343 if (dev) 344 dma_unmap_sg(dev, sg, nents, dir); 345 } 346 347 348 /* ********************** FC-NVME LS XMT Handling ************************* */ 349 350 351 static void 352 __nvmet_fc_finish_ls_req(struct nvmet_fc_ls_req_op *lsop) 353 { 354 struct nvmet_fc_tgtport *tgtport = lsop->tgtport; 355 struct nvmefc_ls_req *lsreq = &lsop->ls_req; 356 unsigned long flags; 357 358 spin_lock_irqsave(&tgtport->lock, flags); 359 360 if (!lsop->req_queued) { 361 spin_unlock_irqrestore(&tgtport->lock, flags); 362 goto out_puttgtport; 363 } 364 365 list_del(&lsop->lsreq_list); 366 367 lsop->req_queued = false; 368 369 spin_unlock_irqrestore(&tgtport->lock, flags); 370 371 fc_dma_unmap_single(tgtport->dev, lsreq->rqstdma, 372 (lsreq->rqstlen + lsreq->rsplen), 373 DMA_BIDIRECTIONAL); 374 375 out_puttgtport: 376 nvmet_fc_tgtport_put(tgtport); 377 } 378 379 static int 380 __nvmet_fc_send_ls_req(struct nvmet_fc_tgtport *tgtport, 381 struct nvmet_fc_ls_req_op *lsop, 382 void (*done)(struct nvmefc_ls_req *req, int status)) 383 { 384 struct nvmefc_ls_req *lsreq = &lsop->ls_req; 385 unsigned long flags; 386 int ret = 0; 387 388 if (!tgtport->ops->ls_req) 389 return -EOPNOTSUPP; 390 391 if (!nvmet_fc_tgtport_get(tgtport)) 392 return -ESHUTDOWN; 393 394 lsreq->done = done; 395 lsop->req_queued = false; 396 INIT_LIST_HEAD(&lsop->lsreq_list); 397 398 lsreq->rqstdma = fc_dma_map_single(tgtport->dev, lsreq->rqstaddr, 399 lsreq->rqstlen + lsreq->rsplen, 400 DMA_BIDIRECTIONAL); 401 if (fc_dma_mapping_error(tgtport->dev, lsreq->rqstdma)) { 402 ret = -EFAULT; 403 goto out_puttgtport; 404 } 405 lsreq->rspdma = lsreq->rqstdma + lsreq->rqstlen; 406 407 spin_lock_irqsave(&tgtport->lock, flags); 408 409 list_add_tail(&lsop->lsreq_list, &tgtport->ls_req_list); 410 411 lsop->req_queued = true; 412 413 spin_unlock_irqrestore(&tgtport->lock, flags); 414 415 ret = tgtport->ops->ls_req(&tgtport->fc_target_port, lsop->hosthandle, 416 lsreq); 417 if (ret) 418 goto out_unlink; 419 420 return 0; 421 422 out_unlink: 423 lsop->ls_error = ret; 424 spin_lock_irqsave(&tgtport->lock, flags); 425 lsop->req_queued = false; 426 list_del(&lsop->lsreq_list); 427 spin_unlock_irqrestore(&tgtport->lock, flags); 428 fc_dma_unmap_single(tgtport->dev, lsreq->rqstdma, 429 (lsreq->rqstlen + lsreq->rsplen), 430 DMA_BIDIRECTIONAL); 431 out_puttgtport: 432 nvmet_fc_tgtport_put(tgtport); 433 434 return ret; 435 } 436 437 static int 438 nvmet_fc_send_ls_req_async(struct nvmet_fc_tgtport *tgtport, 439 struct nvmet_fc_ls_req_op *lsop, 440 void (*done)(struct nvmefc_ls_req *req, int status)) 441 { 442 /* don't wait for completion */ 443 444 return __nvmet_fc_send_ls_req(tgtport, lsop, done); 445 } 446 447 static void 448 nvmet_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status) 449 { 450 struct nvmet_fc_ls_req_op *lsop = 451 container_of(lsreq, struct nvmet_fc_ls_req_op, ls_req); 452 453 __nvmet_fc_finish_ls_req(lsop); 454 455 /* fc-nvme target doesn't care about success or failure of cmd */ 456 457 kfree(lsop); 458 } 459 460 /* 461 * This routine sends a FC-NVME LS to disconnect (aka terminate) 462 * the FC-NVME Association. Terminating the association also 463 * terminates the FC-NVME connections (per queue, both admin and io 464 * queues) that are part of the association. E.g. things are torn 465 * down, and the related FC-NVME Association ID and Connection IDs 466 * become invalid. 467 * 468 * The behavior of the fc-nvme target is such that it's 469 * understanding of the association and connections will implicitly 470 * be torn down. The action is implicit as it may be due to a loss of 471 * connectivity with the fc-nvme host, so the target may never get a 472 * response even if it tried. As such, the action of this routine 473 * is to asynchronously send the LS, ignore any results of the LS, and 474 * continue on with terminating the association. If the fc-nvme host 475 * is present and receives the LS, it too can tear down. 476 */ 477 static void 478 nvmet_fc_xmt_disconnect_assoc(struct nvmet_fc_tgt_assoc *assoc) 479 { 480 struct nvmet_fc_tgtport *tgtport = assoc->tgtport; 481 struct fcnvme_ls_disconnect_assoc_rqst *discon_rqst; 482 struct fcnvme_ls_disconnect_assoc_acc *discon_acc; 483 struct nvmet_fc_ls_req_op *lsop; 484 struct nvmefc_ls_req *lsreq; 485 int ret; 486 487 /* 488 * If ls_req is NULL or no hosthandle, it's an older lldd and no 489 * message is normal. Otherwise, send unless the hostport has 490 * already been invalidated by the lldd. 491 */ 492 if (!tgtport->ops->ls_req || !assoc->hostport || 493 assoc->hostport->invalid) 494 return; 495 496 lsop = kzalloc((sizeof(*lsop) + 497 sizeof(*discon_rqst) + sizeof(*discon_acc) + 498 tgtport->ops->lsrqst_priv_sz), GFP_KERNEL); 499 if (!lsop) { 500 dev_info(tgtport->dev, 501 "{%d:%d} send Disconnect Association failed: ENOMEM\n", 502 tgtport->fc_target_port.port_num, assoc->a_id); 503 return; 504 } 505 506 discon_rqst = (struct fcnvme_ls_disconnect_assoc_rqst *)&lsop[1]; 507 discon_acc = (struct fcnvme_ls_disconnect_assoc_acc *)&discon_rqst[1]; 508 lsreq = &lsop->ls_req; 509 if (tgtport->ops->lsrqst_priv_sz) 510 lsreq->private = (void *)&discon_acc[1]; 511 else 512 lsreq->private = NULL; 513 514 lsop->tgtport = tgtport; 515 lsop->hosthandle = assoc->hostport->hosthandle; 516 517 nvmefc_fmt_lsreq_discon_assoc(lsreq, discon_rqst, discon_acc, 518 assoc->association_id); 519 520 ret = nvmet_fc_send_ls_req_async(tgtport, lsop, 521 nvmet_fc_disconnect_assoc_done); 522 if (ret) { 523 dev_info(tgtport->dev, 524 "{%d:%d} XMT Disconnect Association failed: %d\n", 525 tgtport->fc_target_port.port_num, assoc->a_id, ret); 526 kfree(lsop); 527 } 528 } 529 530 531 /* *********************** FC-NVME Port Management ************************ */ 532 533 534 static int 535 nvmet_fc_alloc_ls_iodlist(struct nvmet_fc_tgtport *tgtport) 536 { 537 struct nvmet_fc_ls_iod *iod; 538 int i; 539 540 iod = kcalloc(NVMET_LS_CTX_COUNT, sizeof(struct nvmet_fc_ls_iod), 541 GFP_KERNEL); 542 if (!iod) 543 return -ENOMEM; 544 545 tgtport->iod = iod; 546 547 for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) { 548 INIT_WORK(&iod->work, nvmet_fc_handle_ls_rqst_work); 549 iod->tgtport = tgtport; 550 list_add_tail(&iod->ls_rcv_list, &tgtport->ls_rcv_list); 551 552 iod->rqstbuf = kzalloc(sizeof(union nvmefc_ls_requests) + 553 sizeof(union nvmefc_ls_responses), 554 GFP_KERNEL); 555 if (!iod->rqstbuf) 556 goto out_fail; 557 558 iod->rspbuf = (union nvmefc_ls_responses *)&iod->rqstbuf[1]; 559 560 iod->rspdma = fc_dma_map_single(tgtport->dev, iod->rspbuf, 561 sizeof(*iod->rspbuf), 562 DMA_TO_DEVICE); 563 if (fc_dma_mapping_error(tgtport->dev, iod->rspdma)) 564 goto out_fail; 565 } 566 567 return 0; 568 569 out_fail: 570 kfree(iod->rqstbuf); 571 list_del(&iod->ls_rcv_list); 572 for (iod--, i--; i >= 0; iod--, i--) { 573 fc_dma_unmap_single(tgtport->dev, iod->rspdma, 574 sizeof(*iod->rspbuf), DMA_TO_DEVICE); 575 kfree(iod->rqstbuf); 576 list_del(&iod->ls_rcv_list); 577 } 578 579 kfree(iod); 580 581 return -EFAULT; 582 } 583 584 static void 585 nvmet_fc_free_ls_iodlist(struct nvmet_fc_tgtport *tgtport) 586 { 587 struct nvmet_fc_ls_iod *iod = tgtport->iod; 588 int i; 589 590 for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) { 591 fc_dma_unmap_single(tgtport->dev, 592 iod->rspdma, sizeof(*iod->rspbuf), 593 DMA_TO_DEVICE); 594 kfree(iod->rqstbuf); 595 list_del(&iod->ls_rcv_list); 596 } 597 kfree(tgtport->iod); 598 } 599 600 static struct nvmet_fc_ls_iod * 601 nvmet_fc_alloc_ls_iod(struct nvmet_fc_tgtport *tgtport) 602 { 603 struct nvmet_fc_ls_iod *iod; 604 unsigned long flags; 605 606 spin_lock_irqsave(&tgtport->lock, flags); 607 iod = list_first_entry_or_null(&tgtport->ls_rcv_list, 608 struct nvmet_fc_ls_iod, ls_rcv_list); 609 if (iod) 610 list_move_tail(&iod->ls_rcv_list, &tgtport->ls_busylist); 611 spin_unlock_irqrestore(&tgtport->lock, flags); 612 return iod; 613 } 614 615 616 static void 617 nvmet_fc_free_ls_iod(struct nvmet_fc_tgtport *tgtport, 618 struct nvmet_fc_ls_iod *iod) 619 { 620 unsigned long flags; 621 622 spin_lock_irqsave(&tgtport->lock, flags); 623 list_move(&iod->ls_rcv_list, &tgtport->ls_rcv_list); 624 spin_unlock_irqrestore(&tgtport->lock, flags); 625 } 626 627 static void 628 nvmet_fc_prep_fcp_iodlist(struct nvmet_fc_tgtport *tgtport, 629 struct nvmet_fc_tgt_queue *queue) 630 { 631 struct nvmet_fc_fcp_iod *fod = queue->fod; 632 int i; 633 634 for (i = 0; i < queue->sqsize; fod++, i++) { 635 INIT_WORK(&fod->defer_work, nvmet_fc_fcp_rqst_op_defer_work); 636 fod->tgtport = tgtport; 637 fod->queue = queue; 638 fod->active = false; 639 fod->abort = false; 640 fod->aborted = false; 641 fod->fcpreq = NULL; 642 list_add_tail(&fod->fcp_list, &queue->fod_list); 643 spin_lock_init(&fod->flock); 644 645 fod->rspdma = fc_dma_map_single(tgtport->dev, &fod->rspiubuf, 646 sizeof(fod->rspiubuf), DMA_TO_DEVICE); 647 if (fc_dma_mapping_error(tgtport->dev, fod->rspdma)) { 648 list_del(&fod->fcp_list); 649 for (fod--, i--; i >= 0; fod--, i--) { 650 fc_dma_unmap_single(tgtport->dev, fod->rspdma, 651 sizeof(fod->rspiubuf), 652 DMA_TO_DEVICE); 653 fod->rspdma = 0L; 654 list_del(&fod->fcp_list); 655 } 656 657 return; 658 } 659 } 660 } 661 662 static void 663 nvmet_fc_destroy_fcp_iodlist(struct nvmet_fc_tgtport *tgtport, 664 struct nvmet_fc_tgt_queue *queue) 665 { 666 struct nvmet_fc_fcp_iod *fod = queue->fod; 667 int i; 668 669 for (i = 0; i < queue->sqsize; fod++, i++) { 670 if (fod->rspdma) 671 fc_dma_unmap_single(tgtport->dev, fod->rspdma, 672 sizeof(fod->rspiubuf), DMA_TO_DEVICE); 673 } 674 } 675 676 static struct nvmet_fc_fcp_iod * 677 nvmet_fc_alloc_fcp_iod(struct nvmet_fc_tgt_queue *queue) 678 { 679 struct nvmet_fc_fcp_iod *fod; 680 681 lockdep_assert_held(&queue->qlock); 682 683 fod = list_first_entry_or_null(&queue->fod_list, 684 struct nvmet_fc_fcp_iod, fcp_list); 685 if (fod) { 686 list_del(&fod->fcp_list); 687 fod->active = true; 688 /* 689 * no queue reference is taken, as it was taken by the 690 * queue lookup just prior to the allocation. The iod 691 * will "inherit" that reference. 692 */ 693 } 694 return fod; 695 } 696 697 698 static void 699 nvmet_fc_queue_fcp_req(struct nvmet_fc_tgtport *tgtport, 700 struct nvmet_fc_tgt_queue *queue, 701 struct nvmefc_tgt_fcp_req *fcpreq) 702 { 703 struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private; 704 705 /* 706 * put all admin cmds on hw queue id 0. All io commands go to 707 * the respective hw queue based on a modulo basis 708 */ 709 fcpreq->hwqid = queue->qid ? 710 ((queue->qid - 1) % tgtport->ops->max_hw_queues) : 0; 711 712 nvmet_fc_handle_fcp_rqst(tgtport, fod); 713 } 714 715 static void 716 nvmet_fc_fcp_rqst_op_defer_work(struct work_struct *work) 717 { 718 struct nvmet_fc_fcp_iod *fod = 719 container_of(work, struct nvmet_fc_fcp_iod, defer_work); 720 721 /* Submit deferred IO for processing */ 722 nvmet_fc_queue_fcp_req(fod->tgtport, fod->queue, fod->fcpreq); 723 724 } 725 726 static void 727 nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue, 728 struct nvmet_fc_fcp_iod *fod) 729 { 730 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; 731 struct nvmet_fc_tgtport *tgtport = fod->tgtport; 732 struct nvmet_fc_defer_fcp_req *deferfcp; 733 unsigned long flags; 734 735 fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma, 736 sizeof(fod->rspiubuf), DMA_TO_DEVICE); 737 738 fcpreq->nvmet_fc_private = NULL; 739 740 fod->active = false; 741 fod->abort = false; 742 fod->aborted = false; 743 fod->writedataactive = false; 744 fod->fcpreq = NULL; 745 746 tgtport->ops->fcp_req_release(&tgtport->fc_target_port, fcpreq); 747 748 /* release the queue lookup reference on the completed IO */ 749 nvmet_fc_tgt_q_put(queue); 750 751 spin_lock_irqsave(&queue->qlock, flags); 752 deferfcp = list_first_entry_or_null(&queue->pending_cmd_list, 753 struct nvmet_fc_defer_fcp_req, req_list); 754 if (!deferfcp) { 755 list_add_tail(&fod->fcp_list, &fod->queue->fod_list); 756 spin_unlock_irqrestore(&queue->qlock, flags); 757 return; 758 } 759 760 /* Re-use the fod for the next pending cmd that was deferred */ 761 list_del(&deferfcp->req_list); 762 763 fcpreq = deferfcp->fcp_req; 764 765 /* deferfcp can be reused for another IO at a later date */ 766 list_add_tail(&deferfcp->req_list, &queue->avail_defer_list); 767 768 spin_unlock_irqrestore(&queue->qlock, flags); 769 770 /* Save NVME CMD IO in fod */ 771 memcpy(&fod->cmdiubuf, fcpreq->rspaddr, fcpreq->rsplen); 772 773 /* Setup new fcpreq to be processed */ 774 fcpreq->rspaddr = NULL; 775 fcpreq->rsplen = 0; 776 fcpreq->nvmet_fc_private = fod; 777 fod->fcpreq = fcpreq; 778 fod->active = true; 779 780 /* inform LLDD IO is now being processed */ 781 tgtport->ops->defer_rcv(&tgtport->fc_target_port, fcpreq); 782 783 /* 784 * Leave the queue lookup get reference taken when 785 * fod was originally allocated. 786 */ 787 788 queue_work(queue->work_q, &fod->defer_work); 789 } 790 791 static struct nvmet_fc_tgt_queue * 792 nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc, 793 u16 qid, u16 sqsize) 794 { 795 struct nvmet_fc_tgt_queue *queue; 796 int ret; 797 798 if (qid > NVMET_NR_QUEUES) 799 return NULL; 800 801 queue = kzalloc(struct_size(queue, fod, sqsize), GFP_KERNEL); 802 if (!queue) 803 return NULL; 804 805 queue->work_q = alloc_workqueue("ntfc%d.%d.%d", 0, 0, 806 assoc->tgtport->fc_target_port.port_num, 807 assoc->a_id, qid); 808 if (!queue->work_q) 809 goto out_free_queue; 810 811 queue->qid = qid; 812 queue->sqsize = sqsize; 813 queue->assoc = assoc; 814 INIT_LIST_HEAD(&queue->fod_list); 815 INIT_LIST_HEAD(&queue->avail_defer_list); 816 INIT_LIST_HEAD(&queue->pending_cmd_list); 817 atomic_set(&queue->connected, 0); 818 atomic_set(&queue->sqtail, 0); 819 atomic_set(&queue->rsn, 1); 820 atomic_set(&queue->zrspcnt, 0); 821 spin_lock_init(&queue->qlock); 822 kref_init(&queue->ref); 823 824 nvmet_fc_prep_fcp_iodlist(assoc->tgtport, queue); 825 826 ret = nvmet_sq_init(&queue->nvme_sq); 827 if (ret) 828 goto out_fail_iodlist; 829 830 WARN_ON(assoc->queues[qid]); 831 assoc->queues[qid] = queue; 832 833 return queue; 834 835 out_fail_iodlist: 836 nvmet_fc_destroy_fcp_iodlist(assoc->tgtport, queue); 837 destroy_workqueue(queue->work_q); 838 out_free_queue: 839 kfree(queue); 840 return NULL; 841 } 842 843 844 static void 845 nvmet_fc_tgt_queue_free(struct kref *ref) 846 { 847 struct nvmet_fc_tgt_queue *queue = 848 container_of(ref, struct nvmet_fc_tgt_queue, ref); 849 850 nvmet_fc_destroy_fcp_iodlist(queue->assoc->tgtport, queue); 851 852 destroy_workqueue(queue->work_q); 853 854 kfree_rcu(queue, rcu); 855 } 856 857 static void 858 nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue) 859 { 860 kref_put(&queue->ref, nvmet_fc_tgt_queue_free); 861 } 862 863 static int 864 nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue) 865 { 866 return kref_get_unless_zero(&queue->ref); 867 } 868 869 870 static void 871 nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue) 872 { 873 struct nvmet_fc_tgtport *tgtport = queue->assoc->tgtport; 874 struct nvmet_fc_fcp_iod *fod = queue->fod; 875 struct nvmet_fc_defer_fcp_req *deferfcp, *tempptr; 876 unsigned long flags; 877 int i; 878 bool disconnect; 879 880 disconnect = atomic_xchg(&queue->connected, 0); 881 882 /* if not connected, nothing to do */ 883 if (!disconnect) 884 return; 885 886 spin_lock_irqsave(&queue->qlock, flags); 887 /* abort outstanding io's */ 888 for (i = 0; i < queue->sqsize; fod++, i++) { 889 if (fod->active) { 890 spin_lock(&fod->flock); 891 fod->abort = true; 892 /* 893 * only call lldd abort routine if waiting for 894 * writedata. other outstanding ops should finish 895 * on their own. 896 */ 897 if (fod->writedataactive) { 898 fod->aborted = true; 899 spin_unlock(&fod->flock); 900 tgtport->ops->fcp_abort( 901 &tgtport->fc_target_port, fod->fcpreq); 902 } else 903 spin_unlock(&fod->flock); 904 } 905 } 906 907 /* Cleanup defer'ed IOs in queue */ 908 list_for_each_entry_safe(deferfcp, tempptr, &queue->avail_defer_list, 909 req_list) { 910 list_del(&deferfcp->req_list); 911 kfree(deferfcp); 912 } 913 914 for (;;) { 915 deferfcp = list_first_entry_or_null(&queue->pending_cmd_list, 916 struct nvmet_fc_defer_fcp_req, req_list); 917 if (!deferfcp) 918 break; 919 920 list_del(&deferfcp->req_list); 921 spin_unlock_irqrestore(&queue->qlock, flags); 922 923 tgtport->ops->defer_rcv(&tgtport->fc_target_port, 924 deferfcp->fcp_req); 925 926 tgtport->ops->fcp_abort(&tgtport->fc_target_port, 927 deferfcp->fcp_req); 928 929 tgtport->ops->fcp_req_release(&tgtport->fc_target_port, 930 deferfcp->fcp_req); 931 932 /* release the queue lookup reference */ 933 nvmet_fc_tgt_q_put(queue); 934 935 kfree(deferfcp); 936 937 spin_lock_irqsave(&queue->qlock, flags); 938 } 939 spin_unlock_irqrestore(&queue->qlock, flags); 940 941 flush_workqueue(queue->work_q); 942 943 nvmet_sq_destroy(&queue->nvme_sq); 944 945 nvmet_fc_tgt_q_put(queue); 946 } 947 948 static struct nvmet_fc_tgt_queue * 949 nvmet_fc_find_target_queue(struct nvmet_fc_tgtport *tgtport, 950 u64 connection_id) 951 { 952 struct nvmet_fc_tgt_assoc *assoc; 953 struct nvmet_fc_tgt_queue *queue; 954 u64 association_id = nvmet_fc_getassociationid(connection_id); 955 u16 qid = nvmet_fc_getqueueid(connection_id); 956 957 if (qid > NVMET_NR_QUEUES) 958 return NULL; 959 960 rcu_read_lock(); 961 list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) { 962 if (association_id == assoc->association_id) { 963 queue = assoc->queues[qid]; 964 if (queue && 965 (!atomic_read(&queue->connected) || 966 !nvmet_fc_tgt_q_get(queue))) 967 queue = NULL; 968 rcu_read_unlock(); 969 return queue; 970 } 971 } 972 rcu_read_unlock(); 973 return NULL; 974 } 975 976 static void 977 nvmet_fc_hostport_free(struct kref *ref) 978 { 979 struct nvmet_fc_hostport *hostport = 980 container_of(ref, struct nvmet_fc_hostport, ref); 981 struct nvmet_fc_tgtport *tgtport = hostport->tgtport; 982 unsigned long flags; 983 984 spin_lock_irqsave(&tgtport->lock, flags); 985 list_del(&hostport->host_list); 986 spin_unlock_irqrestore(&tgtport->lock, flags); 987 if (tgtport->ops->host_release && hostport->invalid) 988 tgtport->ops->host_release(hostport->hosthandle); 989 kfree(hostport); 990 nvmet_fc_tgtport_put(tgtport); 991 } 992 993 static void 994 nvmet_fc_hostport_put(struct nvmet_fc_hostport *hostport) 995 { 996 kref_put(&hostport->ref, nvmet_fc_hostport_free); 997 } 998 999 static int 1000 nvmet_fc_hostport_get(struct nvmet_fc_hostport *hostport) 1001 { 1002 return kref_get_unless_zero(&hostport->ref); 1003 } 1004 1005 static void 1006 nvmet_fc_free_hostport(struct nvmet_fc_hostport *hostport) 1007 { 1008 /* if LLDD not implemented, leave as NULL */ 1009 if (!hostport || !hostport->hosthandle) 1010 return; 1011 1012 nvmet_fc_hostport_put(hostport); 1013 } 1014 1015 static struct nvmet_fc_hostport * 1016 nvmet_fc_match_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle) 1017 { 1018 struct nvmet_fc_hostport *host; 1019 1020 lockdep_assert_held(&tgtport->lock); 1021 1022 list_for_each_entry(host, &tgtport->host_list, host_list) { 1023 if (host->hosthandle == hosthandle && !host->invalid) { 1024 if (nvmet_fc_hostport_get(host)) 1025 return (host); 1026 } 1027 } 1028 1029 return NULL; 1030 } 1031 1032 static struct nvmet_fc_hostport * 1033 nvmet_fc_alloc_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle) 1034 { 1035 struct nvmet_fc_hostport *newhost, *match = NULL; 1036 unsigned long flags; 1037 1038 /* if LLDD not implemented, leave as NULL */ 1039 if (!hosthandle) 1040 return NULL; 1041 1042 /* 1043 * take reference for what will be the newly allocated hostport if 1044 * we end up using a new allocation 1045 */ 1046 if (!nvmet_fc_tgtport_get(tgtport)) 1047 return ERR_PTR(-EINVAL); 1048 1049 spin_lock_irqsave(&tgtport->lock, flags); 1050 match = nvmet_fc_match_hostport(tgtport, hosthandle); 1051 spin_unlock_irqrestore(&tgtport->lock, flags); 1052 1053 if (match) { 1054 /* no new allocation - release reference */ 1055 nvmet_fc_tgtport_put(tgtport); 1056 return match; 1057 } 1058 1059 newhost = kzalloc(sizeof(*newhost), GFP_KERNEL); 1060 if (!newhost) { 1061 /* no new allocation - release reference */ 1062 nvmet_fc_tgtport_put(tgtport); 1063 return ERR_PTR(-ENOMEM); 1064 } 1065 1066 spin_lock_irqsave(&tgtport->lock, flags); 1067 match = nvmet_fc_match_hostport(tgtport, hosthandle); 1068 if (match) { 1069 /* new allocation not needed */ 1070 kfree(newhost); 1071 newhost = match; 1072 /* no new allocation - release reference */ 1073 nvmet_fc_tgtport_put(tgtport); 1074 } else { 1075 newhost->tgtport = tgtport; 1076 newhost->hosthandle = hosthandle; 1077 INIT_LIST_HEAD(&newhost->host_list); 1078 kref_init(&newhost->ref); 1079 1080 list_add_tail(&newhost->host_list, &tgtport->host_list); 1081 } 1082 spin_unlock_irqrestore(&tgtport->lock, flags); 1083 1084 return newhost; 1085 } 1086 1087 static void 1088 nvmet_fc_delete_assoc(struct work_struct *work) 1089 { 1090 struct nvmet_fc_tgt_assoc *assoc = 1091 container_of(work, struct nvmet_fc_tgt_assoc, del_work); 1092 1093 nvmet_fc_delete_target_assoc(assoc); 1094 nvmet_fc_tgt_a_put(assoc); 1095 } 1096 1097 static struct nvmet_fc_tgt_assoc * 1098 nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport, void *hosthandle) 1099 { 1100 struct nvmet_fc_tgt_assoc *assoc, *tmpassoc; 1101 unsigned long flags; 1102 u64 ran; 1103 int idx; 1104 bool needrandom = true; 1105 1106 assoc = kzalloc(sizeof(*assoc), GFP_KERNEL); 1107 if (!assoc) 1108 return NULL; 1109 1110 idx = ida_alloc(&tgtport->assoc_cnt, GFP_KERNEL); 1111 if (idx < 0) 1112 goto out_free_assoc; 1113 1114 if (!nvmet_fc_tgtport_get(tgtport)) 1115 goto out_ida; 1116 1117 assoc->hostport = nvmet_fc_alloc_hostport(tgtport, hosthandle); 1118 if (IS_ERR(assoc->hostport)) 1119 goto out_put; 1120 1121 assoc->tgtport = tgtport; 1122 assoc->a_id = idx; 1123 INIT_LIST_HEAD(&assoc->a_list); 1124 kref_init(&assoc->ref); 1125 INIT_WORK(&assoc->del_work, nvmet_fc_delete_assoc); 1126 atomic_set(&assoc->terminating, 0); 1127 1128 while (needrandom) { 1129 get_random_bytes(&ran, sizeof(ran) - BYTES_FOR_QID); 1130 ran = ran << BYTES_FOR_QID_SHIFT; 1131 1132 spin_lock_irqsave(&tgtport->lock, flags); 1133 needrandom = false; 1134 list_for_each_entry(tmpassoc, &tgtport->assoc_list, a_list) { 1135 if (ran == tmpassoc->association_id) { 1136 needrandom = true; 1137 break; 1138 } 1139 } 1140 if (!needrandom) { 1141 assoc->association_id = ran; 1142 list_add_tail_rcu(&assoc->a_list, &tgtport->assoc_list); 1143 } 1144 spin_unlock_irqrestore(&tgtport->lock, flags); 1145 } 1146 1147 return assoc; 1148 1149 out_put: 1150 nvmet_fc_tgtport_put(tgtport); 1151 out_ida: 1152 ida_free(&tgtport->assoc_cnt, idx); 1153 out_free_assoc: 1154 kfree(assoc); 1155 return NULL; 1156 } 1157 1158 static void 1159 nvmet_fc_target_assoc_free(struct kref *ref) 1160 { 1161 struct nvmet_fc_tgt_assoc *assoc = 1162 container_of(ref, struct nvmet_fc_tgt_assoc, ref); 1163 struct nvmet_fc_tgtport *tgtport = assoc->tgtport; 1164 struct nvmet_fc_ls_iod *oldls; 1165 unsigned long flags; 1166 int i; 1167 1168 for (i = NVMET_NR_QUEUES; i >= 0; i--) { 1169 if (assoc->queues[i]) 1170 nvmet_fc_delete_target_queue(assoc->queues[i]); 1171 } 1172 1173 /* Send Disconnect now that all i/o has completed */ 1174 nvmet_fc_xmt_disconnect_assoc(assoc); 1175 1176 nvmet_fc_free_hostport(assoc->hostport); 1177 spin_lock_irqsave(&tgtport->lock, flags); 1178 oldls = assoc->rcv_disconn; 1179 spin_unlock_irqrestore(&tgtport->lock, flags); 1180 /* if pending Rcv Disconnect Association LS, send rsp now */ 1181 if (oldls) 1182 nvmet_fc_xmt_ls_rsp(tgtport, oldls); 1183 ida_free(&tgtport->assoc_cnt, assoc->a_id); 1184 dev_info(tgtport->dev, 1185 "{%d:%d} Association freed\n", 1186 tgtport->fc_target_port.port_num, assoc->a_id); 1187 kfree_rcu(assoc, rcu); 1188 nvmet_fc_tgtport_put(tgtport); 1189 } 1190 1191 static void 1192 nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc) 1193 { 1194 kref_put(&assoc->ref, nvmet_fc_target_assoc_free); 1195 } 1196 1197 static int 1198 nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc) 1199 { 1200 return kref_get_unless_zero(&assoc->ref); 1201 } 1202 1203 static void 1204 nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc) 1205 { 1206 struct nvmet_fc_tgtport *tgtport = assoc->tgtport; 1207 unsigned long flags; 1208 int i, terminating; 1209 1210 terminating = atomic_xchg(&assoc->terminating, 1); 1211 1212 /* if already terminating, do nothing */ 1213 if (terminating) 1214 return; 1215 1216 spin_lock_irqsave(&tgtport->lock, flags); 1217 list_del_rcu(&assoc->a_list); 1218 spin_unlock_irqrestore(&tgtport->lock, flags); 1219 1220 synchronize_rcu(); 1221 1222 /* ensure all in-flight I/Os have been processed */ 1223 for (i = NVMET_NR_QUEUES; i >= 0; i--) { 1224 if (assoc->queues[i]) 1225 flush_workqueue(assoc->queues[i]->work_q); 1226 } 1227 1228 dev_info(tgtport->dev, 1229 "{%d:%d} Association deleted\n", 1230 tgtport->fc_target_port.port_num, assoc->a_id); 1231 } 1232 1233 static struct nvmet_fc_tgt_assoc * 1234 nvmet_fc_find_target_assoc(struct nvmet_fc_tgtport *tgtport, 1235 u64 association_id) 1236 { 1237 struct nvmet_fc_tgt_assoc *assoc; 1238 struct nvmet_fc_tgt_assoc *ret = NULL; 1239 1240 rcu_read_lock(); 1241 list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) { 1242 if (association_id == assoc->association_id) { 1243 ret = assoc; 1244 if (!nvmet_fc_tgt_a_get(assoc)) 1245 ret = NULL; 1246 break; 1247 } 1248 } 1249 rcu_read_unlock(); 1250 1251 return ret; 1252 } 1253 1254 static void 1255 nvmet_fc_portentry_bind(struct nvmet_fc_tgtport *tgtport, 1256 struct nvmet_fc_port_entry *pe, 1257 struct nvmet_port *port) 1258 { 1259 lockdep_assert_held(&nvmet_fc_tgtlock); 1260 1261 pe->tgtport = tgtport; 1262 tgtport->pe = pe; 1263 1264 pe->port = port; 1265 port->priv = pe; 1266 1267 pe->node_name = tgtport->fc_target_port.node_name; 1268 pe->port_name = tgtport->fc_target_port.port_name; 1269 INIT_LIST_HEAD(&pe->pe_list); 1270 1271 list_add_tail(&pe->pe_list, &nvmet_fc_portentry_list); 1272 } 1273 1274 static void 1275 nvmet_fc_portentry_unbind(struct nvmet_fc_port_entry *pe) 1276 { 1277 unsigned long flags; 1278 1279 spin_lock_irqsave(&nvmet_fc_tgtlock, flags); 1280 if (pe->tgtport) 1281 pe->tgtport->pe = NULL; 1282 list_del(&pe->pe_list); 1283 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); 1284 } 1285 1286 /* 1287 * called when a targetport deregisters. Breaks the relationship 1288 * with the nvmet port, but leaves the port_entry in place so that 1289 * re-registration can resume operation. 1290 */ 1291 static void 1292 nvmet_fc_portentry_unbind_tgt(struct nvmet_fc_tgtport *tgtport) 1293 { 1294 struct nvmet_fc_port_entry *pe; 1295 unsigned long flags; 1296 1297 spin_lock_irqsave(&nvmet_fc_tgtlock, flags); 1298 pe = tgtport->pe; 1299 if (pe) 1300 pe->tgtport = NULL; 1301 tgtport->pe = NULL; 1302 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); 1303 } 1304 1305 /* 1306 * called when a new targetport is registered. Looks in the 1307 * existing nvmet port_entries to see if the nvmet layer is 1308 * configured for the targetport's wwn's. (the targetport existed, 1309 * nvmet configured, the lldd unregistered the tgtport, and is now 1310 * reregistering the same targetport). If so, set the nvmet port 1311 * port entry on the targetport. 1312 */ 1313 static void 1314 nvmet_fc_portentry_rebind_tgt(struct nvmet_fc_tgtport *tgtport) 1315 { 1316 struct nvmet_fc_port_entry *pe; 1317 unsigned long flags; 1318 1319 spin_lock_irqsave(&nvmet_fc_tgtlock, flags); 1320 list_for_each_entry(pe, &nvmet_fc_portentry_list, pe_list) { 1321 if (tgtport->fc_target_port.node_name == pe->node_name && 1322 tgtport->fc_target_port.port_name == pe->port_name) { 1323 WARN_ON(pe->tgtport); 1324 tgtport->pe = pe; 1325 pe->tgtport = tgtport; 1326 break; 1327 } 1328 } 1329 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); 1330 } 1331 1332 /** 1333 * nvmet_fc_register_targetport - transport entry point called by an 1334 * LLDD to register the existence of a local 1335 * NVME subystem FC port. 1336 * @pinfo: pointer to information about the port to be registered 1337 * @template: LLDD entrypoints and operational parameters for the port 1338 * @dev: physical hardware device node port corresponds to. Will be 1339 * used for DMA mappings 1340 * @portptr: pointer to a local port pointer. Upon success, the routine 1341 * will allocate a nvme_fc_local_port structure and place its 1342 * address in the local port pointer. Upon failure, local port 1343 * pointer will be set to NULL. 1344 * 1345 * Returns: 1346 * a completion status. Must be 0 upon success; a negative errno 1347 * (ex: -ENXIO) upon failure. 1348 */ 1349 int 1350 nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo, 1351 struct nvmet_fc_target_template *template, 1352 struct device *dev, 1353 struct nvmet_fc_target_port **portptr) 1354 { 1355 struct nvmet_fc_tgtport *newrec; 1356 unsigned long flags; 1357 int ret, idx; 1358 1359 if (!template->xmt_ls_rsp || !template->fcp_op || 1360 !template->fcp_abort || 1361 !template->fcp_req_release || !template->targetport_delete || 1362 !template->max_hw_queues || !template->max_sgl_segments || 1363 !template->max_dif_sgl_segments || !template->dma_boundary) { 1364 ret = -EINVAL; 1365 goto out_regtgt_failed; 1366 } 1367 1368 newrec = kzalloc((sizeof(*newrec) + template->target_priv_sz), 1369 GFP_KERNEL); 1370 if (!newrec) { 1371 ret = -ENOMEM; 1372 goto out_regtgt_failed; 1373 } 1374 1375 idx = ida_alloc(&nvmet_fc_tgtport_cnt, GFP_KERNEL); 1376 if (idx < 0) { 1377 ret = -ENOSPC; 1378 goto out_fail_kfree; 1379 } 1380 1381 if (!get_device(dev) && dev) { 1382 ret = -ENODEV; 1383 goto out_ida_put; 1384 } 1385 1386 newrec->fc_target_port.node_name = pinfo->node_name; 1387 newrec->fc_target_port.port_name = pinfo->port_name; 1388 if (template->target_priv_sz) 1389 newrec->fc_target_port.private = &newrec[1]; 1390 else 1391 newrec->fc_target_port.private = NULL; 1392 newrec->fc_target_port.port_id = pinfo->port_id; 1393 newrec->fc_target_port.port_num = idx; 1394 INIT_LIST_HEAD(&newrec->tgt_list); 1395 newrec->dev = dev; 1396 newrec->ops = template; 1397 spin_lock_init(&newrec->lock); 1398 INIT_LIST_HEAD(&newrec->ls_rcv_list); 1399 INIT_LIST_HEAD(&newrec->ls_req_list); 1400 INIT_LIST_HEAD(&newrec->ls_busylist); 1401 INIT_LIST_HEAD(&newrec->assoc_list); 1402 INIT_LIST_HEAD(&newrec->host_list); 1403 kref_init(&newrec->ref); 1404 ida_init(&newrec->assoc_cnt); 1405 newrec->max_sg_cnt = template->max_sgl_segments; 1406 1407 ret = nvmet_fc_alloc_ls_iodlist(newrec); 1408 if (ret) { 1409 ret = -ENOMEM; 1410 goto out_free_newrec; 1411 } 1412 1413 nvmet_fc_portentry_rebind_tgt(newrec); 1414 1415 spin_lock_irqsave(&nvmet_fc_tgtlock, flags); 1416 list_add_tail(&newrec->tgt_list, &nvmet_fc_target_list); 1417 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); 1418 1419 *portptr = &newrec->fc_target_port; 1420 return 0; 1421 1422 out_free_newrec: 1423 put_device(dev); 1424 out_ida_put: 1425 ida_free(&nvmet_fc_tgtport_cnt, idx); 1426 out_fail_kfree: 1427 kfree(newrec); 1428 out_regtgt_failed: 1429 *portptr = NULL; 1430 return ret; 1431 } 1432 EXPORT_SYMBOL_GPL(nvmet_fc_register_targetport); 1433 1434 1435 static void 1436 nvmet_fc_free_tgtport(struct kref *ref) 1437 { 1438 struct nvmet_fc_tgtport *tgtport = 1439 container_of(ref, struct nvmet_fc_tgtport, ref); 1440 struct device *dev = tgtport->dev; 1441 unsigned long flags; 1442 1443 spin_lock_irqsave(&nvmet_fc_tgtlock, flags); 1444 list_del(&tgtport->tgt_list); 1445 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); 1446 1447 nvmet_fc_free_ls_iodlist(tgtport); 1448 1449 /* let the LLDD know we've finished tearing it down */ 1450 tgtport->ops->targetport_delete(&tgtport->fc_target_port); 1451 1452 ida_free(&nvmet_fc_tgtport_cnt, 1453 tgtport->fc_target_port.port_num); 1454 1455 ida_destroy(&tgtport->assoc_cnt); 1456 1457 kfree(tgtport); 1458 1459 put_device(dev); 1460 } 1461 1462 static void 1463 nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport) 1464 { 1465 kref_put(&tgtport->ref, nvmet_fc_free_tgtport); 1466 } 1467 1468 static int 1469 nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport) 1470 { 1471 return kref_get_unless_zero(&tgtport->ref); 1472 } 1473 1474 static void 1475 __nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport) 1476 { 1477 struct nvmet_fc_tgt_assoc *assoc; 1478 1479 rcu_read_lock(); 1480 list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) { 1481 if (!nvmet_fc_tgt_a_get(assoc)) 1482 continue; 1483 queue_work(nvmet_wq, &assoc->del_work); 1484 nvmet_fc_tgt_a_put(assoc); 1485 } 1486 rcu_read_unlock(); 1487 } 1488 1489 /** 1490 * nvmet_fc_invalidate_host - transport entry point called by an LLDD 1491 * to remove references to a hosthandle for LS's. 1492 * 1493 * The nvmet-fc layer ensures that any references to the hosthandle 1494 * on the targetport are forgotten (set to NULL). The LLDD will 1495 * typically call this when a login with a remote host port has been 1496 * lost, thus LS's for the remote host port are no longer possible. 1497 * 1498 * If an LS request is outstanding to the targetport/hosthandle (or 1499 * issued concurrently with the call to invalidate the host), the 1500 * LLDD is responsible for terminating/aborting the LS and completing 1501 * the LS request. It is recommended that these terminations/aborts 1502 * occur after calling to invalidate the host handle to avoid additional 1503 * retries by the nvmet-fc transport. The nvmet-fc transport may 1504 * continue to reference host handle while it cleans up outstanding 1505 * NVME associations. The nvmet-fc transport will call the 1506 * ops->host_release() callback to notify the LLDD that all references 1507 * are complete and the related host handle can be recovered. 1508 * Note: if there are no references, the callback may be called before 1509 * the invalidate host call returns. 1510 * 1511 * @target_port: pointer to the (registered) target port that a prior 1512 * LS was received on and which supplied the transport the 1513 * hosthandle. 1514 * @hosthandle: the handle (pointer) that represents the host port 1515 * that no longer has connectivity and that LS's should 1516 * no longer be directed to. 1517 */ 1518 void 1519 nvmet_fc_invalidate_host(struct nvmet_fc_target_port *target_port, 1520 void *hosthandle) 1521 { 1522 struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port); 1523 struct nvmet_fc_tgt_assoc *assoc, *next; 1524 unsigned long flags; 1525 bool noassoc = true; 1526 1527 spin_lock_irqsave(&tgtport->lock, flags); 1528 list_for_each_entry_safe(assoc, next, 1529 &tgtport->assoc_list, a_list) { 1530 if (!assoc->hostport || 1531 assoc->hostport->hosthandle != hosthandle) 1532 continue; 1533 if (!nvmet_fc_tgt_a_get(assoc)) 1534 continue; 1535 assoc->hostport->invalid = 1; 1536 noassoc = false; 1537 queue_work(nvmet_wq, &assoc->del_work); 1538 nvmet_fc_tgt_a_put(assoc); 1539 } 1540 spin_unlock_irqrestore(&tgtport->lock, flags); 1541 1542 /* if there's nothing to wait for - call the callback */ 1543 if (noassoc && tgtport->ops->host_release) 1544 tgtport->ops->host_release(hosthandle); 1545 } 1546 EXPORT_SYMBOL_GPL(nvmet_fc_invalidate_host); 1547 1548 /* 1549 * nvmet layer has called to terminate an association 1550 */ 1551 static void 1552 nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl) 1553 { 1554 struct nvmet_fc_tgtport *tgtport, *next; 1555 struct nvmet_fc_tgt_assoc *assoc; 1556 struct nvmet_fc_tgt_queue *queue; 1557 unsigned long flags; 1558 bool found_ctrl = false; 1559 1560 /* this is a bit ugly, but don't want to make locks layered */ 1561 spin_lock_irqsave(&nvmet_fc_tgtlock, flags); 1562 list_for_each_entry_safe(tgtport, next, &nvmet_fc_target_list, 1563 tgt_list) { 1564 if (!nvmet_fc_tgtport_get(tgtport)) 1565 continue; 1566 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); 1567 1568 rcu_read_lock(); 1569 list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) { 1570 queue = assoc->queues[0]; 1571 if (queue && queue->nvme_sq.ctrl == ctrl) { 1572 if (nvmet_fc_tgt_a_get(assoc)) 1573 found_ctrl = true; 1574 break; 1575 } 1576 } 1577 rcu_read_unlock(); 1578 1579 nvmet_fc_tgtport_put(tgtport); 1580 1581 if (found_ctrl) { 1582 queue_work(nvmet_wq, &assoc->del_work); 1583 nvmet_fc_tgt_a_put(assoc); 1584 return; 1585 } 1586 1587 spin_lock_irqsave(&nvmet_fc_tgtlock, flags); 1588 } 1589 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); 1590 } 1591 1592 /** 1593 * nvmet_fc_unregister_targetport - transport entry point called by an 1594 * LLDD to deregister/remove a previously 1595 * registered a local NVME subsystem FC port. 1596 * @target_port: pointer to the (registered) target port that is to be 1597 * deregistered. 1598 * 1599 * Returns: 1600 * a completion status. Must be 0 upon success; a negative errno 1601 * (ex: -ENXIO) upon failure. 1602 */ 1603 int 1604 nvmet_fc_unregister_targetport(struct nvmet_fc_target_port *target_port) 1605 { 1606 struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port); 1607 1608 nvmet_fc_portentry_unbind_tgt(tgtport); 1609 1610 /* terminate any outstanding associations */ 1611 __nvmet_fc_free_assocs(tgtport); 1612 1613 flush_workqueue(nvmet_wq); 1614 1615 /* 1616 * should terminate LS's as well. However, LS's will be generated 1617 * at the tail end of association termination, so they likely don't 1618 * exist yet. And even if they did, it's worthwhile to just let 1619 * them finish and targetport ref counting will clean things up. 1620 */ 1621 1622 nvmet_fc_tgtport_put(tgtport); 1623 1624 return 0; 1625 } 1626 EXPORT_SYMBOL_GPL(nvmet_fc_unregister_targetport); 1627 1628 1629 /* ********************** FC-NVME LS RCV Handling ************************* */ 1630 1631 1632 static void 1633 nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport, 1634 struct nvmet_fc_ls_iod *iod) 1635 { 1636 struct fcnvme_ls_cr_assoc_rqst *rqst = &iod->rqstbuf->rq_cr_assoc; 1637 struct fcnvme_ls_cr_assoc_acc *acc = &iod->rspbuf->rsp_cr_assoc; 1638 struct nvmet_fc_tgt_queue *queue; 1639 int ret = 0; 1640 1641 memset(acc, 0, sizeof(*acc)); 1642 1643 /* 1644 * FC-NVME spec changes. There are initiators sending different 1645 * lengths as padding sizes for Create Association Cmd descriptor 1646 * was incorrect. 1647 * Accept anything of "minimum" length. Assume format per 1.15 1648 * spec (with HOSTID reduced to 16 bytes), ignore how long the 1649 * trailing pad length is. 1650 */ 1651 if (iod->rqstdatalen < FCNVME_LSDESC_CRA_RQST_MINLEN) 1652 ret = VERR_CR_ASSOC_LEN; 1653 else if (be32_to_cpu(rqst->desc_list_len) < 1654 FCNVME_LSDESC_CRA_RQST_MIN_LISTLEN) 1655 ret = VERR_CR_ASSOC_RQST_LEN; 1656 else if (rqst->assoc_cmd.desc_tag != 1657 cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD)) 1658 ret = VERR_CR_ASSOC_CMD; 1659 else if (be32_to_cpu(rqst->assoc_cmd.desc_len) < 1660 FCNVME_LSDESC_CRA_CMD_DESC_MIN_DESCLEN) 1661 ret = VERR_CR_ASSOC_CMD_LEN; 1662 else if (!rqst->assoc_cmd.ersp_ratio || 1663 (be16_to_cpu(rqst->assoc_cmd.ersp_ratio) >= 1664 be16_to_cpu(rqst->assoc_cmd.sqsize))) 1665 ret = VERR_ERSP_RATIO; 1666 1667 else { 1668 /* new association w/ admin queue */ 1669 iod->assoc = nvmet_fc_alloc_target_assoc( 1670 tgtport, iod->hosthandle); 1671 if (!iod->assoc) 1672 ret = VERR_ASSOC_ALLOC_FAIL; 1673 else { 1674 queue = nvmet_fc_alloc_target_queue(iod->assoc, 0, 1675 be16_to_cpu(rqst->assoc_cmd.sqsize)); 1676 if (!queue) { 1677 ret = VERR_QUEUE_ALLOC_FAIL; 1678 nvmet_fc_tgt_a_put(iod->assoc); 1679 } 1680 } 1681 } 1682 1683 if (ret) { 1684 dev_err(tgtport->dev, 1685 "Create Association LS failed: %s\n", 1686 validation_errors[ret]); 1687 iod->lsrsp->rsplen = nvme_fc_format_rjt(acc, 1688 sizeof(*acc), rqst->w0.ls_cmd, 1689 FCNVME_RJT_RC_LOGIC, 1690 FCNVME_RJT_EXP_NONE, 0); 1691 return; 1692 } 1693 1694 queue->ersp_ratio = be16_to_cpu(rqst->assoc_cmd.ersp_ratio); 1695 atomic_set(&queue->connected, 1); 1696 queue->sqhd = 0; /* best place to init value */ 1697 1698 dev_info(tgtport->dev, 1699 "{%d:%d} Association created\n", 1700 tgtport->fc_target_port.port_num, iod->assoc->a_id); 1701 1702 /* format a response */ 1703 1704 iod->lsrsp->rsplen = sizeof(*acc); 1705 1706 nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC, 1707 fcnvme_lsdesc_len( 1708 sizeof(struct fcnvme_ls_cr_assoc_acc)), 1709 FCNVME_LS_CREATE_ASSOCIATION); 1710 acc->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID); 1711 acc->associd.desc_len = 1712 fcnvme_lsdesc_len( 1713 sizeof(struct fcnvme_lsdesc_assoc_id)); 1714 acc->associd.association_id = 1715 cpu_to_be64(nvmet_fc_makeconnid(iod->assoc, 0)); 1716 acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID); 1717 acc->connectid.desc_len = 1718 fcnvme_lsdesc_len( 1719 sizeof(struct fcnvme_lsdesc_conn_id)); 1720 acc->connectid.connection_id = acc->associd.association_id; 1721 } 1722 1723 static void 1724 nvmet_fc_ls_create_connection(struct nvmet_fc_tgtport *tgtport, 1725 struct nvmet_fc_ls_iod *iod) 1726 { 1727 struct fcnvme_ls_cr_conn_rqst *rqst = &iod->rqstbuf->rq_cr_conn; 1728 struct fcnvme_ls_cr_conn_acc *acc = &iod->rspbuf->rsp_cr_conn; 1729 struct nvmet_fc_tgt_queue *queue; 1730 int ret = 0; 1731 1732 memset(acc, 0, sizeof(*acc)); 1733 1734 if (iod->rqstdatalen < sizeof(struct fcnvme_ls_cr_conn_rqst)) 1735 ret = VERR_CR_CONN_LEN; 1736 else if (rqst->desc_list_len != 1737 fcnvme_lsdesc_len( 1738 sizeof(struct fcnvme_ls_cr_conn_rqst))) 1739 ret = VERR_CR_CONN_RQST_LEN; 1740 else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID)) 1741 ret = VERR_ASSOC_ID; 1742 else if (rqst->associd.desc_len != 1743 fcnvme_lsdesc_len( 1744 sizeof(struct fcnvme_lsdesc_assoc_id))) 1745 ret = VERR_ASSOC_ID_LEN; 1746 else if (rqst->connect_cmd.desc_tag != 1747 cpu_to_be32(FCNVME_LSDESC_CREATE_CONN_CMD)) 1748 ret = VERR_CR_CONN_CMD; 1749 else if (rqst->connect_cmd.desc_len != 1750 fcnvme_lsdesc_len( 1751 sizeof(struct fcnvme_lsdesc_cr_conn_cmd))) 1752 ret = VERR_CR_CONN_CMD_LEN; 1753 else if (!rqst->connect_cmd.ersp_ratio || 1754 (be16_to_cpu(rqst->connect_cmd.ersp_ratio) >= 1755 be16_to_cpu(rqst->connect_cmd.sqsize))) 1756 ret = VERR_ERSP_RATIO; 1757 1758 else { 1759 /* new io queue */ 1760 iod->assoc = nvmet_fc_find_target_assoc(tgtport, 1761 be64_to_cpu(rqst->associd.association_id)); 1762 if (!iod->assoc) 1763 ret = VERR_NO_ASSOC; 1764 else { 1765 queue = nvmet_fc_alloc_target_queue(iod->assoc, 1766 be16_to_cpu(rqst->connect_cmd.qid), 1767 be16_to_cpu(rqst->connect_cmd.sqsize)); 1768 if (!queue) 1769 ret = VERR_QUEUE_ALLOC_FAIL; 1770 1771 /* release get taken in nvmet_fc_find_target_assoc */ 1772 nvmet_fc_tgt_a_put(iod->assoc); 1773 } 1774 } 1775 1776 if (ret) { 1777 dev_err(tgtport->dev, 1778 "Create Connection LS failed: %s\n", 1779 validation_errors[ret]); 1780 iod->lsrsp->rsplen = nvme_fc_format_rjt(acc, 1781 sizeof(*acc), rqst->w0.ls_cmd, 1782 (ret == VERR_NO_ASSOC) ? 1783 FCNVME_RJT_RC_INV_ASSOC : 1784 FCNVME_RJT_RC_LOGIC, 1785 FCNVME_RJT_EXP_NONE, 0); 1786 return; 1787 } 1788 1789 queue->ersp_ratio = be16_to_cpu(rqst->connect_cmd.ersp_ratio); 1790 atomic_set(&queue->connected, 1); 1791 queue->sqhd = 0; /* best place to init value */ 1792 1793 /* format a response */ 1794 1795 iod->lsrsp->rsplen = sizeof(*acc); 1796 1797 nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC, 1798 fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc)), 1799 FCNVME_LS_CREATE_CONNECTION); 1800 acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID); 1801 acc->connectid.desc_len = 1802 fcnvme_lsdesc_len( 1803 sizeof(struct fcnvme_lsdesc_conn_id)); 1804 acc->connectid.connection_id = 1805 cpu_to_be64(nvmet_fc_makeconnid(iod->assoc, 1806 be16_to_cpu(rqst->connect_cmd.qid))); 1807 } 1808 1809 /* 1810 * Returns true if the LS response is to be transmit 1811 * Returns false if the LS response is to be delayed 1812 */ 1813 static int 1814 nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport, 1815 struct nvmet_fc_ls_iod *iod) 1816 { 1817 struct fcnvme_ls_disconnect_assoc_rqst *rqst = 1818 &iod->rqstbuf->rq_dis_assoc; 1819 struct fcnvme_ls_disconnect_assoc_acc *acc = 1820 &iod->rspbuf->rsp_dis_assoc; 1821 struct nvmet_fc_tgt_assoc *assoc = NULL; 1822 struct nvmet_fc_ls_iod *oldls = NULL; 1823 unsigned long flags; 1824 int ret = 0; 1825 1826 memset(acc, 0, sizeof(*acc)); 1827 1828 ret = nvmefc_vldt_lsreq_discon_assoc(iod->rqstdatalen, rqst); 1829 if (!ret) { 1830 /* match an active association - takes an assoc ref if !NULL */ 1831 assoc = nvmet_fc_find_target_assoc(tgtport, 1832 be64_to_cpu(rqst->associd.association_id)); 1833 iod->assoc = assoc; 1834 if (!assoc) 1835 ret = VERR_NO_ASSOC; 1836 } 1837 1838 if (ret || !assoc) { 1839 dev_err(tgtport->dev, 1840 "Disconnect LS failed: %s\n", 1841 validation_errors[ret]); 1842 iod->lsrsp->rsplen = nvme_fc_format_rjt(acc, 1843 sizeof(*acc), rqst->w0.ls_cmd, 1844 (ret == VERR_NO_ASSOC) ? 1845 FCNVME_RJT_RC_INV_ASSOC : 1846 FCNVME_RJT_RC_LOGIC, 1847 FCNVME_RJT_EXP_NONE, 0); 1848 return true; 1849 } 1850 1851 /* format a response */ 1852 1853 iod->lsrsp->rsplen = sizeof(*acc); 1854 1855 nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC, 1856 fcnvme_lsdesc_len( 1857 sizeof(struct fcnvme_ls_disconnect_assoc_acc)), 1858 FCNVME_LS_DISCONNECT_ASSOC); 1859 1860 /* 1861 * The rules for LS response says the response cannot 1862 * go back until ABTS's have been sent for all outstanding 1863 * I/O and a Disconnect Association LS has been sent. 1864 * So... save off the Disconnect LS to send the response 1865 * later. If there was a prior LS already saved, replace 1866 * it with the newer one and send a can't perform reject 1867 * on the older one. 1868 */ 1869 spin_lock_irqsave(&tgtport->lock, flags); 1870 oldls = assoc->rcv_disconn; 1871 assoc->rcv_disconn = iod; 1872 spin_unlock_irqrestore(&tgtport->lock, flags); 1873 1874 if (oldls) { 1875 dev_info(tgtport->dev, 1876 "{%d:%d} Multiple Disconnect Association LS's " 1877 "received\n", 1878 tgtport->fc_target_port.port_num, assoc->a_id); 1879 /* overwrite good response with bogus failure */ 1880 oldls->lsrsp->rsplen = nvme_fc_format_rjt(oldls->rspbuf, 1881 sizeof(*iod->rspbuf), 1882 /* ok to use rqst, LS is same */ 1883 rqst->w0.ls_cmd, 1884 FCNVME_RJT_RC_UNAB, 1885 FCNVME_RJT_EXP_NONE, 0); 1886 nvmet_fc_xmt_ls_rsp(tgtport, oldls); 1887 } 1888 1889 queue_work(nvmet_wq, &assoc->del_work); 1890 nvmet_fc_tgt_a_put(assoc); 1891 1892 return false; 1893 } 1894 1895 1896 /* *********************** NVME Ctrl Routines **************************** */ 1897 1898 1899 static void nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req); 1900 1901 static const struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops; 1902 1903 static void 1904 nvmet_fc_xmt_ls_rsp_done(struct nvmefc_ls_rsp *lsrsp) 1905 { 1906 struct nvmet_fc_ls_iod *iod = lsrsp->nvme_fc_private; 1907 struct nvmet_fc_tgtport *tgtport = iod->tgtport; 1908 1909 fc_dma_sync_single_for_cpu(tgtport->dev, iod->rspdma, 1910 sizeof(*iod->rspbuf), DMA_TO_DEVICE); 1911 nvmet_fc_free_ls_iod(tgtport, iod); 1912 nvmet_fc_tgtport_put(tgtport); 1913 } 1914 1915 static void 1916 nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport, 1917 struct nvmet_fc_ls_iod *iod) 1918 { 1919 int ret; 1920 1921 fc_dma_sync_single_for_device(tgtport->dev, iod->rspdma, 1922 sizeof(*iod->rspbuf), DMA_TO_DEVICE); 1923 1924 ret = tgtport->ops->xmt_ls_rsp(&tgtport->fc_target_port, iod->lsrsp); 1925 if (ret) 1926 nvmet_fc_xmt_ls_rsp_done(iod->lsrsp); 1927 } 1928 1929 /* 1930 * Actual processing routine for received FC-NVME LS Requests from the LLD 1931 */ 1932 static void 1933 nvmet_fc_handle_ls_rqst(struct nvmet_fc_tgtport *tgtport, 1934 struct nvmet_fc_ls_iod *iod) 1935 { 1936 struct fcnvme_ls_rqst_w0 *w0 = &iod->rqstbuf->rq_cr_assoc.w0; 1937 bool sendrsp = true; 1938 1939 iod->lsrsp->nvme_fc_private = iod; 1940 iod->lsrsp->rspbuf = iod->rspbuf; 1941 iod->lsrsp->rspdma = iod->rspdma; 1942 iod->lsrsp->done = nvmet_fc_xmt_ls_rsp_done; 1943 /* Be preventative. handlers will later set to valid length */ 1944 iod->lsrsp->rsplen = 0; 1945 1946 iod->assoc = NULL; 1947 1948 /* 1949 * handlers: 1950 * parse request input, execute the request, and format the 1951 * LS response 1952 */ 1953 switch (w0->ls_cmd) { 1954 case FCNVME_LS_CREATE_ASSOCIATION: 1955 /* Creates Association and initial Admin Queue/Connection */ 1956 nvmet_fc_ls_create_association(tgtport, iod); 1957 break; 1958 case FCNVME_LS_CREATE_CONNECTION: 1959 /* Creates an IO Queue/Connection */ 1960 nvmet_fc_ls_create_connection(tgtport, iod); 1961 break; 1962 case FCNVME_LS_DISCONNECT_ASSOC: 1963 /* Terminate a Queue/Connection or the Association */ 1964 sendrsp = nvmet_fc_ls_disconnect(tgtport, iod); 1965 break; 1966 default: 1967 iod->lsrsp->rsplen = nvme_fc_format_rjt(iod->rspbuf, 1968 sizeof(*iod->rspbuf), w0->ls_cmd, 1969 FCNVME_RJT_RC_INVAL, FCNVME_RJT_EXP_NONE, 0); 1970 } 1971 1972 if (sendrsp) 1973 nvmet_fc_xmt_ls_rsp(tgtport, iod); 1974 } 1975 1976 /* 1977 * Actual processing routine for received FC-NVME LS Requests from the LLD 1978 */ 1979 static void 1980 nvmet_fc_handle_ls_rqst_work(struct work_struct *work) 1981 { 1982 struct nvmet_fc_ls_iod *iod = 1983 container_of(work, struct nvmet_fc_ls_iod, work); 1984 struct nvmet_fc_tgtport *tgtport = iod->tgtport; 1985 1986 nvmet_fc_handle_ls_rqst(tgtport, iod); 1987 } 1988 1989 1990 /** 1991 * nvmet_fc_rcv_ls_req - transport entry point called by an LLDD 1992 * upon the reception of a NVME LS request. 1993 * 1994 * The nvmet-fc layer will copy payload to an internal structure for 1995 * processing. As such, upon completion of the routine, the LLDD may 1996 * immediately free/reuse the LS request buffer passed in the call. 1997 * 1998 * If this routine returns error, the LLDD should abort the exchange. 1999 * 2000 * @target_port: pointer to the (registered) target port the LS was 2001 * received on. 2002 * @hosthandle: pointer to the host specific data, gets stored in iod. 2003 * @lsrsp: pointer to a lsrsp structure to be used to reference 2004 * the exchange corresponding to the LS. 2005 * @lsreqbuf: pointer to the buffer containing the LS Request 2006 * @lsreqbuf_len: length, in bytes, of the received LS request 2007 */ 2008 int 2009 nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *target_port, 2010 void *hosthandle, 2011 struct nvmefc_ls_rsp *lsrsp, 2012 void *lsreqbuf, u32 lsreqbuf_len) 2013 { 2014 struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port); 2015 struct nvmet_fc_ls_iod *iod; 2016 struct fcnvme_ls_rqst_w0 *w0 = (struct fcnvme_ls_rqst_w0 *)lsreqbuf; 2017 2018 if (lsreqbuf_len > sizeof(union nvmefc_ls_requests)) { 2019 dev_info(tgtport->dev, 2020 "RCV %s LS failed: payload too large (%d)\n", 2021 (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ? 2022 nvmefc_ls_names[w0->ls_cmd] : "", 2023 lsreqbuf_len); 2024 return -E2BIG; 2025 } 2026 2027 if (!nvmet_fc_tgtport_get(tgtport)) { 2028 dev_info(tgtport->dev, 2029 "RCV %s LS failed: target deleting\n", 2030 (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ? 2031 nvmefc_ls_names[w0->ls_cmd] : ""); 2032 return -ESHUTDOWN; 2033 } 2034 2035 iod = nvmet_fc_alloc_ls_iod(tgtport); 2036 if (!iod) { 2037 dev_info(tgtport->dev, 2038 "RCV %s LS failed: context allocation failed\n", 2039 (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ? 2040 nvmefc_ls_names[w0->ls_cmd] : ""); 2041 nvmet_fc_tgtport_put(tgtport); 2042 return -ENOENT; 2043 } 2044 2045 iod->lsrsp = lsrsp; 2046 iod->fcpreq = NULL; 2047 memcpy(iod->rqstbuf, lsreqbuf, lsreqbuf_len); 2048 iod->rqstdatalen = lsreqbuf_len; 2049 iod->hosthandle = hosthandle; 2050 2051 queue_work(nvmet_wq, &iod->work); 2052 2053 return 0; 2054 } 2055 EXPORT_SYMBOL_GPL(nvmet_fc_rcv_ls_req); 2056 2057 2058 /* 2059 * ********************** 2060 * Start of FCP handling 2061 * ********************** 2062 */ 2063 2064 static int 2065 nvmet_fc_alloc_tgt_pgs(struct nvmet_fc_fcp_iod *fod) 2066 { 2067 struct scatterlist *sg; 2068 unsigned int nent; 2069 2070 sg = sgl_alloc(fod->req.transfer_len, GFP_KERNEL, &nent); 2071 if (!sg) 2072 goto out; 2073 2074 fod->data_sg = sg; 2075 fod->data_sg_cnt = nent; 2076 fod->data_sg_cnt = fc_dma_map_sg(fod->tgtport->dev, sg, nent, 2077 ((fod->io_dir == NVMET_FCP_WRITE) ? 2078 DMA_FROM_DEVICE : DMA_TO_DEVICE)); 2079 /* note: write from initiator perspective */ 2080 fod->next_sg = fod->data_sg; 2081 2082 return 0; 2083 2084 out: 2085 return NVME_SC_INTERNAL; 2086 } 2087 2088 static void 2089 nvmet_fc_free_tgt_pgs(struct nvmet_fc_fcp_iod *fod) 2090 { 2091 if (!fod->data_sg || !fod->data_sg_cnt) 2092 return; 2093 2094 fc_dma_unmap_sg(fod->tgtport->dev, fod->data_sg, fod->data_sg_cnt, 2095 ((fod->io_dir == NVMET_FCP_WRITE) ? 2096 DMA_FROM_DEVICE : DMA_TO_DEVICE)); 2097 sgl_free(fod->data_sg); 2098 fod->data_sg = NULL; 2099 fod->data_sg_cnt = 0; 2100 } 2101 2102 2103 static bool 2104 queue_90percent_full(struct nvmet_fc_tgt_queue *q, u32 sqhd) 2105 { 2106 u32 sqtail, used; 2107 2108 /* egad, this is ugly. And sqtail is just a best guess */ 2109 sqtail = atomic_read(&q->sqtail) % q->sqsize; 2110 2111 used = (sqtail < sqhd) ? (sqtail + q->sqsize - sqhd) : (sqtail - sqhd); 2112 return ((used * 10) >= (((u32)(q->sqsize - 1) * 9))); 2113 } 2114 2115 /* 2116 * Prep RSP payload. 2117 * May be a NVMET_FCOP_RSP or NVMET_FCOP_READDATA_RSP op 2118 */ 2119 static void 2120 nvmet_fc_prep_fcp_rsp(struct nvmet_fc_tgtport *tgtport, 2121 struct nvmet_fc_fcp_iod *fod) 2122 { 2123 struct nvme_fc_ersp_iu *ersp = &fod->rspiubuf; 2124 struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common; 2125 struct nvme_completion *cqe = &ersp->cqe; 2126 u32 *cqewd = (u32 *)cqe; 2127 bool send_ersp = false; 2128 u32 rsn, rspcnt, xfr_length; 2129 2130 if (fod->fcpreq->op == NVMET_FCOP_READDATA_RSP) 2131 xfr_length = fod->req.transfer_len; 2132 else 2133 xfr_length = fod->offset; 2134 2135 /* 2136 * check to see if we can send a 0's rsp. 2137 * Note: to send a 0's response, the NVME-FC host transport will 2138 * recreate the CQE. The host transport knows: sq id, SQHD (last 2139 * seen in an ersp), and command_id. Thus it will create a 2140 * zero-filled CQE with those known fields filled in. Transport 2141 * must send an ersp for any condition where the cqe won't match 2142 * this. 2143 * 2144 * Here are the FC-NVME mandated cases where we must send an ersp: 2145 * every N responses, where N=ersp_ratio 2146 * force fabric commands to send ersp's (not in FC-NVME but good 2147 * practice) 2148 * normal cmds: any time status is non-zero, or status is zero 2149 * but words 0 or 1 are non-zero. 2150 * the SQ is 90% or more full 2151 * the cmd is a fused command 2152 * transferred data length not equal to cmd iu length 2153 */ 2154 rspcnt = atomic_inc_return(&fod->queue->zrspcnt); 2155 if (!(rspcnt % fod->queue->ersp_ratio) || 2156 nvme_is_fabrics((struct nvme_command *) sqe) || 2157 xfr_length != fod->req.transfer_len || 2158 (le16_to_cpu(cqe->status) & 0xFFFE) || cqewd[0] || cqewd[1] || 2159 (sqe->flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND)) || 2160 queue_90percent_full(fod->queue, le16_to_cpu(cqe->sq_head))) 2161 send_ersp = true; 2162 2163 /* re-set the fields */ 2164 fod->fcpreq->rspaddr = ersp; 2165 fod->fcpreq->rspdma = fod->rspdma; 2166 2167 if (!send_ersp) { 2168 memset(ersp, 0, NVME_FC_SIZEOF_ZEROS_RSP); 2169 fod->fcpreq->rsplen = NVME_FC_SIZEOF_ZEROS_RSP; 2170 } else { 2171 ersp->iu_len = cpu_to_be16(sizeof(*ersp)/sizeof(u32)); 2172 rsn = atomic_inc_return(&fod->queue->rsn); 2173 ersp->rsn = cpu_to_be32(rsn); 2174 ersp->xfrd_len = cpu_to_be32(xfr_length); 2175 fod->fcpreq->rsplen = sizeof(*ersp); 2176 } 2177 2178 fc_dma_sync_single_for_device(tgtport->dev, fod->rspdma, 2179 sizeof(fod->rspiubuf), DMA_TO_DEVICE); 2180 } 2181 2182 static void nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq); 2183 2184 static void 2185 nvmet_fc_abort_op(struct nvmet_fc_tgtport *tgtport, 2186 struct nvmet_fc_fcp_iod *fod) 2187 { 2188 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; 2189 2190 /* data no longer needed */ 2191 nvmet_fc_free_tgt_pgs(fod); 2192 2193 /* 2194 * if an ABTS was received or we issued the fcp_abort early 2195 * don't call abort routine again. 2196 */ 2197 /* no need to take lock - lock was taken earlier to get here */ 2198 if (!fod->aborted) 2199 tgtport->ops->fcp_abort(&tgtport->fc_target_port, fcpreq); 2200 2201 nvmet_fc_free_fcp_iod(fod->queue, fod); 2202 } 2203 2204 static void 2205 nvmet_fc_xmt_fcp_rsp(struct nvmet_fc_tgtport *tgtport, 2206 struct nvmet_fc_fcp_iod *fod) 2207 { 2208 int ret; 2209 2210 fod->fcpreq->op = NVMET_FCOP_RSP; 2211 fod->fcpreq->timeout = 0; 2212 2213 nvmet_fc_prep_fcp_rsp(tgtport, fod); 2214 2215 ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq); 2216 if (ret) 2217 nvmet_fc_abort_op(tgtport, fod); 2218 } 2219 2220 static void 2221 nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport, 2222 struct nvmet_fc_fcp_iod *fod, u8 op) 2223 { 2224 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; 2225 struct scatterlist *sg = fod->next_sg; 2226 unsigned long flags; 2227 u32 remaininglen = fod->req.transfer_len - fod->offset; 2228 u32 tlen = 0; 2229 int ret; 2230 2231 fcpreq->op = op; 2232 fcpreq->offset = fod->offset; 2233 fcpreq->timeout = NVME_FC_TGTOP_TIMEOUT_SEC; 2234 2235 /* 2236 * for next sequence: 2237 * break at a sg element boundary 2238 * attempt to keep sequence length capped at 2239 * NVMET_FC_MAX_SEQ_LENGTH but allow sequence to 2240 * be longer if a single sg element is larger 2241 * than that amount. This is done to avoid creating 2242 * a new sg list to use for the tgtport api. 2243 */ 2244 fcpreq->sg = sg; 2245 fcpreq->sg_cnt = 0; 2246 while (tlen < remaininglen && 2247 fcpreq->sg_cnt < tgtport->max_sg_cnt && 2248 tlen + sg_dma_len(sg) < NVMET_FC_MAX_SEQ_LENGTH) { 2249 fcpreq->sg_cnt++; 2250 tlen += sg_dma_len(sg); 2251 sg = sg_next(sg); 2252 } 2253 if (tlen < remaininglen && fcpreq->sg_cnt == 0) { 2254 fcpreq->sg_cnt++; 2255 tlen += min_t(u32, sg_dma_len(sg), remaininglen); 2256 sg = sg_next(sg); 2257 } 2258 if (tlen < remaininglen) 2259 fod->next_sg = sg; 2260 else 2261 fod->next_sg = NULL; 2262 2263 fcpreq->transfer_length = tlen; 2264 fcpreq->transferred_length = 0; 2265 fcpreq->fcp_error = 0; 2266 fcpreq->rsplen = 0; 2267 2268 /* 2269 * If the last READDATA request: check if LLDD supports 2270 * combined xfr with response. 2271 */ 2272 if ((op == NVMET_FCOP_READDATA) && 2273 ((fod->offset + fcpreq->transfer_length) == fod->req.transfer_len) && 2274 (tgtport->ops->target_features & NVMET_FCTGTFEAT_READDATA_RSP)) { 2275 fcpreq->op = NVMET_FCOP_READDATA_RSP; 2276 nvmet_fc_prep_fcp_rsp(tgtport, fod); 2277 } 2278 2279 ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq); 2280 if (ret) { 2281 /* 2282 * should be ok to set w/o lock as its in the thread of 2283 * execution (not an async timer routine) and doesn't 2284 * contend with any clearing action 2285 */ 2286 fod->abort = true; 2287 2288 if (op == NVMET_FCOP_WRITEDATA) { 2289 spin_lock_irqsave(&fod->flock, flags); 2290 fod->writedataactive = false; 2291 spin_unlock_irqrestore(&fod->flock, flags); 2292 nvmet_req_complete(&fod->req, NVME_SC_INTERNAL); 2293 } else /* NVMET_FCOP_READDATA or NVMET_FCOP_READDATA_RSP */ { 2294 fcpreq->fcp_error = ret; 2295 fcpreq->transferred_length = 0; 2296 nvmet_fc_xmt_fcp_op_done(fod->fcpreq); 2297 } 2298 } 2299 } 2300 2301 static inline bool 2302 __nvmet_fc_fod_op_abort(struct nvmet_fc_fcp_iod *fod, bool abort) 2303 { 2304 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; 2305 struct nvmet_fc_tgtport *tgtport = fod->tgtport; 2306 2307 /* if in the middle of an io and we need to tear down */ 2308 if (abort) { 2309 if (fcpreq->op == NVMET_FCOP_WRITEDATA) { 2310 nvmet_req_complete(&fod->req, NVME_SC_INTERNAL); 2311 return true; 2312 } 2313 2314 nvmet_fc_abort_op(tgtport, fod); 2315 return true; 2316 } 2317 2318 return false; 2319 } 2320 2321 /* 2322 * actual done handler for FCP operations when completed by the lldd 2323 */ 2324 static void 2325 nvmet_fc_fod_op_done(struct nvmet_fc_fcp_iod *fod) 2326 { 2327 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; 2328 struct nvmet_fc_tgtport *tgtport = fod->tgtport; 2329 unsigned long flags; 2330 bool abort; 2331 2332 spin_lock_irqsave(&fod->flock, flags); 2333 abort = fod->abort; 2334 fod->writedataactive = false; 2335 spin_unlock_irqrestore(&fod->flock, flags); 2336 2337 switch (fcpreq->op) { 2338 2339 case NVMET_FCOP_WRITEDATA: 2340 if (__nvmet_fc_fod_op_abort(fod, abort)) 2341 return; 2342 if (fcpreq->fcp_error || 2343 fcpreq->transferred_length != fcpreq->transfer_length) { 2344 spin_lock_irqsave(&fod->flock, flags); 2345 fod->abort = true; 2346 spin_unlock_irqrestore(&fod->flock, flags); 2347 2348 nvmet_req_complete(&fod->req, NVME_SC_INTERNAL); 2349 return; 2350 } 2351 2352 fod->offset += fcpreq->transferred_length; 2353 if (fod->offset != fod->req.transfer_len) { 2354 spin_lock_irqsave(&fod->flock, flags); 2355 fod->writedataactive = true; 2356 spin_unlock_irqrestore(&fod->flock, flags); 2357 2358 /* transfer the next chunk */ 2359 nvmet_fc_transfer_fcp_data(tgtport, fod, 2360 NVMET_FCOP_WRITEDATA); 2361 return; 2362 } 2363 2364 /* data transfer complete, resume with nvmet layer */ 2365 fod->req.execute(&fod->req); 2366 break; 2367 2368 case NVMET_FCOP_READDATA: 2369 case NVMET_FCOP_READDATA_RSP: 2370 if (__nvmet_fc_fod_op_abort(fod, abort)) 2371 return; 2372 if (fcpreq->fcp_error || 2373 fcpreq->transferred_length != fcpreq->transfer_length) { 2374 nvmet_fc_abort_op(tgtport, fod); 2375 return; 2376 } 2377 2378 /* success */ 2379 2380 if (fcpreq->op == NVMET_FCOP_READDATA_RSP) { 2381 /* data no longer needed */ 2382 nvmet_fc_free_tgt_pgs(fod); 2383 nvmet_fc_free_fcp_iod(fod->queue, fod); 2384 return; 2385 } 2386 2387 fod->offset += fcpreq->transferred_length; 2388 if (fod->offset != fod->req.transfer_len) { 2389 /* transfer the next chunk */ 2390 nvmet_fc_transfer_fcp_data(tgtport, fod, 2391 NVMET_FCOP_READDATA); 2392 return; 2393 } 2394 2395 /* data transfer complete, send response */ 2396 2397 /* data no longer needed */ 2398 nvmet_fc_free_tgt_pgs(fod); 2399 2400 nvmet_fc_xmt_fcp_rsp(tgtport, fod); 2401 2402 break; 2403 2404 case NVMET_FCOP_RSP: 2405 if (__nvmet_fc_fod_op_abort(fod, abort)) 2406 return; 2407 nvmet_fc_free_fcp_iod(fod->queue, fod); 2408 break; 2409 2410 default: 2411 break; 2412 } 2413 } 2414 2415 static void 2416 nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq) 2417 { 2418 struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private; 2419 2420 nvmet_fc_fod_op_done(fod); 2421 } 2422 2423 /* 2424 * actual completion handler after execution by the nvmet layer 2425 */ 2426 static void 2427 __nvmet_fc_fcp_nvme_cmd_done(struct nvmet_fc_tgtport *tgtport, 2428 struct nvmet_fc_fcp_iod *fod, int status) 2429 { 2430 struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common; 2431 struct nvme_completion *cqe = &fod->rspiubuf.cqe; 2432 unsigned long flags; 2433 bool abort; 2434 2435 spin_lock_irqsave(&fod->flock, flags); 2436 abort = fod->abort; 2437 spin_unlock_irqrestore(&fod->flock, flags); 2438 2439 /* if we have a CQE, snoop the last sq_head value */ 2440 if (!status) 2441 fod->queue->sqhd = cqe->sq_head; 2442 2443 if (abort) { 2444 nvmet_fc_abort_op(tgtport, fod); 2445 return; 2446 } 2447 2448 /* if an error handling the cmd post initial parsing */ 2449 if (status) { 2450 /* fudge up a failed CQE status for our transport error */ 2451 memset(cqe, 0, sizeof(*cqe)); 2452 cqe->sq_head = fod->queue->sqhd; /* echo last cqe sqhd */ 2453 cqe->sq_id = cpu_to_le16(fod->queue->qid); 2454 cqe->command_id = sqe->command_id; 2455 cqe->status = cpu_to_le16(status); 2456 } else { 2457 2458 /* 2459 * try to push the data even if the SQE status is non-zero. 2460 * There may be a status where data still was intended to 2461 * be moved 2462 */ 2463 if ((fod->io_dir == NVMET_FCP_READ) && (fod->data_sg_cnt)) { 2464 /* push the data over before sending rsp */ 2465 nvmet_fc_transfer_fcp_data(tgtport, fod, 2466 NVMET_FCOP_READDATA); 2467 return; 2468 } 2469 2470 /* writes & no data - fall thru */ 2471 } 2472 2473 /* data no longer needed */ 2474 nvmet_fc_free_tgt_pgs(fod); 2475 2476 nvmet_fc_xmt_fcp_rsp(tgtport, fod); 2477 } 2478 2479 2480 static void 2481 nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req) 2482 { 2483 struct nvmet_fc_fcp_iod *fod = nvmet_req_to_fod(nvme_req); 2484 struct nvmet_fc_tgtport *tgtport = fod->tgtport; 2485 2486 __nvmet_fc_fcp_nvme_cmd_done(tgtport, fod, 0); 2487 } 2488 2489 2490 /* 2491 * Actual processing routine for received FC-NVME I/O Requests from the LLD 2492 */ 2493 static void 2494 nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport, 2495 struct nvmet_fc_fcp_iod *fod) 2496 { 2497 struct nvme_fc_cmd_iu *cmdiu = &fod->cmdiubuf; 2498 u32 xfrlen = be32_to_cpu(cmdiu->data_len); 2499 int ret; 2500 2501 /* 2502 * Fused commands are currently not supported in the linux 2503 * implementation. 2504 * 2505 * As such, the implementation of the FC transport does not 2506 * look at the fused commands and order delivery to the upper 2507 * layer until we have both based on csn. 2508 */ 2509 2510 fod->fcpreq->done = nvmet_fc_xmt_fcp_op_done; 2511 2512 if (cmdiu->flags & FCNVME_CMD_FLAGS_WRITE) { 2513 fod->io_dir = NVMET_FCP_WRITE; 2514 if (!nvme_is_write(&cmdiu->sqe)) 2515 goto transport_error; 2516 } else if (cmdiu->flags & FCNVME_CMD_FLAGS_READ) { 2517 fod->io_dir = NVMET_FCP_READ; 2518 if (nvme_is_write(&cmdiu->sqe)) 2519 goto transport_error; 2520 } else { 2521 fod->io_dir = NVMET_FCP_NODATA; 2522 if (xfrlen) 2523 goto transport_error; 2524 } 2525 2526 fod->req.cmd = &fod->cmdiubuf.sqe; 2527 fod->req.cqe = &fod->rspiubuf.cqe; 2528 if (tgtport->pe) 2529 fod->req.port = tgtport->pe->port; 2530 2531 /* clear any response payload */ 2532 memset(&fod->rspiubuf, 0, sizeof(fod->rspiubuf)); 2533 2534 fod->data_sg = NULL; 2535 fod->data_sg_cnt = 0; 2536 2537 ret = nvmet_req_init(&fod->req, 2538 &fod->queue->nvme_cq, 2539 &fod->queue->nvme_sq, 2540 &nvmet_fc_tgt_fcp_ops); 2541 if (!ret) { 2542 /* bad SQE content or invalid ctrl state */ 2543 /* nvmet layer has already called op done to send rsp. */ 2544 return; 2545 } 2546 2547 fod->req.transfer_len = xfrlen; 2548 2549 /* keep a running counter of tail position */ 2550 atomic_inc(&fod->queue->sqtail); 2551 2552 if (fod->req.transfer_len) { 2553 ret = nvmet_fc_alloc_tgt_pgs(fod); 2554 if (ret) { 2555 nvmet_req_complete(&fod->req, ret); 2556 return; 2557 } 2558 } 2559 fod->req.sg = fod->data_sg; 2560 fod->req.sg_cnt = fod->data_sg_cnt; 2561 fod->offset = 0; 2562 2563 if (fod->io_dir == NVMET_FCP_WRITE) { 2564 /* pull the data over before invoking nvmet layer */ 2565 nvmet_fc_transfer_fcp_data(tgtport, fod, NVMET_FCOP_WRITEDATA); 2566 return; 2567 } 2568 2569 /* 2570 * Reads or no data: 2571 * 2572 * can invoke the nvmet_layer now. If read data, cmd completion will 2573 * push the data 2574 */ 2575 fod->req.execute(&fod->req); 2576 return; 2577 2578 transport_error: 2579 nvmet_fc_abort_op(tgtport, fod); 2580 } 2581 2582 /** 2583 * nvmet_fc_rcv_fcp_req - transport entry point called by an LLDD 2584 * upon the reception of a NVME FCP CMD IU. 2585 * 2586 * Pass a FC-NVME FCP CMD IU received from the FC link to the nvmet-fc 2587 * layer for processing. 2588 * 2589 * The nvmet_fc layer allocates a local job structure (struct 2590 * nvmet_fc_fcp_iod) from the queue for the io and copies the 2591 * CMD IU buffer to the job structure. As such, on a successful 2592 * completion (returns 0), the LLDD may immediately free/reuse 2593 * the CMD IU buffer passed in the call. 2594 * 2595 * However, in some circumstances, due to the packetized nature of FC 2596 * and the api of the FC LLDD which may issue a hw command to send the 2597 * response, but the LLDD may not get the hw completion for that command 2598 * and upcall the nvmet_fc layer before a new command may be 2599 * asynchronously received - its possible for a command to be received 2600 * before the LLDD and nvmet_fc have recycled the job structure. It gives 2601 * the appearance of more commands received than fits in the sq. 2602 * To alleviate this scenario, a temporary queue is maintained in the 2603 * transport for pending LLDD requests waiting for a queue job structure. 2604 * In these "overrun" cases, a temporary queue element is allocated 2605 * the LLDD request and CMD iu buffer information remembered, and the 2606 * routine returns a -EOVERFLOW status. Subsequently, when a queue job 2607 * structure is freed, it is immediately reallocated for anything on the 2608 * pending request list. The LLDDs defer_rcv() callback is called, 2609 * informing the LLDD that it may reuse the CMD IU buffer, and the io 2610 * is then started normally with the transport. 2611 * 2612 * The LLDD, when receiving an -EOVERFLOW completion status, is to treat 2613 * the completion as successful but must not reuse the CMD IU buffer 2614 * until the LLDD's defer_rcv() callback has been called for the 2615 * corresponding struct nvmefc_tgt_fcp_req pointer. 2616 * 2617 * If there is any other condition in which an error occurs, the 2618 * transport will return a non-zero status indicating the error. 2619 * In all cases other than -EOVERFLOW, the transport has not accepted the 2620 * request and the LLDD should abort the exchange. 2621 * 2622 * @target_port: pointer to the (registered) target port the FCP CMD IU 2623 * was received on. 2624 * @fcpreq: pointer to a fcpreq request structure to be used to reference 2625 * the exchange corresponding to the FCP Exchange. 2626 * @cmdiubuf: pointer to the buffer containing the FCP CMD IU 2627 * @cmdiubuf_len: length, in bytes, of the received FCP CMD IU 2628 */ 2629 int 2630 nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *target_port, 2631 struct nvmefc_tgt_fcp_req *fcpreq, 2632 void *cmdiubuf, u32 cmdiubuf_len) 2633 { 2634 struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port); 2635 struct nvme_fc_cmd_iu *cmdiu = cmdiubuf; 2636 struct nvmet_fc_tgt_queue *queue; 2637 struct nvmet_fc_fcp_iod *fod; 2638 struct nvmet_fc_defer_fcp_req *deferfcp; 2639 unsigned long flags; 2640 2641 /* validate iu, so the connection id can be used to find the queue */ 2642 if ((cmdiubuf_len != sizeof(*cmdiu)) || 2643 (cmdiu->format_id != NVME_CMD_FORMAT_ID) || 2644 (cmdiu->fc_id != NVME_CMD_FC_ID) || 2645 (be16_to_cpu(cmdiu->iu_len) != (sizeof(*cmdiu)/4))) 2646 return -EIO; 2647 2648 queue = nvmet_fc_find_target_queue(tgtport, 2649 be64_to_cpu(cmdiu->connection_id)); 2650 if (!queue) 2651 return -ENOTCONN; 2652 2653 /* 2654 * note: reference taken by find_target_queue 2655 * After successful fod allocation, the fod will inherit the 2656 * ownership of that reference and will remove the reference 2657 * when the fod is freed. 2658 */ 2659 2660 spin_lock_irqsave(&queue->qlock, flags); 2661 2662 fod = nvmet_fc_alloc_fcp_iod(queue); 2663 if (fod) { 2664 spin_unlock_irqrestore(&queue->qlock, flags); 2665 2666 fcpreq->nvmet_fc_private = fod; 2667 fod->fcpreq = fcpreq; 2668 2669 memcpy(&fod->cmdiubuf, cmdiubuf, cmdiubuf_len); 2670 2671 nvmet_fc_queue_fcp_req(tgtport, queue, fcpreq); 2672 2673 return 0; 2674 } 2675 2676 if (!tgtport->ops->defer_rcv) { 2677 spin_unlock_irqrestore(&queue->qlock, flags); 2678 /* release the queue lookup reference */ 2679 nvmet_fc_tgt_q_put(queue); 2680 return -ENOENT; 2681 } 2682 2683 deferfcp = list_first_entry_or_null(&queue->avail_defer_list, 2684 struct nvmet_fc_defer_fcp_req, req_list); 2685 if (deferfcp) { 2686 /* Just re-use one that was previously allocated */ 2687 list_del(&deferfcp->req_list); 2688 } else { 2689 spin_unlock_irqrestore(&queue->qlock, flags); 2690 2691 /* Now we need to dynamically allocate one */ 2692 deferfcp = kmalloc(sizeof(*deferfcp), GFP_KERNEL); 2693 if (!deferfcp) { 2694 /* release the queue lookup reference */ 2695 nvmet_fc_tgt_q_put(queue); 2696 return -ENOMEM; 2697 } 2698 spin_lock_irqsave(&queue->qlock, flags); 2699 } 2700 2701 /* For now, use rspaddr / rsplen to save payload information */ 2702 fcpreq->rspaddr = cmdiubuf; 2703 fcpreq->rsplen = cmdiubuf_len; 2704 deferfcp->fcp_req = fcpreq; 2705 2706 /* defer processing till a fod becomes available */ 2707 list_add_tail(&deferfcp->req_list, &queue->pending_cmd_list); 2708 2709 /* NOTE: the queue lookup reference is still valid */ 2710 2711 spin_unlock_irqrestore(&queue->qlock, flags); 2712 2713 return -EOVERFLOW; 2714 } 2715 EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_req); 2716 2717 /** 2718 * nvmet_fc_rcv_fcp_abort - transport entry point called by an LLDD 2719 * upon the reception of an ABTS for a FCP command 2720 * 2721 * Notify the transport that an ABTS has been received for a FCP command 2722 * that had been given to the transport via nvmet_fc_rcv_fcp_req(). The 2723 * LLDD believes the command is still being worked on 2724 * (template_ops->fcp_req_release() has not been called). 2725 * 2726 * The transport will wait for any outstanding work (an op to the LLDD, 2727 * which the lldd should complete with error due to the ABTS; or the 2728 * completion from the nvmet layer of the nvme command), then will 2729 * stop processing and call the nvmet_fc_rcv_fcp_req() callback to 2730 * return the i/o context to the LLDD. The LLDD may send the BA_ACC 2731 * to the ABTS either after return from this function (assuming any 2732 * outstanding op work has been terminated) or upon the callback being 2733 * called. 2734 * 2735 * @target_port: pointer to the (registered) target port the FCP CMD IU 2736 * was received on. 2737 * @fcpreq: pointer to the fcpreq request structure that corresponds 2738 * to the exchange that received the ABTS. 2739 */ 2740 void 2741 nvmet_fc_rcv_fcp_abort(struct nvmet_fc_target_port *target_port, 2742 struct nvmefc_tgt_fcp_req *fcpreq) 2743 { 2744 struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private; 2745 struct nvmet_fc_tgt_queue *queue; 2746 unsigned long flags; 2747 2748 if (!fod || fod->fcpreq != fcpreq) 2749 /* job appears to have already completed, ignore abort */ 2750 return; 2751 2752 queue = fod->queue; 2753 2754 spin_lock_irqsave(&queue->qlock, flags); 2755 if (fod->active) { 2756 /* 2757 * mark as abort. The abort handler, invoked upon completion 2758 * of any work, will detect the aborted status and do the 2759 * callback. 2760 */ 2761 spin_lock(&fod->flock); 2762 fod->abort = true; 2763 fod->aborted = true; 2764 spin_unlock(&fod->flock); 2765 } 2766 spin_unlock_irqrestore(&queue->qlock, flags); 2767 } 2768 EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_abort); 2769 2770 2771 struct nvmet_fc_traddr { 2772 u64 nn; 2773 u64 pn; 2774 }; 2775 2776 static int 2777 __nvme_fc_parse_u64(substring_t *sstr, u64 *val) 2778 { 2779 u64 token64; 2780 2781 if (match_u64(sstr, &token64)) 2782 return -EINVAL; 2783 *val = token64; 2784 2785 return 0; 2786 } 2787 2788 /* 2789 * This routine validates and extracts the WWN's from the TRADDR string. 2790 * As kernel parsers need the 0x to determine number base, universally 2791 * build string to parse with 0x prefix before parsing name strings. 2792 */ 2793 static int 2794 nvme_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf, size_t blen) 2795 { 2796 char name[2 + NVME_FC_TRADDR_HEXNAMELEN + 1]; 2797 substring_t wwn = { name, &name[sizeof(name)-1] }; 2798 int nnoffset, pnoffset; 2799 2800 /* validate if string is one of the 2 allowed formats */ 2801 if (strnlen(buf, blen) == NVME_FC_TRADDR_MAXLENGTH && 2802 !strncmp(buf, "nn-0x", NVME_FC_TRADDR_OXNNLEN) && 2803 !strncmp(&buf[NVME_FC_TRADDR_MAX_PN_OFFSET], 2804 "pn-0x", NVME_FC_TRADDR_OXNNLEN)) { 2805 nnoffset = NVME_FC_TRADDR_OXNNLEN; 2806 pnoffset = NVME_FC_TRADDR_MAX_PN_OFFSET + 2807 NVME_FC_TRADDR_OXNNLEN; 2808 } else if ((strnlen(buf, blen) == NVME_FC_TRADDR_MINLENGTH && 2809 !strncmp(buf, "nn-", NVME_FC_TRADDR_NNLEN) && 2810 !strncmp(&buf[NVME_FC_TRADDR_MIN_PN_OFFSET], 2811 "pn-", NVME_FC_TRADDR_NNLEN))) { 2812 nnoffset = NVME_FC_TRADDR_NNLEN; 2813 pnoffset = NVME_FC_TRADDR_MIN_PN_OFFSET + NVME_FC_TRADDR_NNLEN; 2814 } else 2815 goto out_einval; 2816 2817 name[0] = '0'; 2818 name[1] = 'x'; 2819 name[2 + NVME_FC_TRADDR_HEXNAMELEN] = 0; 2820 2821 memcpy(&name[2], &buf[nnoffset], NVME_FC_TRADDR_HEXNAMELEN); 2822 if (__nvme_fc_parse_u64(&wwn, &traddr->nn)) 2823 goto out_einval; 2824 2825 memcpy(&name[2], &buf[pnoffset], NVME_FC_TRADDR_HEXNAMELEN); 2826 if (__nvme_fc_parse_u64(&wwn, &traddr->pn)) 2827 goto out_einval; 2828 2829 return 0; 2830 2831 out_einval: 2832 pr_warn("%s: bad traddr string\n", __func__); 2833 return -EINVAL; 2834 } 2835 2836 static int 2837 nvmet_fc_add_port(struct nvmet_port *port) 2838 { 2839 struct nvmet_fc_tgtport *tgtport; 2840 struct nvmet_fc_port_entry *pe; 2841 struct nvmet_fc_traddr traddr = { 0L, 0L }; 2842 unsigned long flags; 2843 int ret; 2844 2845 /* validate the address info */ 2846 if ((port->disc_addr.trtype != NVMF_TRTYPE_FC) || 2847 (port->disc_addr.adrfam != NVMF_ADDR_FAMILY_FC)) 2848 return -EINVAL; 2849 2850 /* map the traddr address info to a target port */ 2851 2852 ret = nvme_fc_parse_traddr(&traddr, port->disc_addr.traddr, 2853 sizeof(port->disc_addr.traddr)); 2854 if (ret) 2855 return ret; 2856 2857 pe = kzalloc(sizeof(*pe), GFP_KERNEL); 2858 if (!pe) 2859 return -ENOMEM; 2860 2861 ret = -ENXIO; 2862 spin_lock_irqsave(&nvmet_fc_tgtlock, flags); 2863 list_for_each_entry(tgtport, &nvmet_fc_target_list, tgt_list) { 2864 if ((tgtport->fc_target_port.node_name == traddr.nn) && 2865 (tgtport->fc_target_port.port_name == traddr.pn)) { 2866 /* a FC port can only be 1 nvmet port id */ 2867 if (!tgtport->pe) { 2868 nvmet_fc_portentry_bind(tgtport, pe, port); 2869 ret = 0; 2870 } else 2871 ret = -EALREADY; 2872 break; 2873 } 2874 } 2875 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); 2876 2877 if (ret) 2878 kfree(pe); 2879 2880 return ret; 2881 } 2882 2883 static void 2884 nvmet_fc_remove_port(struct nvmet_port *port) 2885 { 2886 struct nvmet_fc_port_entry *pe = port->priv; 2887 2888 nvmet_fc_portentry_unbind(pe); 2889 2890 /* terminate any outstanding associations */ 2891 __nvmet_fc_free_assocs(pe->tgtport); 2892 2893 kfree(pe); 2894 } 2895 2896 static void 2897 nvmet_fc_discovery_chg(struct nvmet_port *port) 2898 { 2899 struct nvmet_fc_port_entry *pe = port->priv; 2900 struct nvmet_fc_tgtport *tgtport = pe->tgtport; 2901 2902 if (tgtport && tgtport->ops->discovery_event) 2903 tgtport->ops->discovery_event(&tgtport->fc_target_port); 2904 } 2905 2906 static const struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops = { 2907 .owner = THIS_MODULE, 2908 .type = NVMF_TRTYPE_FC, 2909 .msdbd = 1, 2910 .add_port = nvmet_fc_add_port, 2911 .remove_port = nvmet_fc_remove_port, 2912 .queue_response = nvmet_fc_fcp_nvme_cmd_done, 2913 .delete_ctrl = nvmet_fc_delete_ctrl, 2914 .discovery_chg = nvmet_fc_discovery_chg, 2915 }; 2916 2917 static int __init nvmet_fc_init_module(void) 2918 { 2919 return nvmet_register_transport(&nvmet_fc_tgt_fcp_ops); 2920 } 2921 2922 static void __exit nvmet_fc_exit_module(void) 2923 { 2924 /* ensure any shutdown operation, e.g. delete ctrls have finished */ 2925 flush_workqueue(nvmet_wq); 2926 2927 /* sanity check - all lports should be removed */ 2928 if (!list_empty(&nvmet_fc_target_list)) 2929 pr_warn("%s: targetport list not empty\n", __func__); 2930 2931 nvmet_unregister_transport(&nvmet_fc_tgt_fcp_ops); 2932 2933 ida_destroy(&nvmet_fc_tgtport_cnt); 2934 } 2935 2936 module_init(nvmet_fc_init_module); 2937 module_exit(nvmet_fc_exit_module); 2938 2939 MODULE_LICENSE("GPL v2"); 2940