// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016 Avago Technologies. All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/parser.h>
#include <uapi/scsi/fc/fc_fs.h>
#include <uapi/scsi/fc/fc_els.h>
#include <linux/delay.h>
#include <linux/overflow.h>
#include <linux/blk-cgroup.h>
#include "nvme.h"
#include "fabrics.h"
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>
#include "fc.h"
#include <scsi/scsi_transport_fc.h>
#include <linux/blk-mq-pci.h>

/* *************************** Data Structures/Defines ****************** */


enum nvme_fc_queue_flags {
	NVME_FC_Q_CONNECTED = 0,
	NVME_FC_Q_LIVE,
};

#define NVME_FC_DEFAULT_DEV_LOSS_TMO	60	/* seconds */
#define NVME_FC_DEFAULT_RECONNECT_TMO	2	/* delay between reconnects
						 * when connected and a
						 * connection failure occurs.
						 */

struct nvme_fc_queue {
	struct nvme_fc_ctrl	*ctrl;
	struct device		*dev;
	struct blk_mq_hw_ctx	*hctx;
	void			*lldd_handle;
	size_t			cmnd_capsule_len;
	u32			qnum;
	u32			rqcnt;
	u32			seqno;

	u64			connection_id;
	atomic_t		csn;

	unsigned long		flags;
} __aligned(sizeof(u64));	/* alignment for other things alloc'd with */

enum nvme_fcop_flags {
	FCOP_FLAGS_TERMIO	= (1 << 0),
	FCOP_FLAGS_AEN		= (1 << 1),
};

struct nvmefc_ls_req_op {
	struct nvmefc_ls_req	ls_req;

	struct nvme_fc_rport	*rport;
	struct nvme_fc_queue	*queue;
	struct request		*rq;
	u32			flags;

	int			ls_error;
	struct completion	ls_done;
	struct list_head	lsreq_list;	/* rport->ls_req_list */
	bool			req_queued;
};

struct nvmefc_ls_rcv_op {
	struct nvme_fc_rport	*rport;
	struct nvmefc_ls_rsp	*lsrsp;
	union nvmefc_ls_requests *rqstbuf;
	union nvmefc_ls_responses *rspbuf;
	u16			rqstdatalen;
	bool			handled;
	dma_addr_t		rspdma;
	struct list_head	lsrcv_list;	/* rport->ls_rcv_list */
} __aligned(sizeof(u64));	/* alignment for other things alloc'd with */

enum nvme_fcpop_state {
	FCPOP_STATE_UNINIT	= 0,
	FCPOP_STATE_IDLE	= 1,
	FCPOP_STATE_ACTIVE	= 2,
	FCPOP_STATE_ABORTED	= 3,
	FCPOP_STATE_COMPLETE	= 4,
};
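
/*
 * Sketch of the typical op state flow, derived from the transitions
 * performed elsewhere in this file (informational only):
 *
 *   UNINIT -> IDLE      at op initialization
 *   IDLE   -> ACTIVE    when the command is handed to the LLDD
 *   ACTIVE -> COMPLETE  on the normal done callback (nvme_fc_fcpio_done)
 *   ACTIVE -> ABORTED   via __nvme_fc_abort_op() on teardown/timeout
 *
 * Transitions are made with atomic_xchg()/atomic_set() so the completion
 * and abort paths can race safely.
 */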
struct nvme_fc_fcp_op {
	struct nvme_request	nreq;		/*
						 * nvme/host/core.c
						 * requires this to be
						 * the 1st element in the
						 * private structure
						 * associated with the
						 * request.
						 */
	struct nvmefc_fcp_req	fcp_req;

	struct nvme_fc_ctrl	*ctrl;
	struct nvme_fc_queue	*queue;
	struct request		*rq;

	atomic_t		state;
	u32			flags;
	u32			rqno;
	u32			nents;

	struct nvme_fc_cmd_iu	cmd_iu;
	struct nvme_fc_ersp_iu	rsp_iu;
};

struct nvme_fcp_op_w_sgl {
	struct nvme_fc_fcp_op	op;
	struct scatterlist	sgl[NVME_INLINE_SG_CNT];
	uint8_t			priv[];
};

struct nvme_fc_lport {
	struct nvme_fc_local_port	localport;

	struct ida			endp_cnt;
	struct list_head		port_list;	/* nvme_fc_lport_list */
	struct list_head		endp_list;
	struct device			*dev;	/* physical device for dma */
	struct nvme_fc_port_template	*ops;
	struct kref			ref;
	atomic_t			act_rport_cnt;
} __aligned(sizeof(u64));	/* alignment for other things alloc'd with */

struct nvme_fc_rport {
	struct nvme_fc_remote_port	remoteport;

	struct list_head		endp_list;	/* for lport->endp_list */
	struct list_head		ctrl_list;
	struct list_head		ls_req_list;
	struct list_head		ls_rcv_list;
	struct list_head		disc_list;
	struct device			*dev;	/* physical device for dma */
	struct nvme_fc_lport		*lport;
	spinlock_t			lock;
	struct kref			ref;
	atomic_t			act_ctrl_cnt;
	unsigned long			dev_loss_end;
	struct work_struct		lsrcv_work;
} __aligned(sizeof(u64));	/* alignment for other things alloc'd with */

/* fc_ctrl flags values - specified as bit positions */
#define ASSOC_ACTIVE		0
#define ASSOC_FAILED		1
#define FCCTRL_TERMIO		2

struct nvme_fc_ctrl {
	spinlock_t		lock;
	struct nvme_fc_queue	*queues;
	struct device		*dev;
	struct nvme_fc_lport	*lport;
	struct nvme_fc_rport	*rport;
	u32			cnum;

	bool			ioq_live;
	u64			association_id;
	struct nvmefc_ls_rcv_op	*rcv_disconn;

	struct list_head	ctrl_list;	/* rport->ctrl_list */

	struct blk_mq_tag_set	admin_tag_set;
	struct blk_mq_tag_set	tag_set;

	struct work_struct	ioerr_work;
	struct delayed_work	connect_work;

	struct kref		ref;
	unsigned long		flags;
	u32			iocnt;
	wait_queue_head_t	ioabort_wait;

	struct nvme_fc_fcp_op	aen_ops[NVME_NR_AEN_COMMANDS];

	struct nvme_ctrl	ctrl;
};

static inline struct nvme_fc_ctrl *
to_fc_ctrl(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_fc_ctrl, ctrl);
}

static inline struct nvme_fc_lport *
localport_to_lport(struct nvme_fc_local_port *portptr)
{
	return container_of(portptr, struct nvme_fc_lport, localport);
}

static inline struct nvme_fc_rport *
remoteport_to_rport(struct nvme_fc_remote_port *portptr)
{
	return container_of(portptr, struct nvme_fc_rport, remoteport);
}

static inline struct nvmefc_ls_req_op *
ls_req_to_lsop(struct nvmefc_ls_req *lsreq)
{
	return container_of(lsreq, struct nvmefc_ls_req_op, ls_req);
}

static inline struct nvme_fc_fcp_op *
fcp_req_to_fcp_op(struct nvmefc_fcp_req *fcpreq)
{
	return container_of(fcpreq, struct nvme_fc_fcp_op, fcp_req);
}
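
/*
 * Rough object model (sketch): each object carries a kref and a child
 * pins its parent for its lifetime:
 *
 *   nvme_fc_lport  <--  nvme_fc_rport  <--  nvme_fc_ctrl
 *   (localport)         (remoteport)        (association)
 *
 * e.g. nvme_fc_free_rport() drops the lport reference the rport held,
 * so teardown always unwinds child-first.
 */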


/* *************************** Globals **************************** */


static DEFINE_SPINLOCK(nvme_fc_lock);

static LIST_HEAD(nvme_fc_lport_list);
static DEFINE_IDA(nvme_fc_local_port_cnt);
static DEFINE_IDA(nvme_fc_ctrl_cnt);

static struct workqueue_struct *nvme_fc_wq;

static bool nvme_fc_waiting_to_unload;
static DECLARE_COMPLETION(nvme_fc_unload_proceed);

/*
 * These items are short-term. They will eventually be moved into
 * a generic FC class. See comments in module init.
 */
static struct device *fc_udev_device;

static void nvme_fc_complete_rq(struct request *rq);

/* *********************** FC-NVME Port Management ************************ */

static void __nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *,
			struct nvme_fc_queue *, unsigned int);

static void nvme_fc_handle_ls_rqst_work(struct work_struct *work);


static void
nvme_fc_free_lport(struct kref *ref)
{
	struct nvme_fc_lport *lport =
		container_of(ref, struct nvme_fc_lport, ref);
	unsigned long flags;

	WARN_ON(lport->localport.port_state != FC_OBJSTATE_DELETED);
	WARN_ON(!list_empty(&lport->endp_list));

	/* remove from transport list */
	spin_lock_irqsave(&nvme_fc_lock, flags);
	list_del(&lport->port_list);
	if (nvme_fc_waiting_to_unload && list_empty(&nvme_fc_lport_list))
		complete(&nvme_fc_unload_proceed);
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	ida_free(&nvme_fc_local_port_cnt, lport->localport.port_num);
	ida_destroy(&lport->endp_cnt);

	put_device(lport->dev);

	kfree(lport);
}

static void
nvme_fc_lport_put(struct nvme_fc_lport *lport)
{
	kref_put(&lport->ref, nvme_fc_free_lport);
}

static int
nvme_fc_lport_get(struct nvme_fc_lport *lport)
{
	return kref_get_unless_zero(&lport->ref);
}


static struct nvme_fc_lport *
nvme_fc_attach_to_unreg_lport(struct nvme_fc_port_info *pinfo,
			struct nvme_fc_port_template *ops,
			struct device *dev)
{
	struct nvme_fc_lport *lport;
	unsigned long flags;

	spin_lock_irqsave(&nvme_fc_lock, flags);

	list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
		if (lport->localport.node_name != pinfo->node_name ||
		    lport->localport.port_name != pinfo->port_name)
			continue;

		if (lport->dev != dev) {
			lport = ERR_PTR(-EXDEV);
			goto out_done;
		}

		if (lport->localport.port_state != FC_OBJSTATE_DELETED) {
			lport = ERR_PTR(-EEXIST);
			goto out_done;
		}

		if (!nvme_fc_lport_get(lport)) {
			/*
			 * fails if ref cnt already 0. If so,
			 * act as if lport already deleted
			 */
			lport = NULL;
			goto out_done;
		}

		/* resume the lport */

		lport->ops = ops;
		lport->localport.port_role = pinfo->port_role;
		lport->localport.port_id = pinfo->port_id;
		lport->localport.port_state = FC_OBJSTATE_ONLINE;

		spin_unlock_irqrestore(&nvme_fc_lock, flags);

		return lport;
	}

	lport = NULL;

out_done:
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	return lport;
}

/**
 * nvme_fc_register_localport - transport entry point called by an
 *                              LLDD to register the existence of a NVME
 *                              host FC port.
 * @pinfo:    pointer to information about the port to be registered
 * @template: LLDD entrypoints and operational parameters for the port
 * @dev:      physical hardware device node port corresponds to. Will be
 *            used for DMA mappings
 * @portptr:  pointer to a local port pointer. Upon success, the routine
 *            will allocate a nvme_fc_local_port structure and place its
 *            address in the local port pointer. Upon failure, local port
 *            pointer will be set to 0.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvme_fc_register_localport(struct nvme_fc_port_info *pinfo,
			struct nvme_fc_port_template *template,
			struct device *dev,
			struct nvme_fc_local_port **portptr)
{
	struct nvme_fc_lport *newrec;
	unsigned long flags;
	int ret, idx;

	if (!template->localport_delete || !template->remoteport_delete ||
	    !template->ls_req || !template->fcp_io ||
	    !template->ls_abort || !template->fcp_abort ||
	    !template->max_hw_queues || !template->max_sgl_segments ||
	    !template->max_dif_sgl_segments || !template->dma_boundary) {
		ret = -EINVAL;
		goto out_reghost_failed;
	}

	/*
	 * look to see if there is already a localport that had been
	 * deregistered and in the process of waiting for all the
	 * references to fully be removed. If the references haven't
	 * expired, we can simply re-enable the localport. Remoteports
	 * and controller reconnections should resume naturally.
	 */
	newrec = nvme_fc_attach_to_unreg_lport(pinfo, template, dev);

	/* found an lport, but something about its state is bad */
	if (IS_ERR(newrec)) {
		ret = PTR_ERR(newrec);
		goto out_reghost_failed;

	/* found existing lport, which was resumed */
	} else if (newrec) {
		*portptr = &newrec->localport;
		return 0;
	}

	/* nothing found - allocate a new localport struct */

	newrec = kmalloc((sizeof(*newrec) + template->local_priv_sz),
			 GFP_KERNEL);
	if (!newrec) {
		ret = -ENOMEM;
		goto out_reghost_failed;
	}

	idx = ida_alloc(&nvme_fc_local_port_cnt, GFP_KERNEL);
	if (idx < 0) {
		ret = -ENOSPC;
		goto out_fail_kfree;
	}

	if (!get_device(dev) && dev) {
		ret = -ENODEV;
		goto out_ida_put;
	}

	INIT_LIST_HEAD(&newrec->port_list);
	INIT_LIST_HEAD(&newrec->endp_list);
	kref_init(&newrec->ref);
	atomic_set(&newrec->act_rport_cnt, 0);
	newrec->ops = template;
	newrec->dev = dev;
	ida_init(&newrec->endp_cnt);
	if (template->local_priv_sz)
		newrec->localport.private = &newrec[1];
	else
		newrec->localport.private = NULL;
	newrec->localport.node_name = pinfo->node_name;
	newrec->localport.port_name = pinfo->port_name;
	newrec->localport.port_role = pinfo->port_role;
	newrec->localport.port_id = pinfo->port_id;
	newrec->localport.port_state = FC_OBJSTATE_ONLINE;
	newrec->localport.port_num = idx;

	spin_lock_irqsave(&nvme_fc_lock, flags);
	list_add_tail(&newrec->port_list, &nvme_fc_lport_list);
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	if (dev)
		dma_set_seg_boundary(dev, template->dma_boundary);

	*portptr = &newrec->localport;
	return 0;

out_ida_put:
	ida_free(&nvme_fc_local_port_cnt, idx);
out_fail_kfree:
	kfree(newrec);
out_reghost_failed:
	*portptr = NULL;

	return ret;
}
EXPORT_SYMBOL_GPL(nvme_fc_register_localport);

/**
 * nvme_fc_unregister_localport - transport entry point called by an
 *                                LLDD to deregister/remove a previously
 *                                registered NVME host FC port.
 * @portptr: pointer to the (registered) local port that is to be
 *           deregistered.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvme_fc_unregister_localport(struct nvme_fc_local_port *portptr)
{
	struct nvme_fc_lport *lport = localport_to_lport(portptr);
	unsigned long flags;

	if (!portptr)
		return -EINVAL;

	spin_lock_irqsave(&nvme_fc_lock, flags);

	if (portptr->port_state != FC_OBJSTATE_ONLINE) {
		spin_unlock_irqrestore(&nvme_fc_lock, flags);
		return -EINVAL;
	}
	portptr->port_state = FC_OBJSTATE_DELETED;

	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	if (atomic_read(&lport->act_rport_cnt) == 0)
		lport->ops->localport_delete(&lport->localport);

	nvme_fc_lport_put(lport);

	return 0;
}
EXPORT_SYMBOL_GPL(nvme_fc_unregister_localport);

/*
 * TRADDR strings, per FC-NVME, are fixed format:
 *   "nn-0x<16hexdigits>:pn-0x<16hexdigits>" - 43 characters
 * The udev event will only differ by the prefix of the field being
 * specified:
 *   "NVMEFC_HOST_TRADDR=" or "NVMEFC_TRADDR=" - 19 max characters
 * 19 + 43 + null_fudge = 64 characters
 */
#define FCNVME_TRADDR_LENGTH		64
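
/*
 * Illustrative example (the WWN values are made up): a discovery uevent
 * for a local port nn=0x200000109b123456/pn=0x100000109b123456 pointing
 * at a remote discovery controller nn=0x200100109b654321/
 * pn=0x100100109b654321 carries the environment strings:
 *
 *   FC_EVENT=nvmediscovery
 *   NVMEFC_HOST_TRADDR=nn-0x200000109b123456:pn-0x100000109b123456
 *   NVMEFC_TRADDR=nn-0x200100109b654321:pn-0x100100109b654321
 *
 * which userspace (e.g. nvme-cli autoconnect scripts) can feed directly
 * to a connect as host_traddr/traddr.
 */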
static void
nvme_fc_signal_discovery_scan(struct nvme_fc_lport *lport,
		struct nvme_fc_rport *rport)
{
	char hostaddr[FCNVME_TRADDR_LENGTH];	/* NVMEFC_HOST_TRADDR=...*/
	char tgtaddr[FCNVME_TRADDR_LENGTH];	/* NVMEFC_TRADDR=...*/
	char *envp[4] = { "FC_EVENT=nvmediscovery", hostaddr, tgtaddr, NULL };

	if (!(rport->remoteport.port_role & FC_PORT_ROLE_NVME_DISCOVERY))
		return;

	snprintf(hostaddr, sizeof(hostaddr),
		"NVMEFC_HOST_TRADDR=nn-0x%016llx:pn-0x%016llx",
		lport->localport.node_name, lport->localport.port_name);
	snprintf(tgtaddr, sizeof(tgtaddr),
		"NVMEFC_TRADDR=nn-0x%016llx:pn-0x%016llx",
		rport->remoteport.node_name, rport->remoteport.port_name);
	kobject_uevent_env(&fc_udev_device->kobj, KOBJ_CHANGE, envp);
}

static void
nvme_fc_free_rport(struct kref *ref)
{
	struct nvme_fc_rport *rport =
		container_of(ref, struct nvme_fc_rport, ref);
	struct nvme_fc_lport *lport =
		localport_to_lport(rport->remoteport.localport);
	unsigned long flags;

	WARN_ON(rport->remoteport.port_state != FC_OBJSTATE_DELETED);
	WARN_ON(!list_empty(&rport->ctrl_list));

	/* remove from lport list */
	spin_lock_irqsave(&nvme_fc_lock, flags);
	list_del(&rport->endp_list);
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	WARN_ON(!list_empty(&rport->disc_list));
	ida_free(&lport->endp_cnt, rport->remoteport.port_num);

	kfree(rport);

	nvme_fc_lport_put(lport);
}

static void
nvme_fc_rport_put(struct nvme_fc_rport *rport)
{
	kref_put(&rport->ref, nvme_fc_free_rport);
}

static int
nvme_fc_rport_get(struct nvme_fc_rport *rport)
{
	return kref_get_unless_zero(&rport->ref);
}

static void
nvme_fc_resume_controller(struct nvme_fc_ctrl *ctrl)
{
	switch (ctrl->ctrl.state) {
	case NVME_CTRL_NEW:
	case NVME_CTRL_CONNECTING:
		/*
		 * As all reconnects were suppressed, schedule a
		 * connect.
		 */
		dev_info(ctrl->ctrl.device,
			"NVME-FC{%d}: connectivity re-established. "
			"Attempting reconnect\n", ctrl->cnum);

		queue_delayed_work(nvme_wq, &ctrl->connect_work, 0);
		break;

	case NVME_CTRL_RESETTING:
		/*
		 * Controller is already in the process of terminating the
		 * association. No need to do anything further. The reconnect
		 * step will naturally occur after the reset completes.
		 */
		break;

	default:
		/* no action to take - let it delete */
		break;
	}
}
static struct nvme_fc_rport *
nvme_fc_attach_to_suspended_rport(struct nvme_fc_lport *lport,
				struct nvme_fc_port_info *pinfo)
{
	struct nvme_fc_rport *rport;
	struct nvme_fc_ctrl *ctrl;
	unsigned long flags;

	spin_lock_irqsave(&nvme_fc_lock, flags);

	list_for_each_entry(rport, &lport->endp_list, endp_list) {
		if (rport->remoteport.node_name != pinfo->node_name ||
		    rport->remoteport.port_name != pinfo->port_name)
			continue;

		if (!nvme_fc_rport_get(rport)) {
			rport = ERR_PTR(-ENOLCK);
			goto out_done;
		}

		spin_unlock_irqrestore(&nvme_fc_lock, flags);

		spin_lock_irqsave(&rport->lock, flags);

		/* has it been unregistered? */
		if (rport->remoteport.port_state != FC_OBJSTATE_DELETED) {
			/* means lldd called us twice */
			spin_unlock_irqrestore(&rport->lock, flags);
			nvme_fc_rport_put(rport);
			return ERR_PTR(-ESTALE);
		}

		rport->remoteport.port_role = pinfo->port_role;
		rport->remoteport.port_id = pinfo->port_id;
		rport->remoteport.port_state = FC_OBJSTATE_ONLINE;
		rport->dev_loss_end = 0;

		/*
		 * kick off a reconnect attempt on all associations to the
		 * remote port. A successful reconnect will resume i/o.
		 */
		list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list)
			nvme_fc_resume_controller(ctrl);

		spin_unlock_irqrestore(&rport->lock, flags);

		return rport;
	}

	rport = NULL;

out_done:
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	return rport;
}

static inline void
__nvme_fc_set_dev_loss_tmo(struct nvme_fc_rport *rport,
			struct nvme_fc_port_info *pinfo)
{
	if (pinfo->dev_loss_tmo)
		rport->remoteport.dev_loss_tmo = pinfo->dev_loss_tmo;
	else
		rport->remoteport.dev_loss_tmo = NVME_FC_DEFAULT_DEV_LOSS_TMO;
}
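
/*
 * dev_loss_tmo semantics (summary): if the LLDD supplies a non-zero
 * pinfo->dev_loss_tmo at registration it is used as-is, otherwise the
 * 60 second NVME_FC_DEFAULT_DEV_LOSS_TMO applies. A value of 0 may
 * still be set later via nvme_fc_set_remoteport_devloss(), in which
 * case loss of connectivity deletes the controllers immediately (see
 * nvme_fc_unregister_remoteport()).
 */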
/**
 * nvme_fc_register_remoteport - transport entry point called by an
 *                               LLDD to register the existence of a NVME
 *                               subsystem FC port on its fabric.
 * @localport: pointer to the (registered) local port that the remote
 *             subsystem port is connected to.
 * @pinfo:     pointer to information about the port to be registered
 * @portptr:   pointer to a remote port pointer. Upon success, the routine
 *             will allocate a nvme_fc_remote_port structure and place its
 *             address in the remote port pointer. Upon failure, remote port
 *             pointer will be set to 0.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvme_fc_register_remoteport(struct nvme_fc_local_port *localport,
				struct nvme_fc_port_info *pinfo,
				struct nvme_fc_remote_port **portptr)
{
	struct nvme_fc_lport *lport = localport_to_lport(localport);
	struct nvme_fc_rport *newrec;
	unsigned long flags;
	int ret, idx;

	if (!nvme_fc_lport_get(lport)) {
		ret = -ESHUTDOWN;
		goto out_reghost_failed;
	}

	/*
	 * look to see if there is already a remoteport that is waiting
	 * for a reconnect (within dev_loss_tmo) with the same WWNs.
	 * If so, transition to it and reconnect.
	 */
	newrec = nvme_fc_attach_to_suspended_rport(lport, pinfo);

	/* found an rport, but something about its state is bad */
	if (IS_ERR(newrec)) {
		ret = PTR_ERR(newrec);
		goto out_lport_put;

	/* found existing rport, which was resumed */
	} else if (newrec) {
		nvme_fc_lport_put(lport);
		__nvme_fc_set_dev_loss_tmo(newrec, pinfo);
		nvme_fc_signal_discovery_scan(lport, newrec);
		*portptr = &newrec->remoteport;
		return 0;
	}

	/* nothing found - allocate a new remoteport struct */

	newrec = kmalloc((sizeof(*newrec) + lport->ops->remote_priv_sz),
			 GFP_KERNEL);
	if (!newrec) {
		ret = -ENOMEM;
		goto out_lport_put;
	}

	idx = ida_alloc(&lport->endp_cnt, GFP_KERNEL);
	if (idx < 0) {
		ret = -ENOSPC;
		goto out_kfree_rport;
	}

	INIT_LIST_HEAD(&newrec->endp_list);
	INIT_LIST_HEAD(&newrec->ctrl_list);
	INIT_LIST_HEAD(&newrec->ls_req_list);
	INIT_LIST_HEAD(&newrec->disc_list);
	kref_init(&newrec->ref);
	atomic_set(&newrec->act_ctrl_cnt, 0);
	spin_lock_init(&newrec->lock);
	newrec->remoteport.localport = &lport->localport;
	INIT_LIST_HEAD(&newrec->ls_rcv_list);
	newrec->dev = lport->dev;
	newrec->lport = lport;
	if (lport->ops->remote_priv_sz)
		newrec->remoteport.private = &newrec[1];
	else
		newrec->remoteport.private = NULL;
	newrec->remoteport.port_role = pinfo->port_role;
	newrec->remoteport.node_name = pinfo->node_name;
	newrec->remoteport.port_name = pinfo->port_name;
	newrec->remoteport.port_id = pinfo->port_id;
	newrec->remoteport.port_state = FC_OBJSTATE_ONLINE;
	newrec->remoteport.port_num = idx;
	__nvme_fc_set_dev_loss_tmo(newrec, pinfo);
	INIT_WORK(&newrec->lsrcv_work, nvme_fc_handle_ls_rqst_work);

	spin_lock_irqsave(&nvme_fc_lock, flags);
	list_add_tail(&newrec->endp_list, &lport->endp_list);
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	nvme_fc_signal_discovery_scan(lport, newrec);

	*portptr = &newrec->remoteport;
	return 0;

out_kfree_rport:
	kfree(newrec);
out_lport_put:
	nvme_fc_lport_put(lport);
out_reghost_failed:
	*portptr = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_fc_register_remoteport);

static int
nvme_fc_abort_lsops(struct nvme_fc_rport *rport)
{
	struct nvmefc_ls_req_op *lsop;
	unsigned long flags;

restart:
	spin_lock_irqsave(&rport->lock, flags);

	list_for_each_entry(lsop, &rport->ls_req_list, lsreq_list) {
		if (!(lsop->flags & FCOP_FLAGS_TERMIO)) {
			lsop->flags |= FCOP_FLAGS_TERMIO;
			spin_unlock_irqrestore(&rport->lock, flags);
			rport->lport->ops->ls_abort(&rport->lport->localport,
						&rport->remoteport,
						&lsop->ls_req);
			goto restart;
		}
	}
	spin_unlock_irqrestore(&rport->lock, flags);

	return 0;
}
static void
nvme_fc_ctrl_connectivity_loss(struct nvme_fc_ctrl *ctrl)
{
	dev_info(ctrl->ctrl.device,
		"NVME-FC{%d}: controller connectivity lost. Awaiting "
		"Reconnect", ctrl->cnum);

	switch (ctrl->ctrl.state) {
	case NVME_CTRL_NEW:
	case NVME_CTRL_LIVE:
		/*
		 * Schedule a controller reset. The reset will terminate the
		 * association and schedule the reconnect timer. Reconnects
		 * will be attempted until either the ctlr_loss_tmo
		 * (max_retries * connect_delay) expires or the remoteport's
		 * dev_loss_tmo expires.
		 */
		if (nvme_reset_ctrl(&ctrl->ctrl)) {
			dev_warn(ctrl->ctrl.device,
				"NVME-FC{%d}: Couldn't schedule reset.\n",
				ctrl->cnum);
			nvme_delete_ctrl(&ctrl->ctrl);
		}
		break;

	case NVME_CTRL_CONNECTING:
		/*
		 * The association has already been terminated and the
		 * controller is attempting reconnects. No need to do anything
		 * further. Reconnects will be attempted until either the
		 * ctlr_loss_tmo (max_retries * connect_delay) expires or the
		 * remoteport's dev_loss_tmo expires.
		 */
		break;

	case NVME_CTRL_RESETTING:
		/*
		 * Controller is already in the process of terminating the
		 * association. No need to do anything further. The reconnect
		 * step will kick in naturally after the association is
		 * terminated.
		 */
		break;

	case NVME_CTRL_DELETING:
	case NVME_CTRL_DELETING_NOIO:
	default:
		/* no action to take - let it delete */
		break;
	}
}

/**
 * nvme_fc_unregister_remoteport - transport entry point called by an
 *                                 LLDD to deregister/remove a previously
 *                                 registered NVME subsystem FC port.
 * @portptr: pointer to the (registered) remote port that is to be
 *           deregistered.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvme_fc_unregister_remoteport(struct nvme_fc_remote_port *portptr)
{
	struct nvme_fc_rport *rport = remoteport_to_rport(portptr);
	struct nvme_fc_ctrl *ctrl;
	unsigned long flags;

	if (!portptr)
		return -EINVAL;

	spin_lock_irqsave(&rport->lock, flags);

	if (portptr->port_state != FC_OBJSTATE_ONLINE) {
		spin_unlock_irqrestore(&rport->lock, flags);
		return -EINVAL;
	}
	portptr->port_state = FC_OBJSTATE_DELETED;

	rport->dev_loss_end = jiffies + (portptr->dev_loss_tmo * HZ);

	list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) {
		/* if dev_loss_tmo==0, dev loss is immediate */
		if (!portptr->dev_loss_tmo) {
			dev_warn(ctrl->ctrl.device,
				"NVME-FC{%d}: controller connectivity lost.\n",
				ctrl->cnum);
			nvme_delete_ctrl(&ctrl->ctrl);
		} else
			nvme_fc_ctrl_connectivity_loss(ctrl);
	}

	spin_unlock_irqrestore(&rport->lock, flags);

	nvme_fc_abort_lsops(rport);

	if (atomic_read(&rport->act_ctrl_cnt) == 0)
		rport->lport->ops->remoteport_delete(portptr);

	/*
	 * release the reference. Once all controllers go away (which
	 * should only occur after dev_loss_tmo expires), this allows
	 * the rport to be torn down.
	 */
	nvme_fc_rport_put(rport);

	return 0;
}
EXPORT_SYMBOL_GPL(nvme_fc_unregister_remoteport);
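
/*
 * Remoteport lifetime (sketch):
 *
 *   register     -> FC_OBJSTATE_ONLINE
 *   unregister   -> FC_OBJSTATE_DELETED, dev_loss_end armed; controllers
 *                   are either deleted (dev_loss_tmo == 0) or left
 *                   reconnecting
 *   re-register within dev_loss_tmo -> the DELETED rport is revived by
 *                   nvme_fc_attach_to_suspended_rport() and reconnects
 *                   resume
 *
 * If no re-registration happens, the reconnect/dev_loss timers expire,
 * the controllers go away, and the last reference frees the rport.
 */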
/**
 * nvme_fc_rescan_remoteport - transport entry point called by an
 *                             LLDD to request a nvme device rescan.
 * @remoteport: pointer to the (registered) remote port that is to be
 *              rescanned.
 *
 * Returns: N/A
 */
void
nvme_fc_rescan_remoteport(struct nvme_fc_remote_port *remoteport)
{
	struct nvme_fc_rport *rport = remoteport_to_rport(remoteport);

	nvme_fc_signal_discovery_scan(rport->lport, rport);
}
EXPORT_SYMBOL_GPL(nvme_fc_rescan_remoteport);

int
nvme_fc_set_remoteport_devloss(struct nvme_fc_remote_port *portptr,
			u32 dev_loss_tmo)
{
	struct nvme_fc_rport *rport = remoteport_to_rport(portptr);
	unsigned long flags;

	spin_lock_irqsave(&rport->lock, flags);

	if (portptr->port_state != FC_OBJSTATE_ONLINE) {
		spin_unlock_irqrestore(&rport->lock, flags);
		return -EINVAL;
	}

	/* a dev_loss_tmo of 0 (immediate) is allowed to be set */
	rport->remoteport.dev_loss_tmo = dev_loss_tmo;

	spin_unlock_irqrestore(&rport->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(nvme_fc_set_remoteport_devloss);


/* *********************** FC-NVME DMA Handling **************************** */

/*
 * The fcloop device passes in a NULL device pointer. Real LLDDs will
 * pass in a valid device pointer. If NULL is passed to the dma mapping
 * routines, depending on the platform, it may or may not succeed, and
 * may crash.
 *
 * As such:
 * Wrap all the dma routines and check the dev pointer.
 *
 * For simple mappings (those that return just a dma address), we'll
 * noop them, returning a dma address of 0.
 *
 * On more complex mappings (dma_map_sg), a pseudo routine fills
 * in the scatter list, setting all dma addresses to 0.
 */

static inline dma_addr_t
fc_dma_map_single(struct device *dev, void *ptr, size_t size,
		enum dma_data_direction dir)
{
	return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L;
}

static inline int
fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dev ? dma_mapping_error(dev, dma_addr) : 0;
}

static inline void
fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
	enum dma_data_direction dir)
{
	if (dev)
		dma_unmap_single(dev, addr, size, dir);
}

static inline void
fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	if (dev)
		dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void
fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	if (dev)
		dma_sync_single_for_device(dev, addr, size, dir);
}

/* pseudo dma_map_sg call */
static int
fc_map_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	WARN_ON(nents == 0 || sg[0].length == 0);

	for_each_sg(sg, s, nents, i) {
		s->dma_address = 0L;
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		s->dma_length = s->length;
#endif
	}
	return nents;
}

static inline int
fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	return dev ? dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents);
}

static inline void
fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	if (dev)
		dma_unmap_sg(dev, sg, nents, dir);
}
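
/*
 * Usage sketch for the fc_dma_* wrappers above (the init-side helper
 * itself lives outside this excerpt): the transport maps a command IU
 * once at request initialization and syncs/unmaps around each use,
 * roughly:
 *
 *	op->fcp_req.cmddma = fc_dma_map_single(ctrl->lport->dev,
 *				&op->cmd_iu, sizeof(op->cmd_iu),
 *				DMA_TO_DEVICE);
 *	if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.cmddma))
 *		return -EFAULT;
 *
 * With fcloop's NULL device this degenerates to a 0 dma address and a
 * no-op unmap, which is exactly the intent described above.
 */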

/* *********************** FC-NVME LS Handling **************************** */

static void nvme_fc_ctrl_put(struct nvme_fc_ctrl *);
static int nvme_fc_ctrl_get(struct nvme_fc_ctrl *);

static void nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg);

static void
__nvme_fc_finish_ls_req(struct nvmefc_ls_req_op *lsop)
{
	struct nvme_fc_rport *rport = lsop->rport;
	struct nvmefc_ls_req *lsreq = &lsop->ls_req;
	unsigned long flags;

	spin_lock_irqsave(&rport->lock, flags);

	if (!lsop->req_queued) {
		spin_unlock_irqrestore(&rport->lock, flags);
		return;
	}

	list_del(&lsop->lsreq_list);

	lsop->req_queued = false;

	spin_unlock_irqrestore(&rport->lock, flags);

	fc_dma_unmap_single(rport->dev, lsreq->rqstdma,
				  (lsreq->rqstlen + lsreq->rsplen),
				  DMA_BIDIRECTIONAL);

	nvme_fc_rport_put(rport);
}

static int
__nvme_fc_send_ls_req(struct nvme_fc_rport *rport,
		struct nvmefc_ls_req_op *lsop,
		void (*done)(struct nvmefc_ls_req *req, int status))
{
	struct nvmefc_ls_req *lsreq = &lsop->ls_req;
	unsigned long flags;
	int ret = 0;

	if (rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
		return -ECONNREFUSED;

	if (!nvme_fc_rport_get(rport))
		return -ESHUTDOWN;

	lsreq->done = done;
	lsop->rport = rport;
	lsop->req_queued = false;
	INIT_LIST_HEAD(&lsop->lsreq_list);
	init_completion(&lsop->ls_done);

	lsreq->rqstdma = fc_dma_map_single(rport->dev, lsreq->rqstaddr,
				  lsreq->rqstlen + lsreq->rsplen,
				  DMA_BIDIRECTIONAL);
	if (fc_dma_mapping_error(rport->dev, lsreq->rqstdma)) {
		ret = -EFAULT;
		goto out_putrport;
	}
	lsreq->rspdma = lsreq->rqstdma + lsreq->rqstlen;

	spin_lock_irqsave(&rport->lock, flags);

	list_add_tail(&lsop->lsreq_list, &rport->ls_req_list);

	lsop->req_queued = true;

	spin_unlock_irqrestore(&rport->lock, flags);

	ret = rport->lport->ops->ls_req(&rport->lport->localport,
					&rport->remoteport, lsreq);
	if (ret)
		goto out_unlink;

	return 0;

out_unlink:
	lsop->ls_error = ret;
	spin_lock_irqsave(&rport->lock, flags);
	lsop->req_queued = false;
	list_del(&lsop->lsreq_list);
	spin_unlock_irqrestore(&rport->lock, flags);
	fc_dma_unmap_single(rport->dev, lsreq->rqstdma,
				  (lsreq->rqstlen + lsreq->rsplen),
				  DMA_BIDIRECTIONAL);
out_putrport:
	nvme_fc_rport_put(rport);

	return ret;
}

static void
nvme_fc_send_ls_req_done(struct nvmefc_ls_req *lsreq, int status)
{
	struct nvmefc_ls_req_op *lsop = ls_req_to_lsop(lsreq);

	lsop->ls_error = status;
	complete(&lsop->ls_done);
}

static int
nvme_fc_send_ls_req(struct nvme_fc_rport *rport, struct nvmefc_ls_req_op *lsop)
{
	struct nvmefc_ls_req *lsreq = &lsop->ls_req;
	struct fcnvme_ls_rjt *rjt = lsreq->rspaddr;
	int ret;

	ret = __nvme_fc_send_ls_req(rport, lsop, nvme_fc_send_ls_req_done);

	if (!ret) {
		/*
		 * No timeout/not interruptible as we need the struct
		 * to exist until the lldd calls us back. Thus mandate
		 * waiting until the lldd calls back; the lldd is
		 * responsible for the timeout action.
		 */
		wait_for_completion(&lsop->ls_done);

		__nvme_fc_finish_ls_req(lsop);

		ret = lsop->ls_error;
	}

	if (ret)
		return ret;

	/* ACC or RJT payload ? */
	if (rjt->w0.ls_cmd == FCNVME_LS_RJT)
		return -ENXIO;

	return 0;
}
static int
nvme_fc_send_ls_req_async(struct nvme_fc_rport *rport,
		struct nvmefc_ls_req_op *lsop,
		void (*done)(struct nvmefc_ls_req *req, int status))
{
	/* don't wait for completion */

	return __nvme_fc_send_ls_req(rport, lsop, done);
}

static int
nvme_fc_connect_admin_queue(struct nvme_fc_ctrl *ctrl,
	struct nvme_fc_queue *queue, u16 qsize, u16 ersp_ratio)
{
	struct nvmefc_ls_req_op *lsop;
	struct nvmefc_ls_req *lsreq;
	struct fcnvme_ls_cr_assoc_rqst *assoc_rqst;
	struct fcnvme_ls_cr_assoc_acc *assoc_acc;
	unsigned long flags;
	int ret, fcret = 0;

	lsop = kzalloc((sizeof(*lsop) +
			 sizeof(*assoc_rqst) + sizeof(*assoc_acc) +
			 ctrl->lport->ops->lsrqst_priv_sz), GFP_KERNEL);
	if (!lsop) {
		dev_info(ctrl->ctrl.device,
			"NVME-FC{%d}: send Create Association failed: ENOMEM\n",
			ctrl->cnum);
		ret = -ENOMEM;
		goto out_no_memory;
	}

	assoc_rqst = (struct fcnvme_ls_cr_assoc_rqst *)&lsop[1];
	assoc_acc = (struct fcnvme_ls_cr_assoc_acc *)&assoc_rqst[1];
	lsreq = &lsop->ls_req;
	if (ctrl->lport->ops->lsrqst_priv_sz)
		lsreq->private = &assoc_acc[1];
	else
		lsreq->private = NULL;

	assoc_rqst->w0.ls_cmd = FCNVME_LS_CREATE_ASSOCIATION;
	assoc_rqst->desc_list_len =
			cpu_to_be32(sizeof(struct fcnvme_lsdesc_cr_assoc_cmd));

	assoc_rqst->assoc_cmd.desc_tag =
			cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD);
	assoc_rqst->assoc_cmd.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_cr_assoc_cmd));

	assoc_rqst->assoc_cmd.ersp_ratio = cpu_to_be16(ersp_ratio);
	assoc_rqst->assoc_cmd.sqsize = cpu_to_be16(qsize - 1);
	/* Linux supports only Dynamic controllers */
	assoc_rqst->assoc_cmd.cntlid = cpu_to_be16(0xffff);
	uuid_copy(&assoc_rqst->assoc_cmd.hostid, &ctrl->ctrl.opts->host->id);
	strncpy(assoc_rqst->assoc_cmd.hostnqn, ctrl->ctrl.opts->host->nqn,
		min(FCNVME_ASSOC_HOSTNQN_LEN, NVMF_NQN_SIZE));
	strncpy(assoc_rqst->assoc_cmd.subnqn, ctrl->ctrl.opts->subsysnqn,
		min(FCNVME_ASSOC_SUBNQN_LEN, NVMF_NQN_SIZE));

	lsop->queue = queue;
	lsreq->rqstaddr = assoc_rqst;
	lsreq->rqstlen = sizeof(*assoc_rqst);
	lsreq->rspaddr = assoc_acc;
	lsreq->rsplen = sizeof(*assoc_acc);
	lsreq->timeout = NVME_FC_LS_TIMEOUT_SEC;

	ret = nvme_fc_send_ls_req(ctrl->rport, lsop);
	if (ret)
		goto out_free_buffer;

	/* process connect LS completion */

	/* validate the ACC response */
	if (assoc_acc->hdr.w0.ls_cmd != FCNVME_LS_ACC)
		fcret = VERR_LSACC;
	else if (assoc_acc->hdr.desc_list_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_ls_cr_assoc_acc)))
		fcret = VERR_CR_ASSOC_ACC_LEN;
	else if (assoc_acc->hdr.rqst.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_RQST))
		fcret = VERR_LSDESC_RQST;
	else if (assoc_acc->hdr.rqst.desc_len !=
			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst)))
		fcret = VERR_LSDESC_RQST_LEN;
	else if (assoc_acc->hdr.rqst.w0.ls_cmd != FCNVME_LS_CREATE_ASSOCIATION)
		fcret = VERR_CR_ASSOC;
	else if (assoc_acc->associd.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
		fcret = VERR_ASSOC_ID;
	else if (assoc_acc->associd.desc_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_assoc_id)))
		fcret = VERR_ASSOC_ID_LEN;
	else if (assoc_acc->connectid.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_CONN_ID))
		fcret = VERR_CONN_ID;
	else if (assoc_acc->connectid.desc_len !=
			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_conn_id)))
		fcret = VERR_CONN_ID_LEN;

	if (fcret) {
		ret = -EBADF;
		dev_err(ctrl->dev,
			"q %d Create Association LS failed: %s\n",
			queue->qnum, validation_errors[fcret]);
	} else {
		spin_lock_irqsave(&ctrl->lock, flags);
		ctrl->association_id =
			be64_to_cpu(assoc_acc->associd.association_id);
		queue->connection_id =
			be64_to_cpu(assoc_acc->connectid.connection_id);
		set_bit(NVME_FC_Q_CONNECTED, &queue->flags);
		spin_unlock_irqrestore(&ctrl->lock, flags);
	}

out_free_buffer:
	kfree(lsop);
out_no_memory:
	if (ret)
		dev_err(ctrl->dev,
			"queue %d connect admin queue failed (%d).\n",
			queue->qnum, ret);
	return ret;
}
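
/*
 * Create Association LS exchange (sketch of the payloads built and
 * validated above):
 *
 *   Request:  w0.ls_cmd = FCNVME_LS_CREATE_ASSOCIATION
 *             cr_assoc_cmd { ersp_ratio, sqsize = qsize - 1,
 *                            cntlid = 0xffff (dynamic), hostid,
 *                            hostnqn, subnqn }
 *   ACC:      hdr (echoes the request descriptor)
 *             assoc_id  -> ctrl->association_id
 *             conn_id   -> queue->connection_id (admin queue)
 *
 * nvme_fc_connect_queue() below repeats the same pattern for the I/O
 * queues, carrying the association_id plus a qid instead of the
 * host/subsystem identity.
 */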
static int
nvme_fc_connect_queue(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
			u16 qsize, u16 ersp_ratio)
{
	struct nvmefc_ls_req_op *lsop;
	struct nvmefc_ls_req *lsreq;
	struct fcnvme_ls_cr_conn_rqst *conn_rqst;
	struct fcnvme_ls_cr_conn_acc *conn_acc;
	int ret, fcret = 0;

	lsop = kzalloc((sizeof(*lsop) +
			 sizeof(*conn_rqst) + sizeof(*conn_acc) +
			 ctrl->lport->ops->lsrqst_priv_sz), GFP_KERNEL);
	if (!lsop) {
		dev_info(ctrl->ctrl.device,
			"NVME-FC{%d}: send Create Connection failed: ENOMEM\n",
			ctrl->cnum);
		ret = -ENOMEM;
		goto out_no_memory;
	}

	conn_rqst = (struct fcnvme_ls_cr_conn_rqst *)&lsop[1];
	conn_acc = (struct fcnvme_ls_cr_conn_acc *)&conn_rqst[1];
	lsreq = &lsop->ls_req;
	if (ctrl->lport->ops->lsrqst_priv_sz)
		lsreq->private = (void *)&conn_acc[1];
	else
		lsreq->private = NULL;

	conn_rqst->w0.ls_cmd = FCNVME_LS_CREATE_CONNECTION;
	conn_rqst->desc_list_len = cpu_to_be32(
				sizeof(struct fcnvme_lsdesc_assoc_id) +
				sizeof(struct fcnvme_lsdesc_cr_conn_cmd));

	conn_rqst->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
	conn_rqst->associd.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_assoc_id));
	conn_rqst->associd.association_id = cpu_to_be64(ctrl->association_id);
	conn_rqst->connect_cmd.desc_tag =
			cpu_to_be32(FCNVME_LSDESC_CREATE_CONN_CMD);
	conn_rqst->connect_cmd.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_cr_conn_cmd));
	conn_rqst->connect_cmd.ersp_ratio = cpu_to_be16(ersp_ratio);
	conn_rqst->connect_cmd.qid = cpu_to_be16(queue->qnum);
	conn_rqst->connect_cmd.sqsize = cpu_to_be16(qsize - 1);

	lsop->queue = queue;
	lsreq->rqstaddr = conn_rqst;
	lsreq->rqstlen = sizeof(*conn_rqst);
	lsreq->rspaddr = conn_acc;
	lsreq->rsplen = sizeof(*conn_acc);
	lsreq->timeout = NVME_FC_LS_TIMEOUT_SEC;

	ret = nvme_fc_send_ls_req(ctrl->rport, lsop);
	if (ret)
		goto out_free_buffer;

	/* process connect LS completion */

	/* validate the ACC response */
	if (conn_acc->hdr.w0.ls_cmd != FCNVME_LS_ACC)
		fcret = VERR_LSACC;
	else if (conn_acc->hdr.desc_list_len !=
			fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc)))
		fcret = VERR_CR_CONN_ACC_LEN;
	else if (conn_acc->hdr.rqst.desc_tag != cpu_to_be32(FCNVME_LSDESC_RQST))
		fcret = VERR_LSDESC_RQST;
	else if (conn_acc->hdr.rqst.desc_len !=
			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst)))
		fcret = VERR_LSDESC_RQST_LEN;
	else if (conn_acc->hdr.rqst.w0.ls_cmd != FCNVME_LS_CREATE_CONNECTION)
		fcret = VERR_CR_CONN;
	else if (conn_acc->connectid.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_CONN_ID))
		fcret = VERR_CONN_ID;
	else if (conn_acc->connectid.desc_len !=
			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_conn_id)))
		fcret = VERR_CONN_ID_LEN;

	if (fcret) {
		ret = -EBADF;
		dev_err(ctrl->dev,
			"q %d Create I/O Connection LS failed: %s\n",
			queue->qnum, validation_errors[fcret]);
	} else {
		queue->connection_id =
			be64_to_cpu(conn_acc->connectid.connection_id);
		set_bit(NVME_FC_Q_CONNECTED, &queue->flags);
	}

out_free_buffer:
	kfree(lsop);
out_no_memory:
	if (ret)
		dev_err(ctrl->dev,
			"queue %d connect I/O queue failed (%d).\n",
			queue->qnum, ret);
	return ret;
}
static void
nvme_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status)
{
	struct nvmefc_ls_req_op *lsop = ls_req_to_lsop(lsreq);

	__nvme_fc_finish_ls_req(lsop);

	/* fc-nvme initiator doesn't care about success or failure of cmd */

	kfree(lsop);
}

/*
 * This routine sends a FC-NVME LS to disconnect (aka terminate)
 * the FC-NVME Association. Terminating the association also
 * terminates the FC-NVME connections (per queue, both admin and io
 * queues) that are part of the association. E.g. things are torn
 * down, and the related FC-NVME Association ID and Connection IDs
 * become invalid.
 *
 * The behavior of the fc-nvme initiator is such that its
 * understanding of the association and connections will implicitly
 * be torn down. The action is implicit as it may be due to a loss of
 * connectivity with the fc-nvme target, so you may never get a
 * response even if you tried. As such, the action of this routine
 * is to asynchronously send the LS, ignore any results of the LS, and
 * continue on with terminating the association. If the fc-nvme target
 * is present and receives the LS, it too can tear down.
 */
static void
nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl)
{
	struct fcnvme_ls_disconnect_assoc_rqst *discon_rqst;
	struct fcnvme_ls_disconnect_assoc_acc *discon_acc;
	struct nvmefc_ls_req_op *lsop;
	struct nvmefc_ls_req *lsreq;
	int ret;

	lsop = kzalloc((sizeof(*lsop) +
			sizeof(*discon_rqst) + sizeof(*discon_acc) +
			ctrl->lport->ops->lsrqst_priv_sz), GFP_KERNEL);
	if (!lsop) {
		dev_info(ctrl->ctrl.device,
			"NVME-FC{%d}: send Disconnect Association "
			"failed: ENOMEM\n",
			ctrl->cnum);
		return;
	}

	discon_rqst = (struct fcnvme_ls_disconnect_assoc_rqst *)&lsop[1];
	discon_acc = (struct fcnvme_ls_disconnect_assoc_acc *)&discon_rqst[1];
	lsreq = &lsop->ls_req;
	if (ctrl->lport->ops->lsrqst_priv_sz)
		lsreq->private = (void *)&discon_acc[1];
	else
		lsreq->private = NULL;

	nvmefc_fmt_lsreq_discon_assoc(lsreq, discon_rqst, discon_acc,
				ctrl->association_id);

	ret = nvme_fc_send_ls_req_async(ctrl->rport, lsop,
				nvme_fc_disconnect_assoc_done);
	if (ret)
		kfree(lsop);
}

static void
nvme_fc_xmt_ls_rsp_done(struct nvmefc_ls_rsp *lsrsp)
{
	struct nvmefc_ls_rcv_op *lsop = lsrsp->nvme_fc_private;
	struct nvme_fc_rport *rport = lsop->rport;
	struct nvme_fc_lport *lport = rport->lport;
	unsigned long flags;

	spin_lock_irqsave(&rport->lock, flags);
	list_del(&lsop->lsrcv_list);
	spin_unlock_irqrestore(&rport->lock, flags);

	fc_dma_sync_single_for_cpu(lport->dev, lsop->rspdma,
				sizeof(*lsop->rspbuf), DMA_TO_DEVICE);
	fc_dma_unmap_single(lport->dev, lsop->rspdma,
			sizeof(*lsop->rspbuf), DMA_TO_DEVICE);

	kfree(lsop->rspbuf);
	kfree(lsop->rqstbuf);
	kfree(lsop);

	nvme_fc_rport_put(rport);
}

static void
nvme_fc_xmt_ls_rsp(struct nvmefc_ls_rcv_op *lsop)
{
	struct nvme_fc_rport *rport = lsop->rport;
	struct nvme_fc_lport *lport = rport->lport;
	struct fcnvme_ls_rqst_w0 *w0 = &lsop->rqstbuf->w0;
	int ret;

	fc_dma_sync_single_for_device(lport->dev, lsop->rspdma,
				sizeof(*lsop->rspbuf), DMA_TO_DEVICE);

	ret = lport->ops->xmt_ls_rsp(&lport->localport, &rport->remoteport,
				     lsop->lsrsp);
	if (ret) {
		dev_warn(lport->dev,
			"LLDD rejected LS RSP xmt: LS %d status %d\n",
			w0->ls_cmd, ret);
		nvme_fc_xmt_ls_rsp_done(lsop->lsrsp);
		return;
	}
}

static struct nvme_fc_ctrl *
nvme_fc_match_disconn_ls(struct nvme_fc_rport *rport,
		      struct nvmefc_ls_rcv_op *lsop)
{
	struct fcnvme_ls_disconnect_assoc_rqst *rqst =
					&lsop->rqstbuf->rq_dis_assoc;
	struct nvme_fc_ctrl *ctrl, *ret = NULL;
	struct nvmefc_ls_rcv_op *oldls = NULL;
	u64 association_id = be64_to_cpu(rqst->associd.association_id);
	unsigned long flags;

	spin_lock_irqsave(&rport->lock, flags);

	list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) {
		if (!nvme_fc_ctrl_get(ctrl))
			continue;
		spin_lock(&ctrl->lock);
		if (association_id == ctrl->association_id) {
			oldls = ctrl->rcv_disconn;
			ctrl->rcv_disconn = lsop;
			ret = ctrl;
		}
		spin_unlock(&ctrl->lock);
		if (ret)
			/* leave the ctrl get reference */
			break;
		nvme_fc_ctrl_put(ctrl);
	}

	spin_unlock_irqrestore(&rport->lock, flags);

	/* transmit a response for anything that was pending */
	if (oldls) {
		dev_info(rport->lport->dev,
			"NVME-FC{%d}: Multiple Disconnect Association "
			"LS's received\n", ctrl->cnum);
		/* overwrite good response with bogus failure */
		oldls->lsrsp->rsplen = nvme_fc_format_rjt(oldls->rspbuf,
						sizeof(*oldls->rspbuf),
						rqst->w0.ls_cmd,
						FCNVME_RJT_RC_UNAB,
						FCNVME_RJT_EXP_NONE, 0);
		nvme_fc_xmt_ls_rsp(oldls);
	}

	return ret;
}
/*
 * returns true to mean LS handled and ls_rsp can be sent
 * returns false to defer ls_rsp xmt (will be done as part of
 * association termination)
 */
static bool
nvme_fc_ls_disconnect_assoc(struct nvmefc_ls_rcv_op *lsop)
{
	struct nvme_fc_rport *rport = lsop->rport;
	struct fcnvme_ls_disconnect_assoc_rqst *rqst =
			&lsop->rqstbuf->rq_dis_assoc;
	struct fcnvme_ls_disconnect_assoc_acc *acc =
			&lsop->rspbuf->rsp_dis_assoc;
	struct nvme_fc_ctrl *ctrl = NULL;
	int ret = 0;

	memset(acc, 0, sizeof(*acc));

	ret = nvmefc_vldt_lsreq_discon_assoc(lsop->rqstdatalen, rqst);
	if (!ret) {
		/* match an active association */
		ctrl = nvme_fc_match_disconn_ls(rport, lsop);
		if (!ctrl)
			ret = VERR_NO_ASSOC;
	}

	if (ret) {
		dev_info(rport->lport->dev,
			"Disconnect LS failed: %s\n",
			validation_errors[ret]);
		lsop->lsrsp->rsplen = nvme_fc_format_rjt(acc,
				sizeof(*acc), rqst->w0.ls_cmd,
				(ret == VERR_NO_ASSOC) ?
					FCNVME_RJT_RC_INV_ASSOC :
					FCNVME_RJT_RC_LOGIC,
				FCNVME_RJT_EXP_NONE, 0);
		return true;
	}

	/* format an ACCept response */

	lsop->lsrsp->rsplen = sizeof(*acc);

	nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_ls_disconnect_assoc_acc)),
			FCNVME_LS_DISCONNECT_ASSOC);

	/*
	 * the transmit of the response will occur after the exchanges
	 * for the association have been ABTS'd by
	 * nvme_fc_delete_association().
	 */

	/* fail the association */
	nvme_fc_error_recovery(ctrl, "Disconnect Association LS received");

	/* release the reference taken by nvme_fc_match_disconn_ls() */
	nvme_fc_ctrl_put(ctrl);

	return false;
}
/*
 * Actual Processing routine for received FC-NVME LS Requests from the LLD
 * returns true if a response should be sent afterward, false if rsp will
 * be sent asynchronously.
 */
static bool
nvme_fc_handle_ls_rqst(struct nvmefc_ls_rcv_op *lsop)
{
	struct fcnvme_ls_rqst_w0 *w0 = &lsop->rqstbuf->w0;
	bool ret = true;

	lsop->lsrsp->nvme_fc_private = lsop;
	lsop->lsrsp->rspbuf = lsop->rspbuf;
	lsop->lsrsp->rspdma = lsop->rspdma;
	lsop->lsrsp->done = nvme_fc_xmt_ls_rsp_done;
	/* Be preventative. handlers will later set to valid length */
	lsop->lsrsp->rsplen = 0;

	/*
	 * handlers:
	 *   parse request input, execute the request, and format the
	 *   LS response
	 */
	switch (w0->ls_cmd) {
	case FCNVME_LS_DISCONNECT_ASSOC:
		ret = nvme_fc_ls_disconnect_assoc(lsop);
		break;
	case FCNVME_LS_DISCONNECT_CONN:
		lsop->lsrsp->rsplen = nvme_fc_format_rjt(lsop->rspbuf,
				sizeof(*lsop->rspbuf), w0->ls_cmd,
				FCNVME_RJT_RC_UNSUP, FCNVME_RJT_EXP_NONE, 0);
		break;
	case FCNVME_LS_CREATE_ASSOCIATION:
	case FCNVME_LS_CREATE_CONNECTION:
		lsop->lsrsp->rsplen = nvme_fc_format_rjt(lsop->rspbuf,
				sizeof(*lsop->rspbuf), w0->ls_cmd,
				FCNVME_RJT_RC_LOGIC, FCNVME_RJT_EXP_NONE, 0);
		break;
	default:
		lsop->lsrsp->rsplen = nvme_fc_format_rjt(lsop->rspbuf,
				sizeof(*lsop->rspbuf), w0->ls_cmd,
				FCNVME_RJT_RC_INVAL, FCNVME_RJT_EXP_NONE, 0);
		break;
	}

	return ret;
}

static void
nvme_fc_handle_ls_rqst_work(struct work_struct *work)
{
	struct nvme_fc_rport *rport =
		container_of(work, struct nvme_fc_rport, lsrcv_work);
	struct fcnvme_ls_rqst_w0 *w0;
	struct nvmefc_ls_rcv_op *lsop;
	unsigned long flags;
	bool sendrsp;

restart:
	sendrsp = true;
	spin_lock_irqsave(&rport->lock, flags);
	list_for_each_entry(lsop, &rport->ls_rcv_list, lsrcv_list) {
		if (lsop->handled)
			continue;

		lsop->handled = true;
		if (rport->remoteport.port_state == FC_OBJSTATE_ONLINE) {
			spin_unlock_irqrestore(&rport->lock, flags);
			sendrsp = nvme_fc_handle_ls_rqst(lsop);
		} else {
			spin_unlock_irqrestore(&rport->lock, flags);
			w0 = &lsop->rqstbuf->w0;
			lsop->lsrsp->rsplen = nvme_fc_format_rjt(
						lsop->rspbuf,
						sizeof(*lsop->rspbuf),
						w0->ls_cmd,
						FCNVME_RJT_RC_UNAB,
						FCNVME_RJT_EXP_NONE, 0);
		}
		if (sendrsp)
			nvme_fc_xmt_ls_rsp(lsop);
		goto restart;
	}
	spin_unlock_irqrestore(&rport->lock, flags);
}
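
/*
 * LS receive path (sketch): the LLDD calls nvme_fc_rcv_ls_req() below,
 * which copies the payload into an nvmefc_ls_rcv_op, queues it on
 * rport->ls_rcv_list, and schedules lsrcv_work. The work function above
 * dispatches to nvme_fc_handle_ls_rqst(); responses are transmitted via
 * nvme_fc_xmt_ls_rsp() either immediately, or deferred (Disconnect
 * Association) until the association teardown has ABTS'd the exchanges.
 */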
/**
 * nvme_fc_rcv_ls_req - transport entry point called by an LLDD
 *                      upon the reception of a NVME LS request.
 *
 * The nvme-fc layer will copy payload to an internal structure for
 * processing. As such, upon completion of the routine, the LLDD may
 * immediately free/reuse the LS request buffer passed in the call.
 *
 * If this routine returns error, the LLDD should abort the exchange.
 *
 * @portptr:     pointer to the (registered) remote port that the LS
 *               was received from. The remoteport is associated with
 *               a specific localport.
 * @lsrsp:       pointer to a nvmefc_ls_rsp response structure to be
 *               used to reference the exchange corresponding to the LS
 *               when issuing an ls response.
 * @lsreqbuf:    pointer to the buffer containing the LS Request
 * @lsreqbuf_len: length, in bytes, of the received LS request
 */
int
nvme_fc_rcv_ls_req(struct nvme_fc_remote_port *portptr,
			struct nvmefc_ls_rsp *lsrsp,
			void *lsreqbuf, u32 lsreqbuf_len)
{
	struct nvme_fc_rport *rport = remoteport_to_rport(portptr);
	struct nvme_fc_lport *lport = rport->lport;
	struct fcnvme_ls_rqst_w0 *w0 = (struct fcnvme_ls_rqst_w0 *)lsreqbuf;
	struct nvmefc_ls_rcv_op *lsop;
	unsigned long flags;
	int ret;

	nvme_fc_rport_get(rport);

	/* validate there's a routine to transmit a response */
	if (!lport->ops->xmt_ls_rsp) {
		dev_info(lport->dev,
			"RCV %s LS failed: no LLDD xmt_ls_rsp\n",
			(w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
				nvmefc_ls_names[w0->ls_cmd] : "");
		ret = -EINVAL;
		goto out_put;
	}

	if (lsreqbuf_len > sizeof(union nvmefc_ls_requests)) {
		dev_info(lport->dev,
			"RCV %s LS failed: payload too large\n",
			(w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
				nvmefc_ls_names[w0->ls_cmd] : "");
		ret = -E2BIG;
		goto out_put;
	}

	/*
	 * check the op allocation first so its buffers aren't
	 * dereferenced off a NULL pointer when allocation fails
	 */
	lsop = kzalloc(sizeof(*lsop), GFP_KERNEL);
	if (!lsop) {
		dev_info(lport->dev,
			"RCV %s LS failed: No memory\n",
			(w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
				nvmefc_ls_names[w0->ls_cmd] : "");
		ret = -ENOMEM;
		goto out_put;
	}
	lsop->rqstbuf = kzalloc(sizeof(*lsop->rqstbuf), GFP_KERNEL);
	lsop->rspbuf = kzalloc(sizeof(*lsop->rspbuf), GFP_KERNEL);
	if (!lsop->rqstbuf || !lsop->rspbuf) {
		dev_info(lport->dev,
			"RCV %s LS failed: No memory\n",
			(w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
				nvmefc_ls_names[w0->ls_cmd] : "");
		ret = -ENOMEM;
		goto out_free;
	}

	lsop->rspdma = fc_dma_map_single(lport->dev, lsop->rspbuf,
					sizeof(*lsop->rspbuf),
					DMA_TO_DEVICE);
	if (fc_dma_mapping_error(lport->dev, lsop->rspdma)) {
		dev_info(lport->dev,
			"RCV %s LS failed: DMA mapping failure\n",
			(w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
				nvmefc_ls_names[w0->ls_cmd] : "");
		ret = -EFAULT;
		goto out_free;
	}

	lsop->rport = rport;
	lsop->lsrsp = lsrsp;

	memcpy(lsop->rqstbuf, lsreqbuf, lsreqbuf_len);
	lsop->rqstdatalen = lsreqbuf_len;

	spin_lock_irqsave(&rport->lock, flags);
	if (rport->remoteport.port_state != FC_OBJSTATE_ONLINE) {
		spin_unlock_irqrestore(&rport->lock, flags);
		ret = -ENOTCONN;
		goto out_unmap;
	}
	list_add_tail(&lsop->lsrcv_list, &rport->ls_rcv_list);
	spin_unlock_irqrestore(&rport->lock, flags);

	schedule_work(&rport->lsrcv_work);

	return 0;

out_unmap:
	fc_dma_unmap_single(lport->dev, lsop->rspdma,
			sizeof(*lsop->rspbuf), DMA_TO_DEVICE);
out_free:
	kfree(lsop->rspbuf);
	kfree(lsop->rqstbuf);
	kfree(lsop);
out_put:
	nvme_fc_rport_put(rport);
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_fc_rcv_ls_req);


/* *********************** NVME Ctrl Routines **************************** */

static void
__nvme_fc_exit_request(struct nvme_fc_ctrl *ctrl,
		struct nvme_fc_fcp_op *op)
{
	fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.rspdma,
				sizeof(op->rsp_iu), DMA_FROM_DEVICE);
	fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.cmddma,
				sizeof(op->cmd_iu), DMA_TO_DEVICE);

	atomic_set(&op->state, FCPOP_STATE_UNINIT);
}

static void
nvme_fc_exit_request(struct blk_mq_tag_set *set, struct request *rq,
		unsigned int hctx_idx)
{
	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);

	return __nvme_fc_exit_request(to_fc_ctrl(set->driver_data), op);
}

static int
__nvme_fc_abort_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_fcp_op *op)
{
	unsigned long flags;
	int opstate;

	spin_lock_irqsave(&ctrl->lock, flags);
	opstate = atomic_xchg(&op->state, FCPOP_STATE_ABORTED);
	if (opstate != FCPOP_STATE_ACTIVE)
		atomic_set(&op->state, opstate);
	else if (test_bit(FCCTRL_TERMIO, &ctrl->flags)) {
		op->flags |= FCOP_FLAGS_TERMIO;
		ctrl->iocnt++;
	}
	spin_unlock_irqrestore(&ctrl->lock, flags);

	if (opstate != FCPOP_STATE_ACTIVE)
		return -ECANCELED;

	ctrl->lport->ops->fcp_abort(&ctrl->lport->localport,
					&ctrl->rport->remoteport,
					op->queue->lldd_handle,
					&op->fcp_req);

	return 0;
}

static void
nvme_fc_abort_aen_ops(struct nvme_fc_ctrl *ctrl)
{
	struct nvme_fc_fcp_op *aen_op = ctrl->aen_ops;
	int i;

	/* ensure we've initialized the ops once */
	if (!(aen_op->flags & FCOP_FLAGS_AEN))
		return;

	for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++)
		__nvme_fc_abort_op(ctrl, aen_op);
}

static inline void
__nvme_fc_fcpop_chk_teardowns(struct nvme_fc_ctrl *ctrl,
		struct nvme_fc_fcp_op *op, int opstate)
{
	unsigned long flags;

	if (opstate == FCPOP_STATE_ABORTED) {
		spin_lock_irqsave(&ctrl->lock, flags);
		if (test_bit(FCCTRL_TERMIO, &ctrl->flags) &&
		    op->flags & FCOP_FLAGS_TERMIO) {
			if (!--ctrl->iocnt)
				wake_up(&ctrl->ioabort_wait);
		}
		spin_unlock_irqrestore(&ctrl->lock, flags);
	}
}

static void
nvme_fc_ctrl_ioerr_work(struct work_struct *work)
{
	struct nvme_fc_ctrl *ctrl =
			container_of(work, struct nvme_fc_ctrl, ioerr_work);

	nvme_fc_error_recovery(ctrl, "transport detected io error");
}
nvme_fc_io_getuuid - Routine called to get the appid field 1905 * associated with a request by the lldd 1906 * @req: IO request from nvme fc to driver 1907 * Returns: UUID if there is an appid associated with the VM, or 1908 * NULL if the user/libvirt has not set an appid for the VM 1909 */ 1910 char *nvme_fc_io_getuuid(struct nvmefc_fcp_req *req) 1911 { 1912 struct nvme_fc_fcp_op *op = fcp_req_to_fcp_op(req); 1913 struct request *rq = op->rq; 1914 1915 if (!IS_ENABLED(CONFIG_BLK_CGROUP_FC_APPID) || !rq->bio) 1916 return NULL; 1917 return blkcg_get_fc_appid(rq->bio); 1918 } 1919 EXPORT_SYMBOL_GPL(nvme_fc_io_getuuid); 1920 1921 static void 1922 nvme_fc_fcpio_done(struct nvmefc_fcp_req *req) 1923 { 1924 struct nvme_fc_fcp_op *op = fcp_req_to_fcp_op(req); 1925 struct request *rq = op->rq; 1926 struct nvmefc_fcp_req *freq = &op->fcp_req; 1927 struct nvme_fc_ctrl *ctrl = op->ctrl; 1928 struct nvme_fc_queue *queue = op->queue; 1929 struct nvme_completion *cqe = &op->rsp_iu.cqe; 1930 struct nvme_command *sqe = &op->cmd_iu.sqe; 1931 __le16 status = cpu_to_le16(NVME_SC_SUCCESS << 1); 1932 union nvme_result result; 1933 bool terminate_assoc = true; 1934 int opstate; 1935 1936 /* 1937 * WARNING: 1938 * The current linux implementation of a nvme controller 1939 * allocates a single tag set for all io queues and sizes 1940 * the io queues to fully hold all possible tags. Thus, the 1941 * implementation does not reference or care about the sqhd 1942 * value as it never needs to use the sqhd/sqtail pointers 1943 * for submission pacing. 1944 * 1945 * This affects the FC-NVME implementation in two ways: 1946 * 1) As the value doesn't matter, we don't need to waste 1947 * cycles extracting it from ERSPs and stamping it in the 1948 * cases where the transport fabricates CQEs on successful 1949 * completions. 1950 * 2) The FC-NVME implementation requires that delivery of 1951 * ERSP completions go back to the nvme layer in order 1952 * relative to the rsn, such that the sqhd value will always 1953 * be "in order" for the nvme layer. As the nvme layer in 1954 * linux doesn't care about sqhd, there's no need to return 1955 * them in order. 1956 * 1957 * Additionally: 1958 * As the core nvme layer in linux currently does not look at 1959 * every field in the cqe - in cases where the FC transport must 1960 * fabricate a CQE, the following fields will not be set as they 1961 * are not referenced: 1962 * cqe.sqid, cqe.sqhd, cqe.command_id 1963 * 1964 * Failure or error of an individual i/o, in a transport- 1965 * detected fashion unrelated to the nvme completion status, 1966 * can potentially cause the initiator and target sides to get out 1967 * of sync on SQ head/tail (aka outstanding io count allowed). 1968 * Per FC-NVME spec, failure of an individual command requires 1969 * the connection to be terminated, which in turn requires the 1970 * association to be terminated.
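 *
 * For illustration only -- a sketch, not part of this driver: before
 * invoking the ->done() callback, an LLDD is expected to have filled
 * in the completion fields that the logic below validates. A
 * hypothetical LLDD completion might look like (struct foo_exch and
 * its members are invented; only the nvmefc_fcp_req fields and the
 * ->done() contract come from the transport api):
 *
 *	static void foo_lldd_complete_fcp(struct foo_exch *exch)
 *	{
 *		struct nvmefc_fcp_req *freq = exch->fcp_req;
 *
 *		freq->transferred_length = exch->bytes_xfrd;
 *		freq->rcv_rsplen = exch->rsp_len;
 *		freq->status = exch->hw_error ?
 *				NVME_SC_HOST_PATH_ERROR : NVME_SC_SUCCESS;
 *		freq->done(freq);
 *	}
 *
 * where freq->done() lands in nvme_fc_fcpio_done() below, and
 * rcv_rsplen is expected to be 0, NVME_FC_SIZEOF_ZEROS_RSP, or a
 * full ERSP IU length.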
1971 */ 1972 1973 opstate = atomic_xchg(&op->state, FCPOP_STATE_COMPLETE); 1974 1975 fc_dma_sync_single_for_cpu(ctrl->lport->dev, op->fcp_req.rspdma, 1976 sizeof(op->rsp_iu), DMA_FROM_DEVICE); 1977 1978 if (opstate == FCPOP_STATE_ABORTED) 1979 status = cpu_to_le16(NVME_SC_HOST_ABORTED_CMD << 1); 1980 else if (freq->status) { 1981 status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1); 1982 dev_info(ctrl->ctrl.device, 1983 "NVME-FC{%d}: io failed due to lldd error %d\n", 1984 ctrl->cnum, freq->status); 1985 } 1986 1987 /* 1988 * For the Linux implementation, if we have an unsuccessful 1989 * status, the blk-mq layer can typically be called with the 1990 * non-zero status and the content of the cqe isn't important. 1991 */ 1992 if (status) 1993 goto done; 1994 1995 /* 1996 * command completed successfully relative to the wire 1997 * protocol. However, validate anything received and 1998 * extract the status and result from the cqe (create it 1999 * where necessary). 2000 */ 2001 2002 switch (freq->rcv_rsplen) { 2003 2004 case 0: 2005 case NVME_FC_SIZEOF_ZEROS_RSP: 2006 /* 2007 * No response payload or 12 bytes of payload (which 2008 * should all be zeros) is considered successful, with 2009 * the transport fabricating the CQE content. 2010 */ 2011 if (freq->transferred_length != 2012 be32_to_cpu(op->cmd_iu.data_len)) { 2013 status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1); 2014 dev_info(ctrl->ctrl.device, 2015 "NVME-FC{%d}: io failed due to bad transfer " 2016 "length: %d vs expected %d\n", 2017 ctrl->cnum, freq->transferred_length, 2018 be32_to_cpu(op->cmd_iu.data_len)); 2019 goto done; 2020 } 2021 result.u64 = 0; 2022 break; 2023 2024 case sizeof(struct nvme_fc_ersp_iu): 2025 /* 2026 * The ERSP IU contains a full completion with CQE. 2027 * Validate ERSP IU and look at cqe.
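 *
 * As a worked example of the validation below: a full ERSP IU is
 * sizeof(struct nvme_fc_ersp_iu) bytes (32), so the LLDD reports
 * rcv_rsplen == 32 and the IU itself must carry iu_len == 32 / 4 == 8,
 * since iu_len counts 32-bit words per FC-NVME.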
2028 */ 2029 if (unlikely(be16_to_cpu(op->rsp_iu.iu_len) != 2030 (freq->rcv_rsplen / 4) || 2031 be32_to_cpu(op->rsp_iu.xfrd_len) != 2032 freq->transferred_length || 2033 op->rsp_iu.ersp_result || 2034 sqe->common.command_id != cqe->command_id)) { 2035 status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1); 2036 dev_info(ctrl->ctrl.device, 2037 "NVME-FC{%d}: io failed due to bad NVMe_ERSP: " 2038 "iu len %d, xfr len %d vs %d, status code " 2039 "%d, cmdid %d vs %d\n", 2040 ctrl->cnum, be16_to_cpu(op->rsp_iu.iu_len), 2041 be32_to_cpu(op->rsp_iu.xfrd_len), 2042 freq->transferred_length, 2043 op->rsp_iu.ersp_result, 2044 sqe->common.command_id, 2045 cqe->command_id); 2046 goto done; 2047 } 2048 result = cqe->result; 2049 status = cqe->status; 2050 break; 2051 2052 default: 2053 status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1); 2054 dev_info(ctrl->ctrl.device, 2055 "NVME-FC{%d}: io failed due to odd NVMe_xRSP iu " 2056 "len %d\n", 2057 ctrl->cnum, freq->rcv_rsplen); 2058 goto done; 2059 } 2060 2061 terminate_assoc = false; 2062 2063 done: 2064 if (op->flags & FCOP_FLAGS_AEN) { 2065 nvme_complete_async_event(&queue->ctrl->ctrl, status, &result); 2066 __nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate); 2067 atomic_set(&op->state, FCPOP_STATE_IDLE); 2068 op->flags = FCOP_FLAGS_AEN; /* clear other flags */ 2069 nvme_fc_ctrl_put(ctrl); 2070 goto check_error; 2071 } 2072 2073 __nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate); 2074 if (!nvme_try_complete_req(rq, status, result)) 2075 nvme_fc_complete_rq(rq); 2076 2077 check_error: 2078 if (terminate_assoc && ctrl->ctrl.state != NVME_CTRL_RESETTING) 2079 queue_work(nvme_reset_wq, &ctrl->ioerr_work); 2080 } 2081 2082 static int 2083 __nvme_fc_init_request(struct nvme_fc_ctrl *ctrl, 2084 struct nvme_fc_queue *queue, struct nvme_fc_fcp_op *op, 2085 struct request *rq, u32 rqno) 2086 { 2087 struct nvme_fcp_op_w_sgl *op_w_sgl = 2088 container_of(op, typeof(*op_w_sgl), op); 2089 struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu; 2090 int ret = 0; 2091 2092 memset(op, 0, sizeof(*op)); 2093 op->fcp_req.cmdaddr = &op->cmd_iu; 2094 op->fcp_req.cmdlen = sizeof(op->cmd_iu); 2095 op->fcp_req.rspaddr = &op->rsp_iu; 2096 op->fcp_req.rsplen = sizeof(op->rsp_iu); 2097 op->fcp_req.done = nvme_fc_fcpio_done; 2098 op->ctrl = ctrl; 2099 op->queue = queue; 2100 op->rq = rq; 2101 op->rqno = rqno; 2102 2103 cmdiu->format_id = NVME_CMD_FORMAT_ID; 2104 cmdiu->fc_id = NVME_CMD_FC_ID; 2105 cmdiu->iu_len = cpu_to_be16(sizeof(*cmdiu) / sizeof(u32)); 2106 if (queue->qnum) 2107 cmdiu->rsv_cat = fccmnd_set_cat_css(0, 2108 (NVME_CC_CSS_NVM >> NVME_CC_CSS_SHIFT)); 2109 else 2110 cmdiu->rsv_cat = fccmnd_set_cat_admin(0); 2111 2112 op->fcp_req.cmddma = fc_dma_map_single(ctrl->lport->dev, 2113 &op->cmd_iu, sizeof(op->cmd_iu), DMA_TO_DEVICE); 2114 if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.cmddma)) { 2115 dev_err(ctrl->dev, 2116 "FCP Op failed - cmdiu dma mapping failed.\n"); 2117 ret = -EFAULT; 2118 goto out_on_error; 2119 } 2120 2121 op->fcp_req.rspdma = fc_dma_map_single(ctrl->lport->dev, 2122 &op->rsp_iu, sizeof(op->rsp_iu), 2123 DMA_FROM_DEVICE); 2124 if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.rspdma)) { 2125 dev_err(ctrl->dev, 2126 "FCP Op failed - rspiu dma mapping failed.\n"); 2127 ret = -EFAULT; 2128 } 2129 2130 atomic_set(&op->state, FCPOP_STATE_IDLE); 2131 out_on_error: 2132 return ret; 2133 } 2134 2135 static int 2136 nvme_fc_init_request(struct blk_mq_tag_set *set, struct request *rq, 2137 unsigned int hctx_idx, unsigned int numa_node) 2138 { 2139 struct 
nvme_fc_ctrl *ctrl = to_fc_ctrl(set->driver_data); 2140 struct nvme_fcp_op_w_sgl *op = blk_mq_rq_to_pdu(rq); 2141 int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0; 2142 struct nvme_fc_queue *queue = &ctrl->queues[queue_idx]; 2143 int res; 2144 2145 res = __nvme_fc_init_request(ctrl, queue, &op->op, rq, queue->rqcnt++); 2146 if (res) 2147 return res; 2148 op->op.fcp_req.first_sgl = op->sgl; 2149 op->op.fcp_req.private = &op->priv[0]; 2150 nvme_req(rq)->ctrl = &ctrl->ctrl; 2151 nvme_req(rq)->cmd = &op->op.cmd_iu.sqe; 2152 return res; 2153 } 2154 2155 static int 2156 nvme_fc_init_aen_ops(struct nvme_fc_ctrl *ctrl) 2157 { 2158 struct nvme_fc_fcp_op *aen_op; 2159 struct nvme_fc_cmd_iu *cmdiu; 2160 struct nvme_command *sqe; 2161 void *private = NULL; 2162 int i, ret; 2163 2164 aen_op = ctrl->aen_ops; 2165 for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) { 2166 if (ctrl->lport->ops->fcprqst_priv_sz) { 2167 private = kzalloc(ctrl->lport->ops->fcprqst_priv_sz, 2168 GFP_KERNEL); 2169 if (!private) 2170 return -ENOMEM; 2171 } 2172 2173 cmdiu = &aen_op->cmd_iu; 2174 sqe = &cmdiu->sqe; 2175 ret = __nvme_fc_init_request(ctrl, &ctrl->queues[0], 2176 aen_op, (struct request *)NULL, 2177 (NVME_AQ_BLK_MQ_DEPTH + i)); 2178 if (ret) { 2179 kfree(private); 2180 return ret; 2181 } 2182 2183 aen_op->flags = FCOP_FLAGS_AEN; 2184 aen_op->fcp_req.private = private; 2185 2186 memset(sqe, 0, sizeof(*sqe)); 2187 sqe->common.opcode = nvme_admin_async_event; 2188 /* Note: core layer may overwrite the sqe.command_id value */ 2189 sqe->common.command_id = NVME_AQ_BLK_MQ_DEPTH + i; 2190 } 2191 return 0; 2192 } 2193 2194 static void 2195 nvme_fc_term_aen_ops(struct nvme_fc_ctrl *ctrl) 2196 { 2197 struct nvme_fc_fcp_op *aen_op; 2198 int i; 2199 2200 cancel_work_sync(&ctrl->ctrl.async_event_work); 2201 aen_op = ctrl->aen_ops; 2202 for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) { 2203 __nvme_fc_exit_request(ctrl, aen_op); 2204 2205 kfree(aen_op->fcp_req.private); 2206 aen_op->fcp_req.private = NULL; 2207 } 2208 } 2209 2210 static inline int 2211 __nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, unsigned int qidx) 2212 { 2213 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(data); 2214 struct nvme_fc_queue *queue = &ctrl->queues[qidx]; 2215 2216 hctx->driver_data = queue; 2217 queue->hctx = hctx; 2218 return 0; 2219 } 2220 2221 static int 2222 nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, unsigned int hctx_idx) 2223 { 2224 return __nvme_fc_init_hctx(hctx, data, hctx_idx + 1); 2225 } 2226 2227 static int 2228 nvme_fc_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data, 2229 unsigned int hctx_idx) 2230 { 2231 return __nvme_fc_init_hctx(hctx, data, hctx_idx); 2232 } 2233 2234 static void 2235 nvme_fc_init_queue(struct nvme_fc_ctrl *ctrl, int idx) 2236 { 2237 struct nvme_fc_queue *queue; 2238 2239 queue = &ctrl->queues[idx]; 2240 memset(queue, 0, sizeof(*queue)); 2241 queue->ctrl = ctrl; 2242 queue->qnum = idx; 2243 atomic_set(&queue->csn, 0); 2244 queue->dev = ctrl->dev; 2245 2246 if (idx > 0) 2247 queue->cmnd_capsule_len = ctrl->ctrl.ioccsz * 16; 2248 else 2249 queue->cmnd_capsule_len = sizeof(struct nvme_command); 2250 2251 /* 2252 * Considered whether we should allocate buffers for all SQEs 2253 * and CQEs and dma map them - mapping their respective entries 2254 * into the request structures (kernel vm addr and dma address) 2255 * thus the driver could use the buffers/mappings directly. 2256 * It only makes sense if the LLDD would use them for its 2257 * messaging api. 
It's very unlikely most adapter api's would use 2258 * a native NVME sqe/cqe. More reasonable if FC-NVME IU payload 2259 * structures were used instead. 2260 */ 2261 } 2262 2263 /* 2264 * This routine terminates a queue at the transport level. 2265 * The transport has already ensured that all outstanding ios on 2266 * the queue have been terminated. 2267 * The transport will send a Disconnect LS request to terminate 2268 * the queue's connection. Termination of the admin queue will also 2269 * terminate the association at the target. 2270 */ 2271 static void 2272 nvme_fc_free_queue(struct nvme_fc_queue *queue) 2273 { 2274 if (!test_and_clear_bit(NVME_FC_Q_CONNECTED, &queue->flags)) 2275 return; 2276 2277 clear_bit(NVME_FC_Q_LIVE, &queue->flags); 2278 /* 2279 * Current implementation never disconnects a single queue. 2280 * It always terminates a whole association. So there is never 2281 * a disconnect(queue) LS sent to the target. 2282 */ 2283 2284 queue->connection_id = 0; 2285 atomic_set(&queue->csn, 0); 2286 } 2287 2288 static void 2289 __nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *ctrl, 2290 struct nvme_fc_queue *queue, unsigned int qidx) 2291 { 2292 if (ctrl->lport->ops->delete_queue) 2293 ctrl->lport->ops->delete_queue(&ctrl->lport->localport, qidx, 2294 queue->lldd_handle); 2295 queue->lldd_handle = NULL; 2296 } 2297 2298 static void 2299 nvme_fc_free_io_queues(struct nvme_fc_ctrl *ctrl) 2300 { 2301 int i; 2302 2303 for (i = 1; i < ctrl->ctrl.queue_count; i++) 2304 nvme_fc_free_queue(&ctrl->queues[i]); 2305 } 2306 2307 static int 2308 __nvme_fc_create_hw_queue(struct nvme_fc_ctrl *ctrl, 2309 struct nvme_fc_queue *queue, unsigned int qidx, u16 qsize) 2310 { 2311 int ret = 0; 2312 2313 queue->lldd_handle = NULL; 2314 if (ctrl->lport->ops->create_queue) 2315 ret = ctrl->lport->ops->create_queue(&ctrl->lport->localport, 2316 qidx, qsize, &queue->lldd_handle); 2317 2318 return ret; 2319 } 2320 2321 static void 2322 nvme_fc_delete_hw_io_queues(struct nvme_fc_ctrl *ctrl) 2323 { 2324 struct nvme_fc_queue *queue = &ctrl->queues[ctrl->ctrl.queue_count - 1]; 2325 int i; 2326 2327 for (i = ctrl->ctrl.queue_count - 1; i >= 1; i--, queue--) 2328 __nvme_fc_delete_hw_queue(ctrl, queue, i); 2329 } 2330 2331 static int 2332 nvme_fc_create_hw_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize) 2333 { 2334 struct nvme_fc_queue *queue = &ctrl->queues[1]; 2335 int i, ret; 2336 2337 for (i = 1; i < ctrl->ctrl.queue_count; i++, queue++) { 2338 ret = __nvme_fc_create_hw_queue(ctrl, queue, i, qsize); 2339 if (ret) 2340 goto delete_queues; 2341 } 2342 2343 return 0; 2344 2345 delete_queues: 2346 for (; i > 0; i--) 2347 __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[i], i); 2348 return ret; 2349 } 2350 2351 static int 2352 nvme_fc_connect_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize) 2353 { 2354 int i, ret = 0; 2355 2356 for (i = 1; i < ctrl->ctrl.queue_count; i++) { 2357 ret = nvme_fc_connect_queue(ctrl, &ctrl->queues[i], qsize, 2358 (qsize / 5)); 2359 if (ret) 2360 break; 2361 ret = nvmf_connect_io_queue(&ctrl->ctrl, i); 2362 if (ret) 2363 break; 2364 2365 set_bit(NVME_FC_Q_LIVE, &ctrl->queues[i].flags); 2366 } 2367 2368 return ret; 2369 } 2370 2371 static void 2372 nvme_fc_init_io_queues(struct nvme_fc_ctrl *ctrl) 2373 { 2374 int i; 2375 2376 for (i = 1; i < ctrl->ctrl.queue_count; i++) 2377 nvme_fc_init_queue(ctrl, i); 2378 } 2379 2380 static void 2381 nvme_fc_ctrl_free(struct kref *ref) 2382 { 2383 struct nvme_fc_ctrl *ctrl = 2384 container_of(ref, struct nvme_fc_ctrl, ref); 2385 unsigned long flags; 2386 
2387 if (ctrl->ctrl.tagset) 2388 nvme_remove_io_tag_set(&ctrl->ctrl); 2389 2390 /* remove from rport list */ 2391 spin_lock_irqsave(&ctrl->rport->lock, flags); 2392 list_del(&ctrl->ctrl_list); 2393 spin_unlock_irqrestore(&ctrl->rport->lock, flags); 2394 2395 nvme_unquiesce_admin_queue(&ctrl->ctrl); 2396 nvme_remove_admin_tag_set(&ctrl->ctrl); 2397 2398 kfree(ctrl->queues); 2399 2400 put_device(ctrl->dev); 2401 nvme_fc_rport_put(ctrl->rport); 2402 2403 ida_free(&nvme_fc_ctrl_cnt, ctrl->cnum); 2404 if (ctrl->ctrl.opts) 2405 nvmf_free_options(ctrl->ctrl.opts); 2406 kfree(ctrl); 2407 } 2408 2409 static void 2410 nvme_fc_ctrl_put(struct nvme_fc_ctrl *ctrl) 2411 { 2412 kref_put(&ctrl->ref, nvme_fc_ctrl_free); 2413 } 2414 2415 static int 2416 nvme_fc_ctrl_get(struct nvme_fc_ctrl *ctrl) 2417 { 2418 return kref_get_unless_zero(&ctrl->ref); 2419 } 2420 2421 /* 2422 * All accesses from nvme core layer done - can now free the 2423 * controller. Called after last nvme_put_ctrl() call 2424 */ 2425 static void 2426 nvme_fc_nvme_ctrl_freed(struct nvme_ctrl *nctrl) 2427 { 2428 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl); 2429 2430 WARN_ON(nctrl != &ctrl->ctrl); 2431 2432 nvme_fc_ctrl_put(ctrl); 2433 } 2434 2435 /* 2436 * This routine is used by the transport when it needs to find active 2437 * io on a queue that is to be terminated. The transport uses 2438 * blk_mq_tagset_busy_iter() to find the busy requests, which then 2439 * invokes this routine to kill them one at a time. 2440 * 2441 * As FC allocates FC exchange for each io, the transport must contact 2442 * the LLDD to terminate the exchange, thus releasing the FC exchange. 2443 * After terminating the exchange the LLDD will call the transport's 2444 * normal io done path for the request, but it will have an aborted 2445 * status. The done path will return the io request back to the block 2446 * layer with an error status. 2447 */ 2448 static bool nvme_fc_terminate_exchange(struct request *req, void *data) 2449 { 2450 struct nvme_ctrl *nctrl = data; 2451 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl); 2452 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(req); 2453 2454 op->nreq.flags |= NVME_REQ_CANCELLED; 2455 __nvme_fc_abort_op(ctrl, op); 2456 return true; 2457 } 2458 2459 /* 2460 * This routine runs through all outstanding commands on the association 2461 * and aborts them. This routine is typically called by the 2462 * delete_association routine. It is also called due to an error during 2463 * reconnect. In that scenario, it is most likely a command that initializes 2464 * the controller, including fabric Connect commands on io queues, that 2465 * may have timed out or failed; thus the io must be killed for the connect 2466 * thread to see the error. 2467 */ 2468 static void 2469 __nvme_fc_abort_outstanding_ios(struct nvme_fc_ctrl *ctrl, bool start_queues) 2470 { 2471 int q; 2472 2473 /* 2474 * if aborting io, the queues are no longer good, mark them 2475 * all as not live. 2476 */ 2477 if (ctrl->ctrl.queue_count > 1) { 2478 for (q = 1; q < ctrl->ctrl.queue_count; q++) 2479 clear_bit(NVME_FC_Q_LIVE, &ctrl->queues[q].flags); 2480 } 2481 clear_bit(NVME_FC_Q_LIVE, &ctrl->queues[0].flags); 2482 2483 /* 2484 * If io queues are present, stop them and terminate all outstanding 2485 * ios on them. As FC allocates FC exchange for each io, the 2486 * transport must contact the LLDD to terminate the exchange, 2487 * thus releasing the FC exchange.
We use blk_mq_tagset_busy_iter() 2488 * to tell us which ios are busy and invoke a transport routine 2489 * to kill them with the LLDD. After terminating the exchange 2490 * the LLDD will call the transport's normal io done path, but it 2491 * will have an aborted status. The done path will return the 2492 * io requests back to the block layer as part of normal completions 2493 * (but with error status). 2494 */ 2495 if (ctrl->ctrl.queue_count > 1) { 2496 nvme_quiesce_io_queues(&ctrl->ctrl); 2497 nvme_sync_io_queues(&ctrl->ctrl); 2498 blk_mq_tagset_busy_iter(&ctrl->tag_set, 2499 nvme_fc_terminate_exchange, &ctrl->ctrl); 2500 blk_mq_tagset_wait_completed_request(&ctrl->tag_set); 2501 if (start_queues) 2502 nvme_unquiesce_io_queues(&ctrl->ctrl); 2503 } 2504 2505 /* 2506 * Other transports, which don't have link-level contexts bound 2507 * to sqe's, would try to gracefully shutdown the controller by 2508 * writing the registers for shutdown and polling (call 2509 * nvme_shutdown_ctrl()). Given a bunch of i/o was potentially 2510 * just aborted and we will wait on those contexts, and given 2511 * there was no indication of how live the controller is on the 2512 * link, don't send more io to create more contexts for the 2513 * shutdown. Let the controller fail via keepalive failure if 2514 * it's still present. 2515 */ 2516 2517 /* 2518 * clean up the admin queue. Same thing as above. 2519 */ 2520 nvme_quiesce_admin_queue(&ctrl->ctrl); 2521 blk_sync_queue(ctrl->ctrl.admin_q); 2522 blk_mq_tagset_busy_iter(&ctrl->admin_tag_set, 2523 nvme_fc_terminate_exchange, &ctrl->ctrl); 2524 blk_mq_tagset_wait_completed_request(&ctrl->admin_tag_set); 2525 if (start_queues) 2526 nvme_unquiesce_admin_queue(&ctrl->ctrl); 2527 } 2528 2529 static void 2530 nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg) 2531 { 2532 /* 2533 * if an error (io timeout, etc.) occurred while (re)connecting, if 2534 * the remote port requested termination of the association 2535 * (disconnect_ls), or if an error (timeout or abort) occurred on 2536 * an io while creating the controller, abort any ios on the 2537 * association and let the create_association error path resolve things. 2538 */ 2539 if (ctrl->ctrl.state == NVME_CTRL_CONNECTING) { 2540 __nvme_fc_abort_outstanding_ios(ctrl, true); 2541 set_bit(ASSOC_FAILED, &ctrl->flags); 2542 return; 2543 } 2544 2545 /* Otherwise, only proceed if in LIVE state - e.g. on first error */ 2546 if (ctrl->ctrl.state != NVME_CTRL_LIVE) 2547 return; 2548 2549 dev_warn(ctrl->ctrl.device, 2550 "NVME-FC{%d}: transport association event: %s\n", 2551 ctrl->cnum, errmsg); 2552 dev_warn(ctrl->ctrl.device, 2553 "NVME-FC{%d}: resetting controller\n", ctrl->cnum); 2554 2555 nvme_reset_ctrl(&ctrl->ctrl); 2556 } 2557 2558 static enum blk_eh_timer_return nvme_fc_timeout(struct request *rq) 2559 { 2560 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq); 2561 struct nvme_fc_ctrl *ctrl = op->ctrl; 2562 struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu; 2563 struct nvme_command *sqe = &cmdiu->sqe; 2564 2565 /* 2566 * Attempt to abort the offending command. Command completion 2567 * will detect the aborted io and will fail the connection.
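 *
 * For illustration only -- a minimal sketch of the LLDD ->fcp_abort()
 * handler that __nvme_fc_abort_op() ends up invoking (foo_* names are
 * hypothetical; the callback signature is the transport template's):
 *
 *	static void foo_lldd_fcp_abort(struct nvme_fc_local_port *localport,
 *			struct nvme_fc_remote_port *remoteport,
 *			void *hw_queue_handle, struct nvmefc_fcp_req *freq)
 *	{
 *		struct foo_exch *exch = freq->private;
 *
 *		foo_hw_send_abts(exch);
 *	}
 *
 * where foo_hw_send_abts() stands in for the hardware-specific ABTS
 * machinery; the io still completes through the normal done path,
 * just with an aborted status.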
2568 */ 2569 dev_info(ctrl->ctrl.device, 2570 "NVME-FC{%d.%d}: io timeout: opcode %d fctype %d w10/11: " 2571 "x%08x/x%08x\n", 2572 ctrl->cnum, op->queue->qnum, sqe->common.opcode, 2573 sqe->connect.fctype, sqe->common.cdw10, sqe->common.cdw11); 2574 if (__nvme_fc_abort_op(ctrl, op)) 2575 nvme_fc_error_recovery(ctrl, "io timeout abort failed"); 2576 2577 /* 2578 * the io abort has been initiated. Have the reset timer 2579 * restarted and the abort completion will complete the io 2580 * shortly. Avoids a synchronous wait while the abort finishes. 2581 */ 2582 return BLK_EH_RESET_TIMER; 2583 } 2584 2585 static int 2586 nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq, 2587 struct nvme_fc_fcp_op *op) 2588 { 2589 struct nvmefc_fcp_req *freq = &op->fcp_req; 2590 int ret; 2591 2592 freq->sg_cnt = 0; 2593 2594 if (!blk_rq_nr_phys_segments(rq)) 2595 return 0; 2596 2597 freq->sg_table.sgl = freq->first_sgl; 2598 ret = sg_alloc_table_chained(&freq->sg_table, 2599 blk_rq_nr_phys_segments(rq), freq->sg_table.sgl, 2600 NVME_INLINE_SG_CNT); 2601 if (ret) 2602 return -ENOMEM; 2603 2604 op->nents = blk_rq_map_sg(rq->q, rq, freq->sg_table.sgl); 2605 WARN_ON(op->nents > blk_rq_nr_phys_segments(rq)); 2606 freq->sg_cnt = fc_dma_map_sg(ctrl->lport->dev, freq->sg_table.sgl, 2607 op->nents, rq_dma_dir(rq)); 2608 if (unlikely(freq->sg_cnt <= 0)) { 2609 sg_free_table_chained(&freq->sg_table, NVME_INLINE_SG_CNT); 2610 freq->sg_cnt = 0; 2611 return -EFAULT; 2612 } 2613 2614 /* 2615 * TODO: blk_integrity_rq(rq) for DIF 2616 */ 2617 return 0; 2618 } 2619 2620 static void 2621 nvme_fc_unmap_data(struct nvme_fc_ctrl *ctrl, struct request *rq, 2622 struct nvme_fc_fcp_op *op) 2623 { 2624 struct nvmefc_fcp_req *freq = &op->fcp_req; 2625 2626 if (!freq->sg_cnt) 2627 return; 2628 2629 fc_dma_unmap_sg(ctrl->lport->dev, freq->sg_table.sgl, op->nents, 2630 rq_dma_dir(rq)); 2631 2632 sg_free_table_chained(&freq->sg_table, NVME_INLINE_SG_CNT); 2633 2634 freq->sg_cnt = 0; 2635 } 2636 2637 /* 2638 * In FC, the queue is a logical thing. At transport connect, the target 2639 * creates its "queue" and returns a handle that the host gives back to 2640 * the target whenever it posts something to the corresponding SQ. When an 2641 * SQE is sent on a SQ, FC effectively considers the SQE, or rather the 2642 * command contained within the SQE, an io, and assigns a FC exchange 2643 * to it. The SQE and the associated SQ handle are sent in the initial 2644 * CMD IU sent on the exchange. All transfers relative to the io occur 2645 * as part of the exchange. The CQE is the last thing for the io, 2646 * which is transferred (explicitly or implicitly) with the RSP IU 2647 * sent on the exchange. After the CQE is received, the FC exchange is 2648 * terminated and the exchange may be used on a different io. 2649 * 2650 * The transport-to-LLDD api has the transport make a request for a 2651 * new fcp io to the LLDD. The LLDD then allocates a FC exchange 2652 * resource and transfers the command. The LLDD will then process all 2653 * steps to complete the io. Upon completion, the transport done routine 2654 * is called. 2655 * 2656 * So - while the operation is outstanding to the LLDD, there is a link 2657 * level FC exchange resource that is also outstanding. This must be 2658 * considered in all cleanup operations.
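 *
 * A minimal sketch (invented foo_* names, not a real LLDD) of the
 * ->fcp_io() entry point the transport invokes below, matching the
 * abort/done callbacks sketched earlier:
 *
 *	static int foo_lldd_fcp_io(struct nvme_fc_local_port *localport,
 *			struct nvme_fc_remote_port *remoteport,
 *			void *hw_queue_handle, struct nvmefc_fcp_req *freq)
 *	{
 *		struct foo_exch *exch = freq->private;
 *
 *		exch->fcp_req = freq;
 *		if (!foo_hw_alloc_exchange(exch, hw_queue_handle))
 *			return -EBUSY;
 *		return foo_hw_start_io(exch);
 *	}
 *
 * freq->private points at the per-request LLDD area the transport
 * allocated per the template's fcprqst_priv_sz; returning -EBUSY gets
 * the io requeued rather than failed (see the error handling at the
 * bottom of nvme_fc_start_fcp_op()).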
2659 */ 2660 static blk_status_t 2661 nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue, 2662 struct nvme_fc_fcp_op *op, u32 data_len, 2663 enum nvmefc_fcp_datadir io_dir) 2664 { 2665 struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu; 2666 struct nvme_command *sqe = &cmdiu->sqe; 2667 int ret, opstate; 2668 2669 /* 2670 * before attempting to send the io, check to see if we believe 2671 * the target device is present 2672 */ 2673 if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE) 2674 return BLK_STS_RESOURCE; 2675 2676 if (!nvme_fc_ctrl_get(ctrl)) 2677 return BLK_STS_IOERR; 2678 2679 /* format the FC-NVME CMD IU and fcp_req */ 2680 cmdiu->connection_id = cpu_to_be64(queue->connection_id); 2681 cmdiu->data_len = cpu_to_be32(data_len); 2682 switch (io_dir) { 2683 case NVMEFC_FCP_WRITE: 2684 cmdiu->flags = FCNVME_CMD_FLAGS_WRITE; 2685 break; 2686 case NVMEFC_FCP_READ: 2687 cmdiu->flags = FCNVME_CMD_FLAGS_READ; 2688 break; 2689 case NVMEFC_FCP_NODATA: 2690 cmdiu->flags = 0; 2691 break; 2692 } 2693 op->fcp_req.payload_length = data_len; 2694 op->fcp_req.io_dir = io_dir; 2695 op->fcp_req.transferred_length = 0; 2696 op->fcp_req.rcv_rsplen = 0; 2697 op->fcp_req.status = NVME_SC_SUCCESS; 2698 op->fcp_req.sqid = cpu_to_le16(queue->qnum); 2699 2700 /* 2701 * validate per fabric rules, set fields mandated by fabric spec 2702 * as well as those by FC-NVME spec. 2703 */ 2704 WARN_ON_ONCE(sqe->common.metadata); 2705 sqe->common.flags |= NVME_CMD_SGL_METABUF; 2706 2707 /* 2708 * format SQE DPTR field per FC-NVME rules: 2709 * type=0x5 Transport SGL Data Block Descriptor 2710 * subtype=0xA Transport-specific value 2711 * address=0 2712 * length=length of the data series 2713 */ 2714 sqe->rw.dptr.sgl.type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) | 2715 NVME_SGL_FMT_TRANSPORT_A; 2716 sqe->rw.dptr.sgl.length = cpu_to_le32(data_len); 2717 sqe->rw.dptr.sgl.addr = 0; 2718 2719 if (!(op->flags & FCOP_FLAGS_AEN)) { 2720 ret = nvme_fc_map_data(ctrl, op->rq, op); 2721 if (ret < 0) { 2722 nvme_cleanup_cmd(op->rq); 2723 nvme_fc_ctrl_put(ctrl); 2724 if (ret == -ENOMEM || ret == -EAGAIN) 2725 return BLK_STS_RESOURCE; 2726 return BLK_STS_IOERR; 2727 } 2728 } 2729 2730 fc_dma_sync_single_for_device(ctrl->lport->dev, op->fcp_req.cmddma, 2731 sizeof(op->cmd_iu), DMA_TO_DEVICE); 2732 2733 atomic_set(&op->state, FCPOP_STATE_ACTIVE); 2734 2735 if (!(op->flags & FCOP_FLAGS_AEN)) 2736 blk_mq_start_request(op->rq); 2737 2738 cmdiu->csn = cpu_to_be32(atomic_inc_return(&queue->csn)); 2739 ret = ctrl->lport->ops->fcp_io(&ctrl->lport->localport, 2740 &ctrl->rport->remoteport, 2741 queue->lldd_handle, &op->fcp_req); 2742 2743 if (ret) { 2744 /* 2745 * If the lld fails to send the command, is there an issue with 2746 * the csn value? If the command that fails is the Connect, 2747 * no - as the connection won't be live. If it is a command 2748 * post-connect, it's possible a gap in csn may be created. 2749 * Does this matter? As Linux initiators don't send fused 2750 * commands, no. The gap would exist, but as there's nothing 2751 * that depends on csn order to be delivered on the target 2752 * side, it shouldn't hurt. It would be difficult for a 2753 * target to even detect the csn gap as it has no idea when the 2754 * cmd with the csn was supposed to arrive.
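 *
 * Concretely: if csn 1 and 2 have been sent and the send of csn 3
 * fails here, the next command goes out as csn 4 and the target
 * simply sees the sequence 1, 2, 4 -- harmless, per the reasoning
 * above, since nothing orders delivery by csn on the target side.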
2755 */ 2756 opstate = atomic_xchg(&op->state, FCPOP_STATE_COMPLETE); 2757 __nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate); 2758 2759 if (!(op->flags & FCOP_FLAGS_AEN)) { 2760 nvme_fc_unmap_data(ctrl, op->rq, op); 2761 nvme_cleanup_cmd(op->rq); 2762 } 2763 2764 nvme_fc_ctrl_put(ctrl); 2765 2766 if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE && 2767 ret != -EBUSY) 2768 return BLK_STS_IOERR; 2769 2770 return BLK_STS_RESOURCE; 2771 } 2772 2773 return BLK_STS_OK; 2774 } 2775 2776 static blk_status_t 2777 nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx, 2778 const struct blk_mq_queue_data *bd) 2779 { 2780 struct nvme_ns *ns = hctx->queue->queuedata; 2781 struct nvme_fc_queue *queue = hctx->driver_data; 2782 struct nvme_fc_ctrl *ctrl = queue->ctrl; 2783 struct request *rq = bd->rq; 2784 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq); 2785 enum nvmefc_fcp_datadir io_dir; 2786 bool queue_ready = test_bit(NVME_FC_Q_LIVE, &queue->flags); 2787 u32 data_len; 2788 blk_status_t ret; 2789 2790 if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE || 2791 !nvme_check_ready(&queue->ctrl->ctrl, rq, queue_ready)) 2792 return nvme_fail_nonready_command(&queue->ctrl->ctrl, rq); 2793 2794 ret = nvme_setup_cmd(ns, rq); 2795 if (ret) 2796 return ret; 2797 2798 /* 2799 * nvme core doesn't quite treat the rq opaquely. Commands such 2800 * as WRITE ZEROES will return a non-zero rq payload_bytes yet 2801 * there is no actual payload to be transferred. 2802 * To get it right, key data transmission on there being 1 or 2803 * more physical segments in the sg list. If there are no 2804 * physical segments, there is no payload. 2805 */ 2806 if (blk_rq_nr_phys_segments(rq)) { 2807 data_len = blk_rq_payload_bytes(rq); 2808 io_dir = ((rq_data_dir(rq) == WRITE) ? 2809 NVMEFC_FCP_WRITE : NVMEFC_FCP_READ); 2810 } else { 2811 data_len = 0; 2812 io_dir = NVMEFC_FCP_NODATA; 2813 } 2814 2815 2816 return nvme_fc_start_fcp_op(ctrl, queue, op, data_len, io_dir); 2817 } 2818 2819 static void 2820 nvme_fc_submit_async_event(struct nvme_ctrl *arg) 2821 { 2822 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(arg); 2823 struct nvme_fc_fcp_op *aen_op; 2824 blk_status_t ret; 2825 2826 if (test_bit(FCCTRL_TERMIO, &ctrl->flags)) 2827 return; 2828 2829 aen_op = &ctrl->aen_ops[0]; 2830 2831 ret = nvme_fc_start_fcp_op(ctrl, aen_op->queue, aen_op, 0, 2832 NVMEFC_FCP_NODATA); 2833 if (ret) 2834 dev_err(ctrl->ctrl.device, 2835 "failed async event work\n"); 2836 } 2837 2838 static void 2839 nvme_fc_complete_rq(struct request *rq) 2840 { 2841 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq); 2842 struct nvme_fc_ctrl *ctrl = op->ctrl; 2843 2844 atomic_set(&op->state, FCPOP_STATE_IDLE); 2845 op->flags &= ~FCOP_FLAGS_TERMIO; 2846 2847 nvme_fc_unmap_data(ctrl, rq, op); 2848 nvme_complete_rq(rq); 2849 nvme_fc_ctrl_put(ctrl); 2850 } 2851 2852 static void nvme_fc_map_queues(struct blk_mq_tag_set *set) 2853 { 2854 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(set->driver_data); 2855 int i; 2856 2857 for (i = 0; i < set->nr_maps; i++) { 2858 struct blk_mq_queue_map *map = &set->map[i]; 2859 2860 if (!map->nr_queues) { 2861 WARN_ON(i == HCTX_TYPE_DEFAULT); 2862 continue; 2863 } 2864 2865 /* Call LLDD map queue functionality if defined */ 2866 if (ctrl->lport->ops->map_queues) 2867 ctrl->lport->ops->map_queues(&ctrl->lport->localport, 2868 map); 2869 else 2870 blk_mq_map_queues(map); 2871 } 2872 } 2873 2874 static const struct blk_mq_ops nvme_fc_mq_ops = { 2875 .queue_rq = nvme_fc_queue_rq, 2876 .complete = nvme_fc_complete_rq, 2877 .init_request =
nvme_fc_init_request, 2878 .exit_request = nvme_fc_exit_request, 2879 .init_hctx = nvme_fc_init_hctx, 2880 .timeout = nvme_fc_timeout, 2881 .map_queues = nvme_fc_map_queues, 2882 }; 2883 2884 static int 2885 nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl) 2886 { 2887 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts; 2888 unsigned int nr_io_queues; 2889 int ret; 2890 2891 nr_io_queues = min(min(opts->nr_io_queues, num_online_cpus()), 2892 ctrl->lport->ops->max_hw_queues); 2893 ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues); 2894 if (ret) { 2895 dev_info(ctrl->ctrl.device, 2896 "set_queue_count failed: %d\n", ret); 2897 return ret; 2898 } 2899 2900 ctrl->ctrl.queue_count = nr_io_queues + 1; 2901 if (!nr_io_queues) 2902 return 0; 2903 2904 nvme_fc_init_io_queues(ctrl); 2905 2906 ret = nvme_alloc_io_tag_set(&ctrl->ctrl, &ctrl->tag_set, 2907 &nvme_fc_mq_ops, BLK_MQ_F_SHOULD_MERGE, 2908 struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv, 2909 ctrl->lport->ops->fcprqst_priv_sz)); 2910 if (ret) 2911 return ret; 2912 2913 ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1); 2914 if (ret) 2915 goto out_cleanup_tagset; 2916 2917 ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1); 2918 if (ret) 2919 goto out_delete_hw_queues; 2920 2921 ctrl->ioq_live = true; 2922 2923 return 0; 2924 2925 out_delete_hw_queues: 2926 nvme_fc_delete_hw_io_queues(ctrl); 2927 out_cleanup_tagset: 2928 nvme_remove_io_tag_set(&ctrl->ctrl); 2929 nvme_fc_free_io_queues(ctrl); 2930 2931 /* force put free routine to ignore io queues */ 2932 ctrl->ctrl.tagset = NULL; 2933 2934 return ret; 2935 } 2936 2937 static int 2938 nvme_fc_recreate_io_queues(struct nvme_fc_ctrl *ctrl) 2939 { 2940 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts; 2941 u32 prior_ioq_cnt = ctrl->ctrl.queue_count - 1; 2942 unsigned int nr_io_queues; 2943 int ret; 2944 2945 nr_io_queues = min(min(opts->nr_io_queues, num_online_cpus()), 2946 ctrl->lport->ops->max_hw_queues); 2947 ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues); 2948 if (ret) { 2949 dev_info(ctrl->ctrl.device, 2950 "set_queue_count failed: %d\n", ret); 2951 return ret; 2952 } 2953 2954 if (!nr_io_queues && prior_ioq_cnt) { 2955 dev_info(ctrl->ctrl.device, 2956 "Fail Reconnect: At least 1 io queue " 2957 "required (was %d)\n", prior_ioq_cnt); 2958 return -ENOSPC; 2959 } 2960 2961 ctrl->ctrl.queue_count = nr_io_queues + 1; 2962 /* check for io queues existing */ 2963 if (ctrl->ctrl.queue_count == 1) 2964 return 0; 2965 2966 if (prior_ioq_cnt != nr_io_queues) { 2967 dev_info(ctrl->ctrl.device, 2968 "reconnect: revising io queue count from %d to %d\n", 2969 prior_ioq_cnt, nr_io_queues); 2970 blk_mq_update_nr_hw_queues(&ctrl->tag_set, nr_io_queues); 2971 } 2972 2973 ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1); 2974 if (ret) 2975 goto out_free_io_queues; 2976 2977 ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1); 2978 if (ret) 2979 goto out_delete_hw_queues; 2980 2981 return 0; 2982 2983 out_delete_hw_queues: 2984 nvme_fc_delete_hw_io_queues(ctrl); 2985 out_free_io_queues: 2986 nvme_fc_free_io_queues(ctrl); 2987 return ret; 2988 } 2989 2990 static void 2991 nvme_fc_rport_active_on_lport(struct nvme_fc_rport *rport) 2992 { 2993 struct nvme_fc_lport *lport = rport->lport; 2994 2995 atomic_inc(&lport->act_rport_cnt); 2996 } 2997 2998 static void 2999 nvme_fc_rport_inactive_on_lport(struct nvme_fc_rport *rport) 3000 { 3001 struct nvme_fc_lport *lport = rport->lport; 3002 u32 cnt; 3003 3004 cnt = atomic_dec_return(&lport->act_rport_cnt); 
3005 if (cnt == 0 && lport->localport.port_state == FC_OBJSTATE_DELETED) 3006 lport->ops->localport_delete(&lport->localport); 3007 } 3008 3009 static int 3010 nvme_fc_ctlr_active_on_rport(struct nvme_fc_ctrl *ctrl) 3011 { 3012 struct nvme_fc_rport *rport = ctrl->rport; 3013 u32 cnt; 3014 3015 if (test_and_set_bit(ASSOC_ACTIVE, &ctrl->flags)) 3016 return 1; 3017 3018 cnt = atomic_inc_return(&rport->act_ctrl_cnt); 3019 if (cnt == 1) 3020 nvme_fc_rport_active_on_lport(rport); 3021 3022 return 0; 3023 } 3024 3025 static int 3026 nvme_fc_ctlr_inactive_on_rport(struct nvme_fc_ctrl *ctrl) 3027 { 3028 struct nvme_fc_rport *rport = ctrl->rport; 3029 struct nvme_fc_lport *lport = rport->lport; 3030 u32 cnt; 3031 3032 /* clearing of ctrl->flags ASSOC_ACTIVE bit is in association delete */ 3033 3034 cnt = atomic_dec_return(&rport->act_ctrl_cnt); 3035 if (cnt == 0) { 3036 if (rport->remoteport.port_state == FC_OBJSTATE_DELETED) 3037 lport->ops->remoteport_delete(&rport->remoteport); 3038 nvme_fc_rport_inactive_on_lport(rport); 3039 } 3040 3041 return 0; 3042 } 3043 3044 /* 3045 * This routine restarts the controller on the host side, and 3046 * on the link side, recreates the controller association. 3047 */ 3048 static int 3049 nvme_fc_create_association(struct nvme_fc_ctrl *ctrl) 3050 { 3051 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts; 3052 struct nvmefc_ls_rcv_op *disls = NULL; 3053 unsigned long flags; 3054 int ret; 3055 bool changed; 3056 3057 ++ctrl->ctrl.nr_reconnects; 3058 3059 if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE) 3060 return -ENODEV; 3061 3062 if (nvme_fc_ctlr_active_on_rport(ctrl)) 3063 return -ENOTUNIQ; 3064 3065 dev_info(ctrl->ctrl.device, 3066 "NVME-FC{%d}: create association : host wwpn 0x%016llx " 3067 " rport wwpn 0x%016llx: NQN \"%s\"\n", 3068 ctrl->cnum, ctrl->lport->localport.port_name, 3069 ctrl->rport->remoteport.port_name, ctrl->ctrl.opts->subsysnqn); 3070 3071 clear_bit(ASSOC_FAILED, &ctrl->flags); 3072 3073 /* 3074 * Create the admin queue 3075 */ 3076 3077 ret = __nvme_fc_create_hw_queue(ctrl, &ctrl->queues[0], 0, 3078 NVME_AQ_DEPTH); 3079 if (ret) 3080 goto out_free_queue; 3081 3082 ret = nvme_fc_connect_admin_queue(ctrl, &ctrl->queues[0], 3083 NVME_AQ_DEPTH, (NVME_AQ_DEPTH / 4)); 3084 if (ret) 3085 goto out_delete_hw_queue; 3086 3087 ret = nvmf_connect_admin_queue(&ctrl->ctrl); 3088 if (ret) 3089 goto out_disconnect_admin_queue; 3090 3091 set_bit(NVME_FC_Q_LIVE, &ctrl->queues[0].flags); 3092 3093 /* 3094 * Check controller capabilities 3095 * 3096 * todo:- add code to check if ctrl attributes changed from 3097 * prior connection values 3098 */ 3099 3100 ret = nvme_enable_ctrl(&ctrl->ctrl); 3101 if (ret || test_bit(ASSOC_FAILED, &ctrl->flags)) 3102 goto out_disconnect_admin_queue; 3103 3104 ctrl->ctrl.max_segments = ctrl->lport->ops->max_sgl_segments; 3105 ctrl->ctrl.max_hw_sectors = ctrl->ctrl.max_segments << 3106 (ilog2(SZ_4K) - 9); 3107 3108 nvme_unquiesce_admin_queue(&ctrl->ctrl); 3109 3110 ret = nvme_init_ctrl_finish(&ctrl->ctrl, false); 3111 if (ret || test_bit(ASSOC_FAILED, &ctrl->flags)) 3112 goto out_disconnect_admin_queue; 3113 3114 /* sanity checks */ 3115 3116 /* FC-NVME does not have other data in the capsule */ 3117 if (ctrl->ctrl.icdoff) { 3118 dev_err(ctrl->ctrl.device, "icdoff %d is not supported!\n", 3119 ctrl->ctrl.icdoff); 3120 ret = NVME_SC_INVALID_FIELD | NVME_SC_DNR; 3121 goto out_disconnect_admin_queue; 3122 } 3123 3124 /* FC-NVME supports normal SGL Data Block Descriptors */ 3125 if (!nvme_ctrl_sgl_supported(&ctrl->ctrl)) 
{ 3126 dev_err(ctrl->ctrl.device, 3127 "Mandatory sgls are not supported!\n"); 3128 ret = NVME_SC_INVALID_FIELD | NVME_SC_DNR; 3129 goto out_disconnect_admin_queue; 3130 } 3131 3132 if (opts->queue_size > ctrl->ctrl.maxcmd) { 3133 /* warn if maxcmd is lower than queue_size */ 3134 dev_warn(ctrl->ctrl.device, 3135 "queue_size %zu > ctrl maxcmd %u, reducing " 3136 "to maxcmd\n", 3137 opts->queue_size, ctrl->ctrl.maxcmd); 3138 opts->queue_size = ctrl->ctrl.maxcmd; 3139 ctrl->ctrl.sqsize = opts->queue_size - 1; 3140 } 3141 3142 ret = nvme_fc_init_aen_ops(ctrl); 3143 if (ret) 3144 goto out_term_aen_ops; 3145 3146 /* 3147 * Create the io queues 3148 */ 3149 3150 if (ctrl->ctrl.queue_count > 1) { 3151 if (!ctrl->ioq_live) 3152 ret = nvme_fc_create_io_queues(ctrl); 3153 else 3154 ret = nvme_fc_recreate_io_queues(ctrl); 3155 } 3156 if (ret || test_bit(ASSOC_FAILED, &ctrl->flags)) 3157 goto out_term_aen_ops; 3158 3159 changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE); 3160 3161 ctrl->ctrl.nr_reconnects = 0; 3162 3163 if (changed) 3164 nvme_start_ctrl(&ctrl->ctrl); 3165 3166 return 0; /* Success */ 3167 3168 out_term_aen_ops: 3169 nvme_fc_term_aen_ops(ctrl); 3170 out_disconnect_admin_queue: 3171 /* send a Disconnect(association) LS to fc-nvme target */ 3172 nvme_fc_xmt_disconnect_assoc(ctrl); 3173 spin_lock_irqsave(&ctrl->lock, flags); 3174 ctrl->association_id = 0; 3175 disls = ctrl->rcv_disconn; 3176 ctrl->rcv_disconn = NULL; 3177 spin_unlock_irqrestore(&ctrl->lock, flags); 3178 if (disls) 3179 nvme_fc_xmt_ls_rsp(disls); 3180 out_delete_hw_queue: 3181 __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0); 3182 out_free_queue: 3183 nvme_fc_free_queue(&ctrl->queues[0]); 3184 clear_bit(ASSOC_ACTIVE, &ctrl->flags); 3185 nvme_fc_ctlr_inactive_on_rport(ctrl); 3186 3187 return ret; 3188 } 3189 3190 3191 /* 3192 * This routine stops operation of the controller on the host side. 3193 * On the host os stack side: Admin and IO queues are stopped, 3194 * outstanding ios on them terminated via FC ABTS. 3195 * On the link side: the association is terminated. 3196 */ 3197 static void 3198 nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl) 3199 { 3200 struct nvmefc_ls_rcv_op *disls = NULL; 3201 unsigned long flags; 3202 3203 if (!test_and_clear_bit(ASSOC_ACTIVE, &ctrl->flags)) 3204 return; 3205 3206 spin_lock_irqsave(&ctrl->lock, flags); 3207 set_bit(FCCTRL_TERMIO, &ctrl->flags); 3208 ctrl->iocnt = 0; 3209 spin_unlock_irqrestore(&ctrl->lock, flags); 3210 3211 __nvme_fc_abort_outstanding_ios(ctrl, false); 3212 3213 /* kill the aens as they are a separate path */ 3214 nvme_fc_abort_aen_ops(ctrl); 3215 3216 /* wait for all io that had to be aborted */ 3217 spin_lock_irq(&ctrl->lock); 3218 wait_event_lock_irq(ctrl->ioabort_wait, ctrl->iocnt == 0, ctrl->lock); 3219 clear_bit(FCCTRL_TERMIO, &ctrl->flags); 3220 spin_unlock_irq(&ctrl->lock); 3221 3222 nvme_fc_term_aen_ops(ctrl); 3223 3224 /* 3225 * send a Disconnect(association) LS to fc-nvme target 3226 * Note: could have been sent at top of process, but 3227 * cleaner on link traffic if after the aborts complete. 
3228 * Note: if association doesn't exist, association_id will be 0 3229 */ 3230 if (ctrl->association_id) 3231 nvme_fc_xmt_disconnect_assoc(ctrl); 3232 3233 spin_lock_irqsave(&ctrl->lock, flags); 3234 ctrl->association_id = 0; 3235 disls = ctrl->rcv_disconn; 3236 ctrl->rcv_disconn = NULL; 3237 spin_unlock_irqrestore(&ctrl->lock, flags); 3238 if (disls) 3239 /* 3240 * if a Disconnect Request was waiting for a response, send 3241 * it now that all ABTS's have been issued (and are complete). 3242 */ 3243 nvme_fc_xmt_ls_rsp(disls); 3244 3245 if (ctrl->ctrl.tagset) { 3246 nvme_fc_delete_hw_io_queues(ctrl); 3247 nvme_fc_free_io_queues(ctrl); 3248 } 3249 3250 __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0); 3251 nvme_fc_free_queue(&ctrl->queues[0]); 3252 3253 /* re-enable the admin_q so anything new can fast fail */ 3254 nvme_unquiesce_admin_queue(&ctrl->ctrl); 3255 3256 /* resume the io queues so that things will fast fail */ 3257 nvme_unquiesce_io_queues(&ctrl->ctrl); 3258 3259 nvme_fc_ctlr_inactive_on_rport(ctrl); 3260 } 3261 3262 static void 3263 nvme_fc_delete_ctrl(struct nvme_ctrl *nctrl) 3264 { 3265 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl); 3266 3267 cancel_work_sync(&ctrl->ioerr_work); 3268 cancel_delayed_work_sync(&ctrl->connect_work); 3269 /* 3270 * kill the association on the link side. this will block 3271 * waiting for io to terminate 3272 */ 3273 nvme_fc_delete_association(ctrl); 3274 } 3275 3276 static void 3277 nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status) 3278 { 3279 struct nvme_fc_rport *rport = ctrl->rport; 3280 struct nvme_fc_remote_port *portptr = &rport->remoteport; 3281 unsigned long recon_delay = ctrl->ctrl.opts->reconnect_delay * HZ; 3282 bool recon = true; 3283 3284 if (ctrl->ctrl.state != NVME_CTRL_CONNECTING) 3285 return; 3286 3287 if (portptr->port_state == FC_OBJSTATE_ONLINE) { 3288 dev_info(ctrl->ctrl.device, 3289 "NVME-FC{%d}: reset: Reconnect attempt failed (%d)\n", 3290 ctrl->cnum, status); 3291 if (status > 0 && (status & NVME_SC_DNR)) 3292 recon = false; 3293 } else if (time_after_eq(jiffies, rport->dev_loss_end)) 3294 recon = false; 3295 3296 if (recon && nvmf_should_reconnect(&ctrl->ctrl)) { 3297 if (portptr->port_state == FC_OBJSTATE_ONLINE) 3298 dev_info(ctrl->ctrl.device, 3299 "NVME-FC{%d}: Reconnect attempt in %ld " 3300 "seconds\n", 3301 ctrl->cnum, recon_delay / HZ); 3302 else if (time_after(jiffies + recon_delay, rport->dev_loss_end)) 3303 recon_delay = rport->dev_loss_end - jiffies; 3304 3305 queue_delayed_work(nvme_wq, &ctrl->connect_work, recon_delay); 3306 } else { 3307 if (portptr->port_state == FC_OBJSTATE_ONLINE) { 3308 if (status > 0 && (status & NVME_SC_DNR)) 3309 dev_warn(ctrl->ctrl.device, 3310 "NVME-FC{%d}: reconnect failure\n", 3311 ctrl->cnum); 3312 else 3313 dev_warn(ctrl->ctrl.device, 3314 "NVME-FC{%d}: Max reconnect attempts " 3315 "(%d) reached.\n", 3316 ctrl->cnum, ctrl->ctrl.nr_reconnects); 3317 } else 3318 dev_warn(ctrl->ctrl.device, 3319 "NVME-FC{%d}: dev_loss_tmo (%d) expired " 3320 "while waiting for remoteport connectivity.\n", 3321 ctrl->cnum, min_t(int, portptr->dev_loss_tmo, 3322 (ctrl->ctrl.opts->max_reconnects * 3323 ctrl->ctrl.opts->reconnect_delay))); 3324 WARN_ON(nvme_delete_ctrl(&ctrl->ctrl)); 3325 } 3326 } 3327 3328 static void 3329 nvme_fc_reset_ctrl_work(struct work_struct *work) 3330 { 3331 struct nvme_fc_ctrl *ctrl = 3332 container_of(work, struct nvme_fc_ctrl, ctrl.reset_work); 3333 3334 nvme_stop_ctrl(&ctrl->ctrl); 3335 3336 /* will block while waiting for io to terminate */ 3337
nvme_fc_delete_association(ctrl); 3338 3339 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) 3340 dev_err(ctrl->ctrl.device, 3341 "NVME-FC{%d}: error_recovery: Couldn't change state " 3342 "to CONNECTING\n", ctrl->cnum); 3343 3344 if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE) { 3345 if (!queue_delayed_work(nvme_wq, &ctrl->connect_work, 0)) { 3346 dev_err(ctrl->ctrl.device, 3347 "NVME-FC{%d}: failed to schedule connect " 3348 "after reset\n", ctrl->cnum); 3349 } else { 3350 flush_delayed_work(&ctrl->connect_work); 3351 } 3352 } else { 3353 nvme_fc_reconnect_or_delete(ctrl, -ENOTCONN); 3354 } 3355 } 3356 3357 3358 static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = { 3359 .name = "fc", 3360 .module = THIS_MODULE, 3361 .flags = NVME_F_FABRICS, 3362 .reg_read32 = nvmf_reg_read32, 3363 .reg_read64 = nvmf_reg_read64, 3364 .reg_write32 = nvmf_reg_write32, 3365 .free_ctrl = nvme_fc_nvme_ctrl_freed, 3366 .submit_async_event = nvme_fc_submit_async_event, 3367 .delete_ctrl = nvme_fc_delete_ctrl, 3368 .get_address = nvmf_get_address, 3369 }; 3370 3371 static void 3372 nvme_fc_connect_ctrl_work(struct work_struct *work) 3373 { 3374 int ret; 3375 3376 struct nvme_fc_ctrl *ctrl = 3377 container_of(to_delayed_work(work), 3378 struct nvme_fc_ctrl, connect_work); 3379 3380 ret = nvme_fc_create_association(ctrl); 3381 if (ret) 3382 nvme_fc_reconnect_or_delete(ctrl, ret); 3383 else 3384 dev_info(ctrl->ctrl.device, 3385 "NVME-FC{%d}: controller connect complete\n", 3386 ctrl->cnum); 3387 } 3388 3389 3390 static const struct blk_mq_ops nvme_fc_admin_mq_ops = { 3391 .queue_rq = nvme_fc_queue_rq, 3392 .complete = nvme_fc_complete_rq, 3393 .init_request = nvme_fc_init_request, 3394 .exit_request = nvme_fc_exit_request, 3395 .init_hctx = nvme_fc_init_admin_hctx, 3396 .timeout = nvme_fc_timeout, 3397 }; 3398 3399 3400 /* 3401 * Fails a controller request if it matches an existing controller 3402 * (association) with the same tuple: 3403 * <Host NQN, Host ID, local FC port, remote FC port, SUBSYS NQN> 3404 * 3405 * The ports don't need to be compared as they are intrinsically 3406 * already matched by the port pointers supplied. 
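 *
 * In practice this is what makes a second, otherwise-identical
 * "nvme connect" to the same lport/rport pair fail with -EALREADY
 * unless a duplicate-connect option was given (which sets
 * opts->duplicate_connect, checked in nvme_fc_init_ctrl() below).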
3407 */ 3408 static bool 3409 nvme_fc_existing_controller(struct nvme_fc_rport *rport, 3410 struct nvmf_ctrl_options *opts) 3411 { 3412 struct nvme_fc_ctrl *ctrl; 3413 unsigned long flags; 3414 bool found = false; 3415 3416 spin_lock_irqsave(&rport->lock, flags); 3417 list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) { 3418 found = nvmf_ctlr_matches_baseopts(&ctrl->ctrl, opts); 3419 if (found) 3420 break; 3421 } 3422 spin_unlock_irqrestore(&rport->lock, flags); 3423 3424 return found; 3425 } 3426 3427 static struct nvme_ctrl * 3428 nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts, 3429 struct nvme_fc_lport *lport, struct nvme_fc_rport *rport) 3430 { 3431 struct nvme_fc_ctrl *ctrl; 3432 unsigned long flags; 3433 int ret, idx, ctrl_loss_tmo; 3434 3435 if (!(rport->remoteport.port_role & 3436 (FC_PORT_ROLE_NVME_DISCOVERY | FC_PORT_ROLE_NVME_TARGET))) { 3437 ret = -EBADR; 3438 goto out_fail; 3439 } 3440 3441 if (!opts->duplicate_connect && 3442 nvme_fc_existing_controller(rport, opts)) { 3443 ret = -EALREADY; 3444 goto out_fail; 3445 } 3446 3447 ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL); 3448 if (!ctrl) { 3449 ret = -ENOMEM; 3450 goto out_fail; 3451 } 3452 3453 idx = ida_alloc(&nvme_fc_ctrl_cnt, GFP_KERNEL); 3454 if (idx < 0) { 3455 ret = -ENOSPC; 3456 goto out_free_ctrl; 3457 } 3458 3459 /* 3460 * if ctrl_loss_tmo is being enforced and the default reconnect delay 3461 * is being used, change to a shorter reconnect delay for FC. 3462 */ 3463 if (opts->max_reconnects != -1 && 3464 opts->reconnect_delay == NVMF_DEF_RECONNECT_DELAY && 3465 opts->reconnect_delay > NVME_FC_DEFAULT_RECONNECT_TMO) { 3466 ctrl_loss_tmo = opts->max_reconnects * opts->reconnect_delay; 3467 opts->reconnect_delay = NVME_FC_DEFAULT_RECONNECT_TMO; 3468 opts->max_reconnects = DIV_ROUND_UP(ctrl_loss_tmo, 3469 opts->reconnect_delay); 3470 } 3471 3472 ctrl->ctrl.opts = opts; 3473 ctrl->ctrl.nr_reconnects = 0; 3474 if (lport->dev) 3475 ctrl->ctrl.numa_node = dev_to_node(lport->dev); 3476 else 3477 ctrl->ctrl.numa_node = NUMA_NO_NODE; 3478 INIT_LIST_HEAD(&ctrl->ctrl_list); 3479 ctrl->lport = lport; 3480 ctrl->rport = rport; 3481 ctrl->dev = lport->dev; 3482 ctrl->cnum = idx; 3483 ctrl->ioq_live = false; 3484 init_waitqueue_head(&ctrl->ioabort_wait); 3485 3486 get_device(ctrl->dev); 3487 kref_init(&ctrl->ref); 3488 3489 INIT_WORK(&ctrl->ctrl.reset_work, nvme_fc_reset_ctrl_work); 3490 INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work); 3491 INIT_WORK(&ctrl->ioerr_work, nvme_fc_ctrl_ioerr_work); 3492 spin_lock_init(&ctrl->lock); 3493 3494 /* io queue count */ 3495 ctrl->ctrl.queue_count = min_t(unsigned int, 3496 opts->nr_io_queues, 3497 lport->ops->max_hw_queues); 3498 ctrl->ctrl.queue_count++; /* +1 for admin queue */ 3499 3500 ctrl->ctrl.sqsize = opts->queue_size - 1; 3501 ctrl->ctrl.kato = opts->kato; 3502 ctrl->ctrl.cntlid = 0xffff; 3503 3504 ret = -ENOMEM; 3505 ctrl->queues = kcalloc(ctrl->ctrl.queue_count, 3506 sizeof(struct nvme_fc_queue), GFP_KERNEL); 3507 if (!ctrl->queues) 3508 goto out_free_ida; 3509 3510 nvme_fc_init_queue(ctrl, 0); 3511 3512 ret = nvme_alloc_admin_tag_set(&ctrl->ctrl, &ctrl->admin_tag_set, 3513 &nvme_fc_admin_mq_ops, BLK_MQ_F_NO_SCHED, 3514 struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv, 3515 ctrl->lport->ops->fcprqst_priv_sz)); 3516 if (ret) 3517 goto out_free_queues; 3518 3519 /* 3520 * Would have been nice to init io queues tag set as well. 3521 * However, we require interaction from the controller 3522 * for max io queue count before we can do so. 
3523 * Defer this to the connect path. 3524 */ 3525 3526 ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_fc_ctrl_ops, 0); 3527 if (ret) 3528 goto out_cleanup_tagset; 3529 3530 /* at this point, teardown path changes to ref counting on nvme ctrl */ 3531 3532 spin_lock_irqsave(&rport->lock, flags); 3533 list_add_tail(&ctrl->ctrl_list, &rport->ctrl_list); 3534 spin_unlock_irqrestore(&rport->lock, flags); 3535 3536 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING) || 3537 !nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) { 3538 dev_err(ctrl->ctrl.device, 3539 "NVME-FC{%d}: failed to init ctrl state\n", ctrl->cnum); 3540 goto fail_ctrl; 3541 } 3542 3543 if (!queue_delayed_work(nvme_wq, &ctrl->connect_work, 0)) { 3544 dev_err(ctrl->ctrl.device, 3545 "NVME-FC{%d}: failed to schedule initial connect\n", 3546 ctrl->cnum); 3547 goto fail_ctrl; 3548 } 3549 3550 flush_delayed_work(&ctrl->connect_work); 3551 3552 dev_info(ctrl->ctrl.device, 3553 "NVME-FC{%d}: new ctrl: NQN \"%s\"\n", 3554 ctrl->cnum, nvmf_ctrl_subsysnqn(&ctrl->ctrl)); 3555 3556 return &ctrl->ctrl; 3557 3558 fail_ctrl: 3559 nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING); 3560 cancel_work_sync(&ctrl->ioerr_work); 3561 cancel_work_sync(&ctrl->ctrl.reset_work); 3562 cancel_delayed_work_sync(&ctrl->connect_work); 3563 3564 ctrl->ctrl.opts = NULL; 3565 3566 /* initiate nvme ctrl ref counting teardown */ 3567 nvme_uninit_ctrl(&ctrl->ctrl); 3568 3569 /* Remove core ctrl ref. */ 3570 nvme_put_ctrl(&ctrl->ctrl); 3571 3572 /* as we're past the point where we transition to the ref 3573 * counting teardown path, if we return a bad pointer here, 3574 * the calling routine, thinking it's prior to the 3575 * transition, will do an rport put. Since the teardown 3576 * path also does a rport put, we do an extra get here 3577 * so proper order/teardown happens. 3578 */ 3579 nvme_fc_rport_get(rport); 3580 3581 return ERR_PTR(-EIO); 3582 3583 out_cleanup_tagset: 3584 nvme_remove_admin_tag_set(&ctrl->ctrl); 3585 out_free_queues: 3586 kfree(ctrl->queues); 3587 out_free_ida: 3588 put_device(ctrl->dev); 3589 ida_free(&nvme_fc_ctrl_cnt, ctrl->cnum); 3590 out_free_ctrl: 3591 kfree(ctrl); 3592 out_fail: 3593 /* exit via here doesn't follow ctlr ref points */ 3594 return ERR_PTR(ret); 3595 } 3596 3597 3598 struct nvmet_fc_traddr { 3599 u64 nn; 3600 u64 pn; 3601 }; 3602 3603 static int 3604 __nvme_fc_parse_u64(substring_t *sstr, u64 *val) 3605 { 3606 u64 token64; 3607 3608 if (match_u64(sstr, &token64)) 3609 return -EINVAL; 3610 *val = token64; 3611 3612 return 0; 3613 } 3614 3615 /* 3616 * This routine validates and extracts the WWNs from the TRADDR string. 3617 * As kernel parsers need the 0x to determine number base, universally 3618 * build string to parse with 0x prefix before parsing name strings.
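 *
 * For example, both accepted forms of a traddr parse to the same
 * WWNN/WWPN pair (the values shown are made up):
 *
 *	nn-0x200000109b123456:pn-0x100000109b123456
 *	nn-200000109b123456:pn-100000109b123456
 *
 * In the second form, the "0x" is prepended here before match_u64()
 * is handed the 16 hex digits.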
3619 */ 3620 static int 3621 nvme_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf, size_t blen) 3622 { 3623 char name[2 + NVME_FC_TRADDR_HEXNAMELEN + 1]; 3624 substring_t wwn = { name, &name[sizeof(name)-1] }; 3625 int nnoffset, pnoffset; 3626 3627 /* validate if string is one of the 2 allowed formats */ 3628 if (strnlen(buf, blen) == NVME_FC_TRADDR_MAXLENGTH && 3629 !strncmp(buf, "nn-0x", NVME_FC_TRADDR_OXNNLEN) && 3630 !strncmp(&buf[NVME_FC_TRADDR_MAX_PN_OFFSET], 3631 "pn-0x", NVME_FC_TRADDR_OXNNLEN)) { 3632 nnoffset = NVME_FC_TRADDR_OXNNLEN; 3633 pnoffset = NVME_FC_TRADDR_MAX_PN_OFFSET + 3634 NVME_FC_TRADDR_OXNNLEN; 3635 } else if ((strnlen(buf, blen) == NVME_FC_TRADDR_MINLENGTH && 3636 !strncmp(buf, "nn-", NVME_FC_TRADDR_NNLEN) && 3637 !strncmp(&buf[NVME_FC_TRADDR_MIN_PN_OFFSET], 3638 "pn-", NVME_FC_TRADDR_NNLEN))) { 3639 nnoffset = NVME_FC_TRADDR_NNLEN; 3640 pnoffset = NVME_FC_TRADDR_MIN_PN_OFFSET + NVME_FC_TRADDR_NNLEN; 3641 } else 3642 goto out_einval; 3643 3644 name[0] = '0'; 3645 name[1] = 'x'; 3646 name[2 + NVME_FC_TRADDR_HEXNAMELEN] = 0; 3647 3648 memcpy(&name[2], &buf[nnoffset], NVME_FC_TRADDR_HEXNAMELEN); 3649 if (__nvme_fc_parse_u64(&wwn, &traddr->nn)) 3650 goto out_einval; 3651 3652 memcpy(&name[2], &buf[pnoffset], NVME_FC_TRADDR_HEXNAMELEN); 3653 if (__nvme_fc_parse_u64(&wwn, &traddr->pn)) 3654 goto out_einval; 3655 3656 return 0; 3657 3658 out_einval: 3659 pr_warn("%s: bad traddr string\n", __func__); 3660 return -EINVAL; 3661 } 3662 3663 static struct nvme_ctrl * 3664 nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts) 3665 { 3666 struct nvme_fc_lport *lport; 3667 struct nvme_fc_rport *rport; 3668 struct nvme_ctrl *ctrl; 3669 struct nvmet_fc_traddr laddr = { 0L, 0L }; 3670 struct nvmet_fc_traddr raddr = { 0L, 0L }; 3671 unsigned long flags; 3672 int ret; 3673 3674 ret = nvme_fc_parse_traddr(&raddr, opts->traddr, NVMF_TRADDR_SIZE); 3675 if (ret || !raddr.nn || !raddr.pn) 3676 return ERR_PTR(-EINVAL); 3677 3678 ret = nvme_fc_parse_traddr(&laddr, opts->host_traddr, NVMF_TRADDR_SIZE); 3679 if (ret || !laddr.nn || !laddr.pn) 3680 return ERR_PTR(-EINVAL); 3681 3682 /* find the host and remote ports to connect together */ 3683 spin_lock_irqsave(&nvme_fc_lock, flags); 3684 list_for_each_entry(lport, &nvme_fc_lport_list, port_list) { 3685 if (lport->localport.node_name != laddr.nn || 3686 lport->localport.port_name != laddr.pn || 3687 lport->localport.port_state != FC_OBJSTATE_ONLINE) 3688 continue; 3689 3690 list_for_each_entry(rport, &lport->endp_list, endp_list) { 3691 if (rport->remoteport.node_name != raddr.nn || 3692 rport->remoteport.port_name != raddr.pn || 3693 rport->remoteport.port_state != FC_OBJSTATE_ONLINE) 3694 continue; 3695 3696 /* if fail to get reference fall through. 
static struct nvme_ctrl *
nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts)
{
	struct nvme_fc_lport *lport;
	struct nvme_fc_rport *rport;
	struct nvme_ctrl *ctrl;
	struct nvmet_fc_traddr laddr = { 0L, 0L };
	struct nvmet_fc_traddr raddr = { 0L, 0L };
	unsigned long flags;
	int ret;

	ret = nvme_fc_parse_traddr(&raddr, opts->traddr, NVMF_TRADDR_SIZE);
	if (ret || !raddr.nn || !raddr.pn)
		return ERR_PTR(-EINVAL);

	ret = nvme_fc_parse_traddr(&laddr, opts->host_traddr, NVMF_TRADDR_SIZE);
	if (ret || !laddr.nn || !laddr.pn)
		return ERR_PTR(-EINVAL);

	/* find the host and remote ports to connect together */
	spin_lock_irqsave(&nvme_fc_lock, flags);
	list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
		if (lport->localport.node_name != laddr.nn ||
		    lport->localport.port_name != laddr.pn ||
		    lport->localport.port_state != FC_OBJSTATE_ONLINE)
			continue;

		list_for_each_entry(rport, &lport->endp_list, endp_list) {
			if (rport->remoteport.node_name != raddr.nn ||
			    rport->remoteport.port_name != raddr.pn ||
			    rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
				continue;

			/* if we fail to get a reference, fall through;
			 * we will error out below.
			 */
			if (!nvme_fc_rport_get(rport))
				break;

			spin_unlock_irqrestore(&nvme_fc_lock, flags);

			ctrl = nvme_fc_init_ctrl(dev, opts, lport, rport);
			if (IS_ERR(ctrl))
				nvme_fc_rport_put(rport);
			return ctrl;
		}
	}
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	pr_warn("%s: %s - %s combination not found\n",
		__func__, opts->traddr, opts->host_traddr);
	return ERR_PTR(-ENOENT);
}


static struct nvmf_transport_ops nvme_fc_transport = {
	.name		= "fc",
	.module		= THIS_MODULE,
	.required_opts	= NVMF_OPT_TRADDR | NVMF_OPT_HOST_TRADDR,
	.allowed_opts	= NVMF_OPT_RECONNECT_DELAY | NVMF_OPT_CTRL_LOSS_TMO,
	.create_ctrl	= nvme_fc_create_ctrl,
};

/* Arbitrary successive-failure max. With lots of subsystems could be high */
#define DISCOVERY_MAX_FAIL	20

static ssize_t nvme_fc_nvme_discovery_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	unsigned long flags;
	LIST_HEAD(local_disc_list);
	struct nvme_fc_lport *lport;
	struct nvme_fc_rport *rport;
	int failcnt = 0;

	spin_lock_irqsave(&nvme_fc_lock, flags);
restart:
	list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
		list_for_each_entry(rport, &lport->endp_list, endp_list) {
			if (!nvme_fc_lport_get(lport))
				continue;
			if (!nvme_fc_rport_get(rport)) {
				/*
				 * This is a temporary condition. Upon restart
				 * this rport will be gone from the list.
				 *
				 * Revert the lport put and retry. Anything
				 * already added to the list will be skipped
				 * (as it is no longer list_empty). The loops
				 * should resume at rports that were not yet
				 * seen.
				 */
				nvme_fc_lport_put(lport);

				if (failcnt++ < DISCOVERY_MAX_FAIL)
					goto restart;

				pr_err("nvme_discovery: too many reference failures\n");
				goto process_local_list;
			}
			if (list_empty(&rport->disc_list))
				list_add_tail(&rport->disc_list,
					      &local_disc_list);
		}
	}

process_local_list:
	while (!list_empty(&local_disc_list)) {
		rport = list_first_entry(&local_disc_list,
					 struct nvme_fc_rport, disc_list);
		list_del_init(&rport->disc_list);
		spin_unlock_irqrestore(&nvme_fc_lock, flags);

		lport = rport->lport;
		/* signal discovery. Won't hurt if it repeats */
		nvme_fc_signal_discovery_scan(lport, rport);
		nvme_fc_rport_put(rport);
		nvme_fc_lport_put(lport);

		spin_lock_irqsave(&nvme_fc_lock, flags);
	}
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	return count;
}

static DEVICE_ATTR(nvme_discovery, 0200, NULL, nvme_fc_nvme_discovery_store);

#ifdef CONFIG_BLK_CGROUP_FC_APPID
/* Parse the cgroup id from the buffer and return the length of the cgrpid */
static int fc_parse_cgrpid(const char *buf, u64 *id)
{
	char cgrp_id[16 + 1];
	int cgrpid_len, j;

	memset(cgrp_id, 0x0, sizeof(cgrp_id));
	for (cgrpid_len = 0, j = 0; cgrpid_len < 17; cgrpid_len++) {
		if (buf[cgrpid_len] != ':') {
			cgrp_id[cgrpid_len] = buf[cgrpid_len];
		} else {
			j = 1;
			break;
		}
	}
	if (!j)
		return -EINVAL;
	if (kstrtou64(cgrp_id, 16, id) < 0)
		return -EINVAL;
	return cgrpid_len;
}
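
/*
 * Worked example (hypothetical values): for an input buffer of
 * "75bbe53988b1d9d2:dbase_app", fc_parse_cgrpid() copies the 16 hex
 * digits before the ':' into a local buffer, converts them with
 * kstrtou64(..., 16, ...) so that *id == 0x75bbe53988b1d9d2, and
 * returns a cgrpid_len of 16. A buffer with no ':' within its first
 * 17 bytes fails with -EINVAL.
 */
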
/*
 * Parse and update the appid in the blkcg associated with the cgroupid.
 */
static ssize_t fc_appid_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	size_t orig_count = count;
	u64 cgrp_id;
	int appid_len = 0;
	int cgrpid_len = 0;
	char app_id[FC_APPID_LEN];
	int ret = 0;

	if (buf[count - 1] == '\n')
		count--;

	if ((count > (16 + 1 + FC_APPID_LEN)) || (!strchr(buf, ':')))
		return -EINVAL;

	cgrpid_len = fc_parse_cgrpid(buf, &cgrp_id);
	if (cgrpid_len < 0)
		return -EINVAL;
	appid_len = count - cgrpid_len - 1;
	if (appid_len > FC_APPID_LEN)
		return -EINVAL;

	memset(app_id, 0x0, sizeof(app_id));
	memcpy(app_id, &buf[cgrpid_len + 1], appid_len);
	ret = blkcg_set_fc_appid(app_id, cgrp_id, sizeof(app_id));
	if (ret < 0)
		return ret;
	return orig_count;
}
static DEVICE_ATTR(appid_store, 0200, NULL, fc_appid_store);
#endif /* CONFIG_BLK_CGROUP_FC_APPID */

static struct attribute *nvme_fc_attrs[] = {
	&dev_attr_nvme_discovery.attr,
#ifdef CONFIG_BLK_CGROUP_FC_APPID
	&dev_attr_appid_store.attr,
#endif
	NULL
};

static const struct attribute_group nvme_fc_attr_group = {
	.attrs = nvme_fc_attrs,
};

static const struct attribute_group *nvme_fc_attr_groups[] = {
	&nvme_fc_attr_group,
	NULL
};

static struct class fc_class = {
	.name = "fc",
	.dev_groups = nvme_fc_attr_groups,
	.owner = THIS_MODULE,
};
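
/*
 * Usage sketch from userspace (paths assume the class/device created in
 * nvme_fc_init_module() below): writing anything to the discovery
 * attribute rescans all known rports, and the appid attribute takes a
 * "<16-hex-digit cgroupid>:<appid>" pair. The cgroupid/appid values
 * shown here are hypothetical.
 *
 *	echo 1 > /sys/class/fc/fc_udev_device/nvme_discovery
 *	echo "75bbe53988b1d9d2:dbase_app" > \
 *		/sys/class/fc/fc_udev_device/appid_store
 */
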
static int __init nvme_fc_init_module(void)
{
	int ret;

	nvme_fc_wq = alloc_workqueue("nvme_fc_wq", WQ_MEM_RECLAIM, 0);
	if (!nvme_fc_wq)
		return -ENOMEM;

	/*
	 * NOTE:
	 * It is expected that in the future the kernel will combine
	 * the FC-isms that are currently under SCSI and those now being
	 * added by NVMe into a new standalone FC class. The SCSI and
	 * NVMe protocols and their devices would live under this new
	 * FC class.
	 *
	 * As we need something to post FC-specific udev events to,
	 * specifically for nvme probe events, start by creating the
	 * new device class. When the new standalone FC class is
	 * put in place, this code will move to a more generic
	 * location for the class.
	 */
	ret = class_register(&fc_class);
	if (ret) {
		pr_err("couldn't register class fc\n");
		goto out_destroy_wq;
	}

	/*
	 * Create a device for the FC-centric udev events
	 */
	fc_udev_device = device_create(&fc_class, NULL, MKDEV(0, 0), NULL,
					"fc_udev_device");
	if (IS_ERR(fc_udev_device)) {
		pr_err("couldn't create fc_udev device!\n");
		ret = PTR_ERR(fc_udev_device);
		goto out_destroy_class;
	}

	ret = nvmf_register_transport(&nvme_fc_transport);
	if (ret)
		goto out_destroy_device;

	return 0;

out_destroy_device:
	device_destroy(&fc_class, MKDEV(0, 0));
out_destroy_class:
	class_unregister(&fc_class);
out_destroy_wq:
	destroy_workqueue(nvme_fc_wq);

	return ret;
}

static void
nvme_fc_delete_controllers(struct nvme_fc_rport *rport)
{
	struct nvme_fc_ctrl *ctrl;

	spin_lock(&rport->lock);
	list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) {
		dev_warn(ctrl->ctrl.device,
			"NVME-FC{%d}: transport unloading: deleting ctrl\n",
			ctrl->cnum);
		nvme_delete_ctrl(&ctrl->ctrl);
	}
	spin_unlock(&rport->lock);
}

static void
nvme_fc_cleanup_for_unload(void)
{
	struct nvme_fc_lport *lport;
	struct nvme_fc_rport *rport;

	list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
		list_for_each_entry(rport, &lport->endp_list, endp_list) {
			nvme_fc_delete_controllers(rport);
		}
	}
}

static void __exit nvme_fc_exit_module(void)
{
	unsigned long flags;
	bool need_cleanup = false;

	spin_lock_irqsave(&nvme_fc_lock, flags);
	nvme_fc_waiting_to_unload = true;
	if (!list_empty(&nvme_fc_lport_list)) {
		need_cleanup = true;
		nvme_fc_cleanup_for_unload();
	}
	spin_unlock_irqrestore(&nvme_fc_lock, flags);
	if (need_cleanup) {
		pr_info("%s: waiting for ctrl deletes\n", __func__);
		wait_for_completion(&nvme_fc_unload_proceed);
		pr_info("%s: ctrl deletes complete\n", __func__);
	}

	nvmf_unregister_transport(&nvme_fc_transport);

	ida_destroy(&nvme_fc_local_port_cnt);
	ida_destroy(&nvme_fc_ctrl_cnt);

	device_destroy(&fc_class, MKDEV(0, 0));
	class_unregister(&fc_class);
	destroy_workqueue(nvme_fc_wq);
}

module_init(nvme_fc_init_module);
module_exit(nvme_fc_exit_module);

MODULE_LICENSE("GPL v2");
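
/*
 * Unload flow note (a sketch of the interaction above): "modprobe -r
 * nvme_fc" enters nvme_fc_exit_module(), which flags
 * nvme_fc_waiting_to_unload and schedules deletion of every controller
 * still attached to a remote port. The final lport teardown is then
 * expected to complete(&nvme_fc_unload_proceed), releasing the wait
 * before the transport unregisters and the class and workqueue are
 * torn down.
 */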