// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016 Avago Technologies.  All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/parser.h>
#include <uapi/scsi/fc/fc_fs.h>
#include <uapi/scsi/fc/fc_els.h>
#include <linux/delay.h>
#include <linux/overflow.h>

#include "nvme.h"
#include "fabrics.h"
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>
#include <scsi/scsi_transport_fc.h>

/* *************************** Data Structures/Defines ****************** */


enum nvme_fc_queue_flags {
	NVME_FC_Q_CONNECTED = 0,
	NVME_FC_Q_LIVE,
};

#define NVME_FC_DEFAULT_DEV_LOSS_TMO	60	/* seconds */

struct nvme_fc_queue {
	struct nvme_fc_ctrl *ctrl;
	struct device *dev;
	struct blk_mq_hw_ctx *hctx;
	void *lldd_handle;
	size_t cmnd_capsule_len;
	u32 qnum;
	u32 rqcnt;
	u32 seqno;

	u64 connection_id;
	atomic_t csn;

	unsigned long flags;
} __aligned(sizeof(u64));	/* alignment for other things alloc'd with */

enum nvme_fcop_flags {
	FCOP_FLAGS_TERMIO	= (1 << 0),
	FCOP_FLAGS_AEN		= (1 << 1),
};

struct nvmefc_ls_req_op {
	struct nvmefc_ls_req ls_req;

	struct nvme_fc_rport *rport;
	struct nvme_fc_queue *queue;
	struct request *rq;
	u32 flags;

	int ls_error;
	struct completion ls_done;
	struct list_head lsreq_list;	/* rport->ls_req_list */
	bool req_queued;
};

enum nvme_fcpop_state {
	FCPOP_STATE_UNINIT	= 0,
	FCPOP_STATE_IDLE	= 1,
	FCPOP_STATE_ACTIVE	= 2,
	FCPOP_STATE_ABORTED	= 3,
	FCPOP_STATE_COMPLETE	= 4,
};

struct nvme_fc_fcp_op {
	/*
	 * nvme/host/core.c requires this to be the 1st element in the
	 * private structure associated with the request.
	 */
	struct nvme_request nreq;

	struct nvmefc_fcp_req fcp_req;

	struct nvme_fc_ctrl *ctrl;
	struct nvme_fc_queue *queue;
	struct request *rq;

	atomic_t state;
	u32 flags;
	u32 rqno;
	u32 nents;

	struct nvme_fc_cmd_iu cmd_iu;
	struct nvme_fc_ersp_iu rsp_iu;
};
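
/*
 * Per-request pdu layout: the base fcp op, an inline scatterlist for the
 * first SG_CHUNK_SIZE segments, then LLDD private space (sized from
 * ops->fcprqst_priv_sz when the blk-mq tag sets are allocated).
 */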
struct nvme_fcp_op_w_sgl {
	struct nvme_fc_fcp_op op;
	struct scatterlist sgl[SG_CHUNK_SIZE];
	uint8_t priv[0];
};

struct nvme_fc_lport {
	struct nvme_fc_local_port localport;

	struct ida endp_cnt;
	struct list_head port_list;	/* nvme_fc_lport_list */
	struct list_head endp_list;
	struct device *dev;		/* physical device for dma */
	struct nvme_fc_port_template *ops;
	struct kref ref;
	atomic_t act_rport_cnt;
} __aligned(sizeof(u64));	/* alignment for other things alloc'd with */

struct nvme_fc_rport {
	struct nvme_fc_remote_port remoteport;

	struct list_head endp_list;	/* for lport->endp_list */
	struct list_head ctrl_list;
	struct list_head ls_req_list;
	struct list_head disc_list;
	struct device *dev;		/* physical device for dma */
	struct nvme_fc_lport *lport;
	spinlock_t lock;
	struct kref ref;
	atomic_t act_ctrl_cnt;
	unsigned long dev_loss_end;
} __aligned(sizeof(u64));	/* alignment for other things alloc'd with */

enum nvme_fcctrl_flags {
	FCCTRL_TERMIO	= (1 << 0),
};

struct nvme_fc_ctrl {
	spinlock_t lock;
	struct nvme_fc_queue *queues;
	struct device *dev;
	struct nvme_fc_lport *lport;
	struct nvme_fc_rport *rport;
	u32 cnum;

	bool ioq_live;
	bool assoc_active;
	atomic_t err_work_active;
	u64 association_id;

	struct list_head ctrl_list;	/* rport->ctrl_list */

	struct blk_mq_tag_set admin_tag_set;
	struct blk_mq_tag_set tag_set;

	struct delayed_work connect_work;
	struct work_struct err_work;

	struct kref ref;
	u32 flags;
	u32 iocnt;
	wait_queue_head_t ioabort_wait;

	struct nvme_fc_fcp_op aen_ops[NVME_NR_AEN_COMMANDS];

	struct nvme_ctrl ctrl;
};

static inline struct nvme_fc_ctrl *
to_fc_ctrl(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_fc_ctrl, ctrl);
}

static inline struct nvme_fc_lport *
localport_to_lport(struct nvme_fc_local_port *portptr)
{
	return container_of(portptr, struct nvme_fc_lport, localport);
}

static inline struct nvme_fc_rport *
remoteport_to_rport(struct nvme_fc_remote_port *portptr)
{
	return container_of(portptr, struct nvme_fc_rport, remoteport);
}

static inline struct nvmefc_ls_req_op *
ls_req_to_lsop(struct nvmefc_ls_req *lsreq)
{
	return container_of(lsreq, struct nvmefc_ls_req_op, ls_req);
}

static inline struct nvme_fc_fcp_op *
fcp_req_to_fcp_op(struct nvmefc_fcp_req *fcpreq)
{
	return container_of(fcpreq, struct nvme_fc_fcp_op, fcp_req);
}


/* *************************** Globals **************************** */


static DEFINE_SPINLOCK(nvme_fc_lock);

static LIST_HEAD(nvme_fc_lport_list);
static DEFINE_IDA(nvme_fc_local_port_cnt);
static DEFINE_IDA(nvme_fc_ctrl_cnt);

static struct workqueue_struct *nvme_fc_wq;

/*
 * These items are short-term. They will eventually be moved into
 * a generic FC class. See comments in module init.
 */
static struct device *fc_udev_device;
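
/*
 * Note: fc_udev_device is the kobject against which the
 * FC_EVENT=nvmediscovery uevents built in nvme_fc_signal_discovery_scan()
 * are raised; udev rules typically react by initiating an NVMe discovery /
 * connect against the reported traddr pair.
 */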

/* *********************** FC-NVME Port Management ************************ */

static void __nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *,
			struct nvme_fc_queue *, unsigned int);

static void
nvme_fc_free_lport(struct kref *ref)
{
	struct nvme_fc_lport *lport =
		container_of(ref, struct nvme_fc_lport, ref);
	unsigned long flags;

	WARN_ON(lport->localport.port_state != FC_OBJSTATE_DELETED);
	WARN_ON(!list_empty(&lport->endp_list));

	/* remove from transport list */
	spin_lock_irqsave(&nvme_fc_lock, flags);
	list_del(&lport->port_list);
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	ida_simple_remove(&nvme_fc_local_port_cnt, lport->localport.port_num);
	ida_destroy(&lport->endp_cnt);

	put_device(lport->dev);

	kfree(lport);
}

static void
nvme_fc_lport_put(struct nvme_fc_lport *lport)
{
	kref_put(&lport->ref, nvme_fc_free_lport);
}

static int
nvme_fc_lport_get(struct nvme_fc_lport *lport)
{
	return kref_get_unless_zero(&lport->ref);
}


static struct nvme_fc_lport *
nvme_fc_attach_to_unreg_lport(struct nvme_fc_port_info *pinfo,
			struct nvme_fc_port_template *ops,
			struct device *dev)
{
	struct nvme_fc_lport *lport;
	unsigned long flags;

	spin_lock_irqsave(&nvme_fc_lock, flags);

	list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
		if (lport->localport.node_name != pinfo->node_name ||
		    lport->localport.port_name != pinfo->port_name)
			continue;

		if (lport->dev != dev) {
			lport = ERR_PTR(-EXDEV);
			goto out_done;
		}

		if (lport->localport.port_state != FC_OBJSTATE_DELETED) {
			lport = ERR_PTR(-EEXIST);
			goto out_done;
		}

		if (!nvme_fc_lport_get(lport)) {
			/*
			 * fails if ref cnt already 0. If so,
			 * act as if lport already deleted
			 */
			lport = NULL;
			goto out_done;
		}

		/* resume the lport */

		lport->ops = ops;
		lport->localport.port_role = pinfo->port_role;
		lport->localport.port_id = pinfo->port_id;
		lport->localport.port_state = FC_OBJSTATE_ONLINE;

		spin_unlock_irqrestore(&nvme_fc_lock, flags);

		return lport;
	}

	lport = NULL;

out_done:
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	return lport;
}

/**
 * nvme_fc_register_localport - transport entry point called by an
 *                              LLDD to register the existence of a NVME
 *                              host FC port.
 * @pinfo:    pointer to information about the port to be registered
 * @template: LLDD entrypoints and operational parameters for the port
 * @dev:      physical hardware device node port corresponds to. Will be
 *            used for DMA mappings
 * @portptr:  pointer to a local port pointer. Upon success, the routine
 *            will allocate a nvme_fc_local_port structure and place its
 *            address in the local port pointer. Upon failure, local port
 *            pointer will be set to 0.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvme_fc_register_localport(struct nvme_fc_port_info *pinfo,
			struct nvme_fc_port_template *template,
			struct device *dev,
			struct nvme_fc_local_port **portptr)
{
	struct nvme_fc_lport *newrec;
	unsigned long flags;
	int ret, idx;

	if (!template->localport_delete || !template->remoteport_delete ||
	    !template->ls_req || !template->fcp_io ||
	    !template->ls_abort || !template->fcp_abort ||
	    !template->max_hw_queues || !template->max_sgl_segments ||
	    !template->max_dif_sgl_segments || !template->dma_boundary) {
		ret = -EINVAL;
		goto out_reghost_failed;
	}

	/*
	 * look to see if there is already a localport that had been
	 * deregistered and in the process of waiting for all the
	 * references to fully be removed.  If the references haven't
	 * expired, we can simply re-enable the localport. Remoteports
	 * and controller reconnections should resume naturally.
	 */
	newrec = nvme_fc_attach_to_unreg_lport(pinfo, template, dev);

	/* found an lport, but something about its state is bad */
	if (IS_ERR(newrec)) {
		ret = PTR_ERR(newrec);
		goto out_reghost_failed;

	/* found existing lport, which was resumed */
	} else if (newrec) {
		*portptr = &newrec->localport;
		return 0;
	}

	/* nothing found - allocate a new localport struct */

	newrec = kmalloc((sizeof(*newrec) + template->local_priv_sz),
			 GFP_KERNEL);
	if (!newrec) {
		ret = -ENOMEM;
		goto out_reghost_failed;
	}

	idx = ida_simple_get(&nvme_fc_local_port_cnt, 0, 0, GFP_KERNEL);
	if (idx < 0) {
		ret = -ENOSPC;
		goto out_fail_kfree;
	}

	if (!get_device(dev) && dev) {
		ret = -ENODEV;
		goto out_ida_put;
	}

	INIT_LIST_HEAD(&newrec->port_list);
	INIT_LIST_HEAD(&newrec->endp_list);
	kref_init(&newrec->ref);
	atomic_set(&newrec->act_rport_cnt, 0);
	newrec->ops = template;
	newrec->dev = dev;
	ida_init(&newrec->endp_cnt);
	newrec->localport.private = &newrec[1];
	newrec->localport.node_name = pinfo->node_name;
	newrec->localport.port_name = pinfo->port_name;
	newrec->localport.port_role = pinfo->port_role;
	newrec->localport.port_id = pinfo->port_id;
	newrec->localport.port_state = FC_OBJSTATE_ONLINE;
	newrec->localport.port_num = idx;

	spin_lock_irqsave(&nvme_fc_lock, flags);
	list_add_tail(&newrec->port_list, &nvme_fc_lport_list);
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	if (dev)
		dma_set_seg_boundary(dev, template->dma_boundary);

	*portptr = &newrec->localport;
	return 0;

out_ida_put:
	ida_simple_remove(&nvme_fc_local_port_cnt, idx);
out_fail_kfree:
	kfree(newrec);
out_reghost_failed:
	*portptr = NULL;

	return ret;
}
EXPORT_SYMBOL_GPL(nvme_fc_register_localport);

/**
 * nvme_fc_unregister_localport - transport entry point called by an
 *                                LLDD to deregister/remove a previously
 *                                registered NVME host FC port.
 * @portptr: pointer to the (registered) local port that is to be deregistered.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvme_fc_unregister_localport(struct nvme_fc_local_port *portptr)
{
	struct nvme_fc_lport *lport = localport_to_lport(portptr);
	unsigned long flags;

	if (!portptr)
		return -EINVAL;

	spin_lock_irqsave(&nvme_fc_lock, flags);

	if (portptr->port_state != FC_OBJSTATE_ONLINE) {
		spin_unlock_irqrestore(&nvme_fc_lock, flags);
		return -EINVAL;
	}
	portptr->port_state = FC_OBJSTATE_DELETED;

	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	if (atomic_read(&lport->act_rport_cnt) == 0)
		lport->ops->localport_delete(&lport->localport);

	nvme_fc_lport_put(lport);

	return 0;
}
EXPORT_SYMBOL_GPL(nvme_fc_unregister_localport);

/*
 * TRADDR strings, per FC-NVME are fixed format:
 *     "nn-0x<16hexdigits>:pn-0x<16hexdigits>" - 43 characters
 * udev event will only differ by prefix of what field is
 * being specified:
 *     "NVMEFC_HOST_TRADDR=" or "NVMEFC_TRADDR=" - 19 max characters
 * 19 + 43 + null_fudge = 64 characters
 */
#define FCNVME_TRADDR_LENGTH		64

static void
nvme_fc_signal_discovery_scan(struct nvme_fc_lport *lport,
		struct nvme_fc_rport *rport)
{
	char hostaddr[FCNVME_TRADDR_LENGTH];	/* NVMEFC_HOST_TRADDR=...*/
	char tgtaddr[FCNVME_TRADDR_LENGTH];	/* NVMEFC_TRADDR=...*/
	char *envp[4] = { "FC_EVENT=nvmediscovery", hostaddr, tgtaddr, NULL };

	if (!(rport->remoteport.port_role & FC_PORT_ROLE_NVME_DISCOVERY))
		return;

	snprintf(hostaddr, sizeof(hostaddr),
		"NVMEFC_HOST_TRADDR=nn-0x%016llx:pn-0x%016llx",
		lport->localport.node_name, lport->localport.port_name);
	snprintf(tgtaddr, sizeof(tgtaddr),
		"NVMEFC_TRADDR=nn-0x%016llx:pn-0x%016llx",
		rport->remoteport.node_name, rport->remoteport.port_name);
	kobject_uevent_env(&fc_udev_device->kobj, KOBJ_CHANGE, envp);
}

static void
nvme_fc_free_rport(struct kref *ref)
{
	struct nvme_fc_rport *rport =
		container_of(ref, struct nvme_fc_rport, ref);
	struct nvme_fc_lport *lport =
		localport_to_lport(rport->remoteport.localport);
	unsigned long flags;

	WARN_ON(rport->remoteport.port_state != FC_OBJSTATE_DELETED);
	WARN_ON(!list_empty(&rport->ctrl_list));

	/* remove from lport list */
	spin_lock_irqsave(&nvme_fc_lock, flags);
	list_del(&rport->endp_list);
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	WARN_ON(!list_empty(&rport->disc_list));
	ida_simple_remove(&lport->endp_cnt, rport->remoteport.port_num);

	kfree(rport);

	nvme_fc_lport_put(lport);
}

static void
nvme_fc_rport_put(struct nvme_fc_rport *rport)
{
	kref_put(&rport->ref, nvme_fc_free_rport);
}

static int
nvme_fc_rport_get(struct nvme_fc_rport *rport)
{
	return kref_get_unless_zero(&rport->ref);
}

static void
nvme_fc_resume_controller(struct nvme_fc_ctrl *ctrl)
{
	switch (ctrl->ctrl.state) {
	case NVME_CTRL_NEW:
	case NVME_CTRL_CONNECTING:
		/*
		 * As all reconnects were suppressed, schedule a
		 * connect.
		 */
		dev_info(ctrl->ctrl.device,
			"NVME-FC{%d}: connectivity re-established. "
			"Attempting reconnect\n", ctrl->cnum);

		queue_delayed_work(nvme_wq, &ctrl->connect_work, 0);
		break;

	case NVME_CTRL_RESETTING:
		/*
		 * Controller is already in the process of terminating the
		 * association. No need to do anything further. The reconnect
		 * step will naturally occur after the reset completes.
		 */
		break;

	default:
		/* no action to take - let it delete */
		break;
	}
}

static struct nvme_fc_rport *
nvme_fc_attach_to_suspended_rport(struct nvme_fc_lport *lport,
				struct nvme_fc_port_info *pinfo)
{
	struct nvme_fc_rport *rport;
	struct nvme_fc_ctrl *ctrl;
	unsigned long flags;

	spin_lock_irqsave(&nvme_fc_lock, flags);

	list_for_each_entry(rport, &lport->endp_list, endp_list) {
		if (rport->remoteport.node_name != pinfo->node_name ||
		    rport->remoteport.port_name != pinfo->port_name)
			continue;

		if (!nvme_fc_rport_get(rport)) {
			rport = ERR_PTR(-ENOLCK);
			goto out_done;
		}

		spin_unlock_irqrestore(&nvme_fc_lock, flags);

		spin_lock_irqsave(&rport->lock, flags);

		/* has it been unregistered */
		if (rport->remoteport.port_state != FC_OBJSTATE_DELETED) {
			/* means lldd called us twice */
			spin_unlock_irqrestore(&rport->lock, flags);
			nvme_fc_rport_put(rport);
			return ERR_PTR(-ESTALE);
		}

		rport->remoteport.port_role = pinfo->port_role;
		rport->remoteport.port_id = pinfo->port_id;
		rport->remoteport.port_state = FC_OBJSTATE_ONLINE;
		rport->dev_loss_end = 0;

		/*
		 * kick off a reconnect attempt on all associations to the
		 * remote port. A successful reconnect will resume i/o.
		 */
		list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list)
			nvme_fc_resume_controller(ctrl);

		spin_unlock_irqrestore(&rport->lock, flags);

		return rport;
	}

	rport = NULL;

out_done:
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	return rport;
}

static inline void
__nvme_fc_set_dev_loss_tmo(struct nvme_fc_rport *rport,
			struct nvme_fc_port_info *pinfo)
{
	if (pinfo->dev_loss_tmo)
		rport->remoteport.dev_loss_tmo = pinfo->dev_loss_tmo;
	else
		rport->remoteport.dev_loss_tmo = NVME_FC_DEFAULT_DEV_LOSS_TMO;
}
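
/*
 * Note the asymmetry: at registration time a dev_loss_tmo of 0 from the
 * LLDD means "use the default" (see __nvme_fc_set_dev_loss_tmo() above),
 * whereas an explicit 0 set later via nvme_fc_set_remoteport_devloss()
 * means device loss is treated as immediate.
 */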

/**
 * nvme_fc_register_remoteport - transport entry point called by an
 *                               LLDD to register the existence of a NVME
 *                               subsystem FC port on its fabric.
 * @localport: pointer to the (registered) local port that the remote
 *             subsystem port is connected to.
 * @pinfo:     pointer to information about the port to be registered
 * @portptr:   pointer to a remote port pointer. Upon success, the routine
 *             will allocate a nvme_fc_remote_port structure and place its
 *             address in the remote port pointer. Upon failure, remote port
 *             pointer will be set to 0.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvme_fc_register_remoteport(struct nvme_fc_local_port *localport,
				struct nvme_fc_port_info *pinfo,
				struct nvme_fc_remote_port **portptr)
{
	struct nvme_fc_lport *lport = localport_to_lport(localport);
	struct nvme_fc_rport *newrec;
	unsigned long flags;
	int ret, idx;

	if (!nvme_fc_lport_get(lport)) {
		ret = -ESHUTDOWN;
		goto out_reghost_failed;
	}

	/*
	 * look to see if there is already a remoteport that is waiting
	 * for a reconnect (within dev_loss_tmo) with the same WWN's.
	 * If so, transition to it and reconnect.
	 */
	newrec = nvme_fc_attach_to_suspended_rport(lport, pinfo);

	/* found an rport, but something about its state is bad */
	if (IS_ERR(newrec)) {
		ret = PTR_ERR(newrec);
		goto out_lport_put;

	/* found existing rport, which was resumed */
	} else if (newrec) {
		nvme_fc_lport_put(lport);
		__nvme_fc_set_dev_loss_tmo(newrec, pinfo);
		nvme_fc_signal_discovery_scan(lport, newrec);
		*portptr = &newrec->remoteport;
		return 0;
	}

	/* nothing found - allocate a new remoteport struct */

	newrec = kmalloc((sizeof(*newrec) + lport->ops->remote_priv_sz),
			 GFP_KERNEL);
	if (!newrec) {
		ret = -ENOMEM;
		goto out_lport_put;
	}

	idx = ida_simple_get(&lport->endp_cnt, 0, 0, GFP_KERNEL);
	if (idx < 0) {
		ret = -ENOSPC;
		goto out_kfree_rport;
	}

	INIT_LIST_HEAD(&newrec->endp_list);
	INIT_LIST_HEAD(&newrec->ctrl_list);
	INIT_LIST_HEAD(&newrec->ls_req_list);
	INIT_LIST_HEAD(&newrec->disc_list);
	kref_init(&newrec->ref);
	atomic_set(&newrec->act_ctrl_cnt, 0);
	spin_lock_init(&newrec->lock);
	newrec->remoteport.localport = &lport->localport;
	newrec->dev = lport->dev;
	newrec->lport = lport;
	newrec->remoteport.private = &newrec[1];
	newrec->remoteport.port_role = pinfo->port_role;
	newrec->remoteport.node_name = pinfo->node_name;
	newrec->remoteport.port_name = pinfo->port_name;
	newrec->remoteport.port_id = pinfo->port_id;
	newrec->remoteport.port_state = FC_OBJSTATE_ONLINE;
	newrec->remoteport.port_num = idx;
	__nvme_fc_set_dev_loss_tmo(newrec, pinfo);

	spin_lock_irqsave(&nvme_fc_lock, flags);
	list_add_tail(&newrec->endp_list, &lport->endp_list);
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	nvme_fc_signal_discovery_scan(lport, newrec);

	*portptr = &newrec->remoteport;
	return 0;

out_kfree_rport:
	kfree(newrec);
out_lport_put:
	nvme_fc_lport_put(lport);
out_reghost_failed:
	*portptr = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_fc_register_remoteport);

static int
nvme_fc_abort_lsops(struct nvme_fc_rport *rport)
{
	struct nvmefc_ls_req_op *lsop;
	unsigned long flags;

restart:
	spin_lock_irqsave(&rport->lock, flags);

	list_for_each_entry(lsop, &rport->ls_req_list, lsreq_list) {
		if (!(lsop->flags & FCOP_FLAGS_TERMIO)) {
			lsop->flags |= FCOP_FLAGS_TERMIO;
			spin_unlock_irqrestore(&rport->lock, flags);
			rport->lport->ops->ls_abort(&rport->lport->localport,
						&rport->remoteport,
						&lsop->ls_req);
			goto restart;
		}
	}
	spin_unlock_irqrestore(&rport->lock, flags);

	return 0;
}

static void
nvme_fc_ctrl_connectivity_loss(struct nvme_fc_ctrl *ctrl)
{
	dev_info(ctrl->ctrl.device,
		"NVME-FC{%d}: controller connectivity lost. Awaiting "
		"Reconnect", ctrl->cnum);

	switch (ctrl->ctrl.state) {
	case NVME_CTRL_NEW:
	case NVME_CTRL_LIVE:
		/*
		 * Schedule a controller reset. The reset will terminate the
		 * association and schedule the reconnect timer. Reconnects
		 * will be attempted until either the ctlr_loss_tmo
		 * (max_retries * connect_delay) expires or the remoteport's
		 * dev_loss_tmo expires.
		 */
		if (nvme_reset_ctrl(&ctrl->ctrl)) {
			dev_warn(ctrl->ctrl.device,
				"NVME-FC{%d}: Couldn't schedule reset.\n",
				ctrl->cnum);
			nvme_delete_ctrl(&ctrl->ctrl);
		}
		break;

	case NVME_CTRL_CONNECTING:
		/*
		 * The association has already been terminated and the
		 * controller is attempting reconnects.  No need to do
		 * anything further. Reconnects will be attempted until
		 * either the ctlr_loss_tmo (max_retries * connect_delay)
		 * expires or the remoteport's dev_loss_tmo expires.
		 */
		break;

	case NVME_CTRL_RESETTING:
		/*
		 * Controller is already in the process of terminating the
		 * association. No need to do anything further. The reconnect
		 * step will kick in naturally after the association is
		 * terminated.
		 */
		break;

	case NVME_CTRL_DELETING:
	default:
		/* no action to take - let it delete */
		break;
	}
}

/**
 * nvme_fc_unregister_remoteport - transport entry point called by an
 *                                 LLDD to deregister/remove a previously
 *                                 registered NVME subsystem FC port.
 * @portptr: pointer to the (registered) remote port that is to be
 *           deregistered.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvme_fc_unregister_remoteport(struct nvme_fc_remote_port *portptr)
{
	struct nvme_fc_rport *rport = remoteport_to_rport(portptr);
	struct nvme_fc_ctrl *ctrl;
	unsigned long flags;

	if (!portptr)
		return -EINVAL;

	spin_lock_irqsave(&rport->lock, flags);

	if (portptr->port_state != FC_OBJSTATE_ONLINE) {
		spin_unlock_irqrestore(&rport->lock, flags);
		return -EINVAL;
	}
	portptr->port_state = FC_OBJSTATE_DELETED;

	rport->dev_loss_end = jiffies + (portptr->dev_loss_tmo * HZ);

	list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) {
		/* if dev_loss_tmo==0, dev loss is immediate */
		if (!portptr->dev_loss_tmo) {
			dev_warn(ctrl->ctrl.device,
				"NVME-FC{%d}: controller connectivity lost.\n",
				ctrl->cnum);
			nvme_delete_ctrl(&ctrl->ctrl);
		} else
			nvme_fc_ctrl_connectivity_loss(ctrl);
	}

	spin_unlock_irqrestore(&rport->lock, flags);

	nvme_fc_abort_lsops(rport);

	if (atomic_read(&rport->act_ctrl_cnt) == 0)
		rport->lport->ops->remoteport_delete(portptr);

	/*
	 * release the reference, which will allow, if all controllers
	 * go away, which should only occur after dev_loss_tmo occurs,
	 * for the rport to be torn down.
	 */
	nvme_fc_rport_put(rport);

	return 0;
}
EXPORT_SYMBOL_GPL(nvme_fc_unregister_remoteport);
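
/*
 * Unregistration above only marks the remoteport DELETED and stamps
 * dev_loss_end; controllers hold their rport reference and keep trying
 * to reconnect. The rport itself is freed by nvme_fc_free_rport() once
 * the final kref is dropped.
 */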

/**
 * nvme_fc_rescan_remoteport - transport entry point called by an
 *                             LLDD to request a nvme device rescan.
 * @remoteport: pointer to the (registered) remote port that is to be
 *              rescanned.
 *
 * Returns: N/A
 */
void
nvme_fc_rescan_remoteport(struct nvme_fc_remote_port *remoteport)
{
	struct nvme_fc_rport *rport = remoteport_to_rport(remoteport);

	nvme_fc_signal_discovery_scan(rport->lport, rport);
}
EXPORT_SYMBOL_GPL(nvme_fc_rescan_remoteport);

int
nvme_fc_set_remoteport_devloss(struct nvme_fc_remote_port *portptr,
			u32 dev_loss_tmo)
{
	struct nvme_fc_rport *rport = remoteport_to_rport(portptr);
	unsigned long flags;

	spin_lock_irqsave(&rport->lock, flags);

	if (portptr->port_state != FC_OBJSTATE_ONLINE) {
		spin_unlock_irqrestore(&rport->lock, flags);
		return -EINVAL;
	}

	/* a dev_loss_tmo of 0 (immediate) is allowed to be set */
	rport->remoteport.dev_loss_tmo = dev_loss_tmo;

	spin_unlock_irqrestore(&rport->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(nvme_fc_set_remoteport_devloss);


/* *********************** FC-NVME DMA Handling **************************** */

/*
 * The fcloop device passes in a NULL device pointer. Real LLD's will
 * pass in a valid device pointer. If NULL is passed to the dma mapping
 * routines, depending on the platform, it may or may not succeed, and
 * may crash.
 *
 * As such:
 * Wrap all the dma routines and check the dev pointer.
 *
 * For simple mappings (returning just a dma address), we'll noop them,
 * returning a dma address of 0.
 *
 * On more complex mappings (dma_map_sg), a pseudo routine fills
 * in the scatter list, setting all dma addresses to 0.
 */

static inline dma_addr_t
fc_dma_map_single(struct device *dev, void *ptr, size_t size,
		enum dma_data_direction dir)
{
	return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L;
}

static inline int
fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dev ? dma_mapping_error(dev, dma_addr) : 0;
}

static inline void
fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
	enum dma_data_direction dir)
{
	if (dev)
		dma_unmap_single(dev, addr, size, dir);
}

static inline void
fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	if (dev)
		dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void
fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	if (dev)
		dma_sync_single_for_device(dev, addr, size, dir);
}

/* pseudo dma_map_sg call */
static int
fc_map_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	WARN_ON(nents == 0 || sg[0].length == 0);

	for_each_sg(sg, s, nents, i) {
		s->dma_address = 0L;
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		s->dma_length = s->length;
#endif
	}
	return nents;
}

static inline int
fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	return dev ? dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents);
}

static inline void
fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	if (dev)
		dma_unmap_sg(dev, sg, nents, dir);
}

/* *********************** FC-NVME LS Handling **************************** */

static void nvme_fc_ctrl_put(struct nvme_fc_ctrl *);
static int nvme_fc_ctrl_get(struct nvme_fc_ctrl *);


static void
__nvme_fc_finish_ls_req(struct nvmefc_ls_req_op *lsop)
{
	struct nvme_fc_rport *rport = lsop->rport;
	struct nvmefc_ls_req *lsreq = &lsop->ls_req;
	unsigned long flags;

	spin_lock_irqsave(&rport->lock, flags);

	if (!lsop->req_queued) {
		spin_unlock_irqrestore(&rport->lock, flags);
		return;
	}

	list_del(&lsop->lsreq_list);

	lsop->req_queued = false;

	spin_unlock_irqrestore(&rport->lock, flags);

	fc_dma_unmap_single(rport->dev, lsreq->rqstdma,
				  (lsreq->rqstlen + lsreq->rsplen),
				  DMA_BIDIRECTIONAL);

	nvme_fc_rport_put(rport);
}

static int
__nvme_fc_send_ls_req(struct nvme_fc_rport *rport,
		struct nvmefc_ls_req_op *lsop,
		void (*done)(struct nvmefc_ls_req *req, int status))
{
	struct nvmefc_ls_req *lsreq = &lsop->ls_req;
	unsigned long flags;
	int ret = 0;

	if (rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
		return -ECONNREFUSED;

	if (!nvme_fc_rport_get(rport))
		return -ESHUTDOWN;

	lsreq->done = done;
	lsop->rport = rport;
	lsop->req_queued = false;
	INIT_LIST_HEAD(&lsop->lsreq_list);
	init_completion(&lsop->ls_done);

	lsreq->rqstdma = fc_dma_map_single(rport->dev, lsreq->rqstaddr,
				  lsreq->rqstlen + lsreq->rsplen,
				  DMA_BIDIRECTIONAL);
	if (fc_dma_mapping_error(rport->dev, lsreq->rqstdma)) {
		ret = -EFAULT;
		goto out_putrport;
	}
	lsreq->rspdma = lsreq->rqstdma + lsreq->rqstlen;

	spin_lock_irqsave(&rport->lock, flags);

	list_add_tail(&lsop->lsreq_list, &rport->ls_req_list);

	lsop->req_queued = true;

	spin_unlock_irqrestore(&rport->lock, flags);

	ret = rport->lport->ops->ls_req(&rport->lport->localport,
					&rport->remoteport, lsreq);
	if (ret)
		goto out_unlink;

	return 0;

out_unlink:
	lsop->ls_error = ret;
	spin_lock_irqsave(&rport->lock, flags);
	lsop->req_queued = false;
	list_del(&lsop->lsreq_list);
	spin_unlock_irqrestore(&rport->lock, flags);
	fc_dma_unmap_single(rport->dev, lsreq->rqstdma,
				  (lsreq->rqstlen + lsreq->rsplen),
				  DMA_BIDIRECTIONAL);
out_putrport:
	nvme_fc_rport_put(rport);

	return ret;
}

static void
nvme_fc_send_ls_req_done(struct nvmefc_ls_req *lsreq, int status)
{
	struct nvmefc_ls_req_op *lsop = ls_req_to_lsop(lsreq);

	lsop->ls_error = status;
	complete(&lsop->ls_done);
}

static int
nvme_fc_send_ls_req(struct nvme_fc_rport *rport, struct nvmefc_ls_req_op *lsop)
{
	struct nvmefc_ls_req *lsreq = &lsop->ls_req;
	struct fcnvme_ls_rjt *rjt = lsreq->rspaddr;
	int ret;

	ret = __nvme_fc_send_ls_req(rport, lsop, nvme_fc_send_ls_req_done);

	if (!ret) {
		/*
		 * No timeout/not interruptible as we need the struct
		 * to exist until the lldd calls us back. Thus mandate
		 * wait until driver calls back. lldd responsible for
		 * the timeout action
		 */
		wait_for_completion(&lsop->ls_done);

		__nvme_fc_finish_ls_req(lsop);

		ret = lsop->ls_error;
	}

	if (ret)
		return ret;

	/* ACC or RJT payload ? */
	if (rjt->w0.ls_cmd == FCNVME_LS_RJT)
		return -ENXIO;

	return 0;
}

static int
nvme_fc_send_ls_req_async(struct nvme_fc_rport *rport,
		struct nvmefc_ls_req_op *lsop,
		void (*done)(struct nvmefc_ls_req *req, int status))
{
	/* don't wait for completion */

	return __nvme_fc_send_ls_req(rport, lsop, done);
}

/* Validation Error indexes into the string table below */
enum {
	VERR_NO_ERROR		= 0,
	VERR_LSACC		= 1,
	VERR_LSDESC_RQST	= 2,
	VERR_LSDESC_RQST_LEN	= 3,
	VERR_ASSOC_ID		= 4,
	VERR_ASSOC_ID_LEN	= 5,
	VERR_CONN_ID		= 6,
	VERR_CONN_ID_LEN	= 7,
	VERR_CR_ASSOC		= 8,
	VERR_CR_ASSOC_ACC_LEN	= 9,
	VERR_CR_CONN		= 10,
	VERR_CR_CONN_ACC_LEN	= 11,
	VERR_DISCONN		= 12,
	VERR_DISCONN_ACC_LEN	= 13,
};

static char *validation_errors[] = {
	"OK",
	"Not LS_ACC",
	"Not LSDESC_RQST",
	"Bad LSDESC_RQST Length",
	"Not Association ID",
	"Bad Association ID Length",
	"Not Connection ID",
	"Bad Connection ID Length",
	"Not CR_ASSOC Rqst",
	"Bad CR_ASSOC ACC Length",
	"Not CR_CONN Rqst",
	"Bad CR_CONN ACC Length",
	"Not Disconnect Rqst",
	"Bad Disconnect ACC Length",
};

static int
nvme_fc_connect_admin_queue(struct nvme_fc_ctrl *ctrl,
	struct nvme_fc_queue *queue, u16 qsize, u16 ersp_ratio)
{
	struct nvmefc_ls_req_op *lsop;
	struct nvmefc_ls_req *lsreq;
	struct fcnvme_ls_cr_assoc_rqst *assoc_rqst;
	struct fcnvme_ls_cr_assoc_acc *assoc_acc;
	int ret, fcret = 0;

	lsop = kzalloc((sizeof(*lsop) +
			 ctrl->lport->ops->lsrqst_priv_sz +
			 sizeof(*assoc_rqst) + sizeof(*assoc_acc)), GFP_KERNEL);
	if (!lsop) {
		ret = -ENOMEM;
		goto out_no_memory;
	}
	lsreq = &lsop->ls_req;

	lsreq->private = (void *)&lsop[1];
	assoc_rqst = (struct fcnvme_ls_cr_assoc_rqst *)
			(lsreq->private + ctrl->lport->ops->lsrqst_priv_sz);
	assoc_acc = (struct fcnvme_ls_cr_assoc_acc *)&assoc_rqst[1];

	assoc_rqst->w0.ls_cmd = FCNVME_LS_CREATE_ASSOCIATION;
	assoc_rqst->desc_list_len =
			cpu_to_be32(sizeof(struct fcnvme_lsdesc_cr_assoc_cmd));

	assoc_rqst->assoc_cmd.desc_tag =
			cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD);
	assoc_rqst->assoc_cmd.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_cr_assoc_cmd));

	assoc_rqst->assoc_cmd.ersp_ratio = cpu_to_be16(ersp_ratio);
	assoc_rqst->assoc_cmd.sqsize = cpu_to_be16(qsize - 1);
	/* Linux supports only Dynamic controllers */
	assoc_rqst->assoc_cmd.cntlid = cpu_to_be16(0xffff);
	uuid_copy(&assoc_rqst->assoc_cmd.hostid, &ctrl->ctrl.opts->host->id);
	strncpy(assoc_rqst->assoc_cmd.hostnqn, ctrl->ctrl.opts->host->nqn,
		min(FCNVME_ASSOC_HOSTNQN_LEN, NVMF_NQN_SIZE));
	strncpy(assoc_rqst->assoc_cmd.subnqn, ctrl->ctrl.opts->subsysnqn,
		min(FCNVME_ASSOC_SUBNQN_LEN, NVMF_NQN_SIZE));

	lsop->queue = queue;
	lsreq->rqstaddr = assoc_rqst;
	lsreq->rqstlen = sizeof(*assoc_rqst);
	lsreq->rspaddr = assoc_acc;
	lsreq->rsplen = sizeof(*assoc_acc);
	lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;

	ret = nvme_fc_send_ls_req(ctrl->rport, lsop);
	if (ret)
		goto out_free_buffer;

	/* process connect LS completion */

	/* validate the ACC response */
	if (assoc_acc->hdr.w0.ls_cmd != FCNVME_LS_ACC)
		fcret = VERR_LSACC;
	else if (assoc_acc->hdr.desc_list_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_ls_cr_assoc_acc)))
		fcret = VERR_CR_ASSOC_ACC_LEN;
	else if (assoc_acc->hdr.rqst.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_RQST))
		fcret = VERR_LSDESC_RQST;
	else if (assoc_acc->hdr.rqst.desc_len !=
			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst)))
		fcret = VERR_LSDESC_RQST_LEN;
	else if (assoc_acc->hdr.rqst.w0.ls_cmd != FCNVME_LS_CREATE_ASSOCIATION)
		fcret = VERR_CR_ASSOC;
	else if (assoc_acc->associd.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
		fcret = VERR_ASSOC_ID;
	else if (assoc_acc->associd.desc_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_assoc_id)))
		fcret = VERR_ASSOC_ID_LEN;
	else if (assoc_acc->connectid.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_CONN_ID))
		fcret = VERR_CONN_ID;
	else if (assoc_acc->connectid.desc_len !=
			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_conn_id)))
		fcret = VERR_CONN_ID_LEN;

	if (fcret) {
		ret = -EBADF;
		dev_err(ctrl->dev,
			"q %d connect failed: %s\n",
			queue->qnum, validation_errors[fcret]);
	} else {
		ctrl->association_id =
			be64_to_cpu(assoc_acc->associd.association_id);
		queue->connection_id =
			be64_to_cpu(assoc_acc->connectid.connection_id);
		set_bit(NVME_FC_Q_CONNECTED, &queue->flags);
	}

out_free_buffer:
	kfree(lsop);
out_no_memory:
	if (ret)
		dev_err(ctrl->dev,
			"queue %d connect admin queue failed (%d).\n",
			queue->qnum, ret);
	return ret;
}
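
/*
 * Create Association (above) establishes the association and the admin
 * queue's connection in a single LS; each io queue then gets its own
 * Create Connection LS below, reusing the association_id returned in
 * the CR_ASSOC accept.
 */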

static int
nvme_fc_connect_queue(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
			u16 qsize, u16 ersp_ratio)
{
	struct nvmefc_ls_req_op *lsop;
	struct nvmefc_ls_req *lsreq;
	struct fcnvme_ls_cr_conn_rqst *conn_rqst;
	struct fcnvme_ls_cr_conn_acc *conn_acc;
	int ret, fcret = 0;

	lsop = kzalloc((sizeof(*lsop) +
			 ctrl->lport->ops->lsrqst_priv_sz +
			 sizeof(*conn_rqst) + sizeof(*conn_acc)), GFP_KERNEL);
	if (!lsop) {
		ret = -ENOMEM;
		goto out_no_memory;
	}
	lsreq = &lsop->ls_req;

	lsreq->private = (void *)&lsop[1];
	conn_rqst = (struct fcnvme_ls_cr_conn_rqst *)
			(lsreq->private + ctrl->lport->ops->lsrqst_priv_sz);
	conn_acc = (struct fcnvme_ls_cr_conn_acc *)&conn_rqst[1];

	conn_rqst->w0.ls_cmd = FCNVME_LS_CREATE_CONNECTION;
	conn_rqst->desc_list_len = cpu_to_be32(
				sizeof(struct fcnvme_lsdesc_assoc_id) +
				sizeof(struct fcnvme_lsdesc_cr_conn_cmd));

	conn_rqst->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
	conn_rqst->associd.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_assoc_id));
	conn_rqst->associd.association_id = cpu_to_be64(ctrl->association_id);
	conn_rqst->connect_cmd.desc_tag =
			cpu_to_be32(FCNVME_LSDESC_CREATE_CONN_CMD);
	conn_rqst->connect_cmd.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_cr_conn_cmd));
	conn_rqst->connect_cmd.ersp_ratio = cpu_to_be16(ersp_ratio);
	conn_rqst->connect_cmd.qid = cpu_to_be16(queue->qnum);
	conn_rqst->connect_cmd.sqsize = cpu_to_be16(qsize - 1);

	lsop->queue = queue;
	lsreq->rqstaddr = conn_rqst;
	lsreq->rqstlen = sizeof(*conn_rqst);
	lsreq->rspaddr = conn_acc;
	lsreq->rsplen = sizeof(*conn_acc);
	lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;

	ret = nvme_fc_send_ls_req(ctrl->rport, lsop);
	if (ret)
		goto out_free_buffer;

	/* process connect LS completion */

	/* validate the ACC response */
	if (conn_acc->hdr.w0.ls_cmd != FCNVME_LS_ACC)
		fcret = VERR_LSACC;
	else if (conn_acc->hdr.desc_list_len !=
			fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc)))
		fcret = VERR_CR_CONN_ACC_LEN;
	else if (conn_acc->hdr.rqst.desc_tag != cpu_to_be32(FCNVME_LSDESC_RQST))
		fcret = VERR_LSDESC_RQST;
	else if (conn_acc->hdr.rqst.desc_len !=
			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst)))
		fcret = VERR_LSDESC_RQST_LEN;
	else if (conn_acc->hdr.rqst.w0.ls_cmd != FCNVME_LS_CREATE_CONNECTION)
		fcret = VERR_CR_CONN;
	else if (conn_acc->connectid.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_CONN_ID))
		fcret = VERR_CONN_ID;
	else if (conn_acc->connectid.desc_len !=
			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_conn_id)))
		fcret = VERR_CONN_ID_LEN;

	if (fcret) {
		ret = -EBADF;
		dev_err(ctrl->dev,
			"q %d connect failed: %s\n",
			queue->qnum, validation_errors[fcret]);
	} else {
		queue->connection_id =
			be64_to_cpu(conn_acc->connectid.connection_id);
		set_bit(NVME_FC_Q_CONNECTED, &queue->flags);
	}

out_free_buffer:
	kfree(lsop);
out_no_memory:
	if (ret)
		dev_err(ctrl->dev,
			"queue %d connect command failed (%d).\n",
			queue->qnum, ret);
	return ret;
}

static void
nvme_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status)
{
	struct nvmefc_ls_req_op *lsop = ls_req_to_lsop(lsreq);

	__nvme_fc_finish_ls_req(lsop);

	/* fc-nvme initiator doesn't care about success or failure of cmd */

	kfree(lsop);
}

/*
 * This routine sends a FC-NVME LS to disconnect (aka terminate)
 * the FC-NVME Association.  Terminating the association also
 * terminates the FC-NVME connections (per queue, both admin and io
 * queues) that are part of the association. E.g. things are torn
 * down, and the related FC-NVME Association ID and Connection IDs
 * become invalid.
 *
 * The behavior of the fc-nvme initiator is such that it's
 * understanding of the association and connections will implicitly
 * be torn down. The action is implicit as it may be due to a loss of
 * connectivity with the fc-nvme target, so you may never get a
 * response even if you tried.  As such, the action of this routine
 * is to asynchronously send the LS, ignore any results of the LS, and
 * continue on with terminating the association. If the fc-nvme target
 * is present and receives the LS, it too can tear down.
 */
static void
nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl)
{
	struct fcnvme_ls_disconnect_rqst *discon_rqst;
	struct fcnvme_ls_disconnect_acc *discon_acc;
	struct nvmefc_ls_req_op *lsop;
	struct nvmefc_ls_req *lsreq;
	int ret;

	lsop = kzalloc((sizeof(*lsop) +
			ctrl->lport->ops->lsrqst_priv_sz +
			sizeof(*discon_rqst) + sizeof(*discon_acc)),
			GFP_KERNEL);
	if (!lsop)
		/* couldn't send it... too bad */
		return;

	lsreq = &lsop->ls_req;

	lsreq->private = (void *)&lsop[1];
	discon_rqst = (struct fcnvme_ls_disconnect_rqst *)
			(lsreq->private + ctrl->lport->ops->lsrqst_priv_sz);
	discon_acc = (struct fcnvme_ls_disconnect_acc *)&discon_rqst[1];

	discon_rqst->w0.ls_cmd = FCNVME_LS_DISCONNECT;
	discon_rqst->desc_list_len = cpu_to_be32(
				sizeof(struct fcnvme_lsdesc_assoc_id) +
				sizeof(struct fcnvme_lsdesc_disconn_cmd));

	discon_rqst->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
	discon_rqst->associd.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_assoc_id));

	discon_rqst->associd.association_id = cpu_to_be64(ctrl->association_id);

	discon_rqst->discon_cmd.desc_tag = cpu_to_be32(
						FCNVME_LSDESC_DISCONN_CMD);
	discon_rqst->discon_cmd.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_disconn_cmd));
	discon_rqst->discon_cmd.scope = FCNVME_DISCONN_ASSOCIATION;
	discon_rqst->discon_cmd.id = cpu_to_be64(ctrl->association_id);

	lsreq->rqstaddr = discon_rqst;
	lsreq->rqstlen = sizeof(*discon_rqst);
	lsreq->rspaddr = discon_acc;
	lsreq->rsplen = sizeof(*discon_acc);
	lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;

	ret = nvme_fc_send_ls_req_async(ctrl->rport, lsop,
				nvme_fc_disconnect_assoc_done);
	if (ret)
		kfree(lsop);

	/* only meaningful part to terminating the association */
	ctrl->association_id = 0;
}


/* *********************** NVME Ctrl Routines **************************** */

static void nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg);

static void
__nvme_fc_exit_request(struct nvme_fc_ctrl *ctrl,
		struct nvme_fc_fcp_op *op)
{
	fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.rspdma,
				sizeof(op->rsp_iu), DMA_FROM_DEVICE);
	fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.cmddma,
				sizeof(op->cmd_iu), DMA_TO_DEVICE);

	atomic_set(&op->state, FCPOP_STATE_UNINIT);
}

static void
nvme_fc_exit_request(struct blk_mq_tag_set *set, struct request *rq,
		unsigned int hctx_idx)
{
	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);

	return __nvme_fc_exit_request(set->driver_data, op);
}

static int
__nvme_fc_abort_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_fcp_op *op)
{
	unsigned long flags;
	int opstate;

	spin_lock_irqsave(&ctrl->lock, flags);
	opstate = atomic_xchg(&op->state, FCPOP_STATE_ABORTED);
	if (opstate != FCPOP_STATE_ACTIVE)
		atomic_set(&op->state, opstate);
	else if (ctrl->flags & FCCTRL_TERMIO)
		ctrl->iocnt++;
	spin_unlock_irqrestore(&ctrl->lock, flags);

	if (opstate != FCPOP_STATE_ACTIVE)
		return -ECANCELED;

	ctrl->lport->ops->fcp_abort(&ctrl->lport->localport,
					&ctrl->rport->remoteport,
					op->queue->lldd_handle,
					&op->fcp_req);

	return 0;
}

static void
nvme_fc_abort_aen_ops(struct nvme_fc_ctrl *ctrl)
{
	struct nvme_fc_fcp_op *aen_op = ctrl->aen_ops;
	int i;

	/* ensure we've initialized the ops once */
	if (!(aen_op->flags & FCOP_FLAGS_AEN))
		return;

	for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++)
		__nvme_fc_abort_op(ctrl, aen_op);
}
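
/*
 * Ios aborted while FCCTRL_TERMIO is set are counted in ctrl->iocnt by
 * __nvme_fc_abort_op(); their completions drop the count here and wake
 * ioabort_wait so the association teardown path can proceed once it
 * reaches zero.
 */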

static inline void
__nvme_fc_fcpop_chk_teardowns(struct nvme_fc_ctrl *ctrl,
		struct nvme_fc_fcp_op *op, int opstate)
{
	unsigned long flags;

	if (opstate == FCPOP_STATE_ABORTED) {
		spin_lock_irqsave(&ctrl->lock, flags);
		if (ctrl->flags & FCCTRL_TERMIO) {
			if (!--ctrl->iocnt)
				wake_up(&ctrl->ioabort_wait);
		}
		spin_unlock_irqrestore(&ctrl->lock, flags);
	}
}

static void
nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
{
	struct nvme_fc_fcp_op *op = fcp_req_to_fcp_op(req);
	struct request *rq = op->rq;
	struct nvmefc_fcp_req *freq = &op->fcp_req;
	struct nvme_fc_ctrl *ctrl = op->ctrl;
	struct nvme_fc_queue *queue = op->queue;
	struct nvme_completion *cqe = &op->rsp_iu.cqe;
	struct nvme_command *sqe = &op->cmd_iu.sqe;
	__le16 status = cpu_to_le16(NVME_SC_SUCCESS << 1);
	union nvme_result result;
	bool terminate_assoc = true;
	int opstate;

	/*
	 * WARNING:
	 * The current linux implementation of a nvme controller
	 * allocates a single tag set for all io queues and sizes
	 * the io queues to fully hold all possible tags. Thus, the
	 * implementation does not reference or care about the sqhd
	 * value as it never needs to use the sqhd/sqtail pointers
	 * for submission pacing.
	 *
	 * This affects the FC-NVME implementation in two ways:
	 * 1) As the value doesn't matter, we don't need to waste
	 *    cycles extracting it from ERSPs and stamping it in the
	 *    cases where the transport fabricates CQEs on successful
	 *    completions.
	 * 2) The FC-NVME implementation requires that delivery of
	 *    ERSP completions are to go back to the nvme layer in order
	 *    relative to the rsn, such that the sqhd value will always
	 *    be "in order" for the nvme layer. As the nvme layer in
	 *    linux doesn't care about sqhd, there's no need to return
	 *    them in order.
	 *
	 * Additionally:
	 * As the core nvme layer in linux currently does not look at
	 * every field in the cqe - in cases where the FC transport must
	 * fabricate a CQE, the following fields will not be set as they
	 * are not referenced:
	 *      cqe.sqid, cqe.sqhd, cqe.command_id
	 *
	 * Failure or error of an individual i/o, in a transport
	 * detected fashion unrelated to the nvme completion status,
	 * can potentially cause the initiator and target sides to get out
	 * of sync on SQ head/tail (aka outstanding io count allowed).
	 * Per FC-NVME spec, failure of an individual command requires
	 * the connection to be terminated, which in turn requires the
	 * association to be terminated.
	 */

	opstate = atomic_xchg(&op->state, FCPOP_STATE_COMPLETE);

	fc_dma_sync_single_for_cpu(ctrl->lport->dev, op->fcp_req.rspdma,
				sizeof(op->rsp_iu), DMA_FROM_DEVICE);

	if (opstate == FCPOP_STATE_ABORTED)
		status = cpu_to_le16(NVME_SC_ABORT_REQ << 1);
	else if (freq->status)
		status = cpu_to_le16(NVME_SC_INTERNAL << 1);

	/*
	 * For the linux implementation, if we have an unsuccessful
	 * status, the blk-mq layer can typically be called with the
	 * non-zero status and the content of the cqe isn't important.
	 */
	if (status)
		goto done;

	/*
	 * command completed successfully relative to the wire
	 * protocol. However, validate anything received and
	 * extract the status and result from the cqe (create it
	 * where necessary).
	 */

	switch (freq->rcv_rsplen) {

	case 0:
	case NVME_FC_SIZEOF_ZEROS_RSP:
		/*
		 * No response payload or 12 bytes of payload (which
		 * should all be zeros) are considered successful and
		 * no payload in the CQE by the transport.
		 */
		if (freq->transferred_length !=
			be32_to_cpu(op->cmd_iu.data_len)) {
			status = cpu_to_le16(NVME_SC_INTERNAL << 1);
			goto done;
		}
		result.u64 = 0;
		break;

	case sizeof(struct nvme_fc_ersp_iu):
		/*
		 * The ERSP IU contains a full completion with CQE.
		 * Validate ERSP IU and look at cqe.
		 */
		if (unlikely(be16_to_cpu(op->rsp_iu.iu_len) !=
					(freq->rcv_rsplen / 4) ||
			     be32_to_cpu(op->rsp_iu.xfrd_len) !=
					freq->transferred_length ||
			     op->rsp_iu.status_code ||
			     sqe->common.command_id != cqe->command_id)) {
			status = cpu_to_le16(NVME_SC_INTERNAL << 1);
			goto done;
		}
		result = cqe->result;
		status = cqe->status;
		break;

	default:
		status = cpu_to_le16(NVME_SC_INTERNAL << 1);
		goto done;
	}

	terminate_assoc = false;

done:
	if (op->flags & FCOP_FLAGS_AEN) {
		nvme_complete_async_event(&queue->ctrl->ctrl, status, &result);
		__nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
		atomic_set(&op->state, FCPOP_STATE_IDLE);
		op->flags = FCOP_FLAGS_AEN;	/* clear other flags */
		nvme_fc_ctrl_put(ctrl);
		goto check_error;
	}

	__nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
	nvme_end_request(rq, status, result);

check_error:
	if (terminate_assoc)
		nvme_fc_error_recovery(ctrl, "transport detected io error");
}

static int
__nvme_fc_init_request(struct nvme_fc_ctrl *ctrl,
		struct nvme_fc_queue *queue, struct nvme_fc_fcp_op *op,
		struct request *rq, u32 rqno)
{
	struct nvme_fcp_op_w_sgl *op_w_sgl =
		container_of(op, typeof(*op_w_sgl), op);
	struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
	int ret = 0;

	memset(op, 0, sizeof(*op));
	op->fcp_req.cmdaddr = &op->cmd_iu;
	op->fcp_req.cmdlen = sizeof(op->cmd_iu);
	op->fcp_req.rspaddr = &op->rsp_iu;
	op->fcp_req.rsplen = sizeof(op->rsp_iu);
	op->fcp_req.done = nvme_fc_fcpio_done;
	op->ctrl = ctrl;
	op->queue = queue;
	op->rq = rq;
	op->rqno = rqno;

	cmdiu->scsi_id = NVME_CMD_SCSI_ID;
	cmdiu->fc_id = NVME_CMD_FC_ID;
	cmdiu->iu_len = cpu_to_be16(sizeof(*cmdiu) / sizeof(u32));

	op->fcp_req.cmddma = fc_dma_map_single(ctrl->lport->dev,
				&op->cmd_iu, sizeof(op->cmd_iu), DMA_TO_DEVICE);
	if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.cmddma)) {
		dev_err(ctrl->dev,
			"FCP Op failed - cmdiu dma mapping failed.\n");
		ret = -EFAULT;
		goto out_on_error;
	}

	op->fcp_req.rspdma = fc_dma_map_single(ctrl->lport->dev,
				&op->rsp_iu, sizeof(op->rsp_iu),
				DMA_FROM_DEVICE);
	if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.rspdma)) {
		dev_err(ctrl->dev,
			"FCP Op failed - rspiu dma mapping failed.\n");
		ret = -EFAULT;
	}

	atomic_set(&op->state, FCPOP_STATE_IDLE);
out_on_error:
	return ret;
}
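
/*
 * blk-mq .init_request: requests from the admin tag set map to queue 0,
 * requests from the io tag set to queues[hctx_idx + 1]. rqno is a simple
 * per-queue allocation counter.
 */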

static int
nvme_fc_init_request(struct blk_mq_tag_set *set, struct request *rq,
		unsigned int hctx_idx, unsigned int numa_node)
{
	struct nvme_fc_ctrl *ctrl = set->driver_data;
	struct nvme_fcp_op_w_sgl *op = blk_mq_rq_to_pdu(rq);
	int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
	struct nvme_fc_queue *queue = &ctrl->queues[queue_idx];
	int res;

	res = __nvme_fc_init_request(ctrl, queue, &op->op, rq, queue->rqcnt++);
	if (res)
		return res;
	op->op.fcp_req.first_sgl = &op->sgl[0];
	op->op.fcp_req.private = &op->priv[0];
	nvme_req(rq)->ctrl = &ctrl->ctrl;
	return res;
}

static int
nvme_fc_init_aen_ops(struct nvme_fc_ctrl *ctrl)
{
	struct nvme_fc_fcp_op *aen_op;
	struct nvme_fc_cmd_iu *cmdiu;
	struct nvme_command *sqe;
	void *private;
	int i, ret;

	aen_op = ctrl->aen_ops;
	for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) {
		private = kzalloc(ctrl->lport->ops->fcprqst_priv_sz,
						GFP_KERNEL);
		if (!private)
			return -ENOMEM;

		cmdiu = &aen_op->cmd_iu;
		sqe = &cmdiu->sqe;
		ret = __nvme_fc_init_request(ctrl, &ctrl->queues[0],
				aen_op, (struct request *)NULL,
				(NVME_AQ_BLK_MQ_DEPTH + i));
		if (ret) {
			kfree(private);
			return ret;
		}

		aen_op->flags = FCOP_FLAGS_AEN;
		aen_op->fcp_req.private = private;

		memset(sqe, 0, sizeof(*sqe));
		sqe->common.opcode = nvme_admin_async_event;
		/* Note: core layer may overwrite the sqe.command_id value */
		sqe->common.command_id = NVME_AQ_BLK_MQ_DEPTH + i;
	}
	return 0;
}

static void
nvme_fc_term_aen_ops(struct nvme_fc_ctrl *ctrl)
{
	struct nvme_fc_fcp_op *aen_op;
	int i;

	aen_op = ctrl->aen_ops;
	for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) {
		if (!aen_op->fcp_req.private)
			continue;

		__nvme_fc_exit_request(ctrl, aen_op);

		kfree(aen_op->fcp_req.private);
		aen_op->fcp_req.private = NULL;
	}
}

static inline void
__nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, struct nvme_fc_ctrl *ctrl,
		unsigned int qidx)
{
	struct nvme_fc_queue *queue = &ctrl->queues[qidx];

	hctx->driver_data = queue;
	queue->hctx = hctx;
}

static int
nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_fc_ctrl *ctrl = data;

	__nvme_fc_init_hctx(hctx, ctrl, hctx_idx + 1);

	return 0;
}

static int
nvme_fc_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_fc_ctrl *ctrl = data;

	__nvme_fc_init_hctx(hctx, ctrl, hctx_idx);

	return 0;
}

static void
nvme_fc_init_queue(struct nvme_fc_ctrl *ctrl, int idx)
{
	struct nvme_fc_queue *queue;

	queue = &ctrl->queues[idx];
	memset(queue, 0, sizeof(*queue));
	queue->ctrl = ctrl;
	queue->qnum = idx;
	atomic_set(&queue->csn, 0);
	queue->dev = ctrl->dev;

	if (idx > 0)
		queue->cmnd_capsule_len = ctrl->ctrl.ioccsz * 16;
	else
		queue->cmnd_capsule_len = sizeof(struct nvme_command);

	/*
	 * Considered whether we should allocate buffers for all SQEs
	 * and CQEs and dma map them - mapping their respective entries
	 * into the request structures (kernel vm addr and dma address)
	 * thus the driver could use the buffers/mappings directly.
	 * It only makes sense if the LLDD would use them for its
	 * messaging api. It's very unlikely most adapter api's would use
	 * a native NVME sqe/cqe. More reasonable if FC-NVME IU payload
	 * structures were used instead.
	 */
}

/*
 * This routine terminates a queue at the transport level.
 * The transport has already ensured that all outstanding ios on
 * the queue have been terminated.
 * The transport will send a Disconnect LS request to terminate
 * the queue's connection. Termination of the admin queue will also
 * terminate the association at the target.
 */
static void
nvme_fc_free_queue(struct nvme_fc_queue *queue)
{
	if (!test_and_clear_bit(NVME_FC_Q_CONNECTED, &queue->flags))
		return;

	clear_bit(NVME_FC_Q_LIVE, &queue->flags);
	/*
	 * Current implementation never disconnects a single queue.
	 * It always terminates a whole association. So there is never
	 * a disconnect(queue) LS sent to the target.
	 */

	queue->connection_id = 0;
	atomic_set(&queue->csn, 0);
}

static void
__nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *ctrl,
	struct nvme_fc_queue *queue, unsigned int qidx)
{
	if (ctrl->lport->ops->delete_queue)
		ctrl->lport->ops->delete_queue(&ctrl->lport->localport, qidx,
				queue->lldd_handle);
	queue->lldd_handle = NULL;
}

static void
nvme_fc_free_io_queues(struct nvme_fc_ctrl *ctrl)
{
	int i;

	for (i = 1; i < ctrl->ctrl.queue_count; i++)
		nvme_fc_free_queue(&ctrl->queues[i]);
}

static int
__nvme_fc_create_hw_queue(struct nvme_fc_ctrl *ctrl,
	struct nvme_fc_queue *queue, unsigned int qidx, u16 qsize)
{
	int ret = 0;

	queue->lldd_handle = NULL;
	if (ctrl->lport->ops->create_queue)
		ret = ctrl->lport->ops->create_queue(&ctrl->lport->localport,
				qidx, qsize, &queue->lldd_handle);

	return ret;
}

static void
nvme_fc_delete_hw_io_queues(struct nvme_fc_ctrl *ctrl)
{
	struct nvme_fc_queue *queue = &ctrl->queues[ctrl->ctrl.queue_count - 1];
	int i;

	for (i = ctrl->ctrl.queue_count - 1; i >= 1; i--, queue--)
		__nvme_fc_delete_hw_queue(ctrl, queue, i);
}

static int
nvme_fc_create_hw_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
{
	struct nvme_fc_queue *queue = &ctrl->queues[1];
	int i, ret;

	for (i = 1; i < ctrl->ctrl.queue_count; i++, queue++) {
		ret = __nvme_fc_create_hw_queue(ctrl, queue, i, qsize);
		if (ret)
			goto delete_queues;
	}

	return 0;

delete_queues:
	for (; i >= 0; i--)
		__nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[i], i);
	return ret;
}

static int
nvme_fc_connect_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
{
	int i, ret = 0;

	for (i = 1; i < ctrl->ctrl.queue_count; i++) {
		ret = nvme_fc_connect_queue(ctrl, &ctrl->queues[i], qsize,
					(qsize / 5));
		if (ret)
			break;
		ret = nvmf_connect_io_queue(&ctrl->ctrl, i, false);
		if (ret)
			break;

		set_bit(NVME_FC_Q_LIVE, &ctrl->queues[i].flags);
	}

	return ret;
}

static void
nvme_fc_init_io_queues(struct nvme_fc_ctrl *ctrl)
{
	int i;

	for (i = 1; i < ctrl->ctrl.queue_count; i++)
		nvme_fc_init_queue(ctrl, i);
}

static void
nvme_fc_ctrl_free(struct kref *ref)
{
	struct nvme_fc_ctrl *ctrl =
		container_of(ref, struct nvme_fc_ctrl, ref);
	unsigned long flags;

	if (ctrl->ctrl.tagset) {
		blk_cleanup_queue(ctrl->ctrl.connect_q);
		blk_mq_free_tag_set(&ctrl->tag_set);
	}

	/* remove from rport list */
static void
nvme_fc_ctrl_free(struct kref *ref)
{
	struct nvme_fc_ctrl *ctrl =
		container_of(ref, struct nvme_fc_ctrl, ref);
	unsigned long flags;

	if (ctrl->ctrl.tagset) {
		blk_cleanup_queue(ctrl->ctrl.connect_q);
		blk_mq_free_tag_set(&ctrl->tag_set);
	}

	/* remove from rport list */
	spin_lock_irqsave(&ctrl->rport->lock, flags);
	list_del(&ctrl->ctrl_list);
	spin_unlock_irqrestore(&ctrl->rport->lock, flags);

	blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
	blk_cleanup_queue(ctrl->ctrl.admin_q);
	blk_mq_free_tag_set(&ctrl->admin_tag_set);

	kfree(ctrl->queues);

	put_device(ctrl->dev);
	nvme_fc_rport_put(ctrl->rport);

	ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum);
	if (ctrl->ctrl.opts)
		nvmf_free_options(ctrl->ctrl.opts);
	kfree(ctrl);
}

static void
nvme_fc_ctrl_put(struct nvme_fc_ctrl *ctrl)
{
	kref_put(&ctrl->ref, nvme_fc_ctrl_free);
}

static int
nvme_fc_ctrl_get(struct nvme_fc_ctrl *ctrl)
{
	return kref_get_unless_zero(&ctrl->ref);
}

/*
 * All accesses from nvme core layer done - can now free the
 * controller. Called after last nvme_put_ctrl() call
 */
static void
nvme_fc_nvme_ctrl_freed(struct nvme_ctrl *nctrl)
{
	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);

	WARN_ON(nctrl != &ctrl->ctrl);

	nvme_fc_ctrl_put(ctrl);
}

static void
nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
{
	int active;

	/*
	 * if an error (io timeout, etc) while (re)connecting,
	 * it's an error on creating the new association.
	 * Start the error recovery thread if it hasn't already
	 * been started. It is expected there could be multiple
	 * ios hitting this path before things are cleaned up.
	 */
	if (ctrl->ctrl.state == NVME_CTRL_CONNECTING) {
		active = atomic_xchg(&ctrl->err_work_active, 1);
		if (!active && !queue_work(nvme_fc_wq, &ctrl->err_work)) {
			atomic_set(&ctrl->err_work_active, 0);
			WARN_ON(1);
		}
		return;
	}

	/* Otherwise, only proceed if in LIVE state - e.g. on first error */
	if (ctrl->ctrl.state != NVME_CTRL_LIVE)
		return;

	dev_warn(ctrl->ctrl.device,
		"NVME-FC{%d}: transport association error detected: %s\n",
		ctrl->cnum, errmsg);
	dev_warn(ctrl->ctrl.device,
		"NVME-FC{%d}: resetting controller\n", ctrl->cnum);

	nvme_reset_ctrl(&ctrl->ctrl);
}

static enum blk_eh_timer_return
nvme_fc_timeout(struct request *rq, bool reserved)
{
	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
	struct nvme_fc_ctrl *ctrl = op->ctrl;

	/*
	 * we can't individually ABTS an io without affecting the queue,
	 * thus killing the queue, and thus the association.
	 * So resolve by performing a controller reset, which will stop
	 * the host/io stack, terminate the association on the link,
	 * and recreate an association on the link.
	 */
	nvme_fc_error_recovery(ctrl, "io timeout error");

	/*
	 * the io abort has been initiated. Have the reset timer
	 * restarted and the abort completion will complete the io
	 * shortly. Avoids a synchronous wait while the abort finishes.
	 */
	return BLK_EH_RESET_TIMER;
}
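/*
 * Illustrative sketch, not part of the driver: the "claim the error
 * work exactly once" idiom used in nvme_fc_error_recovery() above.
 * atomic_xchg() returns the previous value, so only the first caller
 * that observes 0 queues err_work; subsequent errors on the same
 * association are ignored until the work item clears the flag again.
 */
static inline bool __maybe_unused
nvme_fc_example_claim_err_work(atomic_t *err_work_active)
{
	return atomic_xchg(err_work_active, 1) == 0;
}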
static int
nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
		struct nvme_fc_fcp_op *op)
{
	struct nvmefc_fcp_req *freq = &op->fcp_req;
	enum dma_data_direction dir;
	int ret;

	freq->sg_cnt = 0;

	if (!blk_rq_nr_phys_segments(rq))
		return 0;

	freq->sg_table.sgl = freq->first_sgl;
	ret = sg_alloc_table_chained(&freq->sg_table,
			blk_rq_nr_phys_segments(rq), freq->sg_table.sgl);
	if (ret)
		return -ENOMEM;

	op->nents = blk_rq_map_sg(rq->q, rq, freq->sg_table.sgl);
	WARN_ON(op->nents > blk_rq_nr_phys_segments(rq));
	dir = (rq_data_dir(rq) == WRITE) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
	freq->sg_cnt = fc_dma_map_sg(ctrl->lport->dev, freq->sg_table.sgl,
				op->nents, dir);
	if (unlikely(freq->sg_cnt <= 0)) {
		sg_free_table_chained(&freq->sg_table, true);
		freq->sg_cnt = 0;
		return -EFAULT;
	}

	/*
	 * TODO: blk_integrity_rq(rq) for DIF
	 */
	return 0;
}

static void
nvme_fc_unmap_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
		struct nvme_fc_fcp_op *op)
{
	struct nvmefc_fcp_req *freq = &op->fcp_req;

	if (!freq->sg_cnt)
		return;

	fc_dma_unmap_sg(ctrl->lport->dev, freq->sg_table.sgl, op->nents,
			((rq_data_dir(rq) == WRITE) ?
				DMA_TO_DEVICE : DMA_FROM_DEVICE));

	nvme_cleanup_cmd(rq);

	sg_free_table_chained(&freq->sg_table, true);

	freq->sg_cnt = 0;
}

/*
 * In FC, the queue is a logical thing. At transport connect, the target
 * creates its "queue" and returns a handle that is to be given to the
 * target whenever it posts something to the corresponding SQ.  When an
 * SQE is sent on a SQ, FC effectively considers the SQE, or rather the
 * command contained within the SQE, an io, and assigns a FC exchange
 * to it. The SQE and the associated SQ handle are sent in the initial
 * CMD IU sent on the exchange. All transfers relative to the io occur
 * as part of the exchange.  The CQE is the last thing for the io,
 * which is transferred (explicitly or implicitly) with the RSP IU
 * sent on the exchange. After the CQE is received, the FC exchange is
 * terminated and the Exchange may be used on a different io.
 *
 * The transport to LLDD api has the transport making a request for a
 * new fcp io request to the LLDD. The LLDD then allocates a FC exchange
 * resource and transfers the command.  The LLDD will then process all
 * steps to complete the io. Upon completion, the transport done routine
 * is called.
 *
 * So - while the operation is outstanding to the LLDD, there is a link
 * level FC exchange resource that is also outstanding. This must be
 * considered in all cleanup operations.
 */
static blk_status_t
nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
	struct nvme_fc_fcp_op *op, u32 data_len,
	enum nvmefc_fcp_datadir io_dir)
{
	struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
	struct nvme_command *sqe = &cmdiu->sqe;
	int ret, opstate;

	/*
	 * before attempting to send the io, check to see if we believe
	 * the target device is present
	 */
	if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
		return BLK_STS_RESOURCE;

	if (!nvme_fc_ctrl_get(ctrl))
		return BLK_STS_IOERR;

	/* format the FC-NVME CMD IU and fcp_req */
	cmdiu->connection_id = cpu_to_be64(queue->connection_id);
	cmdiu->data_len = cpu_to_be32(data_len);
	switch (io_dir) {
	case NVMEFC_FCP_WRITE:
		cmdiu->flags = FCNVME_CMD_FLAGS_WRITE;
		break;
	case NVMEFC_FCP_READ:
		cmdiu->flags = FCNVME_CMD_FLAGS_READ;
		break;
	case NVMEFC_FCP_NODATA:
		cmdiu->flags = 0;
		break;
	}
	op->fcp_req.payload_length = data_len;
	op->fcp_req.io_dir = io_dir;
	op->fcp_req.transferred_length = 0;
	op->fcp_req.rcv_rsplen = 0;
	op->fcp_req.status = NVME_SC_SUCCESS;
	op->fcp_req.sqid = cpu_to_le16(queue->qnum);

	/*
	 * validate per fabric rules, set fields mandated by fabric spec
	 * as well as those by FC-NVME spec.
	 */
	WARN_ON_ONCE(sqe->common.metadata);
	sqe->common.flags |= NVME_CMD_SGL_METABUF;

	/*
	 * format SQE DPTR field per FC-NVME rules:
	 *    type=0x5     Transport SGL Data Block Descriptor
	 *    subtype=0xA  Transport-specific value
	 *    address=0
	 *    length=length of the data series
	 */
	sqe->rw.dptr.sgl.type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
					NVME_SGL_FMT_TRANSPORT_A;
	sqe->rw.dptr.sgl.length = cpu_to_le32(data_len);
	sqe->rw.dptr.sgl.addr = 0;

	if (!(op->flags & FCOP_FLAGS_AEN)) {
		ret = nvme_fc_map_data(ctrl, op->rq, op);
		if (ret < 0) {
			nvme_cleanup_cmd(op->rq);
			nvme_fc_ctrl_put(ctrl);
			if (ret == -ENOMEM || ret == -EAGAIN)
				return BLK_STS_RESOURCE;
			return BLK_STS_IOERR;
		}
	}

	fc_dma_sync_single_for_device(ctrl->lport->dev, op->fcp_req.cmddma,
				sizeof(op->cmd_iu), DMA_TO_DEVICE);

	atomic_set(&op->state, FCPOP_STATE_ACTIVE);

	if (!(op->flags & FCOP_FLAGS_AEN))
		blk_mq_start_request(op->rq);

	cmdiu->csn = cpu_to_be32(atomic_inc_return(&queue->csn));
	ret = ctrl->lport->ops->fcp_io(&ctrl->lport->localport,
					&ctrl->rport->remoteport,
					queue->lldd_handle, &op->fcp_req);

	if (ret) {
		/*
		 * If the lld fails to send the command is there an issue with
		 * the csn value? If the command that fails is the Connect,
		 * no - as the connection won't be live. If it is a command
		 * post-connect, it's possible a gap in csn may be created.
		 * Does this matter? As Linux initiators don't send fused
		 * commands, no. The gap would exist, but as there's nothing
		 * that depends on csn order to be delivered on the target
		 * side, it shouldn't hurt. It would be difficult for a
		 * target to even detect the csn gap as it has no idea when the
		 * cmd with the csn was supposed to arrive.
		 */
		opstate = atomic_xchg(&op->state, FCPOP_STATE_COMPLETE);
		__nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);

		if (!(op->flags & FCOP_FLAGS_AEN))
			nvme_fc_unmap_data(ctrl, op->rq, op);

		nvme_fc_ctrl_put(ctrl);

		if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE &&
				ret != -EBUSY)
			return BLK_STS_IOERR;

		return BLK_STS_RESOURCE;
	}

	return BLK_STS_OK;
}
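/*
 * Illustrative sketch, not part of the driver: with the DPTR formatting
 * in nvme_fc_start_fcp_op() above, the SGL descriptor identifier byte
 * works out to 0x5A - type 0x5 (Transport SGL Data Block Descriptor)
 * in the upper nibble and subtype 0xA (transport specific) in the
 * lower nibble - while the address stays 0 because the data is carried
 * on the FC exchange itself rather than addressed by the descriptor.
 */
static inline u8 __maybe_unused nvme_fc_example_sgl_type_byte(void)
{
	return (NVME_TRANSPORT_SGL_DATA_DESC << 4) | NVME_SGL_FMT_TRANSPORT_A;
}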
static blk_status_t
nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
			const struct blk_mq_queue_data *bd)
{
	struct nvme_ns *ns = hctx->queue->queuedata;
	struct nvme_fc_queue *queue = hctx->driver_data;
	struct nvme_fc_ctrl *ctrl = queue->ctrl;
	struct request *rq = bd->rq;
	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
	struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
	struct nvme_command *sqe = &cmdiu->sqe;
	enum nvmefc_fcp_datadir io_dir;
	bool queue_ready = test_bit(NVME_FC_Q_LIVE, &queue->flags);
	u32 data_len;
	blk_status_t ret;

	if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE ||
	    !nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
		return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq);

	ret = nvme_setup_cmd(ns, rq, sqe);
	if (ret)
		return ret;

	/*
	 * nvme core doesn't quite treat the rq opaquely. Commands such
	 * as WRITE ZEROES will return a non-zero rq payload_bytes yet
	 * there is no actual payload to be transferred.
	 * To get it right, key data transmission on there being 1 or
	 * more physical segments in the sg list. If there are no
	 * physical segments, there is no payload.
	 */
	if (blk_rq_nr_phys_segments(rq)) {
		data_len = blk_rq_payload_bytes(rq);
		io_dir = ((rq_data_dir(rq) == WRITE) ?
					NVMEFC_FCP_WRITE : NVMEFC_FCP_READ);
	} else {
		data_len = 0;
		io_dir = NVMEFC_FCP_NODATA;
	}

	return nvme_fc_start_fcp_op(ctrl, queue, op, data_len, io_dir);
}

static void
nvme_fc_submit_async_event(struct nvme_ctrl *arg)
{
	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(arg);
	struct nvme_fc_fcp_op *aen_op;
	unsigned long flags;
	bool terminating = false;
	blk_status_t ret;

	spin_lock_irqsave(&ctrl->lock, flags);
	if (ctrl->flags & FCCTRL_TERMIO)
		terminating = true;
	spin_unlock_irqrestore(&ctrl->lock, flags);

	if (terminating)
		return;

	aen_op = &ctrl->aen_ops[0];

	ret = nvme_fc_start_fcp_op(ctrl, aen_op->queue, aen_op, 0,
					NVMEFC_FCP_NODATA);
	if (ret)
		dev_err(ctrl->ctrl.device,
			"failed async event work\n");
}

static void
nvme_fc_complete_rq(struct request *rq)
{
	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
	struct nvme_fc_ctrl *ctrl = op->ctrl;

	atomic_set(&op->state, FCPOP_STATE_IDLE);

	nvme_fc_unmap_data(ctrl, rq, op);
	nvme_complete_rq(rq);
	nvme_fc_ctrl_put(ctrl);
}
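/*
 * Illustrative sketch, not part of the driver: the data-direction rule
 * used by nvme_fc_queue_rq() above.  Transfer is keyed off physical
 * segments rather than payload bytes, so a command like Write Zeroes
 * (non-zero payload_bytes but no segments) is issued as NODATA.
 */
static inline enum nvmefc_fcp_datadir __maybe_unused
nvme_fc_example_io_dir(struct request *rq)
{
	if (!blk_rq_nr_phys_segments(rq))
		return NVMEFC_FCP_NODATA;
	return (rq_data_dir(rq) == WRITE) ?
			NVMEFC_FCP_WRITE : NVMEFC_FCP_READ;
}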
/*
 * This routine is used by the transport when it needs to find active
 * io on a queue that is to be terminated. The transport uses
 * blk_mq_tagset_busy_iter() to find the busy requests, which then invoke
 * this routine to kill them on a 1 by 1 basis.
 *
 * As FC allocates FC exchange for each io, the transport must contact
 * the LLDD to terminate the exchange, thus releasing the FC exchange.
 * After terminating the exchange the LLDD will call the transport's
 * normal io done path for the request, but it will have an aborted
 * status. The done path will return the io request back to the block
 * layer with an error status.
 */
static bool
nvme_fc_terminate_exchange(struct request *req, void *data, bool reserved)
{
	struct nvme_ctrl *nctrl = data;
	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(req);

	__nvme_fc_abort_op(ctrl, op);
	return true;
}


static const struct blk_mq_ops nvme_fc_mq_ops = {
	.queue_rq	= nvme_fc_queue_rq,
	.complete	= nvme_fc_complete_rq,
	.init_request	= nvme_fc_init_request,
	.exit_request	= nvme_fc_exit_request,
	.init_hctx	= nvme_fc_init_hctx,
	.timeout	= nvme_fc_timeout,
};

static int
nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
{
	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
	unsigned int nr_io_queues;
	int ret;

	nr_io_queues = min(min(opts->nr_io_queues, num_online_cpus()),
				ctrl->lport->ops->max_hw_queues);
	ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
	if (ret) {
		dev_info(ctrl->ctrl.device,
			"set_queue_count failed: %d\n", ret);
		return ret;
	}

	ctrl->ctrl.queue_count = nr_io_queues + 1;
	if (!nr_io_queues)
		return 0;

	nvme_fc_init_io_queues(ctrl);

	memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
	ctrl->tag_set.ops = &nvme_fc_mq_ops;
	ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
	ctrl->tag_set.reserved_tags = 1; /* fabric connect */
	ctrl->tag_set.numa_node = ctrl->ctrl.numa_node;
	ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	ctrl->tag_set.cmd_size =
		struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv,
			    ctrl->lport->ops->fcprqst_priv_sz);
	ctrl->tag_set.driver_data = ctrl;
	ctrl->tag_set.nr_hw_queues = ctrl->ctrl.queue_count - 1;
	ctrl->tag_set.timeout = NVME_IO_TIMEOUT;

	ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
	if (ret)
		return ret;

	ctrl->ctrl.tagset = &ctrl->tag_set;

	ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
	if (IS_ERR(ctrl->ctrl.connect_q)) {
		ret = PTR_ERR(ctrl->ctrl.connect_q);
		goto out_free_tag_set;
	}

	ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
	if (ret)
		goto out_cleanup_blk_queue;

	ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
	if (ret)
		goto out_delete_hw_queues;

	ctrl->ioq_live = true;

	return 0;

out_delete_hw_queues:
	nvme_fc_delete_hw_io_queues(ctrl);
out_cleanup_blk_queue:
	blk_cleanup_queue(ctrl->ctrl.connect_q);
out_free_tag_set:
	blk_mq_free_tag_set(&ctrl->tag_set);
	nvme_fc_free_io_queues(ctrl);

	/* force put free routine to ignore io queues */
	ctrl->ctrl.tagset = NULL;

	return ret;
}
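/*
 * Illustrative sketch, not part of the driver: what the struct_size()
 * based cmd_size above amounts to.  Each block-layer request gets a
 * PDU holding the transport's nvme_fcp_op_w_sgl followed by the LLDD's
 * private scratch area of fcprqst_priv_sz bytes (the flexible 'priv'
 * array), so no separate per-io allocation is needed in the fast path.
 */
static inline size_t __maybe_unused
nvme_fc_example_cmd_size(size_t fcprqst_priv_sz)
{
	return sizeof(struct nvme_fcp_op_w_sgl) + fcprqst_priv_sz;
}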
%d\n", ret); 2498 return ret; 2499 } 2500 2501 if (!nr_io_queues && prior_ioq_cnt) { 2502 dev_info(ctrl->ctrl.device, 2503 "Fail Reconnect: At least 1 io queue " 2504 "required (was %d)\n", prior_ioq_cnt); 2505 return -ENOSPC; 2506 } 2507 2508 ctrl->ctrl.queue_count = nr_io_queues + 1; 2509 /* check for io queues existing */ 2510 if (ctrl->ctrl.queue_count == 1) 2511 return 0; 2512 2513 ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1); 2514 if (ret) 2515 goto out_free_io_queues; 2516 2517 ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1); 2518 if (ret) 2519 goto out_delete_hw_queues; 2520 2521 if (prior_ioq_cnt != nr_io_queues) 2522 dev_info(ctrl->ctrl.device, 2523 "reconnect: revising io queue count from %d to %d\n", 2524 prior_ioq_cnt, nr_io_queues); 2525 blk_mq_update_nr_hw_queues(&ctrl->tag_set, nr_io_queues); 2526 2527 return 0; 2528 2529 out_delete_hw_queues: 2530 nvme_fc_delete_hw_io_queues(ctrl); 2531 out_free_io_queues: 2532 nvme_fc_free_io_queues(ctrl); 2533 return ret; 2534 } 2535 2536 static void 2537 nvme_fc_rport_active_on_lport(struct nvme_fc_rport *rport) 2538 { 2539 struct nvme_fc_lport *lport = rport->lport; 2540 2541 atomic_inc(&lport->act_rport_cnt); 2542 } 2543 2544 static void 2545 nvme_fc_rport_inactive_on_lport(struct nvme_fc_rport *rport) 2546 { 2547 struct nvme_fc_lport *lport = rport->lport; 2548 u32 cnt; 2549 2550 cnt = atomic_dec_return(&lport->act_rport_cnt); 2551 if (cnt == 0 && lport->localport.port_state == FC_OBJSTATE_DELETED) 2552 lport->ops->localport_delete(&lport->localport); 2553 } 2554 2555 static int 2556 nvme_fc_ctlr_active_on_rport(struct nvme_fc_ctrl *ctrl) 2557 { 2558 struct nvme_fc_rport *rport = ctrl->rport; 2559 u32 cnt; 2560 2561 if (ctrl->assoc_active) 2562 return 1; 2563 2564 ctrl->assoc_active = true; 2565 cnt = atomic_inc_return(&rport->act_ctrl_cnt); 2566 if (cnt == 1) 2567 nvme_fc_rport_active_on_lport(rport); 2568 2569 return 0; 2570 } 2571 2572 static int 2573 nvme_fc_ctlr_inactive_on_rport(struct nvme_fc_ctrl *ctrl) 2574 { 2575 struct nvme_fc_rport *rport = ctrl->rport; 2576 struct nvme_fc_lport *lport = rport->lport; 2577 u32 cnt; 2578 2579 /* ctrl->assoc_active=false will be set independently */ 2580 2581 cnt = atomic_dec_return(&rport->act_ctrl_cnt); 2582 if (cnt == 0) { 2583 if (rport->remoteport.port_state == FC_OBJSTATE_DELETED) 2584 lport->ops->remoteport_delete(&rport->remoteport); 2585 nvme_fc_rport_inactive_on_lport(rport); 2586 } 2587 2588 return 0; 2589 } 2590 2591 /* 2592 * This routine restarts the controller on the host side, and 2593 * on the link side, recreates the controller association. 
 */
static int
nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
{
	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
	int ret;
	bool changed;

	++ctrl->ctrl.nr_reconnects;

	if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
		return -ENODEV;

	if (nvme_fc_ctlr_active_on_rport(ctrl))
		return -ENOTUNIQ;

	/*
	 * Create the admin queue
	 */

	ret = __nvme_fc_create_hw_queue(ctrl, &ctrl->queues[0], 0,
				NVME_AQ_DEPTH);
	if (ret)
		goto out_free_queue;

	ret = nvme_fc_connect_admin_queue(ctrl, &ctrl->queues[0],
				NVME_AQ_DEPTH, (NVME_AQ_DEPTH / 4));
	if (ret)
		goto out_delete_hw_queue;

	blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);

	ret = nvmf_connect_admin_queue(&ctrl->ctrl);
	if (ret)
		goto out_disconnect_admin_queue;

	set_bit(NVME_FC_Q_LIVE, &ctrl->queues[0].flags);

	/*
	 * Check controller capabilities
	 *
	 * todo:- add code to check if ctrl attributes changed from
	 * prior connection values
	 */

	ret = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->ctrl.cap);
	if (ret) {
		dev_err(ctrl->ctrl.device,
			"prop_get NVME_REG_CAP failed\n");
		goto out_disconnect_admin_queue;
	}

	ctrl->ctrl.sqsize =
		min_t(int, NVME_CAP_MQES(ctrl->ctrl.cap), ctrl->ctrl.sqsize);

	ret = nvme_enable_ctrl(&ctrl->ctrl, ctrl->ctrl.cap);
	if (ret)
		goto out_disconnect_admin_queue;

	ctrl->ctrl.max_hw_sectors =
		(ctrl->lport->ops->max_sgl_segments - 1) << (PAGE_SHIFT - 9);

	ret = nvme_init_identify(&ctrl->ctrl);
	if (ret)
		goto out_disconnect_admin_queue;

	/* sanity checks */

	/* FC-NVME does not have other data in the capsule */
	if (ctrl->ctrl.icdoff) {
		dev_err(ctrl->ctrl.device, "icdoff %d is not supported!\n",
				ctrl->ctrl.icdoff);
		goto out_disconnect_admin_queue;
	}

	/* FC-NVME supports normal SGL Data Block Descriptors */

	if (opts->queue_size > ctrl->ctrl.maxcmd) {
		/* warn if maxcmd is lower than queue_size */
		dev_warn(ctrl->ctrl.device,
			"queue_size %zu > ctrl maxcmd %u, reducing "
			"to queue_size\n",
			opts->queue_size, ctrl->ctrl.maxcmd);
		opts->queue_size = ctrl->ctrl.maxcmd;
	}

	if (opts->queue_size > ctrl->ctrl.sqsize + 1) {
		/* warn if sqsize is lower than queue_size */
		dev_warn(ctrl->ctrl.device,
			"queue_size %zu > ctrl sqsize %u, clamping down\n",
			opts->queue_size, ctrl->ctrl.sqsize + 1);
		opts->queue_size = ctrl->ctrl.sqsize + 1;
	}

	ret = nvme_fc_init_aen_ops(ctrl);
	if (ret)
		goto out_term_aen_ops;

	/*
	 * Create the io queues
	 */

	if (ctrl->ctrl.queue_count > 1) {
		if (!ctrl->ioq_live)
			ret = nvme_fc_create_io_queues(ctrl);
		else
			ret = nvme_fc_recreate_io_queues(ctrl);
		if (ret)
			goto out_term_aen_ops;
	}

	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);

	ctrl->ctrl.nr_reconnects = 0;

	if (changed)
		nvme_start_ctrl(&ctrl->ctrl);

	return 0;	/* Success */

out_term_aen_ops:
	nvme_fc_term_aen_ops(ctrl);
out_disconnect_admin_queue:
	/* send a Disconnect(association) LS to fc-nvme target */
	nvme_fc_xmt_disconnect_assoc(ctrl);
out_delete_hw_queue:
	__nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
out_free_queue:
	nvme_fc_free_queue(&ctrl->queues[0]);
	ctrl->assoc_active = false;
	nvme_fc_ctlr_inactive_on_rport(ctrl);

	return ret;
}

/*
 * This routine stops operation of the controller on the host side.
 * On the host os stack side: Admin and IO queues are stopped,
 *   outstanding ios on them terminated via FC ABTS.
 * On the link side: the association is terminated.
 */
static void
nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl)
{
	unsigned long flags;

	if (!ctrl->assoc_active)
		return;
	ctrl->assoc_active = false;

	spin_lock_irqsave(&ctrl->lock, flags);
	ctrl->flags |= FCCTRL_TERMIO;
	ctrl->iocnt = 0;
	spin_unlock_irqrestore(&ctrl->lock, flags);

	/*
	 * If io queues are present, stop them and terminate all outstanding
	 * ios on them. As FC allocates FC exchange for each io, the
	 * transport must contact the LLDD to terminate the exchange,
	 * thus releasing the FC exchange. We use blk_mq_tagset_busy_iter()
	 * to tell us what io's are busy and invoke a transport routine
	 * to kill them with the LLDD. After terminating the exchange
	 * the LLDD will call the transport's normal io done path, but it
	 * will have an aborted status. The done path will return the
	 * io requests back to the block layer as part of normal completions
	 * (but with error status).
	 */
	if (ctrl->ctrl.queue_count > 1) {
		nvme_stop_queues(&ctrl->ctrl);
		blk_mq_tagset_busy_iter(&ctrl->tag_set,
				nvme_fc_terminate_exchange, &ctrl->ctrl);
	}

	/*
	 * Other transports, which don't have link-level contexts bound
	 * to sqe's, would try to gracefully shutdown the controller by
	 * writing the registers for shutdown and polling (call
	 * nvme_shutdown_ctrl()). Given a bunch of i/o was potentially
	 * just aborted and we will wait on those contexts, and given
	 * there was no indication of how live the controller is on the
	 * link, don't send more io to create more contexts for the
	 * shutdown. Let the controller fail via keepalive failure if
	 * it's still present.
	 */

	/*
	 * clean up the admin queue. Same thing as above.
	 * use blk_mq_tagset_busy_iter() and the transport routine to
	 * terminate the exchanges.
	 */
	blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
	blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
				nvme_fc_terminate_exchange, &ctrl->ctrl);

	/* kill the aens as they are a separate path */
	nvme_fc_abort_aen_ops(ctrl);

	/* wait for all io that had to be aborted */
	spin_lock_irq(&ctrl->lock);
	wait_event_lock_irq(ctrl->ioabort_wait, ctrl->iocnt == 0, ctrl->lock);
	ctrl->flags &= ~FCCTRL_TERMIO;
	spin_unlock_irq(&ctrl->lock);

	nvme_fc_term_aen_ops(ctrl);

	/*
	 * send a Disconnect(association) LS to fc-nvme target
	 * Note: could have been sent at top of process, but
	 * cleaner on link traffic if after the aborts complete.
	 * Note: if association doesn't exist, association_id will be 0
	 */
	if (ctrl->association_id)
		nvme_fc_xmt_disconnect_assoc(ctrl);

	if (ctrl->ctrl.tagset) {
		nvme_fc_delete_hw_io_queues(ctrl);
		nvme_fc_free_io_queues(ctrl);
	}

	__nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
	nvme_fc_free_queue(&ctrl->queues[0]);

	/* re-enable the admin_q so anything new can fast fail */
	blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);

	/* resume the io queues so that things will fast fail */
	nvme_start_queues(&ctrl->ctrl);

	nvme_fc_ctlr_inactive_on_rport(ctrl);
}

static void
nvme_fc_delete_ctrl(struct nvme_ctrl *nctrl)
{
	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);

	cancel_work_sync(&ctrl->err_work);
	cancel_delayed_work_sync(&ctrl->connect_work);
	/*
	 * kill the association on the link side. this will block
	 * waiting for io to terminate
	 */
	nvme_fc_delete_association(ctrl);
}

static void
nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status)
{
	struct nvme_fc_rport *rport = ctrl->rport;
	struct nvme_fc_remote_port *portptr = &rport->remoteport;
	unsigned long recon_delay = ctrl->ctrl.opts->reconnect_delay * HZ;
	bool recon = true;

	if (ctrl->ctrl.state != NVME_CTRL_CONNECTING)
		return;

	if (portptr->port_state == FC_OBJSTATE_ONLINE)
		dev_info(ctrl->ctrl.device,
			"NVME-FC{%d}: reset: Reconnect attempt failed (%d)\n",
			ctrl->cnum, status);
	else if (time_after_eq(jiffies, rport->dev_loss_end))
		recon = false;

	if (recon && nvmf_should_reconnect(&ctrl->ctrl)) {
		if (portptr->port_state == FC_OBJSTATE_ONLINE)
			dev_info(ctrl->ctrl.device,
				"NVME-FC{%d}: Reconnect attempt in %ld "
				"seconds\n",
				ctrl->cnum, recon_delay / HZ);
		else if (time_after(jiffies + recon_delay, rport->dev_loss_end))
			recon_delay = rport->dev_loss_end - jiffies;

		queue_delayed_work(nvme_wq, &ctrl->connect_work, recon_delay);
	} else {
		if (portptr->port_state == FC_OBJSTATE_ONLINE)
			dev_warn(ctrl->ctrl.device,
				"NVME-FC{%d}: Max reconnect attempts (%d) "
				"reached.\n",
				ctrl->cnum, ctrl->ctrl.nr_reconnects);
		else
			dev_warn(ctrl->ctrl.device,
				"NVME-FC{%d}: dev_loss_tmo (%d) expired "
				"while waiting for remoteport connectivity.\n",
				ctrl->cnum, portptr->dev_loss_tmo);
		WARN_ON(nvme_delete_ctrl(&ctrl->ctrl));
	}
}

static void
__nvme_fc_terminate_io(struct nvme_fc_ctrl *ctrl)
{
	nvme_stop_keep_alive(&ctrl->ctrl);

	/* will block while waiting for io to terminate */
	nvme_fc_delete_association(ctrl);

	if (ctrl->ctrl.state != NVME_CTRL_CONNECTING &&
	    !nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING))
		dev_err(ctrl->ctrl.device,
			"NVME-FC{%d}: error_recovery: Couldn't change state "
			"to CONNECTING\n", ctrl->cnum);
}
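/*
 * Illustrative sketch, not part of the driver: the delay selection in
 * nvme_fc_reconnect_or_delete() above.  While the remote port is not
 * online, the reconnect delay is clamped so the next attempt is not
 * scheduled past the dev_loss_tmo deadline; e.g. a reconnect_delay of
 * 10s with 4s left in the dev_loss window queues the work in 4s.
 */
static inline unsigned long __maybe_unused
nvme_fc_example_clamp_recon_delay(unsigned long recon_delay,
		unsigned long dev_loss_end)
{
	if (time_after(jiffies + recon_delay, dev_loss_end))
		return dev_loss_end - jiffies;
	return recon_delay;
}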
static void
nvme_fc_reset_ctrl_work(struct work_struct *work)
{
	struct nvme_fc_ctrl *ctrl =
		container_of(work, struct nvme_fc_ctrl, ctrl.reset_work);
	int ret;

	__nvme_fc_terminate_io(ctrl);

	nvme_stop_ctrl(&ctrl->ctrl);

	if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE)
		ret = nvme_fc_create_association(ctrl);
	else
		ret = -ENOTCONN;

	if (ret)
		nvme_fc_reconnect_or_delete(ctrl, ret);
	else
		dev_info(ctrl->ctrl.device,
			"NVME-FC{%d}: controller reset complete\n",
			ctrl->cnum);
}

static void
nvme_fc_connect_err_work(struct work_struct *work)
{
	struct nvme_fc_ctrl *ctrl =
			container_of(work, struct nvme_fc_ctrl, err_work);

	__nvme_fc_terminate_io(ctrl);

	atomic_set(&ctrl->err_work_active, 0);

	/*
	 * Rescheduling the connection after recovering
	 * from the io error is left to the reconnect work
	 * item, which is what should have stalled waiting on
	 * the io that had the error that scheduled this work.
	 */
}

static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = {
	.name			= "fc",
	.module			= THIS_MODULE,
	.flags			= NVME_F_FABRICS,
	.reg_read32		= nvmf_reg_read32,
	.reg_read64		= nvmf_reg_read64,
	.reg_write32		= nvmf_reg_write32,
	.free_ctrl		= nvme_fc_nvme_ctrl_freed,
	.submit_async_event	= nvme_fc_submit_async_event,
	.delete_ctrl		= nvme_fc_delete_ctrl,
	.get_address		= nvmf_get_address,
};

static void
nvme_fc_connect_ctrl_work(struct work_struct *work)
{
	int ret;

	struct nvme_fc_ctrl *ctrl =
			container_of(to_delayed_work(work),
				struct nvme_fc_ctrl, connect_work);

	ret = nvme_fc_create_association(ctrl);
	if (ret)
		nvme_fc_reconnect_or_delete(ctrl, ret);
	else
		dev_info(ctrl->ctrl.device,
			"NVME-FC{%d}: controller connect complete\n",
			ctrl->cnum);
}


static const struct blk_mq_ops nvme_fc_admin_mq_ops = {
	.queue_rq	= nvme_fc_queue_rq,
	.complete	= nvme_fc_complete_rq,
	.init_request	= nvme_fc_init_request,
	.exit_request	= nvme_fc_exit_request,
	.init_hctx	= nvme_fc_init_admin_hctx,
	.timeout	= nvme_fc_timeout,
};


/*
 * Fails a controller request if it matches an existing controller
 * (association) with the same tuple:
 * <Host NQN, Host ID, local FC port, remote FC port, SUBSYS NQN>
 *
 * The ports don't need to be compared as they are intrinsically
 * already matched by the port pointers supplied.
 */
static bool
nvme_fc_existing_controller(struct nvme_fc_rport *rport,
		struct nvmf_ctrl_options *opts)
{
	struct nvme_fc_ctrl *ctrl;
	unsigned long flags;
	bool found = false;

	spin_lock_irqsave(&rport->lock, flags);
	list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) {
		found = nvmf_ctlr_matches_baseopts(&ctrl->ctrl, opts);
		if (found)
			break;
	}
	spin_unlock_irqrestore(&rport->lock, flags);

	return found;
}

static struct nvme_ctrl *
nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
	struct nvme_fc_lport *lport, struct nvme_fc_rport *rport)
{
	struct nvme_fc_ctrl *ctrl;
	unsigned long flags;
	int ret, idx;

	if (!(rport->remoteport.port_role &
	    (FC_PORT_ROLE_NVME_DISCOVERY | FC_PORT_ROLE_NVME_TARGET))) {
		ret = -EBADR;
		goto out_fail;
	}

	if (!opts->duplicate_connect &&
	    nvme_fc_existing_controller(rport, opts)) {
		ret = -EALREADY;
		goto out_fail;
	}

	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl) {
		ret = -ENOMEM;
		goto out_fail;
	}

	idx = ida_simple_get(&nvme_fc_ctrl_cnt, 0, 0, GFP_KERNEL);
	if (idx < 0) {
		ret = -ENOSPC;
		goto out_free_ctrl;
	}

	ctrl->ctrl.opts = opts;
	ctrl->ctrl.nr_reconnects = 0;
	if (lport->dev)
		ctrl->ctrl.numa_node = dev_to_node(lport->dev);
	else
		ctrl->ctrl.numa_node = NUMA_NO_NODE;
	INIT_LIST_HEAD(&ctrl->ctrl_list);
	ctrl->lport = lport;
	ctrl->rport = rport;
	ctrl->dev = lport->dev;
	ctrl->cnum = idx;
	ctrl->ioq_live = false;
	ctrl->assoc_active = false;
	atomic_set(&ctrl->err_work_active, 0);
	init_waitqueue_head(&ctrl->ioabort_wait);

	get_device(ctrl->dev);
	kref_init(&ctrl->ref);

	INIT_WORK(&ctrl->ctrl.reset_work, nvme_fc_reset_ctrl_work);
	INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work);
	INIT_WORK(&ctrl->err_work, nvme_fc_connect_err_work);
	spin_lock_init(&ctrl->lock);

	/* io queue count */
	ctrl->ctrl.queue_count = min_t(unsigned int,
				opts->nr_io_queues,
				lport->ops->max_hw_queues);
	ctrl->ctrl.queue_count++;	/* +1 for admin queue */

	ctrl->ctrl.sqsize = opts->queue_size - 1;
	ctrl->ctrl.kato = opts->kato;
	ctrl->ctrl.cntlid = 0xffff;

	ret = -ENOMEM;
	ctrl->queues = kcalloc(ctrl->ctrl.queue_count,
				sizeof(struct nvme_fc_queue), GFP_KERNEL);
	if (!ctrl->queues)
		goto out_free_ida;

	nvme_fc_init_queue(ctrl, 0);

	memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
	ctrl->admin_tag_set.ops = &nvme_fc_admin_mq_ops;
	ctrl->admin_tag_set.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
	ctrl->admin_tag_set.reserved_tags = 2; /* fabric connect + Keep-Alive */
	ctrl->admin_tag_set.numa_node = ctrl->ctrl.numa_node;
	ctrl->admin_tag_set.cmd_size =
		struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv,
			    ctrl->lport->ops->fcprqst_priv_sz);
	ctrl->admin_tag_set.driver_data = ctrl;
	ctrl->admin_tag_set.nr_hw_queues = 1;
	ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT;
	ctrl->admin_tag_set.flags = BLK_MQ_F_NO_SCHED;

	ret = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
	if (ret)
		goto out_free_queues;
	ctrl->ctrl.admin_tagset = &ctrl->admin_tag_set;

	ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
	if (IS_ERR(ctrl->ctrl.admin_q)) {
		ret = PTR_ERR(ctrl->ctrl.admin_q);
		goto out_free_admin_tag_set;
	}

	/*
	 * Would have been nice to init io queues tag set as well.
	 * However, we require interaction from the controller
	 * for max io queue count before we can do so.
	 * Defer this to the connect path.
	 */

	ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_fc_ctrl_ops, 0);
	if (ret)
		goto out_cleanup_admin_q;

	/* at this point, teardown path changes to ref counting on nvme ctrl */

	spin_lock_irqsave(&rport->lock, flags);
	list_add_tail(&ctrl->ctrl_list, &rport->ctrl_list);
	spin_unlock_irqrestore(&rport->lock, flags);

	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING) ||
	    !nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
		dev_err(ctrl->ctrl.device,
			"NVME-FC{%d}: failed to init ctrl state\n", ctrl->cnum);
		goto fail_ctrl;
	}

	nvme_get_ctrl(&ctrl->ctrl);

	if (!queue_delayed_work(nvme_wq, &ctrl->connect_work, 0)) {
		nvme_put_ctrl(&ctrl->ctrl);
		dev_err(ctrl->ctrl.device,
			"NVME-FC{%d}: failed to schedule initial connect\n",
			ctrl->cnum);
		goto fail_ctrl;
	}

	flush_delayed_work(&ctrl->connect_work);

	dev_info(ctrl->ctrl.device,
		"NVME-FC{%d}: new ctrl: NQN \"%s\"\n",
		ctrl->cnum, ctrl->ctrl.opts->subsysnqn);

	return &ctrl->ctrl;

fail_ctrl:
	nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING);
	cancel_work_sync(&ctrl->ctrl.reset_work);
	cancel_work_sync(&ctrl->err_work);
	cancel_delayed_work_sync(&ctrl->connect_work);

	ctrl->ctrl.opts = NULL;

	/* initiate nvme ctrl ref counting teardown */
	nvme_uninit_ctrl(&ctrl->ctrl);

	/* Remove core ctrl ref. */
	nvme_put_ctrl(&ctrl->ctrl);

	/* as we're past the point where we transition to the ref
	 * counting teardown path, if we return a bad pointer here,
	 * the calling routine, thinking it's prior to the
	 * transition, will do an rport put. Since the teardown
	 * path also does a rport put, we do an extra get here
	 * so proper order/teardown happens.
	 */
	nvme_fc_rport_get(rport);

	return ERR_PTR(-EIO);

out_cleanup_admin_q:
	blk_cleanup_queue(ctrl->ctrl.admin_q);
out_free_admin_tag_set:
	blk_mq_free_tag_set(&ctrl->admin_tag_set);
out_free_queues:
	kfree(ctrl->queues);
out_free_ida:
	put_device(ctrl->dev);
	ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum);
out_free_ctrl:
	kfree(ctrl);
out_fail:
	/* exit via here doesn't follow ctlr ref points */
	return ERR_PTR(ret);
}


struct nvmet_fc_traddr {
	u64	nn;
	u64	pn;
};

static int
__nvme_fc_parse_u64(substring_t *sstr, u64 *val)
{
	u64 token64;

	if (match_u64(sstr, &token64))
		return -EINVAL;
	*val = token64;

	return 0;
}

/*
 * This routine validates and extracts the WWN's from the TRADDR string.
 * As kernel parsers need the 0x to determine number base, universally
 * build string to parse with 0x prefix before parsing name strings.
 */
static int
nvme_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf, size_t blen)
{
	char name[2 + NVME_FC_TRADDR_HEXNAMELEN + 1];
	substring_t wwn = { name, &name[sizeof(name)-1] };
	int nnoffset, pnoffset;

	/* validate if string is one of the 2 allowed formats */
	if (strnlen(buf, blen) == NVME_FC_TRADDR_MAXLENGTH &&
			!strncmp(buf, "nn-0x", NVME_FC_TRADDR_OXNNLEN) &&
			!strncmp(&buf[NVME_FC_TRADDR_MAX_PN_OFFSET],
				"pn-0x", NVME_FC_TRADDR_OXNNLEN)) {
		nnoffset = NVME_FC_TRADDR_OXNNLEN;
		pnoffset = NVME_FC_TRADDR_MAX_PN_OFFSET +
						NVME_FC_TRADDR_OXNNLEN;
	} else if ((strnlen(buf, blen) == NVME_FC_TRADDR_MINLENGTH &&
			!strncmp(buf, "nn-", NVME_FC_TRADDR_NNLEN) &&
			!strncmp(&buf[NVME_FC_TRADDR_MIN_PN_OFFSET],
				"pn-", NVME_FC_TRADDR_NNLEN))) {
		nnoffset = NVME_FC_TRADDR_NNLEN;
		pnoffset = NVME_FC_TRADDR_MIN_PN_OFFSET + NVME_FC_TRADDR_NNLEN;
	} else
		goto out_einval;

	name[0] = '0';
	name[1] = 'x';
	name[2 + NVME_FC_TRADDR_HEXNAMELEN] = 0;

	memcpy(&name[2], &buf[nnoffset], NVME_FC_TRADDR_HEXNAMELEN);
	if (__nvme_fc_parse_u64(&wwn, &traddr->nn))
		goto out_einval;

	memcpy(&name[2], &buf[pnoffset], NVME_FC_TRADDR_HEXNAMELEN);
	if (__nvme_fc_parse_u64(&wwn, &traddr->pn))
		goto out_einval;

	return 0;

out_einval:
	pr_warn("%s: bad traddr string\n", __func__);
	return -EINVAL;
}

static struct nvme_ctrl *
nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts)
{
	struct nvme_fc_lport *lport;
	struct nvme_fc_rport *rport;
	struct nvme_ctrl *ctrl;
	struct nvmet_fc_traddr laddr = { 0L, 0L };
	struct nvmet_fc_traddr raddr = { 0L, 0L };
	unsigned long flags;
	int ret;

	ret = nvme_fc_parse_traddr(&raddr, opts->traddr, NVMF_TRADDR_SIZE);
	if (ret || !raddr.nn || !raddr.pn)
		return ERR_PTR(-EINVAL);

	ret = nvme_fc_parse_traddr(&laddr, opts->host_traddr, NVMF_TRADDR_SIZE);
	if (ret || !laddr.nn || !laddr.pn)
		return ERR_PTR(-EINVAL);

	/* find the host and remote ports to connect together */
	spin_lock_irqsave(&nvme_fc_lock, flags);
	list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
		if (lport->localport.node_name != laddr.nn ||
		    lport->localport.port_name != laddr.pn)
			continue;

		list_for_each_entry(rport, &lport->endp_list, endp_list) {
			if (rport->remoteport.node_name != raddr.nn ||
			    rport->remoteport.port_name != raddr.pn)
				continue;

			/* if fail to get reference fall through. Will error */
			if (!nvme_fc_rport_get(rport))
				break;

			spin_unlock_irqrestore(&nvme_fc_lock, flags);

			ctrl = nvme_fc_init_ctrl(dev, opts, lport, rport);
			if (IS_ERR(ctrl))
				nvme_fc_rport_put(rport);
			return ctrl;
		}
	}
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	pr_warn("%s: %s - %s combination not found\n",
		__func__, opts->traddr, opts->host_traddr);
	return ERR_PTR(-ENOENT);
}


static struct nvmf_transport_ops nvme_fc_transport = {
	.name		= "fc",
	.module		= THIS_MODULE,
	.required_opts	= NVMF_OPT_TRADDR | NVMF_OPT_HOST_TRADDR,
	.allowed_opts	= NVMF_OPT_RECONNECT_DELAY | NVMF_OPT_CTRL_LOSS_TMO,
	.create_ctrl	= nvme_fc_create_ctrl,
};
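/*
 * Illustrative note, not part of the driver: nvme_fc_parse_traddr()
 * above accepts the two traddr spellings shown below, with or without
 * the 0x prefix on each WWN.  The name strings are always rewritten
 * with a leading "0x" before being handed to match_u64(), which is
 * why the prefix is optional on input.  The WWNN/WWPN values here are
 * placeholders, not real ports.
 */
static const char nvme_fc_example_traddr_long[] __maybe_unused =
	"nn-0x20000090fa941dd9:pn-0x10000090fa941dd9";
static const char nvme_fc_example_traddr_short[] __maybe_unused =
	"nn-20000090fa941dd9:pn-10000090fa941dd9";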
/* Arbitrary successive failures max. With lots of subsystems could be high */
#define DISCOVERY_MAX_FAIL	20

static ssize_t nvme_fc_nvme_discovery_store(struct device *dev,
		       struct device_attribute *attr, const char *buf, size_t count)
{
	unsigned long flags;
	LIST_HEAD(local_disc_list);
	struct nvme_fc_lport *lport;
	struct nvme_fc_rport *rport;
	int failcnt = 0;

	spin_lock_irqsave(&nvme_fc_lock, flags);
restart:
	list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
		list_for_each_entry(rport, &lport->endp_list, endp_list) {
			if (!nvme_fc_lport_get(lport))
				continue;
			if (!nvme_fc_rport_get(rport)) {
				/*
				 * This is a temporary condition. Upon restart
				 * this rport will be gone from the list.
				 *
				 * Revert the lport put and retry.  Anything
				 * added to the list already will be skipped (as
				 * they are no longer list_empty).  Loops should
				 * resume at rports that were not yet seen.
				 */
				nvme_fc_lport_put(lport);

				if (failcnt++ < DISCOVERY_MAX_FAIL)
					goto restart;

				pr_err("nvme_discovery: too many reference "
				       "failures\n");
				goto process_local_list;
			}
			if (list_empty(&rport->disc_list))
				list_add_tail(&rport->disc_list,
					      &local_disc_list);
		}
	}

process_local_list:
	while (!list_empty(&local_disc_list)) {
		rport = list_first_entry(&local_disc_list,
					 struct nvme_fc_rport, disc_list);
		list_del_init(&rport->disc_list);
		spin_unlock_irqrestore(&nvme_fc_lock, flags);

		lport = rport->lport;
		/* signal discovery. Won't hurt if it repeats */
		nvme_fc_signal_discovery_scan(lport, rport);
		nvme_fc_rport_put(rport);
		nvme_fc_lport_put(lport);

		spin_lock_irqsave(&nvme_fc_lock, flags);
	}
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	return count;
}
static DEVICE_ATTR(nvme_discovery, 0200, NULL, nvme_fc_nvme_discovery_store);

static struct attribute *nvme_fc_attrs[] = {
	&dev_attr_nvme_discovery.attr,
	NULL
};

static struct attribute_group nvme_fc_attr_group = {
	.attrs = nvme_fc_attrs,
};

static const struct attribute_group *nvme_fc_attr_groups[] = {
	&nvme_fc_attr_group,
	NULL
};

static struct class fc_class = {
	.name = "fc",
	.dev_groups = nvme_fc_attr_groups,
	.owner = THIS_MODULE,
};

static int __init nvme_fc_init_module(void)
{
	int ret;

	nvme_fc_wq = alloc_workqueue("nvme_fc_wq", WQ_MEM_RECLAIM, 0);
	if (!nvme_fc_wq)
		return -ENOMEM;

	/*
	 * NOTE:
	 * It is expected that in the future the kernel will combine
	 * the FC-isms that are currently under scsi and now being
	 * added to by NVME into a new standalone FC class. The SCSI
	 * and NVME protocols and their devices would be under this
	 * new FC class.
	 *
	 * As we need something to post FC-specific udev events to,
	 * specifically for nvme probe events, start by creating the
	 * new device class.  When the new standalone FC class is
	 * put in place, this code will move to a more generic
	 * location for the class.
	 */
	ret = class_register(&fc_class);
	if (ret) {
		pr_err("couldn't register class fc\n");
		goto out_destroy_wq;
	}

	/*
	 * Create a device for the FC-centric udev events
	 */
	fc_udev_device = device_create(&fc_class, NULL, MKDEV(0, 0), NULL,
				"fc_udev_device");
	if (IS_ERR(fc_udev_device)) {
		pr_err("couldn't create fc_udev device!\n");
		ret = PTR_ERR(fc_udev_device);
		goto out_destroy_class;
	}

	ret = nvmf_register_transport(&nvme_fc_transport);
	if (ret)
		goto out_destroy_device;

	return 0;

out_destroy_device:
	device_destroy(&fc_class, MKDEV(0, 0));
out_destroy_class:
	class_unregister(&fc_class);
out_destroy_wq:
	destroy_workqueue(nvme_fc_wq);

	return ret;
}

static void __exit nvme_fc_exit_module(void)
{
	/* sanity check - all lports should be removed */
	if (!list_empty(&nvme_fc_lport_list))
		pr_warn("%s: localport list not empty\n", __func__);

	nvmf_unregister_transport(&nvme_fc_transport);

	ida_destroy(&nvme_fc_local_port_cnt);
	ida_destroy(&nvme_fc_ctrl_cnt);

	device_destroy(&fc_class, MKDEV(0, 0));
	class_unregister(&fc_class);
	destroy_workqueue(nvme_fc_wq);
}

module_init(nvme_fc_init_module);
module_exit(nvme_fc_exit_module);

MODULE_LICENSE("GPL v2");