/*
 * Common code for the NVMe target.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/random.h>
#include <linux/rculist.h>

#include "nvmet.h"

static struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX];
static DEFINE_IDA(cntlid_ida);

/*
 * This read/write semaphore is used to synchronize access to configuration
 * information on a target system that will result in discovery log page
 * information change for at least one host.
 * The full list of resources protected by this semaphore is:
 *
 *  - subsystems list
 *  - per-subsystem allowed hosts list
 *  - allow_any_host subsystem attribute
 *  - nvmet_genctr
 *  - the nvmet_transports array
 *
 * Hold the write lock when updating any of those lists/structures, and the
 * read lock when reading them (populating the discovery log page or checking
 * a host-subsystem link) so that readers can run concurrently.
 */
DECLARE_RWSEM(nvmet_config_sem);

static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
		const char *subsysnqn);

u16 nvmet_copy_to_sgl(struct nvmet_req *req, off_t off, const void *buf,
		size_t len)
{
	if (sg_pcopy_from_buffer(req->sg, req->sg_cnt, buf, len, off) != len)
		return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
	return 0;
}

u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf, size_t len)
{
	if (sg_pcopy_to_buffer(req->sg, req->sg_cnt, buf, len, off) != len)
		return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
	return 0;
}

static u32 nvmet_async_event_result(struct nvmet_async_event *aen)
{
	return aen->event_type | (aen->event_info << 8) | (aen->log_page << 16);
}

static void nvmet_async_events_free(struct nvmet_ctrl *ctrl)
{
	struct nvmet_req *req;

	while (1) {
		mutex_lock(&ctrl->lock);
		if (!ctrl->nr_async_event_cmds) {
			mutex_unlock(&ctrl->lock);
			return;
		}

		req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
		mutex_unlock(&ctrl->lock);
		nvmet_req_complete(req, NVME_SC_INTERNAL | NVME_SC_DNR);
	}
}

static void nvmet_async_event_work(struct work_struct *work)
{
	struct nvmet_ctrl *ctrl =
		container_of(work, struct nvmet_ctrl, async_event_work);
	struct nvmet_async_event *aen;
	struct nvmet_req *req;

	while (1) {
		mutex_lock(&ctrl->lock);
		aen = list_first_entry_or_null(&ctrl->async_events,
				struct nvmet_async_event, entry);
		if (!aen || !ctrl->nr_async_event_cmds) {
			mutex_unlock(&ctrl->lock);
			return;
		}

		req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
		nvmet_set_result(req, nvmet_async_event_result(aen));

		list_del(&aen->entry);
		kfree(aen);

		mutex_unlock(&ctrl->lock);
		nvmet_req_complete(req, 0);
	}
}

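/*
 * Queue an asynchronous event for this controller and kick the work item
 * that pairs pending events with outstanding Asynchronous Event Request
 * commands.  If the allocation fails the event is silently dropped.
 */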
static void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
		u8 event_info, u8 log_page)
{
	struct nvmet_async_event *aen;

	aen = kmalloc(sizeof(*aen), GFP_KERNEL);
	if (!aen)
		return;

	aen->event_type = event_type;
	aen->event_info = event_info;
	aen->log_page = log_page;

	mutex_lock(&ctrl->lock);
	list_add_tail(&aen->entry, &ctrl->async_events);
	mutex_unlock(&ctrl->lock);

	schedule_work(&ctrl->async_event_work);
}

int nvmet_register_transport(struct nvmet_fabrics_ops *ops)
{
	int ret = 0;

	down_write(&nvmet_config_sem);
	if (nvmet_transports[ops->type])
		ret = -EINVAL;
	else
		nvmet_transports[ops->type] = ops;
	up_write(&nvmet_config_sem);

	return ret;
}
EXPORT_SYMBOL_GPL(nvmet_register_transport);

void nvmet_unregister_transport(struct nvmet_fabrics_ops *ops)
{
	down_write(&nvmet_config_sem);
	nvmet_transports[ops->type] = NULL;
	up_write(&nvmet_config_sem);
}
EXPORT_SYMBOL_GPL(nvmet_unregister_transport);

int nvmet_enable_port(struct nvmet_port *port)
{
	struct nvmet_fabrics_ops *ops;
	int ret;

	lockdep_assert_held(&nvmet_config_sem);

	ops = nvmet_transports[port->disc_addr.trtype];
	if (!ops) {
		up_write(&nvmet_config_sem);
		request_module("nvmet-transport-%d", port->disc_addr.trtype);
		down_write(&nvmet_config_sem);
		ops = nvmet_transports[port->disc_addr.trtype];
		if (!ops) {
			pr_err("transport type %d not supported\n",
				port->disc_addr.trtype);
			return -EINVAL;
		}
	}

	if (!try_module_get(ops->owner))
		return -EINVAL;

	ret = ops->add_port(port);
	if (ret) {
		module_put(ops->owner);
		return ret;
	}

	port->enabled = true;
	return 0;
}

void nvmet_disable_port(struct nvmet_port *port)
{
	struct nvmet_fabrics_ops *ops;

	lockdep_assert_held(&nvmet_config_sem);

	port->enabled = false;

	ops = nvmet_transports[port->disc_addr.trtype];
	ops->remove_port(port);
	module_put(ops->owner);
}

static void nvmet_keep_alive_timer(struct work_struct *work)
{
	struct nvmet_ctrl *ctrl = container_of(to_delayed_work(work),
			struct nvmet_ctrl, ka_work);

	pr_err("ctrl %d keep-alive timer (%d seconds) expired!\n",
		ctrl->cntlid, ctrl->kato);

	nvmet_ctrl_fatal_error(ctrl);
}

static void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
{
	pr_debug("ctrl %d start keep-alive timer for %d secs\n",
		ctrl->cntlid, ctrl->kato);

	INIT_DELAYED_WORK(&ctrl->ka_work, nvmet_keep_alive_timer);
	schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
}

static void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl)
{
	pr_debug("ctrl %d stop keep-alive\n", ctrl->cntlid);

	cancel_delayed_work_sync(&ctrl->ka_work);
}

static struct nvmet_ns *__nvmet_find_namespace(struct nvmet_ctrl *ctrl,
		__le32 nsid)
{
	struct nvmet_ns *ns;

	list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) {
		if (ns->nsid == le32_to_cpu(nsid))
			return ns;
	}

	return NULL;
}

struct nvmet_ns *nvmet_find_namespace(struct nvmet_ctrl *ctrl, __le32 nsid)
{
	struct nvmet_ns *ns;

	rcu_read_lock();
	ns = __nvmet_find_namespace(ctrl, nsid);
	if (ns)
		percpu_ref_get(&ns->ref);
	rcu_read_unlock();

	return ns;
}

static void nvmet_destroy_namespace(struct percpu_ref *ref)
{
	struct nvmet_ns *ns = container_of(ref, struct nvmet_ns, ref);

	complete(&ns->disable_done);
}

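/*
 * Drop the reference taken in nvmet_find_namespace().  Once the ref has been
 * killed by nvmet_ns_disable(), the final put completes ->disable_done
 * through nvmet_destroy_namespace() above.
 */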
void nvmet_put_namespace(struct nvmet_ns *ns)
{
	percpu_ref_put(&ns->ref);
}

int nvmet_ns_enable(struct nvmet_ns *ns)
{
	struct nvmet_subsys *subsys = ns->subsys;
	struct nvmet_ctrl *ctrl;
	int ret = 0;

	mutex_lock(&subsys->lock);
	if (ns->enabled)
		goto out_unlock;

	ns->bdev = blkdev_get_by_path(ns->device_path, FMODE_READ | FMODE_WRITE,
			NULL);
	if (IS_ERR(ns->bdev)) {
		pr_err("failed to open block device %s: (%ld)\n",
		       ns->device_path, PTR_ERR(ns->bdev));
		ret = PTR_ERR(ns->bdev);
		ns->bdev = NULL;
		goto out_unlock;
	}

	ns->size = i_size_read(ns->bdev->bd_inode);
	ns->blksize_shift = blksize_bits(bdev_logical_block_size(ns->bdev));

	ret = percpu_ref_init(&ns->ref, nvmet_destroy_namespace,
				0, GFP_KERNEL);
	if (ret)
		goto out_blkdev_put;

	if (ns->nsid > subsys->max_nsid)
		subsys->max_nsid = ns->nsid;

	/*
	 * The namespaces list needs to be sorted to simplify the
	 * implementation of the Identify Namespace List subcommand.
	 */
	if (list_empty(&subsys->namespaces)) {
		list_add_tail_rcu(&ns->dev_link, &subsys->namespaces);
	} else {
		struct nvmet_ns *old;

		list_for_each_entry_rcu(old, &subsys->namespaces, dev_link) {
			BUG_ON(ns->nsid == old->nsid);
			if (ns->nsid < old->nsid)
				break;
		}

		list_add_tail_rcu(&ns->dev_link, &old->dev_link);
	}

	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
		nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE, 0, 0);

	ns->enabled = true;
	ret = 0;
out_unlock:
	mutex_unlock(&subsys->lock);
	return ret;
out_blkdev_put:
	blkdev_put(ns->bdev, FMODE_WRITE|FMODE_READ);
	ns->bdev = NULL;
	goto out_unlock;
}

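/*
 * The inverse of nvmet_ns_enable(): unlink the namespace from the subsystem,
 * wait for all in-flight references to drain and release the block device.
 */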
void nvmet_ns_disable(struct nvmet_ns *ns)
{
	struct nvmet_subsys *subsys = ns->subsys;
	struct nvmet_ctrl *ctrl;

	mutex_lock(&subsys->lock);
	if (!ns->enabled)
		goto out_unlock;

	ns->enabled = false;
	list_del_rcu(&ns->dev_link);
	mutex_unlock(&subsys->lock);

	/*
	 * Now that we removed the namespaces from the lookup list, we
	 * can kill the per_cpu ref and wait for any remaining references
	 * to be dropped, as well as an RCU grace period for anyone only
	 * using the namespace under rcu_read_lock().  Note that we can't
	 * use call_rcu here as we need to ensure the namespaces have
	 * been fully destroyed before unloading the module.
	 */
	percpu_ref_kill(&ns->ref);
	synchronize_rcu();
	wait_for_completion(&ns->disable_done);
	percpu_ref_exit(&ns->ref);

	mutex_lock(&subsys->lock);
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
		nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE, 0, 0);

	if (ns->bdev)
		blkdev_put(ns->bdev, FMODE_WRITE|FMODE_READ);
out_unlock:
	mutex_unlock(&subsys->lock);
}

void nvmet_ns_free(struct nvmet_ns *ns)
{
	nvmet_ns_disable(ns);

	kfree(ns->device_path);
	kfree(ns);
}

struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid)
{
	struct nvmet_ns *ns;

	ns = kzalloc(sizeof(*ns), GFP_KERNEL);
	if (!ns)
		return NULL;

	INIT_LIST_HEAD(&ns->dev_link);
	init_completion(&ns->disable_done);

	ns->nsid = nsid;
	ns->subsys = subsys;
	uuid_gen(&ns->uuid);

	return ns;
}

static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
{
	if (status)
		nvmet_set_status(req, status);

	/* XXX: need to fill in something useful for sq_head */
	req->rsp->sq_head = 0;
	if (likely(req->sq)) /* may happen during early failure */
		req->rsp->sq_id = cpu_to_le16(req->sq->qid);
	req->rsp->command_id = req->cmd->common.command_id;

	if (req->ns)
		nvmet_put_namespace(req->ns);
	req->ops->queue_response(req);
}

void nvmet_req_complete(struct nvmet_req *req, u16 status)
{
	__nvmet_req_complete(req, status);
	percpu_ref_put(&req->sq->ref);
}
EXPORT_SYMBOL_GPL(nvmet_req_complete);

void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq,
		u16 qid, u16 size)
{
	cq->qid = qid;
	cq->size = size;

	ctrl->cqs[qid] = cq;
}

void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq,
		u16 qid, u16 size)
{
	sq->qid = qid;
	sq->size = size;

	ctrl->sqs[qid] = sq;
}

static void nvmet_confirm_sq(struct percpu_ref *ref)
{
	struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref);

	complete(&sq->confirm_done);
}

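/*
 * Tear down a submission queue.  Every in-flight request holds a reference
 * on sq->ref (taken in nvmet_req_init()), so killing the ref and waiting
 * for ->free_done guarantees all outstanding requests have completed.
 */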
void nvmet_sq_destroy(struct nvmet_sq *sq)
{
	/*
	 * If this is the admin queue, complete all AERs so that our
	 * queue doesn't have outstanding requests on it.
	 */
	if (sq->ctrl && sq->ctrl->sqs && sq->ctrl->sqs[0] == sq)
		nvmet_async_events_free(sq->ctrl);
	percpu_ref_kill_and_confirm(&sq->ref, nvmet_confirm_sq);
	wait_for_completion(&sq->confirm_done);
	wait_for_completion(&sq->free_done);
	percpu_ref_exit(&sq->ref);

	if (sq->ctrl) {
		nvmet_ctrl_put(sq->ctrl);
		sq->ctrl = NULL; /* allows reusing the queue later */
	}
}
EXPORT_SYMBOL_GPL(nvmet_sq_destroy);

static void nvmet_sq_free(struct percpu_ref *ref)
{
	struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref);

	complete(&sq->free_done);
}

int nvmet_sq_init(struct nvmet_sq *sq)
{
	int ret;

	ret = percpu_ref_init(&sq->ref, nvmet_sq_free, 0, GFP_KERNEL);
	if (ret) {
		pr_err("percpu_ref init failed!\n");
		return ret;
	}
	init_completion(&sq->free_done);
	init_completion(&sq->confirm_done);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmet_sq_init);

bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
		struct nvmet_sq *sq, struct nvmet_fabrics_ops *ops)
{
	u8 flags = req->cmd->common.flags;
	u16 status;

	req->cq = cq;
	req->sq = sq;
	req->ops = ops;
	req->sg = NULL;
	req->sg_cnt = 0;
	req->rsp->status = 0;

	/* no support for fused commands yet */
	if (unlikely(flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND))) {
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto fail;
	}

	/* either variant of SGLs is fine, as we don't support metadata */
	if (unlikely((flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METABUF &&
		     (flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METASEG)) {
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto fail;
	}

	if (unlikely(!req->sq->ctrl))
		/* will return an error for any non-Connect command: */
		status = nvmet_parse_connect_cmd(req);
	else if (likely(req->sq->qid != 0))
		status = nvmet_parse_io_cmd(req);
	else if (req->cmd->common.opcode == nvme_fabrics_command)
		status = nvmet_parse_fabrics_cmd(req);
	else if (req->sq->ctrl->subsys->type == NVME_NQN_DISC)
		status = nvmet_parse_discovery_cmd(req);
	else
		status = nvmet_parse_admin_cmd(req);

	if (status)
		goto fail;

	if (unlikely(!percpu_ref_tryget_live(&sq->ref))) {
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto fail;
	}

	return true;

fail:
	__nvmet_req_complete(req, status);
	return false;
}
EXPORT_SYMBOL_GPL(nvmet_req_init);

void nvmet_req_uninit(struct nvmet_req *req)
{
	percpu_ref_put(&req->sq->ref);
}
EXPORT_SYMBOL_GPL(nvmet_req_uninit);

static inline bool nvmet_cc_en(u32 cc)
{
	return (cc >> NVME_CC_EN_SHIFT) & 0x1;
}

static inline u8 nvmet_cc_css(u32 cc)
{
	return (cc >> NVME_CC_CSS_SHIFT) & 0x7;
}

static inline u8 nvmet_cc_mps(u32 cc)
{
	return (cc >> NVME_CC_MPS_SHIFT) & 0xf;
}

static inline u8 nvmet_cc_ams(u32 cc)
{
	return (cc >> NVME_CC_AMS_SHIFT) & 0x7;
}

static inline u8 nvmet_cc_shn(u32 cc)
{
	return (cc >> NVME_CC_SHN_SHIFT) & 0x3;
}

static inline u8 nvmet_cc_iosqes(u32 cc)
{
	return (cc >> NVME_CC_IOSQES_SHIFT) & 0xf;
}

static inline u8 nvmet_cc_iocqes(u32 cc)
{
	return (cc >> NVME_CC_IOCQES_SHIFT) & 0xf;
}

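/*
 * Called when the host sets CC.EN.  Only the NVM command set with MPS 0,
 * round-robin arbitration and the spec-mandated queue entry sizes is
 * supported; anything else raises CSTS.CFS instead of reporting ready.
 */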
static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl)
{
	lockdep_assert_held(&ctrl->lock);

	if (nvmet_cc_iosqes(ctrl->cc) != NVME_NVM_IOSQES ||
	    nvmet_cc_iocqes(ctrl->cc) != NVME_NVM_IOCQES ||
	    nvmet_cc_mps(ctrl->cc) != 0 ||
	    nvmet_cc_ams(ctrl->cc) != 0 ||
	    nvmet_cc_css(ctrl->cc) != 0) {
		ctrl->csts = NVME_CSTS_CFS;
		return;
	}

	ctrl->csts = NVME_CSTS_RDY;
}

static void nvmet_clear_ctrl(struct nvmet_ctrl *ctrl)
{
	lockdep_assert_held(&ctrl->lock);

	/* XXX: tear down queues? */
	ctrl->csts &= ~NVME_CSTS_RDY;
	ctrl->cc = 0;
}

void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new)
{
	u32 old;

	mutex_lock(&ctrl->lock);
	old = ctrl->cc;
	ctrl->cc = new;

	if (nvmet_cc_en(new) && !nvmet_cc_en(old))
		nvmet_start_ctrl(ctrl);
	if (!nvmet_cc_en(new) && nvmet_cc_en(old))
		nvmet_clear_ctrl(ctrl);
	if (nvmet_cc_shn(new) && !nvmet_cc_shn(old)) {
		nvmet_clear_ctrl(ctrl);
		ctrl->csts |= NVME_CSTS_SHST_CMPLT;
	}
	if (!nvmet_cc_shn(new) && nvmet_cc_shn(old))
		ctrl->csts &= ~NVME_CSTS_SHST_CMPLT;
	mutex_unlock(&ctrl->lock);
}

static void nvmet_init_cap(struct nvmet_ctrl *ctrl)
{
	/* command sets supported: NVMe command set: */
	ctrl->cap = (1ULL << 37);
	/* CC.EN timeout in 500msec units: */
	ctrl->cap |= (15ULL << 24);
	/* maximum queue entries supported: */
	ctrl->cap |= NVMET_QUEUE_SIZE - 1;
}

u16 nvmet_ctrl_find_get(const char *subsysnqn, const char *hostnqn, u16 cntlid,
		struct nvmet_req *req, struct nvmet_ctrl **ret)
{
	struct nvmet_subsys *subsys;
	struct nvmet_ctrl *ctrl;
	u16 status = 0;

	subsys = nvmet_find_get_subsys(req->port, subsysnqn);
	if (!subsys) {
		pr_warn("connect request for invalid subsystem %s!\n",
			subsysnqn);
		req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
		return NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
	}

	mutex_lock(&subsys->lock);
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
		if (ctrl->cntlid == cntlid) {
			if (strncmp(hostnqn, ctrl->hostnqn, NVMF_NQN_SIZE)) {
				pr_warn("hostnqn mismatch.\n");
				continue;
			}
			if (!kref_get_unless_zero(&ctrl->ref))
				continue;

			*ret = ctrl;
			goto out;
		}
	}

	pr_warn("could not find controller %d for subsys %s / host %s\n",
		cntlid, subsysnqn, hostnqn);
	req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
	status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;

out:
	mutex_unlock(&subsys->lock);
	nvmet_subsys_put(subsys);
	return status;
}

u16 nvmet_check_ctrl_status(struct nvmet_req *req, struct nvme_command *cmd)
{
	if (unlikely(!(req->sq->ctrl->cc & NVME_CC_ENABLE))) {
		pr_err("got io cmd %d while CC.EN == 0 on qid = %d\n",
		       cmd->common.opcode, req->sq->qid);
		return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
	}

	if (unlikely(!(req->sq->ctrl->csts & NVME_CSTS_RDY))) {
		pr_err("got io cmd %d while CSTS.RDY == 0 on qid = %d\n",
		       cmd->common.opcode, req->sq->qid);
		req->ns = NULL;
		return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
	}
	return 0;
}

static bool __nvmet_host_allowed(struct nvmet_subsys *subsys,
		const char *hostnqn)
{
	struct nvmet_host_link *p;

	if (subsys->allow_any_host)
		return true;

	list_for_each_entry(p, &subsys->hosts, entry) {
		if (!strcmp(nvmet_host_name(p->host), hostnqn))
			return true;
	}

	return false;
}

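/*
 * A host may access the discovery subsystem if it is allowed to access at
 * least one of the subsystems exported through the port it connected on.
 */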
static bool nvmet_host_discovery_allowed(struct nvmet_req *req,
		const char *hostnqn)
{
	struct nvmet_subsys_link *s;

	list_for_each_entry(s, &req->port->subsystems, entry) {
		if (__nvmet_host_allowed(s->subsys, hostnqn))
			return true;
	}

	return false;
}

bool nvmet_host_allowed(struct nvmet_req *req, struct nvmet_subsys *subsys,
		const char *hostnqn)
{
	lockdep_assert_held(&nvmet_config_sem);

	if (subsys->type == NVME_NQN_DISC)
		return nvmet_host_discovery_allowed(req, hostnqn);
	else
		return __nvmet_host_allowed(subsys, hostnqn);
}

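/*
 * Allocate and set up a controller for a Connect command: validate host
 * access, allocate the queue arrays and a controller ID, and start the
 * keep-alive timer.  Returns an NVMe status code; on success *ctrlp is the
 * new controller with an initial reference held.
 */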
u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
		struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp)
{
	struct nvmet_subsys *subsys;
	struct nvmet_ctrl *ctrl;
	int ret;
	u16 status;

	status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
	subsys = nvmet_find_get_subsys(req->port, subsysnqn);
	if (!subsys) {
		pr_warn("connect request for invalid subsystem %s!\n",
			subsysnqn);
		req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
		goto out;
	}

	status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
	down_read(&nvmet_config_sem);
	if (!nvmet_host_allowed(req, subsys, hostnqn)) {
		pr_info("connect by host %s for subsystem %s not allowed\n",
			hostnqn, subsysnqn);
		req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(hostnqn);
		up_read(&nvmet_config_sem);
		status = NVME_SC_CONNECT_INVALID_HOST | NVME_SC_DNR;
		goto out_put_subsystem;
	}
	up_read(&nvmet_config_sem);

	status = NVME_SC_INTERNAL;
	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		goto out_put_subsystem;
	mutex_init(&ctrl->lock);

	nvmet_init_cap(ctrl);

	INIT_WORK(&ctrl->async_event_work, nvmet_async_event_work);
	INIT_LIST_HEAD(&ctrl->async_events);

	memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE);
	memcpy(ctrl->hostnqn, hostnqn, NVMF_NQN_SIZE);

	kref_init(&ctrl->ref);
	ctrl->subsys = subsys;

	ctrl->cqs = kcalloc(subsys->max_qid + 1,
			sizeof(struct nvmet_cq *),
			GFP_KERNEL);
	if (!ctrl->cqs)
		goto out_free_ctrl;

	ctrl->sqs = kcalloc(subsys->max_qid + 1,
			sizeof(struct nvmet_sq *),
			GFP_KERNEL);
	if (!ctrl->sqs)
		goto out_free_cqs;

	ret = ida_simple_get(&cntlid_ida,
			     NVME_CNTLID_MIN, NVME_CNTLID_MAX,
			     GFP_KERNEL);
	if (ret < 0) {
		status = NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR;
		goto out_free_sqs;
	}
	ctrl->cntlid = ret;

	ctrl->ops = req->ops;
	if (ctrl->subsys->type == NVME_NQN_DISC) {
		/* Don't accept keep-alive timeout for discovery controllers */
		if (kato) {
			status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
			goto out_free_sqs;
		}

		/*
		 * Discovery controllers use some arbitrary high value in order
		 * to clean up stale discovery sessions.
		 *
		 * From the latest base diff RC:
		 * "The Keep Alive command is not supported by
		 * Discovery controllers. A transport may specify a
		 * fixed Discovery controller activity timeout value
		 * (e.g., 2 minutes). If no commands are received
		 * by a Discovery controller within that time
		 * period, the controller may perform the
		 * actions for Keep Alive Timer expiration".
		 */
		ctrl->kato = NVMET_DISC_KATO;
	} else {
		/* keep-alive timeout in seconds */
		ctrl->kato = DIV_ROUND_UP(kato, 1000);
	}
	nvmet_start_keep_alive_timer(ctrl);

	mutex_lock(&subsys->lock);
	list_add_tail(&ctrl->subsys_entry, &subsys->ctrls);
	mutex_unlock(&subsys->lock);

	*ctrlp = ctrl;
	return 0;

out_free_sqs:
	kfree(ctrl->sqs);
out_free_cqs:
	kfree(ctrl->cqs);
out_free_ctrl:
	kfree(ctrl);
out_put_subsystem:
	nvmet_subsys_put(subsys);
out:
	return status;
}

static void nvmet_ctrl_free(struct kref *ref)
{
	struct nvmet_ctrl *ctrl = container_of(ref, struct nvmet_ctrl, ref);
	struct nvmet_subsys *subsys = ctrl->subsys;

	nvmet_stop_keep_alive_timer(ctrl);

	mutex_lock(&subsys->lock);
	list_del(&ctrl->subsys_entry);
	mutex_unlock(&subsys->lock);

	flush_work(&ctrl->async_event_work);
	cancel_work_sync(&ctrl->fatal_err_work);

	ida_simple_remove(&cntlid_ida, ctrl->cntlid);
	nvmet_subsys_put(subsys);

	kfree(ctrl->sqs);
	kfree(ctrl->cqs);
	kfree(ctrl);
}

void nvmet_ctrl_put(struct nvmet_ctrl *ctrl)
{
	kref_put(&ctrl->ref, nvmet_ctrl_free);
}

static void nvmet_fatal_error_handler(struct work_struct *work)
{
	struct nvmet_ctrl *ctrl =
			container_of(work, struct nvmet_ctrl, fatal_err_work);

	pr_err("ctrl %d fatal error occurred!\n", ctrl->cntlid);
	ctrl->ops->delete_ctrl(ctrl);
}

void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl)
{
	mutex_lock(&ctrl->lock);
	if (!(ctrl->csts & NVME_CSTS_CFS)) {
		ctrl->csts |= NVME_CSTS_CFS;
		INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler);
		schedule_work(&ctrl->fatal_err_work);
	}
	mutex_unlock(&ctrl->lock);
}
EXPORT_SYMBOL_GPL(nvmet_ctrl_fatal_error);

static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
		const char *subsysnqn)
{
	struct nvmet_subsys_link *p;

	if (!port)
		return NULL;

	if (!strncmp(NVME_DISC_SUBSYS_NAME, subsysnqn,
			NVMF_NQN_SIZE)) {
		if (!kref_get_unless_zero(&nvmet_disc_subsys->ref))
			return NULL;
		return nvmet_disc_subsys;
	}

	down_read(&nvmet_config_sem);
	list_for_each_entry(p, &port->subsystems, entry) {
		if (!strncmp(p->subsys->subsysnqn, subsysnqn,
				NVMF_NQN_SIZE)) {
			if (!kref_get_unless_zero(&p->subsys->ref))
				break;
			up_read(&nvmet_config_sem);
			return p->subsys;
		}
	}
	up_read(&nvmet_config_sem);
	return NULL;
}

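/*
 * Allocate a subsystem.  Discovery subsystems have no I/O queues
 * (max_qid = 0), while NVM subsystems support up to NVMET_NR_QUEUES.
 */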
struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
		enum nvme_subsys_type type)
{
	struct nvmet_subsys *subsys;

	subsys = kzalloc(sizeof(*subsys), GFP_KERNEL);
	if (!subsys)
		return NULL;

	subsys->ver = NVME_VS(1, 3, 0); /* NVMe 1.3.0 */
	/* generate a random serial number as our controllers are ephemeral: */
	get_random_bytes(&subsys->serial, sizeof(subsys->serial));

	switch (type) {
	case NVME_NQN_NVME:
		subsys->max_qid = NVMET_NR_QUEUES;
		break;
	case NVME_NQN_DISC:
		subsys->max_qid = 0;
		break;
	default:
		pr_err("%s: Unknown Subsystem type - %d\n", __func__, type);
		kfree(subsys);
		return NULL;
	}
	subsys->type = type;
	subsys->subsysnqn = kstrndup(subsysnqn, NVMF_NQN_SIZE,
			GFP_KERNEL);
	if (!subsys->subsysnqn) {
		kfree(subsys);
		return NULL;
	}

	kref_init(&subsys->ref);

	mutex_init(&subsys->lock);
	INIT_LIST_HEAD(&subsys->namespaces);
	INIT_LIST_HEAD(&subsys->ctrls);
	INIT_LIST_HEAD(&subsys->hosts);

	return subsys;
}

static void nvmet_subsys_free(struct kref *ref)
{
	struct nvmet_subsys *subsys =
		container_of(ref, struct nvmet_subsys, ref);

	WARN_ON_ONCE(!list_empty(&subsys->namespaces));

	kfree(subsys->subsysnqn);
	kfree(subsys);
}

void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys)
{
	struct nvmet_ctrl *ctrl;

	mutex_lock(&subsys->lock);
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
		ctrl->ops->delete_ctrl(ctrl);
	mutex_unlock(&subsys->lock);
}

void nvmet_subsys_put(struct nvmet_subsys *subsys)
{
	kref_put(&subsys->ref, nvmet_subsys_free);
}

static int __init nvmet_init(void)
{
	int error;

	error = nvmet_init_discovery();
	if (error)
		goto out;

	error = nvmet_init_configfs();
	if (error)
		goto out_exit_discovery;
	return 0;

out_exit_discovery:
	nvmet_exit_discovery();
out:
	return error;
}

static void __exit nvmet_exit(void)
{
	nvmet_exit_configfs();
	nvmet_exit_discovery();
	ida_destroy(&cntlid_ida);

	BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_entry) != 1024);
	BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_hdr) != 1024);
}

module_init(nvmet_init);
module_exit(nvmet_exit);

MODULE_LICENSE("GPL v2");