loop.c (b7405176b58aa475354f3c0d2ca1c560e9354288, before) | loop.c (fe4a97918de02d5c656f29664770e335df12e090, after) |
---|---|
1/* 2 * NVMe over Fabrics loopback device. 3 * Copyright (c) 2015-2016 HGST, a Western Digital Company. 4 * 5 * This program is free software; you can redistribute it and/or modify it 6 * under the terms and conditions of the GNU General Public License, 7 * version 2, as published by the Free Software Foundation. 8 * --- 31 unchanged lines hidden --- 40 struct blk_mq_tag_set admin_tag_set; 41 42 struct list_head list; 43 struct blk_mq_tag_set tag_set; 44 struct nvme_loop_iod async_event_iod; 45 struct nvme_ctrl ctrl; 46 47 struct nvmet_ctrl *target_ctrl; | 1/* 2 * NVMe over Fabrics loopback device. 3 * Copyright (c) 2015-2016 HGST, a Western Digital Company. 4 * 5 * This program is free software; you can redistribute it and/or modify it 6 * under the terms and conditions of the GNU General Public License, 7 * version 2, as published by the Free Software Foundation. 8 * --- 31 unchanged lines hidden --- 40 struct blk_mq_tag_set admin_tag_set; 41 42 struct list_head list; 43 struct blk_mq_tag_set tag_set; 44 struct nvme_loop_iod async_event_iod; 45 struct nvme_ctrl ctrl; 46 47 struct nvmet_ctrl *target_ctrl; |
 | 48 struct nvmet_port *port; |
48}; 49 50static inline struct nvme_loop_ctrl *to_loop_ctrl(struct nvme_ctrl *ctrl) 51{ 52 return container_of(ctrl, struct nvme_loop_ctrl, ctrl); 53} 54 55enum nvme_loop_queue_flags { 56 NVME_LOOP_Q_LIVE = 0, 57}; 58 59struct nvme_loop_queue { 60 struct nvmet_cq nvme_cq; 61 struct nvmet_sq nvme_sq; 62 struct nvme_loop_ctrl *ctrl; 63 unsigned long flags; 64}; 65 | 49}; 50 51static inline struct nvme_loop_ctrl *to_loop_ctrl(struct nvme_ctrl *ctrl) 52{ 53 return container_of(ctrl, struct nvme_loop_ctrl, ctrl); 54} 55 56enum nvme_loop_queue_flags { 57 NVME_LOOP_Q_LIVE = 0, 58}; 59 60struct nvme_loop_queue { 61 struct nvmet_cq nvme_cq; 62 struct nvmet_sq nvme_sq; 63 struct nvme_loop_ctrl *ctrl; 64 unsigned long flags; 65}; 66 |
66static struct nvmet_port *nvmet_loop_port; | 67static LIST_HEAD(nvme_loop_ports); 68static DEFINE_MUTEX(nvme_loop_ports_mutex); |
67 68static LIST_HEAD(nvme_loop_ctrl_list); 69static DEFINE_MUTEX(nvme_loop_ctrl_mutex); 70 71static void nvme_loop_queue_response(struct nvmet_req *nvme_req); 72static void nvme_loop_delete_ctrl(struct nvmet_ctrl *ctrl); 73 74static const struct nvmet_fabrics_ops nvme_loop_ops; --- 89 unchanged lines hidden --- 164 return ret; 165 166 ret = nvme_setup_cmd(ns, req, &iod->cmd); 167 if (ret) 168 return ret; 169 170 blk_mq_start_request(req); 171 iod->cmd.common.flags |= NVME_CMD_SGL_METABUF; | 69 70static LIST_HEAD(nvme_loop_ctrl_list); 71static DEFINE_MUTEX(nvme_loop_ctrl_mutex); 72 73static void nvme_loop_queue_response(struct nvmet_req *nvme_req); 74static void nvme_loop_delete_ctrl(struct nvmet_ctrl *ctrl); 75 76static const struct nvmet_fabrics_ops nvme_loop_ops; --- 89 unchanged lines hidden --- 166 return ret; 167 168 ret = nvme_setup_cmd(ns, req, &iod->cmd); 169 if (ret) 170 return ret; 171 172 blk_mq_start_request(req); 173 iod->cmd.common.flags |= NVME_CMD_SGL_METABUF; |
172 iod->req.port = nvmet_loop_port; | 174 iod->req.port = queue->ctrl->port; |
173 if (!nvmet_req_init(&iod->req, &queue->nvme_cq, 174 &queue->nvme_sq, &nvme_loop_ops)) 175 return BLK_STS_OK; 176 177 if (blk_rq_nr_phys_segments(req)) { 178 iod->sg_table.sgl = iod->first_sgl; 179 if (sg_alloc_table_chained(&iod->sg_table, 180 blk_rq_nr_phys_segments(req), --- 331 unchanged lines hidden --- 512 .module = THIS_MODULE, 513 .flags = NVME_F_FABRICS, 514 .reg_read32 = nvmf_reg_read32, 515 .reg_read64 = nvmf_reg_read64, 516 .reg_write32 = nvmf_reg_write32, 517 .free_ctrl = nvme_loop_free_ctrl, 518 .submit_async_event = nvme_loop_submit_async_event, 519 .delete_ctrl = nvme_loop_delete_ctrl_host, | 175 if (!nvmet_req_init(&iod->req, &queue->nvme_cq, 176 &queue->nvme_sq, &nvme_loop_ops)) 177 return BLK_STS_OK; 178 179 if (blk_rq_nr_phys_segments(req)) { 180 iod->sg_table.sgl = iod->first_sgl; 181 if (sg_alloc_table_chained(&iod->sg_table, 182 blk_rq_nr_phys_segments(req), --- 331 unchanged lines hidden --- 514 .module = THIS_MODULE, 515 .flags = NVME_F_FABRICS, 516 .reg_read32 = nvmf_reg_read32, 517 .reg_read64 = nvmf_reg_read64, 518 .reg_write32 = nvmf_reg_write32, 519 .free_ctrl = nvme_loop_free_ctrl, 520 .submit_async_event = nvme_loop_submit_async_event, 521 .delete_ctrl = nvme_loop_delete_ctrl_host, |
 | 522 .get_address = nvmf_get_address, |
520}; 521 522static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl) 523{ 524 int ret; 525 526 ret = nvme_loop_init_io_queues(ctrl); 527 if (ret) --- 32 unchanged lines hidden --- 560 blk_cleanup_queue(ctrl->ctrl.connect_q); 561out_free_tagset: 562 blk_mq_free_tag_set(&ctrl->tag_set); 563out_destroy_queues: 564 nvme_loop_destroy_io_queues(ctrl); 565 return ret; 566} 567 | 523}; 524 525static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl) 526{ 527 int ret; 528 529 ret = nvme_loop_init_io_queues(ctrl); 530 if (ret) --- 32 unchanged lines hidden --- 563 blk_cleanup_queue(ctrl->ctrl.connect_q); 564out_free_tagset: 565 blk_mq_free_tag_set(&ctrl->tag_set); 566out_destroy_queues: 567 nvme_loop_destroy_io_queues(ctrl); 568 return ret; 569} 570 |
 | 571static struct nvmet_port *nvme_loop_find_port(struct nvme_ctrl *ctrl) 572{ 573 struct nvmet_port *p, *found = NULL; 574 575 mutex_lock(&nvme_loop_ports_mutex); 576 list_for_each_entry(p, &nvme_loop_ports, entry) { 577 /* if no transport address is specified use the first port */ 578 if ((ctrl->opts->mask & NVMF_OPT_TRADDR) && 579 strcmp(ctrl->opts->traddr, p->disc_addr.traddr)) 580 continue; 581 found = p; 582 break; 583 } 584 mutex_unlock(&nvme_loop_ports_mutex); 585 return found; 586} 587 |
568static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev, 569 struct nvmf_ctrl_options *opts) 570{ 571 struct nvme_loop_ctrl *ctrl; 572 bool changed; 573 int ret; 574 575 ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL); --- 8 unchanged lines hidden --- 584 0 /* no quirks, we're perfect! */); 585 if (ret) 586 goto out_put_ctrl; 587 588 ret = -ENOMEM; 589 590 ctrl->ctrl.sqsize = opts->queue_size - 1; 591 ctrl->ctrl.kato = opts->kato; | 588static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev, 589 struct nvmf_ctrl_options *opts) 590{ 591 struct nvme_loop_ctrl *ctrl; 592 bool changed; 593 int ret; 594 595 ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL); --- 8 unchanged lines hidden --- 604 0 /* no quirks, we're perfect! */); 605 if (ret) 606 goto out_put_ctrl; 607 608 ret = -ENOMEM; 609 610 ctrl->ctrl.sqsize = opts->queue_size - 1; 611 ctrl->ctrl.kato = opts->kato; |
 | 612 ctrl->port = nvme_loop_find_port(&ctrl->ctrl); |
592 593 ctrl->queues = kcalloc(opts->nr_io_queues + 1, sizeof(*ctrl->queues), 594 GFP_KERNEL); 595 if (!ctrl->queues) 596 goto out_uninit_ctrl; 597 598 ret = nvme_loop_configure_admin_queue(ctrl); 599 if (ret) --- 41 unchanged lines hidden --- 641 nvme_put_ctrl(&ctrl->ctrl); 642 if (ret > 0) 643 ret = -EIO; 644 return ERR_PTR(ret); 645} 646 647static int nvme_loop_add_port(struct nvmet_port *port) 648{ | 613 614 ctrl->queues = kcalloc(opts->nr_io_queues + 1, sizeof(*ctrl->queues), 615 GFP_KERNEL); 616 if (!ctrl->queues) 617 goto out_uninit_ctrl; 618 619 ret = nvme_loop_configure_admin_queue(ctrl); 620 if (ret) --- 41 unchanged lines hidden --- 662 nvme_put_ctrl(&ctrl->ctrl); 663 if (ret > 0) 664 ret = -EIO; 665 return ERR_PTR(ret); 666} 667 668static int nvme_loop_add_port(struct nvmet_port *port) 669{ |
649 /* 650 * XXX: disallow adding more than one port so 651 * there are no connection rejections when a 652 * subsystem is assigned to a port for which 653 * loop doesn't have a pointer. 654 * This scenario would be possible if we allowed 655 * more than one port to be added and a subsystem 656 * was assigned to a port other than nvmet_loop_port. 657 */ 658 659 if (nvmet_loop_port) 660 return -EPERM; 661 662 nvmet_loop_port = port; | 670 mutex_lock(&nvme_loop_ports_mutex); 671 list_add_tail(&port->entry, &nvme_loop_ports); 672 mutex_unlock(&nvme_loop_ports_mutex); |
663 return 0; 664} 665 666static void nvme_loop_remove_port(struct nvmet_port *port) 667{ | 673 return 0; 674} 675 676static void nvme_loop_remove_port(struct nvmet_port *port) 677{ |
668 if (port == nvmet_loop_port) 669 nvmet_loop_port = NULL; | 678 mutex_lock(&nvme_loop_ports_mutex); 679 list_del_init(&port->entry); 680 mutex_unlock(&nvme_loop_ports_mutex); |
670} 671 672static const struct nvmet_fabrics_ops nvme_loop_ops = { 673 .owner = THIS_MODULE, 674 .type = NVMF_TRTYPE_LOOP, 675 .add_port = nvme_loop_add_port, 676 .remove_port = nvme_loop_remove_port, 677 .queue_response = nvme_loop_queue_response, 678 .delete_ctrl = nvme_loop_delete_ctrl, 679}; 680 681static struct nvmf_transport_ops nvme_loop_transport = { 682 .name = "loop", 683 .module = THIS_MODULE, 684 .create_ctrl = nvme_loop_create_ctrl, | 681} 682 683static const struct nvmet_fabrics_ops nvme_loop_ops = { 684 .owner = THIS_MODULE, 685 .type = NVMF_TRTYPE_LOOP, 686 .add_port = nvme_loop_add_port, 687 .remove_port = nvme_loop_remove_port, 688 .queue_response = nvme_loop_queue_response, 689 .delete_ctrl = nvme_loop_delete_ctrl, 690}; 691 692static struct nvmf_transport_ops nvme_loop_transport = { 693 .name = "loop", 694 .module = THIS_MODULE, 695 .create_ctrl = nvme_loop_create_ctrl, |
 | 696 .allowed_opts = NVMF_OPT_TRADDR, |
685}; 686 687static int __init nvme_loop_init_module(void) 688{ 689 int ret; 690 691 ret = nvmet_register_transport(&nvme_loop_ops); 692 if (ret) --- 29 unchanged lines hidden --- | 697}; 698 699static int __init nvme_loop_init_module(void) 700{ 701 int ret; 702 703 ret = nvmet_register_transport(&nvme_loop_ops); 704 if (ret) --- 29 unchanged lines hidden --- |
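The change shown above replaces the single global `nvmet_loop_port` pointer with a mutex-protected list (`nvme_loop_ports` plus `nvme_loop_ports_mutex`). `nvme_loop_add_port()` and `nvme_loop_remove_port()` shrink to a locked list insert and delete, the old one-port `-EPERM` restriction disappears, and the new `nvme_loop_find_port()` resolves a port once per controller: it matches `opts->traddr` when the user supplied one (`NVMF_OPT_TRADDR`), otherwise it takes the first registered port. The result is cached in the new `nvme_loop_ctrl.port` member, and each request then stamps `iod->req.port` from `queue->ctrl->port` instead of the global. Below is a minimal userspace sketch of that pattern; it uses a pthread mutex and a hand-rolled singly-linked list in place of the kernel's `list_head` API, and every name in it (`port_add`, `port_find`, `ctrl_create`, ...) is illustrative, not a kernel symbol.

```c
/*
 * Userspace analogue of the diff's port registry: a mutex-protected
 * list replaces a single global pointer, a controller resolves its
 * port once at create time, and each request reuses the cached pointer.
 * Illustrative names only -- not the kernel API.
 */
#include <pthread.h>
#include <stdio.h>
#include <string.h>

struct port {
	char traddr[32];	/* cf. port->disc_addr.traddr */
	struct port *next;	/* stand-in for the kernel's list_head entry */
};

static struct port *ports;	/* cf. LIST_HEAD(nvme_loop_ports) */
static pthread_mutex_t ports_mutex = PTHREAD_MUTEX_INITIALIZER;

/* cf. nvme_loop_add_port(): registration is now just a locked insert */
static void port_add(struct port *p)
{
	pthread_mutex_lock(&ports_mutex);
	p->next = ports;	/* the kernel appends; prepending keeps this short */
	ports = p;
	pthread_mutex_unlock(&ports_mutex);
}

/* cf. nvme_loop_find_port(): NULL traddr means "use the first port" */
static struct port *port_find(const char *traddr)
{
	struct port *p, *found = NULL;

	pthread_mutex_lock(&ports_mutex);
	for (p = ports; p; p = p->next) {
		if (traddr && strcmp(traddr, p->traddr))
			continue;
		found = p;
		break;
	}
	pthread_mutex_unlock(&ports_mutex);
	return found;
}

struct ctrl {
	struct port *port;	/* cf. the new nvme_loop_ctrl.port member */
};

/* cf. nvme_loop_create_ctrl(): bind the controller to a port once */
static void ctrl_create(struct ctrl *c, const char *traddr)
{
	c->port = port_find(traddr);
}

int main(void)
{
	struct port p0 = { .traddr = "0" }, p1 = { .traddr = "1" };
	struct ctrl by_addr, first;

	port_add(&p0);
	port_add(&p1);

	ctrl_create(&by_addr, "0");	/* explicit traddr selects a port */
	ctrl_create(&first, NULL);	/* no traddr: first port wins */

	/* cf. nvme_loop_queue_rq(): iod->req.port = queue->ctrl->port */
	printf("by-addr request routed to traddr=%s\n", by_addr.port->traddr);
	printf("default request routed to traddr=%s\n", first.port->traddr);
	return 0;
}
```

Caching the resolved port in the controller keeps the I/O hot path free of the registry lock: `nvme_loop_ports_mutex` is only taken when a port is added or removed through configfs and when a controller is created.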
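On the host side the loop transport now accepts a transport address (`.allowed_opts = NVMF_OPT_TRADDR`) and wires up `.get_address = nvmf_get_address`, so the core can report the address back, e.g. through the controller's sysfs `address` attribute. The sketch below shows that mask-gated reporting pattern, assuming supplied connect options are tracked as bits in `opts->mask` as the diff's `nvme_loop_find_port()` also relies on; the names here are illustrative stand-ins, not the real fabrics code.

```c
/*
 * Sketch of mask-gated address reporting in the style of nvmf_get_address():
 * only options the user actually supplied at connect time are rendered into
 * the "address" string. Illustrative names, not the fabrics implementation.
 */
#include <stdio.h>

#define OPT_TRADDR	(1 << 0)	/* cf. NVMF_OPT_TRADDR */

struct ctrl_opts {
	unsigned int mask;	/* bit set per option the user supplied */
	const char *traddr;
};

static int get_address(const struct ctrl_opts *opts, char *buf, int size)
{
	int len = 0;

	/* report only the fields that were actually given on connect */
	if (opts->mask & OPT_TRADDR)
		len += snprintf(buf + len, size - len, "traddr=%s", opts->traddr);
	return len;
}

int main(void)
{
	struct ctrl_opts opts = { .mask = OPT_TRADDR, .traddr = "1" };
	char buf[64];

	get_address(&opts, buf, sizeof(buf));
	printf("%s\n", buf);	/* -> traddr=1 */
	return 0;
}
```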
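In practice this lets a target expose several loop ports at once: each nvmet port configured with `addr_trtype=loop` in configfs now simply registers itself on the list, and a host can select one by address, e.g. with something like `nvme connect -t loop -a <addr_traddr> -n <subsysnqn>`. Omitting the address falls back to the first registered port, per the comment in `nvme_loop_find_port()`.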