// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe over Fabrics loopback device.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/scatterlist.h>
#include <linux/blk-mq.h>
#include <linux/nvme.h>
#include <linux/module.h>
#include <linux/parser.h>
#include "nvmet.h"
#include "../host/nvme.h"
#include "../host/fabrics.h"

#define NVME_LOOP_MAX_SEGMENTS		256

struct nvme_loop_iod {
	struct nvme_request	nvme_req;
	struct nvme_command	cmd;
	struct nvme_completion	cqe;
	struct nvmet_req	req;
	struct nvme_loop_queue	*queue;
	struct work_struct	work;
	struct sg_table		sg_table;
	struct scatterlist	first_sgl[];
};

struct nvme_loop_ctrl {
	struct nvme_loop_queue	*queues;

	struct blk_mq_tag_set	admin_tag_set;

	struct list_head	list;
	struct blk_mq_tag_set	tag_set;
	struct nvme_loop_iod	async_event_iod;
	struct nvme_ctrl	ctrl;

	struct nvmet_ctrl	*target_ctrl;
	struct nvmet_port	*port;
};

static inline struct nvme_loop_ctrl *to_loop_ctrl(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_loop_ctrl, ctrl);
}

enum nvme_loop_queue_flags {
	NVME_LOOP_Q_LIVE	= 0,
};

struct nvme_loop_queue {
	struct nvmet_cq		nvme_cq;
	struct nvmet_sq		nvme_sq;
	struct nvme_loop_ctrl	*ctrl;
	unsigned long		flags;
};

static LIST_HEAD(nvme_loop_ports);
static DEFINE_MUTEX(nvme_loop_ports_mutex);

static LIST_HEAD(nvme_loop_ctrl_list);
static DEFINE_MUTEX(nvme_loop_ctrl_mutex);

static void nvme_loop_queue_response(struct nvmet_req *nvme_req);
static void nvme_loop_delete_ctrl(struct nvmet_ctrl *ctrl);

static const struct nvmet_fabrics_ops nvme_loop_ops;
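
/*
 * Queue 0 is always the admin queue; I/O queues follow it contiguously in
 * the ctrl->queues array, so a queue's index is simply its offset from the
 * start of that array.
 */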
static inline int nvme_loop_queue_idx(struct nvme_loop_queue *queue)
{
	return queue - queue->ctrl->queues;
}

static void nvme_loop_complete_rq(struct request *req)
{
	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);

	nvme_cleanup_cmd(req);
	sg_free_table_chained(&iod->sg_table, SG_CHUNK_SIZE);
	nvme_complete_rq(req);
}

static struct blk_mq_tags *nvme_loop_tagset(struct nvme_loop_queue *queue)
{
	u32 queue_idx = nvme_loop_queue_idx(queue);

	if (queue_idx == 0)
		return queue->ctrl->admin_tag_set.tags[queue_idx];
	return queue->ctrl->tag_set.tags[queue_idx - 1];
}

static void nvme_loop_queue_response(struct nvmet_req *req)
{
	struct nvme_loop_queue *queue =
		container_of(req->sq, struct nvme_loop_queue, nvme_sq);
	struct nvme_completion *cqe = req->cqe;

	/*
	 * AEN requests are special as they don't time out and can
	 * survive any kind of queue freeze and often don't respond to
	 * aborts.  We don't even bother to allocate a struct request
	 * for them but rather special case them here.
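	 * The submission side stamps AENs with a command_id of
	 * NVME_AQ_BLK_MQ_DEPTH, which is above any tag handed out by the
	 * admin tag set, so the check below can never match a regular
	 * request.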
	 */
	if (unlikely(nvme_loop_queue_idx(queue) == 0 &&
			cqe->command_id >= NVME_AQ_BLK_MQ_DEPTH)) {
		nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
				&cqe->result);
	} else {
		struct request *rq;

		rq = blk_mq_tag_to_rq(nvme_loop_tagset(queue), cqe->command_id);
		if (!rq) {
			dev_err(queue->ctrl->ctrl.device,
				"tag 0x%x on queue %d not found\n",
				cqe->command_id, nvme_loop_queue_idx(queue));
			return;
		}

		nvme_end_request(rq, cqe->status, cqe->result);
	}
}

static void nvme_loop_execute_work(struct work_struct *work)
{
	struct nvme_loop_iod *iod =
		container_of(work, struct nvme_loop_iod, work);

	nvmet_req_execute(&iod->req);
}

static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
		const struct blk_mq_queue_data *bd)
{
	struct nvme_ns *ns = hctx->queue->queuedata;
	struct nvme_loop_queue *queue = hctx->driver_data;
	struct request *req = bd->rq;
	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
	bool queue_ready = test_bit(NVME_LOOP_Q_LIVE, &queue->flags);
	blk_status_t ret;

	if (!nvmf_check_ready(&queue->ctrl->ctrl, req, queue_ready))
		return nvmf_fail_nonready_command(&queue->ctrl->ctrl, req);

	ret = nvme_setup_cmd(ns, req, &iod->cmd);
	if (ret)
		return ret;

	blk_mq_start_request(req);
	iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;
	iod->req.port = queue->ctrl->port;
	if (!nvmet_req_init(&iod->req, &queue->nvme_cq,
			&queue->nvme_sq, &nvme_loop_ops))
		return BLK_STS_OK;

	if (blk_rq_nr_phys_segments(req)) {
		iod->sg_table.sgl = iod->first_sgl;
		if (sg_alloc_table_chained(&iod->sg_table,
				blk_rq_nr_phys_segments(req),
				iod->sg_table.sgl, SG_CHUNK_SIZE))
			return BLK_STS_RESOURCE;

		iod->req.sg = iod->sg_table.sgl;
		iod->req.sg_cnt = blk_rq_map_sg(req->q, req, iod->sg_table.sgl);
		iod->req.transfer_len = blk_rq_payload_bytes(req);
	}
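
	/*
	 * Hand the command to the target core from a work item, so that
	 * target-side processing runs in process context rather than
	 * directly inside ->queue_rq().
	 */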
	schedule_work(&iod->work);
	return BLK_STS_OK;
}

static void nvme_loop_submit_async_event(struct nvme_ctrl *arg)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(arg);
	struct nvme_loop_queue *queue = &ctrl->queues[0];
	struct nvme_loop_iod *iod = &ctrl->async_event_iod;

	memset(&iod->cmd, 0, sizeof(iod->cmd));
	iod->cmd.common.opcode = nvme_admin_async_event;
	iod->cmd.common.command_id = NVME_AQ_BLK_MQ_DEPTH;
	iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;

	if (!nvmet_req_init(&iod->req, &queue->nvme_cq, &queue->nvme_sq,
			&nvme_loop_ops)) {
		dev_err(ctrl->ctrl.device, "failed async event work\n");
		return;
	}

	schedule_work(&iod->work);
}

static int nvme_loop_init_iod(struct nvme_loop_ctrl *ctrl,
		struct nvme_loop_iod *iod, unsigned int queue_idx)
{
	iod->req.cmd = &iod->cmd;
	iod->req.cqe = &iod->cqe;
	iod->queue = &ctrl->queues[queue_idx];
	INIT_WORK(&iod->work, nvme_loop_execute_work);
	return 0;
}

static int nvme_loop_init_request(struct blk_mq_tag_set *set,
		struct request *req, unsigned int hctx_idx,
		unsigned int numa_node)
{
	struct nvme_loop_ctrl *ctrl = set->driver_data;

	nvme_req(req)->ctrl = &ctrl->ctrl;
	return nvme_loop_init_iod(ctrl, blk_mq_rq_to_pdu(req),
			(set == &ctrl->tag_set) ? hctx_idx + 1 : 0);
}
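
/*
 * Hardware context i of the I/O tag set maps to ctrl->queues[i + 1];
 * index 0 is reserved for the admin queue.
 */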
static int nvme_loop_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_loop_ctrl *ctrl = data;
	struct nvme_loop_queue *queue = &ctrl->queues[hctx_idx + 1];

	BUG_ON(hctx_idx >= ctrl->ctrl.queue_count);

	hctx->driver_data = queue;
	return 0;
}

static int nvme_loop_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_loop_ctrl *ctrl = data;
	struct nvme_loop_queue *queue = &ctrl->queues[0];

	BUG_ON(hctx_idx != 0);

	hctx->driver_data = queue;
	return 0;
}

static const struct blk_mq_ops nvme_loop_mq_ops = {
	.queue_rq	= nvme_loop_queue_rq,
	.complete	= nvme_loop_complete_rq,
	.init_request	= nvme_loop_init_request,
	.init_hctx	= nvme_loop_init_hctx,
};

static const struct blk_mq_ops nvme_loop_admin_mq_ops = {
	.queue_rq	= nvme_loop_queue_rq,
	.complete	= nvme_loop_complete_rq,
	.init_request	= nvme_loop_init_request,
	.init_hctx	= nvme_loop_init_admin_hctx,
};

static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
{
	clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);
	nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
	blk_cleanup_queue(ctrl->ctrl.admin_q);
	blk_mq_free_tag_set(&ctrl->admin_tag_set);
}

static void nvme_loop_free_ctrl(struct nvme_ctrl *nctrl)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(nctrl);

	if (list_empty(&ctrl->list))
		goto free_ctrl;

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_del(&ctrl->list);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	if (nctrl->tagset) {
		blk_cleanup_queue(ctrl->ctrl.connect_q);
		blk_mq_free_tag_set(&ctrl->tag_set);
	}
	kfree(ctrl->queues);
	nvmf_free_options(nctrl->opts);
free_ctrl:
	kfree(ctrl);
}
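
/*
 * Queue 0 is the admin queue, so only queues 1..queue_count - 1 are torn
 * down here.
 */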
static void nvme_loop_destroy_io_queues(struct nvme_loop_ctrl *ctrl)
{
	int i;

	for (i = 1; i < ctrl->ctrl.queue_count; i++) {
		clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags);
		nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
	}
}

static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl)
{
	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
	unsigned int nr_io_queues;
	int ret, i;

	nr_io_queues = min(opts->nr_io_queues, num_online_cpus());
	ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
	if (ret || !nr_io_queues)
		return ret;

	dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n", nr_io_queues);

	for (i = 1; i <= nr_io_queues; i++) {
		ctrl->queues[i].ctrl = ctrl;
		ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq);
		if (ret)
			goto out_destroy_queues;

		ctrl->ctrl.queue_count++;
	}

	return 0;

out_destroy_queues:
	nvme_loop_destroy_io_queues(ctrl);
	return ret;
}

static int nvme_loop_connect_io_queues(struct nvme_loop_ctrl *ctrl)
{
	int i, ret;

	for (i = 1; i < ctrl->ctrl.queue_count; i++) {
		ret = nvmf_connect_io_queue(&ctrl->ctrl, i, false);
		if (ret)
			return ret;
		set_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags);
	}

	return 0;
}
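
/*
 * Bring up the admin queue first: the Fabrics connect, controller enable,
 * and identify steps below all run over it before any I/O queue exists.
 */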
static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
{
	int error;

	memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
	ctrl->admin_tag_set.ops = &nvme_loop_admin_mq_ops;
	ctrl->admin_tag_set.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
	ctrl->admin_tag_set.reserved_tags = 2; /* connect + keep-alive */
	ctrl->admin_tag_set.numa_node = NUMA_NO_NODE;
	ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
		SG_CHUNK_SIZE * sizeof(struct scatterlist);
	ctrl->admin_tag_set.driver_data = ctrl;
	ctrl->admin_tag_set.nr_hw_queues = 1;
	ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT;
	ctrl->admin_tag_set.flags = BLK_MQ_F_NO_SCHED;

	ctrl->queues[0].ctrl = ctrl;
	error = nvmet_sq_init(&ctrl->queues[0].nvme_sq);
	if (error)
		return error;
	ctrl->ctrl.queue_count = 1;

	error = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
	if (error)
		goto out_free_sq;
	ctrl->ctrl.admin_tagset = &ctrl->admin_tag_set;

	ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
	if (IS_ERR(ctrl->ctrl.admin_q)) {
		error = PTR_ERR(ctrl->ctrl.admin_q);
		goto out_free_tagset;
	}

	error = nvmf_connect_admin_queue(&ctrl->ctrl);
	if (error)
		goto out_cleanup_queue;

	set_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);

	error = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->ctrl.cap);
	if (error) {
		dev_err(ctrl->ctrl.device,
			"prop_get NVME_REG_CAP failed\n");
		goto out_cleanup_queue;
	}

	ctrl->ctrl.sqsize =
		min_t(int, NVME_CAP_MQES(ctrl->ctrl.cap), ctrl->ctrl.sqsize);

	error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->ctrl.cap);
	if (error)
		goto out_cleanup_queue;

	ctrl->ctrl.max_hw_sectors =
		(NVME_LOOP_MAX_SEGMENTS - 1) << (PAGE_SHIFT - 9);

	error = nvme_init_identify(&ctrl->ctrl);
	if (error)
		goto out_cleanup_queue;

	return 0;

out_cleanup_queue:
	blk_cleanup_queue(ctrl->ctrl.admin_q);
out_free_tagset:
	blk_mq_free_tag_set(&ctrl->admin_tag_set);
out_free_sq:
	nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
	return error;
}
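
/*
 * Teardown runs in the reverse order of setup: cancel and drain any
 * outstanding I/O first, then shut the controller down over the admin
 * queue.
 */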
static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl)
{
	if (ctrl->ctrl.queue_count > 1) {
		nvme_stop_queues(&ctrl->ctrl);
		blk_mq_tagset_busy_iter(&ctrl->tag_set,
					nvme_cancel_request, &ctrl->ctrl);
		blk_mq_tagset_wait_completed_request(&ctrl->tag_set);
		nvme_loop_destroy_io_queues(ctrl);
	}

	if (ctrl->ctrl.state == NVME_CTRL_LIVE)
		nvme_shutdown_ctrl(&ctrl->ctrl);

	blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
	blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
				nvme_cancel_request, &ctrl->ctrl);
	blk_mq_tagset_wait_completed_request(&ctrl->admin_tag_set);
	blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
	nvme_loop_destroy_admin_queue(ctrl);
}

static void nvme_loop_delete_ctrl_host(struct nvme_ctrl *ctrl)
{
	nvme_loop_shutdown_ctrl(to_loop_ctrl(ctrl));
}

static void nvme_loop_delete_ctrl(struct nvmet_ctrl *nctrl)
{
	struct nvme_loop_ctrl *ctrl;

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_for_each_entry(ctrl, &nvme_loop_ctrl_list, list) {
		if (ctrl->ctrl.cntlid == nctrl->cntlid)
			nvme_delete_ctrl(&ctrl->ctrl);
	}
	mutex_unlock(&nvme_loop_ctrl_mutex);
}

static void nvme_loop_reset_ctrl_work(struct work_struct *work)
{
	struct nvme_loop_ctrl *ctrl =
		container_of(work, struct nvme_loop_ctrl, ctrl.reset_work);
	bool changed;
	int ret;

	nvme_stop_ctrl(&ctrl->ctrl);
	nvme_loop_shutdown_ctrl(ctrl);

	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
		/* state change failure should never happen */
		WARN_ON_ONCE(1);
		return;
	}

	ret = nvme_loop_configure_admin_queue(ctrl);
	if (ret)
		goto out_disable;

	ret = nvme_loop_init_io_queues(ctrl);
	if (ret)
		goto out_destroy_admin;

	ret = nvme_loop_connect_io_queues(ctrl);
	if (ret)
		goto out_destroy_io;

	blk_mq_update_nr_hw_queues(&ctrl->tag_set,
			ctrl->ctrl.queue_count - 1);

	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
	WARN_ON_ONCE(!changed);

	nvme_start_ctrl(&ctrl->ctrl);

	return;

out_destroy_io:
	nvme_loop_destroy_io_queues(ctrl);
out_destroy_admin:
	nvme_loop_destroy_admin_queue(ctrl);
out_disable:
	dev_warn(ctrl->ctrl.device, "Removing after reset failure\n");
	nvme_uninit_ctrl(&ctrl->ctrl);
	nvme_put_ctrl(&ctrl->ctrl);
}
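
/*
 * Host-side controller ops: on the loop transport, register reads and
 * writes are serviced via the generic fabrics property get/set helpers
 * rather than MMIO.
 */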
static const struct nvme_ctrl_ops nvme_loop_ctrl_ops = {
	.name			= "loop",
	.module			= THIS_MODULE,
	.flags			= NVME_F_FABRICS,
	.reg_read32		= nvmf_reg_read32,
	.reg_read64		= nvmf_reg_read64,
	.reg_write32		= nvmf_reg_write32,
	.free_ctrl		= nvme_loop_free_ctrl,
	.submit_async_event	= nvme_loop_submit_async_event,
	.delete_ctrl		= nvme_loop_delete_ctrl_host,
	.get_address		= nvmf_get_address,
};

static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
{
	int ret;

	ret = nvme_loop_init_io_queues(ctrl);
	if (ret)
		return ret;

	memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
	ctrl->tag_set.ops = &nvme_loop_mq_ops;
	ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
	ctrl->tag_set.reserved_tags = 1; /* fabric connect */
	ctrl->tag_set.numa_node = NUMA_NO_NODE;
	ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	ctrl->tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
		SG_CHUNK_SIZE * sizeof(struct scatterlist);
	ctrl->tag_set.driver_data = ctrl;
	ctrl->tag_set.nr_hw_queues = ctrl->ctrl.queue_count - 1;
	ctrl->tag_set.timeout = NVME_IO_TIMEOUT;
	ctrl->ctrl.tagset = &ctrl->tag_set;

	ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
	if (ret)
		goto out_destroy_queues;

	ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
	if (IS_ERR(ctrl->ctrl.connect_q)) {
		ret = PTR_ERR(ctrl->ctrl.connect_q);
		goto out_free_tagset;
	}

	ret = nvme_loop_connect_io_queues(ctrl);
	if (ret)
		goto out_cleanup_connect_q;

	return 0;

out_cleanup_connect_q:
	blk_cleanup_queue(ctrl->ctrl.connect_q);
out_free_tagset:
	blk_mq_free_tag_set(&ctrl->tag_set);
out_destroy_queues:
	nvme_loop_destroy_io_queues(ctrl);
	return ret;
}
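
/*
 * Loop "ports" are nvmet ports bound to the loop transport; the host side
 * rendezvouses with the target by searching this list at connect time.
 */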
static struct nvmet_port *nvme_loop_find_port(struct nvme_ctrl *ctrl)
{
	struct nvmet_port *p, *found = NULL;

	mutex_lock(&nvme_loop_ports_mutex);
	list_for_each_entry(p, &nvme_loop_ports, entry) {
		/* if no transport address is specified use the first port */
		if ((ctrl->opts->mask & NVMF_OPT_TRADDR) &&
		    strcmp(ctrl->opts->traddr, p->disc_addr.traddr))
			continue;
		found = p;
		break;
	}
	mutex_unlock(&nvme_loop_ports_mutex);
	return found;
}

static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev,
		struct nvmf_ctrl_options *opts)
{
	struct nvme_loop_ctrl *ctrl;
	bool changed;
	int ret;

	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		return ERR_PTR(-ENOMEM);
	ctrl->ctrl.opts = opts;
	INIT_LIST_HEAD(&ctrl->list);

	INIT_WORK(&ctrl->ctrl.reset_work, nvme_loop_reset_ctrl_work);

	ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_loop_ctrl_ops,
				0 /* no quirks, we're perfect! */);
	if (ret)
		goto out_put_ctrl;

	ret = -ENOMEM;

	ctrl->ctrl.sqsize = opts->queue_size - 1;
	ctrl->ctrl.kato = opts->kato;
	ctrl->port = nvme_loop_find_port(&ctrl->ctrl);

	ctrl->queues = kcalloc(opts->nr_io_queues + 1, sizeof(*ctrl->queues),
			GFP_KERNEL);
	if (!ctrl->queues)
		goto out_uninit_ctrl;

	ret = nvme_loop_configure_admin_queue(ctrl);
	if (ret)
		goto out_free_queues;

	if (opts->queue_size > ctrl->ctrl.maxcmd) {
		/* warn if maxcmd is lower than queue_size */
		dev_warn(ctrl->ctrl.device,
			"queue_size %zu > ctrl maxcmd %u, clamping down\n",
			opts->queue_size, ctrl->ctrl.maxcmd);
		opts->queue_size = ctrl->ctrl.maxcmd;
	}

	if (opts->nr_io_queues) {
		ret = nvme_loop_create_io_queues(ctrl);
		if (ret)
			goto out_remove_admin_queue;
	}

	nvme_loop_init_iod(ctrl, &ctrl->async_event_iod, 0);

	dev_info(ctrl->ctrl.device,
		 "new ctrl: \"%s\"\n", ctrl->ctrl.opts->subsysnqn);

	nvme_get_ctrl(&ctrl->ctrl);

	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
	WARN_ON_ONCE(!changed);

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_add_tail(&ctrl->list, &nvme_loop_ctrl_list);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	nvme_start_ctrl(&ctrl->ctrl);

	return &ctrl->ctrl;

out_remove_admin_queue:
	nvme_loop_destroy_admin_queue(ctrl);
out_free_queues:
	kfree(ctrl->queues);
out_uninit_ctrl:
	nvme_uninit_ctrl(&ctrl->ctrl);
out_put_ctrl:
	nvme_put_ctrl(&ctrl->ctrl);
	if (ret > 0)
		ret = -EIO;
	return ERR_PTR(ret);
}
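
/*
 * nvmet transport entry points: adding a port simply publishes it on
 * nvme_loop_ports so nvme_loop_find_port() can match it at connect time.
 */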
static int nvme_loop_add_port(struct nvmet_port *port)
{
	mutex_lock(&nvme_loop_ports_mutex);
	list_add_tail(&port->entry, &nvme_loop_ports);
	mutex_unlock(&nvme_loop_ports_mutex);
	return 0;
}

static void nvme_loop_remove_port(struct nvmet_port *port)
{
	mutex_lock(&nvme_loop_ports_mutex);
	list_del_init(&port->entry);
	mutex_unlock(&nvme_loop_ports_mutex);
}

static const struct nvmet_fabrics_ops nvme_loop_ops = {
	.owner		= THIS_MODULE,
	.type		= NVMF_TRTYPE_LOOP,
	.add_port	= nvme_loop_add_port,
	.remove_port	= nvme_loop_remove_port,
	.queue_response = nvme_loop_queue_response,
	.delete_ctrl	= nvme_loop_delete_ctrl,
};

static struct nvmf_transport_ops nvme_loop_transport = {
	.name		= "loop",
	.module		= THIS_MODULE,
	.create_ctrl	= nvme_loop_create_ctrl,
	.allowed_opts	= NVMF_OPT_TRADDR,
};

static int __init nvme_loop_init_module(void)
{
	int ret;

	ret = nvmet_register_transport(&nvme_loop_ops);
	if (ret)
		return ret;

	ret = nvmf_register_transport(&nvme_loop_transport);
	if (ret)
		nvmet_unregister_transport(&nvme_loop_ops);

	return ret;
}

static void __exit nvme_loop_cleanup_module(void)
{
	struct nvme_loop_ctrl *ctrl, *next;

	nvmf_unregister_transport(&nvme_loop_transport);
	nvmet_unregister_transport(&nvme_loop_ops);

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_for_each_entry_safe(ctrl, next, &nvme_loop_ctrl_list, list)
		nvme_delete_ctrl(&ctrl->ctrl);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	flush_workqueue(nvme_delete_wq);
}

module_init(nvme_loop_init_module);
module_exit(nvme_loop_cleanup_module);

MODULE_LICENSE("GPL v2");
MODULE_ALIAS("nvmet-transport-254"); /* 254 == NVMF_TRTYPE_LOOP */