/*
 * NVMe over Fabrics loopback device.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/scatterlist.h>
#include <linux/blk-mq.h>
#include <linux/nvme.h>
#include <linux/module.h>
#include <linux/parser.h>
#include "nvmet.h"
#include "../host/nvme.h"
#include "../host/fabrics.h"

#define NVME_LOOP_MAX_SEGMENTS	256

struct nvme_loop_iod {
	struct nvme_request	nvme_req;
	struct nvme_command	cmd;
	struct nvme_completion	rsp;
	struct nvmet_req	req;
	struct nvme_loop_queue	*queue;
	struct work_struct	work;
	struct sg_table		sg_table;
	struct scatterlist	first_sgl[];
};

struct nvme_loop_ctrl {
	struct nvme_loop_queue	*queues;

	struct blk_mq_tag_set	admin_tag_set;

	struct list_head	list;
	struct blk_mq_tag_set	tag_set;
	struct nvme_loop_iod	async_event_iod;
	struct nvme_ctrl	ctrl;

	struct nvmet_ctrl	*target_ctrl;
};

static inline struct nvme_loop_ctrl *to_loop_ctrl(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_loop_ctrl, ctrl);
}

enum nvme_loop_queue_flags {
	NVME_LOOP_Q_LIVE	= 0,
};

struct nvme_loop_queue {
	struct nvmet_cq		nvme_cq;
	struct nvmet_sq		nvme_sq;
	struct nvme_loop_ctrl	*ctrl;
	unsigned long		flags;
};
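/*
 * The loop transport supports exactly one nvmet port; see the comment in
 * nvme_loop_add_port() below for the reasoning.
 */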
static struct nvmet_port *nvmet_loop_port;

static LIST_HEAD(nvme_loop_ctrl_list);
static DEFINE_MUTEX(nvme_loop_ctrl_mutex);

static void nvme_loop_queue_response(struct nvmet_req *nvme_req);
static void nvme_loop_delete_ctrl(struct nvmet_ctrl *ctrl);

static const struct nvmet_fabrics_ops nvme_loop_ops;

static inline int nvme_loop_queue_idx(struct nvme_loop_queue *queue)
{
	return queue - queue->ctrl->queues;
}

static void nvme_loop_complete_rq(struct request *req)
{
	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);

	nvme_cleanup_cmd(req);
	sg_free_table_chained(&iod->sg_table, true);
	nvme_complete_rq(req);
}

static struct blk_mq_tags *nvme_loop_tagset(struct nvme_loop_queue *queue)
{
	u32 queue_idx = nvme_loop_queue_idx(queue);

	if (queue_idx == 0)
		return queue->ctrl->admin_tag_set.tags[queue_idx];
	return queue->ctrl->tag_set.tags[queue_idx - 1];
}
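/*
 * Target-side completion path: find the originating host request by
 * command_id in the matching tag set and complete it in place.
 */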
static void nvme_loop_queue_response(struct nvmet_req *req)
{
	struct nvme_loop_queue *queue =
		container_of(req->sq, struct nvme_loop_queue, nvme_sq);
	struct nvme_completion *cqe = req->rsp;

	/*
	 * AEN requests are special as they don't time out and can
	 * survive any kind of queue freeze and often don't respond to
	 * aborts.  We don't even bother to allocate a struct request
	 * for them but rather special case them here.
	 */
	if (unlikely(nvme_loop_queue_idx(queue) == 0 &&
			cqe->command_id >= NVME_AQ_BLK_MQ_DEPTH)) {
		nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
				&cqe->result);
	} else {
		struct request *rq;

		rq = blk_mq_tag_to_rq(nvme_loop_tagset(queue), cqe->command_id);
		if (!rq) {
			dev_err(queue->ctrl->ctrl.device,
				"tag 0x%x on queue %d not found\n",
				cqe->command_id, nvme_loop_queue_idx(queue));
			return;
		}

		nvme_end_request(rq, cqe->status, cqe->result);
	}
}

static void nvme_loop_execute_work(struct work_struct *work)
{
	struct nvme_loop_iod *iod =
		container_of(work, struct nvme_loop_iod, work);

	nvmet_req_execute(&iod->req);
}

static enum blk_eh_timer_return
nvme_loop_timeout(struct request *rq, bool reserved)
{
	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(rq);

	/* queue error recovery */
	nvme_reset_ctrl(&iod->queue->ctrl->ctrl);

	/* fail with DNR on admin cmd timeout */
	nvme_req(rq)->status = NVME_SC_ABORT_REQ | NVME_SC_DNR;

	return BLK_EH_HANDLED;
}
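/*
 * Submission path: the host's blk-mq request is mapped onto the embedded
 * nvmet request and executed on the target side from workqueue context.
 */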
static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
		const struct blk_mq_queue_data *bd)
{
	struct nvme_ns *ns = hctx->queue->queuedata;
	struct nvme_loop_queue *queue = hctx->driver_data;
	struct request *req = bd->rq;
	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
	blk_status_t ret;

	ret = nvmf_check_if_ready(&queue->ctrl->ctrl, req,
		test_bit(NVME_LOOP_Q_LIVE, &queue->flags), true);
	if (unlikely(ret))
		return ret;

	ret = nvme_setup_cmd(ns, req, &iod->cmd);
	if (ret)
		return ret;

	blk_mq_start_request(req);
	iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;
	iod->req.port = nvmet_loop_port;
	if (!nvmet_req_init(&iod->req, &queue->nvme_cq,
			&queue->nvme_sq, &nvme_loop_ops))
		return BLK_STS_OK;

	if (blk_rq_payload_bytes(req)) {
		iod->sg_table.sgl = iod->first_sgl;
		if (sg_alloc_table_chained(&iod->sg_table,
				blk_rq_nr_phys_segments(req),
				iod->sg_table.sgl))
			return BLK_STS_RESOURCE;

		iod->req.sg = iod->sg_table.sgl;
		iod->req.sg_cnt = blk_rq_map_sg(req->q, req, iod->sg_table.sgl);
		iod->req.transfer_len = blk_rq_payload_bytes(req);
	}

	schedule_work(&iod->work);
	return BLK_STS_OK;
}

static void nvme_loop_submit_async_event(struct nvme_ctrl *arg)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(arg);
	struct nvme_loop_queue *queue = &ctrl->queues[0];
	struct nvme_loop_iod *iod = &ctrl->async_event_iod;

	memset(&iod->cmd, 0, sizeof(iod->cmd));
	iod->cmd.common.opcode = nvme_admin_async_event;
	iod->cmd.common.command_id = NVME_AQ_BLK_MQ_DEPTH;
	iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;

	if (!nvmet_req_init(&iod->req, &queue->nvme_cq, &queue->nvme_sq,
			&nvme_loop_ops)) {
		dev_err(ctrl->ctrl.device, "failed async event work\n");
		return;
	}

	schedule_work(&iod->work);
}
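/*
 * Each request PDU embeds its own command and response buffers; wire the
 * nvmet request up to them once, at request-allocation time.
 */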
static int nvme_loop_init_iod(struct nvme_loop_ctrl *ctrl,
		struct nvme_loop_iod *iod, unsigned int queue_idx)
{
	iod->req.cmd = &iod->cmd;
	iod->req.rsp = &iod->rsp;
	iod->queue = &ctrl->queues[queue_idx];
	INIT_WORK(&iod->work, nvme_loop_execute_work);
	return 0;
}

static int nvme_loop_init_request(struct blk_mq_tag_set *set,
		struct request *req, unsigned int hctx_idx,
		unsigned int numa_node)
{
	struct nvme_loop_ctrl *ctrl = set->driver_data;

	return nvme_loop_init_iod(ctrl, blk_mq_rq_to_pdu(req),
			(set == &ctrl->tag_set) ? hctx_idx + 1 : 0);
}

static int nvme_loop_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_loop_ctrl *ctrl = data;
	struct nvme_loop_queue *queue = &ctrl->queues[hctx_idx + 1];

	BUG_ON(hctx_idx >= ctrl->ctrl.queue_count);

	hctx->driver_data = queue;
	return 0;
}

static int nvme_loop_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_loop_ctrl *ctrl = data;
	struct nvme_loop_queue *queue = &ctrl->queues[0];

	BUG_ON(hctx_idx != 0);

	hctx->driver_data = queue;
	return 0;
}

static const struct blk_mq_ops nvme_loop_mq_ops = {
	.queue_rq	= nvme_loop_queue_rq,
	.complete	= nvme_loop_complete_rq,
	.init_request	= nvme_loop_init_request,
	.init_hctx	= nvme_loop_init_hctx,
	.timeout	= nvme_loop_timeout,
};

static const struct blk_mq_ops nvme_loop_admin_mq_ops = {
	.queue_rq	= nvme_loop_queue_rq,
	.complete	= nvme_loop_complete_rq,
	.init_request	= nvme_loop_init_request,
	.init_hctx	= nvme_loop_init_admin_hctx,
	.timeout	= nvme_loop_timeout,
};

static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
{
	clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);
	nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
	blk_cleanup_queue(ctrl->ctrl.admin_q);
	blk_mq_free_tag_set(&ctrl->admin_tag_set);
}

static void nvme_loop_free_ctrl(struct nvme_ctrl *nctrl)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(nctrl);

	if (list_empty(&ctrl->list))
		goto free_ctrl;

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_del(&ctrl->list);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	if (nctrl->tagset) {
		blk_cleanup_queue(ctrl->ctrl.connect_q);
		blk_mq_free_tag_set(&ctrl->tag_set);
	}
	kfree(ctrl->queues);
	nvmf_free_options(nctrl->opts);
free_ctrl:
	kfree(ctrl);
}
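/*
 * Queue 0 is always the admin queue; I/O queues occupy slots
 * 1..queue_count-1 of ctrl->queues.
 */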
static void nvme_loop_destroy_io_queues(struct nvme_loop_ctrl *ctrl)
{
	int i;

	for (i = 1; i < ctrl->ctrl.queue_count; i++) {
		clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags);
		nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
	}
}

static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl)
{
	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
	unsigned int nr_io_queues;
	int ret, i;

	nr_io_queues = min(opts->nr_io_queues, num_online_cpus());
	ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
	if (ret || !nr_io_queues)
		return ret;

	dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n", nr_io_queues);

	for (i = 1; i <= nr_io_queues; i++) {
		ctrl->queues[i].ctrl = ctrl;
		ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq);
		if (ret)
			goto out_destroy_queues;

		ctrl->ctrl.queue_count++;
	}

	return 0;

out_destroy_queues:
	nvme_loop_destroy_io_queues(ctrl);
	return ret;
}

static int nvme_loop_connect_io_queues(struct nvme_loop_ctrl *ctrl)
{
	int i, ret;

	for (i = 1; i < ctrl->ctrl.queue_count; i++) {
		ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
		if (ret)
			return ret;
		set_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags);
	}

	return 0;
}
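/*
 * Admin queue bring-up: allocate the tag set and request queue, connect
 * the queue, then read CAP and enable the controller before identifying
 * it.  Errors unwind in strict reverse order of setup.
 */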
static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
{
	int error;

	memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
	ctrl->admin_tag_set.ops = &nvme_loop_admin_mq_ops;
	ctrl->admin_tag_set.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
	ctrl->admin_tag_set.reserved_tags = 2; /* connect + keep-alive */
	ctrl->admin_tag_set.numa_node = NUMA_NO_NODE;
	ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
		SG_CHUNK_SIZE * sizeof(struct scatterlist);
	ctrl->admin_tag_set.driver_data = ctrl;
	ctrl->admin_tag_set.nr_hw_queues = 1;
	ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT;
	ctrl->admin_tag_set.flags = BLK_MQ_F_NO_SCHED;

	ctrl->queues[0].ctrl = ctrl;
	error = nvmet_sq_init(&ctrl->queues[0].nvme_sq);
	if (error)
		return error;
	ctrl->ctrl.queue_count = 1;

	error = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
	if (error)
		goto out_free_sq;
	ctrl->ctrl.admin_tagset = &ctrl->admin_tag_set;

	ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
	if (IS_ERR(ctrl->ctrl.admin_q)) {
		error = PTR_ERR(ctrl->ctrl.admin_q);
		goto out_free_tagset;
	}

	error = nvmf_connect_admin_queue(&ctrl->ctrl);
	if (error)
		goto out_cleanup_queue;

	set_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);

	error = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->ctrl.cap);
	if (error) {
		dev_err(ctrl->ctrl.device,
			"prop_get NVME_REG_CAP failed\n");
		goto out_cleanup_queue;
	}

	ctrl->ctrl.sqsize =
		min_t(int, NVME_CAP_MQES(ctrl->ctrl.cap), ctrl->ctrl.sqsize);

	error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->ctrl.cap);
	if (error)
		goto out_cleanup_queue;

	ctrl->ctrl.max_hw_sectors =
		(NVME_LOOP_MAX_SEGMENTS - 1) << (PAGE_SHIFT - 9);

	error = nvme_init_identify(&ctrl->ctrl);
	if (error)
		goto out_cleanup_queue;

	return 0;

out_cleanup_queue:
	blk_cleanup_queue(ctrl->ctrl.admin_q);
out_free_tagset:
	blk_mq_free_tag_set(&ctrl->admin_tag_set);
out_free_sq:
	nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
	return error;
}
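/*
 * Teardown mirrors bring-up in reverse: quiesce and cancel in-flight
 * requests before destroying the queues they might still reference.
 */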
static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl)
{
	if (ctrl->ctrl.queue_count > 1) {
		nvme_stop_queues(&ctrl->ctrl);
		blk_mq_tagset_busy_iter(&ctrl->tag_set,
					nvme_cancel_request, &ctrl->ctrl);
		nvme_loop_destroy_io_queues(ctrl);
	}

	if (ctrl->ctrl.state == NVME_CTRL_LIVE)
		nvme_shutdown_ctrl(&ctrl->ctrl);

	blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
	blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
				nvme_cancel_request, &ctrl->ctrl);
	blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
	nvme_loop_destroy_admin_queue(ctrl);
}

static void nvme_loop_delete_ctrl_host(struct nvme_ctrl *ctrl)
{
	nvme_loop_shutdown_ctrl(to_loop_ctrl(ctrl));
}

static void nvme_loop_delete_ctrl(struct nvmet_ctrl *nctrl)
{
	struct nvme_loop_ctrl *ctrl;

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_for_each_entry(ctrl, &nvme_loop_ctrl_list, list) {
		if (ctrl->ctrl.cntlid == nctrl->cntlid)
			nvme_delete_ctrl(&ctrl->ctrl);
	}
	mutex_unlock(&nvme_loop_ctrl_mutex);
}
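/*
 * Reset rebuilds the controller from scratch: stop it, tear everything
 * down, then repeat the admin and I/O queue bring-up used at create time.
 */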
static void nvme_loop_reset_ctrl_work(struct work_struct *work)
{
	struct nvme_loop_ctrl *ctrl =
		container_of(work, struct nvme_loop_ctrl, ctrl.reset_work);
	bool changed;
	int ret;

	nvme_stop_ctrl(&ctrl->ctrl);
	nvme_loop_shutdown_ctrl(ctrl);

	ret = nvme_loop_configure_admin_queue(ctrl);
	if (ret)
		goto out_disable;

	ret = nvme_loop_init_io_queues(ctrl);
	if (ret)
		goto out_destroy_admin;

	ret = nvme_loop_connect_io_queues(ctrl);
	if (ret)
		goto out_destroy_io;

	blk_mq_update_nr_hw_queues(&ctrl->tag_set,
			ctrl->ctrl.queue_count - 1);

	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
	WARN_ON_ONCE(!changed);

	nvme_start_ctrl(&ctrl->ctrl);

	return;

out_destroy_io:
	nvme_loop_destroy_io_queues(ctrl);
out_destroy_admin:
	nvme_loop_destroy_admin_queue(ctrl);
out_disable:
	dev_warn(ctrl->ctrl.device, "Removing after reset failure\n");
	nvme_uninit_ctrl(&ctrl->ctrl);
	nvme_put_ctrl(&ctrl->ctrl);
}

static const struct nvme_ctrl_ops nvme_loop_ctrl_ops = {
	.name			= "loop",
	.module			= THIS_MODULE,
	.flags			= NVME_F_FABRICS,
	.reg_read32		= nvmf_reg_read32,
	.reg_read64		= nvmf_reg_read64,
	.reg_write32		= nvmf_reg_write32,
	.free_ctrl		= nvme_loop_free_ctrl,
	.submit_async_event	= nvme_loop_submit_async_event,
	.delete_ctrl		= nvme_loop_delete_ctrl_host,
};

static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
{
	int ret;

	ret = nvme_loop_init_io_queues(ctrl);
	if (ret)
		return ret;

	memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
	ctrl->tag_set.ops = &nvme_loop_mq_ops;
	ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
	ctrl->tag_set.reserved_tags = 1; /* fabric connect */
	ctrl->tag_set.numa_node = NUMA_NO_NODE;
	ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	ctrl->tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
		SG_CHUNK_SIZE * sizeof(struct scatterlist);
	ctrl->tag_set.driver_data = ctrl;
	ctrl->tag_set.nr_hw_queues = ctrl->ctrl.queue_count - 1;
	ctrl->tag_set.timeout = NVME_IO_TIMEOUT;
	ctrl->ctrl.tagset = &ctrl->tag_set;

	ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
	if (ret)
		goto out_destroy_queues;

	ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
	if (IS_ERR(ctrl->ctrl.connect_q)) {
		ret = PTR_ERR(ctrl->ctrl.connect_q);
		goto out_free_tagset;
	}

	ret = nvme_loop_connect_io_queues(ctrl);
	if (ret)
		goto out_cleanup_connect_q;

	return 0;

out_cleanup_connect_q:
	blk_cleanup_queue(ctrl->ctrl.connect_q);
out_free_tagset:
	blk_mq_free_tag_set(&ctrl->tag_set);
out_destroy_queues:
	nvme_loop_destroy_io_queues(ctrl);
	return ret;
}
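/*
 * Called through the generic fabrics layer when userspace connects a
 * "loop" controller (e.g. "nvme connect -t loop -n <subsysnqn>").
 */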
static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev,
		struct nvmf_ctrl_options *opts)
{
	struct nvme_loop_ctrl *ctrl;
	bool changed;
	int ret;

	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		return ERR_PTR(-ENOMEM);
	ctrl->ctrl.opts = opts;
	INIT_LIST_HEAD(&ctrl->list);

	INIT_WORK(&ctrl->ctrl.reset_work, nvme_loop_reset_ctrl_work);

	ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_loop_ctrl_ops,
				0 /* no quirks, we're perfect! */);
	if (ret)
		goto out_put_ctrl;

	ret = -ENOMEM;

	ctrl->ctrl.sqsize = opts->queue_size - 1;
	ctrl->ctrl.kato = opts->kato;

	ctrl->queues = kcalloc(opts->nr_io_queues + 1, sizeof(*ctrl->queues),
			GFP_KERNEL);
	if (!ctrl->queues)
		goto out_uninit_ctrl;

	ret = nvme_loop_configure_admin_queue(ctrl);
	if (ret)
		goto out_free_queues;

	if (opts->queue_size > ctrl->ctrl.maxcmd) {
		/* warn if maxcmd is lower than queue_size */
		dev_warn(ctrl->ctrl.device,
			"queue_size %zu > ctrl maxcmd %u, clamping down\n",
			opts->queue_size, ctrl->ctrl.maxcmd);
		opts->queue_size = ctrl->ctrl.maxcmd;
	}

	if (opts->nr_io_queues) {
		ret = nvme_loop_create_io_queues(ctrl);
		if (ret)
			goto out_remove_admin_queue;
	}

	nvme_loop_init_iod(ctrl, &ctrl->async_event_iod, 0);

	dev_info(ctrl->ctrl.device,
		 "new ctrl: \"%s\"\n", ctrl->ctrl.opts->subsysnqn);

	nvme_get_ctrl(&ctrl->ctrl);

	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
	WARN_ON_ONCE(!changed);

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_add_tail(&ctrl->list, &nvme_loop_ctrl_list);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	nvme_start_ctrl(&ctrl->ctrl);

	return &ctrl->ctrl;

out_remove_admin_queue:
	nvme_loop_destroy_admin_queue(ctrl);
out_free_queues:
	kfree(ctrl->queues);
out_uninit_ctrl:
	nvme_uninit_ctrl(&ctrl->ctrl);
out_put_ctrl:
	nvme_put_ctrl(&ctrl->ctrl);
	if (ret > 0)
		ret = -EIO;
	return ERR_PTR(ret);
}
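/*
 * A rough sketch of how a loop target is typically wired up from
 * userspace via the nvmet configfs interface (names here are
 * illustrative; the exact layout depends on kernel version):
 *
 *	mkdir /sys/kernel/config/nvmet/subsystems/testnqn
 *	echo 1 > /sys/kernel/config/nvmet/subsystems/testnqn/attr_allow_any_host
 *	mkdir /sys/kernel/config/nvmet/ports/1
 *	echo loop > /sys/kernel/config/nvmet/ports/1/addr_trtype
 *	ln -s /sys/kernel/config/nvmet/subsystems/testnqn \
 *		/sys/kernel/config/nvmet/ports/1/subsystems/testnqn
 *	nvme connect -t loop -n testnqn
 */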
static int nvme_loop_add_port(struct nvmet_port *port)
{
	/*
	 * XXX: disallow adding more than one port so that there are no
	 * connection rejections when a subsystem is assigned to a port
	 * for which loop doesn't have a pointer.  This scenario would be
	 * possible if we allowed more than one port to be added and a
	 * subsystem was assigned to a port other than nvmet_loop_port.
	 */

	if (nvmet_loop_port)
		return -EPERM;

	nvmet_loop_port = port;
	return 0;
}

static void nvme_loop_remove_port(struct nvmet_port *port)
{
	if (port == nvmet_loop_port)
		nvmet_loop_port = NULL;
}

static const struct nvmet_fabrics_ops nvme_loop_ops = {
	.owner		= THIS_MODULE,
	.type		= NVMF_TRTYPE_LOOP,
	.add_port	= nvme_loop_add_port,
	.remove_port	= nvme_loop_remove_port,
	.queue_response	= nvme_loop_queue_response,
	.delete_ctrl	= nvme_loop_delete_ctrl,
};

static struct nvmf_transport_ops nvme_loop_transport = {
	.name		= "loop",
	.module		= THIS_MODULE,
	.create_ctrl	= nvme_loop_create_ctrl,
};

static int __init nvme_loop_init_module(void)
{
	int ret;

	ret = nvmet_register_transport(&nvme_loop_ops);
	if (ret)
		return ret;

	ret = nvmf_register_transport(&nvme_loop_transport);
	if (ret)
		nvmet_unregister_transport(&nvme_loop_ops);

	return ret;
}
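/*
 * Module unload: unregister both transport sides first so no new
 * controllers can show up, then delete any that remain and wait for the
 * deletions to complete.
 */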
static void __exit nvme_loop_cleanup_module(void)
{
	struct nvme_loop_ctrl *ctrl, *next;

	nvmf_unregister_transport(&nvme_loop_transport);
	nvmet_unregister_transport(&nvme_loop_ops);

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_for_each_entry_safe(ctrl, next, &nvme_loop_ctrl_list, list)
		nvme_delete_ctrl(&ctrl->ctrl);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	flush_workqueue(nvme_delete_wq);
}

module_init(nvme_loop_init_module);
module_exit(nvme_loop_cleanup_module);

MODULE_LICENSE("GPL v2");
MODULE_ALIAS("nvmet-transport-254"); /* 254 == NVMF_TRTYPE_LOOP */