/*
 * NVMe over Fabrics loopback device.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/scatterlist.h>
#include <linux/blk-mq.h>
#include <linux/nvme.h>
#include <linux/module.h>
#include <linux/parser.h>
#include "nvmet.h"
#include "../host/nvme.h"
#include "../host/fabrics.h"

#define NVME_LOOP_AQ_DEPTH		256

#define NVME_LOOP_MAX_SEGMENTS		256

/*
 * We handle AEN commands ourselves and don't even let the
 * block layer know about them.
 */
#define NVME_LOOP_NR_AEN_COMMANDS	1
#define NVME_LOOP_AQ_BLKMQ_DEPTH	\
	(NVME_LOOP_AQ_DEPTH - NVME_LOOP_NR_AEN_COMMANDS)

struct nvme_loop_iod {
	struct nvme_request	nvme_req;
	struct nvme_command	cmd;
	struct nvme_completion	rsp;
	struct nvmet_req	req;
	struct nvme_loop_queue	*queue;
	struct work_struct	work;
	struct sg_table		sg_table;
	struct scatterlist	first_sgl[];
};

struct nvme_loop_ctrl {
	spinlock_t		lock;
	struct nvme_loop_queue	*queues;
	u32			queue_count;

	struct blk_mq_tag_set	admin_tag_set;

	struct list_head	list;
	u64			cap;
	struct blk_mq_tag_set	tag_set;
	struct nvme_loop_iod	async_event_iod;
	struct nvme_ctrl	ctrl;

	struct nvmet_ctrl	*target_ctrl;
	struct work_struct	delete_work;
	struct work_struct	reset_work;
};

static inline struct nvme_loop_ctrl *to_loop_ctrl(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_loop_ctrl, ctrl);
}

struct nvme_loop_queue {
	struct nvmet_cq		nvme_cq;
	struct nvmet_sq		nvme_sq;
	struct nvme_loop_ctrl	*ctrl;
};

static struct nvmet_port *nvmet_loop_port;

static LIST_HEAD(nvme_loop_ctrl_list);
static DEFINE_MUTEX(nvme_loop_ctrl_mutex);

static void nvme_loop_queue_response(struct nvmet_req *nvme_req);
static void nvme_loop_delete_ctrl(struct nvmet_ctrl *ctrl);

static struct nvmet_fabrics_ops nvme_loop_ops;

static inline int nvme_loop_queue_idx(struct nvme_loop_queue *queue)
{
	return queue - queue->ctrl->queues;
}

static void nvme_loop_complete_rq(struct request *req)
{
	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
	int error = 0;

	nvme_cleanup_cmd(req);
	sg_free_table_chained(&iod->sg_table, true);

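	/*
	 * Retry the command if its status allows it; otherwise pick the
	 * value passed to blk_mq_end_request(): the raw NVMe status for
	 * passthrough requests, a translated errno for everything else.
	 */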
	if (unlikely(req->errors)) {
		if (nvme_req_needs_retry(req, req->errors)) {
			nvme_requeue_req(req);
			return;
		}

		if (blk_rq_is_passthrough(req))
			error = req->errors;
		else
			error = nvme_error_status(req->errors);
	}

	blk_mq_end_request(req, error);
}

static struct blk_mq_tags *nvme_loop_tagset(struct nvme_loop_queue *queue)
{
	u32 queue_idx = nvme_loop_queue_idx(queue);

	if (queue_idx == 0)
		return queue->ctrl->admin_tag_set.tags[queue_idx];
	return queue->ctrl->tag_set.tags[queue_idx - 1];
}

static void nvme_loop_queue_response(struct nvmet_req *req)
{
	struct nvme_loop_queue *queue =
		container_of(req->sq, struct nvme_loop_queue, nvme_sq);
	struct nvme_completion *cqe = req->rsp;

	/*
	 * AEN requests are special as they don't time out and can
	 * survive any kind of queue freeze and often don't respond to
	 * aborts.  We don't even bother to allocate a struct request
	 * for them but rather special case them here.
	 */
	if (unlikely(nvme_loop_queue_idx(queue) == 0 &&
			cqe->command_id >= NVME_LOOP_AQ_BLKMQ_DEPTH)) {
		nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
				&cqe->result);
	} else {
		struct request *rq;
		struct nvme_loop_iod *iod;

		rq = blk_mq_tag_to_rq(nvme_loop_tagset(queue), cqe->command_id);
		if (!rq) {
			dev_err(queue->ctrl->ctrl.device,
				"tag 0x%x on queue %d not found\n",
				cqe->command_id, nvme_loop_queue_idx(queue));
			return;
		}

		iod = blk_mq_rq_to_pdu(rq);
		iod->nvme_req.result = cqe->result;
		blk_mq_complete_request(rq, le16_to_cpu(cqe->status) >> 1);
	}
}

static void nvme_loop_execute_work(struct work_struct *work)
{
	struct nvme_loop_iod *iod =
		container_of(work, struct nvme_loop_iod, work);

	iod->req.execute(&iod->req);
}

static enum blk_eh_timer_return
nvme_loop_timeout(struct request *rq, bool reserved)
{
	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(rq);

	/* queue error recovery */
	schedule_work(&iod->queue->ctrl->reset_work);

	/* fail with DNR on admin cmd timeout */
	rq->errors = NVME_SC_ABORT_REQ | NVME_SC_DNR;

	return BLK_EH_HANDLED;
}

static int nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
		const struct blk_mq_queue_data *bd)
{
	struct nvme_ns *ns = hctx->queue->queuedata;
	struct nvme_loop_queue *queue = hctx->driver_data;
	struct request *req = bd->rq;
	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
	int ret;

	ret = nvme_setup_cmd(ns, req, &iod->cmd);
	if (ret != BLK_MQ_RQ_QUEUE_OK)
		return ret;

	iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;
	iod->req.port = nvmet_loop_port;
	if (!nvmet_req_init(&iod->req, &queue->nvme_cq,
			&queue->nvme_sq, &nvme_loop_ops)) {
		nvme_cleanup_cmd(req);
		blk_mq_start_request(req);
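		/* the target refused the command: complete it right away */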
		nvme_loop_queue_response(&iod->req);
		return BLK_MQ_RQ_QUEUE_OK;
	}

	if (blk_rq_bytes(req)) {
		iod->sg_table.sgl = iod->first_sgl;
		ret = sg_alloc_table_chained(&iod->sg_table,
			blk_rq_nr_phys_segments(req),
			iod->sg_table.sgl);
		if (ret)
			return BLK_MQ_RQ_QUEUE_BUSY;

		iod->req.sg = iod->sg_table.sgl;
		iod->req.sg_cnt = blk_rq_map_sg(req->q, req, iod->sg_table.sgl);
	}

	blk_mq_start_request(req);

	schedule_work(&iod->work);
	return BLK_MQ_RQ_QUEUE_OK;
}

static void nvme_loop_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(arg);
	struct nvme_loop_queue *queue = &ctrl->queues[0];
	struct nvme_loop_iod *iod = &ctrl->async_event_iod;

	memset(&iod->cmd, 0, sizeof(iod->cmd));
	iod->cmd.common.opcode = nvme_admin_async_event;
	iod->cmd.common.command_id = NVME_LOOP_AQ_BLKMQ_DEPTH;
	iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;

	if (!nvmet_req_init(&iod->req, &queue->nvme_cq, &queue->nvme_sq,
			&nvme_loop_ops)) {
		dev_err(ctrl->ctrl.device, "failed async event work\n");
		return;
	}

	schedule_work(&iod->work);
}

static int nvme_loop_init_iod(struct nvme_loop_ctrl *ctrl,
		struct nvme_loop_iod *iod, unsigned int queue_idx)
{
	iod->req.cmd = &iod->cmd;
	iod->req.rsp = &iod->rsp;
	iod->queue = &ctrl->queues[queue_idx];
	INIT_WORK(&iod->work, nvme_loop_execute_work);
	return 0;
}

static int nvme_loop_init_request(void *data, struct request *req,
		unsigned int hctx_idx, unsigned int rq_idx,
		unsigned int numa_node)
{
	return nvme_loop_init_iod(data, blk_mq_rq_to_pdu(req), hctx_idx + 1);
}

static int nvme_loop_init_admin_request(void *data, struct request *req,
		unsigned int hctx_idx, unsigned int rq_idx,
		unsigned int numa_node)
{
	return nvme_loop_init_iod(data, blk_mq_rq_to_pdu(req), 0);
}

static int nvme_loop_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_loop_ctrl *ctrl = data;
	struct nvme_loop_queue *queue = &ctrl->queues[hctx_idx + 1];

	BUG_ON(hctx_idx >= ctrl->queue_count);

	hctx->driver_data = queue;
	return 0;
}

static int nvme_loop_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_loop_ctrl *ctrl = data;
	struct nvme_loop_queue *queue = &ctrl->queues[0];

	BUG_ON(hctx_idx != 0);

	hctx->driver_data = queue;
	return 0;
}

static const struct blk_mq_ops nvme_loop_mq_ops = {
	.queue_rq	= nvme_loop_queue_rq,
	.complete	= nvme_loop_complete_rq,
	.init_request	= nvme_loop_init_request,
	.init_hctx	= nvme_loop_init_hctx,
	.timeout	= nvme_loop_timeout,
};

static const struct blk_mq_ops nvme_loop_admin_mq_ops = {
	.queue_rq	= nvme_loop_queue_rq,
	.complete	= nvme_loop_complete_rq,
	.init_request	= nvme_loop_init_admin_request,
	.init_hctx	= nvme_loop_init_admin_hctx,
	.timeout	= nvme_loop_timeout,
};

static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
{
	nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
	blk_cleanup_queue(ctrl->ctrl.admin_q);
	blk_mq_free_tag_set(&ctrl->admin_tag_set);
}

static void nvme_loop_free_ctrl(struct nvme_ctrl *nctrl)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(nctrl);

	if (list_empty(&ctrl->list))
		goto free_ctrl;

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_del(&ctrl->list);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	if (nctrl->tagset) {
		blk_cleanup_queue(ctrl->ctrl.connect_q);
		blk_mq_free_tag_set(&ctrl->tag_set);
	}
	kfree(ctrl->queues);
	nvmf_free_options(nctrl->opts);
free_ctrl:
	kfree(ctrl);
}

static void nvme_loop_destroy_io_queues(struct nvme_loop_ctrl *ctrl)
{
	int i;

	for (i = 1; i < ctrl->queue_count; i++)
		nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
}

static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl)
{
	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
	unsigned int nr_io_queues;
	int ret, i;

	nr_io_queues = min(opts->nr_io_queues, num_online_cpus());
	ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
	if (ret || !nr_io_queues)
		return ret;

	dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n", nr_io_queues);

	for (i = 1; i <= nr_io_queues; i++) {
		ctrl->queues[i].ctrl = ctrl;
		ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq);
		if (ret)
			goto out_destroy_queues;

		ctrl->queue_count++;
	}

	return 0;

out_destroy_queues:
	nvme_loop_destroy_io_queues(ctrl);
	return ret;
}

static int nvme_loop_connect_io_queues(struct nvme_loop_ctrl *ctrl)
{
	int i, ret;

	for (i = 1; i < ctrl->queue_count; i++) {
		ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
		if (ret)
			return ret;
	}

	return 0;
}

static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
{
	int error;

	memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
	ctrl->admin_tag_set.ops = &nvme_loop_admin_mq_ops;
	ctrl->admin_tag_set.queue_depth = NVME_LOOP_AQ_BLKMQ_DEPTH;
	ctrl->admin_tag_set.reserved_tags = 2; /* connect + keep-alive */
	ctrl->admin_tag_set.numa_node = NUMA_NO_NODE;
	ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
		SG_CHUNK_SIZE * sizeof(struct scatterlist);
	ctrl->admin_tag_set.driver_data = ctrl;
	ctrl->admin_tag_set.nr_hw_queues = 1;
	ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT;

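	/*
	 * Set up the target-side admin submission queue before creating the
	 * host-side blk-mq admin queue that will feed commands into it.
	 */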
	ctrl->queues[0].ctrl = ctrl;
	error = nvmet_sq_init(&ctrl->queues[0].nvme_sq);
	if (error)
		return error;
	ctrl->queue_count = 1;

	error = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
	if (error)
		goto out_free_sq;

	ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
	if (IS_ERR(ctrl->ctrl.admin_q)) {
		error = PTR_ERR(ctrl->ctrl.admin_q);
		goto out_free_tagset;
	}

	error = nvmf_connect_admin_queue(&ctrl->ctrl);
	if (error)
		goto out_cleanup_queue;

	error = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->cap);
	if (error) {
		dev_err(ctrl->ctrl.device,
			"prop_get NVME_REG_CAP failed\n");
		goto out_cleanup_queue;
	}

	ctrl->ctrl.sqsize =
		min_t(int, NVME_CAP_MQES(ctrl->cap) + 1, ctrl->ctrl.sqsize);

	error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap);
	if (error)
		goto out_cleanup_queue;

	ctrl->ctrl.max_hw_sectors =
		(NVME_LOOP_MAX_SEGMENTS - 1) << (PAGE_SHIFT - 9);

	error = nvme_init_identify(&ctrl->ctrl);
	if (error)
		goto out_cleanup_queue;

	nvme_start_keep_alive(&ctrl->ctrl);

	return 0;

out_cleanup_queue:
	blk_cleanup_queue(ctrl->ctrl.admin_q);
out_free_tagset:
	blk_mq_free_tag_set(&ctrl->admin_tag_set);
out_free_sq:
	nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
	return error;
}

static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl)
{
	nvme_stop_keep_alive(&ctrl->ctrl);

	if (ctrl->queue_count > 1) {
		nvme_stop_queues(&ctrl->ctrl);
		blk_mq_tagset_busy_iter(&ctrl->tag_set,
					nvme_cancel_request, &ctrl->ctrl);
		nvme_loop_destroy_io_queues(ctrl);
	}

	if (ctrl->ctrl.state == NVME_CTRL_LIVE)
		nvme_shutdown_ctrl(&ctrl->ctrl);

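	/*
	 * Quiesce the admin queue, fail any commands still outstanding on
	 * it, then tear down the admin tag set and the target-side queue.
	 */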
	blk_mq_stop_hw_queues(ctrl->ctrl.admin_q);
	blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
				nvme_cancel_request, &ctrl->ctrl);
	nvme_loop_destroy_admin_queue(ctrl);
}

static void nvme_loop_del_ctrl_work(struct work_struct *work)
{
	struct nvme_loop_ctrl *ctrl = container_of(work,
				struct nvme_loop_ctrl, delete_work);

	nvme_uninit_ctrl(&ctrl->ctrl);
	nvme_loop_shutdown_ctrl(ctrl);
	nvme_put_ctrl(&ctrl->ctrl);
}

static int __nvme_loop_del_ctrl(struct nvme_loop_ctrl *ctrl)
{
	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING))
		return -EBUSY;

	if (!schedule_work(&ctrl->delete_work))
		return -EBUSY;

	return 0;
}

static int nvme_loop_del_ctrl(struct nvme_ctrl *nctrl)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(nctrl);
	int ret;

	ret = __nvme_loop_del_ctrl(ctrl);
	if (ret)
		return ret;

	flush_work(&ctrl->delete_work);

	return 0;
}

static void nvme_loop_delete_ctrl(struct nvmet_ctrl *nctrl)
{
	struct nvme_loop_ctrl *ctrl;

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_for_each_entry(ctrl, &nvme_loop_ctrl_list, list) {
		if (ctrl->ctrl.cntlid == nctrl->cntlid)
			__nvme_loop_del_ctrl(ctrl);
	}
	mutex_unlock(&nvme_loop_ctrl_mutex);
}

static void nvme_loop_reset_ctrl_work(struct work_struct *work)
{
	struct nvme_loop_ctrl *ctrl = container_of(work,
					struct nvme_loop_ctrl, reset_work);
	bool changed;
	int ret;

	nvme_loop_shutdown_ctrl(ctrl);

	ret = nvme_loop_configure_admin_queue(ctrl);
	if (ret)
		goto out_disable;

	ret = nvme_loop_init_io_queues(ctrl);
	if (ret)
		goto out_destroy_admin;

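	/* re-issue a fabrics connect on every freshly created I/O queue */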
	ret = nvme_loop_connect_io_queues(ctrl);
	if (ret)
		goto out_destroy_io;

	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
	WARN_ON_ONCE(!changed);

	nvme_queue_scan(&ctrl->ctrl);
	nvme_queue_async_events(&ctrl->ctrl);

	nvme_start_queues(&ctrl->ctrl);

	return;

out_destroy_io:
	nvme_loop_destroy_io_queues(ctrl);
out_destroy_admin:
	nvme_loop_destroy_admin_queue(ctrl);
out_disable:
	dev_warn(ctrl->ctrl.device, "Removing after reset failure\n");
	nvme_uninit_ctrl(&ctrl->ctrl);
	nvme_put_ctrl(&ctrl->ctrl);
}

static int nvme_loop_reset_ctrl(struct nvme_ctrl *nctrl)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(nctrl);

	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING))
		return -EBUSY;

	if (!schedule_work(&ctrl->reset_work))
		return -EBUSY;

	flush_work(&ctrl->reset_work);

	return 0;
}

static const struct nvme_ctrl_ops nvme_loop_ctrl_ops = {
	.name			= "loop",
	.module			= THIS_MODULE,
	.is_fabrics		= true,
	.reg_read32		= nvmf_reg_read32,
	.reg_read64		= nvmf_reg_read64,
	.reg_write32		= nvmf_reg_write32,
	.reset_ctrl		= nvme_loop_reset_ctrl,
	.free_ctrl		= nvme_loop_free_ctrl,
	.submit_async_event	= nvme_loop_submit_async_event,
	.delete_ctrl		= nvme_loop_del_ctrl,
	.get_subsysnqn		= nvmf_get_subsysnqn,
};

static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
{
	int ret;

	ret = nvme_loop_init_io_queues(ctrl);
	if (ret)
		return ret;

	memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
	ctrl->tag_set.ops = &nvme_loop_mq_ops;
	ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
	ctrl->tag_set.reserved_tags = 1; /* fabric connect */
	ctrl->tag_set.numa_node = NUMA_NO_NODE;
	ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	ctrl->tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
		SG_CHUNK_SIZE * sizeof(struct scatterlist);
	ctrl->tag_set.driver_data = ctrl;
	ctrl->tag_set.nr_hw_queues = ctrl->queue_count - 1;
	ctrl->tag_set.timeout = NVME_IO_TIMEOUT;
	ctrl->ctrl.tagset = &ctrl->tag_set;

	ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
	if (ret)
		goto out_destroy_queues;

	ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
	if (IS_ERR(ctrl->ctrl.connect_q)) {
		ret = PTR_ERR(ctrl->ctrl.connect_q);
		goto out_free_tagset;
	}

	ret = nvme_loop_connect_io_queues(ctrl);
	if (ret)
		goto out_cleanup_connect_q;

	return 0;

out_cleanup_connect_q:
	blk_cleanup_queue(ctrl->ctrl.connect_q);
out_free_tagset:
	blk_mq_free_tag_set(&ctrl->tag_set);
out_destroy_queues:
	nvme_loop_destroy_io_queues(ctrl);
	return ret;
}

static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev,
		struct nvmf_ctrl_options *opts)
{
	struct nvme_loop_ctrl *ctrl;
	bool changed;
	int ret;

	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		return ERR_PTR(-ENOMEM);
	ctrl->ctrl.opts = opts;
	INIT_LIST_HEAD(&ctrl->list);

	INIT_WORK(&ctrl->delete_work, nvme_loop_del_ctrl_work);
	INIT_WORK(&ctrl->reset_work, nvme_loop_reset_ctrl_work);

	ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_loop_ctrl_ops,
				0 /* no quirks, we're perfect! */);
	if (ret)
		goto out_put_ctrl;

	spin_lock_init(&ctrl->lock);

	ret = -ENOMEM;

	ctrl->ctrl.sqsize = opts->queue_size - 1;
	ctrl->ctrl.kato = opts->kato;

	ctrl->queues = kcalloc(opts->nr_io_queues + 1, sizeof(*ctrl->queues),
			GFP_KERNEL);
	if (!ctrl->queues)
		goto out_uninit_ctrl;

	ret = nvme_loop_configure_admin_queue(ctrl);
	if (ret)
		goto out_free_queues;

	if (opts->queue_size > ctrl->ctrl.maxcmd) {
		/* warn if maxcmd is lower than queue_size */
		dev_warn(ctrl->ctrl.device,
			"queue_size %zu > ctrl maxcmd %u, clamping down\n",
			opts->queue_size, ctrl->ctrl.maxcmd);
		opts->queue_size = ctrl->ctrl.maxcmd;
	}

	if (opts->nr_io_queues) {
		ret = nvme_loop_create_io_queues(ctrl);
		if (ret)
			goto out_remove_admin_queue;
	}

	nvme_loop_init_iod(ctrl, &ctrl->async_event_iod, 0);

	dev_info(ctrl->ctrl.device,
		 "new ctrl: \"%s\"\n", ctrl->ctrl.opts->subsysnqn);

	kref_get(&ctrl->ctrl.kref);

	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
	WARN_ON_ONCE(!changed);

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_add_tail(&ctrl->list, &nvme_loop_ctrl_list);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	if (opts->nr_io_queues) {
		nvme_queue_scan(&ctrl->ctrl);
		nvme_queue_async_events(&ctrl->ctrl);
	}

	return &ctrl->ctrl;

out_remove_admin_queue:
	nvme_loop_destroy_admin_queue(ctrl);
out_free_queues:
	kfree(ctrl->queues);
out_uninit_ctrl:
	nvme_uninit_ctrl(&ctrl->ctrl);
out_put_ctrl:
	nvme_put_ctrl(&ctrl->ctrl);
	if (ret > 0)
		ret = -EIO;
	return ERR_PTR(ret);
}

static int nvme_loop_add_port(struct nvmet_port *port)
{
	/*
	 * XXX: disallow adding more than one port so there are no
	 * connection rejections when a subsystem is assigned to a port
	 * for which loop doesn't have a pointer.
	 * This scenario would be possible if we allowed more than one
	 * port to be added and a subsystem was assigned to a port other
	 * than nvmet_loop_port.
	 */

	if (nvmet_loop_port)
		return -EPERM;

	nvmet_loop_port = port;
	return 0;
}

static void nvme_loop_remove_port(struct nvmet_port *port)
{
	if (port == nvmet_loop_port)
		nvmet_loop_port = NULL;
}

static struct nvmet_fabrics_ops nvme_loop_ops = {
	.owner		= THIS_MODULE,
	.type		= NVMF_TRTYPE_LOOP,
	.add_port	= nvme_loop_add_port,
	.remove_port	= nvme_loop_remove_port,
	.queue_response	= nvme_loop_queue_response,
	.delete_ctrl	= nvme_loop_delete_ctrl,
};

static struct nvmf_transport_ops nvme_loop_transport = {
	.name		= "loop",
	.create_ctrl	= nvme_loop_create_ctrl,
};

static int __init nvme_loop_init_module(void)
{
	int ret;

	ret = nvmet_register_transport(&nvme_loop_ops);
	if (ret)
		return ret;

	ret = nvmf_register_transport(&nvme_loop_transport);
	if (ret)
		nvmet_unregister_transport(&nvme_loop_ops);

	return ret;
}

static void __exit nvme_loop_cleanup_module(void)
{
	struct nvme_loop_ctrl *ctrl, *next;

	nvmf_unregister_transport(&nvme_loop_transport);
	nvmet_unregister_transport(&nvme_loop_ops);

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_for_each_entry_safe(ctrl, next, &nvme_loop_ctrl_list, list)
		__nvme_loop_del_ctrl(ctrl);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	flush_scheduled_work();
}

module_init(nvme_loop_init_module);
module_exit(nvme_loop_cleanup_module);

MODULE_LICENSE("GPL v2");
MODULE_ALIAS("nvmet-transport-254"); /* 254 == NVMF_TRTYPE_LOOP */