// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe over Fabrics loopback device.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/scatterlist.h>
#include <linux/blk-mq.h>
#include <linux/nvme.h>
#include <linux/module.h>
#include <linux/parser.h>
#include "nvmet.h"
#include "../host/nvme.h"
#include "../host/fabrics.h"

#define NVME_LOOP_MAX_SEGMENTS		256

struct nvme_loop_iod {
	struct nvme_request	nvme_req;
	struct nvme_command	cmd;
	struct nvme_completion	cqe;
	struct nvmet_req	req;
	struct nvme_loop_queue	*queue;
	struct work_struct	work;
	struct sg_table		sg_table;
	struct scatterlist	first_sgl[];
};

struct nvme_loop_ctrl {
	struct nvme_loop_queue	*queues;

	struct blk_mq_tag_set	admin_tag_set;

	struct list_head	list;
	struct blk_mq_tag_set	tag_set;
	struct nvme_loop_iod	async_event_iod;
	struct nvme_ctrl	ctrl;

	struct nvmet_port	*port;
};

static inline struct nvme_loop_ctrl *to_loop_ctrl(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_loop_ctrl, ctrl);
}

enum nvme_loop_queue_flags {
	NVME_LOOP_Q_LIVE	= 0,
};

struct nvme_loop_queue {
	struct nvmet_cq		nvme_cq;
	struct nvmet_sq		nvme_sq;
	struct nvme_loop_ctrl	*ctrl;
	unsigned long		flags;
};

static LIST_HEAD(nvme_loop_ports);
static DEFINE_MUTEX(nvme_loop_ports_mutex);

static LIST_HEAD(nvme_loop_ctrl_list);
static DEFINE_MUTEX(nvme_loop_ctrl_mutex);

static void nvme_loop_queue_response(struct nvmet_req *nvme_req);
static void nvme_loop_delete_ctrl(struct nvmet_ctrl *ctrl);

static const struct nvmet_fabrics_ops nvme_loop_ops;

static inline int nvme_loop_queue_idx(struct nvme_loop_queue *queue)
{
	return queue - queue->ctrl->queues;
}

static void nvme_loop_complete_rq(struct request *req)
{
	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);

	sg_free_table_chained(&iod->sg_table, NVME_INLINE_SG_CNT);
	nvme_complete_rq(req);
}

static struct blk_mq_tags *nvme_loop_tagset(struct nvme_loop_queue *queue)
{
	u32 queue_idx = nvme_loop_queue_idx(queue);

	if (queue_idx == 0)
		return queue->ctrl->admin_tag_set.tags[queue_idx];
	return queue->ctrl->tag_set.tags[queue_idx - 1];
}

static void nvme_loop_queue_response(struct nvmet_req *req)
{
	struct nvme_loop_queue *queue =
		container_of(req->sq, struct nvme_loop_queue, nvme_sq);
	struct nvme_completion *cqe = req->cqe;

	/*
	 * AEN requests are special as they don't time out and can
	 * survive any kind of queue freeze and often don't respond to
	 * aborts.  We don't even bother to allocate a struct request
	 * for them but rather special case them here.
	 */
	if (unlikely(nvme_is_aen_req(nvme_loop_queue_idx(queue),
				     cqe->command_id))) {
		nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
				&cqe->result);
	} else {
		struct request *rq;

		rq = nvme_find_rq(nvme_loop_tagset(queue), cqe->command_id);
		if (!rq) {
			dev_err(queue->ctrl->ctrl.device,
				"got bad command_id %#x on queue %d\n",
				cqe->command_id, nvme_loop_queue_idx(queue));
			return;
		}

		if (!nvme_try_complete_req(rq, cqe->status, cqe->result))
			nvme_loop_complete_rq(rq);
	}
}

static void nvme_loop_execute_work(struct work_struct *work)
{
	struct nvme_loop_iod *iod =
		container_of(work, struct nvme_loop_iod, work);

	iod->req.execute(&iod->req);
}

/*
 * Queue a request from the host block layer.  There is no wire in the loop
 * transport: the command and data scatterlist are handed directly to the
 * target core as an nvmet_req and executed from a workqueue, and the
 * completion is routed back through nvme_loop_queue_response().
 */
static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
		const struct blk_mq_queue_data *bd)
{
	struct nvme_ns *ns = hctx->queue->queuedata;
	struct nvme_loop_queue *queue = hctx->driver_data;
	struct request *req = bd->rq;
	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
	bool queue_ready = test_bit(NVME_LOOP_Q_LIVE, &queue->flags);
	blk_status_t ret;

	if (!nvme_check_ready(&queue->ctrl->ctrl, req, queue_ready))
		return nvme_fail_nonready_command(&queue->ctrl->ctrl, req);

	ret = nvme_setup_cmd(ns, req);
	if (ret)
		return ret;

	nvme_start_request(req);
	iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;
	iod->req.port = queue->ctrl->port;
	if (!nvmet_req_init(&iod->req, &queue->nvme_cq,
			&queue->nvme_sq, &nvme_loop_ops))
		return BLK_STS_OK;

	if (blk_rq_nr_phys_segments(req)) {
		iod->sg_table.sgl = iod->first_sgl;
		if (sg_alloc_table_chained(&iod->sg_table,
				blk_rq_nr_phys_segments(req),
				iod->sg_table.sgl, NVME_INLINE_SG_CNT)) {
			nvme_cleanup_cmd(req);
			return BLK_STS_RESOURCE;
		}

		iod->req.sg = iod->sg_table.sgl;
		iod->req.sg_cnt = blk_rq_map_sg(req->q, req, iod->sg_table.sgl);
		iod->req.transfer_len = blk_rq_payload_bytes(req);
	}

	queue_work(nvmet_wq, &iod->work);
	return BLK_STS_OK;
}

static void nvme_loop_submit_async_event(struct nvme_ctrl *arg)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(arg);
	struct nvme_loop_queue *queue = &ctrl->queues[0];
	struct nvme_loop_iod *iod = &ctrl->async_event_iod;

	memset(&iod->cmd, 0, sizeof(iod->cmd));
	iod->cmd.common.opcode = nvme_admin_async_event;
	iod->cmd.common.command_id = NVME_AQ_BLK_MQ_DEPTH;
	iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;

	if (!nvmet_req_init(&iod->req, &queue->nvme_cq, &queue->nvme_sq,
			&nvme_loop_ops)) {
		dev_err(ctrl->ctrl.device, "failed async event work\n");
		return;
	}

	queue_work(nvmet_wq, &iod->work);
}

static int nvme_loop_init_iod(struct nvme_loop_ctrl *ctrl,
		struct nvme_loop_iod *iod, unsigned int queue_idx)
{
	iod->req.cmd = &iod->cmd;
	iod->req.cqe = &iod->cqe;
	iod->queue = &ctrl->queues[queue_idx];
	INIT_WORK(&iod->work, nvme_loop_execute_work);
	return 0;
}

static int nvme_loop_init_request(struct blk_mq_tag_set *set,
		struct request *req, unsigned int hctx_idx,
		unsigned int numa_node)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(set->driver_data);
	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);

	nvme_req(req)->ctrl = &ctrl->ctrl;
	nvme_req(req)->cmd = &iod->cmd;
	return nvme_loop_init_iod(ctrl, blk_mq_rq_to_pdu(req),
			(set == &ctrl->tag_set) ? hctx_idx + 1 : 0);
}

static struct lock_class_key loop_hctx_fq_lock_key;

static int nvme_loop_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(data);
	struct nvme_loop_queue *queue = &ctrl->queues[hctx_idx + 1];

	BUG_ON(hctx_idx >= ctrl->ctrl.queue_count);

	/*
	 * flush_end_io() can be called recursively for us, so use our own
	 * lock class key to avoid a possible recursive-locking report from
	 * lockdep.  This also lets us drop the dynamically allocated lock
	 * class for each flush queue, which could cause a horrible boot
	 * delay.
	 */
	blk_mq_hctx_set_fq_lock_class(hctx, &loop_hctx_fq_lock_key);

	hctx->driver_data = queue;
	return 0;
}

static int nvme_loop_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(data);
	struct nvme_loop_queue *queue = &ctrl->queues[0];

	BUG_ON(hctx_idx != 0);

	hctx->driver_data = queue;
	return 0;
}

static const struct blk_mq_ops nvme_loop_mq_ops = {
	.queue_rq	= nvme_loop_queue_rq,
	.complete	= nvme_loop_complete_rq,
	.init_request	= nvme_loop_init_request,
	.init_hctx	= nvme_loop_init_hctx,
};

static const struct blk_mq_ops nvme_loop_admin_mq_ops = {
	.queue_rq	= nvme_loop_queue_rq,
	.complete	= nvme_loop_complete_rq,
	.init_request	= nvme_loop_init_request,
	.init_hctx	= nvme_loop_init_admin_hctx,
};

static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
{
	if (!test_and_clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags))
		return;
	nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
	nvme_remove_admin_tag_set(&ctrl->ctrl);
}

static void nvme_loop_free_ctrl(struct nvme_ctrl *nctrl)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(nctrl);

	if (list_empty(&ctrl->list))
		goto free_ctrl;

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_del(&ctrl->list);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	if (nctrl->tagset)
		nvme_remove_io_tag_set(nctrl);
	kfree(ctrl->queues);
	nvmf_free_options(nctrl->opts);
free_ctrl:
	kfree(ctrl);
}

static void nvme_loop_destroy_io_queues(struct nvme_loop_ctrl *ctrl)
{
	int i;

	for (i = 1; i < ctrl->ctrl.queue_count; i++) {
		clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags);
		nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
	}
	ctrl->ctrl.queue_count = 1;
}

static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl)
{
	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
	unsigned int nr_io_queues;
	int ret, i;

	nr_io_queues = min(opts->nr_io_queues, num_online_cpus());
	ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
	if (ret || !nr_io_queues)
		return ret;

	dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n", nr_io_queues);

	for (i = 1; i <= nr_io_queues; i++) {
		ctrl->queues[i].ctrl = ctrl;
		ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq);
		if (ret)
			goto out_destroy_queues;

		ctrl->ctrl.queue_count++;
	}

	return 0;

out_destroy_queues:
	nvme_loop_destroy_io_queues(ctrl);
	return ret;
}

static int nvme_loop_connect_io_queues(struct nvme_loop_ctrl *ctrl)
{
	int i, ret;

	for (i = 1; i < ctrl->ctrl.queue_count; i++) {
		ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
		if (ret)
			return ret;
		set_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags);
	}

	return 0;
}

static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
{
	int error;

	ctrl->queues[0].ctrl = ctrl;
	error = nvmet_sq_init(&ctrl->queues[0].nvme_sq);
	if (error)
		return error;
	ctrl->ctrl.queue_count = 1;

	error = nvme_alloc_admin_tag_set(&ctrl->ctrl, &ctrl->admin_tag_set,
			&nvme_loop_admin_mq_ops,
			sizeof(struct nvme_loop_iod) +
			NVME_INLINE_SG_CNT * sizeof(struct scatterlist));
	if (error)
		goto out_free_sq;

	/* reset stopped state for the fresh admin queue */
	clear_bit(NVME_CTRL_ADMIN_Q_STOPPED, &ctrl->ctrl.flags);

	error = nvmf_connect_admin_queue(&ctrl->ctrl);
	if (error)
		goto out_cleanup_tagset;

	set_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);

	error = nvme_enable_ctrl(&ctrl->ctrl);
	if (error)
		goto out_cleanup_tagset;

	ctrl->ctrl.max_hw_sectors =
		(NVME_LOOP_MAX_SEGMENTS - 1) << (PAGE_SHIFT - 9);

	nvme_unquiesce_admin_queue(&ctrl->ctrl);

	error = nvme_init_ctrl_finish(&ctrl->ctrl, false);
	if (error)
		goto out_cleanup_tagset;

	return 0;

out_cleanup_tagset:
	clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);
	nvme_remove_admin_tag_set(&ctrl->ctrl);
out_free_sq:
	nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
	return error;
}

static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl)
{
	if (ctrl->ctrl.queue_count > 1) {
		nvme_quiesce_io_queues(&ctrl->ctrl);
		nvme_cancel_tagset(&ctrl->ctrl);
		nvme_loop_destroy_io_queues(ctrl);
	}

	nvme_quiesce_admin_queue(&ctrl->ctrl);
	if (ctrl->ctrl.state == NVME_CTRL_LIVE)
		nvme_disable_ctrl(&ctrl->ctrl, true);

	nvme_cancel_admin_tagset(&ctrl->ctrl);
	nvme_loop_destroy_admin_queue(ctrl);
}

static void nvme_loop_delete_ctrl_host(struct nvme_ctrl *ctrl)
{
	nvme_loop_shutdown_ctrl(to_loop_ctrl(ctrl));
}

static void nvme_loop_delete_ctrl(struct nvmet_ctrl *nctrl)
{
	struct nvme_loop_ctrl *ctrl;

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_for_each_entry(ctrl, &nvme_loop_ctrl_list, list) {
		if (ctrl->ctrl.cntlid == nctrl->cntlid)
			nvme_delete_ctrl(&ctrl->ctrl);
	}
	mutex_unlock(&nvme_loop_ctrl_mutex);
}

static void nvme_loop_reset_ctrl_work(struct work_struct *work)
{
	struct nvme_loop_ctrl *ctrl =
		container_of(work, struct nvme_loop_ctrl, ctrl.reset_work);
	int ret;

	nvme_stop_ctrl(&ctrl->ctrl);
	nvme_loop_shutdown_ctrl(ctrl);

	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
		if (ctrl->ctrl.state != NVME_CTRL_DELETING &&
		    ctrl->ctrl.state != NVME_CTRL_DELETING_NOIO)
			/* state change failure for non-deleted ctrl? */
			WARN_ON_ONCE(1);
		return;
	}

	ret = nvme_loop_configure_admin_queue(ctrl);
	if (ret)
		goto out_disable;

	ret = nvme_loop_init_io_queues(ctrl);
	if (ret)
		goto out_destroy_admin;

	ret = nvme_loop_connect_io_queues(ctrl);
	if (ret)
		goto out_destroy_io;

	blk_mq_update_nr_hw_queues(&ctrl->tag_set,
			ctrl->ctrl.queue_count - 1);

	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE))
		WARN_ON_ONCE(1);

	nvme_start_ctrl(&ctrl->ctrl);

	return;

out_destroy_io:
	nvme_loop_destroy_io_queues(ctrl);
out_destroy_admin:
	nvme_loop_destroy_admin_queue(ctrl);
out_disable:
	dev_warn(ctrl->ctrl.device, "Removing after reset failure\n");
	nvme_uninit_ctrl(&ctrl->ctrl);
}

static const struct nvme_ctrl_ops nvme_loop_ctrl_ops = {
	.name			= "loop",
	.module			= THIS_MODULE,
	.flags			= NVME_F_FABRICS,
	.reg_read32		= nvmf_reg_read32,
	.reg_read64		= nvmf_reg_read64,
	.reg_write32		= nvmf_reg_write32,
	.free_ctrl		= nvme_loop_free_ctrl,
	.submit_async_event	= nvme_loop_submit_async_event,
	.delete_ctrl		= nvme_loop_delete_ctrl_host,
	.get_address		= nvmf_get_address,
};

static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
{
	int ret;

	ret = nvme_loop_init_io_queues(ctrl);
	if (ret)
		return ret;

	ret = nvme_alloc_io_tag_set(&ctrl->ctrl, &ctrl->tag_set,
			&nvme_loop_mq_ops, 1,
			sizeof(struct nvme_loop_iod) +
			NVME_INLINE_SG_CNT * sizeof(struct scatterlist));
	if (ret)
		goto out_destroy_queues;

	ret = nvme_loop_connect_io_queues(ctrl);
	if (ret)
		goto out_cleanup_tagset;

	return 0;

out_cleanup_tagset:
	nvme_remove_io_tag_set(&ctrl->ctrl);
out_destroy_queues:
	nvme_loop_destroy_io_queues(ctrl);
	return ret;
}

static struct nvmet_port *nvme_loop_find_port(struct nvme_ctrl *ctrl)
{
	struct nvmet_port *p, *found = NULL;

	mutex_lock(&nvme_loop_ports_mutex);
	list_for_each_entry(p, &nvme_loop_ports, entry) {
		/* if no transport address is specified use the first port */
		if ((ctrl->opts->mask & NVMF_OPT_TRADDR) &&
		    strcmp(ctrl->opts->traddr, p->disc_addr.traddr))
			continue;
		found = p;
		break;
	}
	mutex_unlock(&nvme_loop_ports_mutex);
	return found;
}

static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev,
		struct nvmf_ctrl_options *opts)
{
	struct nvme_loop_ctrl *ctrl;
	int ret;

	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		return ERR_PTR(-ENOMEM);
	ctrl->ctrl.opts = opts;
	INIT_LIST_HEAD(&ctrl->list);

	INIT_WORK(&ctrl->ctrl.reset_work, nvme_loop_reset_ctrl_work);

	ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_loop_ctrl_ops,
				0 /* no quirks, we're perfect! */);
	if (ret) {
		kfree(ctrl);
		goto out;
	}

	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING))
		WARN_ON_ONCE(1);

	ret = -ENOMEM;

	ctrl->ctrl.kato = opts->kato;
	ctrl->port = nvme_loop_find_port(&ctrl->ctrl);

	ctrl->queues = kcalloc(opts->nr_io_queues + 1, sizeof(*ctrl->queues),
			GFP_KERNEL);
	if (!ctrl->queues)
		goto out_uninit_ctrl;

	ret = nvme_loop_configure_admin_queue(ctrl);
	if (ret)
		goto out_free_queues;

	if (opts->queue_size > ctrl->ctrl.maxcmd) {
		/* warn if maxcmd is lower than queue_size */
		dev_warn(ctrl->ctrl.device,
			"queue_size %zu > ctrl maxcmd %u, clamping down\n",
			opts->queue_size, ctrl->ctrl.maxcmd);
		opts->queue_size = ctrl->ctrl.maxcmd;
	}
	ctrl->ctrl.sqsize = opts->queue_size - 1;

	if (opts->nr_io_queues) {
		ret = nvme_loop_create_io_queues(ctrl);
		if (ret)
			goto out_remove_admin_queue;
	}

	nvme_loop_init_iod(ctrl, &ctrl->async_event_iod, 0);

	dev_info(ctrl->ctrl.device,
		 "new ctrl: \"%s\"\n", ctrl->ctrl.opts->subsysnqn);

	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE))
		WARN_ON_ONCE(1);

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_add_tail(&ctrl->list, &nvme_loop_ctrl_list);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	nvme_start_ctrl(&ctrl->ctrl);

	return &ctrl->ctrl;

out_remove_admin_queue:
	nvme_loop_destroy_admin_queue(ctrl);
out_free_queues:
	kfree(ctrl->queues);
out_uninit_ctrl:
	nvme_uninit_ctrl(&ctrl->ctrl);
	nvme_put_ctrl(&ctrl->ctrl);
out:
	if (ret > 0)
		ret = -EIO;
	return ERR_PTR(ret);
}

static int nvme_loop_add_port(struct nvmet_port *port)
{
	mutex_lock(&nvme_loop_ports_mutex);
	list_add_tail(&port->entry, &nvme_loop_ports);
	mutex_unlock(&nvme_loop_ports_mutex);
	return 0;
}

static void nvme_loop_remove_port(struct nvmet_port *port)
{
	mutex_lock(&nvme_loop_ports_mutex);
	list_del_init(&port->entry);
	mutex_unlock(&nvme_loop_ports_mutex);

	/*
	 * Ensure any ctrls that are in the process of being
	 * deleted are in fact deleted before we return
	 * and free the port.  This is to prevent active
	 * ctrls from using a port after it's freed.
	 */
	flush_workqueue(nvme_delete_wq);
}

static const struct nvmet_fabrics_ops nvme_loop_ops = {
	.owner		= THIS_MODULE,
	.type		= NVMF_TRTYPE_LOOP,
	.add_port	= nvme_loop_add_port,
	.remove_port	= nvme_loop_remove_port,
	.queue_response = nvme_loop_queue_response,
	.delete_ctrl	= nvme_loop_delete_ctrl,
};

static struct nvmf_transport_ops nvme_loop_transport = {
	.name		= "loop",
	.module		= THIS_MODULE,
	.create_ctrl	= nvme_loop_create_ctrl,
	.allowed_opts	= NVMF_OPT_TRADDR,
};

static int __init nvme_loop_init_module(void)
{
	int ret;

	ret = nvmet_register_transport(&nvme_loop_ops);
	if (ret)
		return ret;

	ret = nvmf_register_transport(&nvme_loop_transport);
	if (ret)
		nvmet_unregister_transport(&nvme_loop_ops);

	return ret;
}

static void __exit nvme_loop_cleanup_module(void)
{
	struct nvme_loop_ctrl *ctrl, *next;

	nvmf_unregister_transport(&nvme_loop_transport);
	nvmet_unregister_transport(&nvme_loop_ops);

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_for_each_entry_safe(ctrl, next, &nvme_loop_ctrl_list, list)
		nvme_delete_ctrl(&ctrl->ctrl);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	flush_workqueue(nvme_delete_wq);
}

module_init(nvme_loop_init_module);
module_exit(nvme_loop_cleanup_module);

MODULE_LICENSE("GPL v2");
MODULE_ALIAS("nvmet-transport-254"); /* 254 == NVMF_TRTYPE_LOOP */
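
/*
 * Usage sketch (illustrative only, not part of this driver): one common way
 * to exercise the loop transport from userspace is via the nvmet configfs
 * interface plus nvme-cli.  The subsystem NQN "testnqn" and the backing
 * device /dev/nvme0n1 below are placeholders.
 *
 *	modprobe nvme-loop
 *	mkdir /sys/kernel/config/nvmet/subsystems/testnqn
 *	echo 1 > /sys/kernel/config/nvmet/subsystems/testnqn/attr_allow_any_host
 *	mkdir /sys/kernel/config/nvmet/subsystems/testnqn/namespaces/1
 *	echo -n /dev/nvme0n1 > \
 *		/sys/kernel/config/nvmet/subsystems/testnqn/namespaces/1/device_path
 *	echo 1 > /sys/kernel/config/nvmet/subsystems/testnqn/namespaces/1/enable
 *	mkdir /sys/kernel/config/nvmet/ports/1
 *	echo loop > /sys/kernel/config/nvmet/ports/1/addr_trtype
 *	ln -s /sys/kernel/config/nvmet/subsystems/testnqn \
 *		/sys/kernel/config/nvmet/ports/1/subsystems/testnqn
 *	nvme connect -t loop -n testnqn
 */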