/* xref: /openbmc/linux/drivers/nvme/target/loop.c (revision fe4a97918de02d5c656f29664770e335df12e090) */
/*
 * NVMe over Fabrics loopback device.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/scatterlist.h>
#include <linux/blk-mq.h>
#include <linux/nvme.h>
#include <linux/module.h>
#include <linux/parser.h>
#include "nvmet.h"
#include "../host/nvme.h"
#include "../host/fabrics.h"

#define NVME_LOOP_MAX_SEGMENTS		256

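/*
 * Per-I/O state shared between the host and target halves of the loop:
 * the NVMe command and completion live here and are referenced both by
 * the host-side request (via nvme_req) and by the embedded target-side
 * nvmet_req, so commands "loop back" without any copying.
 */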
struct nvme_loop_iod {
	struct nvme_request	nvme_req;
	struct nvme_command	cmd;
	struct nvme_completion	rsp;
	struct nvmet_req	req;
	struct nvme_loop_queue	*queue;
	struct work_struct	work;
	struct sg_table		sg_table;
	struct scatterlist	first_sgl[];
};

struct nvme_loop_ctrl {
	struct nvme_loop_queue	*queues;

	struct blk_mq_tag_set	admin_tag_set;

	struct list_head	list;
	struct blk_mq_tag_set	tag_set;
	struct nvme_loop_iod	async_event_iod;
	struct nvme_ctrl	ctrl;

	struct nvmet_ctrl	*target_ctrl;
	struct nvmet_port	*port;
};

static inline struct nvme_loop_ctrl *to_loop_ctrl(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_loop_ctrl, ctrl);
}

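/* set once the fabrics Connect on a queue has succeeded */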
enum nvme_loop_queue_flags {
	NVME_LOOP_Q_LIVE	= 0,
};

struct nvme_loop_queue {
	struct nvmet_cq		nvme_cq;
	struct nvmet_sq		nvme_sq;
	struct nvme_loop_ctrl	*ctrl;
	unsigned long		flags;
};

static LIST_HEAD(nvme_loop_ports);
static DEFINE_MUTEX(nvme_loop_ports_mutex);

static LIST_HEAD(nvme_loop_ctrl_list);
static DEFINE_MUTEX(nvme_loop_ctrl_mutex);

static void nvme_loop_queue_response(struct nvmet_req *nvme_req);
static void nvme_loop_delete_ctrl(struct nvmet_ctrl *ctrl);

static const struct nvmet_fabrics_ops nvme_loop_ops;

static inline int nvme_loop_queue_idx(struct nvme_loop_queue *queue)
{
	return queue - queue->ctrl->queues;
}

static void nvme_loop_complete_rq(struct request *req)
{
	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);

	nvme_cleanup_cmd(req);
	sg_free_table_chained(&iod->sg_table, true);
	nvme_complete_rq(req);
}

static struct blk_mq_tags *nvme_loop_tagset(struct nvme_loop_queue *queue)
{
	u32 queue_idx = nvme_loop_queue_idx(queue);

	if (queue_idx == 0)
		return queue->ctrl->admin_tag_set.tags[queue_idx];
	return queue->ctrl->tag_set.tags[queue_idx - 1];
}

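/*
 * Target-side completion path: the target core calls this when a looped
 * command completes.  Regular completions are fed straight back into
 * blk-mq; AENs, which have no struct request, are handled specially.
 */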
static void nvme_loop_queue_response(struct nvmet_req *req)
{
	struct nvme_loop_queue *queue =
		container_of(req->sq, struct nvme_loop_queue, nvme_sq);
	struct nvme_completion *cqe = req->rsp;

	/*
	 * AEN requests are special as they don't time out and can
	 * survive any kind of queue freeze and often don't respond to
	 * aborts.  We don't even bother to allocate a struct request
	 * for them but rather special case them here.
	 */
	if (unlikely(nvme_loop_queue_idx(queue) == 0 &&
			cqe->command_id >= NVME_AQ_BLK_MQ_DEPTH)) {
		nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
				&cqe->result);
	} else {
		struct request *rq;

		rq = blk_mq_tag_to_rq(nvme_loop_tagset(queue), cqe->command_id);
		if (!rq) {
			dev_err(queue->ctrl->ctrl.device,
				"tag 0x%x on queue %d not found\n",
				cqe->command_id, nvme_loop_queue_idx(queue));
			return;
		}

		nvme_end_request(rq, cqe->status, cqe->result);
	}
}

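/*
 * Commands are executed from a work item so that target-side processing
 * is decoupled from the host's submission context.
 */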
static void nvme_loop_execute_work(struct work_struct *work)
{
	struct nvme_loop_iod *iod =
		container_of(work, struct nvme_loop_iod, work);

	nvmet_req_execute(&iod->req);
}

static enum blk_eh_timer_return
nvme_loop_timeout(struct request *rq, bool reserved)
{
	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(rq);

	/* queue error recovery */
	nvme_reset_ctrl(&iod->queue->ctrl->ctrl);

	/* fail with DNR on cmd timeout; this handler serves both admin and I/O queues */
	nvme_req(rq)->status = NVME_SC_ABORT_REQ | NVME_SC_DNR;

	return BLK_EH_DONE;
}

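/*
 * Host-side ->queue_rq: instead of posting the command to hardware,
 * initialize the embedded nvmet_req and hand the request's scatterlist
 * to the target directly (no payload copy), then kick the work item
 * that executes the command on the target side.
 */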
static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
		const struct blk_mq_queue_data *bd)
{
	struct nvme_ns *ns = hctx->queue->queuedata;
	struct nvme_loop_queue *queue = hctx->driver_data;
	struct request *req = bd->rq;
	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
	blk_status_t ret;

	ret = nvmf_check_if_ready(&queue->ctrl->ctrl, req,
		test_bit(NVME_LOOP_Q_LIVE, &queue->flags), true);
	if (unlikely(ret))
		return ret;

	ret = nvme_setup_cmd(ns, req, &iod->cmd);
	if (ret)
		return ret;

	blk_mq_start_request(req);
	iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;
	iod->req.port = queue->ctrl->port;
	if (!nvmet_req_init(&iod->req, &queue->nvme_cq,
			&queue->nvme_sq, &nvme_loop_ops))
		return BLK_STS_OK;

	if (blk_rq_nr_phys_segments(req)) {
		iod->sg_table.sgl = iod->first_sgl;
		if (sg_alloc_table_chained(&iod->sg_table,
				blk_rq_nr_phys_segments(req),
				iod->sg_table.sgl))
			return BLK_STS_RESOURCE;

		iod->req.sg = iod->sg_table.sgl;
		iod->req.sg_cnt = blk_rq_map_sg(req->q, req, iod->sg_table.sgl);
		iod->req.transfer_len = blk_rq_payload_bytes(req);
	}

	schedule_work(&iod->work);
	return BLK_STS_OK;
}

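/*
 * AENs bypass blk-mq entirely: the reserved command_id
 * NVME_AQ_BLK_MQ_DEPTH marks the one outstanding async event command so
 * the completion handler can tell it apart from tagged requests.
 */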
static void nvme_loop_submit_async_event(struct nvme_ctrl *arg)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(arg);
	struct nvme_loop_queue *queue = &ctrl->queues[0];
	struct nvme_loop_iod *iod = &ctrl->async_event_iod;

	memset(&iod->cmd, 0, sizeof(iod->cmd));
	iod->cmd.common.opcode = nvme_admin_async_event;
	iod->cmd.common.command_id = NVME_AQ_BLK_MQ_DEPTH;
	iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;

	if (!nvmet_req_init(&iod->req, &queue->nvme_cq, &queue->nvme_sq,
			&nvme_loop_ops)) {
		dev_err(ctrl->ctrl.device, "failed async event work\n");
		return;
	}

	schedule_work(&iod->work);
}

static int nvme_loop_init_iod(struct nvme_loop_ctrl *ctrl,
		struct nvme_loop_iod *iod, unsigned int queue_idx)
{
	iod->req.cmd = &iod->cmd;
	iod->req.rsp = &iod->rsp;
	iod->queue = &ctrl->queues[queue_idx];
	INIT_WORK(&iod->work, nvme_loop_execute_work);
	return 0;
}

static int nvme_loop_init_request(struct blk_mq_tag_set *set,
		struct request *req, unsigned int hctx_idx,
		unsigned int numa_node)
{
	struct nvme_loop_ctrl *ctrl = set->driver_data;

	return nvme_loop_init_iod(ctrl, blk_mq_rq_to_pdu(req),
			(set == &ctrl->tag_set) ? hctx_idx + 1 : 0);
}

static int nvme_loop_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_loop_ctrl *ctrl = data;
	struct nvme_loop_queue *queue = &ctrl->queues[hctx_idx + 1];

	BUG_ON(hctx_idx >= ctrl->ctrl.queue_count);

	hctx->driver_data = queue;
	return 0;
}

static int nvme_loop_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_loop_ctrl *ctrl = data;
	struct nvme_loop_queue *queue = &ctrl->queues[0];

	BUG_ON(hctx_idx != 0);

	hctx->driver_data = queue;
	return 0;
}

static const struct blk_mq_ops nvme_loop_mq_ops = {
	.queue_rq	= nvme_loop_queue_rq,
	.complete	= nvme_loop_complete_rq,
	.init_request	= nvme_loop_init_request,
	.init_hctx	= nvme_loop_init_hctx,
	.timeout	= nvme_loop_timeout,
};

static const struct blk_mq_ops nvme_loop_admin_mq_ops = {
	.queue_rq	= nvme_loop_queue_rq,
	.complete	= nvme_loop_complete_rq,
	.init_request	= nvme_loop_init_request,
	.init_hctx	= nvme_loop_init_admin_hctx,
	.timeout	= nvme_loop_timeout,
};

static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
{
	clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);
	nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
	blk_cleanup_queue(ctrl->ctrl.admin_q);
	blk_mq_free_tag_set(&ctrl->admin_tag_set);
}

static void nvme_loop_free_ctrl(struct nvme_ctrl *nctrl)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(nctrl);

	if (list_empty(&ctrl->list))
		goto free_ctrl;

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_del(&ctrl->list);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	if (nctrl->tagset) {
		blk_cleanup_queue(ctrl->ctrl.connect_q);
		blk_mq_free_tag_set(&ctrl->tag_set);
	}
	kfree(ctrl->queues);
	nvmf_free_options(nctrl->opts);
free_ctrl:
	kfree(ctrl);
}

static void nvme_loop_destroy_io_queues(struct nvme_loop_ctrl *ctrl)
{
	int i;

	for (i = 1; i < ctrl->ctrl.queue_count; i++) {
		clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags);
		nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
	}
}

static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl)
{
	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
	unsigned int nr_io_queues;
	int ret, i;

	nr_io_queues = min(opts->nr_io_queues, num_online_cpus());
	ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
	if (ret || !nr_io_queues)
		return ret;

	dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n", nr_io_queues);

	for (i = 1; i <= nr_io_queues; i++) {
		ctrl->queues[i].ctrl = ctrl;
		ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq);
		if (ret)
			goto out_destroy_queues;

		ctrl->ctrl.queue_count++;
	}

	return 0;

out_destroy_queues:
	nvme_loop_destroy_io_queues(ctrl);
	return ret;
}

static int nvme_loop_connect_io_queues(struct nvme_loop_ctrl *ctrl)
{
	int i, ret;

	for (i = 1; i < ctrl->ctrl.queue_count; i++) {
		ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
		if (ret)
			return ret;
		set_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags);
	}

	return 0;
}

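/*
 * Admin queue bring-up: target SQ first, then the host-side tag set and
 * request queue, then the fabrics Connect, and only then register reads
 * and controller enable.  Errors unwind in the reverse order.
 */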
static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
{
	int error;

	memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
	ctrl->admin_tag_set.ops = &nvme_loop_admin_mq_ops;
	ctrl->admin_tag_set.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
	ctrl->admin_tag_set.reserved_tags = 2; /* connect + keep-alive */
	ctrl->admin_tag_set.numa_node = NUMA_NO_NODE;
	ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
		SG_CHUNK_SIZE * sizeof(struct scatterlist);
	ctrl->admin_tag_set.driver_data = ctrl;
	ctrl->admin_tag_set.nr_hw_queues = 1;
	ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT;
	ctrl->admin_tag_set.flags = BLK_MQ_F_NO_SCHED;

	ctrl->queues[0].ctrl = ctrl;
	error = nvmet_sq_init(&ctrl->queues[0].nvme_sq);
	if (error)
		return error;
	ctrl->ctrl.queue_count = 1;

	error = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
	if (error)
		goto out_free_sq;
	ctrl->ctrl.admin_tagset = &ctrl->admin_tag_set;

	ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
	if (IS_ERR(ctrl->ctrl.admin_q)) {
		error = PTR_ERR(ctrl->ctrl.admin_q);
		goto out_free_tagset;
	}

	error = nvmf_connect_admin_queue(&ctrl->ctrl);
	if (error)
		goto out_cleanup_queue;

	set_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);

	error = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->ctrl.cap);
	if (error) {
		dev_err(ctrl->ctrl.device,
			"prop_get NVME_REG_CAP failed\n");
		goto out_cleanup_queue;
	}

	ctrl->ctrl.sqsize =
		min_t(int, NVME_CAP_MQES(ctrl->ctrl.cap), ctrl->ctrl.sqsize);

	error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->ctrl.cap);
	if (error)
		goto out_cleanup_queue;

	ctrl->ctrl.max_hw_sectors =
		(NVME_LOOP_MAX_SEGMENTS - 1) << (PAGE_SHIFT - 9);

	error = nvme_init_identify(&ctrl->ctrl);
	if (error)
		goto out_cleanup_queue;

	return 0;

out_cleanup_queue:
	blk_cleanup_queue(ctrl->ctrl.admin_q);
out_free_tagset:
	blk_mq_free_tag_set(&ctrl->admin_tag_set);
out_free_sq:
	nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
	return error;
}

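/*
 * Teardown order matters here: quiesce the host-side queues, cancel any
 * requests still in flight, then destroy the target-side queues.  The
 * controller is only shut down cleanly if it was still live.
 */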
static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl)
{
	if (ctrl->ctrl.queue_count > 1) {
		nvme_stop_queues(&ctrl->ctrl);
		blk_mq_tagset_busy_iter(&ctrl->tag_set,
					nvme_cancel_request, &ctrl->ctrl);
		nvme_loop_destroy_io_queues(ctrl);
	}

	if (ctrl->ctrl.state == NVME_CTRL_LIVE)
		nvme_shutdown_ctrl(&ctrl->ctrl);

	blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
	blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
				nvme_cancel_request, &ctrl->ctrl);
	blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
	nvme_loop_destroy_admin_queue(ctrl);
}

static void nvme_loop_delete_ctrl_host(struct nvme_ctrl *ctrl)
{
	nvme_loop_shutdown_ctrl(to_loop_ctrl(ctrl));
}

static void nvme_loop_delete_ctrl(struct nvmet_ctrl *nctrl)
{
	struct nvme_loop_ctrl *ctrl;

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_for_each_entry(ctrl, &nvme_loop_ctrl_list, list) {
		if (ctrl->ctrl.cntlid == nctrl->cntlid)
			nvme_delete_ctrl(&ctrl->ctrl);
	}
	mutex_unlock(&nvme_loop_ctrl_mutex);
}

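/*
 * Reset is a full teardown and re-create of the admin and I/O queues;
 * on any failure the controller is removed rather than retried.
 */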
static void nvme_loop_reset_ctrl_work(struct work_struct *work)
{
	struct nvme_loop_ctrl *ctrl =
		container_of(work, struct nvme_loop_ctrl, ctrl.reset_work);
	bool changed;
	int ret;

	nvme_stop_ctrl(&ctrl->ctrl);
	nvme_loop_shutdown_ctrl(ctrl);

	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
		/* state change failure should never happen */
		WARN_ON_ONCE(1);
		return;
	}

	ret = nvme_loop_configure_admin_queue(ctrl);
	if (ret)
		goto out_disable;

	ret = nvme_loop_init_io_queues(ctrl);
	if (ret)
		goto out_destroy_admin;

	ret = nvme_loop_connect_io_queues(ctrl);
	if (ret)
		goto out_destroy_io;

	blk_mq_update_nr_hw_queues(&ctrl->tag_set,
			ctrl->ctrl.queue_count - 1);

	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
	WARN_ON_ONCE(!changed);

	nvme_start_ctrl(&ctrl->ctrl);

	return;

out_destroy_io:
	nvme_loop_destroy_io_queues(ctrl);
out_destroy_admin:
	nvme_loop_destroy_admin_queue(ctrl);
out_disable:
	dev_warn(ctrl->ctrl.device, "Removing after reset failure\n");
	nvme_uninit_ctrl(&ctrl->ctrl);
	nvme_put_ctrl(&ctrl->ctrl);
}

static const struct nvme_ctrl_ops nvme_loop_ctrl_ops = {
	.name			= "loop",
	.module			= THIS_MODULE,
	.flags			= NVME_F_FABRICS,
	.reg_read32		= nvmf_reg_read32,
	.reg_read64		= nvmf_reg_read64,
	.reg_write32		= nvmf_reg_write32,
	.free_ctrl		= nvme_loop_free_ctrl,
	.submit_async_event	= nvme_loop_submit_async_event,
	.delete_ctrl		= nvme_loop_delete_ctrl_host,
	.get_address		= nvmf_get_address,
};

static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
{
	int ret;

	ret = nvme_loop_init_io_queues(ctrl);
	if (ret)
		return ret;

	memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
	ctrl->tag_set.ops = &nvme_loop_mq_ops;
	ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
	ctrl->tag_set.reserved_tags = 1; /* fabric connect */
	ctrl->tag_set.numa_node = NUMA_NO_NODE;
	ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	ctrl->tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
		SG_CHUNK_SIZE * sizeof(struct scatterlist);
	ctrl->tag_set.driver_data = ctrl;
	ctrl->tag_set.nr_hw_queues = ctrl->ctrl.queue_count - 1;
	ctrl->tag_set.timeout = NVME_IO_TIMEOUT;
	ctrl->ctrl.tagset = &ctrl->tag_set;

	ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
	if (ret)
		goto out_destroy_queues;

	ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
	if (IS_ERR(ctrl->ctrl.connect_q)) {
		ret = PTR_ERR(ctrl->ctrl.connect_q);
		goto out_free_tagset;
	}

	ret = nvme_loop_connect_io_queues(ctrl);
	if (ret)
		goto out_cleanup_connect_q;

	return 0;

out_cleanup_connect_q:
	blk_cleanup_queue(ctrl->ctrl.connect_q);
out_free_tagset:
	blk_mq_free_tag_set(&ctrl->tag_set);
out_destroy_queues:
	nvme_loop_destroy_io_queues(ctrl);
	return ret;
}

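/*
 * Resolve the target port at connect time.  With NVMF_OPT_TRADDR set the
 * port's discovery traddr must match; otherwise the first registered
 * loop port is used.  Returns NULL if no loop port exists yet.
 */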
static struct nvmet_port *nvme_loop_find_port(struct nvme_ctrl *ctrl)
{
	struct nvmet_port *p, *found = NULL;

	mutex_lock(&nvme_loop_ports_mutex);
	list_for_each_entry(p, &nvme_loop_ports, entry) {
		/* if no transport address is specified, use the first port */
		if ((ctrl->opts->mask & NVMF_OPT_TRADDR) &&
		    strcmp(ctrl->opts->traddr, p->disc_addr.traddr))
			continue;
		found = p;
		break;
	}
	mutex_unlock(&nvme_loop_ports_mutex);
	return found;
}

static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev,
		struct nvmf_ctrl_options *opts)
{
	struct nvme_loop_ctrl *ctrl;
	bool changed;
	int ret;

	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		return ERR_PTR(-ENOMEM);
	ctrl->ctrl.opts = opts;
	INIT_LIST_HEAD(&ctrl->list);

	INIT_WORK(&ctrl->ctrl.reset_work, nvme_loop_reset_ctrl_work);

	ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_loop_ctrl_ops,
				0 /* no quirks, we're perfect! */);
	if (ret)
		goto out_put_ctrl;

	ret = -ENOMEM;

	ctrl->ctrl.sqsize = opts->queue_size - 1;
	ctrl->ctrl.kato = opts->kato;
	ctrl->port = nvme_loop_find_port(&ctrl->ctrl);

	ctrl->queues = kcalloc(opts->nr_io_queues + 1, sizeof(*ctrl->queues),
			GFP_KERNEL);
	if (!ctrl->queues)
		goto out_uninit_ctrl;

	ret = nvme_loop_configure_admin_queue(ctrl);
	if (ret)
		goto out_free_queues;

	if (opts->queue_size > ctrl->ctrl.maxcmd) {
		/* warn if maxcmd is lower than queue_size */
		dev_warn(ctrl->ctrl.device,
			"queue_size %zu > ctrl maxcmd %u, clamping down\n",
			opts->queue_size, ctrl->ctrl.maxcmd);
		opts->queue_size = ctrl->ctrl.maxcmd;
	}

	if (opts->nr_io_queues) {
		ret = nvme_loop_create_io_queues(ctrl);
		if (ret)
			goto out_remove_admin_queue;
	}

	nvme_loop_init_iod(ctrl, &ctrl->async_event_iod, 0);

	dev_info(ctrl->ctrl.device,
		 "new ctrl: \"%s\"\n", ctrl->ctrl.opts->subsysnqn);

	nvme_get_ctrl(&ctrl->ctrl);

	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
	WARN_ON_ONCE(!changed);

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_add_tail(&ctrl->list, &nvme_loop_ctrl_list);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	nvme_start_ctrl(&ctrl->ctrl);

	return &ctrl->ctrl;

out_remove_admin_queue:
	nvme_loop_destroy_admin_queue(ctrl);
out_free_queues:
	kfree(ctrl->queues);
out_uninit_ctrl:
	nvme_uninit_ctrl(&ctrl->ctrl);
out_put_ctrl:
	nvme_put_ctrl(&ctrl->ctrl);
	if (ret > 0)
		ret = -EIO;
	return ERR_PTR(ret);
}

static int nvme_loop_add_port(struct nvmet_port *port)
{
	mutex_lock(&nvme_loop_ports_mutex);
	list_add_tail(&port->entry, &nvme_loop_ports);
	mutex_unlock(&nvme_loop_ports_mutex);
	return 0;
}

static void nvme_loop_remove_port(struct nvmet_port *port)
{
	mutex_lock(&nvme_loop_ports_mutex);
	list_del_init(&port->entry);
	mutex_unlock(&nvme_loop_ports_mutex);
}

static const struct nvmet_fabrics_ops nvme_loop_ops = {
	.owner		= THIS_MODULE,
	.type		= NVMF_TRTYPE_LOOP,
	.add_port	= nvme_loop_add_port,
	.remove_port	= nvme_loop_remove_port,
	.queue_response = nvme_loop_queue_response,
	.delete_ctrl	= nvme_loop_delete_ctrl,
};

static struct nvmf_transport_ops nvme_loop_transport = {
	.name		= "loop",
	.module		= THIS_MODULE,
	.create_ctrl	= nvme_loop_create_ctrl,
	.allowed_opts	= NVMF_OPT_TRADDR,
};
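
/*
 * Illustrative usage (names below are examples, not defined here): once
 * configfs/nvmetcli has created a loop port and a subsystem, a host
 * connects with
 *
 *	nvme connect -t loop -n <subsysnqn>
 *
 * and, with NVMF_OPT_TRADDR now allowed, a specific loop port can be
 * selected with -a <traddr>.
 */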

static int __init nvme_loop_init_module(void)
{
	int ret;

	ret = nvmet_register_transport(&nvme_loop_ops);
	if (ret)
		return ret;

	ret = nvmf_register_transport(&nvme_loop_transport);
	if (ret)
		nvmet_unregister_transport(&nvme_loop_ops);

	return ret;
}

static void __exit nvme_loop_cleanup_module(void)
{
	struct nvme_loop_ctrl *ctrl, *next;

	nvmf_unregister_transport(&nvme_loop_transport);
	nvmet_unregister_transport(&nvme_loop_ops);

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_for_each_entry_safe(ctrl, next, &nvme_loop_ctrl_list, list)
		nvme_delete_ctrl(&ctrl->ctrl);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	flush_workqueue(nvme_delete_wq);
}

module_init(nvme_loop_init_module);
module_exit(nvme_loop_cleanup_module);

MODULE_LICENSE("GPL v2");
MODULE_ALIAS("nvmet-transport-254"); /* 254 == NVMF_TRTYPE_LOOP */