xref: /openbmc/linux/drivers/nvme/target/loop.c (revision 1b4ad7a50ab06573aa8841217d6a472dc1db2d85)
// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe over Fabrics loopback device.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/scatterlist.h>
#include <linux/blk-mq.h>
#include <linux/nvme.h>
#include <linux/module.h>
#include <linux/parser.h>
#include "nvmet.h"
#include "../host/nvme.h"
#include "../host/fabrics.h"

#define NVME_LOOP_MAX_SEGMENTS		256

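/*
 * Per-request context, allocated as the blk-mq PDU.  It embeds both the
 * host-side nvme_request and the target-side nvmet_req, so on the
 * loopback transport a single allocation carries a command through both
 * stacks; cmd is the SQE and cqe the CQE shared between the two sides.
 */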
struct nvme_loop_iod {
	struct nvme_request	nvme_req;
	struct nvme_command	cmd;
	struct nvme_completion	cqe;
	struct nvmet_req	req;
	struct nvme_loop_queue	*queue;
	struct work_struct	work;
	struct sg_table		sg_table;
	struct scatterlist	first_sgl[];
};

struct nvme_loop_ctrl {
	struct nvme_loop_queue	*queues;

	struct blk_mq_tag_set	admin_tag_set;

	struct list_head	list;
	struct blk_mq_tag_set	tag_set;
	struct nvme_loop_iod	async_event_iod;
	struct nvme_ctrl	ctrl;

	struct nvmet_ctrl	*target_ctrl;
	struct nvmet_port	*port;
};

static inline struct nvme_loop_ctrl *to_loop_ctrl(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_loop_ctrl, ctrl);
}

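/* NVME_LOOP_Q_LIVE is set once a queue has completed fabrics connect. */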
enum nvme_loop_queue_flags {
	NVME_LOOP_Q_LIVE	= 0,
};

struct nvme_loop_queue {
	struct nvmet_cq		nvme_cq;
	struct nvmet_sq		nvme_sq;
	struct nvme_loop_ctrl	*ctrl;
	unsigned long		flags;
};

static LIST_HEAD(nvme_loop_ports);
static DEFINE_MUTEX(nvme_loop_ports_mutex);

static LIST_HEAD(nvme_loop_ctrl_list);
static DEFINE_MUTEX(nvme_loop_ctrl_mutex);

static void nvme_loop_queue_response(struct nvmet_req *nvme_req);
static void nvme_loop_delete_ctrl(struct nvmet_ctrl *ctrl);

static const struct nvmet_fabrics_ops nvme_loop_ops;

static inline int nvme_loop_queue_idx(struct nvme_loop_queue *queue)
{
	return queue - queue->ctrl->queues;
}

static void nvme_loop_complete_rq(struct request *req)
{
	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);

	sg_free_table_chained(&iod->sg_table, NVME_INLINE_SG_CNT);
	nvme_complete_rq(req);
}

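/*
 * Map a loop queue back to the blk-mq tag set its requests came from:
 * queue 0 is the admin queue, and the I/O queues are offset by one in
 * the I/O tag set's tags[] array.
 */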
static struct blk_mq_tags *nvme_loop_tagset(struct nvme_loop_queue *queue)
{
	u32 queue_idx = nvme_loop_queue_idx(queue);

	if (queue_idx == 0)
		return queue->ctrl->admin_tag_set.tags[queue_idx];
	return queue->ctrl->tag_set.tags[queue_idx - 1];
}

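/*
 * Completion path, called by the nvmet core once the target side has
 * finished a command.  There is no wire on the loopback transport, so
 * this completes the original host-side request directly.
 */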
static void nvme_loop_queue_response(struct nvmet_req *req)
{
	struct nvme_loop_queue *queue =
		container_of(req->sq, struct nvme_loop_queue, nvme_sq);
	struct nvme_completion *cqe = req->cqe;

	/*
	 * AEN requests are special as they don't time out and can
	 * survive any kind of queue freeze and often don't respond to
	 * aborts.  We don't even bother to allocate a struct request
	 * for them but rather special case them here.
	 */
	if (unlikely(nvme_is_aen_req(nvme_loop_queue_idx(queue),
				     cqe->command_id))) {
		nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
				&cqe->result);
	} else {
		struct request *rq;

		rq = blk_mq_tag_to_rq(nvme_loop_tagset(queue), cqe->command_id);
		if (!rq) {
			dev_err(queue->ctrl->ctrl.device,
				"tag 0x%x on queue %d not found\n",
				cqe->command_id, nvme_loop_queue_idx(queue));
			return;
		}

		nvme_end_request(rq, cqe->status, cqe->result);
	}
}

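/*
 * Work item that runs the target-side command.  Execution is deferred
 * to workqueue context so the target code can block if it needs to,
 * rather than running directly in the submission path.
 */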
static void nvme_loop_execute_work(struct work_struct *work)
{
	struct nvme_loop_iod *iod =
		container_of(work, struct nvme_loop_iod, work);

	iod->req.execute(&iod->req);
}

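/*
 * Host-side ->queue_rq: instead of posting the command on a wire, hand
 * the request's scatterlist straight to the target and let a work item
 * execute it.  A false return from nvmet_req_init() means the target
 * has already queued an error response, hence BLK_STS_OK in that case.
 */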
static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
		const struct blk_mq_queue_data *bd)
{
	struct nvme_ns *ns = hctx->queue->queuedata;
	struct nvme_loop_queue *queue = hctx->driver_data;
	struct request *req = bd->rq;
	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
	bool queue_ready = test_bit(NVME_LOOP_Q_LIVE, &queue->flags);
	blk_status_t ret;

	if (!nvmf_check_ready(&queue->ctrl->ctrl, req, queue_ready))
		return nvmf_fail_nonready_command(&queue->ctrl->ctrl, req);

	ret = nvme_setup_cmd(ns, req, &iod->cmd);
	if (ret)
		return ret;

	blk_mq_start_request(req);
	iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;
	iod->req.port = queue->ctrl->port;
	if (!nvmet_req_init(&iod->req, &queue->nvme_cq,
			&queue->nvme_sq, &nvme_loop_ops))
		return BLK_STS_OK;

	if (blk_rq_nr_phys_segments(req)) {
		iod->sg_table.sgl = iod->first_sgl;
		if (sg_alloc_table_chained(&iod->sg_table,
				blk_rq_nr_phys_segments(req),
				iod->sg_table.sgl, NVME_INLINE_SG_CNT)) {
			nvme_cleanup_cmd(req);
			return BLK_STS_RESOURCE;
		}

		iod->req.sg = iod->sg_table.sgl;
		iod->req.sg_cnt = blk_rq_map_sg(req->q, req, iod->sg_table.sgl);
		iod->req.transfer_len = blk_rq_payload_bytes(req);
	}

	schedule_work(&iod->work);
	return BLK_STS_OK;
}

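/*
 * AEN commands bypass blk-mq entirely: a preallocated iod on the admin
 * queue is used, with the reserved command id NVME_AQ_BLK_MQ_DEPTH that
 * nvme_is_aen_req() recognizes on the completion side.
 */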
static void nvme_loop_submit_async_event(struct nvme_ctrl *arg)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(arg);
	struct nvme_loop_queue *queue = &ctrl->queues[0];
	struct nvme_loop_iod *iod = &ctrl->async_event_iod;

	memset(&iod->cmd, 0, sizeof(iod->cmd));
	iod->cmd.common.opcode = nvme_admin_async_event;
	iod->cmd.common.command_id = NVME_AQ_BLK_MQ_DEPTH;
	iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;

	if (!nvmet_req_init(&iod->req, &queue->nvme_cq, &queue->nvme_sq,
			&nvme_loop_ops)) {
		dev_err(ctrl->ctrl.device, "failed async event work\n");
		return;
	}

	schedule_work(&iod->work);
}

static int nvme_loop_init_iod(struct nvme_loop_ctrl *ctrl,
		struct nvme_loop_iod *iod, unsigned int queue_idx)
{
	iod->req.cmd = &iod->cmd;
	iod->req.cqe = &iod->cqe;
	iod->queue = &ctrl->queues[queue_idx];
	INIT_WORK(&iod->work, nvme_loop_execute_work);
	return 0;
}

static int nvme_loop_init_request(struct blk_mq_tag_set *set,
		struct request *req, unsigned int hctx_idx,
		unsigned int numa_node)
{
	struct nvme_loop_ctrl *ctrl = set->driver_data;

	nvme_req(req)->ctrl = &ctrl->ctrl;
	return nvme_loop_init_iod(ctrl, blk_mq_rq_to_pdu(req),
			(set == &ctrl->tag_set) ? hctx_idx + 1 : 0);
}

static int nvme_loop_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_loop_ctrl *ctrl = data;
	struct nvme_loop_queue *queue = &ctrl->queues[hctx_idx + 1];

	BUG_ON(hctx_idx >= ctrl->ctrl.queue_count);

	hctx->driver_data = queue;
	return 0;
}

static int nvme_loop_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_loop_ctrl *ctrl = data;
	struct nvme_loop_queue *queue = &ctrl->queues[0];

	BUG_ON(hctx_idx != 0);

	hctx->driver_data = queue;
	return 0;
}

static const struct blk_mq_ops nvme_loop_mq_ops = {
	.queue_rq	= nvme_loop_queue_rq,
	.complete	= nvme_loop_complete_rq,
	.init_request	= nvme_loop_init_request,
	.init_hctx	= nvme_loop_init_hctx,
};

static const struct blk_mq_ops nvme_loop_admin_mq_ops = {
	.queue_rq	= nvme_loop_queue_rq,
	.complete	= nvme_loop_complete_rq,
	.init_request	= nvme_loop_init_request,
	.init_hctx	= nvme_loop_init_admin_hctx,
};

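/*
 * Tear down the admin queue: mark it not live, destroy the target-side
 * SQ, then release the host-side request queues and tag set.
 */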
static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
{
	clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);
	nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
	blk_cleanup_queue(ctrl->ctrl.admin_q);
	blk_cleanup_queue(ctrl->ctrl.fabrics_q);
	blk_mq_free_tag_set(&ctrl->admin_tag_set);
}

static void nvme_loop_free_ctrl(struct nvme_ctrl *nctrl)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(nctrl);

	if (list_empty(&ctrl->list))
		goto free_ctrl;

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_del(&ctrl->list);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	if (nctrl->tagset) {
		blk_cleanup_queue(ctrl->ctrl.connect_q);
		blk_mq_free_tag_set(&ctrl->tag_set);
	}
	kfree(ctrl->queues);
	nvmf_free_options(nctrl->opts);
free_ctrl:
	kfree(ctrl);
}

static void nvme_loop_destroy_io_queues(struct nvme_loop_ctrl *ctrl)
{
	int i;

	for (i = 1; i < ctrl->ctrl.queue_count; i++) {
		clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags);
		nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
	}
}

static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl)
{
	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
	unsigned int nr_io_queues;
	int ret, i;

	nr_io_queues = min(opts->nr_io_queues, num_online_cpus());
	ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
	if (ret || !nr_io_queues)
		return ret;

	dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n", nr_io_queues);

	for (i = 1; i <= nr_io_queues; i++) {
		ctrl->queues[i].ctrl = ctrl;
		ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq);
		if (ret)
			goto out_destroy_queues;

		ctrl->ctrl.queue_count++;
	}

	return 0;

out_destroy_queues:
	nvme_loop_destroy_io_queues(ctrl);
	return ret;
}

static int nvme_loop_connect_io_queues(struct nvme_loop_ctrl *ctrl)
{
	int i, ret;

	for (i = 1; i < ctrl->ctrl.queue_count; i++) {
		ret = nvmf_connect_io_queue(&ctrl->ctrl, i, false);
		if (ret)
			return ret;
		set_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags);
	}

	return 0;
}

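/*
 * Bring up the admin queue: target-side SQ, host-side tag set and
 * request queues, fabrics connect, controller enable, then identify.
 * The unwind labels release resources in reverse order of setup.
 */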
static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
{
	int error;

	memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
	ctrl->admin_tag_set.ops = &nvme_loop_admin_mq_ops;
	ctrl->admin_tag_set.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
	ctrl->admin_tag_set.reserved_tags = 2; /* connect + keep-alive */
	ctrl->admin_tag_set.numa_node = ctrl->ctrl.numa_node;
	ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
		NVME_INLINE_SG_CNT * sizeof(struct scatterlist);
	ctrl->admin_tag_set.driver_data = ctrl;
	ctrl->admin_tag_set.nr_hw_queues = 1;
	ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT;
	ctrl->admin_tag_set.flags = BLK_MQ_F_NO_SCHED;

	ctrl->queues[0].ctrl = ctrl;
	error = nvmet_sq_init(&ctrl->queues[0].nvme_sq);
	if (error)
		return error;
	ctrl->ctrl.queue_count = 1;

	error = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
	if (error)
		goto out_free_sq;
	ctrl->ctrl.admin_tagset = &ctrl->admin_tag_set;

	ctrl->ctrl.fabrics_q = blk_mq_init_queue(&ctrl->admin_tag_set);
	if (IS_ERR(ctrl->ctrl.fabrics_q)) {
		error = PTR_ERR(ctrl->ctrl.fabrics_q);
		goto out_free_tagset;
	}

	ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
	if (IS_ERR(ctrl->ctrl.admin_q)) {
		error = PTR_ERR(ctrl->ctrl.admin_q);
		goto out_cleanup_fabrics_q;
	}

	error = nvmf_connect_admin_queue(&ctrl->ctrl);
	if (error)
		goto out_cleanup_queue;

	set_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);

	error = nvme_enable_ctrl(&ctrl->ctrl);
	if (error)
		goto out_cleanup_queue;

	ctrl->ctrl.max_hw_sectors =
		(NVME_LOOP_MAX_SEGMENTS - 1) << (PAGE_SHIFT - 9);

	blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);

	error = nvme_init_identify(&ctrl->ctrl);
	if (error)
		goto out_cleanup_queue;

	return 0;

out_cleanup_queue:
	blk_cleanup_queue(ctrl->ctrl.admin_q);
out_cleanup_fabrics_q:
	blk_cleanup_queue(ctrl->ctrl.fabrics_q);
out_free_tagset:
	blk_mq_free_tag_set(&ctrl->admin_tag_set);
out_free_sq:
	nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
	return error;
}

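/*
 * Quiesce the I/O and admin queues, cancel anything still in flight and
 * wait for it to complete, then tear the queues down.  Requests are
 * drained before the target-side SQs are destroyed.
 */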
static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl)
{
	if (ctrl->ctrl.queue_count > 1) {
		nvme_stop_queues(&ctrl->ctrl);
		blk_mq_tagset_busy_iter(&ctrl->tag_set,
					nvme_cancel_request, &ctrl->ctrl);
		blk_mq_tagset_wait_completed_request(&ctrl->tag_set);
		nvme_loop_destroy_io_queues(ctrl);
	}

	blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
	if (ctrl->ctrl.state == NVME_CTRL_LIVE)
		nvme_shutdown_ctrl(&ctrl->ctrl);

	blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
				nvme_cancel_request, &ctrl->ctrl);
	blk_mq_tagset_wait_completed_request(&ctrl->admin_tag_set);
	nvme_loop_destroy_admin_queue(ctrl);
}

static void nvme_loop_delete_ctrl_host(struct nvme_ctrl *ctrl)
{
	nvme_loop_shutdown_ctrl(to_loop_ctrl(ctrl));
}

static void nvme_loop_delete_ctrl(struct nvmet_ctrl *nctrl)
{
	struct nvme_loop_ctrl *ctrl;

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_for_each_entry(ctrl, &nvme_loop_ctrl_list, list) {
		if (ctrl->ctrl.cntlid == nctrl->cntlid)
			nvme_delete_ctrl(&ctrl->ctrl);
	}
	mutex_unlock(&nvme_loop_ctrl_mutex);
}

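/*
 * Controller reset: shut everything down, then rebuild the admin and
 * I/O queues from scratch.  On failure the controller is removed rather
 * than retried.
 */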
static void nvme_loop_reset_ctrl_work(struct work_struct *work)
{
	struct nvme_loop_ctrl *ctrl =
		container_of(work, struct nvme_loop_ctrl, ctrl.reset_work);
	bool changed;
	int ret;

	nvme_stop_ctrl(&ctrl->ctrl);
	nvme_loop_shutdown_ctrl(ctrl);

	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
		/* state change failure should never happen */
		WARN_ON_ONCE(1);
		return;
	}

	ret = nvme_loop_configure_admin_queue(ctrl);
	if (ret)
		goto out_disable;

	ret = nvme_loop_init_io_queues(ctrl);
	if (ret)
		goto out_destroy_admin;

	ret = nvme_loop_connect_io_queues(ctrl);
	if (ret)
		goto out_destroy_io;

	blk_mq_update_nr_hw_queues(&ctrl->tag_set,
			ctrl->ctrl.queue_count - 1);

	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
	WARN_ON_ONCE(!changed);

	nvme_start_ctrl(&ctrl->ctrl);

	return;

out_destroy_io:
	nvme_loop_destroy_io_queues(ctrl);
out_destroy_admin:
	nvme_loop_destroy_admin_queue(ctrl);
out_disable:
	dev_warn(ctrl->ctrl.device, "Removing after reset failure\n");
	nvme_uninit_ctrl(&ctrl->ctrl);
}

static const struct nvme_ctrl_ops nvme_loop_ctrl_ops = {
	.name			= "loop",
	.module			= THIS_MODULE,
	.flags			= NVME_F_FABRICS,
	.reg_read32		= nvmf_reg_read32,
	.reg_read64		= nvmf_reg_read64,
	.reg_write32		= nvmf_reg_write32,
	.free_ctrl		= nvme_loop_free_ctrl,
	.submit_async_event	= nvme_loop_submit_async_event,
	.delete_ctrl		= nvme_loop_delete_ctrl_host,
	.get_address		= nvmf_get_address,
};

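/*
 * Create the I/O queues: target-side SQs first, then the host-side tag
 * set and connect_q, and finally the per-queue fabrics connect commands.
 */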
static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
{
	int ret;

	ret = nvme_loop_init_io_queues(ctrl);
	if (ret)
		return ret;

	memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
	ctrl->tag_set.ops = &nvme_loop_mq_ops;
	ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
	ctrl->tag_set.reserved_tags = 1; /* fabric connect */
	ctrl->tag_set.numa_node = ctrl->ctrl.numa_node;
	ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	ctrl->tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
		NVME_INLINE_SG_CNT * sizeof(struct scatterlist);
	ctrl->tag_set.driver_data = ctrl;
	ctrl->tag_set.nr_hw_queues = ctrl->ctrl.queue_count - 1;
	ctrl->tag_set.timeout = NVME_IO_TIMEOUT;
	ctrl->ctrl.tagset = &ctrl->tag_set;

	ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
	if (ret)
		goto out_destroy_queues;

	ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
	if (IS_ERR(ctrl->ctrl.connect_q)) {
		ret = PTR_ERR(ctrl->ctrl.connect_q);
		goto out_free_tagset;
	}

	ret = nvme_loop_connect_io_queues(ctrl);
	if (ret)
		goto out_cleanup_connect_q;

	return 0;

out_cleanup_connect_q:
	blk_cleanup_queue(ctrl->ctrl.connect_q);
out_free_tagset:
	blk_mq_free_tag_set(&ctrl->tag_set);
out_destroy_queues:
	nvme_loop_destroy_io_queues(ctrl);
	return ret;
}

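/*
 * Resolve the controller's transport address to a registered loop port.
 * Without a traddr connect option the first port on the list is used.
 */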
static struct nvmet_port *nvme_loop_find_port(struct nvme_ctrl *ctrl)
{
	struct nvmet_port *p, *found = NULL;

	mutex_lock(&nvme_loop_ports_mutex);
	list_for_each_entry(p, &nvme_loop_ports, entry) {
		/* if no transport address is specified use the first port */
		if ((ctrl->opts->mask & NVMF_OPT_TRADDR) &&
		    strcmp(ctrl->opts->traddr, p->disc_addr.traddr))
			continue;
		found = p;
		break;
	}
	mutex_unlock(&nvme_loop_ports_mutex);
	return found;
}

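/*
 * "connect" on the loop transport: allocate a controller, bind it to a
 * loop port, and bring up the admin and (optionally) I/O queues.  Note
 * that opts->queue_size is clamped to the controller's MAXCMD.
 */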
static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev,
		struct nvmf_ctrl_options *opts)
{
	struct nvme_loop_ctrl *ctrl;
	bool changed;
	int ret;

	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		return ERR_PTR(-ENOMEM);
	ctrl->ctrl.opts = opts;
	INIT_LIST_HEAD(&ctrl->list);

	INIT_WORK(&ctrl->ctrl.reset_work, nvme_loop_reset_ctrl_work);

	ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_loop_ctrl_ops,
				0 /* no quirks, we're perfect! */);
	if (ret)
		goto out_put_ctrl;

	ret = -ENOMEM;

	ctrl->ctrl.sqsize = opts->queue_size - 1;
	ctrl->ctrl.kato = opts->kato;
	ctrl->port = nvme_loop_find_port(&ctrl->ctrl);

	ctrl->queues = kcalloc(opts->nr_io_queues + 1, sizeof(*ctrl->queues),
			GFP_KERNEL);
	if (!ctrl->queues)
		goto out_uninit_ctrl;

	ret = nvme_loop_configure_admin_queue(ctrl);
	if (ret)
		goto out_free_queues;

	if (opts->queue_size > ctrl->ctrl.maxcmd) {
		/* warn if maxcmd is lower than queue_size */
		dev_warn(ctrl->ctrl.device,
			"queue_size %zu > ctrl maxcmd %u, clamping down\n",
			opts->queue_size, ctrl->ctrl.maxcmd);
		opts->queue_size = ctrl->ctrl.maxcmd;
	}

	if (opts->nr_io_queues) {
		ret = nvme_loop_create_io_queues(ctrl);
		if (ret)
			goto out_remove_admin_queue;
	}

	nvme_loop_init_iod(ctrl, &ctrl->async_event_iod, 0);

	dev_info(ctrl->ctrl.device,
		 "new ctrl: \"%s\"\n", ctrl->ctrl.opts->subsysnqn);

	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
	WARN_ON_ONCE(!changed);

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_add_tail(&ctrl->list, &nvme_loop_ctrl_list);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	nvme_start_ctrl(&ctrl->ctrl);

	return &ctrl->ctrl;

out_remove_admin_queue:
	nvme_loop_destroy_admin_queue(ctrl);
out_free_queues:
	kfree(ctrl->queues);
out_uninit_ctrl:
	nvme_uninit_ctrl(&ctrl->ctrl);
out_put_ctrl:
	nvme_put_ctrl(&ctrl->ctrl);
	if (ret > 0)
		ret = -EIO;
	return ERR_PTR(ret);
}

static int nvme_loop_add_port(struct nvmet_port *port)
{
	mutex_lock(&nvme_loop_ports_mutex);
	list_add_tail(&port->entry, &nvme_loop_ports);
	mutex_unlock(&nvme_loop_ports_mutex);
	return 0;
}

static void nvme_loop_remove_port(struct nvmet_port *port)
{
	mutex_lock(&nvme_loop_ports_mutex);
	list_del_init(&port->entry);
	mutex_unlock(&nvme_loop_ports_mutex);

	/*
	 * Ensure any ctrls that are in the process of being
	 * deleted are in fact deleted before we return
	 * and free the port. This is to prevent active
	 * ctrls from using a port after it's freed.
	 */
	flush_workqueue(nvme_delete_wq);
}

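/* Target-side transport ops, invoked by the nvmet core. */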
static const struct nvmet_fabrics_ops nvme_loop_ops = {
	.owner		= THIS_MODULE,
	.type		= NVMF_TRTYPE_LOOP,
	.add_port	= nvme_loop_add_port,
	.remove_port	= nvme_loop_remove_port,
	.queue_response = nvme_loop_queue_response,
	.delete_ctrl	= nvme_loop_delete_ctrl,
};

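/* Host-side fabrics transport, selected with transport=loop on connect. */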
static struct nvmf_transport_ops nvme_loop_transport = {
	.name		= "loop",
	.module		= THIS_MODULE,
	.create_ctrl	= nvme_loop_create_ctrl,
	.allowed_opts	= NVMF_OPT_TRADDR,
};

static int __init nvme_loop_init_module(void)
{
	int ret;

	ret = nvmet_register_transport(&nvme_loop_ops);
	if (ret)
		return ret;

	ret = nvmf_register_transport(&nvme_loop_transport);
	if (ret)
		nvmet_unregister_transport(&nvme_loop_ops);

	return ret;
}

static void __exit nvme_loop_cleanup_module(void)
{
	struct nvme_loop_ctrl *ctrl, *next;

	nvmf_unregister_transport(&nvme_loop_transport);
	nvmet_unregister_transport(&nvme_loop_ops);

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_for_each_entry_safe(ctrl, next, &nvme_loop_ctrl_list, list)
		nvme_delete_ctrl(&ctrl->ctrl);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	flush_workqueue(nvme_delete_wq);
}

module_init(nvme_loop_init_module);
module_exit(nvme_loop_cleanup_module);

MODULE_LICENSE("GPL v2");
MODULE_ALIAS("nvmet-transport-254"); /* 254 == NVMF_TRTYPE_LOOP */
7173a85a5deSChristoph Hellwig MODULE_ALIAS("nvmet-transport-254"); /* 254 == NVMF_TRTYPE_LOOP */
718