/*
 * NVMe over Fabrics loopback device.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/scatterlist.h>
#include <linux/blk-mq.h>
#include <linux/nvme.h>
#include <linux/module.h>
#include <linux/parser.h>
#include "nvmet.h"
#include "../host/nvme.h"
#include "../host/fabrics.h"

#define NVME_LOOP_MAX_SEGMENTS		256

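/*
 * Per-command context, embedded in the blk-mq request PDU.  It carries both
 * the host side of the command (nvme_req/cmd/rsp) and the target side (req),
 * since a loopback command never leaves the machine.
 */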
struct nvme_loop_iod {
	struct nvme_request	nvme_req;
	struct nvme_command	cmd;
	struct nvme_completion	rsp;
	struct nvmet_req	req;
	struct nvme_loop_queue	*queue;
	struct work_struct	work;
	struct sg_table		sg_table;
	struct scatterlist	first_sgl[];
};

struct nvme_loop_ctrl {
	struct nvme_loop_queue	*queues;

	struct blk_mq_tag_set	admin_tag_set;

	struct list_head	list;
	struct blk_mq_tag_set	tag_set;
	struct nvme_loop_iod	async_event_iod;
	struct nvme_ctrl	ctrl;

	struct nvmet_ctrl	*target_ctrl;
};

static inline struct nvme_loop_ctrl *to_loop_ctrl(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_loop_ctrl, ctrl);
}

enum nvme_loop_queue_flags {
	NVME_LOOP_Q_LIVE	= 0,
};

struct nvme_loop_queue {
	struct nvmet_cq		nvme_cq;
	struct nvmet_sq		nvme_sq;
	struct nvme_loop_ctrl	*ctrl;
	unsigned long		flags;
};

static struct nvmet_port *nvmet_loop_port;

static LIST_HEAD(nvme_loop_ctrl_list);
static DEFINE_MUTEX(nvme_loop_ctrl_mutex);

static void nvme_loop_queue_response(struct nvmet_req *nvme_req);
static void nvme_loop_delete_ctrl(struct nvmet_ctrl *ctrl);

static struct nvmet_fabrics_ops nvme_loop_ops;

static inline int nvme_loop_queue_idx(struct nvme_loop_queue *queue)
{
	return queue - queue->ctrl->queues;
}

static void nvme_loop_complete_rq(struct request *req)
{
	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);

	nvme_cleanup_cmd(req);
	sg_free_table_chained(&iod->sg_table, true);
	nvme_complete_rq(req);
}

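/*
 * Return the tag set that owns the requests of a given queue: the admin tag
 * set for queue 0, otherwise the I/O tag set (whose hardware queues are
 * shifted down by one).
 */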
static struct blk_mq_tags *nvme_loop_tagset(struct nvme_loop_queue *queue)
{
	u32 queue_idx = nvme_loop_queue_idx(queue);

	if (queue_idx == 0)
		return queue->ctrl->admin_tag_set.tags[queue_idx];
	return queue->ctrl->tag_set.tags[queue_idx - 1];
}

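/*
 * Completion path: called by the nvmet core once the target side has
 * finished a command, to complete the matching host request (or async
 * event) in place.
 */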
static void nvme_loop_queue_response(struct nvmet_req *req)
{
	struct nvme_loop_queue *queue =
		container_of(req->sq, struct nvme_loop_queue, nvme_sq);
	struct nvme_completion *cqe = req->rsp;

	/*
	 * AEN requests are special as they don't time out and can
	 * survive any kind of queue freeze and often don't respond to
	 * aborts.  We don't even bother to allocate a struct request
	 * for them but rather special case them here.
	 */
	if (unlikely(nvme_loop_queue_idx(queue) == 0 &&
			cqe->command_id >= NVME_AQ_BLK_MQ_DEPTH)) {
		nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
				&cqe->result);
	} else {
		struct request *rq;

		rq = blk_mq_tag_to_rq(nvme_loop_tagset(queue), cqe->command_id);
		if (!rq) {
			dev_err(queue->ctrl->ctrl.device,
				"tag 0x%x on queue %d not found\n",
				cqe->command_id, nvme_loop_queue_idx(queue));
			return;
		}

		nvme_end_request(rq, cqe->status, cqe->result);
	}
}

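/* Runs in workqueue context and executes the target side of a command. */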
static void nvme_loop_execute_work(struct work_struct *work)
{
	struct nvme_loop_iod *iod =
		container_of(work, struct nvme_loop_iod, work);

	nvmet_req_execute(&iod->req);
}

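/*
 * A request timed out: schedule a controller reset for recovery and
 * complete the request immediately with an aborted, do-not-retry status.
 */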
static enum blk_eh_timer_return
nvme_loop_timeout(struct request *rq, bool reserved)
{
	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(rq);

	/* queue error recovery */
	nvme_reset_ctrl(&iod->queue->ctrl->ctrl);

	/* fail with DNR on admin cmd timeout */
	nvme_req(rq)->status = NVME_SC_ABORT_REQ | NVME_SC_DNR;

	return BLK_EH_HANDLED;
}

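/*
 * Until the queue is marked live, only fabrics connect commands may pass;
 * nvmf_check_init_req() fails or requeues everything else.
 */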
static inline blk_status_t nvme_loop_is_ready(struct nvme_loop_queue *queue,
		struct request *rq)
{
	if (unlikely(!test_bit(NVME_LOOP_Q_LIVE, &queue->flags)))
		return nvmf_check_init_req(&queue->ctrl->ctrl, rq);
	return BLK_STS_OK;
}

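/*
 * Host-side submission: build the NVMe command, initialize the target
 * request, map the data into a scatterlist shared with the target, and
 * defer execution to a work item.  Nothing crosses a wire; the target
 * operates directly on the host's pages.
 */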
static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
		const struct blk_mq_queue_data *bd)
{
	struct nvme_ns *ns = hctx->queue->queuedata;
	struct nvme_loop_queue *queue = hctx->driver_data;
	struct request *req = bd->rq;
	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
	blk_status_t ret;

	ret = nvme_loop_is_ready(queue, req);
	if (unlikely(ret))
		return ret;

	ret = nvme_setup_cmd(ns, req, &iod->cmd);
	if (ret)
		return ret;

	iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;
	iod->req.port = nvmet_loop_port;
	if (!nvmet_req_init(&iod->req, &queue->nvme_cq,
			&queue->nvme_sq, &nvme_loop_ops)) {
		nvme_cleanup_cmd(req);
		blk_mq_start_request(req);
		nvme_loop_queue_response(&iod->req);
		return BLK_STS_OK;
	}

	if (blk_rq_bytes(req)) {
		iod->sg_table.sgl = iod->first_sgl;
		if (sg_alloc_table_chained(&iod->sg_table,
				blk_rq_nr_phys_segments(req),
				iod->sg_table.sgl))
			return BLK_STS_RESOURCE;

		iod->req.sg = iod->sg_table.sgl;
		iod->req.sg_cnt = blk_rq_map_sg(req->q, req, iod->sg_table.sgl);
		iod->req.transfer_len = blk_rq_bytes(req);
	}

	blk_mq_start_request(req);

	schedule_work(&iod->work);
	return BLK_STS_OK;
}

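/*
 * Async event commands don't use a struct request; they are identified by
 * the reserved command id NVME_AQ_BLK_MQ_DEPTH, which the completion path
 * above uses to tell them apart from tagged requests.
 */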
static void nvme_loop_submit_async_event(struct nvme_ctrl *arg)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(arg);
	struct nvme_loop_queue *queue = &ctrl->queues[0];
	struct nvme_loop_iod *iod = &ctrl->async_event_iod;

	memset(&iod->cmd, 0, sizeof(iod->cmd));
	iod->cmd.common.opcode = nvme_admin_async_event;
	iod->cmd.common.command_id = NVME_AQ_BLK_MQ_DEPTH;
	iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;

	if (!nvmet_req_init(&iod->req, &queue->nvme_cq, &queue->nvme_sq,
			&nvme_loop_ops)) {
		dev_err(ctrl->ctrl.device, "failed async event work\n");
		return;
	}

	schedule_work(&iod->work);
}

static int nvme_loop_init_iod(struct nvme_loop_ctrl *ctrl,
		struct nvme_loop_iod *iod, unsigned int queue_idx)
{
	iod->req.cmd = &iod->cmd;
	iod->req.rsp = &iod->rsp;
	iod->queue = &ctrl->queues[queue_idx];
	INIT_WORK(&iod->work, nvme_loop_execute_work);
	return 0;
}

static int nvme_loop_init_request(struct blk_mq_tag_set *set,
		struct request *req, unsigned int hctx_idx,
		unsigned int numa_node)
{
	struct nvme_loop_ctrl *ctrl = set->driver_data;

	return nvme_loop_init_iod(ctrl, blk_mq_rq_to_pdu(req),
			(set == &ctrl->tag_set) ? hctx_idx + 1 : 0);
}

static int nvme_loop_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_loop_ctrl *ctrl = data;
	struct nvme_loop_queue *queue = &ctrl->queues[hctx_idx + 1];

	BUG_ON(hctx_idx >= ctrl->ctrl.queue_count);

	hctx->driver_data = queue;
	return 0;
}

static int nvme_loop_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_loop_ctrl *ctrl = data;
	struct nvme_loop_queue *queue = &ctrl->queues[0];

	BUG_ON(hctx_idx != 0);

	hctx->driver_data = queue;
	return 0;
}

static const struct blk_mq_ops nvme_loop_mq_ops = {
	.queue_rq	= nvme_loop_queue_rq,
	.complete	= nvme_loop_complete_rq,
	.init_request	= nvme_loop_init_request,
	.init_hctx	= nvme_loop_init_hctx,
	.timeout	= nvme_loop_timeout,
};

static const struct blk_mq_ops nvme_loop_admin_mq_ops = {
	.queue_rq	= nvme_loop_queue_rq,
	.complete	= nvme_loop_complete_rq,
	.init_request	= nvme_loop_init_request,
	.init_hctx	= nvme_loop_init_admin_hctx,
	.timeout	= nvme_loop_timeout,
};

static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
{
	clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);
	nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
	blk_cleanup_queue(ctrl->ctrl.admin_q);
	blk_mq_free_tag_set(&ctrl->admin_tag_set);
}

static void nvme_loop_free_ctrl(struct nvme_ctrl *nctrl)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(nctrl);

	if (list_empty(&ctrl->list))
		goto free_ctrl;

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_del(&ctrl->list);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	if (nctrl->tagset) {
		blk_cleanup_queue(ctrl->ctrl.connect_q);
		blk_mq_free_tag_set(&ctrl->tag_set);
	}
	kfree(ctrl->queues);
	nvmf_free_options(nctrl->opts);
free_ctrl:
	kfree(ctrl);
}

static void nvme_loop_destroy_io_queues(struct nvme_loop_ctrl *ctrl)
{
	int i;

	for (i = 1; i < ctrl->ctrl.queue_count; i++) {
		clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags);
		nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
	}
}

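/*
 * Allocate a target submission queue for each I/O queue; the count is
 * capped by the number of online CPUs and by what nvme_set_queue_count()
 * negotiates with the controller.
 */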
static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl)
{
	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
	unsigned int nr_io_queues;
	int ret, i;

	nr_io_queues = min(opts->nr_io_queues, num_online_cpus());
	ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
	if (ret || !nr_io_queues)
		return ret;

	dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n", nr_io_queues);

	for (i = 1; i <= nr_io_queues; i++) {
		ctrl->queues[i].ctrl = ctrl;
		ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq);
		if (ret)
			goto out_destroy_queues;

		ctrl->ctrl.queue_count++;
	}

	return 0;

out_destroy_queues:
	nvme_loop_destroy_io_queues(ctrl);
	return ret;
}

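/* Issue a fabrics connect on each I/O queue and mark it live. */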
static int nvme_loop_connect_io_queues(struct nvme_loop_ctrl *ctrl)
{
	int i, ret;

	for (i = 1; i < ctrl->ctrl.queue_count; i++) {
		ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
		if (ret)
			return ret;
		set_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags);
	}

	return 0;
}

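/*
 * Bring up the admin queue: allocate the tag set and request queue, issue
 * the fabrics connect, read CAP, enable the controller and identify it.
 */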
static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
{
	int error;

	memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
	ctrl->admin_tag_set.ops = &nvme_loop_admin_mq_ops;
	ctrl->admin_tag_set.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
	ctrl->admin_tag_set.reserved_tags = 2; /* connect + keep-alive */
	ctrl->admin_tag_set.numa_node = NUMA_NO_NODE;
	ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
		SG_CHUNK_SIZE * sizeof(struct scatterlist);
	ctrl->admin_tag_set.driver_data = ctrl;
	ctrl->admin_tag_set.nr_hw_queues = 1;
	ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT;
	ctrl->admin_tag_set.flags = BLK_MQ_F_NO_SCHED;

	ctrl->queues[0].ctrl = ctrl;
	error = nvmet_sq_init(&ctrl->queues[0].nvme_sq);
	if (error)
		return error;
	ctrl->ctrl.queue_count = 1;

	error = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
	if (error)
		goto out_free_sq;
	ctrl->ctrl.admin_tagset = &ctrl->admin_tag_set;

	ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
	if (IS_ERR(ctrl->ctrl.admin_q)) {
		error = PTR_ERR(ctrl->ctrl.admin_q);
		goto out_free_tagset;
	}

	error = nvmf_connect_admin_queue(&ctrl->ctrl);
	if (error)
		goto out_cleanup_queue;

	set_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);

	error = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->ctrl.cap);
	if (error) {
		dev_err(ctrl->ctrl.device,
			"prop_get NVME_REG_CAP failed\n");
		goto out_cleanup_queue;
	}

	ctrl->ctrl.sqsize =
		min_t(int, NVME_CAP_MQES(ctrl->ctrl.cap), ctrl->ctrl.sqsize);

	error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->ctrl.cap);
	if (error)
		goto out_cleanup_queue;

	ctrl->ctrl.max_hw_sectors =
		(NVME_LOOP_MAX_SEGMENTS - 1) << (PAGE_SHIFT - 9);

	error = nvme_init_identify(&ctrl->ctrl);
	if (error)
		goto out_cleanup_queue;

	return 0;

out_cleanup_queue:
	blk_cleanup_queue(ctrl->ctrl.admin_q);
out_free_tagset:
	blk_mq_free_tag_set(&ctrl->admin_tag_set);
out_free_sq:
	nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
	return error;
}

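/*
 * Tear down in the reverse order of setup: quiesce and cancel outstanding
 * I/O, destroy the I/O queues, shut the controller down, then take the
 * admin queue apart the same way.
 */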
static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl)
{
	if (ctrl->ctrl.queue_count > 1) {
		nvme_stop_queues(&ctrl->ctrl);
		blk_mq_tagset_busy_iter(&ctrl->tag_set,
					nvme_cancel_request, &ctrl->ctrl);
		nvme_loop_destroy_io_queues(ctrl);
	}

	if (ctrl->ctrl.state == NVME_CTRL_LIVE)
		nvme_shutdown_ctrl(&ctrl->ctrl);

	blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
	blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
				nvme_cancel_request, &ctrl->ctrl);
	blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
	nvme_loop_destroy_admin_queue(ctrl);
}

static void nvme_loop_delete_ctrl_host(struct nvme_ctrl *ctrl)
{
	nvme_loop_shutdown_ctrl(to_loop_ctrl(ctrl));
}

static void nvme_loop_delete_ctrl(struct nvmet_ctrl *nctrl)
{
	struct nvme_loop_ctrl *ctrl;

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_for_each_entry(ctrl, &nvme_loop_ctrl_list, list) {
		if (ctrl->ctrl.cntlid == nctrl->cntlid)
			nvme_delete_ctrl(&ctrl->ctrl);
	}
	mutex_unlock(&nvme_loop_ctrl_mutex);
}

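/*
 * Controller reset: tear everything down and reconnect.  On failure the
 * controller is removed instead.
 */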
static void nvme_loop_reset_ctrl_work(struct work_struct *work)
{
	struct nvme_loop_ctrl *ctrl =
		container_of(work, struct nvme_loop_ctrl, ctrl.reset_work);
	bool changed;
	int ret;

	nvme_stop_ctrl(&ctrl->ctrl);
	nvme_loop_shutdown_ctrl(ctrl);

	ret = nvme_loop_configure_admin_queue(ctrl);
	if (ret)
		goto out_disable;

	ret = nvme_loop_init_io_queues(ctrl);
	if (ret)
		goto out_destroy_admin;

	ret = nvme_loop_connect_io_queues(ctrl);
	if (ret)
		goto out_destroy_io;

	blk_mq_update_nr_hw_queues(&ctrl->tag_set,
			ctrl->ctrl.queue_count - 1);

	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
	WARN_ON_ONCE(!changed);

	nvme_start_ctrl(&ctrl->ctrl);

	return;

out_destroy_io:
	nvme_loop_destroy_io_queues(ctrl);
out_destroy_admin:
	nvme_loop_destroy_admin_queue(ctrl);
out_disable:
	dev_warn(ctrl->ctrl.device, "Removing after reset failure\n");
	nvme_uninit_ctrl(&ctrl->ctrl);
	nvme_put_ctrl(&ctrl->ctrl);
}

static const struct nvme_ctrl_ops nvme_loop_ctrl_ops = {
	.name			= "loop",
	.module			= THIS_MODULE,
	.flags			= NVME_F_FABRICS,
	.reg_read32		= nvmf_reg_read32,
	.reg_read64		= nvmf_reg_read64,
	.reg_write32		= nvmf_reg_write32,
	.free_ctrl		= nvme_loop_free_ctrl,
	.submit_async_event	= nvme_loop_submit_async_event,
	.delete_ctrl		= nvme_loop_delete_ctrl_host,
};

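/*
 * Create the I/O side of the controller: target SQs, the shared tag set,
 * the connect queue, and the fabrics connect on each queue.
 */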
static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
{
	int ret;

	ret = nvme_loop_init_io_queues(ctrl);
	if (ret)
		return ret;

	memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
	ctrl->tag_set.ops = &nvme_loop_mq_ops;
	ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
	ctrl->tag_set.reserved_tags = 1; /* fabric connect */
	ctrl->tag_set.numa_node = NUMA_NO_NODE;
	ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	ctrl->tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
		SG_CHUNK_SIZE * sizeof(struct scatterlist);
	ctrl->tag_set.driver_data = ctrl;
	ctrl->tag_set.nr_hw_queues = ctrl->ctrl.queue_count - 1;
	ctrl->tag_set.timeout = NVME_IO_TIMEOUT;
	ctrl->ctrl.tagset = &ctrl->tag_set;

	ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
	if (ret)
		goto out_destroy_queues;

	ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
	if (IS_ERR(ctrl->ctrl.connect_q)) {
		ret = PTR_ERR(ctrl->ctrl.connect_q);
		goto out_free_tagset;
	}

	ret = nvme_loop_connect_io_queues(ctrl);
	if (ret)
		goto out_cleanup_connect_q;

	return 0;

out_cleanup_connect_q:
	blk_cleanup_queue(ctrl->ctrl.connect_q);
out_free_tagset:
	blk_mq_free_tag_set(&ctrl->tag_set);
out_destroy_queues:
	nvme_loop_destroy_io_queues(ctrl);
	return ret;
}

static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev,
		struct nvmf_ctrl_options *opts)
{
	struct nvme_loop_ctrl *ctrl;
	bool changed;
	int ret;

	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		return ERR_PTR(-ENOMEM);
	ctrl->ctrl.opts = opts;
	INIT_LIST_HEAD(&ctrl->list);

	INIT_WORK(&ctrl->ctrl.reset_work, nvme_loop_reset_ctrl_work);

	ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_loop_ctrl_ops,
				0 /* no quirks, we're perfect! */);
	if (ret)
		goto out_put_ctrl;

	ret = -ENOMEM;

	ctrl->ctrl.sqsize = opts->queue_size - 1;
	ctrl->ctrl.kato = opts->kato;

	ctrl->queues = kcalloc(opts->nr_io_queues + 1, sizeof(*ctrl->queues),
			GFP_KERNEL);
	if (!ctrl->queues)
		goto out_uninit_ctrl;

	ret = nvme_loop_configure_admin_queue(ctrl);
	if (ret)
		goto out_free_queues;

	if (opts->queue_size > ctrl->ctrl.maxcmd) {
		/* warn if maxcmd is lower than queue_size */
		dev_warn(ctrl->ctrl.device,
			"queue_size %zu > ctrl maxcmd %u, clamping down\n",
			opts->queue_size, ctrl->ctrl.maxcmd);
		opts->queue_size = ctrl->ctrl.maxcmd;
	}

	if (opts->nr_io_queues) {
		ret = nvme_loop_create_io_queues(ctrl);
		if (ret)
			goto out_remove_admin_queue;
	}

	nvme_loop_init_iod(ctrl, &ctrl->async_event_iod, 0);

	dev_info(ctrl->ctrl.device,
		 "new ctrl: \"%s\"\n", ctrl->ctrl.opts->subsysnqn);

	nvme_get_ctrl(&ctrl->ctrl);

	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
	WARN_ON_ONCE(!changed);

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_add_tail(&ctrl->list, &nvme_loop_ctrl_list);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	nvme_start_ctrl(&ctrl->ctrl);

	return &ctrl->ctrl;

out_remove_admin_queue:
	nvme_loop_destroy_admin_queue(ctrl);
out_free_queues:
	kfree(ctrl->queues);
out_uninit_ctrl:
	nvme_uninit_ctrl(&ctrl->ctrl);
out_put_ctrl:
	nvme_put_ctrl(&ctrl->ctrl);
	if (ret > 0)
		ret = -EIO;
	return ERR_PTR(ret);
}

static int nvme_loop_add_port(struct nvmet_port *port)
{
	/*
	 * XXX: disallow adding more than one port, so that there are no
	 * connection rejections when a subsystem is assigned to a port for
	 * which loop doesn't have a pointer.  This scenario would be
	 * possible if we allowed more than one port to be added and a
	 * subsystem was assigned to a port other than nvmet_loop_port.
	 */

	if (nvmet_loop_port)
		return -EPERM;

	nvmet_loop_port = port;
	return 0;
}

static void nvme_loop_remove_port(struct nvmet_port *port)
{
	if (port == nvmet_loop_port)
		nvmet_loop_port = NULL;
}

static struct nvmet_fabrics_ops nvme_loop_ops = {
	.owner		= THIS_MODULE,
	.type		= NVMF_TRTYPE_LOOP,
	.add_port	= nvme_loop_add_port,
	.remove_port	= nvme_loop_remove_port,
	.queue_response = nvme_loop_queue_response,
	.delete_ctrl	= nvme_loop_delete_ctrl,
};

static struct nvmf_transport_ops nvme_loop_transport = {
	.name		= "loop",
	.module		= THIS_MODULE,
	.create_ctrl	= nvme_loop_create_ctrl,
};

static int __init nvme_loop_init_module(void)
{
	int ret;

	ret = nvmet_register_transport(&nvme_loop_ops);
	if (ret)
		return ret;

	ret = nvmf_register_transport(&nvme_loop_transport);
	if (ret)
		nvmet_unregister_transport(&nvme_loop_ops);

	return ret;
}

static void __exit nvme_loop_cleanup_module(void)
{
	struct nvme_loop_ctrl *ctrl, *next;

	nvmf_unregister_transport(&nvme_loop_transport);
	nvmet_unregister_transport(&nvme_loop_ops);

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_for_each_entry_safe(ctrl, next, &nvme_loop_ctrl_list, list)
		nvme_delete_ctrl(&ctrl->ctrl);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	flush_workqueue(nvme_wq);
}

module_init(nvme_loop_init_module);
module_exit(nvme_loop_cleanup_module);

MODULE_LICENSE("GPL v2");
MODULE_ALIAS("nvmet-transport-254"); /* 254 == NVMF_TRTYPE_LOOP */