/* /openbmc/linux/drivers/nvme/target/loop.c (revision a159c64d936eb0d1da29d8ad384183d8984899c9) */
/*
 * NVMe over Fabrics loopback device.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/scatterlist.h>
#include <linux/delay.h>
#include <linux/blk-mq.h>
#include <linux/nvme.h>
#include <linux/module.h>
#include <linux/parser.h>
#include <linux/t10-pi.h>
#include "nvmet.h"
#include "../host/nvme.h"
#include "../host/fabrics.h"

#define NVME_LOOP_AQ_DEPTH		256

#define NVME_LOOP_MAX_SEGMENTS		256

/*
 * We handle AEN commands ourselves and don't even let the
 * block layer know about them.
 */
#define NVME_LOOP_NR_AEN_COMMANDS	1
#define NVME_LOOP_AQ_BLKMQ_DEPTH	\
	(NVME_LOOP_AQ_DEPTH - NVME_LOOP_NR_AEN_COMMANDS)

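/*
 * One nvme_loop_iod lives in the PDU of every request and carries both
 * sides of the loop: iod->cmd/iod->rsp are the host-visible submission
 * and completion entries, while iod->req is the nvmet request the target
 * core operates on.  first_sgl[] is inline scatterlist space reserved
 * through the tag set's cmd_size, so the same SG list is shared zero-copy
 * between host and target.
 */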
struct nvme_loop_iod {
	struct nvme_command	cmd;
	struct nvme_completion	rsp;
	struct nvmet_req	req;
	struct nvme_loop_queue	*queue;
	struct work_struct	work;
	struct sg_table		sg_table;
	struct scatterlist	first_sgl[];
};

struct nvme_loop_ctrl {
	spinlock_t		lock;
	struct nvme_loop_queue	*queues;
	u32			queue_count;

	struct blk_mq_tag_set	admin_tag_set;

	struct list_head	list;
	u64			cap;
	struct blk_mq_tag_set	tag_set;
	struct nvme_loop_iod	async_event_iod;
	struct nvme_ctrl	ctrl;

	struct nvmet_ctrl	*target_ctrl;
	struct work_struct	delete_work;
	struct work_struct	reset_work;
};

static inline struct nvme_loop_ctrl *to_loop_ctrl(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_loop_ctrl, ctrl);
}

struct nvme_loop_queue {
	struct nvmet_cq		nvme_cq;
	struct nvmet_sq		nvme_sq;
	struct nvme_loop_ctrl	*ctrl;
};

static struct nvmet_port *nvmet_loop_port;

static LIST_HEAD(nvme_loop_ctrl_list);
static DEFINE_MUTEX(nvme_loop_ctrl_mutex);

static void nvme_loop_queue_response(struct nvmet_req *nvme_req);
static void nvme_loop_delete_ctrl(struct nvmet_ctrl *ctrl);

static struct nvmet_fabrics_ops nvme_loop_ops;

static inline int nvme_loop_queue_idx(struct nvme_loop_queue *queue)
{
	return queue - queue->ctrl->queues;
}

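/*
 * Host-side blk-mq ->complete handler: release the command resources and
 * either requeue the request for a retry or translate the NVMe status
 * into an errno for blk_mq_end_request().
 */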
static void nvme_loop_complete_rq(struct request *req)
{
	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
	int error = 0;

	nvme_cleanup_cmd(req);
	sg_free_table_chained(&iod->sg_table, true);

	if (unlikely(req->errors)) {
		if (nvme_req_needs_retry(req, req->errors)) {
			nvme_requeue_req(req);
			return;
		}

		if (req->cmd_type == REQ_TYPE_DRV_PRIV)
			error = req->errors;
		else
			error = nvme_error_status(req->errors);
	}

	blk_mq_end_request(req, error);
}

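/*
 * Target-side completion callback (nvmet_fabrics_ops.queue_response).
 * The target core has filled in iod->rsp; route it back into the host
 * stack, either as an async event or as a regular request completion.
 */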
static void nvme_loop_queue_response(struct nvmet_req *nvme_req)
{
	struct nvme_loop_iod *iod =
		container_of(nvme_req, struct nvme_loop_iod, req);
	struct nvme_completion *cqe = &iod->rsp;

	/*
	 * AEN requests are special as they don't time out and can
	 * survive any kind of queue freeze and often don't respond to
	 * aborts.  We don't even bother to allocate a struct request
	 * for them but rather special case them here.
	 */
	if (unlikely(nvme_loop_queue_idx(iod->queue) == 0 &&
			cqe->command_id >= NVME_LOOP_AQ_BLKMQ_DEPTH)) {
		nvme_complete_async_event(&iod->queue->ctrl->ctrl, cqe);
	} else {
		struct request *req = blk_mq_rq_from_pdu(iod);

		if (req->cmd_type == REQ_TYPE_DRV_PRIV && req->special)
			memcpy(req->special, cqe, sizeof(*cqe));
		blk_mq_complete_request(req, le16_to_cpu(cqe->status) >> 1);
	}
}

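/*
 * Target command execution runs from a workqueue so that ->queue_rq
 * itself never blocks; iod->req.execute() can sleep while doing I/O to
 * the backing namespace.
 */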
static void nvme_loop_execute_work(struct work_struct *work)
{
	struct nvme_loop_iod *iod =
		container_of(work, struct nvme_loop_iod, work);

	iod->req.execute(&iod->req);
}

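/*
 * A timed-out request means the target side is stuck: kick off a
 * controller reset and complete the command with an ABORT + DNR status.
 */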
static enum blk_eh_timer_return
nvme_loop_timeout(struct request *rq, bool reserved)
{
	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(rq);

	/* queue error recovery */
	schedule_work(&iod->queue->ctrl->reset_work);

	/* fail with DNR on admin cmd timeout */
	rq->errors = NVME_SC_ABORT_REQ | NVME_SC_DNR;

	return BLK_EH_HANDLED;
}

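/*
 * Host-side ->queue_rq: translate the block request into an NVMe command,
 * initialize the embedded nvmet request against this queue's target SQ/CQ,
 * point the target at the request's scatterlist, and hand execution off to
 * the workqueue.  nvmet_req_init() failing is not an error here: it has
 * recorded an error status in iod->rsp, which we hand straight back via
 * nvme_loop_queue_response().
 */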
static int nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
		const struct blk_mq_queue_data *bd)
{
	struct nvme_ns *ns = hctx->queue->queuedata;
	struct nvme_loop_queue *queue = hctx->driver_data;
	struct request *req = bd->rq;
	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
	int ret;

	ret = nvme_setup_cmd(ns, req, &iod->cmd);
	if (ret)
		return ret;

	iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;
	iod->req.port = nvmet_loop_port;
	if (!nvmet_req_init(&iod->req, &queue->nvme_cq,
			&queue->nvme_sq, &nvme_loop_ops)) {
		nvme_cleanup_cmd(req);
		blk_mq_start_request(req);
		nvme_loop_queue_response(&iod->req);
		return 0;
	}

	if (blk_rq_bytes(req)) {
		iod->sg_table.sgl = iod->first_sgl;
		ret = sg_alloc_table_chained(&iod->sg_table,
			req->nr_phys_segments, iod->sg_table.sgl);
		if (ret)
			return BLK_MQ_RQ_QUEUE_BUSY;

		iod->req.sg = iod->sg_table.sgl;
		iod->req.sg_cnt = blk_rq_map_sg(req->q, req, iod->sg_table.sgl);
		BUG_ON(iod->req.sg_cnt > req->nr_phys_segments);
	}

	iod->cmd.common.command_id = req->tag;
	blk_mq_start_request(req);

	schedule_work(&iod->work);
	return 0;
}

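/*
 * AEN commands have no struct request; submit them through the reserved
 * iod with the out-of-range command_id that nvme_loop_queue_response()
 * recognizes as an async event.
 */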
static void nvme_loop_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(arg);
	struct nvme_loop_queue *queue = &ctrl->queues[0];
	struct nvme_loop_iod *iod = &ctrl->async_event_iod;

	memset(&iod->cmd, 0, sizeof(iod->cmd));
	iod->cmd.common.opcode = nvme_admin_async_event;
	iod->cmd.common.command_id = NVME_LOOP_AQ_BLKMQ_DEPTH;
	iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;

	if (!nvmet_req_init(&iod->req, &queue->nvme_cq, &queue->nvme_sq,
			&nvme_loop_ops)) {
		dev_err(ctrl->ctrl.device, "failed async event work\n");
		return;
	}

	schedule_work(&iod->work);
}

static int nvme_loop_init_iod(struct nvme_loop_ctrl *ctrl,
		struct nvme_loop_iod *iod, unsigned int queue_idx)
{
	BUG_ON(queue_idx >= ctrl->queue_count);

	iod->req.cmd = &iod->cmd;
	iod->req.rsp = &iod->rsp;
	iod->queue = &ctrl->queues[queue_idx];
	INIT_WORK(&iod->work, nvme_loop_execute_work);
	return 0;
}

static int nvme_loop_init_request(void *data, struct request *req,
				unsigned int hctx_idx, unsigned int rq_idx,
				unsigned int numa_node)
{
	return nvme_loop_init_iod(data, blk_mq_rq_to_pdu(req), hctx_idx + 1);
}

static int nvme_loop_init_admin_request(void *data, struct request *req,
				unsigned int hctx_idx, unsigned int rq_idx,
				unsigned int numa_node)
{
	return nvme_loop_init_iod(data, blk_mq_rq_to_pdu(req), 0);
}

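/*
 * I/O hardware context hctx_idx maps to ctrl->queues[hctx_idx + 1],
 * mirroring nvme_loop_init_request() above, because queues[0] is always
 * the admin queue.
 */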
static int nvme_loop_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_loop_ctrl *ctrl = data;
	struct nvme_loop_queue *queue = &ctrl->queues[hctx_idx + 1];

	BUG_ON(hctx_idx >= ctrl->queue_count);

	hctx->driver_data = queue;
	return 0;
}

static int nvme_loop_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_loop_ctrl *ctrl = data;
	struct nvme_loop_queue *queue = &ctrl->queues[0];

	BUG_ON(hctx_idx != 0);

	hctx->driver_data = queue;
	return 0;
}

static struct blk_mq_ops nvme_loop_mq_ops = {
	.queue_rq	= nvme_loop_queue_rq,
	.complete	= nvme_loop_complete_rq,
	.map_queue	= blk_mq_map_queue,
	.init_request	= nvme_loop_init_request,
	.init_hctx	= nvme_loop_init_hctx,
	.timeout	= nvme_loop_timeout,
};

static struct blk_mq_ops nvme_loop_admin_mq_ops = {
	.queue_rq	= nvme_loop_queue_rq,
	.complete	= nvme_loop_complete_rq,
	.map_queue	= blk_mq_map_queue,
	.init_request	= nvme_loop_init_admin_request,
	.init_hctx	= nvme_loop_init_admin_hctx,
	.timeout	= nvme_loop_timeout,
};

static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
{
	blk_cleanup_queue(ctrl->ctrl.admin_q);
	blk_mq_free_tag_set(&ctrl->admin_tag_set);
	nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
}

static void nvme_loop_free_ctrl(struct nvme_ctrl *nctrl)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(nctrl);

	if (list_empty(&ctrl->list))
		goto free_ctrl;

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_del(&ctrl->list);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	if (nctrl->tagset) {
		blk_cleanup_queue(ctrl->ctrl.connect_q);
		blk_mq_free_tag_set(&ctrl->tag_set);
	}
	kfree(ctrl->queues);
	nvmf_free_options(nctrl->opts);
free_ctrl:
	kfree(ctrl);
}

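/*
 * Bring up the admin queue: allocate the tag set and request queue,
 * initialize the target submission queue, issue the fabrics Connect,
 * then read CAP, enable the controller and identify it.
 */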
static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
{
	int error;

	memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
	ctrl->admin_tag_set.ops = &nvme_loop_admin_mq_ops;
	ctrl->admin_tag_set.queue_depth = NVME_LOOP_AQ_BLKMQ_DEPTH;
	ctrl->admin_tag_set.reserved_tags = 2; /* connect + keep-alive */
	ctrl->admin_tag_set.numa_node = NUMA_NO_NODE;
	ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
		SG_CHUNK_SIZE * sizeof(struct scatterlist);
	ctrl->admin_tag_set.driver_data = ctrl;
	ctrl->admin_tag_set.nr_hw_queues = 1;
	ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT;

	ctrl->queues[0].ctrl = ctrl;
	error = nvmet_sq_init(&ctrl->queues[0].nvme_sq);
	if (error)
		return error;
	ctrl->queue_count = 1;

	error = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
	if (error)
		goto out_free_sq;

	ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
	if (IS_ERR(ctrl->ctrl.admin_q)) {
		error = PTR_ERR(ctrl->ctrl.admin_q);
		goto out_free_tagset;
	}

	error = nvmf_connect_admin_queue(&ctrl->ctrl);
	if (error)
		goto out_cleanup_queue;

	error = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->cap);
	if (error) {
		dev_err(ctrl->ctrl.device,
			"prop_get NVME_REG_CAP failed\n");
		goto out_cleanup_queue;
	}

	ctrl->ctrl.sqsize =
		min_t(int, NVME_CAP_MQES(ctrl->cap) + 1, ctrl->ctrl.sqsize);

	error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap);
	if (error)
		goto out_cleanup_queue;

	ctrl->ctrl.max_hw_sectors =
		(NVME_LOOP_MAX_SEGMENTS - 1) << (PAGE_SHIFT - 9);

	error = nvme_init_identify(&ctrl->ctrl);
	if (error)
		goto out_cleanup_queue;

	nvme_start_keep_alive(&ctrl->ctrl);

	return 0;

out_cleanup_queue:
	blk_cleanup_queue(ctrl->ctrl.admin_q);
out_free_tagset:
	blk_mq_free_tag_set(&ctrl->admin_tag_set);
out_free_sq:
	nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
	return error;
}

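/*
 * Tear down in the reverse order of setup: quiesce and cancel I/O,
 * destroy the target I/O SQs, shut the controller down if it is still
 * live, and finally take the admin queue apart.
 */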
static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl)
{
	int i;

	nvme_stop_keep_alive(&ctrl->ctrl);

	if (ctrl->queue_count > 1) {
		nvme_stop_queues(&ctrl->ctrl);
		blk_mq_tagset_busy_iter(&ctrl->tag_set,
					nvme_cancel_request, &ctrl->ctrl);

		for (i = 1; i < ctrl->queue_count; i++)
			nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
	}

	if (ctrl->ctrl.state == NVME_CTRL_LIVE)
		nvme_shutdown_ctrl(&ctrl->ctrl);

	blk_mq_stop_hw_queues(ctrl->ctrl.admin_q);
	blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
				nvme_cancel_request, &ctrl->ctrl);
	nvme_loop_destroy_admin_queue(ctrl);
}

static void nvme_loop_del_ctrl_work(struct work_struct *work)
{
	struct nvme_loop_ctrl *ctrl = container_of(work,
				struct nvme_loop_ctrl, delete_work);

	nvme_uninit_ctrl(&ctrl->ctrl);
	nvme_loop_shutdown_ctrl(ctrl);
	nvme_put_ctrl(&ctrl->ctrl);
}

static int __nvme_loop_del_ctrl(struct nvme_loop_ctrl *ctrl)
{
	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING))
		return -EBUSY;

	if (!schedule_work(&ctrl->delete_work))
		return -EBUSY;

	return 0;
}

static int nvme_loop_del_ctrl(struct nvme_ctrl *nctrl)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(nctrl);
	int ret;

	ret = __nvme_loop_del_ctrl(ctrl);
	if (ret)
		return ret;

	flush_work(&ctrl->delete_work);

	return 0;
}

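/*
 * Target-side delete hook: the nvmet core identifies the controller by
 * cntlid, so walk our list and schedule deletion of the matching
 * host-side controller.
 */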
static void nvme_loop_delete_ctrl(struct nvmet_ctrl *nctrl)
{
	struct nvme_loop_ctrl *ctrl;

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_for_each_entry(ctrl, &nvme_loop_ctrl_list, list) {
		if (ctrl->ctrl.cntlid == nctrl->cntlid)
			__nvme_loop_del_ctrl(ctrl);
	}
	mutex_unlock(&nvme_loop_ctrl_mutex);
}

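/*
 * Reset: shut everything down, then rebuild the admin queue and
 * reconnect all I/O queues.  On failure the controller is removed
 * rather than left half-initialized.
 */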
static void nvme_loop_reset_ctrl_work(struct work_struct *work)
{
	struct nvme_loop_ctrl *ctrl = container_of(work,
					struct nvme_loop_ctrl, reset_work);
	bool changed;
	int i, ret;

	nvme_loop_shutdown_ctrl(ctrl);

	ret = nvme_loop_configure_admin_queue(ctrl);
	if (ret)
		goto out_disable;

	for (i = 1; i <= ctrl->ctrl.opts->nr_io_queues; i++) {
		ctrl->queues[i].ctrl = ctrl;
		ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq);
		if (ret)
			goto out_free_queues;

		ctrl->queue_count++;
	}

	for (i = 1; i <= ctrl->ctrl.opts->nr_io_queues; i++) {
		ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
		if (ret)
			goto out_free_queues;
	}

	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
	WARN_ON_ONCE(!changed);

	nvme_queue_scan(&ctrl->ctrl);
	nvme_queue_async_events(&ctrl->ctrl);

	nvme_start_queues(&ctrl->ctrl);

	return;

out_free_queues:
	for (i = 1; i < ctrl->queue_count; i++)
		nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
	nvme_loop_destroy_admin_queue(ctrl);
out_disable:
	dev_warn(ctrl->ctrl.device, "Removing after reset failure\n");
	nvme_uninit_ctrl(&ctrl->ctrl);
	nvme_put_ctrl(&ctrl->ctrl);
}

static int nvme_loop_reset_ctrl(struct nvme_ctrl *nctrl)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(nctrl);

	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING))
		return -EBUSY;

	if (!schedule_work(&ctrl->reset_work))
		return -EBUSY;

	flush_work(&ctrl->reset_work);

	return 0;
}

static const struct nvme_ctrl_ops nvme_loop_ctrl_ops = {
	.name			= "loop",
	.module			= THIS_MODULE,
	.is_fabrics		= true,
	.reg_read32		= nvmf_reg_read32,
	.reg_read64		= nvmf_reg_read64,
	.reg_write32		= nvmf_reg_write32,
	.reset_ctrl		= nvme_loop_reset_ctrl,
	.free_ctrl		= nvme_loop_free_ctrl,
	.submit_async_event	= nvme_loop_submit_async_event,
	.delete_ctrl		= nvme_loop_del_ctrl,
	.get_subsysnqn		= nvmf_get_subsysnqn,
};

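/*
 * Negotiate the I/O queue count with the target controller, initialize
 * a target SQ per queue, then allocate the tag set and connect_q and
 * issue a fabrics Connect on each queue.
 */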
static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
{
	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
	int ret, i;

	ret = nvme_set_queue_count(&ctrl->ctrl, &opts->nr_io_queues);
	if (ret || !opts->nr_io_queues)
		return ret;

	dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n",
		opts->nr_io_queues);

	for (i = 1; i <= opts->nr_io_queues; i++) {
		ctrl->queues[i].ctrl = ctrl;
		ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq);
		if (ret)
			goto out_destroy_queues;

		ctrl->queue_count++;
	}

	memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
	ctrl->tag_set.ops = &nvme_loop_mq_ops;
	ctrl->tag_set.queue_depth = ctrl->ctrl.sqsize;
	ctrl->tag_set.reserved_tags = 1; /* fabric connect */
	ctrl->tag_set.numa_node = NUMA_NO_NODE;
	ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	ctrl->tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
		SG_CHUNK_SIZE * sizeof(struct scatterlist);
	ctrl->tag_set.driver_data = ctrl;
	ctrl->tag_set.nr_hw_queues = ctrl->queue_count - 1;
	ctrl->tag_set.timeout = NVME_IO_TIMEOUT;
	ctrl->ctrl.tagset = &ctrl->tag_set;

	ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
	if (ret)
		goto out_destroy_queues;

	ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
	if (IS_ERR(ctrl->ctrl.connect_q)) {
		ret = PTR_ERR(ctrl->ctrl.connect_q);
		goto out_free_tagset;
	}

	for (i = 1; i <= opts->nr_io_queues; i++) {
		ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
		if (ret)
			goto out_cleanup_connect_q;
	}

	return 0;

out_cleanup_connect_q:
	blk_cleanup_queue(ctrl->ctrl.connect_q);
out_free_tagset:
	blk_mq_free_tag_set(&ctrl->tag_set);
out_destroy_queues:
	for (i = 1; i < ctrl->queue_count; i++)
		nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
	return ret;
}

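/*
 * ->create_ctrl for the "loop" transport: allocate the controller,
 * bring up the admin queue, clamp queue_size to the target's MAXCMD,
 * and optionally create I/O queues before going live.
 */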
static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev,
		struct nvmf_ctrl_options *opts)
{
	struct nvme_loop_ctrl *ctrl;
	bool changed;
	int ret;

	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		return ERR_PTR(-ENOMEM);
	ctrl->ctrl.opts = opts;
	INIT_LIST_HEAD(&ctrl->list);

	INIT_WORK(&ctrl->delete_work, nvme_loop_del_ctrl_work);
	INIT_WORK(&ctrl->reset_work, nvme_loop_reset_ctrl_work);

	ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_loop_ctrl_ops,
				0 /* no quirks, we're perfect! */);
	if (ret)
		goto out_put_ctrl;

	spin_lock_init(&ctrl->lock);

	ret = -ENOMEM;

	ctrl->ctrl.sqsize = opts->queue_size;
	ctrl->ctrl.kato = opts->kato;

	ctrl->queues = kcalloc(opts->nr_io_queues + 1, sizeof(*ctrl->queues),
			GFP_KERNEL);
	if (!ctrl->queues)
		goto out_uninit_ctrl;

	ret = nvme_loop_configure_admin_queue(ctrl);
	if (ret)
		goto out_free_queues;

	if (opts->queue_size > ctrl->ctrl.maxcmd) {
		/* warn if maxcmd is lower than queue_size */
		dev_warn(ctrl->ctrl.device,
			"queue_size %zu > ctrl maxcmd %u, clamping down\n",
			opts->queue_size, ctrl->ctrl.maxcmd);
		opts->queue_size = ctrl->ctrl.maxcmd;
	}

	if (opts->nr_io_queues) {
		ret = nvme_loop_create_io_queues(ctrl);
		if (ret)
			goto out_remove_admin_queue;
	}

	nvme_loop_init_iod(ctrl, &ctrl->async_event_iod, 0);

	dev_info(ctrl->ctrl.device,
		 "new ctrl: \"%s\"\n", ctrl->ctrl.opts->subsysnqn);

	kref_get(&ctrl->ctrl.kref);

	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
	WARN_ON_ONCE(!changed);

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_add_tail(&ctrl->list, &nvme_loop_ctrl_list);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	if (opts->nr_io_queues) {
		nvme_queue_scan(&ctrl->ctrl);
		nvme_queue_async_events(&ctrl->ctrl);
	}

	return &ctrl->ctrl;

out_remove_admin_queue:
	nvme_loop_destroy_admin_queue(ctrl);
out_free_queues:
	kfree(ctrl->queues);
out_uninit_ctrl:
	nvme_uninit_ctrl(&ctrl->ctrl);
out_put_ctrl:
	nvme_put_ctrl(&ctrl->ctrl);
	if (ret > 0)
		ret = -EIO;
	return ERR_PTR(ret);
}

static int nvme_loop_add_port(struct nvmet_port *port)
{
	/*
	 * XXX: disallow adding more than one port so
	 * there are no connection rejections when a
	 * subsystem is assigned to a port for which
	 * loop doesn't have a pointer.
	 * This scenario would be possible if we allowed
	 * more than one port to be added and a subsystem
	 * was assigned to a port other than nvmet_loop_port.
	 */

	if (nvmet_loop_port)
		return -EPERM;

	nvmet_loop_port = port;
	return 0;
}

static void nvme_loop_remove_port(struct nvmet_port *port)
{
	if (port == nvmet_loop_port)
		nvmet_loop_port = NULL;
}

static struct nvmet_fabrics_ops nvme_loop_ops = {
	.owner		= THIS_MODULE,
	.type		= NVMF_TRTYPE_LOOP,
	.add_port	= nvme_loop_add_port,
	.remove_port	= nvme_loop_remove_port,
	.queue_response = nvme_loop_queue_response,
	.delete_ctrl	= nvme_loop_delete_ctrl,
};

static struct nvmf_transport_ops nvme_loop_transport = {
	.name		= "loop",
	.create_ctrl	= nvme_loop_create_ctrl,
};

static int __init nvme_loop_init_module(void)
{
	int ret;

	ret = nvmet_register_transport(&nvme_loop_ops);
	if (ret)
		return ret;
	nvmf_register_transport(&nvme_loop_transport);
	return 0;
}

static void __exit nvme_loop_cleanup_module(void)
{
	struct nvme_loop_ctrl *ctrl, *next;

	nvmf_unregister_transport(&nvme_loop_transport);
	nvmet_unregister_transport(&nvme_loop_ops);

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_for_each_entry_safe(ctrl, next, &nvme_loop_ctrl_list, list)
		__nvme_loop_del_ctrl(ctrl);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	flush_scheduled_work();
}

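/*
 * Rough usage sketch (not part of this file; the paths and names below
 * are illustrative and assume the standard nvmet configfs layout plus
 * nvme-cli): build a target through configfs, then connect to it from
 * the host side of the same kernel, e.g.:
 *
 *	modprobe nvme-loop
 *	cd /sys/kernel/config/nvmet
 *	mkdir subsystems/testnqn
 *	echo 1 > subsystems/testnqn/attr_allow_any_host
 *	mkdir subsystems/testnqn/namespaces/1
 *	echo /dev/sdX > subsystems/testnqn/namespaces/1/device_path
 *	echo 1 > subsystems/testnqn/namespaces/1/enable
 *	mkdir ports/1
 *	echo loop > ports/1/addr_trtype
 *	ln -s /sys/kernel/config/nvmet/subsystems/testnqn \
 *		ports/1/subsystems/testnqn
 *	nvme connect -t loop -n testnqn
 */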
module_init(nvme_loop_init_module);
module_exit(nvme_loop_cleanup_module);

MODULE_LICENSE("GPL v2");
MODULE_ALIAS("nvmet-transport-254"); /* 254 == NVMF_TRTYPE_LOOP */