/*
 * NVMe over Fabrics loopback device.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
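
/*
 * Overview (commentary added for readability; not part of the original
 * header):
 *
 * This module implements an NVMe over Fabrics "transport" that loops the
 * host side straight back into the in-kernel NVMe target.  No wire
 * protocol is involved: nvme_loop_queue_rq() translates each host request
 * into a target request (struct nvmet_req) and executes it from a work
 * item, and completions flow back through nvme_loop_queue_response().
 *
 * Typical usage, assuming nvme-cli is installed and a subsystem plus a
 * loop port have already been configured through the nvmet configfs tree
 * (the NQN "testnqn" is an example, not a requirement):
 *
 *	modprobe nvme-loop
 *	nvme connect -t loop -n testnqn
 */
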
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/scatterlist.h>
#include <linux/delay.h>
#include <linux/blk-mq.h>
#include <linux/nvme.h>
#include <linux/module.h>
#include <linux/parser.h>
#include <linux/t10-pi.h>
#include "nvmet.h"
#include "../host/nvme.h"
#include "../host/fabrics.h"

#define NVME_LOOP_AQ_DEPTH		256

#define NVME_LOOP_MAX_SEGMENTS		256

/*
 * We handle AEN commands ourselves and don't even let the
 * block layer know about them.
 */
#define NVME_LOOP_NR_AEN_COMMANDS	1
#define NVME_LOOP_AQ_BLKMQ_DEPTH	\
	(NVME_LOOP_AQ_DEPTH - NVME_LOOP_NR_AEN_COMMANDS)

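/*
 * Per-request state.  One iod carries both sides of the loop: the
 * host-visible command/completion pair (cmd/rsp) and the target-side
 * request (req), which nvme_loop_init_iod() points back at them.
 * first_sgl[] is the inline head of the chained scatterlist that
 * describes the data; further chunks are chained on as needed.
 */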
struct nvme_loop_iod {
	struct nvme_request	nvme_req;
	struct nvme_command	cmd;
	struct nvme_completion	rsp;
	struct nvmet_req	req;
	struct nvme_loop_queue	*queue;
	struct work_struct	work;
	struct sg_table		sg_table;
	struct scatterlist	first_sgl[];
};

struct nvme_loop_ctrl {
	spinlock_t		lock;
	struct nvme_loop_queue	*queues;
	u32			queue_count;

	struct blk_mq_tag_set	admin_tag_set;

	struct list_head	list;
	u64			cap;
	struct blk_mq_tag_set	tag_set;
	struct nvme_loop_iod	async_event_iod;
	struct nvme_ctrl	ctrl;

	struct nvmet_ctrl	*target_ctrl;
	struct work_struct	delete_work;
	struct work_struct	reset_work;
};

static inline struct nvme_loop_ctrl *to_loop_ctrl(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_loop_ctrl, ctrl);
}

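/*
 * A loop queue is just a host queue paired with the target's completion
 * and submission queue state; there is no hardware ring, so this is all
 * the "transport" needs per queue.
 */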
struct nvme_loop_queue {
	struct nvmet_cq		nvme_cq;
	struct nvmet_sq		nvme_sq;
	struct nvme_loop_ctrl	*ctrl;
};

static struct nvmet_port *nvmet_loop_port;

static LIST_HEAD(nvme_loop_ctrl_list);
static DEFINE_MUTEX(nvme_loop_ctrl_mutex);

static void nvme_loop_queue_response(struct nvmet_req *nvme_req);
static void nvme_loop_delete_ctrl(struct nvmet_ctrl *ctrl);

static struct nvmet_fabrics_ops nvme_loop_ops;

static inline int nvme_loop_queue_idx(struct nvme_loop_queue *queue)
{
	return queue - queue->ctrl->queues;
}

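/*
 * Final host-side completion: release the command and the chained
 * scatterlist, requeue the request if its status is retryable, and
 * otherwise map the NVMe status to an errno for the block layer.
 */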
static void nvme_loop_complete_rq(struct request *req)
{
	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
	int error = 0;

	nvme_cleanup_cmd(req);
	sg_free_table_chained(&iod->sg_table, true);

	if (unlikely(req->errors)) {
		if (nvme_req_needs_retry(req, req->errors)) {
			nvme_requeue_req(req);
			return;
		}

		if (req->cmd_type == REQ_TYPE_DRV_PRIV)
			error = req->errors;
		else
			error = nvme_error_status(req->errors);
	}

	blk_mq_end_request(req, error);
}

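/*
 * Called by the target once it has filled in the completion entry.
 * This is the loop transport's equivalent of a completion interrupt.
 */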
static void nvme_loop_queue_response(struct nvmet_req *req)
{
	struct nvme_loop_iod *iod =
		container_of(req, struct nvme_loop_iod, req);
	struct nvme_completion *cqe = &iod->rsp;

	/*
	 * AEN requests are special as they don't time out and can
	 * survive any kind of queue freeze and often don't respond to
	 * aborts.  We don't even bother to allocate a struct request
	 * for them but rather special case them here.
	 */
	if (unlikely(nvme_loop_queue_idx(iod->queue) == 0 &&
			cqe->command_id >= NVME_LOOP_AQ_BLKMQ_DEPTH)) {
		nvme_complete_async_event(&iod->queue->ctrl->ctrl, cqe->status,
				&cqe->result);
	} else {
		struct request *rq = blk_mq_rq_from_pdu(iod);

		iod->nvme_req.result = cqe->result;
		blk_mq_complete_request(rq, le16_to_cpu(cqe->status) >> 1);
	}
}

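/*
 * Target-side execution is deferred to a work item so the nvmet command
 * handler runs in its own process context, rather than directly in the
 * submitter's queue_rq() path.
 */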
static void nvme_loop_execute_work(struct work_struct *work)
{
	struct nvme_loop_iod *iod =
		container_of(work, struct nvme_loop_iod, work);

	iod->req.execute(&iod->req);
}

static enum blk_eh_timer_return
nvme_loop_timeout(struct request *rq, bool reserved)
{
	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(rq);

	/* queue error recovery */
	schedule_work(&iod->queue->ctrl->reset_work);

	/* fail with DNR on cmd timeout */
	rq->errors = NVME_SC_ABORT_REQ | NVME_SC_DNR;

	return BLK_EH_HANDLED;
}

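/*
 * Submission path: translate the blk-mq request into an NVMe command,
 * initialize the embedded nvmet_req against the loop port, map the data
 * into the chained scatterlist, and kick the target's work item.  If
 * nvmet_req_init() fails, the prepared error completion is delivered
 * straight back through nvme_loop_queue_response().
 */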
static int nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
		const struct blk_mq_queue_data *bd)
{
	struct nvme_ns *ns = hctx->queue->queuedata;
	struct nvme_loop_queue *queue = hctx->driver_data;
	struct request *req = bd->rq;
	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
	int ret;

	ret = nvme_setup_cmd(ns, req, &iod->cmd);
	if (ret)
		return ret;

	iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;
	iod->req.port = nvmet_loop_port;
	if (!nvmet_req_init(&iod->req, &queue->nvme_cq,
			&queue->nvme_sq, &nvme_loop_ops)) {
		nvme_cleanup_cmd(req);
		blk_mq_start_request(req);
		nvme_loop_queue_response(&iod->req);
		return 0;
	}

	if (blk_rq_bytes(req)) {
		iod->sg_table.sgl = iod->first_sgl;
		ret = sg_alloc_table_chained(&iod->sg_table,
			req->nr_phys_segments, iod->sg_table.sgl);
		if (ret)
			return BLK_MQ_RQ_QUEUE_BUSY;

		iod->req.sg = iod->sg_table.sgl;
		iod->req.sg_cnt = blk_rq_map_sg(req->q, req, iod->sg_table.sgl);
		BUG_ON(iod->req.sg_cnt > req->nr_phys_segments);
	}

	iod->cmd.common.command_id = req->tag;
	blk_mq_start_request(req);

	schedule_work(&iod->work);
	return 0;
}

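/*
 * Hand-crafted AEN submission: no struct request is allocated, and the
 * command id NVME_LOOP_AQ_BLKMQ_DEPTH is reserved for it, which is also
 * how nvme_loop_queue_response() recognizes the completion.
 */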
static void nvme_loop_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(arg);
	struct nvme_loop_queue *queue = &ctrl->queues[0];
	struct nvme_loop_iod *iod = &ctrl->async_event_iod;

	memset(&iod->cmd, 0, sizeof(iod->cmd));
	iod->cmd.common.opcode = nvme_admin_async_event;
	iod->cmd.common.command_id = NVME_LOOP_AQ_BLKMQ_DEPTH;
	iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;

	if (!nvmet_req_init(&iod->req, &queue->nvme_cq, &queue->nvme_sq,
			&nvme_loop_ops)) {
		dev_err(ctrl->ctrl.device, "failed async event work\n");
		return;
	}

	schedule_work(&iod->work);
}

static int nvme_loop_init_iod(struct nvme_loop_ctrl *ctrl,
		struct nvme_loop_iod *iod, unsigned int queue_idx)
{
	BUG_ON(queue_idx >= ctrl->queue_count);

	iod->req.cmd = &iod->cmd;
	iod->req.rsp = &iod->rsp;
	iod->queue = &ctrl->queues[queue_idx];
	INIT_WORK(&iod->work, nvme_loop_execute_work);
	return 0;
}

static int nvme_loop_init_request(void *data, struct request *req,
				unsigned int hctx_idx, unsigned int rq_idx,
				unsigned int numa_node)
{
	return nvme_loop_init_iod(data, blk_mq_rq_to_pdu(req), hctx_idx + 1);
}

static int nvme_loop_init_admin_request(void *data, struct request *req,
				unsigned int hctx_idx, unsigned int rq_idx,
				unsigned int numa_node)
{
	return nvme_loop_init_iod(data, blk_mq_rq_to_pdu(req), 0);
}

static int nvme_loop_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_loop_ctrl *ctrl = data;
	struct nvme_loop_queue *queue = &ctrl->queues[hctx_idx + 1];

	BUG_ON(hctx_idx >= ctrl->queue_count);

	hctx->driver_data = queue;
	return 0;
}

static int nvme_loop_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_loop_ctrl *ctrl = data;
	struct nvme_loop_queue *queue = &ctrl->queues[0];

	BUG_ON(hctx_idx != 0);

	hctx->driver_data = queue;
	return 0;
}

static struct blk_mq_ops nvme_loop_mq_ops = {
	.queue_rq	= nvme_loop_queue_rq,
	.complete	= nvme_loop_complete_rq,
	.init_request	= nvme_loop_init_request,
	.init_hctx	= nvme_loop_init_hctx,
	.timeout	= nvme_loop_timeout,
};

static struct blk_mq_ops nvme_loop_admin_mq_ops = {
	.queue_rq	= nvme_loop_queue_rq,
	.complete	= nvme_loop_complete_rq,
	.init_request	= nvme_loop_init_admin_request,
	.init_hctx	= nvme_loop_init_admin_hctx,
	.timeout	= nvme_loop_timeout,
};

static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
{
	blk_cleanup_queue(ctrl->ctrl.admin_q);
	blk_mq_free_tag_set(&ctrl->admin_tag_set);
	nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
}

static void nvme_loop_free_ctrl(struct nvme_ctrl *nctrl)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(nctrl);

	if (list_empty(&ctrl->list))
		goto free_ctrl;

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_del(&ctrl->list);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	if (nctrl->tagset) {
		blk_cleanup_queue(ctrl->ctrl.connect_q);
		blk_mq_free_tag_set(&ctrl->tag_set);
	}
	kfree(ctrl->queues);
	nvmf_free_options(nctrl->opts);
free_ctrl:
	kfree(ctrl);
}

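/*
 * Admin queue bring-up, in dependency order: target submission queue,
 * blk-mq tag set and request queue, fabrics Connect, CAP read, controller
 * enable, Identify, and finally keep-alive.  The error unwinding at the
 * bottom mirrors this order.
 */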
static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
{
	int error;

	memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
	ctrl->admin_tag_set.ops = &nvme_loop_admin_mq_ops;
	ctrl->admin_tag_set.queue_depth = NVME_LOOP_AQ_BLKMQ_DEPTH;
	ctrl->admin_tag_set.reserved_tags = 2; /* connect + keep-alive */
	ctrl->admin_tag_set.numa_node = NUMA_NO_NODE;
	ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
		SG_CHUNK_SIZE * sizeof(struct scatterlist);
	ctrl->admin_tag_set.driver_data = ctrl;
	ctrl->admin_tag_set.nr_hw_queues = 1;
	ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT;

	ctrl->queues[0].ctrl = ctrl;
	error = nvmet_sq_init(&ctrl->queues[0].nvme_sq);
	if (error)
		return error;
	ctrl->queue_count = 1;

	error = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
	if (error)
		goto out_free_sq;

	ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
	if (IS_ERR(ctrl->ctrl.admin_q)) {
		error = PTR_ERR(ctrl->ctrl.admin_q);
		goto out_free_tagset;
	}

	error = nvmf_connect_admin_queue(&ctrl->ctrl);
	if (error)
		goto out_cleanup_queue;

	error = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->cap);
	if (error) {
		dev_err(ctrl->ctrl.device,
			"prop_get NVME_REG_CAP failed\n");
		goto out_cleanup_queue;
	}

	ctrl->ctrl.sqsize =
		min_t(int, NVME_CAP_MQES(ctrl->cap) + 1, ctrl->ctrl.sqsize);

	error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap);
	if (error)
		goto out_cleanup_queue;

	ctrl->ctrl.max_hw_sectors =
		(NVME_LOOP_MAX_SEGMENTS - 1) << (PAGE_SHIFT - 9);

	error = nvme_init_identify(&ctrl->ctrl);
	if (error)
		goto out_cleanup_queue;

	nvme_start_keep_alive(&ctrl->ctrl);

	return 0;

out_cleanup_queue:
	blk_cleanup_queue(ctrl->ctrl.admin_q);
out_free_tagset:
	blk_mq_free_tag_set(&ctrl->admin_tag_set);
out_free_sq:
	nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
	return error;
}

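/*
 * Teardown: stop keep-alive first, then quiesce and cancel in-flight I/O,
 * destroy the target-side I/O queues, shut the controller down cleanly if
 * it is still live, and finish with the admin queue.
 */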
static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl)
{
	int i;

	nvme_stop_keep_alive(&ctrl->ctrl);

	if (ctrl->queue_count > 1) {
		nvme_stop_queues(&ctrl->ctrl);
		blk_mq_tagset_busy_iter(&ctrl->tag_set,
					nvme_cancel_request, &ctrl->ctrl);

		for (i = 1; i < ctrl->queue_count; i++)
			nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
	}

	if (ctrl->ctrl.state == NVME_CTRL_LIVE)
		nvme_shutdown_ctrl(&ctrl->ctrl);

	blk_mq_stop_hw_queues(ctrl->ctrl.admin_q);
	blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
				nvme_cancel_request, &ctrl->ctrl);
	nvme_loop_destroy_admin_queue(ctrl);
}

static void nvme_loop_del_ctrl_work(struct work_struct *work)
{
	struct nvme_loop_ctrl *ctrl = container_of(work,
				struct nvme_loop_ctrl, delete_work);

	nvme_uninit_ctrl(&ctrl->ctrl);
	nvme_loop_shutdown_ctrl(ctrl);
	nvme_put_ctrl(&ctrl->ctrl);
}

static int __nvme_loop_del_ctrl(struct nvme_loop_ctrl *ctrl)
{
	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING))
		return -EBUSY;

	if (!schedule_work(&ctrl->delete_work))
		return -EBUSY;

	return 0;
}

static int nvme_loop_del_ctrl(struct nvme_ctrl *nctrl)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(nctrl);
	int ret;

	ret = __nvme_loop_del_ctrl(ctrl);
	if (ret)
		return ret;

	flush_work(&ctrl->delete_work);

	return 0;
}

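/*
 * Target-side delete callback: when the target tears a controller down,
 * find the matching host controller by cntlid and delete it too.
 */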
static void nvme_loop_delete_ctrl(struct nvmet_ctrl *nctrl)
{
	struct nvme_loop_ctrl *ctrl;

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_for_each_entry(ctrl, &nvme_loop_ctrl_list, list) {
		if (ctrl->ctrl.cntlid == nctrl->cntlid)
			__nvme_loop_del_ctrl(ctrl);
	}
	mutex_unlock(&nvme_loop_ctrl_mutex);
}

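/*
 * Reset is implemented as a full teardown followed by the same bring-up
 * sequence used at create time; on any failure the controller is removed
 * rather than left half-initialized.
 */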
static void nvme_loop_reset_ctrl_work(struct work_struct *work)
{
	struct nvme_loop_ctrl *ctrl = container_of(work,
					struct nvme_loop_ctrl, reset_work);
	bool changed;
	int i, ret;

	nvme_loop_shutdown_ctrl(ctrl);

	ret = nvme_loop_configure_admin_queue(ctrl);
	if (ret)
		goto out_disable;

	for (i = 1; i <= ctrl->ctrl.opts->nr_io_queues; i++) {
		ctrl->queues[i].ctrl = ctrl;
		ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq);
		if (ret)
			goto out_free_queues;

		ctrl->queue_count++;
	}

	for (i = 1; i <= ctrl->ctrl.opts->nr_io_queues; i++) {
		ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
		if (ret)
			goto out_free_queues;
	}

	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
	WARN_ON_ONCE(!changed);

	nvme_queue_scan(&ctrl->ctrl);
	nvme_queue_async_events(&ctrl->ctrl);

	nvme_start_queues(&ctrl->ctrl);

	return;

out_free_queues:
	for (i = 1; i < ctrl->queue_count; i++)
		nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
	nvme_loop_destroy_admin_queue(ctrl);
out_disable:
	dev_warn(ctrl->ctrl.device, "Removing after reset failure\n");
	nvme_uninit_ctrl(&ctrl->ctrl);
	nvme_put_ctrl(&ctrl->ctrl);
}

static int nvme_loop_reset_ctrl(struct nvme_ctrl *nctrl)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(nctrl);

	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING))
		return -EBUSY;

	if (!schedule_work(&ctrl->reset_work))
		return -EBUSY;

	flush_work(&ctrl->reset_work);

	return 0;
}

static const struct nvme_ctrl_ops nvme_loop_ctrl_ops = {
	.name			= "loop",
	.module			= THIS_MODULE,
	.is_fabrics		= true,
	.reg_read32		= nvmf_reg_read32,
	.reg_read64		= nvmf_reg_read64,
	.reg_write32		= nvmf_reg_write32,
	.reset_ctrl		= nvme_loop_reset_ctrl,
	.free_ctrl		= nvme_loop_free_ctrl,
	.submit_async_event	= nvme_loop_submit_async_event,
	.delete_ctrl		= nvme_loop_del_ctrl,
	.get_subsysnqn		= nvmf_get_subsysnqn,
};

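/*
 * I/O queue setup: negotiate the queue count with the controller,
 * initialize one target submission queue per I/O queue, then build the
 * shared tag set and connect_q before issuing a fabrics Connect on each
 * queue.
 */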
static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
{
	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
	int ret, i;

	ret = nvme_set_queue_count(&ctrl->ctrl, &opts->nr_io_queues);
	if (ret || !opts->nr_io_queues)
		return ret;

	dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n",
		opts->nr_io_queues);

	for (i = 1; i <= opts->nr_io_queues; i++) {
		ctrl->queues[i].ctrl = ctrl;
		ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq);
		if (ret)
			goto out_destroy_queues;

		ctrl->queue_count++;
	}

	memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
	ctrl->tag_set.ops = &nvme_loop_mq_ops;
	ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
	ctrl->tag_set.reserved_tags = 1; /* fabric connect */
	ctrl->tag_set.numa_node = NUMA_NO_NODE;
	ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	ctrl->tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
		SG_CHUNK_SIZE * sizeof(struct scatterlist);
	ctrl->tag_set.driver_data = ctrl;
	ctrl->tag_set.nr_hw_queues = ctrl->queue_count - 1;
	ctrl->tag_set.timeout = NVME_IO_TIMEOUT;
	ctrl->ctrl.tagset = &ctrl->tag_set;

	ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
	if (ret)
		goto out_destroy_queues;

	ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
	if (IS_ERR(ctrl->ctrl.connect_q)) {
		ret = PTR_ERR(ctrl->ctrl.connect_q);
		goto out_free_tagset;
	}

	for (i = 1; i <= opts->nr_io_queues; i++) {
		ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
		if (ret)
			goto out_cleanup_connect_q;
	}

	return 0;

out_cleanup_connect_q:
	blk_cleanup_queue(ctrl->ctrl.connect_q);
out_free_tagset:
	blk_mq_free_tag_set(&ctrl->tag_set);
out_destroy_queues:
	for (i = 1; i < ctrl->queue_count; i++)
		nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
	return ret;
}

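/*
 * create_ctrl callback for the "loop" fabrics transport.  Allocates the
 * controller, brings up the admin queue, clamps queue_size down to the
 * controller's MAXCMD, creates the I/O queues, and only then marks the
 * controller live and kicks off namespace scanning.
 */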
static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev,
		struct nvmf_ctrl_options *opts)
{
	struct nvme_loop_ctrl *ctrl;
	bool changed;
	int ret;

	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		return ERR_PTR(-ENOMEM);
	ctrl->ctrl.opts = opts;
	INIT_LIST_HEAD(&ctrl->list);

	INIT_WORK(&ctrl->delete_work, nvme_loop_del_ctrl_work);
	INIT_WORK(&ctrl->reset_work, nvme_loop_reset_ctrl_work);

	ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_loop_ctrl_ops,
				0 /* no quirks, we're perfect! */);
	if (ret)
		goto out_put_ctrl;

	spin_lock_init(&ctrl->lock);

	ret = -ENOMEM;

	ctrl->ctrl.sqsize = opts->queue_size - 1;
	ctrl->ctrl.kato = opts->kato;

	ctrl->queues = kcalloc(opts->nr_io_queues + 1, sizeof(*ctrl->queues),
			GFP_KERNEL);
	if (!ctrl->queues)
		goto out_uninit_ctrl;

	ret = nvme_loop_configure_admin_queue(ctrl);
	if (ret)
		goto out_free_queues;

	if (opts->queue_size > ctrl->ctrl.maxcmd) {
		/* warn if maxcmd is lower than queue_size */
		dev_warn(ctrl->ctrl.device,
			"queue_size %zu > ctrl maxcmd %u, clamping down\n",
			opts->queue_size, ctrl->ctrl.maxcmd);
		opts->queue_size = ctrl->ctrl.maxcmd;
	}

	if (opts->nr_io_queues) {
		ret = nvme_loop_create_io_queues(ctrl);
		if (ret)
			goto out_remove_admin_queue;
	}

	nvme_loop_init_iod(ctrl, &ctrl->async_event_iod, 0);

	dev_info(ctrl->ctrl.device,
		 "new ctrl: \"%s\"\n", ctrl->ctrl.opts->subsysnqn);

	kref_get(&ctrl->ctrl.kref);

	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
	WARN_ON_ONCE(!changed);

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_add_tail(&ctrl->list, &nvme_loop_ctrl_list);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	if (opts->nr_io_queues) {
		nvme_queue_scan(&ctrl->ctrl);
		nvme_queue_async_events(&ctrl->ctrl);
	}

	return &ctrl->ctrl;

out_remove_admin_queue:
	nvme_loop_destroy_admin_queue(ctrl);
out_free_queues:
	kfree(ctrl->queues);
out_uninit_ctrl:
	nvme_uninit_ctrl(&ctrl->ctrl);
out_put_ctrl:
	nvme_put_ctrl(&ctrl->ctrl);
	if (ret > 0)
		ret = -EIO;
	return ERR_PTR(ret);
}

static int nvme_loop_add_port(struct nvmet_port *port)
{
	/*
	 * XXX: disallow adding more than one port so there are no
	 * connection rejections when a subsystem is assigned to a port
	 * for which loop doesn't have a pointer.  This scenario would
	 * be possible if we allowed more than one port to be added and
	 * a subsystem was assigned to a port other than nvmet_loop_port.
	 */

	if (nvmet_loop_port)
		return -EPERM;

	nvmet_loop_port = port;
	return 0;
}


static void nvme_loop_remove_port(struct nvmet_port *port)
{
	if (port == nvmet_loop_port)
		nvmet_loop_port = NULL;
}

static struct nvmet_fabrics_ops nvme_loop_ops = {
	.owner		= THIS_MODULE,
	.type		= NVMF_TRTYPE_LOOP,
	.add_port	= nvme_loop_add_port,
	.remove_port	= nvme_loop_remove_port,
	.queue_response = nvme_loop_queue_response,
	.delete_ctrl	= nvme_loop_delete_ctrl,
};

static struct nvmf_transport_ops nvme_loop_transport = {
	.name		= "loop",
	.create_ctrl	= nvme_loop_create_ctrl,
};

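/*
 * The module registers on both sides of the fabric: as an nvmet transport
 * (so ports of type "loop" can be created) and as a host fabrics
 * transport (so "transport=loop" connect requests reach
 * nvme_loop_create_ctrl).
 */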
static int __init nvme_loop_init_module(void)
{
	int ret;

	ret = nvmet_register_transport(&nvme_loop_ops);
	if (ret)
		return ret;
	nvmf_register_transport(&nvme_loop_transport);
	return 0;
}

static void __exit nvme_loop_cleanup_module(void)
{
	struct nvme_loop_ctrl *ctrl, *next;

	nvmf_unregister_transport(&nvme_loop_transport);
	nvmet_unregister_transport(&nvme_loop_ops);

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_for_each_entry_safe(ctrl, next, &nvme_loop_ctrl_list, list)
		__nvme_loop_del_ctrl(ctrl);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	flush_scheduled_work();
}

module_init(nvme_loop_init_module);
module_exit(nvme_loop_cleanup_module);

MODULE_LICENSE("GPL v2");
MODULE_ALIAS("nvmet-transport-254"); /* 254 == NVMF_TRTYPE_LOOP */