xref: /openbmc/linux/drivers/nvme/target/loop.c (revision 3a85a5de29ea779634ddfd768059e06196687aba)
/*
 * NVMe over Fabrics loopback device.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/scatterlist.h>
#include <linux/delay.h>
#include <linux/blk-mq.h>
#include <linux/nvme.h>
#include <linux/module.h>
#include <linux/parser.h>
#include <linux/t10-pi.h>
#include "nvmet.h"
#include "../host/nvme.h"
#include "../host/fabrics.h"

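/*
 * Overview (descriptive note): the loop transport wires the NVMe host core
 * directly to the NVMe target core in the same kernel.  It registers a
 * fabrics transport named "loop" with the host side and a nvmet_fabrics_ops
 * instance with the target side, so commands submitted via blk-mq are handed
 * straight to nvmet and completed back without crossing a wire.
 */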
#define NVME_LOOP_AQ_DEPTH		256

#define NVME_LOOP_MAX_SEGMENTS		256

/*
 * We handle AEN commands ourselves and don't even let the
 * block layer know about them.
 */
#define NVME_LOOP_NR_AEN_COMMANDS	1
#define NVME_LOOP_AQ_BLKMQ_DEPTH	\
	(NVME_LOOP_AQ_DEPTH - NVME_LOOP_NR_AEN_COMMANDS)

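/*
 * Per-command context: pairs the host-side submission/completion entries
 * with the target-side nvmet_req so a command can be executed by the target
 * and completed back to the host using the same embedded structures.
 * first_sgl provides inline space for the chained scatterlist.
 */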
struct nvme_loop_iod {
	struct nvme_command	cmd;
	struct nvme_completion	rsp;
	struct nvmet_req	req;
	struct nvme_loop_queue	*queue;
	struct work_struct	work;
	struct sg_table		sg_table;
	struct scatterlist	first_sgl[];
};

struct nvme_loop_ctrl {
	spinlock_t		lock;
	struct nvme_loop_queue	*queues;
	u32			queue_count;

	struct blk_mq_tag_set	admin_tag_set;

	struct list_head	list;
	u64			cap;
	struct blk_mq_tag_set	tag_set;
	struct nvme_loop_iod	async_event_iod;
	struct nvme_ctrl	ctrl;

	struct nvmet_ctrl	*target_ctrl;
	struct work_struct	delete_work;
	struct work_struct	reset_work;
};

static inline struct nvme_loop_ctrl *to_loop_ctrl(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_loop_ctrl, ctrl);
}

struct nvme_loop_queue {
	struct nvmet_cq		nvme_cq;
	struct nvmet_sq		nvme_sq;
	struct nvme_loop_ctrl	*ctrl;
};

static struct nvmet_port *nvmet_loop_port;

static LIST_HEAD(nvme_loop_ctrl_list);
static DEFINE_MUTEX(nvme_loop_ctrl_mutex);

static void nvme_loop_queue_response(struct nvmet_req *nvme_req);
static void nvme_loop_delete_ctrl(struct nvmet_ctrl *ctrl);

static struct nvmet_fabrics_ops nvme_loop_ops;

static inline int nvme_loop_queue_idx(struct nvme_loop_queue *queue)
{
	return queue - queue->ctrl->queues;
}

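/*
 * blk-mq ->complete handler: release the command resources, then either
 * requeue the request for a retry or translate the NVMe status into an
 * errno for blk_mq_end_request().
 */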
static void nvme_loop_complete_rq(struct request *req)
{
	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
	int error = 0;

	nvme_cleanup_cmd(req);
	sg_free_table_chained(&iod->sg_table, true);

	if (unlikely(req->errors)) {
		if (nvme_req_needs_retry(req, req->errors)) {
			nvme_requeue_req(req);
			return;
		}

		if (req->cmd_type == REQ_TYPE_DRV_PRIV)
			error = req->errors;
		else
			error = nvme_error_status(req->errors);
	}

	blk_mq_end_request(req, error);
}

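/*
 * Called by the target core when it has a completion for us: route AEN
 * completions to the host AEN handler and everything else back to blk-mq.
 */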
static void nvme_loop_queue_response(struct nvmet_req *nvme_req)
{
	struct nvme_loop_iod *iod =
		container_of(nvme_req, struct nvme_loop_iod, req);
	struct nvme_completion *cqe = &iod->rsp;

	/*
	 * AEN requests are special as they don't time out and can
	 * survive any kind of queue freeze and often don't respond to
	 * aborts.  We don't even bother to allocate a struct request
	 * for them but rather special case them here.
	 */
	if (unlikely(nvme_loop_queue_idx(iod->queue) == 0 &&
			cqe->command_id >= NVME_LOOP_AQ_BLKMQ_DEPTH)) {
		nvme_complete_async_event(&iod->queue->ctrl->ctrl, cqe);
	} else {
		struct request *req = blk_mq_rq_from_pdu(iod);

		if (req->cmd_type == REQ_TYPE_DRV_PRIV && req->special)
			memcpy(req->special, cqe, sizeof(*cqe));
		blk_mq_complete_request(req, le16_to_cpu(cqe->status) >> 1);
	}
}

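/*
 * Work item that runs the target-side execution of a command outside of
 * the blk-mq submission context.
 */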
static void nvme_loop_execute_work(struct work_struct *work)
{
	struct nvme_loop_iod *iod =
		container_of(work, struct nvme_loop_iod, work);

	iod->req.execute(&iod->req);
}

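/*
 * Request timeout handler: schedule a controller reset as error recovery
 * and complete the timed-out request with an aborted/DNR status.
 */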
static enum blk_eh_timer_return
nvme_loop_timeout(struct request *rq, bool reserved)
{
	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(rq);

	/* queue error recovery */
	schedule_work(&iod->queue->ctrl->reset_work);

	/* fail with DNR on admin cmd timeout */
	rq->errors = NVME_SC_ABORT_REQ | NVME_SC_DNR;

	return BLK_EH_HANDLED;
}

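/*
 * blk-mq ->queue_rq: build the NVMe command, initialize the nvmet request
 * against this queue's target SQ/CQ, map the request's data into a chained
 * scatterlist that the target uses directly, and defer execution to the
 * iod's work item.
 */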
static int nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
		const struct blk_mq_queue_data *bd)
{
	struct nvme_ns *ns = hctx->queue->queuedata;
	struct nvme_loop_queue *queue = hctx->driver_data;
	struct request *req = bd->rq;
	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
	int ret;

	ret = nvme_setup_cmd(ns, req, &iod->cmd);
	if (ret)
		return ret;

	iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;
	iod->req.port = nvmet_loop_port;
	if (!nvmet_req_init(&iod->req, &queue->nvme_cq,
			&queue->nvme_sq, &nvme_loop_ops)) {
		nvme_cleanup_cmd(req);
		blk_mq_start_request(req);
		nvme_loop_queue_response(&iod->req);
		return 0;
	}

	if (blk_rq_bytes(req)) {
		iod->sg_table.sgl = iod->first_sgl;
		ret = sg_alloc_table_chained(&iod->sg_table,
			req->nr_phys_segments, iod->sg_table.sgl);
		if (ret)
			return BLK_MQ_RQ_QUEUE_BUSY;

		iod->req.sg = iod->sg_table.sgl;
		iod->req.sg_cnt = blk_rq_map_sg(req->q, req, iod->sg_table.sgl);
		BUG_ON(iod->req.sg_cnt > req->nr_phys_segments);
	}

	iod->cmd.common.command_id = req->tag;
	blk_mq_start_request(req);

	schedule_work(&iod->work);
	return 0;
}

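/*
 * Submit an Asynchronous Event Request on the admin queue.  AENs bypass
 * blk-mq entirely and use the dedicated async_event_iod with a command ID
 * above the blk-mq tag space (NVME_LOOP_AQ_BLKMQ_DEPTH).
 */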
static void nvme_loop_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(arg);
	struct nvme_loop_queue *queue = &ctrl->queues[0];
	struct nvme_loop_iod *iod = &ctrl->async_event_iod;

	memset(&iod->cmd, 0, sizeof(iod->cmd));
	iod->cmd.common.opcode = nvme_admin_async_event;
	iod->cmd.common.command_id = NVME_LOOP_AQ_BLKMQ_DEPTH;
	iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;

	if (!nvmet_req_init(&iod->req, &queue->nvme_cq, &queue->nvme_sq,
			&nvme_loop_ops)) {
		dev_err(ctrl->ctrl.device, "failed async event work\n");
		return;
	}

	schedule_work(&iod->work);
}

static int nvme_loop_init_iod(struct nvme_loop_ctrl *ctrl,
		struct nvme_loop_iod *iod, unsigned int queue_idx)
{
	BUG_ON(queue_idx >= ctrl->queue_count);

	iod->req.cmd = &iod->cmd;
	iod->req.rsp = &iod->rsp;
	iod->queue = &ctrl->queues[queue_idx];
	INIT_WORK(&iod->work, nvme_loop_execute_work);
	return 0;
}

static int nvme_loop_init_request(void *data, struct request *req,
				unsigned int hctx_idx, unsigned int rq_idx,
				unsigned int numa_node)
{
	return nvme_loop_init_iod(data, blk_mq_rq_to_pdu(req), hctx_idx + 1);
}

static int nvme_loop_init_admin_request(void *data, struct request *req,
				unsigned int hctx_idx, unsigned int rq_idx,
				unsigned int numa_node)
{
	return nvme_loop_init_iod(data, blk_mq_rq_to_pdu(req), 0);
}

static int nvme_loop_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_loop_ctrl *ctrl = data;
	struct nvme_loop_queue *queue = &ctrl->queues[hctx_idx + 1];

	BUG_ON(hctx_idx >= ctrl->queue_count);

	hctx->driver_data = queue;
	return 0;
}

static int nvme_loop_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_loop_ctrl *ctrl = data;
	struct nvme_loop_queue *queue = &ctrl->queues[0];

	BUG_ON(hctx_idx != 0);

	hctx->driver_data = queue;
	return 0;
}

static struct blk_mq_ops nvme_loop_mq_ops = {
	.queue_rq	= nvme_loop_queue_rq,
	.complete	= nvme_loop_complete_rq,
	.map_queue	= blk_mq_map_queue,
	.init_request	= nvme_loop_init_request,
	.init_hctx	= nvme_loop_init_hctx,
	.timeout	= nvme_loop_timeout,
};

static struct blk_mq_ops nvme_loop_admin_mq_ops = {
	.queue_rq	= nvme_loop_queue_rq,
	.complete	= nvme_loop_complete_rq,
	.map_queue	= blk_mq_map_queue,
	.init_request	= nvme_loop_init_admin_request,
	.init_hctx	= nvme_loop_init_admin_hctx,
	.timeout	= nvme_loop_timeout,
};

static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
{
	blk_cleanup_queue(ctrl->ctrl.admin_q);
	blk_mq_free_tag_set(&ctrl->admin_tag_set);
	nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
}

static void nvme_loop_free_ctrl(struct nvme_ctrl *nctrl)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(nctrl);

	if (list_empty(&ctrl->list))
		goto free_ctrl;

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_del(&ctrl->list);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	if (nctrl->tagset) {
		blk_cleanup_queue(ctrl->ctrl.connect_q);
		blk_mq_free_tag_set(&ctrl->tag_set);
	}
	kfree(ctrl->queues);
	nvmf_free_options(nctrl->opts);
free_ctrl:
	kfree(ctrl);
}

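/*
 * Set up the admin queue: target SQ, admin tag set and request queue,
 * fabrics Connect, controller enable, and identification.  Keep-alive is
 * started once the controller is known to be functional.
 */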
static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
{
	int error;

	memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
	ctrl->admin_tag_set.ops = &nvme_loop_admin_mq_ops;
	ctrl->admin_tag_set.queue_depth = NVME_LOOP_AQ_BLKMQ_DEPTH;
	ctrl->admin_tag_set.reserved_tags = 2; /* connect + keep-alive */
	ctrl->admin_tag_set.numa_node = NUMA_NO_NODE;
	ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
		SG_CHUNK_SIZE * sizeof(struct scatterlist);
	ctrl->admin_tag_set.driver_data = ctrl;
	ctrl->admin_tag_set.nr_hw_queues = 1;
	ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT;

	ctrl->queues[0].ctrl = ctrl;
	error = nvmet_sq_init(&ctrl->queues[0].nvme_sq);
	if (error)
		return error;
	ctrl->queue_count = 1;

	error = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
	if (error)
		goto out_free_sq;

	ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
	if (IS_ERR(ctrl->ctrl.admin_q)) {
		error = PTR_ERR(ctrl->ctrl.admin_q);
		goto out_free_tagset;
	}

	error = nvmf_connect_admin_queue(&ctrl->ctrl);
	if (error)
		goto out_cleanup_queue;

	error = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->cap);
	if (error) {
		dev_err(ctrl->ctrl.device,
			"prop_get NVME_REG_CAP failed\n");
		goto out_cleanup_queue;
	}

	ctrl->ctrl.sqsize =
		min_t(int, NVME_CAP_MQES(ctrl->cap) + 1, ctrl->ctrl.sqsize);

	error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap);
	if (error)
		goto out_cleanup_queue;

	ctrl->ctrl.max_hw_sectors =
		(NVME_LOOP_MAX_SEGMENTS - 1) << (PAGE_SHIFT - 9);

	error = nvme_init_identify(&ctrl->ctrl);
	if (error)
		goto out_cleanup_queue;

	nvme_start_keep_alive(&ctrl->ctrl);

	return 0;

out_cleanup_queue:
	blk_cleanup_queue(ctrl->ctrl.admin_q);
out_free_tagset:
	blk_mq_free_tag_set(&ctrl->admin_tag_set);
out_free_sq:
	nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
	return error;
}

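/*
 * Tear down a live controller: stop keep-alive, quiesce and cancel any
 * outstanding I/O, destroy the target-side I/O SQs, shut the controller
 * down if it is still live, and finally dismantle the admin queue.
 */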
static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl)
{
	int i;

	nvme_stop_keep_alive(&ctrl->ctrl);

	if (ctrl->queue_count > 1) {
		nvme_stop_queues(&ctrl->ctrl);
		blk_mq_tagset_busy_iter(&ctrl->tag_set,
					nvme_cancel_request, &ctrl->ctrl);

		for (i = 1; i < ctrl->queue_count; i++)
			nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
	}

	if (ctrl->ctrl.state == NVME_CTRL_LIVE)
		nvme_shutdown_ctrl(&ctrl->ctrl);

	blk_mq_stop_hw_queues(ctrl->ctrl.admin_q);
	blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
				nvme_cancel_request, &ctrl->ctrl);
	nvme_loop_destroy_admin_queue(ctrl);
}

static void nvme_loop_del_ctrl_work(struct work_struct *work)
{
	struct nvme_loop_ctrl *ctrl = container_of(work,
				struct nvme_loop_ctrl, delete_work);

	nvme_remove_namespaces(&ctrl->ctrl);
	nvme_loop_shutdown_ctrl(ctrl);
	nvme_uninit_ctrl(&ctrl->ctrl);
	nvme_put_ctrl(&ctrl->ctrl);
}

static int __nvme_loop_del_ctrl(struct nvme_loop_ctrl *ctrl)
{
	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING))
		return -EBUSY;

	if (!schedule_work(&ctrl->delete_work))
		return -EBUSY;

	return 0;
}

static int nvme_loop_del_ctrl(struct nvme_ctrl *nctrl)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(nctrl);
	int ret;

	ret = __nvme_loop_del_ctrl(ctrl);
	if (ret)
		return ret;

	flush_work(&ctrl->delete_work);

	return 0;
}

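/*
 * Target-side delete callback: find the matching host controller by
 * controller ID and schedule its deletion.
 */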
static void nvme_loop_delete_ctrl(struct nvmet_ctrl *nctrl)
{
	struct nvme_loop_ctrl *ctrl;

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_for_each_entry(ctrl, &nvme_loop_ctrl_list, list) {
		if (ctrl->ctrl.cntlid == nctrl->cntlid)
			__nvme_loop_del_ctrl(ctrl);
	}
	mutex_unlock(&nvme_loop_ctrl_mutex);
}

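/*
 * Controller reset: shut everything down, then rebuild the admin queue and
 * reconnect all I/O queues.  If any step fails the controller is removed
 * instead of being brought back to LIVE.
 */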
static void nvme_loop_reset_ctrl_work(struct work_struct *work)
{
	struct nvme_loop_ctrl *ctrl = container_of(work,
					struct nvme_loop_ctrl, reset_work);
	bool changed;
	int i, ret;

	nvme_loop_shutdown_ctrl(ctrl);

	ret = nvme_loop_configure_admin_queue(ctrl);
	if (ret)
		goto out_disable;

	for (i = 1; i <= ctrl->ctrl.opts->nr_io_queues; i++) {
		ctrl->queues[i].ctrl = ctrl;
		ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq);
		if (ret)
			goto out_free_queues;

		ctrl->queue_count++;
	}

	for (i = 1; i <= ctrl->ctrl.opts->nr_io_queues; i++) {
		ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
		if (ret)
			goto out_free_queues;
	}

	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
	WARN_ON_ONCE(!changed);

	nvme_queue_scan(&ctrl->ctrl);
	nvme_queue_async_events(&ctrl->ctrl);

	nvme_start_queues(&ctrl->ctrl);

	return;

out_free_queues:
	for (i = 1; i < ctrl->queue_count; i++)
		nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
	nvme_loop_destroy_admin_queue(ctrl);
out_disable:
	dev_warn(ctrl->ctrl.device, "Removing after reset failure\n");
	nvme_remove_namespaces(&ctrl->ctrl);
	nvme_uninit_ctrl(&ctrl->ctrl);
	nvme_put_ctrl(&ctrl->ctrl);
}

static int nvme_loop_reset_ctrl(struct nvme_ctrl *nctrl)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(nctrl);

	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING))
		return -EBUSY;

	if (!schedule_work(&ctrl->reset_work))
		return -EBUSY;

	flush_work(&ctrl->reset_work);

	return 0;
}

static const struct nvme_ctrl_ops nvme_loop_ctrl_ops = {
	.name			= "loop",
	.module			= THIS_MODULE,
	.is_fabrics		= true,
	.reg_read32		= nvmf_reg_read32,
	.reg_read64		= nvmf_reg_read64,
	.reg_write32		= nvmf_reg_write32,
	.reset_ctrl		= nvme_loop_reset_ctrl,
	.free_ctrl		= nvme_loop_free_ctrl,
	.submit_async_event	= nvme_loop_submit_async_event,
	.delete_ctrl		= nvme_loop_del_ctrl,
	.get_subsysnqn		= nvmf_get_subsysnqn,
};

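/*
 * Create the I/O queues: negotiate the queue count with the controller,
 * initialize a target SQ per queue, set up the shared tag set and connect
 * queue, and issue a fabrics Connect on each I/O queue.
 */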
static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
{
	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
	int ret, i;

	ret = nvme_set_queue_count(&ctrl->ctrl, &opts->nr_io_queues);
	if (ret || !opts->nr_io_queues)
		return ret;

	dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n",
		opts->nr_io_queues);

	for (i = 1; i <= opts->nr_io_queues; i++) {
		ctrl->queues[i].ctrl = ctrl;
		ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq);
		if (ret)
			goto out_destroy_queues;

		ctrl->queue_count++;
	}

	memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
	ctrl->tag_set.ops = &nvme_loop_mq_ops;
	ctrl->tag_set.queue_depth = ctrl->ctrl.sqsize;
	ctrl->tag_set.reserved_tags = 1; /* fabric connect */
	ctrl->tag_set.numa_node = NUMA_NO_NODE;
	ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	ctrl->tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
		SG_CHUNK_SIZE * sizeof(struct scatterlist);
	ctrl->tag_set.driver_data = ctrl;
	ctrl->tag_set.nr_hw_queues = ctrl->queue_count - 1;
	ctrl->tag_set.timeout = NVME_IO_TIMEOUT;
	ctrl->ctrl.tagset = &ctrl->tag_set;

	ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
	if (ret)
		goto out_destroy_queues;

	ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
	if (IS_ERR(ctrl->ctrl.connect_q)) {
		ret = PTR_ERR(ctrl->ctrl.connect_q);
		goto out_free_tagset;
	}

	for (i = 1; i <= opts->nr_io_queues; i++) {
		ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
		if (ret)
			goto out_cleanup_connect_q;
	}

	return 0;

out_cleanup_connect_q:
	blk_cleanup_queue(ctrl->ctrl.connect_q);
out_free_tagset:
	blk_mq_free_tag_set(&ctrl->tag_set);
out_destroy_queues:
	for (i = 1; i < ctrl->queue_count; i++)
		nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
	return ret;
}

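/*
 * Fabrics ->create_ctrl for the loop transport: allocate and register the
 * host controller, bring up the admin queue, clamp queue_size to the
 * controller's MAXCMD, create I/O queues if requested, and move the
 * controller to LIVE.
 */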
static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev,
		struct nvmf_ctrl_options *opts)
{
	struct nvme_loop_ctrl *ctrl;
	bool changed;
	int ret;

	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		return ERR_PTR(-ENOMEM);
	ctrl->ctrl.opts = opts;
	INIT_LIST_HEAD(&ctrl->list);

	INIT_WORK(&ctrl->delete_work, nvme_loop_del_ctrl_work);
	INIT_WORK(&ctrl->reset_work, nvme_loop_reset_ctrl_work);

	ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_loop_ctrl_ops,
				0 /* no quirks, we're perfect! */);
	if (ret)
		goto out_put_ctrl;

	spin_lock_init(&ctrl->lock);

	ret = -ENOMEM;

	ctrl->ctrl.sqsize = opts->queue_size;
	ctrl->ctrl.kato = opts->kato;

	ctrl->queues = kcalloc(opts->nr_io_queues + 1, sizeof(*ctrl->queues),
			GFP_KERNEL);
	if (!ctrl->queues)
		goto out_uninit_ctrl;

	ret = nvme_loop_configure_admin_queue(ctrl);
	if (ret)
		goto out_free_queues;

	if (opts->queue_size > ctrl->ctrl.maxcmd) {
		/* warn if maxcmd is lower than queue_size */
		dev_warn(ctrl->ctrl.device,
			"queue_size %zu > ctrl maxcmd %u, clamping down\n",
			opts->queue_size, ctrl->ctrl.maxcmd);
		opts->queue_size = ctrl->ctrl.maxcmd;
	}

	if (opts->nr_io_queues) {
		ret = nvme_loop_create_io_queues(ctrl);
		if (ret)
			goto out_remove_admin_queue;
	}

	nvme_loop_init_iod(ctrl, &ctrl->async_event_iod, 0);

	dev_info(ctrl->ctrl.device,
		 "new ctrl: \"%s\"\n", ctrl->ctrl.opts->subsysnqn);

	kref_get(&ctrl->ctrl.kref);

	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
	WARN_ON_ONCE(!changed);

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_add_tail(&ctrl->list, &nvme_loop_ctrl_list);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	if (opts->nr_io_queues) {
		nvme_queue_scan(&ctrl->ctrl);
		nvme_queue_async_events(&ctrl->ctrl);
	}

	return &ctrl->ctrl;

out_remove_admin_queue:
	nvme_loop_destroy_admin_queue(ctrl);
out_free_queues:
	kfree(ctrl->queues);
out_uninit_ctrl:
	nvme_uninit_ctrl(&ctrl->ctrl);
out_put_ctrl:
	nvme_put_ctrl(&ctrl->ctrl);
	if (ret > 0)
		ret = -EIO;
	return ERR_PTR(ret);
}

static int nvme_loop_add_port(struct nvmet_port *port)
{
	/*
	 * XXX: disallow adding more than one port so there are no
	 * connection rejections when a subsystem is assigned to a port
	 * for which loop doesn't have a pointer.
	 * This scenario would be possible if we allowed more than one
	 * port to be added and a subsystem was assigned to a port other
	 * than nvmet_loop_port.
	 */

	if (nvmet_loop_port)
		return -EPERM;

	nvmet_loop_port = port;
	return 0;
}

static void nvme_loop_remove_port(struct nvmet_port *port)
{
	if (port == nvmet_loop_port)
		nvmet_loop_port = NULL;
}

static struct nvmet_fabrics_ops nvme_loop_ops = {
	.owner		= THIS_MODULE,
	.type		= NVMF_TRTYPE_LOOP,
	.add_port	= nvme_loop_add_port,
	.remove_port	= nvme_loop_remove_port,
	.queue_response = nvme_loop_queue_response,
	.delete_ctrl	= nvme_loop_delete_ctrl,
};

static struct nvmf_transport_ops nvme_loop_transport = {
	.name		= "loop",
	.create_ctrl	= nvme_loop_create_ctrl,
};

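/*
 * Module init/exit: register the loop transport with both the target core
 * (nvmet) and the host fabrics layer (nvmf); on unload, delete any remaining
 * controllers and flush outstanding work.
 */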
static int __init nvme_loop_init_module(void)
{
	int ret;

	ret = nvmet_register_transport(&nvme_loop_ops);
	if (ret)
		return ret;
	nvmf_register_transport(&nvme_loop_transport);
	return 0;
}

static void __exit nvme_loop_cleanup_module(void)
{
	struct nvme_loop_ctrl *ctrl, *next;

	nvmf_unregister_transport(&nvme_loop_transport);
	nvmet_unregister_transport(&nvme_loop_ops);

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_for_each_entry_safe(ctrl, next, &nvme_loop_ctrl_list, list)
		__nvme_loop_del_ctrl(ctrl);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	flush_scheduled_work();
}

module_init(nvme_loop_init_module);
module_exit(nvme_loop_cleanup_module);

MODULE_LICENSE("GPL v2");
MODULE_ALIAS("nvmet-transport-254"); /* 254 == NVMF_TRTYPE_LOOP */