xref: /openbmc/linux/drivers/nvme/target/loop.c (revision e4f9fffb)
// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe over Fabrics loopback device.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/scatterlist.h>
#include <linux/blk-mq.h>
#include <linux/nvme.h>
#include <linux/module.h>
#include <linux/parser.h>
#include "nvmet.h"
#include "../host/nvme.h"
#include "../host/fabrics.h"

#define NVME_LOOP_MAX_SEGMENTS		256

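/*
 * Per-request context shared between the host and target sides of the
 * loop transport: the NVMe command and completion, the nvmet request
 * that executes it, a work item for running the command in nvmet_wq
 * context, and an inline scatterlist for small data transfers.
 */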
struct nvme_loop_iod {
	struct nvme_request	nvme_req;
	struct nvme_command	cmd;
	struct nvme_completion	cqe;
	struct nvmet_req	req;
	struct nvme_loop_queue	*queue;
	struct work_struct	work;
	struct sg_table		sg_table;
	struct scatterlist	first_sgl[];
};

struct nvme_loop_ctrl {
	struct nvme_loop_queue	*queues;

	struct blk_mq_tag_set	admin_tag_set;

	struct list_head	list;
	struct blk_mq_tag_set	tag_set;
	struct nvme_loop_iod	async_event_iod;
	struct nvme_ctrl	ctrl;

	struct nvmet_port	*port;
};

static inline struct nvme_loop_ctrl *to_loop_ctrl(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_loop_ctrl, ctrl);
}

enum nvme_loop_queue_flags {
	NVME_LOOP_Q_LIVE	= 0,
};

struct nvme_loop_queue {
	struct nvmet_cq		nvme_cq;
	struct nvmet_sq		nvme_sq;
	struct nvme_loop_ctrl	*ctrl;
	unsigned long		flags;
};

static LIST_HEAD(nvme_loop_ports);
static DEFINE_MUTEX(nvme_loop_ports_mutex);

static LIST_HEAD(nvme_loop_ctrl_list);
static DEFINE_MUTEX(nvme_loop_ctrl_mutex);

static void nvme_loop_queue_response(struct nvmet_req *nvme_req);
static void nvme_loop_delete_ctrl(struct nvmet_ctrl *ctrl);

static const struct nvmet_fabrics_ops nvme_loop_ops;

static inline int nvme_loop_queue_idx(struct nvme_loop_queue *queue)
{
	return queue - queue->ctrl->queues;
}

static void nvme_loop_complete_rq(struct request *req)
{
	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);

	sg_free_table_chained(&iod->sg_table, NVME_INLINE_SG_CNT);
	nvme_complete_rq(req);
}

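/*
 * Return the blk-mq tag set backing a given loop queue: the admin tag
 * set for queue 0, otherwise the I/O tag set (whose hardware queues are
 * offset by one relative to the loop queue index).
 */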
static struct blk_mq_tags *nvme_loop_tagset(struct nvme_loop_queue *queue)
{
	u32 queue_idx = nvme_loop_queue_idx(queue);

	if (queue_idx == 0)
		return queue->ctrl->admin_tag_set.tags[queue_idx];
	return queue->ctrl->tag_set.tags[queue_idx - 1];
}

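/*
 * Target-side completion path: the nvmet core calls this ->queue_response
 * handler once the loop target has finished a command.  AEN completions
 * have no struct request and are handed straight to the host core; all
 * other completions are matched to their host request by command_id.
 */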
static void nvme_loop_queue_response(struct nvmet_req *req)
{
	struct nvme_loop_queue *queue =
		container_of(req->sq, struct nvme_loop_queue, nvme_sq);
	struct nvme_completion *cqe = req->cqe;

	/*
	 * AEN requests are special as they don't time out and can
	 * survive any kind of queue freeze and often don't respond to
	 * aborts.  We don't even bother to allocate a struct request
	 * for them but rather special case them here.
	 */
	if (unlikely(nvme_is_aen_req(nvme_loop_queue_idx(queue),
				     cqe->command_id))) {
		nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
				&cqe->result);
	} else {
		struct request *rq;

		rq = nvme_find_rq(nvme_loop_tagset(queue), cqe->command_id);
		if (!rq) {
			dev_err(queue->ctrl->ctrl.device,
				"got bad command_id %#x on queue %d\n",
				cqe->command_id, nvme_loop_queue_idx(queue));
			return;
		}

		if (!nvme_try_complete_req(rq, cqe->status, cqe->result))
			nvme_loop_complete_rq(rq);
	}
}

static void nvme_loop_execute_work(struct work_struct *work)
{
	struct nvme_loop_iod *iod =
		container_of(work, struct nvme_loop_iod, work);

	iod->req.execute(&iod->req);
}

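/*
 * Host-side blk-mq ->queue_rq handler: set up the NVMe command for the
 * request, initialize the embedded nvmet request against the loop
 * queue's SQ/CQ, map the request data into a (possibly chained)
 * scatterlist and hand execution off to nvmet_wq.
 */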
static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
		const struct blk_mq_queue_data *bd)
{
	struct nvme_ns *ns = hctx->queue->queuedata;
	struct nvme_loop_queue *queue = hctx->driver_data;
	struct request *req = bd->rq;
	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
	bool queue_ready = test_bit(NVME_LOOP_Q_LIVE, &queue->flags);
	blk_status_t ret;

	if (!nvme_check_ready(&queue->ctrl->ctrl, req, queue_ready))
		return nvme_fail_nonready_command(&queue->ctrl->ctrl, req);

	ret = nvme_setup_cmd(ns, req);
	if (ret)
		return ret;

	nvme_start_request(req);
	iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;
	iod->req.port = queue->ctrl->port;
	if (!nvmet_req_init(&iod->req, &queue->nvme_cq,
			&queue->nvme_sq, &nvme_loop_ops))
		return BLK_STS_OK;

	if (blk_rq_nr_phys_segments(req)) {
		iod->sg_table.sgl = iod->first_sgl;
		if (sg_alloc_table_chained(&iod->sg_table,
				blk_rq_nr_phys_segments(req),
				iod->sg_table.sgl, NVME_INLINE_SG_CNT)) {
			nvme_cleanup_cmd(req);
			return BLK_STS_RESOURCE;
		}

		iod->req.sg = iod->sg_table.sgl;
		iod->req.sg_cnt = blk_rq_map_sg(req->q, req, iod->sg_table.sgl);
		iod->req.transfer_len = blk_rq_payload_bytes(req);
	}

	queue_work(nvmet_wq, &iod->work);
	return BLK_STS_OK;
}

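/*
 * Submit an Asynchronous Event Request.  AERs bypass blk-mq entirely:
 * the command is built in the pre-allocated async_event_iod with the
 * reserved command id NVME_AQ_BLK_MQ_DEPTH, which nvme_is_aen_req()
 * recognizes again on the completion side.
 */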
static void nvme_loop_submit_async_event(struct nvme_ctrl *arg)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(arg);
	struct nvme_loop_queue *queue = &ctrl->queues[0];
	struct nvme_loop_iod *iod = &ctrl->async_event_iod;

	memset(&iod->cmd, 0, sizeof(iod->cmd));
	iod->cmd.common.opcode = nvme_admin_async_event;
	iod->cmd.common.command_id = NVME_AQ_BLK_MQ_DEPTH;
	iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;

	if (!nvmet_req_init(&iod->req, &queue->nvme_cq, &queue->nvme_sq,
			&nvme_loop_ops)) {
		dev_err(ctrl->ctrl.device, "failed async event work\n");
		return;
	}

	queue_work(nvmet_wq, &iod->work);
}

static int nvme_loop_init_iod(struct nvme_loop_ctrl *ctrl,
		struct nvme_loop_iod *iod, unsigned int queue_idx)
{
	iod->req.cmd = &iod->cmd;
	iod->req.cqe = &iod->cqe;
	iod->queue = &ctrl->queues[queue_idx];
	INIT_WORK(&iod->work, nvme_loop_execute_work);
	return 0;
}

static int nvme_loop_init_request(struct blk_mq_tag_set *set,
		struct request *req, unsigned int hctx_idx,
		unsigned int numa_node)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(set->driver_data);
	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);

	nvme_req(req)->ctrl = &ctrl->ctrl;
	nvme_req(req)->cmd = &iod->cmd;
	return nvme_loop_init_iod(ctrl, blk_mq_rq_to_pdu(req),
			(set == &ctrl->tag_set) ? hctx_idx + 1 : 0);
}

static struct lock_class_key loop_hctx_fq_lock_key;

static int nvme_loop_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(data);
	struct nvme_loop_queue *queue = &ctrl->queues[hctx_idx + 1];

	BUG_ON(hctx_idx >= ctrl->ctrl.queue_count);

	/*
	 * flush_end_io() can be called recursively for us, so use our own
	 * lock class key to avoid a false-positive lockdep report of
	 * recursive locking.  This also lets us drop the dynamically
	 * allocated lock class for each flush queue, which could cause a
	 * horrible boot delay.
	 */
	blk_mq_hctx_set_fq_lock_class(hctx, &loop_hctx_fq_lock_key);

	hctx->driver_data = queue;
	return 0;
}

static int nvme_loop_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(data);
	struct nvme_loop_queue *queue = &ctrl->queues[0];

	BUG_ON(hctx_idx != 0);

	hctx->driver_data = queue;
	return 0;
}

static const struct blk_mq_ops nvme_loop_mq_ops = {
	.queue_rq	= nvme_loop_queue_rq,
	.complete	= nvme_loop_complete_rq,
	.init_request	= nvme_loop_init_request,
	.init_hctx	= nvme_loop_init_hctx,
};

static const struct blk_mq_ops nvme_loop_admin_mq_ops = {
	.queue_rq	= nvme_loop_queue_rq,
	.complete	= nvme_loop_complete_rq,
	.init_request	= nvme_loop_init_request,
	.init_hctx	= nvme_loop_init_admin_hctx,
};

static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
{
	if (!test_and_clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags))
		return;
	/*
	 * Requests may have been added after the admin queue was
	 * stopped/quiesced, so unquiesce it now to flush those
	 * requests through to completion.
	 */
	nvme_unquiesce_admin_queue(&ctrl->ctrl);

	nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
	nvme_remove_admin_tag_set(&ctrl->ctrl);
}

static void nvme_loop_free_ctrl(struct nvme_ctrl *nctrl)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(nctrl);

	if (list_empty(&ctrl->list))
		goto free_ctrl;

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_del(&ctrl->list);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	if (nctrl->tagset)
		nvme_remove_io_tag_set(nctrl);
	kfree(ctrl->queues);
	nvmf_free_options(nctrl->opts);
free_ctrl:
	kfree(ctrl);
}

static void nvme_loop_destroy_io_queues(struct nvme_loop_ctrl *ctrl)
{
	int i;

	for (i = 1; i < ctrl->ctrl.queue_count; i++) {
		clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags);
		nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
	}
	ctrl->ctrl.queue_count = 1;
	/*
	 * Requests may have been added after the I/O queues were
	 * stopped/quiesced, so unquiesce them now to flush those
	 * requests through to completion.
	 */
	nvme_unquiesce_io_queues(&ctrl->ctrl);
}

static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl)
{
	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
	unsigned int nr_io_queues;
	int ret, i;

	nr_io_queues = min(opts->nr_io_queues, num_online_cpus());
	ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
	if (ret || !nr_io_queues)
		return ret;

	dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n", nr_io_queues);

	for (i = 1; i <= nr_io_queues; i++) {
		ctrl->queues[i].ctrl = ctrl;
		ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq);
		if (ret)
			goto out_destroy_queues;

		ctrl->ctrl.queue_count++;
	}

	return 0;

out_destroy_queues:
	nvme_loop_destroy_io_queues(ctrl);
	return ret;
}

static int nvme_loop_connect_io_queues(struct nvme_loop_ctrl *ctrl)
{
	int i, ret;

	for (i = 1; i < ctrl->ctrl.queue_count; i++) {
		ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
		if (ret)
			return ret;
		set_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags);
	}

	return 0;
}

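/*
 * Bring up the admin queue: initialize the target-side SQ for queue 0,
 * allocate the admin tag set, issue the fabrics Connect, enable the
 * controller and finish controller initialization.
 */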
static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
{
	int error;

	ctrl->queues[0].ctrl = ctrl;
	error = nvmet_sq_init(&ctrl->queues[0].nvme_sq);
	if (error)
		return error;
	ctrl->ctrl.queue_count = 1;

	error = nvme_alloc_admin_tag_set(&ctrl->ctrl, &ctrl->admin_tag_set,
			&nvme_loop_admin_mq_ops,
			sizeof(struct nvme_loop_iod) +
			NVME_INLINE_SG_CNT * sizeof(struct scatterlist));
	if (error)
		goto out_free_sq;

	/* reset stopped state for the fresh admin queue */
	clear_bit(NVME_CTRL_ADMIN_Q_STOPPED, &ctrl->ctrl.flags);

	error = nvmf_connect_admin_queue(&ctrl->ctrl);
	if (error)
		goto out_cleanup_tagset;

	set_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);

	error = nvme_enable_ctrl(&ctrl->ctrl);
	if (error)
		goto out_cleanup_tagset;

	ctrl->ctrl.max_hw_sectors =
		(NVME_LOOP_MAX_SEGMENTS - 1) << PAGE_SECTORS_SHIFT;

	nvme_unquiesce_admin_queue(&ctrl->ctrl);

	error = nvme_init_ctrl_finish(&ctrl->ctrl, false);
	if (error)
		goto out_cleanup_tagset;

	return 0;

out_cleanup_tagset:
	clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);
	nvme_remove_admin_tag_set(&ctrl->ctrl);
out_free_sq:
	nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
	return error;
}

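/*
 * Tear down the controller: quiesce and cancel outstanding I/O, destroy
 * the I/O queues, disable the controller if it is still live, and
 * finally take down the admin queue.
 */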
static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl)
{
	if (ctrl->ctrl.queue_count > 1) {
		nvme_quiesce_io_queues(&ctrl->ctrl);
		nvme_cancel_tagset(&ctrl->ctrl);
		nvme_loop_destroy_io_queues(ctrl);
	}

	nvme_quiesce_admin_queue(&ctrl->ctrl);
	if (ctrl->ctrl.state == NVME_CTRL_LIVE)
		nvme_disable_ctrl(&ctrl->ctrl, true);

	nvme_cancel_admin_tagset(&ctrl->ctrl);
	nvme_loop_destroy_admin_queue(ctrl);
}

static void nvme_loop_delete_ctrl_host(struct nvme_ctrl *ctrl)
{
	nvme_loop_shutdown_ctrl(to_loop_ctrl(ctrl));
}

static void nvme_loop_delete_ctrl(struct nvmet_ctrl *nctrl)
{
	struct nvme_loop_ctrl *ctrl;

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_for_each_entry(ctrl, &nvme_loop_ctrl_list, list) {
		if (ctrl->ctrl.cntlid == nctrl->cntlid)
			nvme_delete_ctrl(&ctrl->ctrl);
	}
	mutex_unlock(&nvme_loop_ctrl_mutex);
}

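/*
 * Controller reset: stop the controller and tear down all queues, then
 * reconnect by rebuilding the admin and I/O queues and moving the
 * controller back to LIVE.  On failure the controller is uninitialized
 * and removed.
 */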
static void nvme_loop_reset_ctrl_work(struct work_struct *work)
{
	struct nvme_loop_ctrl *ctrl =
		container_of(work, struct nvme_loop_ctrl, ctrl.reset_work);
	int ret;

	nvme_stop_ctrl(&ctrl->ctrl);
	nvme_loop_shutdown_ctrl(ctrl);

	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
		if (ctrl->ctrl.state != NVME_CTRL_DELETING &&
		    ctrl->ctrl.state != NVME_CTRL_DELETING_NOIO)
			/* state change failure for non-deleted ctrl? */
			WARN_ON_ONCE(1);
		return;
	}

	ret = nvme_loop_configure_admin_queue(ctrl);
	if (ret)
		goto out_disable;

	ret = nvme_loop_init_io_queues(ctrl);
	if (ret)
		goto out_destroy_admin;

	ret = nvme_loop_connect_io_queues(ctrl);
	if (ret)
		goto out_destroy_io;

	blk_mq_update_nr_hw_queues(&ctrl->tag_set,
			ctrl->ctrl.queue_count - 1);

	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE))
		WARN_ON_ONCE(1);

	nvme_start_ctrl(&ctrl->ctrl);

	return;

out_destroy_io:
	nvme_loop_destroy_io_queues(ctrl);
out_destroy_admin:
	nvme_loop_destroy_admin_queue(ctrl);
out_disable:
	dev_warn(ctrl->ctrl.device, "Removing after reset failure\n");
	nvme_uninit_ctrl(&ctrl->ctrl);
}

static const struct nvme_ctrl_ops nvme_loop_ctrl_ops = {
	.name			= "loop",
	.module			= THIS_MODULE,
	.flags			= NVME_F_FABRICS,
	.reg_read32		= nvmf_reg_read32,
	.reg_read64		= nvmf_reg_read64,
	.reg_write32		= nvmf_reg_write32,
	.free_ctrl		= nvme_loop_free_ctrl,
	.submit_async_event	= nvme_loop_submit_async_event,
	.delete_ctrl		= nvme_loop_delete_ctrl_host,
	.get_address		= nvmf_get_address,
};

static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
{
	int ret;

	ret = nvme_loop_init_io_queues(ctrl);
	if (ret)
		return ret;

	ret = nvme_alloc_io_tag_set(&ctrl->ctrl, &ctrl->tag_set,
			&nvme_loop_mq_ops, 1,
			sizeof(struct nvme_loop_iod) +
			NVME_INLINE_SG_CNT * sizeof(struct scatterlist));
	if (ret)
		goto out_destroy_queues;

	ret = nvme_loop_connect_io_queues(ctrl);
	if (ret)
		goto out_cleanup_tagset;

	return 0;

out_cleanup_tagset:
	nvme_remove_io_tag_set(&ctrl->ctrl);
out_destroy_queues:
	nvme_loop_destroy_io_queues(ctrl);
	return ret;
}

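/*
 * Pick the nvmet port to connect to: the one whose traddr matches the
 * connect options, or the first registered loop port if no transport
 * address was specified.
 */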
static struct nvmet_port *nvme_loop_find_port(struct nvme_ctrl *ctrl)
{
	struct nvmet_port *p, *found = NULL;

	mutex_lock(&nvme_loop_ports_mutex);
	list_for_each_entry(p, &nvme_loop_ports, entry) {
		/* if no transport address is specified use the first port */
		if ((ctrl->opts->mask & NVMF_OPT_TRADDR) &&
		    strcmp(ctrl->opts->traddr, p->disc_addr.traddr))
			continue;
		found = p;
		break;
	}
	mutex_unlock(&nvme_loop_ports_mutex);
	return found;
}

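/*
 * ->create_ctrl entry point for the "loop" fabrics transport: allocate
 * and initialize the controller, bring up the admin queue, clamp the
 * queue size to the controller's MAXCMD, create the I/O queues and mark
 * the controller live.
 */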
static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev,
		struct nvmf_ctrl_options *opts)
{
	struct nvme_loop_ctrl *ctrl;
	int ret;

	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		return ERR_PTR(-ENOMEM);
	ctrl->ctrl.opts = opts;
	INIT_LIST_HEAD(&ctrl->list);

	INIT_WORK(&ctrl->ctrl.reset_work, nvme_loop_reset_ctrl_work);

	ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_loop_ctrl_ops,
				0 /* no quirks, we're perfect! */);
	if (ret) {
		kfree(ctrl);
		goto out;
	}

	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING))
		WARN_ON_ONCE(1);

	ret = -ENOMEM;

	ctrl->ctrl.kato = opts->kato;
	ctrl->port = nvme_loop_find_port(&ctrl->ctrl);

	ctrl->queues = kcalloc(opts->nr_io_queues + 1, sizeof(*ctrl->queues),
			GFP_KERNEL);
	if (!ctrl->queues)
		goto out_uninit_ctrl;

	ret = nvme_loop_configure_admin_queue(ctrl);
	if (ret)
		goto out_free_queues;

	if (opts->queue_size > ctrl->ctrl.maxcmd) {
		/* warn if maxcmd is lower than queue_size */
		dev_warn(ctrl->ctrl.device,
			"queue_size %zu > ctrl maxcmd %u, clamping down\n",
			opts->queue_size, ctrl->ctrl.maxcmd);
		opts->queue_size = ctrl->ctrl.maxcmd;
	}
	ctrl->ctrl.sqsize = opts->queue_size - 1;

	if (opts->nr_io_queues) {
		ret = nvme_loop_create_io_queues(ctrl);
		if (ret)
			goto out_remove_admin_queue;
	}

	nvme_loop_init_iod(ctrl, &ctrl->async_event_iod, 0);

	dev_info(ctrl->ctrl.device,
		 "new ctrl: \"%s\"\n", ctrl->ctrl.opts->subsysnqn);

	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE))
		WARN_ON_ONCE(1);

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_add_tail(&ctrl->list, &nvme_loop_ctrl_list);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	nvme_start_ctrl(&ctrl->ctrl);

	return &ctrl->ctrl;

out_remove_admin_queue:
	nvme_loop_destroy_admin_queue(ctrl);
out_free_queues:
	kfree(ctrl->queues);
out_uninit_ctrl:
	nvme_uninit_ctrl(&ctrl->ctrl);
	nvme_put_ctrl(&ctrl->ctrl);
out:
	if (ret > 0)
		ret = -EIO;
	return ERR_PTR(ret);
}

static int nvme_loop_add_port(struct nvmet_port *port)
{
	mutex_lock(&nvme_loop_ports_mutex);
	list_add_tail(&port->entry, &nvme_loop_ports);
	mutex_unlock(&nvme_loop_ports_mutex);
	return 0;
}

static void nvme_loop_remove_port(struct nvmet_port *port)
{
	mutex_lock(&nvme_loop_ports_mutex);
	list_del_init(&port->entry);
	mutex_unlock(&nvme_loop_ports_mutex);

	/*
	 * Ensure any ctrls that are in the process of being
	 * deleted are in fact deleted before we return
	 * and free the port. This is to prevent active
	 * ctrls from using a port after it's freed.
	 */
	flush_workqueue(nvme_delete_wq);
}

static const struct nvmet_fabrics_ops nvme_loop_ops = {
	.owner		= THIS_MODULE,
	.type		= NVMF_TRTYPE_LOOP,
	.add_port	= nvme_loop_add_port,
	.remove_port	= nvme_loop_remove_port,
	.queue_response = nvme_loop_queue_response,
	.delete_ctrl	= nvme_loop_delete_ctrl,
};

static struct nvmf_transport_ops nvme_loop_transport = {
	.name		= "loop",
	.module		= THIS_MODULE,
	.create_ctrl	= nvme_loop_create_ctrl,
	.allowed_opts	= NVMF_OPT_TRADDR,
};

static int __init nvme_loop_init_module(void)
{
	int ret;

	ret = nvmet_register_transport(&nvme_loop_ops);
	if (ret)
		return ret;

	ret = nvmf_register_transport(&nvme_loop_transport);
	if (ret)
		nvmet_unregister_transport(&nvme_loop_ops);

	return ret;
}

static void __exit nvme_loop_cleanup_module(void)
{
	struct nvme_loop_ctrl *ctrl, *next;

	nvmf_unregister_transport(&nvme_loop_transport);
	nvmet_unregister_transport(&nvme_loop_ops);

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_for_each_entry_safe(ctrl, next, &nvme_loop_ctrl_list, list)
		nvme_delete_ctrl(&ctrl->ctrl);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	flush_workqueue(nvme_delete_wq);
}

module_init(nvme_loop_init_module);
module_exit(nvme_loop_cleanup_module);

MODULE_LICENSE("GPL v2");
MODULE_ALIAS("nvmet-transport-254"); /* 254 == NVMF_TRTYPE_LOOP */