xref: /openbmc/linux/drivers/nvme/target/passthru.c (revision ba8ff971)
// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe Over Fabrics Target Passthrough command implementation.
 *
 * Copyright (c) 2017-2018 Western Digital Corporation or its
 * affiliates.
 * Copyright (c) 2019-2020, Eideticom Inc.
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>

#include "../host/nvme.h"
#include "nvmet.h"

MODULE_IMPORT_NS(NVME_TARGET_PASSTHRU);

/*
 * xarray to maintain one passthru subsystem per nvme controller.
 */
static DEFINE_XARRAY(passthru_subsystems);

void nvmet_passthrough_override_cap(struct nvmet_ctrl *ctrl)
{
	/*
	 * Multiple command set support can only be declared if the underlying
	 * controller actually supports it.
	 */
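	/*
	 * Note on the constant below: CAP.CSS occupies bits 44:37 of the CAP
	 * register, so bit 43 is CSS bit 6 (CSI), the flag that advertises
	 * support for one or more I/O command sets.
	 */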
	if (!nvme_multi_css(ctrl->subsys->passthru_ctrl))
		ctrl->cap &= ~(1ULL << 43);
}

static u16 nvmet_passthru_override_id_descs(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	u16 status = NVME_SC_SUCCESS;
	int pos, len;
	bool csi_seen = false;
	void *data;
	u8 csi;

	if (!ctrl->subsys->clear_ids)
		return status;

	data = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);
	if (!data)
		return NVME_SC_INTERNAL;

	status = nvmet_copy_from_sgl(req, 0, data, NVME_IDENTIFY_DATA_SIZE);
	if (status)
		goto out_free;

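	/*
	 * The payload is a list of namespace identification descriptors: a
	 * 4-byte header (nidt, nidl, two reserved bytes) followed by nidl
	 * bytes of identifier, terminated by a zero-length descriptor.
	 * "cur + 1" below therefore points just past the header, at the
	 * identifier itself.
	 */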
	for (pos = 0; pos < NVME_IDENTIFY_DATA_SIZE; pos += len) {
		struct nvme_ns_id_desc *cur = data + pos;

		if (cur->nidl == 0)
			break;
		if (cur->nidt == NVME_NIDT_CSI) {
			memcpy(&csi, cur + 1, NVME_NIDT_CSI_LEN);
			csi_seen = true;
			break;
		}
		len = sizeof(struct nvme_ns_id_desc) + cur->nidl;
	}

	memset(data, 0, NVME_IDENTIFY_DATA_SIZE);
	if (csi_seen) {
		struct nvme_ns_id_desc *cur = data;

		cur->nidt = NVME_NIDT_CSI;
		cur->nidl = NVME_NIDT_CSI_LEN;
		memcpy(cur + 1, &csi, NVME_NIDT_CSI_LEN);
	}
	status = nvmet_copy_to_sgl(req, 0, data, NVME_IDENTIFY_DATA_SIZE);
out_free:
	kfree(data);
	return status;
}

static u16 nvmet_passthru_override_id_ctrl(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvme_ctrl *pctrl = ctrl->subsys->passthru_ctrl;
	u16 status = NVME_SC_SUCCESS;
	struct nvme_id_ctrl *id;
	unsigned int max_hw_sectors;
	int page_shift;

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id)
		return NVME_SC_INTERNAL;

	status = nvmet_copy_from_sgl(req, 0, id, sizeof(*id));
	if (status)
		goto out_free;

	id->cntlid = cpu_to_le16(ctrl->cntlid);
	id->ver = cpu_to_le32(ctrl->subsys->ver);

	/*
	 * The passthru NVMe driver may have a limit on the number of segments
	 * which depends on the host's memory fragmentation. To solve this,
	 * cap mdts so that a maximal transfer never needs more pages than the
	 * controller has segments.
	 */
	max_hw_sectors = min_not_zero(pctrl->max_segments << (PAGE_SHIFT - 9),
				      pctrl->max_hw_sectors);

	/*
	 * nvmet_passthru_map_sg is limited to using a single bio, so limit
	 * the mdts based on BIO_MAX_VECS as well.
	 */
	max_hw_sectors = min_not_zero(BIO_MAX_VECS << (PAGE_SHIFT - 9),
				      max_hw_sectors);

	page_shift = NVME_CAP_MPSMIN(ctrl->cap) + 12;

	id->mdts = ilog2(max_hw_sectors) + 9 - page_shift;
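	/*
	 * Worked example for the mdts computation above (illustrative values,
	 * not from a real controller): with PAGE_SHIFT == 12, max_hw_sectors
	 * clamped to 2048 512-byte sectors (1 MiB), and MPSMIN == 0 so
	 * page_shift == 12, mdts = ilog2(2048) + 9 - 12 = 8, i.e.
	 * 2^8 * 4 KiB = 1 MiB. ilog2() rounds down, so a non-power-of-two
	 * limit is advertised conservatively.
	 */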

	id->acl = 3;
	/*
	 * We export the fabrics controller's aerl limit; update this when
	 * passthru-based aerl support is added.
	 */
	id->aerl = NVMET_ASYNC_EVENTS - 1;

	/* emulate kas, as most PCIe ctrls don't support kas */
	id->kas = cpu_to_le16(NVMET_KAS);

	/* don't support host memory buffer */
	id->hmpre = 0;
	id->hmmin = 0;

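	/*
	 * sqes/cqes pack the maximum supported entry size in the upper nibble
	 * and the required (minimum) entry size in the lower nibble, each as
	 * a power of two: 0x66 means 2^6 = 64-byte SQEs and 0x44 means
	 * 2^4 = 16-byte CQEs, the fixed sizes used by the fabrics transports.
	 */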
	id->sqes = min_t(__u8, ((0x6 << 4) | 0x6), id->sqes);
	id->cqes = min_t(__u8, ((0x4 << 4) | 0x4), id->cqes);
	id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);

	/* don't support fused commands */
	id->fuses = 0;

	id->sgls = cpu_to_le32(1 << 0); /* we always support SGLs */
	if (ctrl->ops->flags & NVMF_KEYED_SGLS)
		id->sgls |= cpu_to_le32(1 << 2);
	if (req->port->inline_data_size)
		id->sgls |= cpu_to_le32(1 << 20);
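	/*
	 * In the SGLS field set above, bit 0 advertises basic SGL support,
	 * bit 2 the keyed SGL Data Block descriptor (used by RDMA
	 * transports), and bit 20 that the Address field may carry an
	 * offset, as needed when a command capsule carries in-capsule data.
	 */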

	/*
	 * When the passthru controller is set up using the nvme-loop
	 * transport, it will export the passthru ctrl's subsysnqn (that of
	 * the PCIe NVMe ctrl) and will then fail in nvme/host/core.c, in the
	 * nvme_init_subsystem()->nvme_active_ctrl() code path, with a
	 * duplicate ctrl subsysnqn. To prevent that, we mask the passthru
	 * ctrl's subsysnqn with the target ctrl's subsysnqn.
	 */
	memcpy(id->subnqn, ctrl->subsysnqn, sizeof(id->subnqn));

	/* use fabric id-ctrl values */
	id->ioccsz = cpu_to_le32((sizeof(struct nvme_command) +
				req->port->inline_data_size) / 16);
	id->iorcsz = cpu_to_le32(sizeof(struct nvme_completion) / 16);
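	/*
	 * Capsule sizes are in 16-byte units: the response capsule is just
	 * the 16-byte CQE, so iorcsz is 1. For example (assuming a port with
	 * an 8 KiB inline_data_size), ioccsz = (64 + 8192) / 16 = 516.
	 */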

	id->msdbd = ctrl->ops->msdbd;

	/* Support multipath connections with fabrics */
	id->cmic |= 1 << 1;

	/* Disable reservations, see nvmet_parse_passthru_io_cmd() */
	id->oncs &= cpu_to_le16(~NVME_CTRL_ONCS_RESERVATIONS);

	status = nvmet_copy_to_sgl(req, 0, id, sizeof(struct nvme_id_ctrl));

out_free:
	kfree(id);
	return status;
}

static u16 nvmet_passthru_override_id_ns(struct nvmet_req *req)
{
	u16 status = NVME_SC_SUCCESS;
	struct nvme_id_ns *id;
	int i;

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id)
		return NVME_SC_INTERNAL;

	status = nvmet_copy_from_sgl(req, 0, id, sizeof(struct nvme_id_ns));
	if (status)
		goto out_free;

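	/*
	 * nlbaf is a 0's based count, so nlbaf + 1 LBA formats follow. Any
	 * format with a non-zero metadata size is zeroed out so the host
	 * cannot select it, and bit 4 of flbas (metadata transferred at the
	 * end of the LBA) is cleared below for the same reason.
	 */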
	for (i = 0; i < (id->nlbaf + 1); i++)
		if (id->lbaf[i].ms)
			memset(&id->lbaf[i], 0, sizeof(id->lbaf[i]));

	id->flbas = id->flbas & ~(1 << 4);

	/*
	 * Presently the NVMe-oF target code does not support sending
	 * metadata, so we must disable it here. This should be updated
	 * once the target starts supporting metadata.
	 */
	id->mc = 0;

	if (req->sq->ctrl->subsys->clear_ids) {
		memset(id->nguid, 0, NVME_NIDT_NGUID_LEN);
		memset(id->eui64, 0, NVME_NIDT_EUI64_LEN);
	}

	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

out_free:
	kfree(id);
	return status;
}

static void nvmet_passthru_execute_cmd_work(struct work_struct *w)
{
	struct nvmet_req *req = container_of(w, struct nvmet_req, p.work);
	struct request *rq = req->p.rq;
	struct nvme_ctrl *ctrl = nvme_req(rq)->ctrl;
	u32 effects;
	int status;

	status = nvme_execute_passthru_rq(rq, &effects);

	if (status == NVME_SC_SUCCESS &&
	    req->cmd->common.opcode == nvme_admin_identify) {
		switch (req->cmd->identify.cns) {
		case NVME_ID_CNS_CTRL:
			nvmet_passthru_override_id_ctrl(req);
			break;
		case NVME_ID_CNS_NS:
			nvmet_passthru_override_id_ns(req);
			break;
		case NVME_ID_CNS_NS_DESC_LIST:
			nvmet_passthru_override_id_descs(req);
			break;
		}
	} else if (status < 0)
		status = NVME_SC_INTERNAL;

	req->cqe->result = nvme_req(rq)->result;
	nvmet_req_complete(req, status);
	blk_mq_free_request(rq);

	if (effects)
		nvme_passthru_end(ctrl, effects, req->cmd, status);
}

static enum rq_end_io_ret nvmet_passthru_req_done(struct request *rq,
						  blk_status_t blk_status)
{
	struct nvmet_req *req = rq->end_io_data;

	req->cqe->result = nvme_req(rq)->result;
	nvmet_req_complete(req, nvme_req(rq)->status);
	blk_mq_free_request(rq);
	return RQ_END_IO_NONE;
}

static int nvmet_passthru_map_sg(struct nvmet_req *req, struct request *rq)
{
	struct scatterlist *sg;
	struct bio *bio;
	int i;

	if (req->sg_cnt > BIO_MAX_VECS)
		return -EINVAL;

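	/*
	 * Small transfers use the bio and bio_vec array embedded in the
	 * request itself, avoiding an allocation on the fast path; larger
	 * transfers allocate a bio that releases itself on completion via
	 * bio_put.
	 */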
	if (nvmet_use_inline_bvec(req)) {
		bio = &req->p.inline_bio;
		bio_init(bio, NULL, req->inline_bvec,
			 ARRAY_SIZE(req->inline_bvec), req_op(rq));
	} else {
		bio = bio_alloc(NULL, bio_max_segs(req->sg_cnt), req_op(rq),
				GFP_KERNEL);
		bio->bi_end_io = bio_put;
	}

	for_each_sg(req->sg, sg, req->sg_cnt, i) {
		if (bio_add_pc_page(rq->q, bio, sg_page(sg), sg->length,
				    sg->offset) < sg->length) {
			nvmet_req_bio_put(req, bio);
			return -EINVAL;
		}
	}

	blk_rq_bio_prep(rq, bio, req->sg_cnt);

	return 0;
}

static void nvmet_passthru_execute_cmd(struct nvmet_req *req)
{
	struct nvme_ctrl *ctrl = nvmet_req_subsys(req)->passthru_ctrl;
	struct request_queue *q = ctrl->admin_q;
	struct nvme_ns *ns = NULL;
	struct request *rq = NULL;
	unsigned int timeout;
	u32 effects;
	u16 status;
	int ret;

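	/*
	 * qid 0 is the admin queue: admin commands run on the passthru
	 * ctrl's admin_q. I/O commands instead resolve their nsid to the
	 * backing namespace and run on its queue; each path has its own
	 * configurable timeout.
	 */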
	if (likely(req->sq->qid != 0)) {
		u32 nsid = le32_to_cpu(req->cmd->common.nsid);

		ns = nvme_find_get_ns(ctrl, nsid);
		if (unlikely(!ns)) {
			pr_err("failed to get passthru ns nsid:%u\n", nsid);
			status = NVME_SC_INVALID_NS | NVME_SC_DNR;
			goto out;
		}

		q = ns->queue;
		timeout = nvmet_req_subsys(req)->io_timeout;
	} else {
		timeout = nvmet_req_subsys(req)->admin_timeout;
	}

	rq = blk_mq_alloc_request(q, nvme_req_op(req->cmd), 0);
	if (IS_ERR(rq)) {
		status = NVME_SC_INTERNAL;
		goto out_put_ns;
	}
	nvme_init_request(rq, req->cmd);

	if (timeout)
		rq->timeout = timeout;

	if (req->sg_cnt) {
		ret = nvmet_passthru_map_sg(req, rq);
		if (unlikely(ret)) {
			status = NVME_SC_INTERNAL;
			goto out_put_req;
		}
	}

	/*
	 * If a command needs post-execution fixups, or there are any
	 * non-trivial effects, make sure to execute the command synchronously
	 * in a workqueue so that nvme_passthru_end gets called.
	 */
	effects = nvme_command_effects(ctrl, ns, req->cmd->common.opcode);
	if (req->p.use_workqueue ||
	    (effects & ~(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC))) {
		INIT_WORK(&req->p.work, nvmet_passthru_execute_cmd_work);
		req->p.rq = rq;
		queue_work(nvmet_wq, &req->p.work);
	} else {
		rq->end_io = nvmet_passthru_req_done;
		rq->end_io_data = req;
		blk_execute_rq_nowait(rq, false);
	}

	if (ns)
		nvme_put_ns(ns);

	return;

out_put_req:
	blk_mq_free_request(rq);
out_put_ns:
	if (ns)
		nvme_put_ns(ns);
out:
	nvmet_req_complete(req, status);
}

/*
 * We need to emulate Set Features (host behaviour support) to ensure that
 * the behaviour requested by the target's host matches the behaviour
 * already requested by the device's host, and to fail the command
 * otherwise.
 */
static void nvmet_passthru_set_host_behaviour(struct nvmet_req *req)
{
	struct nvme_ctrl *ctrl = nvmet_req_subsys(req)->passthru_ctrl;
	struct nvme_feat_host_behavior *host;
	u16 status = NVME_SC_INTERNAL;
	int ret;

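	/*
	 * Two copies are allocated: host[0] receives the device's current
	 * host behaviour via Get Features, host[1] the behaviour the
	 * target's host is trying to set; the two must match byte for byte
	 * for the command to succeed.
	 */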
	host = kzalloc(sizeof(*host) * 2, GFP_KERNEL);
	if (!host)
		goto out_complete_req;

	ret = nvme_get_features(ctrl, NVME_FEAT_HOST_BEHAVIOR, 0,
				host, sizeof(*host), NULL);
	if (ret)
		goto out_free_host;

	status = nvmet_copy_from_sgl(req, 0, &host[1], sizeof(*host));
	if (status)
		goto out_free_host;

	if (memcmp(&host[0], &host[1], sizeof(host[0]))) {
		pr_warn("target host has requested different behaviour from the local host\n");
		status = NVME_SC_INTERNAL;
	}

out_free_host:
	kfree(host);
out_complete_req:
	nvmet_req_complete(req, status);
}

static u16 nvmet_setup_passthru_command(struct nvmet_req *req)
{
	req->p.use_workqueue = false;
	req->execute = nvmet_passthru_execute_cmd;
	return NVME_SC_SUCCESS;
}

u16 nvmet_parse_passthru_io_cmd(struct nvmet_req *req)
{
	/* Reject any commands with non-sgl flags set (i.e. fused commands) */
	if (req->cmd->common.flags & ~NVME_CMD_SGL_ALL)
		return NVME_SC_INVALID_FIELD;

	switch (req->cmd->common.opcode) {
	case nvme_cmd_resv_register:
	case nvme_cmd_resv_report:
	case nvme_cmd_resv_acquire:
	case nvme_cmd_resv_release:
		/*
		 * Reservations cannot be supported properly because the
		 * underlying device has no way of differentiating different
		 * hosts that connect via fabrics. This could potentially be
		 * emulated in the future if regular targets grow support for
		 * this feature.
		 */
		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
	}

	return nvmet_setup_passthru_command(req);
}

/*
 * Only features that are emulated or specifically allowed in the list are
 * passed down to the controller. This function implements the allow list for
 * both get and set features.
 */
static u16 nvmet_passthru_get_set_features(struct nvmet_req *req)
{
	switch (le32_to_cpu(req->cmd->features.fid)) {
	case NVME_FEAT_ARBITRATION:
	case NVME_FEAT_POWER_MGMT:
	case NVME_FEAT_LBA_RANGE:
	case NVME_FEAT_TEMP_THRESH:
	case NVME_FEAT_ERR_RECOVERY:
	case NVME_FEAT_VOLATILE_WC:
	case NVME_FEAT_WRITE_ATOMIC:
	case NVME_FEAT_AUTO_PST:
	case NVME_FEAT_TIMESTAMP:
	case NVME_FEAT_HCTM:
	case NVME_FEAT_NOPSC:
	case NVME_FEAT_RRL:
	case NVME_FEAT_PLM_CONFIG:
	case NVME_FEAT_PLM_WINDOW:
	case NVME_FEAT_HOST_BEHAVIOR:
	case NVME_FEAT_SANITIZE:
	case NVME_FEAT_VENDOR_START ... NVME_FEAT_VENDOR_END:
		return nvmet_setup_passthru_command(req);

	case NVME_FEAT_ASYNC_EVENT:
		/* There is no support for forwarding ASYNC events */
	case NVME_FEAT_IRQ_COALESCE:
	case NVME_FEAT_IRQ_CONFIG:
		/* The IRQ settings will not apply to the target controller */
	case NVME_FEAT_HOST_MEM_BUF:
		/*
		 * Any HMB that's set will not be passed through and will
		 * not work as expected
		 */
	case NVME_FEAT_SW_PROGRESS:
		/*
		 * The Pre-Boot Software Load Count doesn't make much
		 * sense for a target to export
		 */
	case NVME_FEAT_RESV_MASK:
	case NVME_FEAT_RESV_PERSIST:
		/* No reservations, see nvmet_parse_passthru_io_cmd() */
	default:
		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
	}
}

u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req)
{
	/* Reject any commands with non-sgl flags set (i.e. fused commands) */
	if (req->cmd->common.flags & ~NVME_CMD_SGL_ALL)
		return NVME_SC_INVALID_FIELD;

	/*
	 * Passthru all vendor specific commands
	 */
	if (req->cmd->common.opcode >= nvme_admin_vendor_start)
		return nvmet_setup_passthru_command(req);

	switch (req->cmd->common.opcode) {
	case nvme_admin_async_event:
		req->execute = nvmet_execute_async_event;
		return NVME_SC_SUCCESS;
	case nvme_admin_keep_alive:
		/*
		 * Most PCIe ctrls don't support the keep alive cmd, so we
		 * route keep alive to the non-passthru mode. Change this
		 * once PCIe ctrls with keep alive support become available.
		 */
		req->execute = nvmet_execute_keep_alive;
		return NVME_SC_SUCCESS;
	case nvme_admin_set_features:
		switch (le32_to_cpu(req->cmd->features.fid)) {
		case NVME_FEAT_ASYNC_EVENT:
		case NVME_FEAT_KATO:
		case NVME_FEAT_NUM_QUEUES:
		case NVME_FEAT_HOST_ID:
			req->execute = nvmet_execute_set_features;
			return NVME_SC_SUCCESS;
		case NVME_FEAT_HOST_BEHAVIOR:
			req->execute = nvmet_passthru_set_host_behaviour;
			return NVME_SC_SUCCESS;
		default:
			return nvmet_passthru_get_set_features(req);
		}
		break;
	case nvme_admin_get_features:
		switch (le32_to_cpu(req->cmd->features.fid)) {
		case NVME_FEAT_ASYNC_EVENT:
		case NVME_FEAT_KATO:
		case NVME_FEAT_NUM_QUEUES:
		case NVME_FEAT_HOST_ID:
			req->execute = nvmet_execute_get_features;
			return NVME_SC_SUCCESS;
		default:
			return nvmet_passthru_get_set_features(req);
		}
		break;
	case nvme_admin_identify:
		switch (req->cmd->identify.cns) {
		case NVME_ID_CNS_CTRL:
			req->execute = nvmet_passthru_execute_cmd;
			req->p.use_workqueue = true;
			return NVME_SC_SUCCESS;
		case NVME_ID_CNS_CS_CTRL:
			switch (req->cmd->identify.csi) {
			case NVME_CSI_ZNS:
				req->execute = nvmet_passthru_execute_cmd;
				req->p.use_workqueue = true;
				return NVME_SC_SUCCESS;
			}
			return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
		case NVME_ID_CNS_NS:
			req->execute = nvmet_passthru_execute_cmd;
			req->p.use_workqueue = true;
			return NVME_SC_SUCCESS;
		case NVME_ID_CNS_CS_NS:
			switch (req->cmd->identify.csi) {
			case NVME_CSI_ZNS:
				req->execute = nvmet_passthru_execute_cmd;
				req->p.use_workqueue = true;
				return NVME_SC_SUCCESS;
			}
			return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
		default:
			return nvmet_setup_passthru_command(req);
		}
	case nvme_admin_get_log_page:
		return nvmet_setup_passthru_command(req);
	default:
		/* Reject commands not in the allowlist above */
		return nvmet_report_invalid_opcode(req);
	}
}

int nvmet_passthru_ctrl_enable(struct nvmet_subsys *subsys)
{
	struct nvme_ctrl *ctrl;
	struct file *file;
	int ret = -EINVAL;
	void *old;

	mutex_lock(&subsys->lock);
	if (!subsys->passthru_ctrl_path)
		goto out_unlock;
	if (subsys->passthru_ctrl)
		goto out_unlock;

	if (subsys->nr_namespaces) {
		pr_info("cannot enable both passthru and regular namespaces for a single subsystem\n");
		goto out_unlock;
	}

	file = filp_open(subsys->passthru_ctrl_path, O_RDWR, 0);
	if (IS_ERR(file)) {
		ret = PTR_ERR(file);
		goto out_unlock;
	}

	ctrl = nvme_ctrl_from_file(file);
	if (!ctrl) {
		pr_err("failed to open nvme controller %s\n",
		       subsys->passthru_ctrl_path);

		goto out_put_file;
	}

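	/*
	 * Claim the controller: xa_cmpxchg() installs this subsystem at
	 * index cntlid only if no entry is present, ensuring that a given
	 * NVMe controller backs at most one passthru subsystem at a time.
	 */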
	old = xa_cmpxchg(&passthru_subsystems, ctrl->cntlid, NULL,
			 subsys, GFP_KERNEL);
	if (xa_is_err(old)) {
		ret = xa_err(old);
		goto out_put_file;
	}

	if (old)
		goto out_put_file;

	subsys->passthru_ctrl = ctrl;
	subsys->ver = ctrl->vs;

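	/*
	 * NVMe over Fabrics is specified against NVMe 1.2.1 or later, so
	 * never advertise an older version than that.
	 */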
	if (subsys->ver < NVME_VS(1, 2, 1)) {
		pr_warn("nvme controller version is too old: %llu.%llu.%llu, advertising 1.2.1\n",
			NVME_MAJOR(subsys->ver), NVME_MINOR(subsys->ver),
			NVME_TERTIARY(subsys->ver));
		subsys->ver = NVME_VS(1, 2, 1);
	}
	nvme_get_ctrl(ctrl);
	__module_get(subsys->passthru_ctrl->ops->module);
	ret = 0;

out_put_file:
	filp_close(file, NULL);
out_unlock:
	mutex_unlock(&subsys->lock);
	return ret;
}

static void __nvmet_passthru_ctrl_disable(struct nvmet_subsys *subsys)
{
	if (subsys->passthru_ctrl) {
		xa_erase(&passthru_subsystems, subsys->passthru_ctrl->cntlid);
		module_put(subsys->passthru_ctrl->ops->module);
		nvme_put_ctrl(subsys->passthru_ctrl);
	}
	subsys->passthru_ctrl = NULL;
	subsys->ver = NVMET_DEFAULT_VS;
}

void nvmet_passthru_ctrl_disable(struct nvmet_subsys *subsys)
{
	mutex_lock(&subsys->lock);
	__nvmet_passthru_ctrl_disable(subsys);
	mutex_unlock(&subsys->lock);
}

void nvmet_passthru_subsys_free(struct nvmet_subsys *subsys)
{
	mutex_lock(&subsys->lock);
	__nvmet_passthru_ctrl_disable(subsys);
	mutex_unlock(&subsys->lock);
	kfree(subsys->passthru_ctrl_path);
}
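
/*
 * Usage sketch (an assumption for illustration, not part of this file): a
 * passthru subsystem is typically wired up through nvmet configfs, roughly:
 *
 *   cd /sys/kernel/config/nvmet/subsystems
 *   mkdir testnqn && cd testnqn/passthru
 *   echo -n /dev/nvme0 > device_path
 *   echo 1 > enable
 *
 * Writing "1" to enable ends up in nvmet_passthru_ctrl_enable() above; the
 * path must name an NVMe character device, since nvme_ctrl_from_file()
 * rejects anything else. "testnqn" is an arbitrary example subsystem name.
 */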
659