// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe Over Fabrics Target Passthrough command implementation.
 *
 * Copyright (c) 2017-2018 Western Digital Corporation or its
 * affiliates.
 * Copyright (c) 2019-2020, Eideticom Inc.
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>

#include "../host/nvme.h"
#include "nvmet.h"

MODULE_IMPORT_NS(NVME_TARGET_PASSTHRU);

/*
 * xarray to maintain one passthru subsystem per nvme controller.
 */
static DEFINE_XARRAY(passthru_subsystems);

void nvmet_passthrough_override_cap(struct nvmet_ctrl *ctrl)
{
	/*
	 * Multiple command set support can only be declared if the underlying
	 * controller actually supports it.
	 */
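	/* Bit 43 of CAP is CSS bit 6, which advertises I/O Command Set (CSI) support */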
	if (!nvme_multi_css(ctrl->subsys->passthru_ctrl))
		ctrl->cap &= ~(1ULL << 43);
}

static u16 nvmet_passthru_override_id_ctrl(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvme_ctrl *pctrl = ctrl->subsys->passthru_ctrl;
	u16 status = NVME_SC_SUCCESS;
	struct nvme_id_ctrl *id;
	unsigned int max_hw_sectors;
	int page_shift;

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id)
		return NVME_SC_INTERNAL;

	status = nvmet_copy_from_sgl(req, 0, id, sizeof(*id));
	if (status)
		goto out_free;

	id->cntlid = cpu_to_le16(ctrl->cntlid);
	id->ver = cpu_to_le32(ctrl->subsys->ver);

	/*
	 * The passthru NVMe driver may have a limit on the number of segments
	 * which depends on the host's memory fragmentation. To solve this,
	 * ensure mdts is limited to a number of pages equal to the maximum
	 * number of segments.
	 */
	max_hw_sectors = min_not_zero(pctrl->max_segments << (PAGE_SHIFT - 9),
				      pctrl->max_hw_sectors);

	/*
	 * nvmet_passthru_map_sg is limited to using a single bio, so limit
	 * the mdts based on BIO_MAX_VECS as well.
	 */
	max_hw_sectors = min_not_zero(BIO_MAX_VECS << (PAGE_SHIFT - 9),
				      max_hw_sectors);

	page_shift = NVME_CAP_MPSMIN(ctrl->cap) + 12;

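	/*
	 * MDTS is a power of two in units of the CAP.MPSMIN page size: the
	 * maximum transfer is 2^mdts pages of 2^page_shift bytes each.
	 */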
	id->mdts = ilog2(max_hw_sectors) + 9 - page_shift;

	id->acl = 3;
	/*
	 * We export the aerl limit of the fabrics controller; update this
	 * when passthru-based aerl support is added.
	 */
	id->aerl = NVMET_ASYNC_EVENTS - 1;

	/* emulate kas, as most PCIe ctrls don't support it */
	id->kas = cpu_to_le16(NVMET_KAS);

	/* don't support host memory buffer */
	id->hmpre = 0;
	id->hmmin = 0;

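	/*
	 * SQES/CQES pack the max (bits 7:4) and required (bits 3:0) entry
	 * sizes as a log2; fabrics mandates 64-byte SQEs and 16-byte CQEs.
	 */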
	id->sqes = min_t(__u8, ((0x6 << 4) | 0x6), id->sqes);
	id->cqes = min_t(__u8, ((0x4 << 4) | 0x4), id->cqes);
	id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);

	/* don't support fuse commands */
	id->fuses = 0;

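	/*
	 * SGLS bit 0: SGLs supported; bit 2: keyed SGL Data Block
	 * descriptors; bit 20: SGL Data Block descriptors with a byte
	 * offset, used for in-capsule data.
	 */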
	id->sgls = cpu_to_le32(1 << 0); /* we always support SGLs */
	if (ctrl->ops->flags & NVMF_KEYED_SGLS)
		id->sgls |= cpu_to_le32(1 << 2);
	if (req->port->inline_data_size)
		id->sgls |= cpu_to_le32(1 << 20);

	/*
	 * When a passthru controller is set up using the nvme-loop transport,
	 * it would export the passthru ctrl's subsysnqn (that of the PCIe
	 * NVMe ctrl) and then fail in nvme_init_subsystem()->
	 * nvme_active_ctrl() in nvme/host/core.c with a duplicate ctrl
	 * subsysnqn. To prevent that, mask the passthru ctrl's subsysnqn
	 * with the target ctrl's subsysnqn.
	 */
	memcpy(id->subnqn, ctrl->subsysnqn, sizeof(id->subnqn));

	/* use fabric id-ctrl values */
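	/* IOCCSZ/IORCSZ are expressed in 16-byte units */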
	id->ioccsz = cpu_to_le32((sizeof(struct nvme_command) +
				req->port->inline_data_size) / 16);
	id->iorcsz = cpu_to_le32(sizeof(struct nvme_completion) / 16);

	id->msdbd = ctrl->ops->msdbd;

	/* Support multipath connections with fabrics */
	id->cmic |= 1 << 1;

	/* Disable reservations, see nvmet_parse_passthru_io_cmd() */
	id->oncs &= cpu_to_le16(~NVME_CTRL_ONCS_RESERVATIONS);

	status = nvmet_copy_to_sgl(req, 0, id, sizeof(struct nvme_id_ctrl));

out_free:
	kfree(id);
	return status;
}

static u16 nvmet_passthru_override_id_ns(struct nvmet_req *req)
{
	u16 status = NVME_SC_SUCCESS;
	struct nvme_id_ns *id;
	int i;

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id)
		return NVME_SC_INTERNAL;

	status = nvmet_copy_from_sgl(req, 0, id, sizeof(struct nvme_id_ns));
	if (status)
		goto out_free;

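	/*
	 * Clear any LBA formats that carry per-block metadata, and clear
	 * FLBAS bit 4 (metadata transferred at the end of the extended LBA),
	 * since the target does not pass metadata through (see id->mc below).
	 */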
	for (i = 0; i < (id->nlbaf + 1); i++)
		if (id->lbaf[i].ms)
			memset(&id->lbaf[i], 0, sizeof(id->lbaf[i]));

	id->flbas = id->flbas & ~(1 << 4);

	/*
	 * Presently the NVMe-oF target code does not support sending
	 * metadata, so we must disable it here. This should be updated
	 * once the target starts supporting metadata.
	 */
	id->mc = 0;

	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

out_free:
	kfree(id);
	return status;
}

static void nvmet_passthru_execute_cmd_work(struct work_struct *w)
{
	struct nvmet_req *req = container_of(w, struct nvmet_req, p.work);
	struct request *rq = req->p.rq;
	int status;

	status = nvme_execute_passthru_rq(rq);

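	/* status holds an NVMe status code if >= 0, or a negative errno */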
	if (status == NVME_SC_SUCCESS &&
	    req->cmd->common.opcode == nvme_admin_identify) {
		switch (req->cmd->identify.cns) {
		case NVME_ID_CNS_CTRL:
			nvmet_passthru_override_id_ctrl(req);
			break;
		case NVME_ID_CNS_NS:
			nvmet_passthru_override_id_ns(req);
			break;
		}
	} else if (status < 0)
		status = NVME_SC_INTERNAL;

	req->cqe->result = nvme_req(rq)->result;
	nvmet_req_complete(req, status);
	blk_mq_free_request(rq);
}

static void nvmet_passthru_req_done(struct request *rq,
				    blk_status_t blk_status)
{
	struct nvmet_req *req = rq->end_io_data;

	req->cqe->result = nvme_req(rq)->result;
	nvmet_req_complete(req, nvme_req(rq)->status);
	blk_mq_free_request(rq);
}

static int nvmet_passthru_map_sg(struct nvmet_req *req, struct request *rq)
{
	struct scatterlist *sg;
	struct bio *bio;
	int i;

	if (req->sg_cnt > BIO_MAX_VECS)
		return -EINVAL;

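	/*
	 * Small transfers fit in the bvec embedded in the request itself;
	 * anything larger needs a separately allocated bio.
	 */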
	if (nvmet_use_inline_bvec(req)) {
		bio = &req->p.inline_bio;
		bio_init(bio, NULL, req->inline_bvec,
			 ARRAY_SIZE(req->inline_bvec), req_op(rq));
	} else {
		bio = bio_alloc(NULL, bio_max_segs(req->sg_cnt), req_op(rq),
				GFP_KERNEL);
		bio->bi_end_io = bio_put;
	}

	for_each_sg(req->sg, sg, req->sg_cnt, i) {
		if (bio_add_pc_page(rq->q, bio, sg_page(sg), sg->length,
				    sg->offset) < sg->length) {
			nvmet_req_bio_put(req, bio);
			return -EINVAL;
		}
	}

	blk_rq_bio_prep(rq, bio, req->sg_cnt);

	return 0;
}

static void nvmet_passthru_execute_cmd(struct nvmet_req *req)
{
	struct nvme_ctrl *ctrl = nvmet_req_subsys(req)->passthru_ctrl;
	struct request_queue *q = ctrl->admin_q;
	struct nvme_ns *ns = NULL;
	struct request *rq = NULL;
	unsigned int timeout;
	u32 effects;
	u16 status;
	int ret;

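	/* qid 0 is the admin queue; I/O queues address a specific namespace */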
	if (likely(req->sq->qid != 0)) {
		u32 nsid = le32_to_cpu(req->cmd->common.nsid);

		ns = nvme_find_get_ns(ctrl, nsid);
		if (unlikely(!ns)) {
			pr_err("failed to get passthru ns nsid:%u\n", nsid);
			status = NVME_SC_INVALID_NS | NVME_SC_DNR;
			goto out;
		}

		q = ns->queue;
		timeout = nvmet_req_subsys(req)->io_timeout;
	} else {
		timeout = nvmet_req_subsys(req)->admin_timeout;
	}

	rq = blk_mq_alloc_request(q, nvme_req_op(req->cmd), 0);
	if (IS_ERR(rq)) {
		status = NVME_SC_INTERNAL;
		goto out_put_ns;
	}
	nvme_init_request(rq, req->cmd);

	if (timeout)
		rq->timeout = timeout;

	if (req->sg_cnt) {
		ret = nvmet_passthru_map_sg(req, rq);
		if (unlikely(ret)) {
			status = NVME_SC_INTERNAL;
			goto out_put_req;
		}
	}

	/*
	 * If the command we are about to execute has effects, or needs an
	 * end_req function, we must call nvme_execute_passthru_rq()
	 * synchronously in a work item, since the end_req function and
	 * nvme_passthru_end() can't be called from the request done callback,
	 * which typically runs in interrupt context.
	 */
	effects = nvme_command_effects(ctrl, ns, req->cmd->common.opcode);
	if (req->p.use_workqueue || effects) {
		INIT_WORK(&req->p.work, nvmet_passthru_execute_cmd_work);
		req->p.rq = rq;
		queue_work(nvmet_wq, &req->p.work);
	} else {
		rq->end_io_data = req;
		blk_execute_rq_nowait(rq, false, nvmet_passthru_req_done);
	}

	if (ns)
		nvme_put_ns(ns);

	return;

out_put_req:
	blk_mq_free_request(rq);
out_put_ns:
	if (ns)
		nvme_put_ns(ns);
out:
	nvmet_req_complete(req, status);
}

/*
 * We need to emulate Set Features (Host Behavior Support) to ensure that
 * the behaviour requested by the target's host matches the behaviour
 * already requested by the device's host, and fail otherwise.
 */
static void nvmet_passthru_set_host_behaviour(struct nvmet_req *req)
{
	struct nvme_ctrl *ctrl = nvmet_req_subsys(req)->passthru_ctrl;
	struct nvme_feat_host_behavior *host;
	u16 status = NVME_SC_INTERNAL;
	int ret;

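	/* host[0]: the device's current behaviour; host[1]: the requested one */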
	host = kzalloc(sizeof(*host) * 2, GFP_KERNEL);
	if (!host)
		goto out_complete_req;

	ret = nvme_get_features(ctrl, NVME_FEAT_HOST_BEHAVIOR, 0,
				host, sizeof(*host), NULL);
	if (ret)
		goto out_free_host;

	status = nvmet_copy_from_sgl(req, 0, &host[1], sizeof(*host));
	if (status)
		goto out_free_host;

	if (memcmp(&host[0], &host[1], sizeof(host[0]))) {
		pr_warn("target host has requested different behaviour from the local host\n");
		status = NVME_SC_INTERNAL;
	}

out_free_host:
	kfree(host);
out_complete_req:
	nvmet_req_complete(req, status);
}

static u16 nvmet_setup_passthru_command(struct nvmet_req *req)
{
	req->p.use_workqueue = false;
	req->execute = nvmet_passthru_execute_cmd;
	return NVME_SC_SUCCESS;
}

u16 nvmet_parse_passthru_io_cmd(struct nvmet_req *req)
{
	/* Reject any commands with non-sgl flags set (i.e. fused commands) */
	if (req->cmd->common.flags & ~NVME_CMD_SGL_ALL)
		return NVME_SC_INVALID_FIELD;

	switch (req->cmd->common.opcode) {
	case nvme_cmd_resv_register:
	case nvme_cmd_resv_report:
	case nvme_cmd_resv_acquire:
	case nvme_cmd_resv_release:
		/*
		 * Reservations cannot be supported properly because the
		 * underlying device has no way of differentiating different
		 * hosts that connect via fabrics. This could potentially be
		 * emulated in the future if regular targets grow support for
		 * this feature.
		 */
		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
	}

	return nvmet_setup_passthru_command(req);
}

/*
 * Only features that are emulated or specifically allowed in the following
 * list are passed down to the controller. This function implements the
 * allow list for both get and set features.
 */
static u16 nvmet_passthru_get_set_features(struct nvmet_req *req)
{
	switch (le32_to_cpu(req->cmd->features.fid)) {
	case NVME_FEAT_ARBITRATION:
	case NVME_FEAT_POWER_MGMT:
	case NVME_FEAT_LBA_RANGE:
	case NVME_FEAT_TEMP_THRESH:
	case NVME_FEAT_ERR_RECOVERY:
	case NVME_FEAT_VOLATILE_WC:
	case NVME_FEAT_WRITE_ATOMIC:
	case NVME_FEAT_AUTO_PST:
	case NVME_FEAT_TIMESTAMP:
	case NVME_FEAT_HCTM:
	case NVME_FEAT_NOPSC:
	case NVME_FEAT_RRL:
	case NVME_FEAT_PLM_CONFIG:
	case NVME_FEAT_PLM_WINDOW:
	case NVME_FEAT_HOST_BEHAVIOR:
	case NVME_FEAT_SANITIZE:
	case NVME_FEAT_VENDOR_START ... NVME_FEAT_VENDOR_END:
		return nvmet_setup_passthru_command(req);

	case NVME_FEAT_ASYNC_EVENT:
		/* There is no support for forwarding ASYNC events */
	case NVME_FEAT_IRQ_COALESCE:
	case NVME_FEAT_IRQ_CONFIG:
		/* The IRQ settings will not apply to the target controller */
	case NVME_FEAT_HOST_MEM_BUF:
		/*
		 * Any HMB that's set will not be passed through and will
		 * not work as expected
		 */
	case NVME_FEAT_SW_PROGRESS:
		/*
		 * The Pre-Boot Software Load Count doesn't make much
		 * sense for a target to export
		 */
	case NVME_FEAT_RESV_MASK:
	case NVME_FEAT_RESV_PERSIST:
		/* No reservations, see nvmet_parse_passthru_io_cmd() */
	default:
		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
	}
}

u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req)
{
	/* Reject any commands with non-sgl flags set (i.e. fused commands) */
	if (req->cmd->common.flags & ~NVME_CMD_SGL_ALL)
		return NVME_SC_INVALID_FIELD;

	/*
	 * Passthru all vendor specific commands
	 */
	if (req->cmd->common.opcode >= nvme_admin_vendor_start)
		return nvmet_setup_passthru_command(req);

	switch (req->cmd->common.opcode) {
	case nvme_admin_async_event:
		req->execute = nvmet_execute_async_event;
		return NVME_SC_SUCCESS;
	case nvme_admin_keep_alive:
		/*
		 * Most PCIe ctrls don't support the keep alive cmd, so we
		 * route keep alive to the non-passthru mode. Change this code
		 * once PCIe ctrls with keep alive support become available.
		 */
		req->execute = nvmet_execute_keep_alive;
		return NVME_SC_SUCCESS;
	case nvme_admin_set_features:
		switch (le32_to_cpu(req->cmd->features.fid)) {
		case NVME_FEAT_ASYNC_EVENT:
		case NVME_FEAT_KATO:
		case NVME_FEAT_NUM_QUEUES:
		case NVME_FEAT_HOST_ID:
			req->execute = nvmet_execute_set_features;
			return NVME_SC_SUCCESS;
		case NVME_FEAT_HOST_BEHAVIOR:
			req->execute = nvmet_passthru_set_host_behaviour;
			return NVME_SC_SUCCESS;
		default:
			return nvmet_passthru_get_set_features(req);
		}
		break;
	case nvme_admin_get_features:
		switch (le32_to_cpu(req->cmd->features.fid)) {
		case NVME_FEAT_ASYNC_EVENT:
		case NVME_FEAT_KATO:
		case NVME_FEAT_NUM_QUEUES:
		case NVME_FEAT_HOST_ID:
			req->execute = nvmet_execute_get_features;
			return NVME_SC_SUCCESS;
		default:
			return nvmet_passthru_get_set_features(req);
		}
		break;
	case nvme_admin_identify:
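		/*
		 * Identify results are rewritten by the override helpers,
		 * which run from the workqueue in process context.
		 */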
		switch (req->cmd->identify.cns) {
		case NVME_ID_CNS_CTRL:
			req->execute = nvmet_passthru_execute_cmd;
			req->p.use_workqueue = true;
			return NVME_SC_SUCCESS;
		case NVME_ID_CNS_CS_CTRL:
			switch (req->cmd->identify.csi) {
			case NVME_CSI_ZNS:
				req->execute = nvmet_passthru_execute_cmd;
				req->p.use_workqueue = true;
				return NVME_SC_SUCCESS;
			}
			return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
		case NVME_ID_CNS_NS:
			req->execute = nvmet_passthru_execute_cmd;
			req->p.use_workqueue = true;
			return NVME_SC_SUCCESS;
		case NVME_ID_CNS_CS_NS:
			switch (req->cmd->identify.csi) {
			case NVME_CSI_ZNS:
				req->execute = nvmet_passthru_execute_cmd;
				req->p.use_workqueue = true;
				return NVME_SC_SUCCESS;
			}
			return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
		default:
			return nvmet_setup_passthru_command(req);
		}
	case nvme_admin_get_log_page:
		return nvmet_setup_passthru_command(req);
	default:
		/* Reject commands not in the allowlist above */
		return nvmet_report_invalid_opcode(req);
	}
}

int nvmet_passthru_ctrl_enable(struct nvmet_subsys *subsys)
{
	struct nvme_ctrl *ctrl;
	struct file *file;
	int ret = -EINVAL;
	void *old;

	mutex_lock(&subsys->lock);
	if (!subsys->passthru_ctrl_path)
		goto out_unlock;
	if (subsys->passthru_ctrl)
		goto out_unlock;

	if (subsys->nr_namespaces) {
		pr_info("cannot enable both passthru and regular namespaces for a single subsystem\n");
		goto out_unlock;
	}

	file = filp_open(subsys->passthru_ctrl_path, O_RDWR, 0);
	if (IS_ERR(file)) {
		ret = PTR_ERR(file);
		goto out_unlock;
	}

	ctrl = nvme_ctrl_from_file(file);
	if (!ctrl) {
		pr_err("failed to open nvme controller %s\n",
		       subsys->passthru_ctrl_path);

		goto out_put_file;
	}

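	/* bind at most one passthru subsystem to each nvme controller */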
	old = xa_cmpxchg(&passthru_subsystems, ctrl->cntlid, NULL,
			 subsys, GFP_KERNEL);
	if (xa_is_err(old)) {
		ret = xa_err(old);
		goto out_put_file;
	}

	if (old)
		goto out_put_file;

	subsys->passthru_ctrl = ctrl;
	subsys->ver = ctrl->vs;

	if (subsys->ver < NVME_VS(1, 2, 1)) {
		pr_warn("nvme controller version is too old: %llu.%llu.%llu, advertising 1.2.1\n",
			NVME_MAJOR(subsys->ver), NVME_MINOR(subsys->ver),
			NVME_TERTIARY(subsys->ver));
		subsys->ver = NVME_VS(1, 2, 1);
	}
	nvme_get_ctrl(ctrl);
	__module_get(subsys->passthru_ctrl->ops->module);
	ret = 0;

out_put_file:
	filp_close(file, NULL);
out_unlock:
	mutex_unlock(&subsys->lock);
	return ret;
}

static void __nvmet_passthru_ctrl_disable(struct nvmet_subsys *subsys)
{
	if (subsys->passthru_ctrl) {
		xa_erase(&passthru_subsystems, subsys->passthru_ctrl->cntlid);
		module_put(subsys->passthru_ctrl->ops->module);
		nvme_put_ctrl(subsys->passthru_ctrl);
	}
	subsys->passthru_ctrl = NULL;
	subsys->ver = NVMET_DEFAULT_VS;
}

void nvmet_passthru_ctrl_disable(struct nvmet_subsys *subsys)
{
	mutex_lock(&subsys->lock);
	__nvmet_passthru_ctrl_disable(subsys);
	mutex_unlock(&subsys->lock);
}

void nvmet_passthru_subsys_free(struct nvmet_subsys *subsys)
{
	mutex_lock(&subsys->lock);
	__nvmet_passthru_ctrl_disable(subsys);
	mutex_unlock(&subsys->lock);
	kfree(subsys->passthru_ctrl_path);
}
598