// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2011-2014, Intel Corporation.
 * Copyright (c) 2017-2021 Christoph Hellwig.
 */
#include <linux/blk-integrity.h>
#include <linux/ptrace.h>	/* for force_successful_syscall_return */
#include <linux/nvme_ioctl.h>
#include <linux/io_uring.h>
#include "nvme.h"

enum {
	NVME_IOCTL_VEC		= (1 << 0),
	NVME_IOCTL_PARTITION	= (1 << 1),
};

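/*
 * Decide whether a passthrough command may be issued without CAP_SYS_ADMIN.
 * Anything not explicitly allowed below falls through to the "admin" label
 * and requires the capability.
 */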
static bool nvme_cmd_allowed(struct nvme_ns *ns, struct nvme_command *c,
		unsigned int flags, bool open_for_write)
{
	u32 effects;

	/*
	 * Do not allow unprivileged passthrough on partitions, as that allows an
	 * escape from the containment of the partition.
	 */
	if (flags & NVME_IOCTL_PARTITION)
		goto admin;

	/*
	 * Do not allow unprivileged processes to send vendor specific or fabrics
	 * commands as we can't be sure about their effects.
	 */
	if (c->common.opcode >= nvme_cmd_vendor_start ||
	    c->common.opcode == nvme_fabrics_command)
		goto admin;

	/*
	 * Do not allow unprivileged passthrough of admin commands except
	 * for a subset of identify commands that contain information required
	 * to form proper I/O commands in userspace and do not expose any
	 * potentially sensitive information.
	 */
	if (!ns) {
		if (c->common.opcode == nvme_admin_identify) {
			switch (c->identify.cns) {
			case NVME_ID_CNS_NS:
			case NVME_ID_CNS_CS_NS:
			case NVME_ID_CNS_NS_CS_INDEP:
			case NVME_ID_CNS_CS_CTRL:
			case NVME_ID_CNS_CTRL:
				return true;
			}
		}
		goto admin;
	}

	/*
	 * Check if the controller provides a Commands Supported and Effects log
	 * and marks this command as supported.  If not, reject unprivileged
	 * passthrough.
	 */
	effects = nvme_command_effects(ns->ctrl, ns, c->common.opcode);
	if (!(effects & NVME_CMD_EFFECTS_CSUPP))
		goto admin;

	/*
	 * Don't allow passthrough for commands that have intrusive (or unknown)
	 * effects.
	 */
	if (effects & ~(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC |
			NVME_CMD_EFFECTS_UUID_SEL |
			NVME_CMD_EFFECTS_SCOPE_MASK))
		goto admin;

	/*
	 * Only allow I/O commands that transfer data to the controller or that
	 * change the logical block contents if the file descriptor is open for
	 * writing.
	 */
	if ((nvme_is_write(c) || (effects & NVME_CMD_EFFECTS_LBCC)) &&
	    !open_for_write)
		goto admin;

	return true;
admin:
	return capable(CAP_SYS_ADMIN);
}

/*
 * Convert integer values from ioctl structures to user pointers, silently
 * ignoring the upper bits in the compat case to match behaviour of 32-bit
 * kernels.
 */
static void __user *nvme_to_user_ptr(uintptr_t ptrval)
{
	if (in_compat_syscall())
		ptrval = (compat_uptr_t)ptrval;
	return (void __user *)ptrval;
}

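/*
 * Allocate a kernel bounce buffer for the user metadata, copy it in for
 * writes, and attach it to the request as a bio integrity payload.  Returns
 * the kernel buffer on success so the caller can copy it back to userspace
 * on completion, or an ERR_PTR() on failure.
 */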
static void *nvme_add_user_metadata(struct request *req, void __user *ubuf,
		unsigned len, u32 seed)
{
	struct bio_integrity_payload *bip;
	int ret = -ENOMEM;
	void *buf;
	struct bio *bio = req->bio;

	buf = kmalloc(len, GFP_KERNEL);
	if (!buf)
		goto out;

	if (req_op(req) == REQ_OP_DRV_OUT) {
		ret = -EFAULT;
		if (copy_from_user(buf, ubuf, len))
			goto out_free_meta;
	} else {
		memset(buf, 0, len);
	}

	bip = bio_integrity_alloc(bio, GFP_KERNEL, 1);
	if (IS_ERR(bip)) {
		ret = PTR_ERR(bip);
		goto out_free_meta;
	}

	bip->bip_iter.bi_sector = seed;
	ret = bio_integrity_add_page(bio, virt_to_page(buf), len,
			offset_in_page(buf));
	if (ret != len) {
		ret = -ENOMEM;
		goto out_free_meta;
	}

	req->cmd_flags |= REQ_INTEGRITY;
	return buf;
out_free_meta:
	kfree(buf);
out:
	return ERR_PTR(ret);
}

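/*
 * Copy the metadata bounce buffer back to userspace for reads (unless the
 * command already failed) and free it.
 */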
static int nvme_finish_user_metadata(struct request *req, void __user *ubuf,
		void *meta, unsigned len, int ret)
{
	if (!ret && req_op(req) == REQ_OP_DRV_IN &&
	    copy_to_user(ubuf, meta, len))
		ret = -EFAULT;
	kfree(meta);
	return ret;
}

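/*
 * Allocate a request for a user passthrough command, initialize it from
 * the NVMe SQE and flag it as NVME_REQ_USERCMD.
 */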
static struct request *nvme_alloc_user_request(struct request_queue *q,
		struct nvme_command *cmd, blk_opf_t rq_flags,
		blk_mq_req_flags_t blk_flags)
{
	struct request *req;

	req = blk_mq_alloc_request(q, nvme_req_op(cmd) | rq_flags, blk_flags);
	if (IS_ERR(req))
		return req;
	nvme_init_request(req, cmd);
	nvme_req(req)->flags |= NVME_REQ_USERCMD;
	return req;
}

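/*
 * Map the user data (and optional metadata) buffers into the request.  For
 * io_uring commands with a registered (fixed) buffer the pages are imported
 * directly; otherwise the plain or vectored user address is mapped.  Note
 * that the request itself is freed on failure, so callers must not touch it
 * once this returns an error.
 */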
static int nvme_map_user_request(struct request *req, u64 ubuffer,
		unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
		u32 meta_seed, void **metap, struct io_uring_cmd *ioucmd,
		unsigned int flags)
{
	struct request_queue *q = req->q;
	struct nvme_ns *ns = q->queuedata;
	struct block_device *bdev = ns ? ns->disk->part0 : NULL;
	bool supports_metadata = bdev && blk_get_integrity(bdev->bd_disk);
	bool has_metadata = meta_buffer && meta_len;
	struct bio *bio = NULL;
	void *meta = NULL;
	int ret;

	if (has_metadata && !supports_metadata)
		return -EINVAL;

	if (ioucmd && (ioucmd->flags & IORING_URING_CMD_FIXED)) {
		struct iov_iter iter;

		/* fixedbufs is only for non-vectored io */
		if (WARN_ON_ONCE(flags & NVME_IOCTL_VEC))
			return -EINVAL;
		ret = io_uring_cmd_import_fixed(ubuffer, bufflen,
				rq_data_dir(req), &iter, ioucmd);
		if (ret < 0)
			goto out;
		ret = blk_rq_map_user_iov(q, req, NULL, &iter, GFP_KERNEL);
	} else {
		ret = blk_rq_map_user_io(req, NULL, nvme_to_user_ptr(ubuffer),
				bufflen, GFP_KERNEL, flags & NVME_IOCTL_VEC, 0,
				0, rq_data_dir(req));
	}

	if (ret)
		goto out;
	bio = req->bio;
	if (bdev)
		bio_set_dev(bio, bdev);

	if (has_metadata) {
		meta = nvme_add_user_metadata(req, meta_buffer, meta_len,
				meta_seed);
		if (IS_ERR(meta)) {
			ret = PTR_ERR(meta);
			goto out_unmap;
		}
		*metap = meta;
	}

	return ret;

out_unmap:
	if (bio)
		blk_rq_unmap_user(bio);
out:
	blk_mq_free_request(req);
	return ret;
}

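/*
 * Synchronously execute a user passthrough command: map the buffers, run
 * the command with passthrough effects accounting, copy back the result
 * and metadata, and clean up the request.
 */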
static int nvme_submit_user_cmd(struct request_queue *q,
		struct nvme_command *cmd, u64 ubuffer, unsigned bufflen,
		void __user *meta_buffer, unsigned meta_len, u32 meta_seed,
		u64 *result, unsigned timeout, unsigned int flags)
{
	struct nvme_ns *ns = q->queuedata;
	struct nvme_ctrl *ctrl;
	struct request *req;
	void *meta = NULL;
	struct bio *bio;
	u32 effects;
	int ret;

	req = nvme_alloc_user_request(q, cmd, 0, 0);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->timeout = timeout;
	if (ubuffer && bufflen) {
		ret = nvme_map_user_request(req, ubuffer, bufflen, meta_buffer,
				meta_len, meta_seed, &meta, NULL, flags);
		if (ret)
			return ret;
	}

	bio = req->bio;
	ctrl = nvme_req(req)->ctrl;

	effects = nvme_passthru_start(ctrl, ns, cmd->common.opcode);
	ret = nvme_execute_rq(req, false);
	if (result)
		*result = le64_to_cpu(nvme_req(req)->result.u64);
	if (meta)
		ret = nvme_finish_user_metadata(req, meta_buffer, meta,
						meta_len, ret);
	if (bio)
		blk_rq_unmap_user(bio);
	blk_mq_free_request(req);

	if (effects)
		nvme_passthru_end(ctrl, ns, effects, cmd, ret);

	return ret;
}

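/*
 * Handler for NVME_IOCTL_SUBMIT_IO: validate the legacy nvme_user_io
 * structure and turn it into a read/write/compare passthrough command.
 */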
static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
{
	struct nvme_user_io io;
	struct nvme_command c;
	unsigned length, meta_len;
	void __user *metadata;

	if (copy_from_user(&io, uio, sizeof(io)))
		return -EFAULT;
	if (io.flags)
		return -EINVAL;

	switch (io.opcode) {
	case nvme_cmd_write:
	case nvme_cmd_read:
	case nvme_cmd_compare:
		break;
	default:
		return -EINVAL;
	}

	length = (io.nblocks + 1) << ns->lba_shift;

	if ((io.control & NVME_RW_PRINFO_PRACT) &&
	    ns->ms == sizeof(struct t10_pi_tuple)) {
		/*
		 * Protection information is stripped/inserted by the
		 * controller.
		 */
		if (nvme_to_user_ptr(io.metadata))
			return -EINVAL;
		meta_len = 0;
		metadata = NULL;
	} else {
		meta_len = (io.nblocks + 1) * ns->ms;
		metadata = nvme_to_user_ptr(io.metadata);
	}

	if (ns->features & NVME_NS_EXT_LBAS) {
		length += meta_len;
		meta_len = 0;
	} else if (meta_len) {
		if ((io.metadata & 3) || !io.metadata)
			return -EINVAL;
	}

	memset(&c, 0, sizeof(c));
	c.rw.opcode = io.opcode;
	c.rw.flags = io.flags;
	c.rw.nsid = cpu_to_le32(ns->head->ns_id);
	c.rw.slba = cpu_to_le64(io.slba);
	c.rw.length = cpu_to_le16(io.nblocks);
	c.rw.control = cpu_to_le16(io.control);
	c.rw.dsmgmt = cpu_to_le32(io.dsmgmt);
	c.rw.reftag = cpu_to_le32(io.reftag);
	c.rw.apptag = cpu_to_le16(io.apptag);
	c.rw.appmask = cpu_to_le16(io.appmask);

	return nvme_submit_user_cmd(ns->queue, &c, io.addr, length, metadata,
			meta_len, lower_32_bits(io.slba), NULL, 0, 0);
}

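/*
 * A passthrough command must target the namespace the file descriptor
 * refers to; reject and log a mismatching nsid.
 */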
static bool nvme_validate_passthru_nsid(struct nvme_ctrl *ctrl,
					struct nvme_ns *ns, __u32 nsid)
{
	if (ns && nsid != ns->head->ns_id) {
		dev_err(ctrl->device,
			"%s: nsid (%u) in cmd does not match nsid (%u) of namespace\n",
			current->comm, nsid, ns->head->ns_id);
		return false;
	}

	return true;
}

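/*
 * Handler for NVME_IOCTL_ADMIN_CMD and NVME_IOCTL_IO_CMD: execute a
 * passthrough command described by struct nvme_passthru_cmd and write the
 * 32-bit result back to userspace.
 */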
static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
		struct nvme_passthru_cmd __user *ucmd, unsigned int flags,
		bool open_for_write)
{
	struct nvme_passthru_cmd cmd;
	struct nvme_command c;
	unsigned timeout = 0;
	u64 result;
	int status;

	if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
		return -EFAULT;
	if (cmd.flags)
		return -EINVAL;
	if (!nvme_validate_passthru_nsid(ctrl, ns, cmd.nsid))
		return -EINVAL;

	memset(&c, 0, sizeof(c));
	c.common.opcode = cmd.opcode;
	c.common.flags = cmd.flags;
	c.common.nsid = cpu_to_le32(cmd.nsid);
	c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
	c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
	c.common.cdw10 = cpu_to_le32(cmd.cdw10);
	c.common.cdw11 = cpu_to_le32(cmd.cdw11);
	c.common.cdw12 = cpu_to_le32(cmd.cdw12);
	c.common.cdw13 = cpu_to_le32(cmd.cdw13);
	c.common.cdw14 = cpu_to_le32(cmd.cdw14);
	c.common.cdw15 = cpu_to_le32(cmd.cdw15);

	if (!nvme_cmd_allowed(ns, &c, 0, open_for_write))
		return -EACCES;

	if (cmd.timeout_ms)
		timeout = msecs_to_jiffies(cmd.timeout_ms);

	status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
			cmd.addr, cmd.data_len, nvme_to_user_ptr(cmd.metadata),
			cmd.metadata_len, 0, &result, timeout, 0);

	if (status >= 0) {
		if (put_user(result, &ucmd->result))
			return -EFAULT;
	}

	return status;
}

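/*
 * 64-bit result variant of the above, used for NVME_IOCTL_ADMIN64_CMD,
 * NVME_IOCTL_IO64_CMD and their vectored forms.
 */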
static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
		struct nvme_passthru_cmd64 __user *ucmd, unsigned int flags,
		bool open_for_write)
{
	struct nvme_passthru_cmd64 cmd;
	struct nvme_command c;
	unsigned timeout = 0;
	int status;

	if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
		return -EFAULT;
	if (cmd.flags)
		return -EINVAL;
	if (!nvme_validate_passthru_nsid(ctrl, ns, cmd.nsid))
		return -EINVAL;

	memset(&c, 0, sizeof(c));
	c.common.opcode = cmd.opcode;
	c.common.flags = cmd.flags;
	c.common.nsid = cpu_to_le32(cmd.nsid);
	c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
	c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
	c.common.cdw10 = cpu_to_le32(cmd.cdw10);
	c.common.cdw11 = cpu_to_le32(cmd.cdw11);
	c.common.cdw12 = cpu_to_le32(cmd.cdw12);
	c.common.cdw13 = cpu_to_le32(cmd.cdw13);
	c.common.cdw14 = cpu_to_le32(cmd.cdw14);
	c.common.cdw15 = cpu_to_le32(cmd.cdw15);

	if (!nvme_cmd_allowed(ns, &c, flags, open_for_write))
		return -EACCES;

	if (cmd.timeout_ms)
		timeout = msecs_to_jiffies(cmd.timeout_ms);

	status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
			cmd.addr, cmd.data_len, nvme_to_user_ptr(cmd.metadata),
			cmd.metadata_len, 0, &cmd.result, timeout, flags);

	if (status >= 0) {
		if (put_user(cmd.result, &ucmd->result))
			return -EFAULT;
	}

	return status;
}

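/*
 * Host-endian copy of the user-controlled fields of the uapi
 * struct nvme_uring_cmd that live in the big SQE.
 *
 * A rough userspace sketch (assuming liburing; nvme_char_fd is a
 * placeholder for an open per-namespace char device, and error handling
 * is omitted).  The ring must be created with big SQEs and CQEs for NVMe
 * passthrough to be accepted, and the NVMe command is written into the
 * SQE command area:
 *
 *	struct io_uring ring;
 *	io_uring_queue_init(8, &ring,
 *			IORING_SETUP_SQE128 | IORING_SETUP_CQE32);
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *	sqe->opcode = IORING_OP_URING_CMD;
 *	sqe->fd = nvme_char_fd;
 *	sqe->cmd_op = NVME_URING_CMD_IO;
 *	struct nvme_uring_cmd *cmd = (struct nvme_uring_cmd *)sqe->cmd;
 *	cmd->opcode = 0x02;	// NVM read; also set nsid, addr, data_len, ...
 *	io_uring_submit(&ring);
 */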
struct nvme_uring_data {
	__u64	metadata;
	__u64	addr;
	__u32	data_len;
	__u32	metadata_len;
	__u32	timeout_ms;
};

/*
 * This overlays struct io_uring_cmd pdu.
 * Expect build errors if this grows larger than that.
 */
struct nvme_uring_cmd_pdu {
	union {
		struct bio *bio;
		struct request *req;
	};
	u32 meta_len;
	u32 nvme_status;
	union {
		struct {
			void *meta; /* kernel-resident buffer */
			void __user *meta_buffer;
		};
		u64 result;
	} u;
};

static inline struct nvme_uring_cmd_pdu *nvme_uring_cmd_pdu(
		struct io_uring_cmd *ioucmd)
{
	return (struct nvme_uring_cmd_pdu *)&ioucmd->pdu;
}

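/*
 * Task-work completion for commands with metadata: copy the metadata back
 * to userspace, unmap the data buffer and free the request before posting
 * the CQE.
 */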
static void nvme_uring_task_meta_cb(struct io_uring_cmd *ioucmd,
				    unsigned issue_flags)
{
	struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
	struct request *req = pdu->req;
	int status;
	u64 result;

	if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
		status = -EINTR;
	else
		status = nvme_req(req)->status;

	result = le64_to_cpu(nvme_req(req)->result.u64);

	if (pdu->meta_len)
		status = nvme_finish_user_metadata(req, pdu->u.meta_buffer,
					pdu->u.meta, pdu->meta_len, status);
	if (req->bio)
		blk_rq_unmap_user(req->bio);
	blk_mq_free_request(req);

	io_uring_cmd_done(ioucmd, status, result, issue_flags);
}

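/*
 * Task-work completion for the common (no metadata) case: the request was
 * already freed by the end_io handler, so only the user mapping needs to
 * be torn down before posting the CQE.
 */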
static void nvme_uring_task_cb(struct io_uring_cmd *ioucmd,
			       unsigned issue_flags)
{
	struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);

	if (pdu->bio)
		blk_rq_unmap_user(pdu->bio);

	io_uring_cmd_done(ioucmd, pdu->nvme_status, pdu->u.result, issue_flags);
}

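/*
 * end_io handler for commands without metadata.  Stash the status and
 * result in the pdu and let the block layer free the request
 * (RQ_END_IO_FREE); the final completion is punted to task work.
 */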
static enum rq_end_io_ret nvme_uring_cmd_end_io(struct request *req,
						blk_status_t err)
{
	struct io_uring_cmd *ioucmd = req->end_io_data;
	struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);

	req->bio = pdu->bio;
	if (nvme_req(req)->flags & NVME_REQ_CANCELLED) {
		pdu->nvme_status = -EINTR;
	} else {
		pdu->nvme_status = nvme_req(req)->status;
		if (!pdu->nvme_status)
			pdu->nvme_status = blk_status_to_errno(err);
	}
	pdu->u.result = le64_to_cpu(nvme_req(req)->result.u64);

	/*
	 * IOPOLL could potentially complete this request directly, but
	 * if multiple rings are polling on the same queue, then it's possible
	 * for one ring to find completions for another ring. Punting the
	 * completion via task_work will always direct it to the right
	 * location, rather than potentially complete requests for ring A
	 * under iopoll invocations from ring B.
	 */
	io_uring_cmd_do_in_task_lazy(ioucmd, nvme_uring_task_cb);
	return RQ_END_IO_FREE;
}

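/*
 * end_io handler for commands with metadata.  The request must stay alive
 * (RQ_END_IO_NONE) until nvme_uring_task_meta_cb() has copied the metadata
 * back and freed it.
 */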
static enum rq_end_io_ret nvme_uring_cmd_end_io_meta(struct request *req,
						     blk_status_t err)
{
	struct io_uring_cmd *ioucmd = req->end_io_data;
	struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);

	req->bio = pdu->bio;
	pdu->req = req;

	/*
	 * For iopoll, complete it directly.
	 * Otherwise, move the completion to task work.
	 */
	if (blk_rq_is_poll(req)) {
		WRITE_ONCE(ioucmd->cookie, NULL);
		nvme_uring_task_meta_cb(ioucmd, IO_URING_F_UNLOCKED);
	} else {
		io_uring_cmd_do_in_task_lazy(ioucmd, nvme_uring_task_meta_cb);
	}

	return RQ_END_IO_NONE;
}

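/*
 * Issue an NVMe passthrough command from an io_uring big SQE.  Builds the
 * command from the SQE payload, performs the same permission and nsid
 * checks as the ioctl path, and queues the request for async completion.
 */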
static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
		struct io_uring_cmd *ioucmd, unsigned int issue_flags, bool vec)
{
	struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
	const struct nvme_uring_cmd *cmd = io_uring_sqe_cmd(ioucmd->sqe);
	struct request_queue *q = ns ? ns->queue : ctrl->admin_q;
	struct nvme_uring_data d;
	struct nvme_command c;
	struct request *req;
	blk_opf_t rq_flags = REQ_ALLOC_CACHE;
	blk_mq_req_flags_t blk_flags = 0;
	void *meta = NULL;
	int ret;

	c.common.opcode = READ_ONCE(cmd->opcode);
	c.common.flags = READ_ONCE(cmd->flags);
	if (c.common.flags)
		return -EINVAL;

	c.common.command_id = 0;
	c.common.nsid = cpu_to_le32(cmd->nsid);
	if (!nvme_validate_passthru_nsid(ctrl, ns, le32_to_cpu(c.common.nsid)))
		return -EINVAL;

	c.common.cdw2[0] = cpu_to_le32(READ_ONCE(cmd->cdw2));
	c.common.cdw2[1] = cpu_to_le32(READ_ONCE(cmd->cdw3));
	c.common.metadata = 0;
	c.common.dptr.prp1 = c.common.dptr.prp2 = 0;
	c.common.cdw10 = cpu_to_le32(READ_ONCE(cmd->cdw10));
	c.common.cdw11 = cpu_to_le32(READ_ONCE(cmd->cdw11));
	c.common.cdw12 = cpu_to_le32(READ_ONCE(cmd->cdw12));
	c.common.cdw13 = cpu_to_le32(READ_ONCE(cmd->cdw13));
	c.common.cdw14 = cpu_to_le32(READ_ONCE(cmd->cdw14));
	c.common.cdw15 = cpu_to_le32(READ_ONCE(cmd->cdw15));

	if (!nvme_cmd_allowed(ns, &c, 0, ioucmd->file->f_mode & FMODE_WRITE))
		return -EACCES;

	d.metadata = READ_ONCE(cmd->metadata);
	d.addr = READ_ONCE(cmd->addr);
	d.data_len = READ_ONCE(cmd->data_len);
	d.metadata_len = READ_ONCE(cmd->metadata_len);
	d.timeout_ms = READ_ONCE(cmd->timeout_ms);

	if (issue_flags & IO_URING_F_NONBLOCK) {
		rq_flags |= REQ_NOWAIT;
		blk_flags = BLK_MQ_REQ_NOWAIT;
	}
	if (issue_flags & IO_URING_F_IOPOLL)
		rq_flags |= REQ_POLLED;

	req = nvme_alloc_user_request(q, &c, rq_flags, blk_flags);
	if (IS_ERR(req))
		return PTR_ERR(req);
	req->timeout = d.timeout_ms ? msecs_to_jiffies(d.timeout_ms) : 0;

	if (d.addr && d.data_len) {
		ret = nvme_map_user_request(req, d.addr,
			d.data_len, nvme_to_user_ptr(d.metadata),
			d.metadata_len, 0, &meta, ioucmd, vec);
		if (ret)
			return ret;
	}

	if (blk_rq_is_poll(req)) {
		ioucmd->flags |= IORING_URING_CMD_POLLED;
		WRITE_ONCE(ioucmd->cookie, req);
	}

	/* Save the bio for completion; req->bio will be NULL by the time it runs */
	pdu->bio = req->bio;
	pdu->meta_len = d.metadata_len;
	req->end_io_data = ioucmd;
	if (pdu->meta_len) {
		pdu->u.meta = meta;
		pdu->u.meta_buffer = nvme_to_user_ptr(d.metadata);
		req->end_io = nvme_uring_cmd_end_io_meta;
	} else {
		req->end_io = nvme_uring_cmd_end_io;
	}
	blk_execute_rq_nowait(req, false);
	return -EIOCBQUEUED;
}

static bool is_ctrl_ioctl(unsigned int cmd)
{
	if (cmd == NVME_IOCTL_ADMIN_CMD || cmd == NVME_IOCTL_ADMIN64_CMD)
		return true;
	if (is_sed_ioctl(cmd))
		return true;
	return false;
}

static int nvme_ctrl_ioctl(struct nvme_ctrl *ctrl, unsigned int cmd,
		void __user *argp, bool open_for_write)
{
	switch (cmd) {
	case NVME_IOCTL_ADMIN_CMD:
		return nvme_user_cmd(ctrl, NULL, argp, 0, open_for_write);
	case NVME_IOCTL_ADMIN64_CMD:
		return nvme_user_cmd64(ctrl, NULL, argp, 0, open_for_write);
	default:
		return sed_ioctl(ctrl->opal_dev, cmd, argp);
	}
}

#ifdef COMPAT_FOR_U64_ALIGNMENT
struct nvme_user_io32 {
	__u8	opcode;
	__u8	flags;
	__u16	control;
	__u16	nblocks;
	__u16	rsvd;
	__u64	metadata;
	__u64	addr;
	__u64	slba;
	__u32	dsmgmt;
	__u32	reftag;
	__u16	apptag;
	__u16	appmask;
} __attribute__((__packed__));
#define NVME_IOCTL_SUBMIT_IO32	_IOW('N', 0x42, struct nvme_user_io32)
#endif /* COMPAT_FOR_U64_ALIGNMENT */

static int nvme_ns_ioctl(struct nvme_ns *ns, unsigned int cmd,
		void __user *argp, unsigned int flags, bool open_for_write)
{
	switch (cmd) {
	case NVME_IOCTL_ID:
		force_successful_syscall_return();
		return ns->head->ns_id;
	case NVME_IOCTL_IO_CMD:
		return nvme_user_cmd(ns->ctrl, ns, argp, flags, open_for_write);
	/*
	 * struct nvme_user_io can have different padding on some 32-bit ABIs.
	 * Just accept the compat version as all fields that are used are the
	 * same size and at the same offset.
	 */
#ifdef COMPAT_FOR_U64_ALIGNMENT
	case NVME_IOCTL_SUBMIT_IO32:
#endif
	case NVME_IOCTL_SUBMIT_IO:
		return nvme_submit_io(ns, argp);
	case NVME_IOCTL_IO64_CMD_VEC:
		flags |= NVME_IOCTL_VEC;
		fallthrough;
	case NVME_IOCTL_IO64_CMD:
		return nvme_user_cmd64(ns->ctrl, ns, argp, flags,
				       open_for_write);
	default:
		return -ENOTTY;
	}
}

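/*
 * ioctl entry point for the per-namespace block device node; the char
 * device variant below differs only in how it looks up the namespace and
 * in never seeing partitions.
 */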
int nvme_ioctl(struct block_device *bdev, blk_mode_t mode,
		unsigned int cmd, unsigned long arg)
{
	struct nvme_ns *ns = bdev->bd_disk->private_data;
	bool open_for_write = mode & BLK_OPEN_WRITE;
	void __user *argp = (void __user *)arg;
	unsigned int flags = 0;

	if (bdev_is_partition(bdev))
		flags |= NVME_IOCTL_PARTITION;

	if (is_ctrl_ioctl(cmd))
		return nvme_ctrl_ioctl(ns->ctrl, cmd, argp, open_for_write);
	return nvme_ns_ioctl(ns, cmd, argp, flags, open_for_write);
}

long nvme_ns_chr_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct nvme_ns *ns =
		container_of(file_inode(file)->i_cdev, struct nvme_ns, cdev);
	bool open_for_write = file->f_mode & FMODE_WRITE;
	void __user *argp = (void __user *)arg;

	if (is_ctrl_ioctl(cmd))
		return nvme_ctrl_ioctl(ns->ctrl, cmd, argp, open_for_write);
	return nvme_ns_ioctl(ns, cmd, argp, 0, open_for_write);
}

static int nvme_uring_cmd_checks(unsigned int issue_flags)
{
	/* NVMe passthrough requires big SQE/CQE support */
	if ((issue_flags & (IO_URING_F_SQE128|IO_URING_F_CQE32)) !=
	    (IO_URING_F_SQE128|IO_URING_F_CQE32))
		return -EOPNOTSUPP;
	return 0;
}

static int nvme_ns_uring_cmd(struct nvme_ns *ns, struct io_uring_cmd *ioucmd,
			     unsigned int issue_flags)
{
	struct nvme_ctrl *ctrl = ns->ctrl;
	int ret;

	BUILD_BUG_ON(sizeof(struct nvme_uring_cmd_pdu) > sizeof(ioucmd->pdu));

	ret = nvme_uring_cmd_checks(issue_flags);
	if (ret)
		return ret;

	switch (ioucmd->cmd_op) {
	case NVME_URING_CMD_IO:
		ret = nvme_uring_cmd_io(ctrl, ns, ioucmd, issue_flags, false);
		break;
	case NVME_URING_CMD_IO_VEC:
		ret = nvme_uring_cmd_io(ctrl, ns, ioucmd, issue_flags, true);
		break;
	default:
		ret = -ENOTTY;
	}

	return ret;
}

int nvme_ns_chr_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags)
{
	struct nvme_ns *ns = container_of(file_inode(ioucmd->file)->i_cdev,
			struct nvme_ns, cdev);

	return nvme_ns_uring_cmd(ns, ioucmd, issue_flags);
}

int nvme_ns_chr_uring_cmd_iopoll(struct io_uring_cmd *ioucmd,
				 struct io_comp_batch *iob,
				 unsigned int poll_flags)
{
	struct request *req;
	int ret = 0;

	if (!(ioucmd->flags & IORING_URING_CMD_POLLED))
		return 0;

	req = READ_ONCE(ioucmd->cookie);
	if (req && blk_rq_is_poll(req))
		ret = blk_rq_poll(req, iob, poll_flags);
	return ret;
}

#ifdef CONFIG_NVME_MULTIPATH
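/*
 * Handle a controller ioctl arriving via a multipath (ns_head) node.  The
 * head's SRCU read lock cannot be held across the ioctl, so take a
 * controller reference and drop the lock before issuing it.
 */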
static int nvme_ns_head_ctrl_ioctl(struct nvme_ns *ns, unsigned int cmd,
		void __user *argp, struct nvme_ns_head *head, int srcu_idx,
		bool open_for_write)
	__releases(&head->srcu)
{
	struct nvme_ctrl *ctrl = ns->ctrl;
	int ret;

	nvme_get_ctrl(ctrl);
	srcu_read_unlock(&head->srcu, srcu_idx);
	ret = nvme_ctrl_ioctl(ctrl, cmd, argp, open_for_write);

	nvme_put_ctrl(ctrl);
	return ret;
}

int nvme_ns_head_ioctl(struct block_device *bdev, blk_mode_t mode,
		unsigned int cmd, unsigned long arg)
{
	struct nvme_ns_head *head = bdev->bd_disk->private_data;
	bool open_for_write = mode & BLK_OPEN_WRITE;
	void __user *argp = (void __user *)arg;
	struct nvme_ns *ns;
	int srcu_idx, ret = -EWOULDBLOCK;
	unsigned int flags = 0;

	if (bdev_is_partition(bdev))
		flags |= NVME_IOCTL_PARTITION;

	srcu_idx = srcu_read_lock(&head->srcu);
	ns = nvme_find_path(head);
	if (!ns)
		goto out_unlock;

	/*
	 * Handle ioctls that apply to the controller instead of the namespace
	 * separately and drop the ns SRCU reference early.  This avoids a
	 * deadlock when deleting namespaces using the passthrough interface.
	 */
	if (is_ctrl_ioctl(cmd))
		return nvme_ns_head_ctrl_ioctl(ns, cmd, argp, head, srcu_idx,
					       open_for_write);

	ret = nvme_ns_ioctl(ns, cmd, argp, flags, open_for_write);
out_unlock:
	srcu_read_unlock(&head->srcu, srcu_idx);
	return ret;
}

long nvme_ns_head_chr_ioctl(struct file *file, unsigned int cmd,
		unsigned long arg)
{
	bool open_for_write = file->f_mode & FMODE_WRITE;
	struct cdev *cdev = file_inode(file)->i_cdev;
	struct nvme_ns_head *head =
		container_of(cdev, struct nvme_ns_head, cdev);
	void __user *argp = (void __user *)arg;
	struct nvme_ns *ns;
	int srcu_idx, ret = -EWOULDBLOCK;

	srcu_idx = srcu_read_lock(&head->srcu);
	ns = nvme_find_path(head);
	if (!ns)
		goto out_unlock;

	if (is_ctrl_ioctl(cmd))
		return nvme_ns_head_ctrl_ioctl(ns, cmd, argp, head, srcu_idx,
				open_for_write);

	ret = nvme_ns_ioctl(ns, cmd, argp, 0, open_for_write);
out_unlock:
	srcu_read_unlock(&head->srcu, srcu_idx);
	return ret;
}

int nvme_ns_head_chr_uring_cmd(struct io_uring_cmd *ioucmd,
		unsigned int issue_flags)
{
	struct cdev *cdev = file_inode(ioucmd->file)->i_cdev;
	struct nvme_ns_head *head = container_of(cdev, struct nvme_ns_head, cdev);
	int srcu_idx = srcu_read_lock(&head->srcu);
	struct nvme_ns *ns = nvme_find_path(head);
	int ret = -EINVAL;

	if (ns)
		ret = nvme_ns_uring_cmd(ns, ioucmd, issue_flags);
	srcu_read_unlock(&head->srcu, srcu_idx);
	return ret;
}
#endif /* CONFIG_NVME_MULTIPATH */

int nvme_dev_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags)
{
	struct nvme_ctrl *ctrl = ioucmd->file->private_data;
	int ret;

	/* IOPOLL not supported yet */
	if (issue_flags & IO_URING_F_IOPOLL)
		return -EOPNOTSUPP;

	ret = nvme_uring_cmd_checks(issue_flags);
	if (ret)
		return ret;

	switch (ioucmd->cmd_op) {
	case NVME_URING_CMD_ADMIN:
		ret = nvme_uring_cmd_io(ctrl, NULL, ioucmd, issue_flags, false);
		break;
	case NVME_URING_CMD_ADMIN_VEC:
		ret = nvme_uring_cmd_io(ctrl, NULL, ioucmd, issue_flags, true);
		break;
	default:
		ret = -ENOTTY;
	}

	return ret;
}

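/*
 * Legacy NVME_IOCTL_IO_CMD on the controller character device: only
 * supported when the controller has exactly one namespace, which the
 * command is then issued against.
 */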
static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp,
		bool open_for_write)
{
	struct nvme_ns *ns;
	int ret, srcu_idx;

	srcu_idx = srcu_read_lock(&ctrl->srcu);
	if (list_empty(&ctrl->namespaces)) {
		ret = -ENOTTY;
		goto out_unlock;
	}

	ns = list_first_or_null_rcu(&ctrl->namespaces, struct nvme_ns, list);
	if (ns != list_last_entry(&ctrl->namespaces, struct nvme_ns, list)) {
		dev_warn(ctrl->device,
			"NVME_IOCTL_IO_CMD not supported when multiple namespaces present!\n");
		ret = -EINVAL;
		goto out_unlock;
	}

	dev_warn(ctrl->device,
		"using deprecated NVME_IOCTL_IO_CMD ioctl on the char device!\n");
	if (!nvme_get_ns(ns)) {
		ret = -ENXIO;
		goto out_unlock;
	}
	srcu_read_unlock(&ctrl->srcu, srcu_idx);

	ret = nvme_user_cmd(ctrl, ns, argp, 0, open_for_write);
	nvme_put_ns(ns);
	return ret;

out_unlock:
	srcu_read_unlock(&ctrl->srcu, srcu_idx);
	return ret;
}

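/*
 * ioctl entry point for the controller character device (/dev/nvmeX).
 */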
long nvme_dev_ioctl(struct file *file, unsigned int cmd,
		unsigned long arg)
{
	bool open_for_write = file->f_mode & FMODE_WRITE;
	struct nvme_ctrl *ctrl = file->private_data;
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	case NVME_IOCTL_ADMIN_CMD:
		return nvme_user_cmd(ctrl, NULL, argp, 0, open_for_write);
	case NVME_IOCTL_ADMIN64_CMD:
		return nvme_user_cmd64(ctrl, NULL, argp, 0, open_for_write);
	case NVME_IOCTL_IO_CMD:
		return nvme_dev_user_cmd(ctrl, argp, open_for_write);
	case NVME_IOCTL_RESET:
		if (!capable(CAP_SYS_ADMIN))
			return -EACCES;
		dev_warn(ctrl->device, "resetting controller\n");
		return nvme_reset_ctrl_sync(ctrl);
	case NVME_IOCTL_SUBSYS_RESET:
		if (!capable(CAP_SYS_ADMIN))
			return -EACCES;
		return nvme_reset_subsystem(ctrl);
	case NVME_IOCTL_RESCAN:
		if (!capable(CAP_SYS_ADMIN))
			return -EACCES;
		nvme_queue_scan(ctrl);
		return 0;
	default:
		return -ENOTTY;
	}
}