// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2011-2014, Intel Corporation.
 * Copyright (c) 2017-2021 Christoph Hellwig.
 */
#include <linux/ptrace.h>	/* for force_successful_syscall_return */
#include <linux/nvme_ioctl.h>
#include "nvme.h"

/*
 * Convert integer values from ioctl structures to user pointers, silently
 * ignoring the upper bits in the compat case to match behaviour of 32-bit
 * kernels.
 */
static void __user *nvme_to_user_ptr(uintptr_t ptrval)
{
	if (in_compat_syscall())
		ptrval = (compat_uptr_t)ptrval;
	return (void __user *)ptrval;
}
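
/*
 * Worked example (hypothetical values): if a 32-bit process leaves stack
 * garbage in the upper half of a 64-bit ioctl field and the kernel sees
 * 0xdeadbeef08001000, the cast to compat_uptr_t truncates it to
 * 0x08001000, exactly the pointer a native 32-bit kernel would have
 * received.
 */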

/*
 * Bounce user metadata through a kernel buffer: copy it in up front for
 * writes, then attach the buffer to @bio as a single-vector integrity
 * payload.  On success the buffer is returned so that the caller can copy
 * it back to userspace after a read and free it; on failure an ERR_PTR()
 * is returned instead.
 */
static void *nvme_add_user_metadata(struct bio *bio, void __user *ubuf,
		unsigned len, u32 seed, bool write)
{
	struct bio_integrity_payload *bip;
	int ret = -ENOMEM;
	void *buf;

	buf = kmalloc(len, GFP_KERNEL);
	if (!buf)
		goto out;

	ret = -EFAULT;
	if (write && copy_from_user(buf, ubuf, len))
		goto out_free_meta;

	bip = bio_integrity_alloc(bio, GFP_KERNEL, 1);
	if (IS_ERR(bip)) {
		ret = PTR_ERR(bip);
		goto out_free_meta;
	}

	bip->bip_iter.bi_size = len;
	bip->bip_iter.bi_sector = seed;
	ret = bio_integrity_add_page(bio, virt_to_page(buf), len,
			offset_in_page(buf));
	if (ret == len)
		return buf;
	ret = -ENOMEM;
out_free_meta:
	kfree(buf);
out:
	return ERR_PTR(ret);
}

/*
 * Map the user data (and optional metadata) buffers, run the passthrough
 * command synchronously, and copy any result and read metadata back to
 * userspace before tearing the request down again.
 */
static int nvme_submit_user_cmd(struct request_queue *q,
		struct nvme_command *cmd, void __user *ubuffer,
		unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
		u32 meta_seed, u64 *result, unsigned timeout)
{
	bool write = nvme_is_write(cmd);
	struct nvme_ns *ns = q->queuedata;
	struct block_device *bdev = ns ? ns->disk->part0 : NULL;
	struct request *req;
	struct bio *bio = NULL;
	void *meta = NULL;
	int ret;

	req = nvme_alloc_request(q, cmd, 0);
	if (IS_ERR(req))
		return PTR_ERR(req);

	if (timeout)
		req->timeout = timeout;
	nvme_req(req)->flags |= NVME_REQ_USERCMD;

	if (ubuffer && bufflen) {
		ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen,
				GFP_KERNEL);
		if (ret)
			goto out;
		bio = req->bio;
		if (bdev)
			bio_set_dev(bio, bdev);
		if (bdev && meta_buffer && meta_len) {
			meta = nvme_add_user_metadata(bio, meta_buffer, meta_len,
					meta_seed, write);
			if (IS_ERR(meta)) {
				ret = PTR_ERR(meta);
				goto out_unmap;
			}
			req->cmd_flags |= REQ_INTEGRITY;
		}
	}

	nvme_execute_passthru_rq(req);
	if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
		ret = -EINTR;
	else
		ret = nvme_req(req)->status;
	if (result)
		*result = le64_to_cpu(nvme_req(req)->result.u64);
	if (meta && !ret && !write) {
		if (copy_to_user(meta_buffer, meta, meta_len))
			ret = -EFAULT;
	}
	kfree(meta);
 out_unmap:
	if (bio)
		blk_rq_unmap_user(bio);
 out:
	blk_mq_free_request(req);
	return ret;
}

static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
{
	struct nvme_user_io io;
	struct nvme_command c;
	unsigned length, meta_len;
	void __user *metadata;

	if (copy_from_user(&io, uio, sizeof(io)))
		return -EFAULT;
	if (io.flags)
		return -EINVAL;

	switch (io.opcode) {
	case nvme_cmd_write:
	case nvme_cmd_read:
	case nvme_cmd_compare:
		break;
	default:
		return -EINVAL;
	}

	length = (io.nblocks + 1) << ns->lba_shift;

	if ((io.control & NVME_RW_PRINFO_PRACT) &&
	    ns->ms == sizeof(struct t10_pi_tuple)) {
		/*
		 * Protection information is stripped/inserted by the
		 * controller.
		 */
		if (nvme_to_user_ptr(io.metadata))
			return -EINVAL;
		meta_len = 0;
		metadata = NULL;
	} else {
		meta_len = (io.nblocks + 1) * ns->ms;
		metadata = nvme_to_user_ptr(io.metadata);
	}

	if (ns->features & NVME_NS_EXT_LBAS) {
		length += meta_len;
		meta_len = 0;
	} else if (meta_len) {
		if ((io.metadata & 3) || !io.metadata)
			return -EINVAL;
	}

	memset(&c, 0, sizeof(c));
	c.rw.opcode = io.opcode;
	c.rw.flags = io.flags;
	c.rw.nsid = cpu_to_le32(ns->head->ns_id);
	c.rw.slba = cpu_to_le64(io.slba);
	c.rw.length = cpu_to_le16(io.nblocks);
	c.rw.control = cpu_to_le16(io.control);
	c.rw.dsmgmt = cpu_to_le32(io.dsmgmt);
	c.rw.reftag = cpu_to_le32(io.reftag);
	c.rw.apptag = cpu_to_le16(io.apptag);
	c.rw.appmask = cpu_to_le16(io.appmask);

	return nvme_submit_user_cmd(ns->queue, &c,
			nvme_to_user_ptr(io.addr), length,
			metadata, meta_len, lower_32_bits(io.slba), NULL, 0);
}
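
/*
 * Userspace sketch (hypothetical buffers; assumes a namespace formatted
 * with 512-byte data blocks and 8 bytes of separate metadata): read eight
 * blocks starting at LBA 0.  nblocks is 0's based, so 7 means eight LBAs;
 * the driver derives length = (7 + 1) << lba_shift = 4096 bytes and
 * meta_len = (7 + 1) * 8 = 64 bytes from it.
 *
 *	struct nvme_user_io io = {
 *		.opcode   = 0x02,			// nvme_cmd_read
 *		.nblocks  = 7,				// eight LBAs, 0's based
 *		.slba     = 0,
 *		.addr     = (__u64)(uintptr_t)data_buf,
 *		.metadata = (__u64)(uintptr_t)meta_buf,
 *	};
 *	int ret = ioctl(fd, NVME_IOCTL_SUBMIT_IO, &io);
 */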

static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
			struct nvme_passthru_cmd __user *ucmd)
{
	struct nvme_passthru_cmd cmd;
	struct nvme_command c;
	unsigned timeout = 0;
	u64 result;
	int status;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
		return -EFAULT;
	if (cmd.flags)
		return -EINVAL;
	if (ns && cmd.nsid != ns->head->ns_id) {
		dev_err(ctrl->device,
			"%s: nsid (%u) in cmd does not match nsid (%u) of namespace\n",
			current->comm, cmd.nsid, ns->head->ns_id);
		return -EINVAL;
	}

	memset(&c, 0, sizeof(c));
	c.common.opcode = cmd.opcode;
	c.common.flags = cmd.flags;
	c.common.nsid = cpu_to_le32(cmd.nsid);
	c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
	c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
	c.common.cdw10 = cpu_to_le32(cmd.cdw10);
	c.common.cdw11 = cpu_to_le32(cmd.cdw11);
	c.common.cdw12 = cpu_to_le32(cmd.cdw12);
	c.common.cdw13 = cpu_to_le32(cmd.cdw13);
	c.common.cdw14 = cpu_to_le32(cmd.cdw14);
	c.common.cdw15 = cpu_to_le32(cmd.cdw15);

	if (cmd.timeout_ms)
		timeout = msecs_to_jiffies(cmd.timeout_ms);

	status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
			nvme_to_user_ptr(cmd.addr), cmd.data_len,
			nvme_to_user_ptr(cmd.metadata), cmd.metadata_len,
			0, &result, timeout);

	if (status >= 0) {
		if (put_user(result, &ucmd->result))
			return -EFAULT;
	}

	return status;
}
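
/*
 * Userspace sketch (Identify Controller, opcode 0x06 with CNS 0x01 in
 * cdw10 per the NVMe specification; buffer name is illustrative):
 *
 *	__u8 id[4096];
 *	struct nvme_passthru_cmd cmd = {
 *		.opcode   = 0x06,
 *		.cdw10    = 0x01,
 *		.addr     = (__u64)(uintptr_t)id,
 *		.data_len = sizeof(id),
 *	};
 *	int err = ioctl(fd, NVME_IOCTL_ADMIN_CMD, &cmd);
 */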

/*
 * Same flow as nvme_user_cmd(), but the 64-bit passthrough structure hands
 * the full 64-bit completion result back to userspace.
 */
static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
			struct nvme_passthru_cmd64 __user *ucmd)
{
	struct nvme_passthru_cmd64 cmd;
	struct nvme_command c;
	unsigned timeout = 0;
	int status;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
		return -EFAULT;
	if (cmd.flags)
		return -EINVAL;
	if (ns && cmd.nsid != ns->head->ns_id) {
		dev_err(ctrl->device,
			"%s: nsid (%u) in cmd does not match nsid (%u) of namespace\n",
			current->comm, cmd.nsid, ns->head->ns_id);
		return -EINVAL;
	}

	memset(&c, 0, sizeof(c));
	c.common.opcode = cmd.opcode;
	c.common.flags = cmd.flags;
	c.common.nsid = cpu_to_le32(cmd.nsid);
	c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
	c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
	c.common.cdw10 = cpu_to_le32(cmd.cdw10);
	c.common.cdw11 = cpu_to_le32(cmd.cdw11);
	c.common.cdw12 = cpu_to_le32(cmd.cdw12);
	c.common.cdw13 = cpu_to_le32(cmd.cdw13);
	c.common.cdw14 = cpu_to_le32(cmd.cdw14);
	c.common.cdw15 = cpu_to_le32(cmd.cdw15);

	if (cmd.timeout_ms)
		timeout = msecs_to_jiffies(cmd.timeout_ms);

	status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
			nvme_to_user_ptr(cmd.addr), cmd.data_len,
			nvme_to_user_ptr(cmd.metadata), cmd.metadata_len,
			0, &cmd.result, timeout);

	if (status >= 0) {
		if (put_user(cmd.result, &ucmd->result))
			return -EFAULT;
	}

	return status;
}

static bool is_ctrl_ioctl(unsigned int cmd)
{
	if (cmd == NVME_IOCTL_ADMIN_CMD || cmd == NVME_IOCTL_ADMIN64_CMD)
		return true;
	if (is_sed_ioctl(cmd))
		return true;
	return false;
}

static int nvme_ctrl_ioctl(struct nvme_ctrl *ctrl, unsigned int cmd,
		void __user *argp)
{
	switch (cmd) {
	case NVME_IOCTL_ADMIN_CMD:
		return nvme_user_cmd(ctrl, NULL, argp);
	case NVME_IOCTL_ADMIN64_CMD:
		return nvme_user_cmd64(ctrl, NULL, argp);
	default:
		return sed_ioctl(ctrl->opal_dev, cmd, argp);
	}
}

#ifdef COMPAT_FOR_U64_ALIGNMENT
struct nvme_user_io32 {
	__u8	opcode;
	__u8	flags;
	__u16	control;
	__u16	nblocks;
	__u16	rsvd;
	__u64	metadata;
	__u64	addr;
	__u64	slba;
	__u32	dsmgmt;
	__u32	reftag;
	__u16	apptag;
	__u16	appmask;
} __attribute__((__packed__));
#define NVME_IOCTL_SUBMIT_IO32	_IOW('N', 0x42, struct nvme_user_io32)
#endif /* COMPAT_FOR_U64_ALIGNMENT */
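
/*
 * Worked example of the alignment problem: struct nvme_user_io ends on a
 * __u16, so sizeof() depends on the alignment of __u64.  On x86-64 that
 * alignment is 8 and the struct rounds up to 48 bytes; on i386 it is only
 * 4, giving 44 bytes.  Because _IOW() encodes the size into the command
 * number, a 32-bit binary on a 64-bit kernel issues a different ioctl
 * value, which the packed definition above lets us match.
 */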

static int nvme_ns_ioctl(struct nvme_ns *ns, unsigned int cmd,
		void __user *argp)
{
	switch (cmd) {
	case NVME_IOCTL_ID:
		force_successful_syscall_return();
		return ns->head->ns_id;
	case NVME_IOCTL_IO_CMD:
		return nvme_user_cmd(ns->ctrl, ns, argp);
	/*
	 * struct nvme_user_io can have different padding on some 32-bit ABIs.
	 * Just accept the compat version as all fields that are used are the
	 * same size and at the same offset.
	 */
#ifdef COMPAT_FOR_U64_ALIGNMENT
	case NVME_IOCTL_SUBMIT_IO32:
#endif
	case NVME_IOCTL_SUBMIT_IO:
		return nvme_submit_io(ns, argp);
	case NVME_IOCTL_IO64_CMD:
		return nvme_user_cmd64(ns->ctrl, ns, argp);
	default:
		if (!ns->ndev)
			return -ENOTTY;
		return nvme_nvm_ioctl(ns, cmd, argp);
	}
}
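
/*
 * Userspace sketch: NVME_IOCTL_ID reports the namespace ID as a positive
 * return value instead of through an argument, which is why the handler
 * calls force_successful_syscall_return() above:
 *
 *	int nsid = ioctl(fd, NVME_IOCTL_ID);
 */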

static int __nvme_ioctl(struct nvme_ns *ns, unsigned int cmd, void __user *arg)
{
	if (is_ctrl_ioctl(cmd))
		return nvme_ctrl_ioctl(ns->ctrl, cmd, arg);
	return nvme_ns_ioctl(ns, cmd, arg);
}

int nvme_ioctl(struct block_device *bdev, fmode_t mode,
		unsigned int cmd, unsigned long arg)
{
	struct nvme_ns *ns = bdev->bd_disk->private_data;

	return __nvme_ioctl(ns, cmd, (void __user *)arg);
}

long nvme_ns_chr_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct nvme_ns *ns =
		container_of(file_inode(file)->i_cdev, struct nvme_ns, cdev);

	return __nvme_ioctl(ns, cmd, (void __user *)arg);
}
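
/*
 * Example (hypothetical device name): the same per-namespace ioctls are
 * reachable through the generic character device, which stays usable even
 * when the block device cannot be set up:
 *
 *	int fd = open("/dev/ng0n1", O_RDONLY);
 *	int nsid = ioctl(fd, NVME_IOCTL_ID);
 */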

#ifdef CONFIG_NVME_MULTIPATH
static int nvme_ns_head_ctrl_ioctl(struct nvme_ns *ns, unsigned int cmd,
		void __user *argp, struct nvme_ns_head *head, int srcu_idx)
{
	struct nvme_ctrl *ctrl = ns->ctrl;
	int ret;

	nvme_get_ctrl(ns->ctrl);
	nvme_put_ns_from_disk(head, srcu_idx);
	ret = nvme_ctrl_ioctl(ns->ctrl, cmd, argp);

	nvme_put_ctrl(ctrl);
	return ret;
}

int nvme_ns_head_ioctl(struct block_device *bdev, fmode_t mode,
		unsigned int cmd, unsigned long arg)
{
	struct nvme_ns_head *head = NULL;
	void __user *argp = (void __user *)arg;
	struct nvme_ns *ns;
	int srcu_idx, ret;

	ns = nvme_get_ns_from_disk(bdev->bd_disk, &head, &srcu_idx);
	if (unlikely(!ns))
		return -EWOULDBLOCK;

	/*
	 * Handle ioctls that apply to the controller instead of the namespace
	 * separately and drop the ns SRCU reference early.  This avoids a
	 * deadlock when deleting namespaces using the passthrough interface.
	 */
	if (is_ctrl_ioctl(cmd))
		ret = nvme_ns_head_ctrl_ioctl(ns, cmd, argp, head, srcu_idx);
	else {
		ret = nvme_ns_ioctl(ns, cmd, argp);
		nvme_put_ns_from_disk(head, srcu_idx);
	}

	return ret;
}

long nvme_ns_head_chr_ioctl(struct file *file, unsigned int cmd,
		unsigned long arg)
{
	struct cdev *cdev = file_inode(file)->i_cdev;
	struct nvme_ns_head *head =
		container_of(cdev, struct nvme_ns_head, cdev);
	void __user *argp = (void __user *)arg;
	struct nvme_ns *ns;
	int srcu_idx, ret;

	srcu_idx = srcu_read_lock(&head->srcu);
	ns = nvme_find_path(head);
	if (!ns) {
		srcu_read_unlock(&head->srcu, srcu_idx);
		return -EWOULDBLOCK;
	}

	if (is_ctrl_ioctl(cmd))
		return nvme_ns_head_ctrl_ioctl(ns, cmd, argp, head, srcu_idx);

	ret = nvme_ns_ioctl(ns, cmd, argp);
	nvme_put_ns_from_disk(head, srcu_idx);

	return ret;
}
#endif /* CONFIG_NVME_MULTIPATH */

static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp)
{
	struct nvme_ns *ns;
	int ret;

	down_read(&ctrl->namespaces_rwsem);
	if (list_empty(&ctrl->namespaces)) {
		ret = -ENOTTY;
		goto out_unlock;
	}

	ns = list_first_entry(&ctrl->namespaces, struct nvme_ns, list);
	if (ns != list_last_entry(&ctrl->namespaces, struct nvme_ns, list)) {
		dev_warn(ctrl->device,
			"NVME_IOCTL_IO_CMD not supported when multiple namespaces present!\n");
		ret = -EINVAL;
		goto out_unlock;
	}

	dev_warn(ctrl->device,
		"using deprecated NVME_IOCTL_IO_CMD ioctl on the char device!\n");
	kref_get(&ns->kref);
	up_read(&ctrl->namespaces_rwsem);

	ret = nvme_user_cmd(ctrl, ns, argp);
	nvme_put_ns(ns);
	return ret;

out_unlock:
	up_read(&ctrl->namespaces_rwsem);
	return ret;
}

long nvme_dev_ioctl(struct file *file, unsigned int cmd,
		unsigned long arg)
{
	struct nvme_ctrl *ctrl = file->private_data;
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	case NVME_IOCTL_ADMIN_CMD:
		return nvme_user_cmd(ctrl, NULL, argp);
	case NVME_IOCTL_ADMIN64_CMD:
		return nvme_user_cmd64(ctrl, NULL, argp);
	case NVME_IOCTL_IO_CMD:
		return nvme_dev_user_cmd(ctrl, argp);
	case NVME_IOCTL_RESET:
		dev_warn(ctrl->device, "resetting controller\n");
		return nvme_reset_ctrl_sync(ctrl);
	case NVME_IOCTL_SUBSYS_RESET:
		return nvme_reset_subsystem(ctrl);
	case NVME_IOCTL_RESCAN:
		nvme_queue_scan(ctrl);
		return 0;
	default:
		return -ENOTTY;
	}
}
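
/*
 * Userspace sketch (hypothetical controller node): the management ioctls
 * above take no argument payload, e.g. resetting a controller through its
 * character device:
 *
 *	int fd = open("/dev/nvme0", O_RDWR);
 *	int err = ioctl(fd, NVME_IOCTL_RESET);
 */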
499