// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2011-2014, Intel Corporation.
 * Copyright (c) 2017-2021 Christoph Hellwig.
 */
#include <linux/ptrace.h>	/* for force_successful_syscall_return */
#include <linux/nvme_ioctl.h>
#include "nvme.h"

/*
 * Convert integer values from ioctl structures to user pointers, silently
 * ignoring the upper bits in the compat case to match behaviour of 32-bit
 * kernels.
 */
static void __user *nvme_to_user_ptr(uintptr_t ptrval)
{
	if (in_compat_syscall())
		ptrval = (compat_uptr_t)ptrval;
	return (void __user *)ptrval;
}

static void *nvme_add_user_metadata(struct bio *bio, void __user *ubuf,
		unsigned len, u32 seed, bool write)
{
	struct bio_integrity_payload *bip;
	int ret = -ENOMEM;
	void *buf;

	buf = kmalloc(len, GFP_KERNEL);
	if (!buf)
		goto out;

	ret = -EFAULT;
	if (write && copy_from_user(buf, ubuf, len))
		goto out_free_meta;

	bip = bio_integrity_alloc(bio, GFP_KERNEL, 1);
	if (IS_ERR(bip)) {
		ret = PTR_ERR(bip);
		goto out_free_meta;
	}

	bip->bip_iter.bi_size = len;
	bip->bip_iter.bi_sector = seed;
	ret = bio_integrity_add_page(bio, virt_to_page(buf), len,
			offset_in_page(buf));
	if (ret == len)
		return buf;
	ret = -ENOMEM;
out_free_meta:
	kfree(buf);
out:
	return ERR_PTR(ret);
}

static int nvme_submit_user_cmd(struct request_queue *q,
		struct nvme_command *cmd, void __user *ubuffer,
		unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
		u32 meta_seed, u64 *result, unsigned timeout)
{
	bool write = nvme_is_write(cmd);
	struct nvme_ns *ns = q->queuedata;
	struct block_device *bdev = ns ? ns->disk->part0 : NULL;
	struct request *req;
	struct bio *bio = NULL;
	void *meta = NULL;
	int ret;

	req = nvme_alloc_request(q, cmd, 0);
	if (IS_ERR(req))
		return PTR_ERR(req);

	if (timeout)
		req->timeout = timeout;
	nvme_req(req)->flags |= NVME_REQ_USERCMD;

	if (ubuffer && bufflen) {
		ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen,
				GFP_KERNEL);
		if (ret)
			goto out;
		bio = req->bio;
		if (bdev)
			bio_set_dev(bio, bdev);
		if (bdev && meta_buffer && meta_len) {
			meta = nvme_add_user_metadata(bio, meta_buffer,
					meta_len, meta_seed, write);
			if (IS_ERR(meta)) {
				ret = PTR_ERR(meta);
				goto out_unmap;
			}
			req->cmd_flags |= REQ_INTEGRITY;
		}
	}

	ret = nvme_execute_passthru_rq(req);
	if (result)
		*result = le64_to_cpu(nvme_req(req)->result.u64);
	if (meta && !ret && !write) {
		if (copy_to_user(meta_buffer, meta, meta_len))
			ret = -EFAULT;
	}
	kfree(meta);
out_unmap:
	if (bio)
		blk_rq_unmap_user(bio);
out:
	blk_mq_free_request(req);
	return ret;
}

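/*
 * Illustrative userspace sketch (not part of the driver): nvme_submit_io()
 * below backs NVME_IOCTL_SUBMIT_IO on the namespace block device. A minimal
 * caller reading one logical block from LBA 0 might look like this; the
 * device path and buffer size are assumptions for the example, and nblocks
 * is 0-based (0 means a single block).
 *
 *	unsigned char buf[4096];		// >= one logical block
 *	struct nvme_user_io io = {
 *		.opcode  = 0x02,		// nvme_cmd_read
 *		.nblocks = 0,			// read a single block
 *		.slba    = 0,
 *		.addr    = (__u64)(uintptr_t)buf,
 *	};
 *	int fd = open("/dev/nvme0n1", O_RDONLY);
 *	if (ioctl(fd, NVME_IOCTL_SUBMIT_IO, &io) < 0)
 *		perror("NVME_IOCTL_SUBMIT_IO");
 */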
static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
{
	struct nvme_user_io io;
	struct nvme_command c;
	unsigned length, meta_len;
	void __user *metadata;

	if (copy_from_user(&io, uio, sizeof(io)))
		return -EFAULT;
	if (io.flags)
		return -EINVAL;

	switch (io.opcode) {
	case nvme_cmd_write:
	case nvme_cmd_read:
	case nvme_cmd_compare:
		break;
	default:
		return -EINVAL;
	}

	length = (io.nblocks + 1) << ns->lba_shift;

	if ((io.control & NVME_RW_PRINFO_PRACT) &&
	    ns->ms == sizeof(struct t10_pi_tuple)) {
		/*
		 * Protection information is stripped/inserted by the
		 * controller.
		 */
		if (nvme_to_user_ptr(io.metadata))
			return -EINVAL;
		meta_len = 0;
		metadata = NULL;
	} else {
		meta_len = (io.nblocks + 1) * ns->ms;
		metadata = nvme_to_user_ptr(io.metadata);
	}

	if (ns->features & NVME_NS_EXT_LBAS) {
		length += meta_len;
		meta_len = 0;
	} else if (meta_len) {
		if ((io.metadata & 3) || !io.metadata)
			return -EINVAL;
	}

	memset(&c, 0, sizeof(c));
	c.rw.opcode = io.opcode;
	c.rw.flags = io.flags;
	c.rw.nsid = cpu_to_le32(ns->head->ns_id);
	c.rw.slba = cpu_to_le64(io.slba);
	c.rw.length = cpu_to_le16(io.nblocks);
	c.rw.control = cpu_to_le16(io.control);
	c.rw.dsmgmt = cpu_to_le32(io.dsmgmt);
	c.rw.reftag = cpu_to_le32(io.reftag);
	c.rw.apptag = cpu_to_le16(io.apptag);
	c.rw.appmask = cpu_to_le16(io.appmask);

	return nvme_submit_user_cmd(ns->queue, &c,
			nvme_to_user_ptr(io.addr), length,
			metadata, meta_len, lower_32_bits(io.slba), NULL, 0);
}

static bool nvme_validate_passthru_nsid(struct nvme_ctrl *ctrl,
					struct nvme_ns *ns, __u32 nsid)
{
	if (ns && nsid != ns->head->ns_id) {
		dev_err(ctrl->device,
			"%s: nsid (%u) in cmd does not match nsid (%u) of namespace\n",
			current->comm, nsid, ns->head->ns_id);
		return false;
	}

	return true;
}

static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
			struct nvme_passthru_cmd __user *ucmd)
{
	struct nvme_passthru_cmd cmd;
	struct nvme_command c;
	unsigned timeout = 0;
	u64 result;
	int status;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
		return -EFAULT;
	if (cmd.flags)
		return -EINVAL;
	if (!nvme_validate_passthru_nsid(ctrl, ns, cmd.nsid))
		return -EINVAL;

	memset(&c, 0, sizeof(c));
	c.common.opcode = cmd.opcode;
	c.common.flags = cmd.flags;
	c.common.nsid = cpu_to_le32(cmd.nsid);
	c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
	c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
	c.common.cdw10 = cpu_to_le32(cmd.cdw10);
	c.common.cdw11 = cpu_to_le32(cmd.cdw11);
	c.common.cdw12 = cpu_to_le32(cmd.cdw12);
	c.common.cdw13 = cpu_to_le32(cmd.cdw13);
	c.common.cdw14 = cpu_to_le32(cmd.cdw14);
	c.common.cdw15 = cpu_to_le32(cmd.cdw15);

	if (cmd.timeout_ms)
		timeout = msecs_to_jiffies(cmd.timeout_ms);

	status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
			nvme_to_user_ptr(cmd.addr), cmd.data_len,
			nvme_to_user_ptr(cmd.metadata), cmd.metadata_len,
			0, &result, timeout);

	if (status >= 0) {
		if (put_user(result, &ucmd->result))
			return -EFAULT;
	}

	return status;
}

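/*
 * Illustrative userspace sketch (not part of the driver): nvme_user_cmd()
 * above and nvme_user_cmd64() below back the NVME_IOCTL_ADMIN_CMD /
 * NVME_IOCTL_ADMIN64_CMD passthrough interface (CAP_SYS_ADMIN required).
 * An Identify Controller admin command, for example, could be issued like
 * this; the device path is an assumption for the example.
 *
 *	unsigned char id[4096];			// Identify data is 4096 bytes
 *	struct nvme_passthru_cmd cmd = {
 *		.opcode   = 0x06,		// Identify
 *		.addr     = (__u64)(uintptr_t)id,
 *		.data_len = sizeof(id),
 *		.cdw10    = 1,			// CNS 01h: Identify Controller
 *	};
 *	int fd = open("/dev/nvme0", O_RDWR);
 *	if (ioctl(fd, NVME_IOCTL_ADMIN_CMD, &cmd) < 0)
 *		perror("NVME_IOCTL_ADMIN_CMD");
 */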
static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
			struct nvme_passthru_cmd64 __user *ucmd)
{
	struct nvme_passthru_cmd64 cmd;
	struct nvme_command c;
	unsigned timeout = 0;
	int status;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
		return -EFAULT;
	if (cmd.flags)
		return -EINVAL;
	if (!nvme_validate_passthru_nsid(ctrl, ns, cmd.nsid))
		return -EINVAL;

	memset(&c, 0, sizeof(c));
	c.common.opcode = cmd.opcode;
	c.common.flags = cmd.flags;
	c.common.nsid = cpu_to_le32(cmd.nsid);
	c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
	c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
	c.common.cdw10 = cpu_to_le32(cmd.cdw10);
	c.common.cdw11 = cpu_to_le32(cmd.cdw11);
	c.common.cdw12 = cpu_to_le32(cmd.cdw12);
	c.common.cdw13 = cpu_to_le32(cmd.cdw13);
	c.common.cdw14 = cpu_to_le32(cmd.cdw14);
	c.common.cdw15 = cpu_to_le32(cmd.cdw15);

	if (cmd.timeout_ms)
		timeout = msecs_to_jiffies(cmd.timeout_ms);

	status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
			nvme_to_user_ptr(cmd.addr), cmd.data_len,
			nvme_to_user_ptr(cmd.metadata), cmd.metadata_len,
			0, &cmd.result, timeout);

	if (status >= 0) {
		if (put_user(cmd.result, &ucmd->result))
			return -EFAULT;
	}

	return status;
}

static bool is_ctrl_ioctl(unsigned int cmd)
{
	if (cmd == NVME_IOCTL_ADMIN_CMD || cmd == NVME_IOCTL_ADMIN64_CMD)
		return true;
	if (is_sed_ioctl(cmd))
		return true;
	return false;
}

static int nvme_ctrl_ioctl(struct nvme_ctrl *ctrl, unsigned int cmd,
		void __user *argp)
{
	switch (cmd) {
	case NVME_IOCTL_ADMIN_CMD:
		return nvme_user_cmd(ctrl, NULL, argp);
	case NVME_IOCTL_ADMIN64_CMD:
		return nvme_user_cmd64(ctrl, NULL, argp);
	default:
		return sed_ioctl(ctrl->opal_dev, cmd, argp);
	}
}

#ifdef COMPAT_FOR_U64_ALIGNMENT
struct nvme_user_io32 {
	__u8	opcode;
	__u8	flags;
	__u16	control;
	__u16	nblocks;
	__u16	rsvd;
	__u64	metadata;
	__u64	addr;
	__u64	slba;
	__u32	dsmgmt;
	__u32	reftag;
	__u16	apptag;
	__u16	appmask;
} __attribute__((__packed__));
#define NVME_IOCTL_SUBMIT_IO32	_IOW('N', 0x42, struct nvme_user_io32)
#endif /* COMPAT_FOR_U64_ALIGNMENT */

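/*
 * Illustrative userspace sketch (not part of the driver): NVME_IOCTL_ID,
 * handled in nvme_ns_ioctl() below, reports the namespace ID as the ioctl
 * return value itself; force_successful_syscall_return() keeps architectures
 * that flag syscall errors out of band from mistaking the raw ID for an
 * error code. The device path is an assumption for the example.
 *
 *	int fd = open("/dev/nvme0n1", O_RDONLY);
 *	int nsid = ioctl(fd, NVME_IOCTL_ID);
 *	if (nsid < 0)
 *		perror("NVME_IOCTL_ID");
 */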
static int nvme_ns_ioctl(struct nvme_ns *ns, unsigned int cmd,
		void __user *argp)
{
	switch (cmd) {
	case NVME_IOCTL_ID:
		force_successful_syscall_return();
		return ns->head->ns_id;
	case NVME_IOCTL_IO_CMD:
		return nvme_user_cmd(ns->ctrl, ns, argp);
	/*
	 * struct nvme_user_io can have different padding on some 32-bit ABIs.
	 * Just accept the compat version as all fields that are used are the
	 * same size and at the same offset.
	 */
#ifdef COMPAT_FOR_U64_ALIGNMENT
	case NVME_IOCTL_SUBMIT_IO32:
#endif
	case NVME_IOCTL_SUBMIT_IO:
		return nvme_submit_io(ns, argp);
	case NVME_IOCTL_IO64_CMD:
		return nvme_user_cmd64(ns->ctrl, ns, argp);
	default:
		if (!ns->ndev)
			return -ENOTTY;
		return nvme_nvm_ioctl(ns, cmd, argp);
	}
}

static int __nvme_ioctl(struct nvme_ns *ns, unsigned int cmd, void __user *arg)
{
	if (is_ctrl_ioctl(cmd))
		return nvme_ctrl_ioctl(ns->ctrl, cmd, arg);
	return nvme_ns_ioctl(ns, cmd, arg);
}

int nvme_ioctl(struct block_device *bdev, fmode_t mode,
		unsigned int cmd, unsigned long arg)
{
	struct nvme_ns *ns = bdev->bd_disk->private_data;

	return __nvme_ioctl(ns, cmd, (void __user *)arg);
}

long nvme_ns_chr_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct nvme_ns *ns =
		container_of(file_inode(file)->i_cdev, struct nvme_ns, cdev);

	return __nvme_ioctl(ns, cmd, (void __user *)arg);
}

#ifdef CONFIG_NVME_MULTIPATH
static int nvme_ns_head_ctrl_ioctl(struct nvme_ns *ns, unsigned int cmd,
		void __user *argp, struct nvme_ns_head *head, int srcu_idx)
	__releases(&head->srcu)
{
	struct nvme_ctrl *ctrl = ns->ctrl;
	int ret;

	nvme_get_ctrl(ns->ctrl);
	srcu_read_unlock(&head->srcu, srcu_idx);
	ret = nvme_ctrl_ioctl(ns->ctrl, cmd, argp);

	nvme_put_ctrl(ctrl);
	return ret;
}

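/*
 * nvme_ns_head_ioctl() and nvme_ns_head_chr_ioctl() below are the multipath
 * entry points for the shared namespace-head block and character devices.
 * Both look up a live path under head->srcu with nvme_find_path() and fail
 * with -EWOULDBLOCK when no path is available; controller-level ioctls are
 * handed off to nvme_ns_head_ctrl_ioctl() above, which pins the controller
 * and releases the SRCU read lock early.
 */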
int nvme_ns_head_ioctl(struct block_device *bdev, fmode_t mode,
		unsigned int cmd, unsigned long arg)
{
	struct nvme_ns_head *head = bdev->bd_disk->private_data;
	void __user *argp = (void __user *)arg;
	struct nvme_ns *ns;
	int srcu_idx, ret = -EWOULDBLOCK;

	srcu_idx = srcu_read_lock(&head->srcu);
	ns = nvme_find_path(head);
	if (!ns)
		goto out_unlock;

	/*
	 * Handle ioctls that apply to the controller instead of the namespace
	 * separately and drop the ns SRCU reference early.  This avoids a
	 * deadlock when deleting namespaces using the passthrough interface.
	 */
	if (is_ctrl_ioctl(cmd))
		return nvme_ns_head_ctrl_ioctl(ns, cmd, argp, head, srcu_idx);

	ret = nvme_ns_ioctl(ns, cmd, argp);
out_unlock:
	srcu_read_unlock(&head->srcu, srcu_idx);
	return ret;
}

long nvme_ns_head_chr_ioctl(struct file *file, unsigned int cmd,
		unsigned long arg)
{
	struct cdev *cdev = file_inode(file)->i_cdev;
	struct nvme_ns_head *head =
		container_of(cdev, struct nvme_ns_head, cdev);
	void __user *argp = (void __user *)arg;
	struct nvme_ns *ns;
	int srcu_idx, ret = -EWOULDBLOCK;

	srcu_idx = srcu_read_lock(&head->srcu);
	ns = nvme_find_path(head);
	if (!ns)
		goto out_unlock;

	if (is_ctrl_ioctl(cmd))
		return nvme_ns_head_ctrl_ioctl(ns, cmd, argp, head, srcu_idx);

	ret = nvme_ns_ioctl(ns, cmd, argp);
out_unlock:
	srcu_read_unlock(&head->srcu, srcu_idx);
	return ret;
}
#endif /* CONFIG_NVME_MULTIPATH */

static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp)
{
	struct nvme_ns *ns;
	int ret;

	down_read(&ctrl->namespaces_rwsem);
	if (list_empty(&ctrl->namespaces)) {
		ret = -ENOTTY;
		goto out_unlock;
	}

	ns = list_first_entry(&ctrl->namespaces, struct nvme_ns, list);
	if (ns != list_last_entry(&ctrl->namespaces, struct nvme_ns, list)) {
		dev_warn(ctrl->device,
			"NVME_IOCTL_IO_CMD not supported when multiple namespaces present!\n");
		ret = -EINVAL;
		goto out_unlock;
	}

	dev_warn(ctrl->device,
		"using deprecated NVME_IOCTL_IO_CMD ioctl on the char device!\n");
	kref_get(&ns->kref);
	up_read(&ctrl->namespaces_rwsem);

	ret = nvme_user_cmd(ctrl, ns, argp);
	nvme_put_ns(ns);
	return ret;

out_unlock:
	up_read(&ctrl->namespaces_rwsem);
	return ret;
}

long nvme_dev_ioctl(struct file *file, unsigned int cmd,
		unsigned long arg)
{
	struct nvme_ctrl *ctrl = file->private_data;
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	case NVME_IOCTL_ADMIN_CMD:
		return nvme_user_cmd(ctrl, NULL, argp);
	case NVME_IOCTL_ADMIN64_CMD:
		return nvme_user_cmd64(ctrl, NULL, argp);
	case NVME_IOCTL_IO_CMD:
		return nvme_dev_user_cmd(ctrl, argp);
	case NVME_IOCTL_RESET:
		dev_warn(ctrl->device, "resetting controller\n");
		return nvme_reset_ctrl_sync(ctrl);
	case NVME_IOCTL_SUBSYS_RESET:
		return nvme_reset_subsystem(ctrl);
	case NVME_IOCTL_RESCAN:
		nvme_queue_scan(ctrl);
		return 0;
	default:
		return -ENOTTY;
	}
}