// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/ioctl.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/syscalls.h>
#include <linux/mm.h>
#include <linux/capability.h>
#include <linux/compat.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/security.h>
#include <linux/export.h>
#include <linux/uaccess.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h>
#include <linux/falloc.h>
#include <linux/sched/signal.h>
#include <linux/fiemap.h>

#include "internal.h"

#include <asm/ioctls.h>

/* So that the fiemap access checks can't overflow on 32 bit machines. */
#define FIEMAP_MAX_EXTENTS	(UINT_MAX / sizeof(struct fiemap_extent))

/**
 * vfs_ioctl - call filesystem specific ioctl methods
 * @filp:	open file to invoke ioctl method on
 * @cmd:	ioctl command to execute
 * @arg:	command-specific argument for ioctl
 *
 * Invokes filesystem specific ->unlocked_ioctl, if one exists; otherwise
 * returns -ENOTTY.
 *
 * Returns 0 on success, -errno on error.
 */
long vfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int error = -ENOTTY;

	if (!filp->f_op->unlocked_ioctl)
		goto out;

	error = filp->f_op->unlocked_ioctl(filp, cmd, arg);
	if (error == -ENOIOCTLCMD)
		error = -ENOTTY;
 out:
	return error;
}
EXPORT_SYMBOL(vfs_ioctl);

static int ioctl_fibmap(struct file *filp, int __user *p)
{
	struct inode *inode = file_inode(filp);
	struct super_block *sb = inode->i_sb;
	int error, ur_block;
	sector_t block;

	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	error = get_user(ur_block, p);
	if (error)
		return error;

	if (ur_block < 0)
		return -EINVAL;

	block = ur_block;
	error = bmap(inode, &block);

	if (block > INT_MAX) {
		error = -ERANGE;
		pr_warn_ratelimited("[%s/%d] FS: %s File: %pD4 would truncate fibmap result\n",
				    current->comm, task_pid_nr(current),
				    sb->s_id, filp);
	}

	if (error)
		ur_block = 0;
	else
		ur_block = block;

	if (put_user(ur_block, p))
		error = -EFAULT;

	return error;
}

/**
 * fiemap_fill_next_extent - Fiemap helper function
 * @fieinfo:	Fiemap context passed into ->fiemap
 * @logical:	Extent logical start offset, in bytes
 * @phys:	Extent physical start offset, in bytes
 * @len:	Extent length, in bytes
 * @flags:	FIEMAP_EXTENT flags that describe this extent
 *
 * Called from file system ->fiemap callback. Will populate extent
 * info as passed in via arguments and copy to user memory. On
 * success, extent count on fieinfo is incremented.
 *
 * Returns 0 on success, -errno on error, 1 if this was the last
 * extent that will fit in user array.
 */
#define SET_UNKNOWN_FLAGS	(FIEMAP_EXTENT_DELALLOC)
#define SET_NO_UNMOUNTED_IO_FLAGS	(FIEMAP_EXTENT_DATA_ENCRYPTED)
#define SET_NOT_ALIGNED_FLAGS	(FIEMAP_EXTENT_DATA_TAIL|FIEMAP_EXTENT_DATA_INLINE)
int fiemap_fill_next_extent(struct fiemap_extent_info *fieinfo, u64 logical,
			    u64 phys, u64 len, u32 flags)
{
	struct fiemap_extent extent;
	struct fiemap_extent __user *dest = fieinfo->fi_extents_start;

	/* only count the extents */
	if (fieinfo->fi_extents_max == 0) {
		fieinfo->fi_extents_mapped++;
		return (flags & FIEMAP_EXTENT_LAST) ? 1 : 0;
	}

	if (fieinfo->fi_extents_mapped >= fieinfo->fi_extents_max)
		return 1;

	if (flags & SET_UNKNOWN_FLAGS)
		flags |= FIEMAP_EXTENT_UNKNOWN;
	if (flags & SET_NO_UNMOUNTED_IO_FLAGS)
		flags |= FIEMAP_EXTENT_ENCODED;
	if (flags & SET_NOT_ALIGNED_FLAGS)
		flags |= FIEMAP_EXTENT_NOT_ALIGNED;

	memset(&extent, 0, sizeof(extent));
	extent.fe_logical = logical;
	extent.fe_physical = phys;
	extent.fe_length = len;
	extent.fe_flags = flags;

	dest += fieinfo->fi_extents_mapped;
	if (copy_to_user(dest, &extent, sizeof(extent)))
		return -EFAULT;

	fieinfo->fi_extents_mapped++;
	if (fieinfo->fi_extents_mapped == fieinfo->fi_extents_max)
		return 1;
	return (flags & FIEMAP_EXTENT_LAST) ? 1 : 0;
}
EXPORT_SYMBOL(fiemap_fill_next_extent);

/**
 * fiemap_prep - check validity of requested flags for fiemap
 * @inode:	Inode to operate on
 * @fieinfo:	Fiemap context passed into ->fiemap
 * @start:	Start of the mapped range
 * @len:	Length of the mapped range, can be truncated by this function.
 * @supported_flags:	Set of fiemap flags that the file system understands
 *
 * This function must be called from each ->fiemap instance to validate the
 * fiemap request against the file system parameters.
 *
 * Returns 0 on success, or a negative error on failure.
 */
int fiemap_prep(struct inode *inode, struct fiemap_extent_info *fieinfo,
		u64 start, u64 *len, u32 supported_flags)
{
	u64 maxbytes = inode->i_sb->s_maxbytes;
	u32 incompat_flags;
	int ret = 0;

	if (*len == 0)
		return -EINVAL;
	if (start > maxbytes)
		return -EFBIG;

	/*
	 * Shrink request scope to what the fs can actually handle.
	 */
	if (*len > maxbytes || (maxbytes - *len) < start)
		*len = maxbytes - start;

	supported_flags |= FIEMAP_FLAG_SYNC;
	supported_flags &= FIEMAP_FLAGS_COMPAT;
	incompat_flags = fieinfo->fi_flags & ~supported_flags;
	if (incompat_flags) {
		fieinfo->fi_flags = incompat_flags;
		return -EBADR;
	}

	if (fieinfo->fi_flags & FIEMAP_FLAG_SYNC)
		ret = filemap_write_and_wait(inode->i_mapping);
	return ret;
}
EXPORT_SYMBOL(fiemap_prep);
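
/*
 * Example (editorial sketch, not part of the original file): a minimal
 * ->fiemap instance for a hypothetical filesystem whose files live in a
 * single contiguous on-disk extent.  It follows the contract documented
 * above: validate the request with fiemap_prep(), then report extents
 * with fiemap_fill_next_extent().  The myfs_* names are assumptions.
 */
#if 0
static int myfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		       u64 start, u64 len)
{
	u64 phys, plen;
	int ret;

	/* Clamp the range and honour FIEMAP_FLAG_SYNC; no extra flags. */
	ret = fiemap_prep(inode, fieinfo, start, &len, 0);
	if (ret)
		return ret;

	/* Hypothetical helper returning the extent backing this file. */
	myfs_single_extent(inode, &phys, &plen);

	/* A return value of 1 only means the user-supplied array is full. */
	ret = fiemap_fill_next_extent(fieinfo, 0, phys, plen,
				      FIEMAP_EXTENT_LAST);
	return ret < 0 ? ret : 0;
}
#endif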

static int ioctl_fiemap(struct file *filp, struct fiemap __user *ufiemap)
{
	struct fiemap fiemap;
	struct fiemap_extent_info fieinfo = { 0, };
	struct inode *inode = file_inode(filp);
	int error;

	if (!inode->i_op->fiemap)
		return -EOPNOTSUPP;

	if (copy_from_user(&fiemap, ufiemap, sizeof(fiemap)))
		return -EFAULT;

	if (fiemap.fm_extent_count > FIEMAP_MAX_EXTENTS)
		return -EINVAL;

	fieinfo.fi_flags = fiemap.fm_flags;
	fieinfo.fi_extents_max = fiemap.fm_extent_count;
	fieinfo.fi_extents_start = ufiemap->fm_extents;

	error = inode->i_op->fiemap(inode, &fieinfo, fiemap.fm_start,
				    fiemap.fm_length);

	fiemap.fm_flags = fieinfo.fi_flags;
	fiemap.fm_mapped_extents = fieinfo.fi_extents_mapped;
	if (copy_to_user(ufiemap, &fiemap, sizeof(fiemap)))
		error = -EFAULT;

	return error;
}

static long ioctl_file_clone(struct file *dst_file, unsigned long srcfd,
			     u64 off, u64 olen, u64 destoff)
{
	struct fd src_file = fdget(srcfd);
	loff_t cloned;
	int ret;

	if (!src_file.file)
		return -EBADF;
	ret = -EXDEV;
	if (src_file.file->f_path.mnt != dst_file->f_path.mnt)
		goto fdput;
	cloned = vfs_clone_file_range(src_file.file, off, dst_file, destoff,
				      olen, 0);
	if (cloned < 0)
		ret = cloned;
	else if (olen && cloned != olen)
		ret = -EINVAL;
	else
		ret = 0;
fdput:
	fdput(src_file);
	return ret;
}

static long ioctl_file_clone_range(struct file *file,
				   struct file_clone_range __user *argp)
{
	struct file_clone_range args;

	if (copy_from_user(&args, argp, sizeof(args)))
		return -EFAULT;
	return ioctl_file_clone(file, args.src_fd, args.src_offset,
				args.src_length, args.dest_offset);
}
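
/*
 * Example (editorial sketch): how the FICLONERANGE ioctl handled above is
 * typically driven from user space.  Error handling is omitted and the
 * function name is an assumption.  Both files must sit on the same mount or
 * ioctl_file_clone() returns -EXDEV, and a zero src_length clones through
 * the end of the source file.
 */
#if 0
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/fs.h>		/* FICLONERANGE, struct file_clone_range */

static int clone_prefix(const char *src, const char *dst, __u64 length)
{
	int src_fd = open(src, O_RDONLY);
	int dst_fd = open(dst, O_WRONLY | O_CREAT, 0644);
	struct file_clone_range args = {
		.src_fd = src_fd,
		.src_offset = 0,
		.src_length = length,
		.dest_offset = 0,
	};

	/* The destination is the fd the ioctl is issued on. */
	return ioctl(dst_fd, FICLONERANGE, &args);
}
#endif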

#ifdef CONFIG_BLOCK

static inline sector_t logical_to_blk(struct inode *inode, loff_t offset)
{
	return (offset >> inode->i_blkbits);
}

static inline loff_t blk_to_logical(struct inode *inode, sector_t blk)
{
	return (blk << inode->i_blkbits);
}

/**
 * __generic_block_fiemap - FIEMAP for block based inodes (no locking)
 * @inode: the inode to map
 * @fieinfo: the fiemap info struct that will be passed back to userspace
 * @start: where to start mapping in the inode
 * @len: how much space to map
 * @get_block: the fs's get_block function
 *
 * This does FIEMAP for block based inodes.  Basically it will just loop
 * through get_block until we hit the number of extents we want to map, or we
 * go past the end of the file and hit a hole.
 *
 * If it is possible to have data blocks beyond a hole past @inode->i_size, then
 * please do not use this function, it will stop at the first unmapped block
 * beyond i_size.
 *
 * If you use this function directly, you need to do your own locking. Use
 * generic_block_fiemap if you want the locking done for you.
 */
static int __generic_block_fiemap(struct inode *inode,
				  struct fiemap_extent_info *fieinfo, loff_t start,
				  loff_t len, get_block_t *get_block)
{
	struct buffer_head map_bh;
	sector_t start_blk, last_blk;
	loff_t isize = i_size_read(inode);
	u64 logical = 0, phys = 0, size = 0;
	u32 flags = FIEMAP_EXTENT_MERGED;
	bool past_eof = false, whole_file = false;
	int ret = 0;

	ret = fiemap_prep(inode, fieinfo, start, &len, FIEMAP_FLAG_SYNC);
	if (ret)
		return ret;

	/*
	 * Either the i_mutex or other appropriate locking needs to be held
	 * since we expect isize to not change at all through the duration of
	 * this call.
	 */
	if (len >= isize) {
		whole_file = true;
		len = isize;
	}

	/*
	 * Some filesystems can't deal with being asked to map less than
	 * blocksize, so make sure our len is at least block length.
	 */
	if (logical_to_blk(inode, len) == 0)
		len = blk_to_logical(inode, 1);

	start_blk = logical_to_blk(inode, start);
	last_blk = logical_to_blk(inode, start + len - 1);

	do {
		/*
		 * we set b_size to the total size we want so it will map as
		 * many contiguous blocks as possible at once
		 */
		memset(&map_bh, 0, sizeof(struct buffer_head));
		map_bh.b_size = len;

		ret = get_block(inode, start_blk, &map_bh, 0);
		if (ret)
			break;

		/* HOLE */
		if (!buffer_mapped(&map_bh)) {
			start_blk++;

			/*
			 * We want to handle the case where there is an
			 * allocated block at the front of the file followed by
			 * nothing but holes up to the end of the file, to make
			 * sure the extent at the front gets properly marked
			 * with FIEMAP_EXTENT_LAST
			 */
			if (!past_eof &&
			    blk_to_logical(inode, start_blk) >= isize)
				past_eof = true;

			/*
			 * First hole after going past the EOF, this is our
			 * last extent
			 */
			if (past_eof && size) {
				flags = FIEMAP_EXTENT_MERGED|FIEMAP_EXTENT_LAST;
				ret = fiemap_fill_next_extent(fieinfo, logical,
							      phys, size,
							      flags);
			} else if (size) {
				ret = fiemap_fill_next_extent(fieinfo, logical,
							      phys, size, flags);
				size = 0;
			}

			/* if we have holes up to/past EOF then we're done */
			if (start_blk > last_blk || past_eof || ret)
				break;
		} else {
			/*
			 * We have gone over the length of what we wanted to
			 * map, and it wasn't the entire file, so add the extent
			 * we got last time and exit.
			 *
			 * This is for the case where say we want to map all the
			 * way up to the second to the last block in a file, but
			 * the last block is a hole, making the second to last
			 * block FIEMAP_EXTENT_LAST.  In this case we want to
			 * see if there is a hole after the second to last block
			 * so we can mark it properly.  If we found data after
			 * we exceeded the length we were requesting, then we
			 * are good to go, just add the extent to the fieinfo
			 * and break
			 */
			if (start_blk > last_blk && !whole_file) {
				ret = fiemap_fill_next_extent(fieinfo, logical,
							      phys, size,
							      flags);
				break;
			}

			/*
			 * if size != 0 then we know we already have an extent
			 * to add, so add it.
			 */
			if (size) {
				ret = fiemap_fill_next_extent(fieinfo, logical,
							      phys, size,
							      flags);
				if (ret)
					break;
			}

			logical = blk_to_logical(inode, start_blk);
			phys = blk_to_logical(inode, map_bh.b_blocknr);
			size = map_bh.b_size;
			flags = FIEMAP_EXTENT_MERGED;

			start_blk += logical_to_blk(inode, size);

			/*
			 * If we are past the EOF, then we need to make sure as
			 * soon as we find a hole that the last extent we found
			 * is marked with FIEMAP_EXTENT_LAST
			 */
			if (!past_eof && logical + size >= isize)
				past_eof = true;
		}
		cond_resched();
		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			break;
		}

	} while (1);

	/* If ret is 1 then we just hit the end of the extent array */
	if (ret == 1)
		ret = 0;

	return ret;
}

/**
 * generic_block_fiemap - FIEMAP for block based inodes
 * @inode: The inode to map
 * @fieinfo: The mapping information
 * @start: The initial block to map
 * @len: The length of the extent to attempt to map
 * @get_block: The block mapping function for the fs
 *
 * Calls __generic_block_fiemap to map the inode, after taking
 * the inode's mutex lock.
 */
int generic_block_fiemap(struct inode *inode,
			 struct fiemap_extent_info *fieinfo, u64 start,
			 u64 len, get_block_t *get_block)
{
	int ret;

	inode_lock(inode);
	ret = __generic_block_fiemap(inode, fieinfo, start, len, get_block);
	inode_unlock(inode);
	return ret;
}
EXPORT_SYMBOL(generic_block_fiemap);

#endif /* CONFIG_BLOCK */
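
/*
 * Example (editorial sketch): a block based filesystem can implement its
 * ->fiemap inode operation by delegating to generic_block_fiemap(), which
 * takes the inode lock and then walks the file through the filesystem's
 * get_block callback.  "myfs_get_block" is an assumed name.
 */
#if 0
static int myfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		       u64 start, u64 len)
{
	return generic_block_fiemap(inode, fieinfo, start, len,
				    myfs_get_block);
}
#endif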

/*
 * This provides compatibility with legacy XFS pre-allocation ioctls
 * which predate the fallocate syscall.
 *
 * Only the l_start, l_len and l_whence fields of the 'struct space_resv'
 * are used here, rest are ignored.
 */
static int ioctl_preallocate(struct file *filp, int mode, void __user *argp)
{
	struct inode *inode = file_inode(filp);
	struct space_resv sr;

	if (copy_from_user(&sr, argp, sizeof(sr)))
		return -EFAULT;

	switch (sr.l_whence) {
	case SEEK_SET:
		break;
	case SEEK_CUR:
		sr.l_start += filp->f_pos;
		break;
	case SEEK_END:
		sr.l_start += i_size_read(inode);
		break;
	default:
		return -EINVAL;
	}

	return vfs_fallocate(filp, mode | FALLOC_FL_KEEP_SIZE, sr.l_start,
			     sr.l_len);
}

/* on ia32 l_start is on a 32-bit boundary */
#if defined CONFIG_COMPAT && defined(CONFIG_X86_64)
/* just account for different alignment */
static int compat_ioctl_preallocate(struct file *file, int mode,
				    struct space_resv_32 __user *argp)
{
	struct inode *inode = file_inode(file);
	struct space_resv_32 sr;

	if (copy_from_user(&sr, argp, sizeof(sr)))
		return -EFAULT;

	switch (sr.l_whence) {
	case SEEK_SET:
		break;
	case SEEK_CUR:
		sr.l_start += file->f_pos;
		break;
	case SEEK_END:
		sr.l_start += i_size_read(inode);
		break;
	default:
		return -EINVAL;
	}

	return vfs_fallocate(file, mode | FALLOC_FL_KEEP_SIZE, sr.l_start, sr.l_len);
}
#endif

static int file_ioctl(struct file *filp, unsigned int cmd, int __user *p)
{
	switch (cmd) {
	case FIBMAP:
		return ioctl_fibmap(filp, p);
	case FS_IOC_RESVSP:
	case FS_IOC_RESVSP64:
		return ioctl_preallocate(filp, 0, p);
	case FS_IOC_UNRESVSP:
	case FS_IOC_UNRESVSP64:
		return ioctl_preallocate(filp, FALLOC_FL_PUNCH_HOLE, p);
	case FS_IOC_ZERO_RANGE:
		return ioctl_preallocate(filp, FALLOC_FL_ZERO_RANGE, p);
	}

	return -ENOIOCTLCMD;
}
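
/*
 * Example (editorial sketch): user-space view of the FIBMAP path handled by
 * file_ioctl()/ioctl_fibmap() above.  FIBMAP translates a logical block
 * number (in units of the FIGETBSZ block size) into a filesystem block
 * number and needs CAP_SYS_RAWIO.  Error handling is omitted and the
 * function name is an assumption.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/fs.h>		/* FIBMAP, FIGETBSZ */

static void print_first_block(const char *path)
{
	int fd = open(path, O_RDONLY);
	int blocksize = 0;
	int block = 0;		/* logical block in, physical block out */

	ioctl(fd, FIGETBSZ, &blocksize);
	ioctl(fd, FIBMAP, &block);
	printf("%s: block 0 -> fs block %d (blocksize %d)\n",
	       path, block, blocksize);
}
#endif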

static int ioctl_fionbio(struct file *filp, int __user *argp)
{
	unsigned int flag;
	int on, error;

	error = get_user(on, argp);
	if (error)
		return error;
	flag = O_NONBLOCK;
#ifdef __sparc__
	/* SunOS compatibility item. */
	if (O_NONBLOCK != O_NDELAY)
		flag |= O_NDELAY;
#endif
	spin_lock(&filp->f_lock);
	if (on)
		filp->f_flags |= flag;
	else
		filp->f_flags &= ~flag;
	spin_unlock(&filp->f_lock);
	return error;
}

static int ioctl_fioasync(unsigned int fd, struct file *filp,
			  int __user *argp)
{
	unsigned int flag;
	int on, error;

	error = get_user(on, argp);
	if (error)
		return error;
	flag = on ? FASYNC : 0;

	/* Did FASYNC state change ? */
	if ((flag ^ filp->f_flags) & FASYNC) {
		if (filp->f_op->fasync)
			/* fasync() adjusts filp->f_flags */
			error = filp->f_op->fasync(fd, filp, on);
		else
			error = -ENOTTY;
	}
	return error < 0 ? error : 0;
}

static int ioctl_fsfreeze(struct file *filp)
{
	struct super_block *sb = file_inode(filp)->i_sb;

	if (!ns_capable(sb->s_user_ns, CAP_SYS_ADMIN))
		return -EPERM;

	/* If filesystem doesn't support freeze feature, return. */
	if (sb->s_op->freeze_fs == NULL && sb->s_op->freeze_super == NULL)
		return -EOPNOTSUPP;

	/* Freeze */
	if (sb->s_op->freeze_super)
		return sb->s_op->freeze_super(sb);
	return freeze_super(sb);
}

static int ioctl_fsthaw(struct file *filp)
{
	struct super_block *sb = file_inode(filp)->i_sb;

	if (!ns_capable(sb->s_user_ns, CAP_SYS_ADMIN))
		return -EPERM;

	/* Thaw */
	if (sb->s_op->thaw_super)
		return sb->s_op->thaw_super(sb);
	return thaw_super(sb);
}
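
/*
 * Example (editorial sketch): FIFREEZE/FITHAW as wired up in do_vfs_ioctl()
 * below ignore their argument, so user space only needs an fd on the
 * filesystem and CAP_SYS_ADMIN in the superblock's user namespace.  Error
 * handling is omitted and the function name is an assumption.
 */
#if 0
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/fs.h>		/* FIFREEZE, FITHAW */

static int freeze_and_thaw(const char *mountpoint)
{
	int fd = open(mountpoint, O_RDONLY);
	int ret;

	ret = ioctl(fd, FIFREEZE, 0);
	if (ret == 0)
		ret = ioctl(fd, FITHAW, 0);
	return ret;
}
#endif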

static int ioctl_file_dedupe_range(struct file *file,
				   struct file_dedupe_range __user *argp)
{
	struct file_dedupe_range *same = NULL;
	int ret;
	unsigned long size;
	u16 count;

	if (get_user(count, &argp->dest_count)) {
		ret = -EFAULT;
		goto out;
	}

	size = offsetof(struct file_dedupe_range __user, info[count]);
	if (size > PAGE_SIZE) {
		ret = -ENOMEM;
		goto out;
	}

	same = memdup_user(argp, size);
	if (IS_ERR(same)) {
		ret = PTR_ERR(same);
		same = NULL;
		goto out;
	}

	same->dest_count = count;
	ret = vfs_dedupe_file_range(file, same);
	if (ret)
		goto out;

	ret = copy_to_user(argp, same, size);
	if (ret)
		ret = -EFAULT;

out:
	kfree(same);
	return ret;
}
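
/*
 * Example (editorial sketch): user-space use of FIDEDUPERANGE.  The source
 * range lives on the fd the ioctl is issued on; each entry of the info[]
 * array names a destination file, and the kernel reports per-destination
 * status and the number of bytes deduplicated.  Error handling is omitted
 * and the function name is an assumption.
 */
#if 0
#include <fcntl.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/fs.h>		/* FIDEDUPERANGE, struct file_dedupe_range */

static int dedupe_one(int src_fd, int dst_fd, __u64 offset, __u64 length)
{
	struct file_dedupe_range *range;
	int ret;

	range = calloc(1, sizeof(*range) +
			  sizeof(struct file_dedupe_range_info));
	range->src_offset = offset;
	range->src_length = length;
	range->dest_count = 1;
	range->info[0].dest_fd = dst_fd;
	range->info[0].dest_offset = offset;

	ret = ioctl(src_fd, FIDEDUPERANGE, range);
	if (ret == 0 && range->info[0].status != FILE_DEDUPE_RANGE_SAME)
		ret = range->info[0].status;	/* FILE_DEDUPE_RANGE_DIFFERS or -errno */
	free(range);
	return ret;
}
#endif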

/*
 * do_vfs_ioctl() is not for drivers and not intended to be EXPORT_SYMBOL()'d.
 * It's just a simple helper for sys_ioctl and compat_sys_ioctl.
 *
 * When you add any new common ioctls to the switches above and below,
 * please ensure they have compatible arguments in compat mode.
 */
static int do_vfs_ioctl(struct file *filp, unsigned int fd,
			unsigned int cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct inode *inode = file_inode(filp);

	switch (cmd) {
	case FIOCLEX:
		set_close_on_exec(fd, 1);
		return 0;

	case FIONCLEX:
		set_close_on_exec(fd, 0);
		return 0;

	case FIONBIO:
		return ioctl_fionbio(filp, argp);

	case FIOASYNC:
		return ioctl_fioasync(fd, filp, argp);

	case FIOQSIZE:
		if (S_ISDIR(inode->i_mode) || S_ISREG(inode->i_mode) ||
		    S_ISLNK(inode->i_mode)) {
			loff_t res = inode_get_bytes(inode);
			return copy_to_user(argp, &res, sizeof(res)) ?
					    -EFAULT : 0;
		}

		return -ENOTTY;

	case FIFREEZE:
		return ioctl_fsfreeze(filp);

	case FITHAW:
		return ioctl_fsthaw(filp);

	case FS_IOC_FIEMAP:
		return ioctl_fiemap(filp, argp);

	case FIGETBSZ:
		/* anon_bdev filesystems may not have a block size */
		if (!inode->i_sb->s_blocksize)
			return -EINVAL;

		return put_user(inode->i_sb->s_blocksize, (int __user *)argp);

	case FICLONE:
		return ioctl_file_clone(filp, arg, 0, 0, 0);

	case FICLONERANGE:
		return ioctl_file_clone_range(filp, argp);

	case FIDEDUPERANGE:
		return ioctl_file_dedupe_range(filp, argp);

	case FIONREAD:
		if (!S_ISREG(inode->i_mode))
			return vfs_ioctl(filp, cmd, arg);

		return put_user(i_size_read(inode) - filp->f_pos,
				(int __user *)argp);

	default:
		if (S_ISREG(inode->i_mode))
			return file_ioctl(filp, cmd, argp);
		break;
	}

	return -ENOIOCTLCMD;
}

int ksys_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg)
{
	struct fd f = fdget(fd);
	int error;

	if (!f.file)
		return -EBADF;

	error = security_file_ioctl(f.file, cmd, arg);
	if (error)
		goto out;

	error = do_vfs_ioctl(f.file, fd, cmd, arg);
	if (error == -ENOIOCTLCMD)
		error = vfs_ioctl(f.file, cmd, arg);

out:
	fdput(f);
	return error;
}

SYSCALL_DEFINE3(ioctl, unsigned int, fd, unsigned int, cmd, unsigned long, arg)
{
	return ksys_ioctl(fd, cmd, arg);
}

#ifdef CONFIG_COMPAT
/**
 * compat_ptr_ioctl - generic implementation of .compat_ioctl file operation
 * @file:	file to operate on
 * @cmd:	ioctl command number
 * @arg:	ioctl argument, interpreted as a compat pointer
 *
 * This is not normally called as a function, but instead set in struct
 * file_operations as
 *
 *     .compat_ioctl = compat_ptr_ioctl,
 *
 * On most architectures, the compat_ptr_ioctl() just passes all arguments
 * to the corresponding ->ioctl handler. The exception is arch/s390, where
 * compat_ptr() clears the top bit of a 32-bit pointer value, so user space
 * pointers to the second 2GB alias the first 2GB, as is the case for
 * native 32-bit s390 user space.
 *
 * The compat_ptr_ioctl() function must therefore be used only with ioctl
 * functions that either ignore the argument or pass a pointer to a
 * compatible data type.
 *
 * If any ioctl command handled by fops->unlocked_ioctl passes a plain
 * integer instead of a pointer, or any of the passed data types
 * is incompatible between 32-bit and 64-bit architectures, a proper
 * handler is required instead of compat_ptr_ioctl.
 */
long compat_ptr_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	if (!file->f_op->unlocked_ioctl)
		return -ENOIOCTLCMD;

	return file->f_op->unlocked_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
}
EXPORT_SYMBOL(compat_ptr_ioctl);
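
/*
 * Example (editorial sketch): the usage pattern described in the kernel-doc
 * above.  A driver whose ioctl commands all take pointer arguments can reuse
 * its native handler for compat tasks.  "mydrv_ioctl" is an assumed name.
 */
#if 0
static const struct file_operations mydrv_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= mydrv_ioctl,
	.compat_ioctl	= compat_ptr_ioctl,
};
#endif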

COMPAT_SYSCALL_DEFINE3(ioctl, unsigned int, fd, unsigned int, cmd,
		       compat_ulong_t, arg)
{
	struct fd f = fdget(fd);
	int error;

	if (!f.file)
		return -EBADF;

	/* RED-PEN how should LSM module know it's handling 32bit? */
	error = security_file_ioctl(f.file, cmd, arg);
	if (error)
		goto out;

	switch (cmd) {
	/* FICLONE takes an int argument, so don't use compat_ptr() */
	case FICLONE:
		error = ioctl_file_clone(f.file, arg, 0, 0, 0);
		break;

#if defined(CONFIG_X86_64)
	/* these get messy on amd64 due to alignment differences */
	case FS_IOC_RESVSP_32:
	case FS_IOC_RESVSP64_32:
		error = compat_ioctl_preallocate(f.file, 0, compat_ptr(arg));
		break;
	case FS_IOC_UNRESVSP_32:
	case FS_IOC_UNRESVSP64_32:
		error = compat_ioctl_preallocate(f.file, FALLOC_FL_PUNCH_HOLE,
						 compat_ptr(arg));
		break;
	case FS_IOC_ZERO_RANGE_32:
		error = compat_ioctl_preallocate(f.file, FALLOC_FL_ZERO_RANGE,
						 compat_ptr(arg));
		break;
#endif

	/*
	 * everything else in do_vfs_ioctl() takes either a compatible
	 * pointer argument or no argument -- call it with a modified
	 * argument.
	 */
	default:
		error = do_vfs_ioctl(f.file, fd, cmd,
				     (unsigned long)compat_ptr(arg));
		if (error != -ENOIOCTLCMD)
			break;

		if (f.file->f_op->compat_ioctl)
			error = f.file->f_op->compat_ioctl(f.file, cmd, arg);
		if (error == -ENOIOCTLCMD)
			error = -ENOTTY;
		break;
	}

 out:
	fdput(f);

	return error;
}
#endif