/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_ioctl.h"
#include "xfs_alloc.h"
#include "xfs_rtalloc.h"
#include "xfs_itable.h"
#include "xfs_error.h"
#include "xfs_attr.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_fsops.h"
#include "xfs_discard.h"
#include "xfs_quota.h"
#include "xfs_export.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_symlink.h"
#include "xfs_trans.h"
#include "xfs_pnfs.h"
#include "xfs_acl.h"
#include "xfs_btree.h"
#include <linux/fsmap.h>
#include "xfs_fsmap.h"
#include "scrub/xfs_scrub.h"

#include <linux/capability.h>
#include <linux/cred.h>
#include <linux/dcache.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/exportfs.h>

/*
 * xfs_find_handle maps from userspace xfs_fsop_handlereq structure to
 * a file or fs handle.
 *
 * XFS_IOC_PATH_TO_FSHANDLE
 *    returns fs handle for a mount point or path within that mount point
 * XFS_IOC_FD_TO_HANDLE
 *    returns full handle for a FD opened in user space
 * XFS_IOC_PATH_TO_HANDLE
 *    returns full handle for a path
 */
int
xfs_find_handle(
	unsigned int		cmd,
	xfs_fsop_handlereq_t	*hreq)
{
	int			hsize;
	xfs_handle_t		handle;
	struct inode		*inode;
	struct fd		f = {NULL};
	struct path		path;
	int			error;
	struct xfs_inode	*ip;

	if (cmd == XFS_IOC_FD_TO_HANDLE) {
		f = fdget(hreq->fd);
		if (!f.file)
			return -EBADF;
		inode = file_inode(f.file);
	} else {
		error = user_lpath((const char __user *)hreq->path, &path);
		if (error)
			return error;
		inode = d_inode(path.dentry);
	}
	ip = XFS_I(inode);

	/*
	 * We can only generate handles for inodes residing on an XFS
	 * filesystem, and only for regular files, directories or symbolic
	 * links.
	 */
	error = -EINVAL;
	if (inode->i_sb->s_magic != XFS_SB_MAGIC)
		goto out_put;

	error = -EBADF;
	if (!S_ISREG(inode->i_mode) &&
	    !S_ISDIR(inode->i_mode) &&
	    !S_ISLNK(inode->i_mode))
		goto out_put;


	memcpy(&handle.ha_fsid, ip->i_mount->m_fixedfsid, sizeof(xfs_fsid_t));

	if (cmd == XFS_IOC_PATH_TO_FSHANDLE) {
		/*
		 * This handle only contains an fsid, zero the rest.
		 */
		memset(&handle.ha_fid, 0, sizeof(handle.ha_fid));
		hsize = sizeof(xfs_fsid_t);
	} else {
		handle.ha_fid.fid_len = sizeof(xfs_fid_t) -
					sizeof(handle.ha_fid.fid_len);
		handle.ha_fid.fid_pad = 0;
		handle.ha_fid.fid_gen = inode->i_generation;
		handle.ha_fid.fid_ino = ip->i_ino;
		hsize = sizeof(xfs_handle_t);
	}

	error = -EFAULT;
	if (copy_to_user(hreq->ohandle, &handle, hsize) ||
	    copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
		goto out_put;

	error = 0;

out_put:
	if (cmd == XFS_IOC_FD_TO_HANDLE)
		fdput(f);
	else
		path_put(&path);
	return error;
}

/*
 * No need to do permission checks on the various pathname components
 * as the handle operations are privileged.
 */
STATIC int
xfs_handle_acceptable(
	void			*context,
	struct dentry		*dentry)
{
	return 1;
}

/*
 * Convert userspace handle data into a dentry.
 */
struct dentry *
xfs_handle_to_dentry(
	struct file		*parfilp,
	void __user		*uhandle,
	u32			hlen)
{
	xfs_handle_t		handle;
	struct xfs_fid64	fid;

	/*
	 * Only allow handle opens under a directory.
	 */
	if (!S_ISDIR(file_inode(parfilp)->i_mode))
		return ERR_PTR(-ENOTDIR);

	if (hlen != sizeof(xfs_handle_t))
		return ERR_PTR(-EINVAL);
	if (copy_from_user(&handle, uhandle, hlen))
		return ERR_PTR(-EFAULT);
	if (handle.ha_fid.fid_len !=
	    sizeof(handle.ha_fid) - sizeof(handle.ha_fid.fid_len))
		return ERR_PTR(-EINVAL);

	memset(&fid, 0, sizeof(struct fid));
	fid.ino = handle.ha_fid.fid_ino;
	fid.gen = handle.ha_fid.fid_gen;

	return exportfs_decode_fh(parfilp->f_path.mnt, (struct fid *)&fid, 3,
			FILEID_INO32_GEN | XFS_FILEID_TYPE_64FLAG,
			xfs_handle_acceptable, NULL);
}

STATIC struct dentry *
xfs_handlereq_to_dentry(
	struct file		*parfilp,
	xfs_fsop_handlereq_t	*hreq)
{
	return xfs_handle_to_dentry(parfilp, hreq->ihandle, hreq->ihandlen);
}

int
xfs_open_by_handle(
	struct file		*parfilp,
	xfs_fsop_handlereq_t	*hreq)
{
	const struct cred	*cred = current_cred();
	int			error;
	int			fd;
	int			permflag;
	struct file		*filp;
	struct inode		*inode;
	struct dentry		*dentry;
	fmode_t			fmode;
	struct path		path;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	dentry = xfs_handlereq_to_dentry(parfilp, hreq);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);
	inode = d_inode(dentry);

	/* Restrict xfs_open_by_handle to directories & regular files. */
	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode))) {
		error = -EPERM;
		goto out_dput;
	}

#if BITS_PER_LONG != 32
	hreq->oflags |= O_LARGEFILE;
#endif

	permflag = hreq->oflags;
	fmode = OPEN_FMODE(permflag);
	if ((!(permflag & O_APPEND) || (permflag & O_TRUNC)) &&
	    (fmode & FMODE_WRITE) && IS_APPEND(inode)) {
		error = -EPERM;
		goto out_dput;
	}

	if ((fmode & FMODE_WRITE) && IS_IMMUTABLE(inode)) {
		error = -EPERM;
		goto out_dput;
	}

	/* Can't write directories. */
	if (S_ISDIR(inode->i_mode) && (fmode & FMODE_WRITE)) {
		error = -EISDIR;
		goto out_dput;
	}

	fd = get_unused_fd_flags(0);
	if (fd < 0) {
		error = fd;
		goto out_dput;
	}

	path.mnt = parfilp->f_path.mnt;
	path.dentry = dentry;
	filp = dentry_open(&path, hreq->oflags, cred);
	dput(dentry);
	if (IS_ERR(filp)) {
		put_unused_fd(fd);
		return PTR_ERR(filp);
	}

	if (S_ISREG(inode->i_mode)) {
		filp->f_flags |= O_NOATIME;
		filp->f_mode |= FMODE_NOCMTIME;
	}

	fd_install(fd, filp);
	return fd;

out_dput:
	dput(dentry);
	return error;
}

int
xfs_readlink_by_handle(
	struct file		*parfilp,
	xfs_fsop_handlereq_t	*hreq)
{
	struct dentry		*dentry;
	__u32			olen;
	int			error;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	dentry = xfs_handlereq_to_dentry(parfilp, hreq);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	/* Restrict this handle operation to symlinks only. */
	if (!d_is_symlink(dentry)) {
		error = -EINVAL;
		goto out_dput;
	}

	if (copy_from_user(&olen, hreq->ohandlen, sizeof(__u32))) {
		error = -EFAULT;
		goto out_dput;
	}

	error = vfs_readlink(dentry, hreq->ohandle, olen);

out_dput:
	dput(dentry);
	return error;
}

int
xfs_set_dmattrs(
	xfs_inode_t	*ip,
	uint		evmask,
	uint16_t	state)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_trans_t	*tp;
	int		error;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0, 0, 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);

	ip->i_d.di_dmevmask = evmask;
	ip->i_d.di_dmstate = state;

	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	error = xfs_trans_commit(tp);

	return error;
}

STATIC int
xfs_fssetdm_by_handle(
	struct file		*parfilp,
	void			__user *arg)
{
	int			error;
	struct fsdmidata	fsd;
	xfs_fsop_setdm_handlereq_t dmhreq;
	struct dentry		*dentry;

	if (!capable(CAP_MKNOD))
		return -EPERM;
	if (copy_from_user(&dmhreq, arg, sizeof(xfs_fsop_setdm_handlereq_t)))
		return -EFAULT;

	error = mnt_want_write_file(parfilp);
	if (error)
		return error;

	dentry = xfs_handlereq_to_dentry(parfilp, &dmhreq.hreq);
	if (IS_ERR(dentry)) {
		mnt_drop_write_file(parfilp);
		return PTR_ERR(dentry);
	}

	if (IS_IMMUTABLE(d_inode(dentry)) || IS_APPEND(d_inode(dentry))) {
		error = -EPERM;
		goto out;
	}

	if (copy_from_user(&fsd, dmhreq.data, sizeof(fsd))) {
		error = -EFAULT;
		goto out;
	}

	error = xfs_set_dmattrs(XFS_I(d_inode(dentry)), fsd.fsd_dmevmask,
				fsd.fsd_dmstate);

out:
	mnt_drop_write_file(parfilp);
	dput(dentry);
	return error;
}

STATIC int
xfs_attrlist_by_handle(
	struct file		*parfilp,
	void			__user *arg)
{
	int			error = -ENOMEM;
	attrlist_cursor_kern_t	*cursor;
	struct xfs_fsop_attrlist_handlereq __user *p = arg;
	xfs_fsop_attrlist_handlereq_t al_hreq;
	struct dentry		*dentry;
	char			*kbuf;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (copy_from_user(&al_hreq, arg, sizeof(xfs_fsop_attrlist_handlereq_t)))
		return -EFAULT;
	if (al_hreq.buflen < sizeof(struct attrlist) ||
	    al_hreq.buflen > XFS_XATTR_LIST_MAX)
		return -EINVAL;
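
	/*
	 * The attr list cursor lives in al_hreq.pos; it is copied back to
	 * userspace after the listing below so the caller can resume
	 * iteration of a large attribute list across multiple requests.
	 */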

	/*
	 * Reject flags, only allow namespaces.
	 */
	if (al_hreq.flags & ~(ATTR_ROOT | ATTR_SECURE))
		return -EINVAL;

	dentry = xfs_handlereq_to_dentry(parfilp, &al_hreq.hreq);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	kbuf = kmem_zalloc_large(al_hreq.buflen, KM_SLEEP);
	if (!kbuf)
		goto out_dput;

	cursor = (attrlist_cursor_kern_t *)&al_hreq.pos;
	error = xfs_attr_list(XFS_I(d_inode(dentry)), kbuf, al_hreq.buflen,
					al_hreq.flags, cursor);
	if (error)
		goto out_kfree;

	if (copy_to_user(&p->pos, cursor, sizeof(attrlist_cursor_kern_t))) {
		error = -EFAULT;
		goto out_kfree;
	}

	if (copy_to_user(al_hreq.buffer, kbuf, al_hreq.buflen))
		error = -EFAULT;

out_kfree:
	kmem_free(kbuf);
out_dput:
	dput(dentry);
	return error;
}

int
xfs_attrmulti_attr_get(
	struct inode		*inode,
	unsigned char		*name,
	unsigned char		__user *ubuf,
	uint32_t		*len,
	uint32_t		flags)
{
	unsigned char		*kbuf;
	int			error = -EFAULT;

	if (*len > XFS_XATTR_SIZE_MAX)
		return -EINVAL;
	kbuf = kmem_zalloc_large(*len, KM_SLEEP);
	if (!kbuf)
		return -ENOMEM;

	error = xfs_attr_get(XFS_I(inode), name, kbuf, (int *)len, flags);
	if (error)
		goto out_kfree;

	if (copy_to_user(ubuf, kbuf, *len))
		error = -EFAULT;

out_kfree:
	kmem_free(kbuf);
	return error;
}

int
xfs_attrmulti_attr_set(
	struct inode		*inode,
	unsigned char		*name,
	const unsigned char	__user *ubuf,
	uint32_t		len,
	uint32_t		flags)
{
	unsigned char		*kbuf;
	int			error;

	if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
		return -EPERM;
	if (len > XFS_XATTR_SIZE_MAX)
		return -EINVAL;

	kbuf = memdup_user(ubuf, len);
	if (IS_ERR(kbuf))
		return PTR_ERR(kbuf);

	error = xfs_attr_set(XFS_I(inode), name, kbuf, len, flags);
	if (!error)
		xfs_forget_acl(inode, name, flags);
	kfree(kbuf);
	return error;
}

int
xfs_attrmulti_attr_remove(
	struct inode		*inode,
	unsigned char		*name,
	uint32_t		flags)
{
	int			error;

	if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
		return -EPERM;
	error = xfs_attr_remove(XFS_I(inode), name, flags);
	if (!error)
		xfs_forget_acl(inode, name, flags);
	return error;
}

STATIC int
xfs_attrmulti_by_handle(
	struct file		*parfilp,
	void			__user *arg)
{
	int			error;
	xfs_attr_multiop_t	*ops;
	xfs_fsop_attrmulti_handlereq_t am_hreq;
	struct dentry		*dentry;
	unsigned int		i, size;
	unsigned char		*attr_name;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (copy_from_user(&am_hreq, arg, sizeof(xfs_fsop_attrmulti_handlereq_t)))
		return -EFAULT;

	/* overflow check */
	if (am_hreq.opcount >= INT_MAX / sizeof(xfs_attr_multiop_t))
		return -E2BIG;

	dentry = xfs_handlereq_to_dentry(parfilp, &am_hreq.hreq);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	error = -E2BIG;
	size = am_hreq.opcount * sizeof(xfs_attr_multiop_t);
	if (!size || size > 16 * PAGE_SIZE)
		goto out_dput;

	ops = memdup_user(am_hreq.ops, size);
	if (IS_ERR(ops)) {
		error = PTR_ERR(ops);
		goto out_dput;
	}

	error = -ENOMEM;
	attr_name = kmalloc(MAXNAMELEN, GFP_KERNEL);
	if (!attr_name)
		goto out_kfree_ops;

	error = 0;
	for (i = 0; i < am_hreq.opcount; i++) {
		ops[i].am_error = strncpy_from_user((char *)attr_name,
				ops[i].am_attrname, MAXNAMELEN);
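		/*
		 * strncpy_from_user() returns the length of the name copied,
		 * zero for an empty name, or a negative errno on fault.
		 */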
		if (ops[i].am_error == 0 || ops[i].am_error == MAXNAMELEN)
			error = -ERANGE;
		if (ops[i].am_error < 0)
			break;

		switch (ops[i].am_opcode) {
		case ATTR_OP_GET:
			ops[i].am_error = xfs_attrmulti_attr_get(
					d_inode(dentry), attr_name,
					ops[i].am_attrvalue, &ops[i].am_length,
					ops[i].am_flags);
			break;
		case ATTR_OP_SET:
			ops[i].am_error = mnt_want_write_file(parfilp);
			if (ops[i].am_error)
				break;
			ops[i].am_error = xfs_attrmulti_attr_set(
					d_inode(dentry), attr_name,
					ops[i].am_attrvalue, ops[i].am_length,
					ops[i].am_flags);
			mnt_drop_write_file(parfilp);
			break;
		case ATTR_OP_REMOVE:
			ops[i].am_error = mnt_want_write_file(parfilp);
			if (ops[i].am_error)
				break;
			ops[i].am_error = xfs_attrmulti_attr_remove(
					d_inode(dentry), attr_name,
					ops[i].am_flags);
			mnt_drop_write_file(parfilp);
			break;
		default:
			ops[i].am_error = -EINVAL;
		}
	}

	if (copy_to_user(am_hreq.ops, ops, size))
		error = -EFAULT;

	kfree(attr_name);
out_kfree_ops:
	kfree(ops);
out_dput:
	dput(dentry);
	return error;
}

int
xfs_ioc_space(
	struct file		*filp,
	unsigned int		cmd,
	xfs_flock64_t		*bf)
{
	struct inode		*inode = file_inode(filp);
	struct xfs_inode	*ip = XFS_I(inode);
	struct iattr		iattr;
	enum xfs_prealloc_flags	flags = 0;
	uint			iolock = XFS_IOLOCK_EXCL;
	int			error;

	/*
	 * Only allow the sys admin to reserve space unless
	 * unwritten extents are enabled.
	 */
	if (!xfs_sb_version_hasextflgbit(&ip->i_mount->m_sb) &&
	    !capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (inode->i_flags & (S_IMMUTABLE|S_APPEND))
		return -EPERM;

	if (!(filp->f_mode & FMODE_WRITE))
		return -EBADF;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	if (filp->f_flags & O_DSYNC)
		flags |= XFS_PREALLOC_SYNC;
	if (filp->f_mode & FMODE_NOCMTIME)
		flags |= XFS_PREALLOC_INVISIBLE;

	error = mnt_want_write_file(filp);
	if (error)
		return error;

	xfs_ilock(ip, iolock);
	error = xfs_break_layouts(inode, &iolock);
	if (error)
		goto out_unlock;

	xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
	iolock |= XFS_MMAPLOCK_EXCL;

	switch (bf->l_whence) {
	case 0: /*SEEK_SET*/
		break;
	case 1: /*SEEK_CUR*/
		bf->l_start += filp->f_pos;
		break;
	case 2: /*SEEK_END*/
		bf->l_start += XFS_ISIZE(ip);
		break;
	default:
		error = -EINVAL;
		goto out_unlock;
	}

	/*
	 * length of <= 0 for resv/unresv/zero is invalid.  length for
	 * alloc/free is ignored completely and we have no idea what userspace
	 * might have set it to, so set it to zero to allow range
	 * checks to pass.
	 */
	switch (cmd) {
	case XFS_IOC_ZERO_RANGE:
	case XFS_IOC_RESVSP:
	case XFS_IOC_RESVSP64:
	case XFS_IOC_UNRESVSP:
	case XFS_IOC_UNRESVSP64:
		if (bf->l_len <= 0) {
			error = -EINVAL;
			goto out_unlock;
		}
		break;
	default:
		bf->l_len = 0;
		break;
	}

	if (bf->l_start < 0 ||
	    bf->l_start > inode->i_sb->s_maxbytes ||
	    bf->l_start + bf->l_len < 0 ||
	    bf->l_start + bf->l_len >= inode->i_sb->s_maxbytes) {
		error = -EINVAL;
		goto out_unlock;
	}

	switch (cmd) {
	case XFS_IOC_ZERO_RANGE:
		flags |= XFS_PREALLOC_SET;
		error = xfs_zero_file_space(ip, bf->l_start, bf->l_len);
		break;
	case XFS_IOC_RESVSP:
	case XFS_IOC_RESVSP64:
		flags |= XFS_PREALLOC_SET;
		error = xfs_alloc_file_space(ip, bf->l_start, bf->l_len,
						XFS_BMAPI_PREALLOC);
		break;
	case XFS_IOC_UNRESVSP:
	case XFS_IOC_UNRESVSP64:
		error = xfs_free_file_space(ip, bf->l_start, bf->l_len);
		break;
	case XFS_IOC_ALLOCSP:
	case XFS_IOC_ALLOCSP64:
	case XFS_IOC_FREESP:
	case XFS_IOC_FREESP64:
		flags |= XFS_PREALLOC_CLEAR;
		if (bf->l_start > XFS_ISIZE(ip)) {
			error = xfs_alloc_file_space(ip, XFS_ISIZE(ip),
					bf->l_start - XFS_ISIZE(ip), 0);
			if (error)
				goto out_unlock;
		}

		iattr.ia_valid = ATTR_SIZE;
		iattr.ia_size = bf->l_start;

		error = xfs_vn_setattr_size(file_dentry(filp), &iattr);
		break;
	default:
		ASSERT(0);
		error = -EINVAL;
	}

	if (error)
		goto out_unlock;

	error = xfs_update_prealloc_flags(ip, flags);

out_unlock:
	xfs_iunlock(ip, iolock);
	mnt_drop_write_file(filp);
	return error;
}

STATIC int
xfs_ioc_bulkstat(
	xfs_mount_t		*mp,
	unsigned int		cmd,
	void			__user *arg)
{
	xfs_fsop_bulkreq_t	bulkreq;
	int			count;	/* # of records returned */
	xfs_ino_t		inlast;	/* last inode number */
	int			done;
	int			error;

	/* done = 1 if there are more stats to get and if bulkstat */
	/* should be called again (unused here, but used in dmapi) */

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	if (copy_from_user(&bulkreq, arg, sizeof(xfs_fsop_bulkreq_t)))
		return -EFAULT;

	if (copy_from_user(&inlast, bulkreq.lastip, sizeof(__s64)))
		return -EFAULT;

	if ((count = bulkreq.icount) <= 0)
		return -EINVAL;

	if (bulkreq.ubuffer == NULL)
		return -EINVAL;

	if (cmd == XFS_IOC_FSINUMBERS)
		error = xfs_inumbers(mp, &inlast, &count,
					bulkreq.ubuffer, xfs_inumbers_fmt);
	else if (cmd == XFS_IOC_FSBULKSTAT_SINGLE)
		error = xfs_bulkstat_one(mp, inlast, bulkreq.ubuffer,
					sizeof(xfs_bstat_t), NULL, &done);
	else	/* XFS_IOC_FSBULKSTAT */
		error = xfs_bulkstat(mp, &inlast, &count, xfs_bulkstat_one,
				     sizeof(xfs_bstat_t), bulkreq.ubuffer,
				     &done);

	if (error)
		return error;

	if (bulkreq.ocount != NULL) {
		if (copy_to_user(bulkreq.lastip, &inlast,
						sizeof(xfs_ino_t)))
			return -EFAULT;

		if (copy_to_user(bulkreq.ocount, &count, sizeof(count)))
			return -EFAULT;
	}

	return 0;
}

STATIC int
xfs_ioc_fsgeometry_v1(
	xfs_mount_t		*mp,
	void			__user *arg)
{
	xfs_fsop_geom_t		fsgeo;
	int			error;

	error = xfs_fs_geometry(mp, &fsgeo, 3);
	if (error)
		return error;

	/*
	 * Caller should have passed an argument of type
	 * xfs_fsop_geom_v1_t.
	 * This is a proper subset of the xfs_fsop_geom_t that
	 * xfs_fs_geometry() fills in.
	 */
	if (copy_to_user(arg, &fsgeo, sizeof(xfs_fsop_geom_v1_t)))
		return -EFAULT;
	return 0;
}

STATIC int
xfs_ioc_fsgeometry(
	xfs_mount_t		*mp,
	void			__user *arg)
{
	xfs_fsop_geom_t		fsgeo;
	int			error;

	error = xfs_fs_geometry(mp, &fsgeo, 4);
	if (error)
		return error;

	if (copy_to_user(arg, &fsgeo, sizeof(fsgeo)))
		return -EFAULT;
	return 0;
}

/*
 * Linux extended inode flags interface.
 */

STATIC unsigned int
xfs_merge_ioc_xflags(
	unsigned int	flags,
	unsigned int	start)
{
	unsigned int	xflags = start;

	if (flags & FS_IMMUTABLE_FL)
		xflags |= FS_XFLAG_IMMUTABLE;
	else
		xflags &= ~FS_XFLAG_IMMUTABLE;
	if (flags & FS_APPEND_FL)
		xflags |= FS_XFLAG_APPEND;
	else
		xflags &= ~FS_XFLAG_APPEND;
	if (flags & FS_SYNC_FL)
		xflags |= FS_XFLAG_SYNC;
	else
		xflags &= ~FS_XFLAG_SYNC;
	if (flags & FS_NOATIME_FL)
		xflags |= FS_XFLAG_NOATIME;
	else
		xflags &= ~FS_XFLAG_NOATIME;
	if (flags & FS_NODUMP_FL)
		xflags |= FS_XFLAG_NODUMP;
	else
		xflags &= ~FS_XFLAG_NODUMP;

	return xflags;
}

STATIC unsigned int
xfs_di2lxflags(
	uint16_t	di_flags)
{
	unsigned int	flags = 0;

	if (di_flags & XFS_DIFLAG_IMMUTABLE)
		flags |= FS_IMMUTABLE_FL;
	if (di_flags & XFS_DIFLAG_APPEND)
		flags |= FS_APPEND_FL;
	if (di_flags & XFS_DIFLAG_SYNC)
		flags |= FS_SYNC_FL;
	if (di_flags & XFS_DIFLAG_NOATIME)
		flags |= FS_NOATIME_FL;
	if (di_flags & XFS_DIFLAG_NODUMP)
		flags |= FS_NODUMP_FL;
	return flags;
}

STATIC int
xfs_ioc_fsgetxattr(
	xfs_inode_t		*ip,
	int			attr,
	void			__user *arg)
{
	struct fsxattr		fa;

	memset(&fa, 0, sizeof(struct fsxattr));

	xfs_ilock(ip, XFS_ILOCK_SHARED);
	fa.fsx_xflags = xfs_ip2xflags(ip);
	fa.fsx_extsize = ip->i_d.di_extsize << ip->i_mount->m_sb.sb_blocklog;
	fa.fsx_cowextsize = ip->i_d.di_cowextsize <<
			ip->i_mount->m_sb.sb_blocklog;
	fa.fsx_projid = xfs_get_projid(ip);

	if (attr) {
		if (ip->i_afp) {
			if (ip->i_afp->if_flags & XFS_IFEXTENTS)
				fa.fsx_nextents = xfs_iext_count(ip->i_afp);
			else
				fa.fsx_nextents = ip->i_d.di_anextents;
		} else
			fa.fsx_nextents = 0;
	} else {
		if (ip->i_df.if_flags & XFS_IFEXTENTS)
			fa.fsx_nextents = xfs_iext_count(&ip->i_df);
		else
			fa.fsx_nextents = ip->i_d.di_nextents;
	}
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (copy_to_user(arg, &fa, sizeof(fa)))
		return -EFAULT;
	return 0;
}

STATIC uint16_t
xfs_flags2diflags(
	struct xfs_inode	*ip,
	unsigned int		xflags)
{
	/* can't set PREALLOC this way, just preserve it */
	uint16_t		di_flags =
		(ip->i_d.di_flags & XFS_DIFLAG_PREALLOC);

	if (xflags & FS_XFLAG_IMMUTABLE)
		di_flags |= XFS_DIFLAG_IMMUTABLE;
	if (xflags & FS_XFLAG_APPEND)
		di_flags |= XFS_DIFLAG_APPEND;
	if (xflags & FS_XFLAG_SYNC)
		di_flags |= XFS_DIFLAG_SYNC;
	if (xflags & FS_XFLAG_NOATIME)
		di_flags |= XFS_DIFLAG_NOATIME;
	if (xflags & FS_XFLAG_NODUMP)
		di_flags |= XFS_DIFLAG_NODUMP;
	if (xflags & FS_XFLAG_NODEFRAG)
		di_flags |= XFS_DIFLAG_NODEFRAG;
	if (xflags & FS_XFLAG_FILESTREAM)
		di_flags |= XFS_DIFLAG_FILESTREAM;
	if (S_ISDIR(VFS_I(ip)->i_mode)) {
		if (xflags & FS_XFLAG_RTINHERIT)
			di_flags |= XFS_DIFLAG_RTINHERIT;
		if (xflags & FS_XFLAG_NOSYMLINKS)
			di_flags |= XFS_DIFLAG_NOSYMLINKS;
		if (xflags & FS_XFLAG_EXTSZINHERIT)
			di_flags |= XFS_DIFLAG_EXTSZINHERIT;
		if (xflags & FS_XFLAG_PROJINHERIT)
			di_flags |= XFS_DIFLAG_PROJINHERIT;
	} else if (S_ISREG(VFS_I(ip)->i_mode)) {
		if (xflags & FS_XFLAG_REALTIME)
			di_flags |= XFS_DIFLAG_REALTIME;
		if (xflags & FS_XFLAG_EXTSIZE)
			di_flags |= XFS_DIFLAG_EXTSIZE;
	}

	return di_flags;
}

STATIC uint64_t
xfs_flags2diflags2(
	struct xfs_inode	*ip,
	unsigned int		xflags)
{
	uint64_t		di_flags2 =
		(ip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK);

	if (xflags & FS_XFLAG_DAX)
		di_flags2 |= XFS_DIFLAG2_DAX;
	if (xflags & FS_XFLAG_COWEXTSIZE)
		di_flags2 |= XFS_DIFLAG2_COWEXTSIZE;

	return di_flags2;
}

STATIC void
xfs_diflags_to_linux(
	struct xfs_inode	*ip)
{
	struct inode		*inode = VFS_I(ip);
	unsigned int		xflags = xfs_ip2xflags(ip);

	if (xflags & FS_XFLAG_IMMUTABLE)
		inode->i_flags |= S_IMMUTABLE;
	else
		inode->i_flags &= ~S_IMMUTABLE;
	if (xflags & FS_XFLAG_APPEND)
		inode->i_flags |= S_APPEND;
	else
		inode->i_flags &= ~S_APPEND;
	if (xflags & FS_XFLAG_SYNC)
		inode->i_flags |= S_SYNC;
	else
		inode->i_flags &= ~S_SYNC;
	if (xflags & FS_XFLAG_NOATIME)
		inode->i_flags |= S_NOATIME;
	else
		inode->i_flags &= ~S_NOATIME;
#if 0	/* disabled until the flag switching races are sorted out */
	if (xflags & FS_XFLAG_DAX)
		inode->i_flags |= S_DAX;
	else
		inode->i_flags &= ~S_DAX;
#endif
}

static int
xfs_ioctl_setattr_xflags(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct fsxattr		*fa)
{
	struct xfs_mount	*mp = ip->i_mount;
	uint64_t		di_flags2;

	/* Can't change realtime flag if any extents are allocated. */
	if ((ip->i_d.di_nextents || ip->i_delayed_blks) &&
	    XFS_IS_REALTIME_INODE(ip) != (fa->fsx_xflags & FS_XFLAG_REALTIME))
		return -EINVAL;

	/* If realtime flag is set then must have realtime device */
	if (fa->fsx_xflags & FS_XFLAG_REALTIME) {
		if (mp->m_sb.sb_rblocks == 0 || mp->m_sb.sb_rextsize == 0 ||
		    (ip->i_d.di_extsize % mp->m_sb.sb_rextsize))
			return -EINVAL;
	}

	/* Clear reflink if we are actually able to set the rt flag. */
	if ((fa->fsx_xflags & FS_XFLAG_REALTIME) && xfs_is_reflink_inode(ip))
		ip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;

	/* Don't allow us to set DAX mode for a reflinked file for now. */
	if ((fa->fsx_xflags & FS_XFLAG_DAX) && xfs_is_reflink_inode(ip))
		return -EINVAL;

	/*
	 * Can't modify an immutable/append-only file unless
	 * we have appropriate permission.
	 */
	if (((ip->i_d.di_flags & (XFS_DIFLAG_IMMUTABLE | XFS_DIFLAG_APPEND)) ||
	     (fa->fsx_xflags & (FS_XFLAG_IMMUTABLE | FS_XFLAG_APPEND))) &&
	    !capable(CAP_LINUX_IMMUTABLE))
		return -EPERM;

	/* diflags2 only valid for v3 inodes. */
	di_flags2 = xfs_flags2diflags2(ip, fa->fsx_xflags);
	if (di_flags2 && ip->i_d.di_version < 3)
		return -EINVAL;

	ip->i_d.di_flags = xfs_flags2diflags(ip, fa->fsx_xflags);
	ip->i_d.di_flags2 = di_flags2;

	xfs_diflags_to_linux(ip);
	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	XFS_STATS_INC(mp, xs_ig_attrchg);
	return 0;
}

/*
 * If we are changing DAX flags, we have to ensure the file is clean and any
 * cached objects in the address space are invalidated and removed. This
 * requires us to lock out other IO and page faults similar to a truncate
 * operation. The locks need to be held until the transaction has been
 * committed so that the cache invalidation is atomic with respect to the DAX
 * flag manipulation.
 */
static int
xfs_ioctl_setattr_dax_invalidate(
	struct xfs_inode	*ip,
	struct fsxattr		*fa,
	int			*join_flags)
{
	struct inode		*inode = VFS_I(ip);
	struct super_block	*sb = inode->i_sb;
	int			error;

	*join_flags = 0;

	/*
	 * It is only valid to set the DAX flag on regular files and
	 * directories on filesystems where the block size is equal to the
	 * page size. On directories it serves as an inherit hint.
	 */
	if (fa->fsx_xflags & FS_XFLAG_DAX) {
		if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode)))
			return -EINVAL;
		if (bdev_dax_supported(sb, sb->s_blocksize) < 0)
			return -EINVAL;
	}

	/* If the DAX state is not changing, we have nothing to do here. */
	if ((fa->fsx_xflags & FS_XFLAG_DAX) && IS_DAX(inode))
		return 0;
	if (!(fa->fsx_xflags & FS_XFLAG_DAX) && !IS_DAX(inode))
		return 0;

	/* lock, flush and invalidate mapping in preparation for flag change */
	xfs_ilock(ip, XFS_MMAPLOCK_EXCL | XFS_IOLOCK_EXCL);
	error = filemap_write_and_wait(inode->i_mapping);
	if (error)
		goto out_unlock;
	error = invalidate_inode_pages2(inode->i_mapping);
	if (error)
		goto out_unlock;

	*join_flags = XFS_MMAPLOCK_EXCL | XFS_IOLOCK_EXCL;
	return 0;

out_unlock:
	xfs_iunlock(ip, XFS_MMAPLOCK_EXCL | XFS_IOLOCK_EXCL);
	return error;

}

/*
 * Set up the transaction structure for the setattr operation, checking that we
 * have permission to do so. On success, return a clean transaction and the
 * inode locked exclusively ready for further operation specific checks. On
 * failure, return an error without modifying or locking the inode.
 *
 * The inode might already be IO locked on call. If this is the case, it is
 * indicated in @join_flags and we take full responsibility for ensuring they
 * are unlocked from now on. Hence if we have an error here, we still have to
 * unlock them. Otherwise, once they are joined to the transaction, they will
 * be unlocked on commit/cancel.
 */
static struct xfs_trans *
xfs_ioctl_setattr_get_trans(
	struct xfs_inode	*ip,
	int			join_flags)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error = -EROFS;

	if (mp->m_flags & XFS_MOUNT_RDONLY)
		goto out_unlock;
	error = -EIO;
	if (XFS_FORCED_SHUTDOWN(mp))
		goto out_unlock;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0, 0, 0, &tp);
	if (error)
		return ERR_PTR(error);

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL | join_flags);
	join_flags = 0;

	/*
	 * CAP_FOWNER overrides the following restrictions:
	 *
	 * The user ID of the calling process must be equal to the file owner
	 * ID, except in cases where the CAP_FSETID capability is applicable.
	 */
	if (!inode_owner_or_capable(VFS_I(ip))) {
		error = -EPERM;
		goto out_cancel;
	}

	if (mp->m_flags & XFS_MOUNT_WSYNC)
		xfs_trans_set_sync(tp);

	return tp;

out_cancel:
	xfs_trans_cancel(tp);
out_unlock:
	if (join_flags)
		xfs_iunlock(ip, join_flags);
	return ERR_PTR(error);
}

/*
 * extent size hint validation is somewhat cumbersome. Rules are:
 *
 * 1. extent size hint is only valid for directories and regular files
 * 2. FS_XFLAG_EXTSIZE is only valid for regular files
 * 3. FS_XFLAG_EXTSZINHERIT is only valid for directories.
 * 4. can only be changed on regular files if no extents are allocated
 * 5. can be changed on directories at any time
 * 6. extsize hint of 0 turns off hints, clears inode flags.
 * 7. Extent size must be a multiple of the appropriate block size.
 * 8. for non-realtime files, the extent size hint must be limited
 *    to half the AG size to avoid alignment extending the extent beyond the
 *    limits of the AG.
 *
 * Please keep this function in sync with xfs_scrub_inode_extsize.
 */
static int
xfs_ioctl_setattr_check_extsize(
	struct xfs_inode	*ip,
	struct fsxattr		*fa)
{
	struct xfs_mount	*mp = ip->i_mount;

	if ((fa->fsx_xflags & FS_XFLAG_EXTSIZE) && !S_ISREG(VFS_I(ip)->i_mode))
		return -EINVAL;

	if ((fa->fsx_xflags & FS_XFLAG_EXTSZINHERIT) &&
	    !S_ISDIR(VFS_I(ip)->i_mode))
		return -EINVAL;

	if (S_ISREG(VFS_I(ip)->i_mode) && ip->i_d.di_nextents &&
	    ((ip->i_d.di_extsize << mp->m_sb.sb_blocklog) != fa->fsx_extsize))
		return -EINVAL;

	if (fa->fsx_extsize != 0) {
		xfs_extlen_t	size;
		xfs_fsblock_t	extsize_fsb;

		extsize_fsb = XFS_B_TO_FSB(mp, fa->fsx_extsize);
		if (extsize_fsb > MAXEXTLEN)
			return -EINVAL;

		if (XFS_IS_REALTIME_INODE(ip) ||
		    (fa->fsx_xflags & FS_XFLAG_REALTIME)) {
			size = mp->m_sb.sb_rextsize << mp->m_sb.sb_blocklog;
		} else {
			size = mp->m_sb.sb_blocksize;
			if (extsize_fsb > mp->m_sb.sb_agblocks / 2)
				return -EINVAL;
		}

		if (fa->fsx_extsize % size)
			return -EINVAL;
	} else
		fa->fsx_xflags &= ~(FS_XFLAG_EXTSIZE | FS_XFLAG_EXTSZINHERIT);

	return 0;
}

/*
 * CoW extent size hint validation rules are:
 *
 * 1. CoW extent size hint can only be set if reflink is enabled on the fs.
 *    The inode does not have to have any shared blocks, but it must be a v3.
 * 2. FS_XFLAG_COWEXTSIZE is only valid for directories and regular files;
 *    for a directory, the hint is propagated to new files.
 * 3. Can be changed on files & directories at any time.
 * 4. CoW extsize hint of 0 turns off hints, clears inode flags.
 * 5. Extent size must be a multiple of the appropriate block size.
 * 6. The extent size hint must be limited to half the AG size to avoid
 *    alignment extending the extent beyond the limits of the AG.
 *
 * Please keep this function in sync with xfs_scrub_inode_cowextsize.
 */
static int
xfs_ioctl_setattr_check_cowextsize(
	struct xfs_inode	*ip,
	struct fsxattr		*fa)
{
	struct xfs_mount	*mp = ip->i_mount;

	if (!(fa->fsx_xflags & FS_XFLAG_COWEXTSIZE))
		return 0;

	if (!xfs_sb_version_hasreflink(&ip->i_mount->m_sb) ||
	    ip->i_d.di_version != 3)
		return -EINVAL;

	if (!S_ISREG(VFS_I(ip)->i_mode) && !S_ISDIR(VFS_I(ip)->i_mode))
		return -EINVAL;

	if (fa->fsx_cowextsize != 0) {
		xfs_extlen_t	size;
		xfs_fsblock_t	cowextsize_fsb;

		cowextsize_fsb = XFS_B_TO_FSB(mp, fa->fsx_cowextsize);
		if (cowextsize_fsb > MAXEXTLEN)
			return -EINVAL;

		size = mp->m_sb.sb_blocksize;
		if (cowextsize_fsb > mp->m_sb.sb_agblocks / 2)
			return -EINVAL;

		if (fa->fsx_cowextsize % size)
			return -EINVAL;
	} else
		fa->fsx_xflags &= ~FS_XFLAG_COWEXTSIZE;

	return 0;
}

static int
xfs_ioctl_setattr_check_projid(
	struct xfs_inode	*ip,
	struct fsxattr		*fa)
{
	/* Disallow 32bit project ids if projid32bit feature is not enabled. */
	if (fa->fsx_projid > (uint16_t)-1 &&
	    !xfs_sb_version_hasprojid32bit(&ip->i_mount->m_sb))
		return -EINVAL;

	/*
	 * Project Quota ID state is only allowed to change from within the
	 * init namespace. Enforce that restriction only if we are trying to
	 * change the quota ID state. Everything else is allowed in user
	 * namespaces.
	 */
	if (current_user_ns() == &init_user_ns)
		return 0;

	if (xfs_get_projid(ip) != fa->fsx_projid)
		return -EINVAL;
	if ((fa->fsx_xflags & FS_XFLAG_PROJINHERIT) !=
	    (ip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT))
		return -EINVAL;

	return 0;
}

STATIC int
xfs_ioctl_setattr(
	xfs_inode_t		*ip,
	struct fsxattr		*fa)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	struct xfs_dquot	*udqp = NULL;
	struct xfs_dquot	*pdqp = NULL;
	struct xfs_dquot	*olddquot = NULL;
	int			code;
	int			join_flags = 0;

	trace_xfs_ioctl_setattr(ip);

	code = xfs_ioctl_setattr_check_projid(ip, fa);
	if (code)
		return code;

	/*
	 * If disk quotas are on, we make sure that the dquots do exist on
	 * disk, before we start any other transactions. Trying to do this
	 * later is messy. We don't care to take a readlock to look at the ids
	 * in inode here, because we can't hold it across the trans_reserve.
	 * If the IDs do change before we take the ilock, we're covered
	 * because the i_*dquot fields will get updated anyway.
	 */
	if (XFS_IS_QUOTA_ON(mp)) {
		code = xfs_qm_vop_dqalloc(ip, ip->i_d.di_uid,
					 ip->i_d.di_gid, fa->fsx_projid,
					 XFS_QMOPT_PQUOTA, &udqp, NULL, &pdqp);
		if (code)
			return code;
	}

	/*
	 * Changing DAX config may require inode locking for mapping
	 * invalidation.
	 * These need to be held all the way to transaction commit or cancel
	 * time, so need to be passed through to xfs_ioctl_setattr_get_trans()
	 * so it can apply them to the join call appropriately.
	 */
	code = xfs_ioctl_setattr_dax_invalidate(ip, fa, &join_flags);
	if (code)
		goto error_free_dquots;

	tp = xfs_ioctl_setattr_get_trans(ip, join_flags);
	if (IS_ERR(tp)) {
		code = PTR_ERR(tp);
		goto error_free_dquots;
	}


	if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_PQUOTA_ON(mp) &&
	    xfs_get_projid(ip) != fa->fsx_projid) {
		code = xfs_qm_vop_chown_reserve(tp, ip, udqp, NULL, pdqp,
				capable(CAP_FOWNER) ?  XFS_QMOPT_FORCE_RES : 0);
		if (code)	/* out of quota */
			goto error_trans_cancel;
	}

	code = xfs_ioctl_setattr_check_extsize(ip, fa);
	if (code)
		goto error_trans_cancel;

	code = xfs_ioctl_setattr_check_cowextsize(ip, fa);
	if (code)
		goto error_trans_cancel;

	code = xfs_ioctl_setattr_xflags(tp, ip, fa);
	if (code)
		goto error_trans_cancel;

	/*
	 * Change file ownership.  Must be the owner or privileged.  CAP_FSETID
	 * overrides the following restrictions:
	 *
	 * The set-user-ID and set-group-ID bits of a file will be cleared upon
	 * successful return from chown()
	 */

	if ((VFS_I(ip)->i_mode & (S_ISUID|S_ISGID)) &&
	    !capable_wrt_inode_uidgid(VFS_I(ip), CAP_FSETID))
		VFS_I(ip)->i_mode &= ~(S_ISUID|S_ISGID);

	/* Change the ownerships and register project quota modifications */
	if (xfs_get_projid(ip) != fa->fsx_projid) {
		if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_PQUOTA_ON(mp)) {
			olddquot = xfs_qm_vop_chown(tp, ip,
						&ip->i_pdquot, pdqp);
		}
		ASSERT(ip->i_d.di_version > 1);
		xfs_set_projid(ip, fa->fsx_projid);
	}

	/*
	 * Only set the extent size hint if we've already determined that the
	 * extent size hint should be set on the inode. If no extent size flags
	 * are set on the inode then unconditionally clear the extent size hint.
	 */
	if (ip->i_d.di_flags & (XFS_DIFLAG_EXTSIZE | XFS_DIFLAG_EXTSZINHERIT))
		ip->i_d.di_extsize = fa->fsx_extsize >> mp->m_sb.sb_blocklog;
	else
		ip->i_d.di_extsize = 0;
	if (ip->i_d.di_version == 3 &&
	    (ip->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE))
		ip->i_d.di_cowextsize = fa->fsx_cowextsize >>
				mp->m_sb.sb_blocklog;
	else
		ip->i_d.di_cowextsize = 0;

	code = xfs_trans_commit(tp);

	/*
	 * Release any dquot(s) the inode had kept before chown.
	 */
	xfs_qm_dqrele(olddquot);
	xfs_qm_dqrele(udqp);
	xfs_qm_dqrele(pdqp);

	return code;

error_trans_cancel:
	xfs_trans_cancel(tp);
error_free_dquots:
	xfs_qm_dqrele(udqp);
	xfs_qm_dqrele(pdqp);
	return code;
}

STATIC int
xfs_ioc_fssetxattr(
	xfs_inode_t		*ip,
	struct file		*filp,
	void			__user *arg)
{
	struct fsxattr		fa;
	int			error;

	if (copy_from_user(&fa, arg, sizeof(fa)))
		return -EFAULT;

	error = mnt_want_write_file(filp);
	if (error)
		return error;
	error = xfs_ioctl_setattr(ip, &fa);
	mnt_drop_write_file(filp);
	return error;
}

STATIC int
xfs_ioc_getxflags(
	xfs_inode_t		*ip,
	void			__user *arg)
{
	unsigned int		flags;

	flags = xfs_di2lxflags(ip->i_d.di_flags);
	if (copy_to_user(arg, &flags, sizeof(flags)))
		return -EFAULT;
	return 0;
}

STATIC int
xfs_ioc_setxflags(
	struct xfs_inode	*ip,
	struct file		*filp,
	void			__user *arg)
{
	struct xfs_trans	*tp;
	struct fsxattr		fa;
	unsigned int		flags;
	int			join_flags = 0;
	int			error;

	if (copy_from_user(&flags, arg, sizeof(flags)))
		return -EFAULT;

	if (flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL | \
		      FS_NOATIME_FL | FS_NODUMP_FL | \
		      FS_SYNC_FL))
		return -EOPNOTSUPP;

	fa.fsx_xflags = xfs_merge_ioc_xflags(flags, xfs_ip2xflags(ip));

	error = mnt_want_write_file(filp);
	if (error)
		return error;

	/*
	 * Changing DAX config may require inode locking for mapping
	 * invalidation. These need to be held all the way to transaction
	 * commit or cancel time, so need to be passed through to
	 * xfs_ioctl_setattr_get_trans() so it can apply them to the join
	 * call appropriately.
	 */
	error = xfs_ioctl_setattr_dax_invalidate(ip, &fa, &join_flags);
	if (error)
		goto out_drop_write;

	tp = xfs_ioctl_setattr_get_trans(ip, join_flags);
	if (IS_ERR(tp)) {
		error = PTR_ERR(tp);
		goto out_drop_write;
	}

	error = xfs_ioctl_setattr_xflags(tp, ip, &fa);
	if (error) {
		xfs_trans_cancel(tp);
		goto out_drop_write;
	}

	error = xfs_trans_commit(tp);
out_drop_write:
	mnt_drop_write_file(filp);
	return error;
}

static bool
xfs_getbmap_format(
	struct kgetbmap		*p,
	struct getbmapx __user	*u,
	size_t			recsize)
{
	if (put_user(p->bmv_offset, &u->bmv_offset) ||
	    put_user(p->bmv_block, &u->bmv_block) ||
	    put_user(p->bmv_length, &u->bmv_length) ||
	    put_user(0, &u->bmv_count) ||
	    put_user(0, &u->bmv_entries))
		return false;
	if (recsize < sizeof(struct getbmapx))
		return true;
	if (put_user(0, &u->bmv_iflags) ||
	    put_user(p->bmv_oflags, &u->bmv_oflags) ||
	    put_user(0, &u->bmv_unused1) ||
	    put_user(0, &u->bmv_unused2))
		return false;
	return true;
}

STATIC int
xfs_ioc_getbmap(
	struct file		*file,
	unsigned int		cmd,
	void			__user *arg)
{
	struct getbmapx		bmx = { 0 };
	struct kgetbmap		*buf;
	size_t			recsize;
	int			error, i;

	switch (cmd) {
	case XFS_IOC_GETBMAPA:
		bmx.bmv_iflags = BMV_IF_ATTRFORK;
		/*FALLTHRU*/
	case XFS_IOC_GETBMAP:
		if (file->f_mode & FMODE_NOCMTIME)
			bmx.bmv_iflags |= BMV_IF_NO_DMAPI_READ;
		/* struct getbmap is a strict subset of struct getbmapx. */
		recsize = sizeof(struct getbmap);
		break;
	case XFS_IOC_GETBMAPX:
		recsize = sizeof(struct getbmapx);
		break;
	default:
		return -EINVAL;
	}

	if (copy_from_user(&bmx, arg, recsize))
		return -EFAULT;

	if (bmx.bmv_count < 2)
		return -EINVAL;
	if (bmx.bmv_count > ULONG_MAX / recsize)
		return -ENOMEM;

	buf = kmem_zalloc_large(bmx.bmv_count * sizeof(*buf), 0);
	if (!buf)
		return -ENOMEM;

	error = xfs_getbmap(XFS_I(file_inode(file)), &bmx, buf);
	if (error)
		goto out_free_buf;

	error = -EFAULT;
	if (copy_to_user(arg, &bmx, recsize))
		goto out_free_buf;
	arg += recsize;

	for (i = 0; i < bmx.bmv_entries; i++) {
		if (!xfs_getbmap_format(buf + i, arg, recsize))
			goto out_free_buf;
		arg += recsize;
	}

	error = 0;
out_free_buf:
	kmem_free(buf);
	return error;
}

struct getfsmap_info {
	struct xfs_mount	*mp;
	struct fsmap_head __user *data;
	unsigned int		idx;
	__u32			last_flags;
};

STATIC int
xfs_getfsmap_format(struct xfs_fsmap *xfm, void *priv)
{
	struct getfsmap_info	*info = priv;
	struct fsmap		fm;

	trace_xfs_getfsmap_mapping(info->mp, xfm);

	info->last_flags = xfm->fmr_flags;
	xfs_fsmap_from_internal(&fm, xfm);
	if (copy_to_user(&info->data->fmh_recs[info->idx++], &fm,
			sizeof(struct fsmap)))
		return -EFAULT;

	return 0;
}

STATIC int
xfs_ioc_getfsmap(
	struct xfs_inode	*ip,
	struct fsmap_head	__user *arg)
{
	struct getfsmap_info	info = { NULL };
	struct xfs_fsmap_head	xhead = {0};
	struct fsmap_head	head;
	bool			aborted = false;
	int			error;

	if (copy_from_user(&head, arg, sizeof(struct fsmap_head)))
		return -EFAULT;
	if (memchr_inv(head.fmh_reserved, 0, sizeof(head.fmh_reserved)) ||
	    memchr_inv(head.fmh_keys[0].fmr_reserved, 0,
		       sizeof(head.fmh_keys[0].fmr_reserved)) ||
	    memchr_inv(head.fmh_keys[1].fmr_reserved, 0,
		       sizeof(head.fmh_keys[1].fmr_reserved)))
		return -EINVAL;

	xhead.fmh_iflags = head.fmh_iflags;
	xhead.fmh_count = head.fmh_count;
	xfs_fsmap_to_internal(&xhead.fmh_keys[0], &head.fmh_keys[0]);
	xfs_fsmap_to_internal(&xhead.fmh_keys[1], &head.fmh_keys[1]);

	trace_xfs_getfsmap_low_key(ip->i_mount, &xhead.fmh_keys[0]);
	trace_xfs_getfsmap_high_key(ip->i_mount, &xhead.fmh_keys[1]);

	info.mp = ip->i_mount;
	info.data = arg;
	error = xfs_getfsmap(ip->i_mount, &xhead, xfs_getfsmap_format, &info);
	if (error == XFS_BTREE_QUERY_RANGE_ABORT) {
		error = 0;
		aborted = true;
	} else if (error)
		return error;

	/* If we didn't abort, set the "last" flag in the last fmx */
	if (!aborted && info.idx) {
		info.last_flags |= FMR_OF_LAST;
		if (copy_to_user(&info.data->fmh_recs[info.idx - 1].fmr_flags,
				&info.last_flags, sizeof(info.last_flags)))
			return -EFAULT;
	}

	/* copy back header */
	head.fmh_entries = xhead.fmh_entries;
	head.fmh_oflags = xhead.fmh_oflags;
	if (copy_to_user(arg, &head, sizeof(struct fsmap_head)))
		return -EFAULT;

	return 0;
}

STATIC int
xfs_ioc_scrub_metadata(
	struct xfs_inode	*ip,
	void			__user *arg)
{
	struct xfs_scrub_metadata	scrub;
	int				error;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&scrub, arg, sizeof(scrub)))
		return -EFAULT;
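
	/*
	 * xfs_scrub_metadata() runs the requested check and reports the
	 * outcome through @scrub, which is copied back to userspace below.
	 */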
	error = xfs_scrub_metadata(ip, &scrub);
	if (error)
		return error;

	if (copy_to_user(arg, &scrub, sizeof(scrub)))
		return -EFAULT;

	return 0;
}

int
xfs_ioc_swapext(
	xfs_swapext_t	*sxp)
{
	xfs_inode_t	*ip, *tip;
	struct fd	f, tmp;
	int		error = 0;

	/* Pull information for the target fd */
	f = fdget((int)sxp->sx_fdtarget);
	if (!f.file) {
		error = -EINVAL;
		goto out;
	}

	if (!(f.file->f_mode & FMODE_WRITE) ||
	    !(f.file->f_mode & FMODE_READ) ||
	    (f.file->f_flags & O_APPEND)) {
		error = -EBADF;
		goto out_put_file;
	}

	tmp = fdget((int)sxp->sx_fdtmp);
	if (!tmp.file) {
		error = -EINVAL;
		goto out_put_file;
	}

	if (!(tmp.file->f_mode & FMODE_WRITE) ||
	    !(tmp.file->f_mode & FMODE_READ) ||
	    (tmp.file->f_flags & O_APPEND)) {
		error = -EBADF;
		goto out_put_tmp_file;
	}

	if (IS_SWAPFILE(file_inode(f.file)) ||
	    IS_SWAPFILE(file_inode(tmp.file))) {
		error = -EINVAL;
		goto out_put_tmp_file;
	}

	/*
	 * We need to ensure that the fds passed in point to XFS inodes
	 * before we cast and access them as XFS structures as we have no
	 * control over what the user passes us here.
	 */
	if (f.file->f_op != &xfs_file_operations ||
	    tmp.file->f_op != &xfs_file_operations) {
		error = -EINVAL;
		goto out_put_tmp_file;
	}

	ip = XFS_I(file_inode(f.file));
	tip = XFS_I(file_inode(tmp.file));

	if (ip->i_mount != tip->i_mount) {
		error = -EINVAL;
		goto out_put_tmp_file;
	}

	if (ip->i_ino == tip->i_ino) {
		error = -EINVAL;
		goto out_put_tmp_file;
	}

	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
		error = -EIO;
		goto out_put_tmp_file;
	}

	error = xfs_swap_extents(ip, tip, sxp);

out_put_tmp_file:
	fdput(tmp);
out_put_file:
	fdput(f);
out:
	return error;
}

/*
 * Note: some of the ioctls return positive numbers as a
 * byte count indicating success, such as readlink_by_handle.
 * So we don't "sign flip" like most other routines.  This means
 * true errors need to be returned as a negative value.
 */
long
xfs_file_ioctl(
	struct file		*filp,
	unsigned int		cmd,
	unsigned long		p)
{
	struct inode		*inode = file_inode(filp);
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	void			__user *arg = (void __user *)p;
	int			error;

	trace_xfs_file_ioctl(ip);

	switch (cmd) {
	case FITRIM:
		return xfs_ioc_trim(mp, arg);
	case XFS_IOC_ALLOCSP:
	case XFS_IOC_FREESP:
	case XFS_IOC_RESVSP:
	case XFS_IOC_UNRESVSP:
	case XFS_IOC_ALLOCSP64:
	case XFS_IOC_FREESP64:
	case XFS_IOC_RESVSP64:
	case XFS_IOC_UNRESVSP64:
	case XFS_IOC_ZERO_RANGE: {
		xfs_flock64_t		bf;

		if (copy_from_user(&bf, arg, sizeof(bf)))
			return -EFAULT;
		return xfs_ioc_space(filp, cmd, &bf);
	}
	case XFS_IOC_DIOINFO: {
		struct dioattr	da;
		xfs_buftarg_t	*target =
			XFS_IS_REALTIME_INODE(ip) ?
			mp->m_rtdev_targp : mp->m_ddev_targp;
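
		/*
		 * Direct I/O geometry: d_mem is the required buffer memory
		 * alignment, d_miniosz the required file offset and length
		 * alignment, and d_maxiosz the largest single I/O size.
		 */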
		da.d_mem = da.d_miniosz = target->bt_logical_sectorsize;
		da.d_maxiosz = INT_MAX & ~(da.d_miniosz - 1);

		if (copy_to_user(arg, &da, sizeof(da)))
			return -EFAULT;
		return 0;
	}

	case XFS_IOC_FSBULKSTAT_SINGLE:
	case XFS_IOC_FSBULKSTAT:
	case XFS_IOC_FSINUMBERS:
		return xfs_ioc_bulkstat(mp, cmd, arg);

	case XFS_IOC_FSGEOMETRY_V1:
		return xfs_ioc_fsgeometry_v1(mp, arg);

	case XFS_IOC_FSGEOMETRY:
		return xfs_ioc_fsgeometry(mp, arg);

	case XFS_IOC_GETVERSION:
		return put_user(inode->i_generation, (int __user *)arg);

	case XFS_IOC_FSGETXATTR:
		return xfs_ioc_fsgetxattr(ip, 0, arg);
	case XFS_IOC_FSGETXATTRA:
		return xfs_ioc_fsgetxattr(ip, 1, arg);
	case XFS_IOC_FSSETXATTR:
		return xfs_ioc_fssetxattr(ip, filp, arg);
	case XFS_IOC_GETXFLAGS:
		return xfs_ioc_getxflags(ip, arg);
	case XFS_IOC_SETXFLAGS:
		return xfs_ioc_setxflags(ip, filp, arg);

	case XFS_IOC_FSSETDM: {
		struct fsdmidata	dmi;

		if (copy_from_user(&dmi, arg, sizeof(dmi)))
			return -EFAULT;

		error = mnt_want_write_file(filp);
		if (error)
			return error;

		error = xfs_set_dmattrs(ip, dmi.fsd_dmevmask,
				dmi.fsd_dmstate);
		mnt_drop_write_file(filp);
		return error;
	}

	case XFS_IOC_GETBMAP:
	case XFS_IOC_GETBMAPA:
	case XFS_IOC_GETBMAPX:
		return xfs_ioc_getbmap(filp, cmd, arg);

	case FS_IOC_GETFSMAP:
		return xfs_ioc_getfsmap(ip, arg);

	case XFS_IOC_SCRUB_METADATA:
		return xfs_ioc_scrub_metadata(ip, arg);

	case XFS_IOC_FD_TO_HANDLE:
	case XFS_IOC_PATH_TO_HANDLE:
	case XFS_IOC_PATH_TO_FSHANDLE: {
		xfs_fsop_handlereq_t	hreq;

		if (copy_from_user(&hreq, arg, sizeof(hreq)))
			return -EFAULT;
		return xfs_find_handle(cmd, &hreq);
	}
	case XFS_IOC_OPEN_BY_HANDLE: {
		xfs_fsop_handlereq_t	hreq;

		if (copy_from_user(&hreq, arg, sizeof(xfs_fsop_handlereq_t)))
			return -EFAULT;
		return xfs_open_by_handle(filp, &hreq);
	}
	case XFS_IOC_FSSETDM_BY_HANDLE:
		return xfs_fssetdm_by_handle(filp, arg);

	case XFS_IOC_READLINK_BY_HANDLE: {
		xfs_fsop_handlereq_t	hreq;

		if (copy_from_user(&hreq, arg, sizeof(xfs_fsop_handlereq_t)))
			return -EFAULT;
		return xfs_readlink_by_handle(filp, &hreq);
	}
	case XFS_IOC_ATTRLIST_BY_HANDLE:
		return xfs_attrlist_by_handle(filp, arg);

	case XFS_IOC_ATTRMULTI_BY_HANDLE:
		return xfs_attrmulti_by_handle(filp, arg);

	case XFS_IOC_SWAPEXT: {
		struct xfs_swapext	sxp;

		if (copy_from_user(&sxp, arg, sizeof(xfs_swapext_t)))
			return -EFAULT;
		error = mnt_want_write_file(filp);
		if (error)
			return error;
		error = xfs_ioc_swapext(&sxp);
		mnt_drop_write_file(filp);
		return error;
	}

	case XFS_IOC_FSCOUNTS: {
		xfs_fsop_counts_t out;

		error = xfs_fs_counts(mp, &out);
		if (error)
			return error;

		if (copy_to_user(arg, &out, sizeof(out)))
			return -EFAULT;
		return 0;
	}

	case XFS_IOC_SET_RESBLKS: {
		xfs_fsop_resblks_t inout;
		uint64_t	   in;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		if (mp->m_flags & XFS_MOUNT_RDONLY)
			return -EROFS;

		if (copy_from_user(&inout, arg, sizeof(inout)))
			return -EFAULT;

		error = mnt_want_write_file(filp);
		if (error)
			return error;
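
		/*
		 * xfs_reserve_blocks() also fills @inout with the resulting
		 * reserved and available block counts, which are copied back
		 * to userspace below.
		 */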
		/* input parameter is passed in resblks field of structure */
		in = inout.resblks;
		error = xfs_reserve_blocks(mp, &in, &inout);
		mnt_drop_write_file(filp);
		if (error)
			return error;

		if (copy_to_user(arg, &inout, sizeof(inout)))
			return -EFAULT;
		return 0;
	}

	case XFS_IOC_GET_RESBLKS: {
		xfs_fsop_resblks_t out;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		error = xfs_reserve_blocks(mp, NULL, &out);
		if (error)
			return error;

		if (copy_to_user(arg, &out, sizeof(out)))
			return -EFAULT;

		return 0;
	}

	case XFS_IOC_FSGROWFSDATA: {
		xfs_growfs_data_t in;

		if (copy_from_user(&in, arg, sizeof(in)))
			return -EFAULT;

		error = mnt_want_write_file(filp);
		if (error)
			return error;
		error = xfs_growfs_data(mp, &in);
		mnt_drop_write_file(filp);
		return error;
	}

	case XFS_IOC_FSGROWFSLOG: {
		xfs_growfs_log_t in;

		if (copy_from_user(&in, arg, sizeof(in)))
			return -EFAULT;

		error = mnt_want_write_file(filp);
		if (error)
			return error;
		error = xfs_growfs_log(mp, &in);
		mnt_drop_write_file(filp);
		return error;
	}

	case XFS_IOC_FSGROWFSRT: {
		xfs_growfs_rt_t in;

		if (copy_from_user(&in, arg, sizeof(in)))
			return -EFAULT;

		error = mnt_want_write_file(filp);
		if (error)
			return error;
		error = xfs_growfs_rt(mp, &in);
		mnt_drop_write_file(filp);
		return error;
	}

	case XFS_IOC_GOINGDOWN: {
		uint32_t in;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		if (get_user(in, (uint32_t __user *)arg))
			return -EFAULT;

		return xfs_fs_goingdown(mp, in);
	}

	case XFS_IOC_ERROR_INJECTION: {
		xfs_error_injection_t in;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		if (copy_from_user(&in, arg, sizeof(in)))
			return -EFAULT;

		return xfs_errortag_add(mp, in.errtag);
	}

	case XFS_IOC_ERROR_CLEARALL:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		return xfs_errortag_clearall(mp);

	case XFS_IOC_FREE_EOFBLOCKS: {
		struct xfs_fs_eofblocks eofb;
		struct xfs_eofblocks keofb;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		if (mp->m_flags & XFS_MOUNT_RDONLY)
			return -EROFS;

		if (copy_from_user(&eofb, arg, sizeof(eofb)))
			return -EFAULT;

		error = xfs_fs_eofblocks_from_user(&eofb, &keofb);
		if (error)
			return error;

		return xfs_icache_free_eofblocks(mp, &keofb);
	}

	default:
		return -ENOTTY;
	}
}