/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_ioctl.h"
#include "xfs_alloc.h"
#include "xfs_rtalloc.h"
#include "xfs_itable.h"
#include "xfs_error.h"
#include "xfs_attr.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_fsops.h"
#include "xfs_discard.h"
#include "xfs_quota.h"
#include "xfs_export.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_symlink.h"
#include "xfs_trans.h"
#include "xfs_pnfs.h"
#include "xfs_acl.h"
#include "xfs_btree.h"
#include <linux/fsmap.h>
#include "xfs_fsmap.h"

#include <linux/capability.h>
#include <linux/cred.h>
#include <linux/dcache.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/exportfs.h>

/*
 * xfs_find_handle maps from userspace xfs_fsop_handlereq structure to
 * a file or fs handle.
 *
 * XFS_IOC_PATH_TO_FSHANDLE
 *    returns fs handle for a mount point or path within that mount point
 * XFS_IOC_FD_TO_HANDLE
 *    returns full handle for a FD opened in user space
 * XFS_IOC_PATH_TO_HANDLE
 *    returns full handle for a path
 */
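
/*
 * Illustrative userspace sketch (assumption, not part of the kernel build):
 * how a privileged tool might obtain the filesystem handle for a mount
 * point with XFS_IOC_PATH_TO_FSHANDLE.  The headers, open flags and error
 * handling shown are assumptions kept minimal for clarity.
 *
 *	#include <xfs/xfs.h>		// xfs_fsop_handlereq_t, xfs_handle_t
 *	#include <sys/ioctl.h>
 *	#include <fcntl.h>
 *
 *	xfs_fsop_handlereq_t	hreq = { 0 };
 *	xfs_handle_t		handle;
 *	__s32			hlen;
 *	int			fd = open("/mnt/scratch", O_RDONLY);
 *
 *	hreq.path = "/mnt/scratch";
 *	hreq.ohandle = &handle;
 *	hreq.ohandlen = &hlen;
 *	if (ioctl(fd, XFS_IOC_PATH_TO_FSHANDLE, &hreq) < 0)
 *		perror("XFS_IOC_PATH_TO_FSHANDLE");
 */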
int
xfs_find_handle(
	unsigned int		cmd,
	xfs_fsop_handlereq_t	*hreq)
{
	int			hsize;
	xfs_handle_t		handle;
	struct inode		*inode;
	struct fd		f = {NULL};
	struct path		path;
	int			error;
	struct xfs_inode	*ip;

	if (cmd == XFS_IOC_FD_TO_HANDLE) {
		f = fdget(hreq->fd);
		if (!f.file)
			return -EBADF;
		inode = file_inode(f.file);
	} else {
		error = user_lpath((const char __user *)hreq->path, &path);
		if (error)
			return error;
		inode = d_inode(path.dentry);
	}
	ip = XFS_I(inode);

	/*
	 * We can only generate handles for inodes residing on a XFS filesystem,
	 * and only for regular files, directories or symbolic links.
	 */
	error = -EINVAL;
	if (inode->i_sb->s_magic != XFS_SB_MAGIC)
		goto out_put;

	error = -EBADF;
	if (!S_ISREG(inode->i_mode) &&
	    !S_ISDIR(inode->i_mode) &&
	    !S_ISLNK(inode->i_mode))
		goto out_put;


	memcpy(&handle.ha_fsid, ip->i_mount->m_fixedfsid, sizeof(xfs_fsid_t));

	if (cmd == XFS_IOC_PATH_TO_FSHANDLE) {
		/*
		 * This handle only contains an fsid, zero the rest.
		 */
		memset(&handle.ha_fid, 0, sizeof(handle.ha_fid));
		hsize = sizeof(xfs_fsid_t);
	} else {
		handle.ha_fid.fid_len = sizeof(xfs_fid_t) -
					sizeof(handle.ha_fid.fid_len);
		handle.ha_fid.fid_pad = 0;
		handle.ha_fid.fid_gen = inode->i_generation;
		handle.ha_fid.fid_ino = ip->i_ino;
		hsize = sizeof(xfs_handle_t);
	}

	error = -EFAULT;
	if (copy_to_user(hreq->ohandle, &handle, hsize) ||
	    copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
		goto out_put;

	error = 0;

 out_put:
	if (cmd == XFS_IOC_FD_TO_HANDLE)
		fdput(f);
	else
		path_put(&path);
	return error;
}

/*
 * No need to do permission checks on the various pathname components
 * as the handle operations are privileged.
 */
STATIC int
xfs_handle_acceptable(
	void			*context,
	struct dentry		*dentry)
{
	return 1;
}

/*
 * Convert userspace handle data into a dentry.
 */
struct dentry *
xfs_handle_to_dentry(
	struct file		*parfilp,
	void __user		*uhandle,
	u32			hlen)
{
	xfs_handle_t		handle;
	struct xfs_fid64	fid;

	/*
	 * Only allow handle opens under a directory.
	 */
	if (!S_ISDIR(file_inode(parfilp)->i_mode))
		return ERR_PTR(-ENOTDIR);

	if (hlen != sizeof(xfs_handle_t))
		return ERR_PTR(-EINVAL);
	if (copy_from_user(&handle, uhandle, hlen))
		return ERR_PTR(-EFAULT);
	if (handle.ha_fid.fid_len !=
	    sizeof(handle.ha_fid) - sizeof(handle.ha_fid.fid_len))
		return ERR_PTR(-EINVAL);

	memset(&fid, 0, sizeof(struct fid));
	fid.ino = handle.ha_fid.fid_ino;
	fid.gen = handle.ha_fid.fid_gen;

	return exportfs_decode_fh(parfilp->f_path.mnt, (struct fid *)&fid, 3,
			FILEID_INO32_GEN | XFS_FILEID_TYPE_64FLAG,
			xfs_handle_acceptable, NULL);
}

STATIC struct dentry *
xfs_handlereq_to_dentry(
	struct file		*parfilp,
	xfs_fsop_handlereq_t	*hreq)
{
	return xfs_handle_to_dentry(parfilp, hreq->ihandle, hreq->ihandlen);
}

int
xfs_open_by_handle(
	struct file		*parfilp,
	xfs_fsop_handlereq_t	*hreq)
{
	const struct cred	*cred = current_cred();
	int			error;
	int			fd;
	int			permflag;
	struct file		*filp;
	struct inode		*inode;
	struct dentry		*dentry;
	fmode_t			fmode;
	struct path		path;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	dentry = xfs_handlereq_to_dentry(parfilp, hreq);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);
	inode = d_inode(dentry);

	/* Restrict xfs_open_by_handle to directories & regular files. */
	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode))) {
		error = -EPERM;
		goto out_dput;
	}

#if BITS_PER_LONG != 32
	hreq->oflags |= O_LARGEFILE;
#endif

	permflag = hreq->oflags;
	fmode = OPEN_FMODE(permflag);
	if ((!(permflag & O_APPEND) || (permflag & O_TRUNC)) &&
	    (fmode & FMODE_WRITE) && IS_APPEND(inode)) {
		error = -EPERM;
		goto out_dput;
	}

	if ((fmode & FMODE_WRITE) && IS_IMMUTABLE(inode)) {
		error = -EPERM;
		goto out_dput;
	}

	/* Can't write directories. */
	if (S_ISDIR(inode->i_mode) && (fmode & FMODE_WRITE)) {
		error = -EISDIR;
		goto out_dput;
	}

	fd = get_unused_fd_flags(0);
	if (fd < 0) {
		error = fd;
		goto out_dput;
	}

	path.mnt = parfilp->f_path.mnt;
	path.dentry = dentry;
	filp = dentry_open(&path, hreq->oflags, cred);
	dput(dentry);
	if (IS_ERR(filp)) {
		put_unused_fd(fd);
		return PTR_ERR(filp);
	}

	if (S_ISREG(inode->i_mode)) {
		filp->f_flags |= O_NOATIME;
		filp->f_mode |= FMODE_NOCMTIME;
	}

	fd_install(fd, filp);
	return fd;

 out_dput:
	dput(dentry);
	return error;
}
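
/*
 * Illustrative userspace sketch (assumption, not part of this file): a
 * backup-style tool reopening a file from a previously stored handle.
 * XFS_IOC_OPEN_BY_HANDLE is issued against a descriptor for any file on
 * the same filesystem and returns the new fd on success; the variable
 * names are hypothetical.
 *
 *	xfs_fsop_handlereq_t	hreq = { 0 };
 *	int			newfd;
 *
 *	hreq.ihandle = &saved_handle;		// from XFS_IOC_FD_TO_HANDLE
 *	hreq.ihandlen = sizeof(saved_handle);
 *	hreq.oflags = O_RDONLY;
 *	newfd = ioctl(mountpoint_fd, XFS_IOC_OPEN_BY_HANDLE, &hreq);
 *	if (newfd < 0)
 *		perror("XFS_IOC_OPEN_BY_HANDLE");
 */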

int
xfs_readlink_by_handle(
	struct file		*parfilp,
	xfs_fsop_handlereq_t	*hreq)
{
	struct dentry		*dentry;
	__u32			olen;
	int			error;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	dentry = xfs_handlereq_to_dentry(parfilp, hreq);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	/* Restrict this handle operation to symlinks only. */
	if (!d_is_symlink(dentry)) {
		error = -EINVAL;
		goto out_dput;
	}

	if (copy_from_user(&olen, hreq->ohandlen, sizeof(__u32))) {
		error = -EFAULT;
		goto out_dput;
	}

	error = vfs_readlink(dentry, hreq->ohandle, olen);

 out_dput:
	dput(dentry);
	return error;
}

int
xfs_set_dmattrs(
	xfs_inode_t	*ip,
	u_int		evmask,
	u_int16_t	state)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_trans_t	*tp;
	int		error;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0, 0, 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);

	ip->i_d.di_dmevmask = evmask;
	ip->i_d.di_dmstate = state;

	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	error = xfs_trans_commit(tp);

	return error;
}

STATIC int
xfs_fssetdm_by_handle(
	struct file		*parfilp,
	void			__user *arg)
{
	int			error;
	struct fsdmidata	fsd;
	xfs_fsop_setdm_handlereq_t dmhreq;
	struct dentry		*dentry;

	if (!capable(CAP_MKNOD))
		return -EPERM;
	if (copy_from_user(&dmhreq, arg, sizeof(xfs_fsop_setdm_handlereq_t)))
		return -EFAULT;

	error = mnt_want_write_file(parfilp);
	if (error)
		return error;

	dentry = xfs_handlereq_to_dentry(parfilp, &dmhreq.hreq);
	if (IS_ERR(dentry)) {
		mnt_drop_write_file(parfilp);
		return PTR_ERR(dentry);
	}

	if (IS_IMMUTABLE(d_inode(dentry)) || IS_APPEND(d_inode(dentry))) {
		error = -EPERM;
		goto out;
	}

	if (copy_from_user(&fsd, dmhreq.data, sizeof(fsd))) {
		error = -EFAULT;
		goto out;
	}

	error = xfs_set_dmattrs(XFS_I(d_inode(dentry)), fsd.fsd_dmevmask,
				 fsd.fsd_dmstate);

 out:
	mnt_drop_write_file(parfilp);
	dput(dentry);
	return error;
}

STATIC int
xfs_attrlist_by_handle(
	struct file		*parfilp,
	void			__user *arg)
{
	int			error = -ENOMEM;
	attrlist_cursor_kern_t	*cursor;
	struct xfs_fsop_attrlist_handlereq __user *p = arg;
	xfs_fsop_attrlist_handlereq_t al_hreq;
	struct dentry		*dentry;
	char			*kbuf;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (copy_from_user(&al_hreq, arg, sizeof(xfs_fsop_attrlist_handlereq_t)))
		return -EFAULT;
	if (al_hreq.buflen < sizeof(struct attrlist) ||
	    al_hreq.buflen > XFS_XATTR_LIST_MAX)
		return -EINVAL;

	/*
	 * Reject flags, only allow namespaces.
	 */
	if (al_hreq.flags & ~(ATTR_ROOT | ATTR_SECURE))
		return -EINVAL;

	dentry = xfs_handlereq_to_dentry(parfilp, &al_hreq.hreq);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	kbuf = kmem_zalloc_large(al_hreq.buflen, KM_SLEEP);
	if (!kbuf)
		goto out_dput;

	cursor = (attrlist_cursor_kern_t *)&al_hreq.pos;
	error = xfs_attr_list(XFS_I(d_inode(dentry)), kbuf, al_hreq.buflen,
					al_hreq.flags, cursor);
	if (error)
		goto out_kfree;

	if (copy_to_user(&p->pos, cursor, sizeof(attrlist_cursor_kern_t))) {
		error = -EFAULT;
		goto out_kfree;
	}

	if (copy_to_user(al_hreq.buffer, kbuf, al_hreq.buflen))
		error = -EFAULT;

out_kfree:
	kmem_free(kbuf);
out_dput:
	dput(dentry);
	return error;
}

int
xfs_attrmulti_attr_get(
	struct inode		*inode,
	unsigned char		*name,
	unsigned char		__user *ubuf,
	uint32_t		*len,
	uint32_t		flags)
{
	unsigned char		*kbuf;
	int			error = -EFAULT;

	if (*len > XFS_XATTR_SIZE_MAX)
		return -EINVAL;
	kbuf = kmem_zalloc_large(*len, KM_SLEEP);
	if (!kbuf)
		return -ENOMEM;

	error = xfs_attr_get(XFS_I(inode), name, kbuf, (int *)len, flags);
	if (error)
		goto out_kfree;

	if (copy_to_user(ubuf, kbuf, *len))
		error = -EFAULT;

out_kfree:
	kmem_free(kbuf);
	return error;
}

int
xfs_attrmulti_attr_set(
	struct inode		*inode,
	unsigned char		*name,
	const unsigned char	__user *ubuf,
	uint32_t		len,
	uint32_t		flags)
{
	unsigned char		*kbuf;
	int			error;

	if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
		return -EPERM;
	if (len > XFS_XATTR_SIZE_MAX)
		return -EINVAL;

	kbuf = memdup_user(ubuf, len);
	if (IS_ERR(kbuf))
		return PTR_ERR(kbuf);

	error = xfs_attr_set(XFS_I(inode), name, kbuf, len, flags);
	if (!error)
		xfs_forget_acl(inode, name, flags);
	kfree(kbuf);
	return error;
}

int
xfs_attrmulti_attr_remove(
	struct inode		*inode,
	unsigned char		*name,
	uint32_t		flags)
{
	int			error;

	if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
		return -EPERM;
	error = xfs_attr_remove(XFS_I(inode), name, flags);
	if (!error)
		xfs_forget_acl(inode, name, flags);
	return error;
}
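
/*
 * Illustrative userspace sketch (assumption, not part of this file): one
 * ATTR_OP_GET operation issued through XFS_IOC_ATTRMULTI_BY_HANDLE.  The
 * handle, buffer and attribute name are hypothetical; the field names
 * mirror struct xfs_attr_multiop and xfs_fsop_attrmulti_handlereq_t as
 * used by the handler below.
 *
 *	struct xfs_attr_multiop		op = { 0 };
 *	xfs_fsop_attrmulti_handlereq_t	am = { 0 };
 *
 *	op.am_opcode = ATTR_OP_GET;
 *	op.am_attrname = "user.checksum";
 *	op.am_attrvalue = valuebuf;
 *	op.am_length = sizeof(valuebuf);
 *	am.hreq.ihandle = &saved_handle;
 *	am.hreq.ihandlen = sizeof(saved_handle);
 *	am.opcount = 1;
 *	am.ops = &op;
 *	if (ioctl(mountpoint_fd, XFS_IOC_ATTRMULTI_BY_HANDLE, &am) < 0)
 *		perror("XFS_IOC_ATTRMULTI_BY_HANDLE");
 *	// per-op status comes back in op.am_error
 */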

STATIC int
xfs_attrmulti_by_handle(
	struct file		*parfilp,
	void			__user *arg)
{
	int			error;
	xfs_attr_multiop_t	*ops;
	xfs_fsop_attrmulti_handlereq_t am_hreq;
	struct dentry		*dentry;
	unsigned int		i, size;
	unsigned char		*attr_name;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (copy_from_user(&am_hreq, arg, sizeof(xfs_fsop_attrmulti_handlereq_t)))
		return -EFAULT;

	/* overflow check */
	if (am_hreq.opcount >= INT_MAX / sizeof(xfs_attr_multiop_t))
		return -E2BIG;

	dentry = xfs_handlereq_to_dentry(parfilp, &am_hreq.hreq);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	error = -E2BIG;
	size = am_hreq.opcount * sizeof(xfs_attr_multiop_t);
	if (!size || size > 16 * PAGE_SIZE)
		goto out_dput;

	ops = memdup_user(am_hreq.ops, size);
	if (IS_ERR(ops)) {
		error = PTR_ERR(ops);
		goto out_dput;
	}

	error = -ENOMEM;
	attr_name = kmalloc(MAXNAMELEN, GFP_KERNEL);
	if (!attr_name)
		goto out_kfree_ops;

	error = 0;
	for (i = 0; i < am_hreq.opcount; i++) {
		ops[i].am_error = strncpy_from_user((char *)attr_name,
				ops[i].am_attrname, MAXNAMELEN);
		if (ops[i].am_error == 0 || ops[i].am_error == MAXNAMELEN)
			error = -ERANGE;
		if (ops[i].am_error < 0)
			break;

		switch (ops[i].am_opcode) {
		case ATTR_OP_GET:
			ops[i].am_error = xfs_attrmulti_attr_get(
					d_inode(dentry), attr_name,
					ops[i].am_attrvalue, &ops[i].am_length,
					ops[i].am_flags);
			break;
		case ATTR_OP_SET:
			ops[i].am_error = mnt_want_write_file(parfilp);
			if (ops[i].am_error)
				break;
			ops[i].am_error = xfs_attrmulti_attr_set(
					d_inode(dentry), attr_name,
					ops[i].am_attrvalue, ops[i].am_length,
					ops[i].am_flags);
			mnt_drop_write_file(parfilp);
			break;
		case ATTR_OP_REMOVE:
			ops[i].am_error = mnt_want_write_file(parfilp);
			if (ops[i].am_error)
				break;
			ops[i].am_error = xfs_attrmulti_attr_remove(
					d_inode(dentry), attr_name,
					ops[i].am_flags);
			mnt_drop_write_file(parfilp);
			break;
		default:
			ops[i].am_error = -EINVAL;
		}
	}

	if (copy_to_user(am_hreq.ops, ops, size))
		error = -EFAULT;

	kfree(attr_name);
 out_kfree_ops:
	kfree(ops);
 out_dput:
	dput(dentry);
	return error;
}

int
xfs_ioc_space(
	struct file		*filp,
	unsigned int		cmd,
	xfs_flock64_t		*bf)
{
	struct inode		*inode = file_inode(filp);
	struct xfs_inode	*ip = XFS_I(inode);
	struct iattr		iattr;
	enum xfs_prealloc_flags	flags = 0;
	uint			iolock = XFS_IOLOCK_EXCL;
	int			error;

	/*
	 * Only allow the sys admin to reserve space unless
	 * unwritten extents are enabled.
	 */
	if (!xfs_sb_version_hasextflgbit(&ip->i_mount->m_sb) &&
	    !capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (inode->i_flags & (S_IMMUTABLE|S_APPEND))
		return -EPERM;

	if (!(filp->f_mode & FMODE_WRITE))
		return -EBADF;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	if (filp->f_flags & O_DSYNC)
		flags |= XFS_PREALLOC_SYNC;
	if (filp->f_mode & FMODE_NOCMTIME)
		flags |= XFS_PREALLOC_INVISIBLE;

	error = mnt_want_write_file(filp);
	if (error)
		return error;

	xfs_ilock(ip, iolock);
	error = xfs_break_layouts(inode, &iolock);
	if (error)
		goto out_unlock;

	xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
	iolock |= XFS_MMAPLOCK_EXCL;

	switch (bf->l_whence) {
	case 0: /*SEEK_SET*/
		break;
	case 1: /*SEEK_CUR*/
		bf->l_start += filp->f_pos;
		break;
	case 2: /*SEEK_END*/
		bf->l_start += XFS_ISIZE(ip);
		break;
	default:
		error = -EINVAL;
		goto out_unlock;
	}

	/*
	 * length of <= 0 for resv/unresv/zero is invalid.  length for
	 * alloc/free is ignored completely and we have no idea what userspace
	 * might have set it to, so set it to zero to allow range
	 * checks to pass.
	 */
	switch (cmd) {
	case XFS_IOC_ZERO_RANGE:
	case XFS_IOC_RESVSP:
	case XFS_IOC_RESVSP64:
	case XFS_IOC_UNRESVSP:
	case XFS_IOC_UNRESVSP64:
		if (bf->l_len <= 0) {
			error = -EINVAL;
			goto out_unlock;
		}
		break;
	default:
		bf->l_len = 0;
		break;
	}

	if (bf->l_start < 0 ||
	    bf->l_start > inode->i_sb->s_maxbytes ||
	    bf->l_start + bf->l_len < 0 ||
	    bf->l_start + bf->l_len >= inode->i_sb->s_maxbytes) {
		error = -EINVAL;
		goto out_unlock;
	}

	switch (cmd) {
	case XFS_IOC_ZERO_RANGE:
		flags |= XFS_PREALLOC_SET;
		error = xfs_zero_file_space(ip, bf->l_start, bf->l_len);
		break;
	case XFS_IOC_RESVSP:
	case XFS_IOC_RESVSP64:
		flags |= XFS_PREALLOC_SET;
		error = xfs_alloc_file_space(ip, bf->l_start, bf->l_len,
						XFS_BMAPI_PREALLOC);
		break;
	case XFS_IOC_UNRESVSP:
	case XFS_IOC_UNRESVSP64:
		error = xfs_free_file_space(ip, bf->l_start, bf->l_len);
		break;
	case XFS_IOC_ALLOCSP:
	case XFS_IOC_ALLOCSP64:
	case XFS_IOC_FREESP:
	case XFS_IOC_FREESP64:
		flags |= XFS_PREALLOC_CLEAR;
		if (bf->l_start > XFS_ISIZE(ip)) {
			error = xfs_alloc_file_space(ip, XFS_ISIZE(ip),
					bf->l_start - XFS_ISIZE(ip), 0);
			if (error)
				goto out_unlock;
		}

		iattr.ia_valid = ATTR_SIZE;
		iattr.ia_size = bf->l_start;

		error = xfs_vn_setattr_size(file_dentry(filp), &iattr);
		break;
	default:
		ASSERT(0);
		error = -EINVAL;
	}

	if (error)
		goto out_unlock;

	error = xfs_update_prealloc_flags(ip, flags);

out_unlock:
	xfs_iunlock(ip, iolock);
	mnt_drop_write_file(filp);
	return error;
}
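
/*
 * Illustrative userspace sketch (assumption, not part of this file):
 * preallocating 16 MiB at the start of a file with XFS_IOC_RESVSP64,
 * which is serviced by xfs_ioc_space() above.  SEEK_SET corresponds to
 * the l_whence value 0 handled there.
 *
 *	xfs_flock64_t	bf = { 0 };
 *
 *	bf.l_whence = 0;		// SEEK_SET
 *	bf.l_start = 0;
 *	bf.l_len = 16 * 1024 * 1024;
 *	if (ioctl(fd, XFS_IOC_RESVSP64, &bf) < 0)
 *		perror("XFS_IOC_RESVSP64");
 */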

STATIC int
xfs_ioc_bulkstat(
	xfs_mount_t		*mp,
	unsigned int		cmd,
	void			__user *arg)
{
	xfs_fsop_bulkreq_t	bulkreq;
	int			count;	/* # of records returned */
	xfs_ino_t		inlast;	/* last inode number */
	int			done;
	int			error;

	/* done = 1 if there are more stats to get and if bulkstat */
	/* should be called again (unused here, but used in dmapi) */

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	if (copy_from_user(&bulkreq, arg, sizeof(xfs_fsop_bulkreq_t)))
		return -EFAULT;

	if (copy_from_user(&inlast, bulkreq.lastip, sizeof(__s64)))
		return -EFAULT;

	if ((count = bulkreq.icount) <= 0)
		return -EINVAL;

	if (bulkreq.ubuffer == NULL)
		return -EINVAL;

	if (cmd == XFS_IOC_FSINUMBERS)
		error = xfs_inumbers(mp, &inlast, &count,
					bulkreq.ubuffer, xfs_inumbers_fmt);
	else if (cmd == XFS_IOC_FSBULKSTAT_SINGLE)
		error = xfs_bulkstat_one(mp, inlast, bulkreq.ubuffer,
					sizeof(xfs_bstat_t), NULL, &done);
	else	/* XFS_IOC_FSBULKSTAT */
		error = xfs_bulkstat(mp, &inlast, &count, xfs_bulkstat_one,
				     sizeof(xfs_bstat_t), bulkreq.ubuffer,
				     &done);

	if (error)
		return error;

	if (bulkreq.ocount != NULL) {
		if (copy_to_user(bulkreq.lastip, &inlast,
						sizeof(xfs_ino_t)))
			return -EFAULT;

		if (copy_to_user(bulkreq.ocount, &count, sizeof(count)))
			return -EFAULT;
	}

	return 0;
}

STATIC int
xfs_ioc_fsgeometry_v1(
	xfs_mount_t		*mp,
	void			__user *arg)
{
	xfs_fsop_geom_t		fsgeo;
	int			error;

	error = xfs_fs_geometry(mp, &fsgeo, 3);
	if (error)
		return error;

	/*
	 * Caller should have passed an argument of type
	 * xfs_fsop_geom_v1_t.  This is a proper subset of the
	 * xfs_fsop_geom_t that xfs_fs_geometry() fills in.
	 */
	if (copy_to_user(arg, &fsgeo, sizeof(xfs_fsop_geom_v1_t)))
		return -EFAULT;
	return 0;
}

STATIC int
xfs_ioc_fsgeometry(
	xfs_mount_t		*mp,
	void			__user *arg)
{
	xfs_fsop_geom_t		fsgeo;
	int			error;

	error = xfs_fs_geometry(mp, &fsgeo, 4);
	if (error)
		return error;

	if (copy_to_user(arg, &fsgeo, sizeof(fsgeo)))
		return -EFAULT;
	return 0;
}

/*
 * Linux extended inode flags interface.
 */

STATIC unsigned int
xfs_merge_ioc_xflags(
	unsigned int	flags,
	unsigned int	start)
{
	unsigned int	xflags = start;

	if (flags & FS_IMMUTABLE_FL)
		xflags |= FS_XFLAG_IMMUTABLE;
	else
		xflags &= ~FS_XFLAG_IMMUTABLE;
	if (flags & FS_APPEND_FL)
		xflags |= FS_XFLAG_APPEND;
	else
		xflags &= ~FS_XFLAG_APPEND;
	if (flags & FS_SYNC_FL)
		xflags |= FS_XFLAG_SYNC;
	else
		xflags &= ~FS_XFLAG_SYNC;
	if (flags & FS_NOATIME_FL)
		xflags |= FS_XFLAG_NOATIME;
	else
		xflags &= ~FS_XFLAG_NOATIME;
	if (flags & FS_NODUMP_FL)
		xflags |= FS_XFLAG_NODUMP;
	else
		xflags &= ~FS_XFLAG_NODUMP;

	return xflags;
}

STATIC unsigned int
xfs_di2lxflags(
	uint16_t	di_flags)
{
	unsigned int	flags = 0;

	if (di_flags & XFS_DIFLAG_IMMUTABLE)
		flags |= FS_IMMUTABLE_FL;
	if (di_flags & XFS_DIFLAG_APPEND)
		flags |= FS_APPEND_FL;
	if (di_flags & XFS_DIFLAG_SYNC)
		flags |= FS_SYNC_FL;
	if (di_flags & XFS_DIFLAG_NOATIME)
		flags |= FS_NOATIME_FL;
	if (di_flags & XFS_DIFLAG_NODUMP)
		flags |= FS_NODUMP_FL;
	return flags;
}

STATIC int
xfs_ioc_fsgetxattr(
	xfs_inode_t		*ip,
	int			attr,
	void			__user *arg)
{
	struct fsxattr		fa;

	memset(&fa, 0, sizeof(struct fsxattr));

	xfs_ilock(ip, XFS_ILOCK_SHARED);
	fa.fsx_xflags = xfs_ip2xflags(ip);
	fa.fsx_extsize = ip->i_d.di_extsize << ip->i_mount->m_sb.sb_blocklog;
	fa.fsx_cowextsize = ip->i_d.di_cowextsize <<
			ip->i_mount->m_sb.sb_blocklog;
	fa.fsx_projid = xfs_get_projid(ip);

	if (attr) {
		if (ip->i_afp) {
			if (ip->i_afp->if_flags & XFS_IFEXTENTS)
				fa.fsx_nextents = xfs_iext_count(ip->i_afp);
			else
				fa.fsx_nextents = ip->i_d.di_anextents;
		} else
			fa.fsx_nextents = 0;
	} else {
		if (ip->i_df.if_flags & XFS_IFEXTENTS)
			fa.fsx_nextents = xfs_iext_count(&ip->i_df);
		else
			fa.fsx_nextents = ip->i_d.di_nextents;
	}
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (copy_to_user(arg, &fa, sizeof(fa)))
		return -EFAULT;
	return 0;
}
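
/*
 * Illustrative userspace sketch (assumption, not part of this file):
 * reading the extended flags and extent size hint of an open file via
 * XFS_IOC_FSGETXATTR, which lands in xfs_ioc_fsgetxattr() above.
 *
 *	struct fsxattr	fa;
 *
 *	if (ioctl(fd, XFS_IOC_FSGETXATTR, &fa) < 0)
 *		perror("XFS_IOC_FSGETXATTR");
 *	else
 *		printf("xflags 0x%x extsize %u projid %u\n",
 *		       fa.fsx_xflags, fa.fsx_extsize, fa.fsx_projid);
 */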

STATIC uint16_t
xfs_flags2diflags(
	struct xfs_inode	*ip,
	unsigned int		xflags)
{
	/* can't set PREALLOC this way, just preserve it */
	uint16_t		di_flags =
		(ip->i_d.di_flags & XFS_DIFLAG_PREALLOC);

	if (xflags & FS_XFLAG_IMMUTABLE)
		di_flags |= XFS_DIFLAG_IMMUTABLE;
	if (xflags & FS_XFLAG_APPEND)
		di_flags |= XFS_DIFLAG_APPEND;
	if (xflags & FS_XFLAG_SYNC)
		di_flags |= XFS_DIFLAG_SYNC;
	if (xflags & FS_XFLAG_NOATIME)
		di_flags |= XFS_DIFLAG_NOATIME;
	if (xflags & FS_XFLAG_NODUMP)
		di_flags |= XFS_DIFLAG_NODUMP;
	if (xflags & FS_XFLAG_NODEFRAG)
		di_flags |= XFS_DIFLAG_NODEFRAG;
	if (xflags & FS_XFLAG_FILESTREAM)
		di_flags |= XFS_DIFLAG_FILESTREAM;
	if (S_ISDIR(VFS_I(ip)->i_mode)) {
		if (xflags & FS_XFLAG_RTINHERIT)
			di_flags |= XFS_DIFLAG_RTINHERIT;
		if (xflags & FS_XFLAG_NOSYMLINKS)
			di_flags |= XFS_DIFLAG_NOSYMLINKS;
		if (xflags & FS_XFLAG_EXTSZINHERIT)
			di_flags |= XFS_DIFLAG_EXTSZINHERIT;
		if (xflags & FS_XFLAG_PROJINHERIT)
			di_flags |= XFS_DIFLAG_PROJINHERIT;
	} else if (S_ISREG(VFS_I(ip)->i_mode)) {
		if (xflags & FS_XFLAG_REALTIME)
			di_flags |= XFS_DIFLAG_REALTIME;
		if (xflags & FS_XFLAG_EXTSIZE)
			di_flags |= XFS_DIFLAG_EXTSIZE;
	}

	return di_flags;
}

STATIC uint64_t
xfs_flags2diflags2(
	struct xfs_inode	*ip,
	unsigned int		xflags)
{
	uint64_t		di_flags2 =
		(ip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK);

	if (xflags & FS_XFLAG_DAX)
		di_flags2 |= XFS_DIFLAG2_DAX;
	if (xflags & FS_XFLAG_COWEXTSIZE)
		di_flags2 |= XFS_DIFLAG2_COWEXTSIZE;

	return di_flags2;
}

STATIC void
xfs_diflags_to_linux(
	struct xfs_inode	*ip)
{
	struct inode		*inode = VFS_I(ip);
	unsigned int		xflags = xfs_ip2xflags(ip);

	if (xflags & FS_XFLAG_IMMUTABLE)
		inode->i_flags |= S_IMMUTABLE;
	else
		inode->i_flags &= ~S_IMMUTABLE;
	if (xflags & FS_XFLAG_APPEND)
		inode->i_flags |= S_APPEND;
	else
		inode->i_flags &= ~S_APPEND;
	if (xflags & FS_XFLAG_SYNC)
		inode->i_flags |= S_SYNC;
	else
		inode->i_flags &= ~S_SYNC;
	if (xflags & FS_XFLAG_NOATIME)
		inode->i_flags |= S_NOATIME;
	else
		inode->i_flags &= ~S_NOATIME;
#if 0	/* disabled until the flag switching races are sorted out */
	if (xflags & FS_XFLAG_DAX)
		inode->i_flags |= S_DAX;
	else
		inode->i_flags &= ~S_DAX;
#endif
}

static int
xfs_ioctl_setattr_xflags(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct fsxattr		*fa)
{
	struct xfs_mount	*mp = ip->i_mount;
	uint64_t		di_flags2;

	/* Can't change realtime flag if any extents are allocated. */
	if ((ip->i_d.di_nextents || ip->i_delayed_blks) &&
	    XFS_IS_REALTIME_INODE(ip) != (fa->fsx_xflags & FS_XFLAG_REALTIME))
		return -EINVAL;

	/* If realtime flag is set then must have realtime device */
	if (fa->fsx_xflags & FS_XFLAG_REALTIME) {
		if (mp->m_sb.sb_rblocks == 0 || mp->m_sb.sb_rextsize == 0 ||
		    (ip->i_d.di_extsize % mp->m_sb.sb_rextsize))
			return -EINVAL;
	}

	/* Clear reflink if we are actually able to set the rt flag. */
	if ((fa->fsx_xflags & FS_XFLAG_REALTIME) && xfs_is_reflink_inode(ip))
		ip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;

	/* Don't allow us to set DAX mode for a reflinked file for now. */
	if ((fa->fsx_xflags & FS_XFLAG_DAX) && xfs_is_reflink_inode(ip))
		return -EINVAL;

	/*
	 * Can't modify an immutable/append-only file unless
	 * we have appropriate permission.
	 */
	if (((ip->i_d.di_flags & (XFS_DIFLAG_IMMUTABLE | XFS_DIFLAG_APPEND)) ||
	     (fa->fsx_xflags & (FS_XFLAG_IMMUTABLE | FS_XFLAG_APPEND))) &&
	    !capable(CAP_LINUX_IMMUTABLE))
		return -EPERM;

	/* diflags2 only valid for v3 inodes. */
	di_flags2 = xfs_flags2diflags2(ip, fa->fsx_xflags);
	if (di_flags2 && ip->i_d.di_version < 3)
		return -EINVAL;

	ip->i_d.di_flags = xfs_flags2diflags(ip, fa->fsx_xflags);
	ip->i_d.di_flags2 = di_flags2;

	xfs_diflags_to_linux(ip);
	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	XFS_STATS_INC(mp, xs_ig_attrchg);
	return 0;
}

/*
 * If we are changing DAX flags, we have to ensure the file is clean and any
 * cached objects in the address space are invalidated and removed. This
 * requires us to lock out other IO and page faults similar to a truncate
 * operation. The locks need to be held until the transaction has been committed
 * so that the cache invalidation is atomic with respect to the DAX flag
 * manipulation.
 */
static int
xfs_ioctl_setattr_dax_invalidate(
	struct xfs_inode	*ip,
	struct fsxattr		*fa,
	int			*join_flags)
{
	struct inode		*inode = VFS_I(ip);
	struct super_block	*sb = inode->i_sb;
	int			error;

	*join_flags = 0;

	/*
	 * It is only valid to set the DAX flag on regular files and
	 * directories on filesystems where the block size is equal to the page
	 * size. On directories it serves as an inherit hint.
	 */
	if (fa->fsx_xflags & FS_XFLAG_DAX) {
		if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode)))
			return -EINVAL;
		if (bdev_dax_supported(sb, sb->s_blocksize) < 0)
			return -EINVAL;
	}

	/* If the DAX state is not changing, we have nothing to do here. */
	if ((fa->fsx_xflags & FS_XFLAG_DAX) && IS_DAX(inode))
		return 0;
	if (!(fa->fsx_xflags & FS_XFLAG_DAX) && !IS_DAX(inode))
		return 0;

	/* lock, flush and invalidate mapping in preparation for flag change */
	xfs_ilock(ip, XFS_MMAPLOCK_EXCL | XFS_IOLOCK_EXCL);
	error = filemap_write_and_wait(inode->i_mapping);
	if (error)
		goto out_unlock;
	error = invalidate_inode_pages2(inode->i_mapping);
	if (error)
		goto out_unlock;

	*join_flags = XFS_MMAPLOCK_EXCL | XFS_IOLOCK_EXCL;
	return 0;

out_unlock:
	xfs_iunlock(ip, XFS_MMAPLOCK_EXCL | XFS_IOLOCK_EXCL);
	return error;

}

/*
 * Set up the transaction structure for the setattr operation, checking that we
 * have permission to do so. On success, return a clean transaction and the
 * inode locked exclusively ready for further operation specific checks. On
 * failure, return an error without modifying or locking the inode.
 *
 * The inode might already be IO locked on call. If this is the case, it is
 * indicated in @join_flags and we take full responsibility for ensuring they
 * are unlocked from now on. Hence if we have an error here, we still have to
 * unlock them. Otherwise, once they are joined to the transaction, they will
 * be unlocked on commit/cancel.
 */
static struct xfs_trans *
xfs_ioctl_setattr_get_trans(
	struct xfs_inode	*ip,
	int			join_flags)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error = -EROFS;

	if (mp->m_flags & XFS_MOUNT_RDONLY)
		goto out_unlock;
	error = -EIO;
	if (XFS_FORCED_SHUTDOWN(mp))
		goto out_unlock;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0, 0, 0, &tp);
	if (error)
		return ERR_PTR(error);

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL | join_flags);
	join_flags = 0;

	/*
	 * CAP_FOWNER overrides the following restrictions:
	 *
	 * The user ID of the calling process must be equal to the file owner
	 * ID, except in cases where the CAP_FSETID capability is applicable.
	 */
	if (!inode_owner_or_capable(VFS_I(ip))) {
		error = -EPERM;
		goto out_cancel;
	}

	if (mp->m_flags & XFS_MOUNT_WSYNC)
		xfs_trans_set_sync(tp);

	return tp;

out_cancel:
	xfs_trans_cancel(tp);
out_unlock:
	if (join_flags)
		xfs_iunlock(ip, join_flags);
	return ERR_PTR(error);
}

/*
 * extent size hint validation is somewhat cumbersome. Rules are:
 *
 * 1. extent size hint is only valid for directories and regular files
 * 2. FS_XFLAG_EXTSIZE is only valid for regular files
 * 3. FS_XFLAG_EXTSZINHERIT is only valid for directories.
 * 4. can only be changed on regular files if no extents are allocated
 * 5. can be changed on directories at any time
 * 6. extsize hint of 0 turns off hints, clears inode flags.
 * 7. Extent size must be a multiple of the appropriate block size.
 * 8. for non-realtime files, the extent size hint must be limited
 *    to half the AG size to avoid alignment extending the extent beyond the
 *    limits of the AG.
 */
static int
xfs_ioctl_setattr_check_extsize(
	struct xfs_inode	*ip,
	struct fsxattr		*fa)
{
	struct xfs_mount	*mp = ip->i_mount;

	if ((fa->fsx_xflags & FS_XFLAG_EXTSIZE) && !S_ISREG(VFS_I(ip)->i_mode))
		return -EINVAL;

	if ((fa->fsx_xflags & FS_XFLAG_EXTSZINHERIT) &&
	    !S_ISDIR(VFS_I(ip)->i_mode))
		return -EINVAL;

	if (S_ISREG(VFS_I(ip)->i_mode) && ip->i_d.di_nextents &&
	    ((ip->i_d.di_extsize << mp->m_sb.sb_blocklog) != fa->fsx_extsize))
		return -EINVAL;

	if (fa->fsx_extsize != 0) {
		xfs_extlen_t	size;
		xfs_fsblock_t	extsize_fsb;

		extsize_fsb = XFS_B_TO_FSB(mp, fa->fsx_extsize);
		if (extsize_fsb > MAXEXTLEN)
			return -EINVAL;

		if (XFS_IS_REALTIME_INODE(ip) ||
		    (fa->fsx_xflags & FS_XFLAG_REALTIME)) {
			size = mp->m_sb.sb_rextsize << mp->m_sb.sb_blocklog;
		} else {
			size = mp->m_sb.sb_blocksize;
			if (extsize_fsb > mp->m_sb.sb_agblocks / 2)
				return -EINVAL;
		}

		if (fa->fsx_extsize % size)
			return -EINVAL;
	} else
		fa->fsx_xflags &= ~(FS_XFLAG_EXTSIZE | FS_XFLAG_EXTSZINHERIT);

	return 0;
}
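
/*
 * Worked example of rule 7 above (numbers are hypothetical): on a
 * filesystem with 4 KiB blocks (sb_blocklog = 12) and sb_rextsize = 16,
 * a realtime file's fsx_extsize must be a multiple of 16 << 12 = 64 KiB.
 * A non-realtime hint only has to be a multiple of the 4 KiB block size,
 * but rule 8 additionally caps it at half the AG size.
 */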

/*
 * CoW extent size hint validation rules are:
 *
 * 1. CoW extent size hint can only be set if reflink is enabled on the fs.
 *    The inode does not have to have any shared blocks, but it must be a v3.
 * 2. FS_XFLAG_COWEXTSIZE is only valid for directories and regular files;
 *    for a directory, the hint is propagated to new files.
 * 3. Can be changed on files & directories at any time.
 * 4. CoW extsize hint of 0 turns off hints, clears inode flags.
 * 5. Extent size must be a multiple of the appropriate block size.
 * 6. The extent size hint must be limited to half the AG size to avoid
 *    alignment extending the extent beyond the limits of the AG.
 */
static int
xfs_ioctl_setattr_check_cowextsize(
	struct xfs_inode	*ip,
	struct fsxattr		*fa)
{
	struct xfs_mount	*mp = ip->i_mount;

	if (!(fa->fsx_xflags & FS_XFLAG_COWEXTSIZE))
		return 0;

	if (!xfs_sb_version_hasreflink(&ip->i_mount->m_sb) ||
	    ip->i_d.di_version != 3)
		return -EINVAL;

	if (!S_ISREG(VFS_I(ip)->i_mode) && !S_ISDIR(VFS_I(ip)->i_mode))
		return -EINVAL;

	if (fa->fsx_cowextsize != 0) {
		xfs_extlen_t	size;
		xfs_fsblock_t	cowextsize_fsb;

		cowextsize_fsb = XFS_B_TO_FSB(mp, fa->fsx_cowextsize);
		if (cowextsize_fsb > MAXEXTLEN)
			return -EINVAL;

		size = mp->m_sb.sb_blocksize;
		if (cowextsize_fsb > mp->m_sb.sb_agblocks / 2)
			return -EINVAL;

		if (fa->fsx_cowextsize % size)
			return -EINVAL;
	} else
		fa->fsx_xflags &= ~FS_XFLAG_COWEXTSIZE;

	return 0;
}

static int
xfs_ioctl_setattr_check_projid(
	struct xfs_inode	*ip,
	struct fsxattr		*fa)
{
	/* Disallow 32bit project ids if projid32bit feature is not enabled. */
	if (fa->fsx_projid > (uint16_t)-1 &&
	    !xfs_sb_version_hasprojid32bit(&ip->i_mount->m_sb))
		return -EINVAL;

	/*
	 * Project Quota ID state is only allowed to change from within the init
	 * namespace. Enforce that restriction only if we are trying to change
	 * the quota ID state. Everything else is allowed in user namespaces.
	 */
	if (current_user_ns() == &init_user_ns)
		return 0;

	if (xfs_get_projid(ip) != fa->fsx_projid)
		return -EINVAL;
	if ((fa->fsx_xflags & FS_XFLAG_PROJINHERIT) !=
	    (ip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT))
		return -EINVAL;

	return 0;
}

STATIC int
xfs_ioctl_setattr(
	xfs_inode_t		*ip,
	struct fsxattr		*fa)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	struct xfs_dquot	*udqp = NULL;
	struct xfs_dquot	*pdqp = NULL;
	struct xfs_dquot	*olddquot = NULL;
	int			code;
	int			join_flags = 0;

	trace_xfs_ioctl_setattr(ip);

	code = xfs_ioctl_setattr_check_projid(ip, fa);
	if (code)
		return code;

	/*
	 * If disk quotas is on, we make sure that the dquots do exist on disk,
	 * before we start any other transactions. Trying to do this later
	 * is messy. We don't care to take a readlock to look at the ids
	 * in inode here, because we can't hold it across the trans_reserve.
	 * If the IDs do change before we take the ilock, we're covered
	 * because the i_*dquot fields will get updated anyway.
	 */
	if (XFS_IS_QUOTA_ON(mp)) {
		code = xfs_qm_vop_dqalloc(ip, ip->i_d.di_uid,
					 ip->i_d.di_gid, fa->fsx_projid,
					 XFS_QMOPT_PQUOTA, &udqp, NULL, &pdqp);
		if (code)
			return code;
	}

	/*
	 * Changing DAX config may require inode locking for mapping
	 * invalidation. These need to be held all the way to transaction commit
	 * or cancel time, so need to be passed through to
	 * xfs_ioctl_setattr_get_trans() so it can apply them to the join call
	 * appropriately.
	 */
	code = xfs_ioctl_setattr_dax_invalidate(ip, fa, &join_flags);
	if (code)
		goto error_free_dquots;

	tp = xfs_ioctl_setattr_get_trans(ip, join_flags);
	if (IS_ERR(tp)) {
		code = PTR_ERR(tp);
		goto error_free_dquots;
	}


	if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_PQUOTA_ON(mp) &&
	    xfs_get_projid(ip) != fa->fsx_projid) {
		code = xfs_qm_vop_chown_reserve(tp, ip, udqp, NULL, pdqp,
				capable(CAP_FOWNER) ? XFS_QMOPT_FORCE_RES : 0);
		if (code)	/* out of quota */
			goto error_trans_cancel;
	}

	code = xfs_ioctl_setattr_check_extsize(ip, fa);
	if (code)
		goto error_trans_cancel;

	code = xfs_ioctl_setattr_check_cowextsize(ip, fa);
	if (code)
		goto error_trans_cancel;

	code = xfs_ioctl_setattr_xflags(tp, ip, fa);
	if (code)
		goto error_trans_cancel;

	/*
	 * Change file ownership.  Must be the owner or privileged.  CAP_FSETID
	 * overrides the following restrictions:
	 *
	 * The set-user-ID and set-group-ID bits of a file will be cleared upon
	 * successful return from chown()
	 */

	if ((VFS_I(ip)->i_mode & (S_ISUID|S_ISGID)) &&
	    !capable_wrt_inode_uidgid(VFS_I(ip), CAP_FSETID))
		VFS_I(ip)->i_mode &= ~(S_ISUID|S_ISGID);

	/* Change the ownerships and register project quota modifications */
	if (xfs_get_projid(ip) != fa->fsx_projid) {
		if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_PQUOTA_ON(mp)) {
			olddquot = xfs_qm_vop_chown(tp, ip,
						&ip->i_pdquot, pdqp);
		}
		ASSERT(ip->i_d.di_version > 1);
		xfs_set_projid(ip, fa->fsx_projid);
	}

	/*
	 * Only set the extent size hint if we've already determined that the
	 * extent size hint should be set on the inode. If no extent size flags
	 * are set on the inode then unconditionally clear the extent size hint.
	 */
	if (ip->i_d.di_flags & (XFS_DIFLAG_EXTSIZE | XFS_DIFLAG_EXTSZINHERIT))
		ip->i_d.di_extsize = fa->fsx_extsize >> mp->m_sb.sb_blocklog;
	else
		ip->i_d.di_extsize = 0;
	if (ip->i_d.di_version == 3 &&
	    (ip->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE))
		ip->i_d.di_cowextsize = fa->fsx_cowextsize >>
				mp->m_sb.sb_blocklog;
	else
		ip->i_d.di_cowextsize = 0;

	code = xfs_trans_commit(tp);

	/*
	 * Release any dquot(s) the inode had kept before chown.
	 */
	xfs_qm_dqrele(olddquot);
	xfs_qm_dqrele(udqp);
	xfs_qm_dqrele(pdqp);

	return code;

error_trans_cancel:
	xfs_trans_cancel(tp);
error_free_dquots:
	xfs_qm_dqrele(udqp);
	xfs_qm_dqrele(pdqp);
	return code;
}

STATIC int
xfs_ioc_fssetxattr(
	xfs_inode_t		*ip,
	struct file		*filp,
	void			__user *arg)
{
	struct fsxattr		fa;
	int			error;

	if (copy_from_user(&fa, arg, sizeof(fa)))
		return -EFAULT;

	error = mnt_want_write_file(filp);
	if (error)
		return error;
	error = xfs_ioctl_setattr(ip, &fa);
	mnt_drop_write_file(filp);
	return error;
}

STATIC int
xfs_ioc_getxflags(
	xfs_inode_t		*ip,
	void			__user *arg)
{
	unsigned int		flags;

	flags = xfs_di2lxflags(ip->i_d.di_flags);
	if (copy_to_user(arg, &flags, sizeof(flags)))
		return -EFAULT;
	return 0;
}

STATIC int
xfs_ioc_setxflags(
	struct xfs_inode	*ip,
	struct file		*filp,
	void			__user *arg)
{
	struct xfs_trans	*tp;
	struct fsxattr		fa;
	unsigned int		flags;
	int			join_flags = 0;
	int			error;

	if (copy_from_user(&flags, arg, sizeof(flags)))
		return -EFAULT;

	if (flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL | \
		      FS_NOATIME_FL | FS_NODUMP_FL | \
		      FS_SYNC_FL))
		return -EOPNOTSUPP;

	fa.fsx_xflags = xfs_merge_ioc_xflags(flags, xfs_ip2xflags(ip));

	error = mnt_want_write_file(filp);
	if (error)
		return error;

	/*
	 * Changing DAX config may require inode locking for mapping
	 * invalidation. These need to be held all the way to transaction commit
	 * or cancel time, so need to be passed through to
	 * xfs_ioctl_setattr_get_trans() so it can apply them to the join call
	 * appropriately.
	 */
	error = xfs_ioctl_setattr_dax_invalidate(ip, &fa, &join_flags);
	if (error)
		goto out_drop_write;

	tp = xfs_ioctl_setattr_get_trans(ip, join_flags);
	if (IS_ERR(tp)) {
		error = PTR_ERR(tp);
		goto out_drop_write;
	}

	error = xfs_ioctl_setattr_xflags(tp, ip, &fa);
	if (error) {
		xfs_trans_cancel(tp);
		goto out_drop_write;
	}

	error = xfs_trans_commit(tp);
out_drop_write:
	mnt_drop_write_file(filp);
	return error;
}

STATIC int
xfs_getbmap_format(void **ap, struct getbmapx *bmv)
{
	struct getbmap __user	*base = (struct getbmap __user *)*ap;

	/* copy only getbmap portion (not getbmapx) */
	if (copy_to_user(base, bmv, sizeof(struct getbmap)))
		return -EFAULT;

	*ap += sizeof(struct getbmap);
	return 0;
}

STATIC int
xfs_ioc_getbmap(
	struct file		*file,
	unsigned int		cmd,
	void			__user *arg)
{
	struct getbmapx		bmx = { 0 };
	int			error;

	/* struct getbmap is a strict subset of struct getbmapx. */
	if (copy_from_user(&bmx, arg, offsetof(struct getbmapx, bmv_iflags)))
		return -EFAULT;

	if (bmx.bmv_count < 2)
		return -EINVAL;

	bmx.bmv_iflags = (cmd == XFS_IOC_GETBMAPA ? BMV_IF_ATTRFORK : 0);
	if (file->f_mode & FMODE_NOCMTIME)
		bmx.bmv_iflags |= BMV_IF_NO_DMAPI_READ;

	error = xfs_getbmap(XFS_I(file_inode(file)), &bmx, xfs_getbmap_format,
			    (__force struct getbmap *)arg+1);
	if (error)
		return error;

	/* copy back header - only size of getbmap */
	if (copy_to_user(arg, &bmx, sizeof(struct getbmap)))
		return -EFAULT;
	return 0;
}

STATIC int
xfs_getbmapx_format(void **ap, struct getbmapx *bmv)
{
	struct getbmapx __user	*base = (struct getbmapx __user *)*ap;

	if (copy_to_user(base, bmv, sizeof(struct getbmapx)))
		return -EFAULT;

	*ap += sizeof(struct getbmapx);
	return 0;
}

STATIC int
xfs_ioc_getbmapx(
	struct xfs_inode	*ip,
	void			__user *arg)
{
	struct getbmapx		bmx;
	int			error;

	if (copy_from_user(&bmx, arg, sizeof(bmx)))
		return -EFAULT;

	if (bmx.bmv_count < 2)
		return -EINVAL;

	if (bmx.bmv_iflags & (~BMV_IF_VALID))
		return -EINVAL;

	error = xfs_getbmap(ip, &bmx, xfs_getbmapx_format,
			    (__force struct getbmapx *)arg+1);
	if (error)
		return error;

	/* copy back header */
	if (copy_to_user(arg, &bmx, sizeof(struct getbmapx)))
		return -EFAULT;

	return 0;
}
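
/*
 * Illustrative userspace sketch (assumption, not part of this file):
 * fetching up to 32 data-fork extent records with XFS_IOC_GETBMAPX.
 * Element [0] of the array is the request/reply header consumed above;
 * the records follow it, and bmv_entries reports how many came back.
 *
 *	struct getbmapx	map[33] = { { 0 } };
 *
 *	map[0].bmv_offset = 0;
 *	map[0].bmv_length = -1;		// whole file
 *	map[0].bmv_count = 33;		// header + up to 32 records
 *	if (ioctl(fd, XFS_IOC_GETBMAPX, map) < 0)
 *		perror("XFS_IOC_GETBMAPX");
 *	else
 *		printf("%lld extents returned\n",
 *		       (long long)map[0].bmv_entries);
 */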

struct getfsmap_info {
	struct xfs_mount	*mp;
	struct fsmap_head __user *data;
	unsigned int		idx;
	__u32			last_flags;
};

STATIC int
xfs_getfsmap_format(struct xfs_fsmap *xfm, void *priv)
{
	struct getfsmap_info	*info = priv;
	struct fsmap		fm;

	trace_xfs_getfsmap_mapping(info->mp, xfm);

	info->last_flags = xfm->fmr_flags;
	xfs_fsmap_from_internal(&fm, xfm);
	if (copy_to_user(&info->data->fmh_recs[info->idx++], &fm,
			sizeof(struct fsmap)))
		return -EFAULT;

	return 0;
}

STATIC int
xfs_ioc_getfsmap(
	struct xfs_inode	*ip,
	struct fsmap_head	__user *arg)
{
	struct getfsmap_info	info = { NULL };
	struct xfs_fsmap_head	xhead = {0};
	struct fsmap_head	head;
	bool			aborted = false;
	int			error;

	if (copy_from_user(&head, arg, sizeof(struct fsmap_head)))
		return -EFAULT;
	if (memchr_inv(head.fmh_reserved, 0, sizeof(head.fmh_reserved)) ||
	    memchr_inv(head.fmh_keys[0].fmr_reserved, 0,
		       sizeof(head.fmh_keys[0].fmr_reserved)) ||
	    memchr_inv(head.fmh_keys[1].fmr_reserved, 0,
		       sizeof(head.fmh_keys[1].fmr_reserved)))
		return -EINVAL;

	xhead.fmh_iflags = head.fmh_iflags;
	xhead.fmh_count = head.fmh_count;
	xfs_fsmap_to_internal(&xhead.fmh_keys[0], &head.fmh_keys[0]);
	xfs_fsmap_to_internal(&xhead.fmh_keys[1], &head.fmh_keys[1]);

	trace_xfs_getfsmap_low_key(ip->i_mount, &xhead.fmh_keys[0]);
	trace_xfs_getfsmap_high_key(ip->i_mount, &xhead.fmh_keys[1]);

	info.mp = ip->i_mount;
	info.data = arg;
	error = xfs_getfsmap(ip->i_mount, &xhead, xfs_getfsmap_format, &info);
	if (error == XFS_BTREE_QUERY_RANGE_ABORT) {
		error = 0;
		aborted = true;
	} else if (error)
		return error;

	/* If we didn't abort, set the "last" flag in the last fmx */
	if (!aborted && info.idx) {
		info.last_flags |= FMR_OF_LAST;
		if (copy_to_user(&info.data->fmh_recs[info.idx - 1].fmr_flags,
				&info.last_flags, sizeof(info.last_flags)))
			return -EFAULT;
	}

	/* copy back header */
	head.fmh_entries = xhead.fmh_entries;
	head.fmh_oflags = xhead.fmh_oflags;
	if (copy_to_user(arg, &head, sizeof(struct fsmap_head)))
		return -EFAULT;

	return 0;
}

int
xfs_ioc_swapext(
	xfs_swapext_t	*sxp)
{
	xfs_inode_t	*ip, *tip;
	struct fd	f, tmp;
	int		error = 0;

	/* Pull information for the target fd */
	f = fdget((int)sxp->sx_fdtarget);
	if (!f.file) {
		error = -EINVAL;
		goto out;
	}

	if (!(f.file->f_mode & FMODE_WRITE) ||
	    !(f.file->f_mode & FMODE_READ) ||
	    (f.file->f_flags & O_APPEND)) {
		error = -EBADF;
		goto out_put_file;
	}

	tmp = fdget((int)sxp->sx_fdtmp);
	if (!tmp.file) {
		error = -EINVAL;
		goto out_put_file;
	}

	if (!(tmp.file->f_mode & FMODE_WRITE) ||
	    !(tmp.file->f_mode & FMODE_READ) ||
	    (tmp.file->f_flags & O_APPEND)) {
		error = -EBADF;
		goto out_put_tmp_file;
	}

	if (IS_SWAPFILE(file_inode(f.file)) ||
	    IS_SWAPFILE(file_inode(tmp.file))) {
		error = -EINVAL;
		goto out_put_tmp_file;
	}

	/*
	 * We need to ensure that the fds passed in point to XFS inodes
	 * before we cast and access them as XFS structures as we have no
	 * control over what the user passes us here.
	 */
	if (f.file->f_op != &xfs_file_operations ||
	    tmp.file->f_op != &xfs_file_operations) {
		error = -EINVAL;
		goto out_put_tmp_file;
	}

	ip = XFS_I(file_inode(f.file));
	tip = XFS_I(file_inode(tmp.file));

	if (ip->i_mount != tip->i_mount) {
		error = -EINVAL;
		goto out_put_tmp_file;
	}

	if (ip->i_ino == tip->i_ino) {
		error = -EINVAL;
		goto out_put_tmp_file;
	}

	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
		error = -EIO;
		goto out_put_tmp_file;
	}

	error = xfs_swap_extents(ip, tip, sxp);

 out_put_tmp_file:
	fdput(tmp);
 out_put_file:
	fdput(f);
 out:
	return error;
}

/*
 * Note: some of the ioctl's return positive numbers as a
 * byte count indicating success, such as readlink_by_handle.
 * So we don't "sign flip" like most other routines.  This means
 * true errors need to be returned as a negative value.
 */
long
xfs_file_ioctl(
	struct file		*filp,
	unsigned int		cmd,
	unsigned long		p)
{
	struct inode		*inode = file_inode(filp);
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	void			__user *arg = (void __user *)p;
	int			error;

	trace_xfs_file_ioctl(ip);

	switch (cmd) {
	case FITRIM:
		return xfs_ioc_trim(mp, arg);
	case XFS_IOC_ALLOCSP:
	case XFS_IOC_FREESP:
	case XFS_IOC_RESVSP:
	case XFS_IOC_UNRESVSP:
	case XFS_IOC_ALLOCSP64:
	case XFS_IOC_FREESP64:
	case XFS_IOC_RESVSP64:
	case XFS_IOC_UNRESVSP64:
	case XFS_IOC_ZERO_RANGE: {
		xfs_flock64_t		bf;

		if (copy_from_user(&bf, arg, sizeof(bf)))
			return -EFAULT;
		return xfs_ioc_space(filp, cmd, &bf);
	}
	case XFS_IOC_DIOINFO: {
		struct dioattr	da;
		xfs_buftarg_t	*target =
			XFS_IS_REALTIME_INODE(ip) ?
			mp->m_rtdev_targp : mp->m_ddev_targp;

		da.d_mem = da.d_miniosz = target->bt_logical_sectorsize;
		da.d_maxiosz = INT_MAX & ~(da.d_miniosz - 1);

		if (copy_to_user(arg, &da, sizeof(da)))
			return -EFAULT;
		return 0;
	}

	case XFS_IOC_FSBULKSTAT_SINGLE:
	case XFS_IOC_FSBULKSTAT:
	case XFS_IOC_FSINUMBERS:
		return xfs_ioc_bulkstat(mp, cmd, arg);

	case XFS_IOC_FSGEOMETRY_V1:
		return xfs_ioc_fsgeometry_v1(mp, arg);

	case XFS_IOC_FSGEOMETRY:
		return xfs_ioc_fsgeometry(mp, arg);

	case XFS_IOC_GETVERSION:
		return put_user(inode->i_generation, (int __user *)arg);

	case XFS_IOC_FSGETXATTR:
		return xfs_ioc_fsgetxattr(ip, 0, arg);
	case XFS_IOC_FSGETXATTRA:
		return xfs_ioc_fsgetxattr(ip, 1, arg);
	case XFS_IOC_FSSETXATTR:
		return xfs_ioc_fssetxattr(ip, filp, arg);
	case XFS_IOC_GETXFLAGS:
		return xfs_ioc_getxflags(ip, arg);
	case XFS_IOC_SETXFLAGS:
		return xfs_ioc_setxflags(ip, filp, arg);

	case XFS_IOC_FSSETDM: {
		struct fsdmidata	dmi;

		if (copy_from_user(&dmi, arg, sizeof(dmi)))
			return -EFAULT;

		error = mnt_want_write_file(filp);
		if (error)
			return error;

		error = xfs_set_dmattrs(ip, dmi.fsd_dmevmask,
				dmi.fsd_dmstate);
		mnt_drop_write_file(filp);
		return error;
	}

	case XFS_IOC_GETBMAP:
	case XFS_IOC_GETBMAPA:
		return xfs_ioc_getbmap(filp, cmd, arg);

	case XFS_IOC_GETBMAPX:
		return xfs_ioc_getbmapx(ip, arg);

	case FS_IOC_GETFSMAP:
		return xfs_ioc_getfsmap(ip, arg);

	case XFS_IOC_FD_TO_HANDLE:
	case XFS_IOC_PATH_TO_HANDLE:
	case XFS_IOC_PATH_TO_FSHANDLE: {
		xfs_fsop_handlereq_t	hreq;

		if (copy_from_user(&hreq, arg, sizeof(hreq)))
			return -EFAULT;
		return xfs_find_handle(cmd, &hreq);
	}
	case XFS_IOC_OPEN_BY_HANDLE: {
		xfs_fsop_handlereq_t	hreq;

		if (copy_from_user(&hreq, arg, sizeof(xfs_fsop_handlereq_t)))
			return -EFAULT;
		return xfs_open_by_handle(filp, &hreq);
	}
	case XFS_IOC_FSSETDM_BY_HANDLE:
		return xfs_fssetdm_by_handle(filp, arg);

	case XFS_IOC_READLINK_BY_HANDLE: {
		xfs_fsop_handlereq_t	hreq;

		if (copy_from_user(&hreq, arg, sizeof(xfs_fsop_handlereq_t)))
			return -EFAULT;
		return xfs_readlink_by_handle(filp, &hreq);
	}
	case XFS_IOC_ATTRLIST_BY_HANDLE:
		return xfs_attrlist_by_handle(filp, arg);

	case XFS_IOC_ATTRMULTI_BY_HANDLE:
		return xfs_attrmulti_by_handle(filp, arg);

	case XFS_IOC_SWAPEXT: {
		struct xfs_swapext	sxp;

		if (copy_from_user(&sxp, arg, sizeof(xfs_swapext_t)))
			return -EFAULT;
		error = mnt_want_write_file(filp);
		if (error)
			return error;
		error = xfs_ioc_swapext(&sxp);
		mnt_drop_write_file(filp);
		return error;
	}

	case XFS_IOC_FSCOUNTS: {
		xfs_fsop_counts_t out;

		error = xfs_fs_counts(mp, &out);
		if (error)
			return error;

		if (copy_to_user(arg, &out, sizeof(out)))
			return -EFAULT;
		return 0;
	}

	case XFS_IOC_SET_RESBLKS: {
		xfs_fsop_resblks_t inout;
		uint64_t	   in;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		if (mp->m_flags & XFS_MOUNT_RDONLY)
			return -EROFS;

		if (copy_from_user(&inout, arg, sizeof(inout)))
			return -EFAULT;

		error = mnt_want_write_file(filp);
		if (error)
			return error;

		/* input parameter is passed in resblks field of structure */
		in = inout.resblks;
		error = xfs_reserve_blocks(mp, &in, &inout);
		mnt_drop_write_file(filp);
		if (error)
			return error;

		if (copy_to_user(arg, &inout, sizeof(inout)))
			return -EFAULT;
		return 0;
	}

	case XFS_IOC_GET_RESBLKS: {
		xfs_fsop_resblks_t out;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		error = xfs_reserve_blocks(mp, NULL, &out);
		if (error)
			return error;

		if (copy_to_user(arg, &out, sizeof(out)))
			return -EFAULT;

		return 0;
	}

	case XFS_IOC_FSGROWFSDATA: {
		xfs_growfs_data_t in;

		if (copy_from_user(&in, arg, sizeof(in)))
			return -EFAULT;

		error = mnt_want_write_file(filp);
		if (error)
			return error;
		error = xfs_growfs_data(mp, &in);
		mnt_drop_write_file(filp);
		return error;
	}

	case XFS_IOC_FSGROWFSLOG: {
		xfs_growfs_log_t in;

		if (copy_from_user(&in, arg, sizeof(in)))
			return -EFAULT;

		error = mnt_want_write_file(filp);
		if (error)
			return error;
		error = xfs_growfs_log(mp, &in);
		mnt_drop_write_file(filp);
		return error;
	}

	case XFS_IOC_FSGROWFSRT: {
		xfs_growfs_rt_t in;

		if (copy_from_user(&in, arg, sizeof(in)))
			return -EFAULT;

		error = mnt_want_write_file(filp);
		if (error)
			return error;
		error = xfs_growfs_rt(mp, &in);
		mnt_drop_write_file(filp);
		return error;
	}

	case XFS_IOC_GOINGDOWN: {
		uint32_t in;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		if (get_user(in, (uint32_t __user *)arg))
			return -EFAULT;

		return xfs_fs_goingdown(mp, in);
	}

	case XFS_IOC_ERROR_INJECTION: {
		xfs_error_injection_t in;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		if (copy_from_user(&in, arg, sizeof(in)))
			return -EFAULT;

		return xfs_errortag_add(mp, in.errtag);
	}

	case XFS_IOC_ERROR_CLEARALL:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		return xfs_errortag_clearall(mp);

	case XFS_IOC_FREE_EOFBLOCKS: {
		struct xfs_fs_eofblocks eofb;
		struct xfs_eofblocks keofb;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		if (mp->m_flags & XFS_MOUNT_RDONLY)
			return -EROFS;

		if (copy_from_user(&eofb, arg, sizeof(eofb)))
			return -EFAULT;

		error = xfs_fs_eofblocks_from_user(&eofb, &keofb);
		if (error)
			return error;

		return xfs_icache_free_eofblocks(mp, &keofb);
	}

	default:
		return -ENOTTY;
	}
}