1 /* 2 * Copyright (c) 2000-2005 Silicon Graphics, Inc. 3 * All Rights Reserved. 4 * 5 * This program is free software; you can redistribute it and/or 6 * modify it under the terms of the GNU General Public License as 7 * published by the Free Software Foundation. 8 * 9 * This program is distributed in the hope that it would be useful, 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 * GNU General Public License for more details. 13 * 14 * You should have received a copy of the GNU General Public License 15 * along with this program; if not, write the Free Software Foundation, 16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 17 */ 18 #include "xfs.h" 19 #include "xfs_fs.h" 20 #include "xfs_shared.h" 21 #include "xfs_format.h" 22 #include "xfs_log_format.h" 23 #include "xfs_trans_resv.h" 24 #include "xfs_mount.h" 25 #include "xfs_inode.h" 26 #include "xfs_ioctl.h" 27 #include "xfs_alloc.h" 28 #include "xfs_rtalloc.h" 29 #include "xfs_itable.h" 30 #include "xfs_error.h" 31 #include "xfs_attr.h" 32 #include "xfs_bmap.h" 33 #include "xfs_bmap_util.h" 34 #include "xfs_fsops.h" 35 #include "xfs_discard.h" 36 #include "xfs_quota.h" 37 #include "xfs_export.h" 38 #include "xfs_trace.h" 39 #include "xfs_icache.h" 40 #include "xfs_symlink.h" 41 #include "xfs_trans.h" 42 #include "xfs_pnfs.h" 43 #include "xfs_acl.h" 44 #include "xfs_btree.h" 45 #include <linux/fsmap.h> 46 #include "xfs_fsmap.h" 47 48 #include <linux/capability.h> 49 #include <linux/cred.h> 50 #include <linux/dcache.h> 51 #include <linux/mount.h> 52 #include <linux/namei.h> 53 #include <linux/pagemap.h> 54 #include <linux/slab.h> 55 #include <linux/exportfs.h> 56 57 /* 58 * xfs_find_handle maps from userspace xfs_fsop_handlereq structure to 59 * a file or fs handle. 
60 * 61 * XFS_IOC_PATH_TO_FSHANDLE 62 * returns fs handle for a mount point or path within that mount point 63 * XFS_IOC_FD_TO_HANDLE 64 * returns full handle for a FD opened in user space 65 * XFS_IOC_PATH_TO_HANDLE 66 * returns full handle for a path 67 */ 68 int 69 xfs_find_handle( 70 unsigned int cmd, 71 xfs_fsop_handlereq_t *hreq) 72 { 73 int hsize; 74 xfs_handle_t handle; 75 struct inode *inode; 76 struct fd f = {NULL}; 77 struct path path; 78 int error; 79 struct xfs_inode *ip; 80 81 if (cmd == XFS_IOC_FD_TO_HANDLE) { 82 f = fdget(hreq->fd); 83 if (!f.file) 84 return -EBADF; 85 inode = file_inode(f.file); 86 } else { 87 error = user_lpath((const char __user *)hreq->path, &path); 88 if (error) 89 return error; 90 inode = d_inode(path.dentry); 91 } 92 ip = XFS_I(inode); 93 94 /* 95 * We can only generate handles for inodes residing on a XFS filesystem, 96 * and only for regular files, directories or symbolic links. 97 */ 98 error = -EINVAL; 99 if (inode->i_sb->s_magic != XFS_SB_MAGIC) 100 goto out_put; 101 102 error = -EBADF; 103 if (!S_ISREG(inode->i_mode) && 104 !S_ISDIR(inode->i_mode) && 105 !S_ISLNK(inode->i_mode)) 106 goto out_put; 107 108 109 memcpy(&handle.ha_fsid, ip->i_mount->m_fixedfsid, sizeof(xfs_fsid_t)); 110 111 if (cmd == XFS_IOC_PATH_TO_FSHANDLE) { 112 /* 113 * This handle only contains an fsid, zero the rest. 
114 */ 115 memset(&handle.ha_fid, 0, sizeof(handle.ha_fid)); 116 hsize = sizeof(xfs_fsid_t); 117 } else { 118 handle.ha_fid.fid_len = sizeof(xfs_fid_t) - 119 sizeof(handle.ha_fid.fid_len); 120 handle.ha_fid.fid_pad = 0; 121 handle.ha_fid.fid_gen = inode->i_generation; 122 handle.ha_fid.fid_ino = ip->i_ino; 123 124 hsize = XFS_HSIZE(handle); 125 } 126 127 error = -EFAULT; 128 if (copy_to_user(hreq->ohandle, &handle, hsize) || 129 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32))) 130 goto out_put; 131 132 error = 0; 133 134 out_put: 135 if (cmd == XFS_IOC_FD_TO_HANDLE) 136 fdput(f); 137 else 138 path_put(&path); 139 return error; 140 } 141 142 /* 143 * No need to do permission checks on the various pathname components 144 * as the handle operations are privileged. 145 */ 146 STATIC int 147 xfs_handle_acceptable( 148 void *context, 149 struct dentry *dentry) 150 { 151 return 1; 152 } 153 154 /* 155 * Convert userspace handle data into a dentry. 156 */ 157 struct dentry * 158 xfs_handle_to_dentry( 159 struct file *parfilp, 160 void __user *uhandle, 161 u32 hlen) 162 { 163 xfs_handle_t handle; 164 struct xfs_fid64 fid; 165 166 /* 167 * Only allow handle opens under a directory. 
168 */ 169 if (!S_ISDIR(file_inode(parfilp)->i_mode)) 170 return ERR_PTR(-ENOTDIR); 171 172 if (hlen != sizeof(xfs_handle_t)) 173 return ERR_PTR(-EINVAL); 174 if (copy_from_user(&handle, uhandle, hlen)) 175 return ERR_PTR(-EFAULT); 176 if (handle.ha_fid.fid_len != 177 sizeof(handle.ha_fid) - sizeof(handle.ha_fid.fid_len)) 178 return ERR_PTR(-EINVAL); 179 180 memset(&fid, 0, sizeof(struct fid)); 181 fid.ino = handle.ha_fid.fid_ino; 182 fid.gen = handle.ha_fid.fid_gen; 183 184 return exportfs_decode_fh(parfilp->f_path.mnt, (struct fid *)&fid, 3, 185 FILEID_INO32_GEN | XFS_FILEID_TYPE_64FLAG, 186 xfs_handle_acceptable, NULL); 187 } 188 189 STATIC struct dentry * 190 xfs_handlereq_to_dentry( 191 struct file *parfilp, 192 xfs_fsop_handlereq_t *hreq) 193 { 194 return xfs_handle_to_dentry(parfilp, hreq->ihandle, hreq->ihandlen); 195 } 196 197 int 198 xfs_open_by_handle( 199 struct file *parfilp, 200 xfs_fsop_handlereq_t *hreq) 201 { 202 const struct cred *cred = current_cred(); 203 int error; 204 int fd; 205 int permflag; 206 struct file *filp; 207 struct inode *inode; 208 struct dentry *dentry; 209 fmode_t fmode; 210 struct path path; 211 212 if (!capable(CAP_SYS_ADMIN)) 213 return -EPERM; 214 215 dentry = xfs_handlereq_to_dentry(parfilp, hreq); 216 if (IS_ERR(dentry)) 217 return PTR_ERR(dentry); 218 inode = d_inode(dentry); 219 220 /* Restrict xfs_open_by_handle to directories & regular files. */ 221 if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode))) { 222 error = -EPERM; 223 goto out_dput; 224 } 225 226 #if BITS_PER_LONG != 32 227 hreq->oflags |= O_LARGEFILE; 228 #endif 229 230 permflag = hreq->oflags; 231 fmode = OPEN_FMODE(permflag); 232 if ((!(permflag & O_APPEND) || (permflag & O_TRUNC)) && 233 (fmode & FMODE_WRITE) && IS_APPEND(inode)) { 234 error = -EPERM; 235 goto out_dput; 236 } 237 238 if ((fmode & FMODE_WRITE) && IS_IMMUTABLE(inode)) { 239 error = -EPERM; 240 goto out_dput; 241 } 242 243 /* Can't write directories. 
*/ 244 if (S_ISDIR(inode->i_mode) && (fmode & FMODE_WRITE)) { 245 error = -EISDIR; 246 goto out_dput; 247 } 248 249 fd = get_unused_fd_flags(0); 250 if (fd < 0) { 251 error = fd; 252 goto out_dput; 253 } 254 255 path.mnt = parfilp->f_path.mnt; 256 path.dentry = dentry; 257 filp = dentry_open(&path, hreq->oflags, cred); 258 dput(dentry); 259 if (IS_ERR(filp)) { 260 put_unused_fd(fd); 261 return PTR_ERR(filp); 262 } 263 264 if (S_ISREG(inode->i_mode)) { 265 filp->f_flags |= O_NOATIME; 266 filp->f_mode |= FMODE_NOCMTIME; 267 } 268 269 fd_install(fd, filp); 270 return fd; 271 272 out_dput: 273 dput(dentry); 274 return error; 275 } 276 277 int 278 xfs_readlink_by_handle( 279 struct file *parfilp, 280 xfs_fsop_handlereq_t *hreq) 281 { 282 struct dentry *dentry; 283 __u32 olen; 284 int error; 285 286 if (!capable(CAP_SYS_ADMIN)) 287 return -EPERM; 288 289 dentry = xfs_handlereq_to_dentry(parfilp, hreq); 290 if (IS_ERR(dentry)) 291 return PTR_ERR(dentry); 292 293 /* Restrict this handle operation to symlinks only. 
*/ 294 if (!d_is_symlink(dentry)) { 295 error = -EINVAL; 296 goto out_dput; 297 } 298 299 if (copy_from_user(&olen, hreq->ohandlen, sizeof(__u32))) { 300 error = -EFAULT; 301 goto out_dput; 302 } 303 304 error = vfs_readlink(dentry, hreq->ohandle, olen); 305 306 out_dput: 307 dput(dentry); 308 return error; 309 } 310 311 int 312 xfs_set_dmattrs( 313 xfs_inode_t *ip, 314 u_int evmask, 315 u_int16_t state) 316 { 317 xfs_mount_t *mp = ip->i_mount; 318 xfs_trans_t *tp; 319 int error; 320 321 if (!capable(CAP_SYS_ADMIN)) 322 return -EPERM; 323 324 if (XFS_FORCED_SHUTDOWN(mp)) 325 return -EIO; 326 327 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0, 0, 0, &tp); 328 if (error) 329 return error; 330 331 xfs_ilock(ip, XFS_ILOCK_EXCL); 332 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); 333 334 ip->i_d.di_dmevmask = evmask; 335 ip->i_d.di_dmstate = state; 336 337 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); 338 error = xfs_trans_commit(tp); 339 340 return error; 341 } 342 343 STATIC int 344 xfs_fssetdm_by_handle( 345 struct file *parfilp, 346 void __user *arg) 347 { 348 int error; 349 struct fsdmidata fsd; 350 xfs_fsop_setdm_handlereq_t dmhreq; 351 struct dentry *dentry; 352 353 if (!capable(CAP_MKNOD)) 354 return -EPERM; 355 if (copy_from_user(&dmhreq, arg, sizeof(xfs_fsop_setdm_handlereq_t))) 356 return -EFAULT; 357 358 error = mnt_want_write_file(parfilp); 359 if (error) 360 return error; 361 362 dentry = xfs_handlereq_to_dentry(parfilp, &dmhreq.hreq); 363 if (IS_ERR(dentry)) { 364 mnt_drop_write_file(parfilp); 365 return PTR_ERR(dentry); 366 } 367 368 if (IS_IMMUTABLE(d_inode(dentry)) || IS_APPEND(d_inode(dentry))) { 369 error = -EPERM; 370 goto out; 371 } 372 373 if (copy_from_user(&fsd, dmhreq.data, sizeof(fsd))) { 374 error = -EFAULT; 375 goto out; 376 } 377 378 error = xfs_set_dmattrs(XFS_I(d_inode(dentry)), fsd.fsd_dmevmask, 379 fsd.fsd_dmstate); 380 381 out: 382 mnt_drop_write_file(parfilp); 383 dput(dentry); 384 return error; 385 } 386 387 STATIC int 388 
xfs_attrlist_by_handle( 389 struct file *parfilp, 390 void __user *arg) 391 { 392 int error = -ENOMEM; 393 attrlist_cursor_kern_t *cursor; 394 struct xfs_fsop_attrlist_handlereq __user *p = arg; 395 xfs_fsop_attrlist_handlereq_t al_hreq; 396 struct dentry *dentry; 397 char *kbuf; 398 399 if (!capable(CAP_SYS_ADMIN)) 400 return -EPERM; 401 if (copy_from_user(&al_hreq, arg, sizeof(xfs_fsop_attrlist_handlereq_t))) 402 return -EFAULT; 403 if (al_hreq.buflen < sizeof(struct attrlist) || 404 al_hreq.buflen > XFS_XATTR_LIST_MAX) 405 return -EINVAL; 406 407 /* 408 * Reject flags, only allow namespaces. 409 */ 410 if (al_hreq.flags & ~(ATTR_ROOT | ATTR_SECURE)) 411 return -EINVAL; 412 413 dentry = xfs_handlereq_to_dentry(parfilp, &al_hreq.hreq); 414 if (IS_ERR(dentry)) 415 return PTR_ERR(dentry); 416 417 kbuf = kmem_zalloc_large(al_hreq.buflen, KM_SLEEP); 418 if (!kbuf) 419 goto out_dput; 420 421 cursor = (attrlist_cursor_kern_t *)&al_hreq.pos; 422 error = xfs_attr_list(XFS_I(d_inode(dentry)), kbuf, al_hreq.buflen, 423 al_hreq.flags, cursor); 424 if (error) 425 goto out_kfree; 426 427 if (copy_to_user(&p->pos, cursor, sizeof(attrlist_cursor_kern_t))) { 428 error = -EFAULT; 429 goto out_kfree; 430 } 431 432 if (copy_to_user(al_hreq.buffer, kbuf, al_hreq.buflen)) 433 error = -EFAULT; 434 435 out_kfree: 436 kmem_free(kbuf); 437 out_dput: 438 dput(dentry); 439 return error; 440 } 441 442 int 443 xfs_attrmulti_attr_get( 444 struct inode *inode, 445 unsigned char *name, 446 unsigned char __user *ubuf, 447 __uint32_t *len, 448 __uint32_t flags) 449 { 450 unsigned char *kbuf; 451 int error = -EFAULT; 452 453 if (*len > XFS_XATTR_SIZE_MAX) 454 return -EINVAL; 455 kbuf = kmem_zalloc_large(*len, KM_SLEEP); 456 if (!kbuf) 457 return -ENOMEM; 458 459 error = xfs_attr_get(XFS_I(inode), name, kbuf, (int *)len, flags); 460 if (error) 461 goto out_kfree; 462 463 if (copy_to_user(ubuf, kbuf, *len)) 464 error = -EFAULT; 465 466 out_kfree: 467 kmem_free(kbuf); 468 return error; 469 } 470 471 
int 472 xfs_attrmulti_attr_set( 473 struct inode *inode, 474 unsigned char *name, 475 const unsigned char __user *ubuf, 476 __uint32_t len, 477 __uint32_t flags) 478 { 479 unsigned char *kbuf; 480 int error; 481 482 if (IS_IMMUTABLE(inode) || IS_APPEND(inode)) 483 return -EPERM; 484 if (len > XFS_XATTR_SIZE_MAX) 485 return -EINVAL; 486 487 kbuf = memdup_user(ubuf, len); 488 if (IS_ERR(kbuf)) 489 return PTR_ERR(kbuf); 490 491 error = xfs_attr_set(XFS_I(inode), name, kbuf, len, flags); 492 if (!error) 493 xfs_forget_acl(inode, name, flags); 494 kfree(kbuf); 495 return error; 496 } 497 498 int 499 xfs_attrmulti_attr_remove( 500 struct inode *inode, 501 unsigned char *name, 502 __uint32_t flags) 503 { 504 int error; 505 506 if (IS_IMMUTABLE(inode) || IS_APPEND(inode)) 507 return -EPERM; 508 error = xfs_attr_remove(XFS_I(inode), name, flags); 509 if (!error) 510 xfs_forget_acl(inode, name, flags); 511 return error; 512 } 513 514 STATIC int 515 xfs_attrmulti_by_handle( 516 struct file *parfilp, 517 void __user *arg) 518 { 519 int error; 520 xfs_attr_multiop_t *ops; 521 xfs_fsop_attrmulti_handlereq_t am_hreq; 522 struct dentry *dentry; 523 unsigned int i, size; 524 unsigned char *attr_name; 525 526 if (!capable(CAP_SYS_ADMIN)) 527 return -EPERM; 528 if (copy_from_user(&am_hreq, arg, sizeof(xfs_fsop_attrmulti_handlereq_t))) 529 return -EFAULT; 530 531 /* overflow check */ 532 if (am_hreq.opcount >= INT_MAX / sizeof(xfs_attr_multiop_t)) 533 return -E2BIG; 534 535 dentry = xfs_handlereq_to_dentry(parfilp, &am_hreq.hreq); 536 if (IS_ERR(dentry)) 537 return PTR_ERR(dentry); 538 539 error = -E2BIG; 540 size = am_hreq.opcount * sizeof(xfs_attr_multiop_t); 541 if (!size || size > 16 * PAGE_SIZE) 542 goto out_dput; 543 544 ops = memdup_user(am_hreq.ops, size); 545 if (IS_ERR(ops)) { 546 error = PTR_ERR(ops); 547 goto out_dput; 548 } 549 550 error = -ENOMEM; 551 attr_name = kmalloc(MAXNAMELEN, GFP_KERNEL); 552 if (!attr_name) 553 goto out_kfree_ops; 554 555 error = 0; 556 for (i = 
0; i < am_hreq.opcount; i++) { 557 ops[i].am_error = strncpy_from_user((char *)attr_name, 558 ops[i].am_attrname, MAXNAMELEN); 559 if (ops[i].am_error == 0 || ops[i].am_error == MAXNAMELEN) 560 error = -ERANGE; 561 if (ops[i].am_error < 0) 562 break; 563 564 switch (ops[i].am_opcode) { 565 case ATTR_OP_GET: 566 ops[i].am_error = xfs_attrmulti_attr_get( 567 d_inode(dentry), attr_name, 568 ops[i].am_attrvalue, &ops[i].am_length, 569 ops[i].am_flags); 570 break; 571 case ATTR_OP_SET: 572 ops[i].am_error = mnt_want_write_file(parfilp); 573 if (ops[i].am_error) 574 break; 575 ops[i].am_error = xfs_attrmulti_attr_set( 576 d_inode(dentry), attr_name, 577 ops[i].am_attrvalue, ops[i].am_length, 578 ops[i].am_flags); 579 mnt_drop_write_file(parfilp); 580 break; 581 case ATTR_OP_REMOVE: 582 ops[i].am_error = mnt_want_write_file(parfilp); 583 if (ops[i].am_error) 584 break; 585 ops[i].am_error = xfs_attrmulti_attr_remove( 586 d_inode(dentry), attr_name, 587 ops[i].am_flags); 588 mnt_drop_write_file(parfilp); 589 break; 590 default: 591 ops[i].am_error = -EINVAL; 592 } 593 } 594 595 if (copy_to_user(am_hreq.ops, ops, size)) 596 error = -EFAULT; 597 598 kfree(attr_name); 599 out_kfree_ops: 600 kfree(ops); 601 out_dput: 602 dput(dentry); 603 return error; 604 } 605 606 int 607 xfs_ioc_space( 608 struct file *filp, 609 unsigned int cmd, 610 xfs_flock64_t *bf) 611 { 612 struct inode *inode = file_inode(filp); 613 struct xfs_inode *ip = XFS_I(inode); 614 struct iattr iattr; 615 enum xfs_prealloc_flags flags = 0; 616 uint iolock = XFS_IOLOCK_EXCL; 617 int error; 618 619 /* 620 * Only allow the sys admin to reserve space unless 621 * unwritten extents are enabled. 
622 */ 623 if (!xfs_sb_version_hasextflgbit(&ip->i_mount->m_sb) && 624 !capable(CAP_SYS_ADMIN)) 625 return -EPERM; 626 627 if (inode->i_flags & (S_IMMUTABLE|S_APPEND)) 628 return -EPERM; 629 630 if (!(filp->f_mode & FMODE_WRITE)) 631 return -EBADF; 632 633 if (!S_ISREG(inode->i_mode)) 634 return -EINVAL; 635 636 if (filp->f_flags & O_DSYNC) 637 flags |= XFS_PREALLOC_SYNC; 638 if (filp->f_mode & FMODE_NOCMTIME) 639 flags |= XFS_PREALLOC_INVISIBLE; 640 641 error = mnt_want_write_file(filp); 642 if (error) 643 return error; 644 645 xfs_ilock(ip, iolock); 646 error = xfs_break_layouts(inode, &iolock); 647 if (error) 648 goto out_unlock; 649 650 xfs_ilock(ip, XFS_MMAPLOCK_EXCL); 651 iolock |= XFS_MMAPLOCK_EXCL; 652 653 switch (bf->l_whence) { 654 case 0: /*SEEK_SET*/ 655 break; 656 case 1: /*SEEK_CUR*/ 657 bf->l_start += filp->f_pos; 658 break; 659 case 2: /*SEEK_END*/ 660 bf->l_start += XFS_ISIZE(ip); 661 break; 662 default: 663 error = -EINVAL; 664 goto out_unlock; 665 } 666 667 /* 668 * length of <= 0 for resv/unresv/zero is invalid. length for 669 * alloc/free is ignored completely and we have no idea what userspace 670 * might have set it to, so set it to zero to allow range 671 * checks to pass. 
672 */ 673 switch (cmd) { 674 case XFS_IOC_ZERO_RANGE: 675 case XFS_IOC_RESVSP: 676 case XFS_IOC_RESVSP64: 677 case XFS_IOC_UNRESVSP: 678 case XFS_IOC_UNRESVSP64: 679 if (bf->l_len <= 0) { 680 error = -EINVAL; 681 goto out_unlock; 682 } 683 break; 684 default: 685 bf->l_len = 0; 686 break; 687 } 688 689 if (bf->l_start < 0 || 690 bf->l_start > inode->i_sb->s_maxbytes || 691 bf->l_start + bf->l_len < 0 || 692 bf->l_start + bf->l_len >= inode->i_sb->s_maxbytes) { 693 error = -EINVAL; 694 goto out_unlock; 695 } 696 697 switch (cmd) { 698 case XFS_IOC_ZERO_RANGE: 699 flags |= XFS_PREALLOC_SET; 700 error = xfs_zero_file_space(ip, bf->l_start, bf->l_len); 701 break; 702 case XFS_IOC_RESVSP: 703 case XFS_IOC_RESVSP64: 704 flags |= XFS_PREALLOC_SET; 705 error = xfs_alloc_file_space(ip, bf->l_start, bf->l_len, 706 XFS_BMAPI_PREALLOC); 707 break; 708 case XFS_IOC_UNRESVSP: 709 case XFS_IOC_UNRESVSP64: 710 error = xfs_free_file_space(ip, bf->l_start, bf->l_len); 711 break; 712 case XFS_IOC_ALLOCSP: 713 case XFS_IOC_ALLOCSP64: 714 case XFS_IOC_FREESP: 715 case XFS_IOC_FREESP64: 716 flags |= XFS_PREALLOC_CLEAR; 717 if (bf->l_start > XFS_ISIZE(ip)) { 718 error = xfs_alloc_file_space(ip, XFS_ISIZE(ip), 719 bf->l_start - XFS_ISIZE(ip), 0); 720 if (error) 721 goto out_unlock; 722 } 723 724 iattr.ia_valid = ATTR_SIZE; 725 iattr.ia_size = bf->l_start; 726 727 error = xfs_vn_setattr_size(file_dentry(filp), &iattr); 728 break; 729 default: 730 ASSERT(0); 731 error = -EINVAL; 732 } 733 734 if (error) 735 goto out_unlock; 736 737 error = xfs_update_prealloc_flags(ip, flags); 738 739 out_unlock: 740 xfs_iunlock(ip, iolock); 741 mnt_drop_write_file(filp); 742 return error; 743 } 744 745 STATIC int 746 xfs_ioc_bulkstat( 747 xfs_mount_t *mp, 748 unsigned int cmd, 749 void __user *arg) 750 { 751 xfs_fsop_bulkreq_t bulkreq; 752 int count; /* # of records returned */ 753 xfs_ino_t inlast; /* last inode number */ 754 int done; 755 int error; 756 757 /* done = 1 if there are more stats to get and 
if bulkstat */ 758 /* should be called again (unused here, but used in dmapi) */ 759 760 if (!capable(CAP_SYS_ADMIN)) 761 return -EPERM; 762 763 if (XFS_FORCED_SHUTDOWN(mp)) 764 return -EIO; 765 766 if (copy_from_user(&bulkreq, arg, sizeof(xfs_fsop_bulkreq_t))) 767 return -EFAULT; 768 769 if (copy_from_user(&inlast, bulkreq.lastip, sizeof(__s64))) 770 return -EFAULT; 771 772 if ((count = bulkreq.icount) <= 0) 773 return -EINVAL; 774 775 if (bulkreq.ubuffer == NULL) 776 return -EINVAL; 777 778 if (cmd == XFS_IOC_FSINUMBERS) 779 error = xfs_inumbers(mp, &inlast, &count, 780 bulkreq.ubuffer, xfs_inumbers_fmt); 781 else if (cmd == XFS_IOC_FSBULKSTAT_SINGLE) 782 error = xfs_bulkstat_one(mp, inlast, bulkreq.ubuffer, 783 sizeof(xfs_bstat_t), NULL, &done); 784 else /* XFS_IOC_FSBULKSTAT */ 785 error = xfs_bulkstat(mp, &inlast, &count, xfs_bulkstat_one, 786 sizeof(xfs_bstat_t), bulkreq.ubuffer, 787 &done); 788 789 if (error) 790 return error; 791 792 if (bulkreq.ocount != NULL) { 793 if (copy_to_user(bulkreq.lastip, &inlast, 794 sizeof(xfs_ino_t))) 795 return -EFAULT; 796 797 if (copy_to_user(bulkreq.ocount, &count, sizeof(count))) 798 return -EFAULT; 799 } 800 801 return 0; 802 } 803 804 STATIC int 805 xfs_ioc_fsgeometry_v1( 806 xfs_mount_t *mp, 807 void __user *arg) 808 { 809 xfs_fsop_geom_t fsgeo; 810 int error; 811 812 error = xfs_fs_geometry(mp, &fsgeo, 3); 813 if (error) 814 return error; 815 816 /* 817 * Caller should have passed an argument of type 818 * xfs_fsop_geom_v1_t. This is a proper subset of the 819 * xfs_fsop_geom_t that xfs_fs_geometry() fills in. 
820 */ 821 if (copy_to_user(arg, &fsgeo, sizeof(xfs_fsop_geom_v1_t))) 822 return -EFAULT; 823 return 0; 824 } 825 826 STATIC int 827 xfs_ioc_fsgeometry( 828 xfs_mount_t *mp, 829 void __user *arg) 830 { 831 xfs_fsop_geom_t fsgeo; 832 int error; 833 834 error = xfs_fs_geometry(mp, &fsgeo, 4); 835 if (error) 836 return error; 837 838 if (copy_to_user(arg, &fsgeo, sizeof(fsgeo))) 839 return -EFAULT; 840 return 0; 841 } 842 843 /* 844 * Linux extended inode flags interface. 845 */ 846 847 STATIC unsigned int 848 xfs_merge_ioc_xflags( 849 unsigned int flags, 850 unsigned int start) 851 { 852 unsigned int xflags = start; 853 854 if (flags & FS_IMMUTABLE_FL) 855 xflags |= FS_XFLAG_IMMUTABLE; 856 else 857 xflags &= ~FS_XFLAG_IMMUTABLE; 858 if (flags & FS_APPEND_FL) 859 xflags |= FS_XFLAG_APPEND; 860 else 861 xflags &= ~FS_XFLAG_APPEND; 862 if (flags & FS_SYNC_FL) 863 xflags |= FS_XFLAG_SYNC; 864 else 865 xflags &= ~FS_XFLAG_SYNC; 866 if (flags & FS_NOATIME_FL) 867 xflags |= FS_XFLAG_NOATIME; 868 else 869 xflags &= ~FS_XFLAG_NOATIME; 870 if (flags & FS_NODUMP_FL) 871 xflags |= FS_XFLAG_NODUMP; 872 else 873 xflags &= ~FS_XFLAG_NODUMP; 874 875 return xflags; 876 } 877 878 STATIC unsigned int 879 xfs_di2lxflags( 880 __uint16_t di_flags) 881 { 882 unsigned int flags = 0; 883 884 if (di_flags & XFS_DIFLAG_IMMUTABLE) 885 flags |= FS_IMMUTABLE_FL; 886 if (di_flags & XFS_DIFLAG_APPEND) 887 flags |= FS_APPEND_FL; 888 if (di_flags & XFS_DIFLAG_SYNC) 889 flags |= FS_SYNC_FL; 890 if (di_flags & XFS_DIFLAG_NOATIME) 891 flags |= FS_NOATIME_FL; 892 if (di_flags & XFS_DIFLAG_NODUMP) 893 flags |= FS_NODUMP_FL; 894 return flags; 895 } 896 897 STATIC int 898 xfs_ioc_fsgetxattr( 899 xfs_inode_t *ip, 900 int attr, 901 void __user *arg) 902 { 903 struct fsxattr fa; 904 905 memset(&fa, 0, sizeof(struct fsxattr)); 906 907 xfs_ilock(ip, XFS_ILOCK_SHARED); 908 fa.fsx_xflags = xfs_ip2xflags(ip); 909 fa.fsx_extsize = ip->i_d.di_extsize << ip->i_mount->m_sb.sb_blocklog; 910 fa.fsx_cowextsize = 
ip->i_d.di_cowextsize << 911 ip->i_mount->m_sb.sb_blocklog; 912 fa.fsx_projid = xfs_get_projid(ip); 913 914 if (attr) { 915 if (ip->i_afp) { 916 if (ip->i_afp->if_flags & XFS_IFEXTENTS) 917 fa.fsx_nextents = xfs_iext_count(ip->i_afp); 918 else 919 fa.fsx_nextents = ip->i_d.di_anextents; 920 } else 921 fa.fsx_nextents = 0; 922 } else { 923 if (ip->i_df.if_flags & XFS_IFEXTENTS) 924 fa.fsx_nextents = xfs_iext_count(&ip->i_df); 925 else 926 fa.fsx_nextents = ip->i_d.di_nextents; 927 } 928 xfs_iunlock(ip, XFS_ILOCK_SHARED); 929 930 if (copy_to_user(arg, &fa, sizeof(fa))) 931 return -EFAULT; 932 return 0; 933 } 934 935 STATIC void 936 xfs_set_diflags( 937 struct xfs_inode *ip, 938 unsigned int xflags) 939 { 940 unsigned int di_flags; 941 uint64_t di_flags2; 942 943 /* can't set PREALLOC this way, just preserve it */ 944 di_flags = (ip->i_d.di_flags & XFS_DIFLAG_PREALLOC); 945 if (xflags & FS_XFLAG_IMMUTABLE) 946 di_flags |= XFS_DIFLAG_IMMUTABLE; 947 if (xflags & FS_XFLAG_APPEND) 948 di_flags |= XFS_DIFLAG_APPEND; 949 if (xflags & FS_XFLAG_SYNC) 950 di_flags |= XFS_DIFLAG_SYNC; 951 if (xflags & FS_XFLAG_NOATIME) 952 di_flags |= XFS_DIFLAG_NOATIME; 953 if (xflags & FS_XFLAG_NODUMP) 954 di_flags |= XFS_DIFLAG_NODUMP; 955 if (xflags & FS_XFLAG_NODEFRAG) 956 di_flags |= XFS_DIFLAG_NODEFRAG; 957 if (xflags & FS_XFLAG_FILESTREAM) 958 di_flags |= XFS_DIFLAG_FILESTREAM; 959 if (S_ISDIR(VFS_I(ip)->i_mode)) { 960 if (xflags & FS_XFLAG_RTINHERIT) 961 di_flags |= XFS_DIFLAG_RTINHERIT; 962 if (xflags & FS_XFLAG_NOSYMLINKS) 963 di_flags |= XFS_DIFLAG_NOSYMLINKS; 964 if (xflags & FS_XFLAG_EXTSZINHERIT) 965 di_flags |= XFS_DIFLAG_EXTSZINHERIT; 966 if (xflags & FS_XFLAG_PROJINHERIT) 967 di_flags |= XFS_DIFLAG_PROJINHERIT; 968 } else if (S_ISREG(VFS_I(ip)->i_mode)) { 969 if (xflags & FS_XFLAG_REALTIME) 970 di_flags |= XFS_DIFLAG_REALTIME; 971 if (xflags & FS_XFLAG_EXTSIZE) 972 di_flags |= XFS_DIFLAG_EXTSIZE; 973 } 974 ip->i_d.di_flags = di_flags; 975 976 /* diflags2 only valid for v3 
inodes. */ 977 if (ip->i_d.di_version < 3) 978 return; 979 980 di_flags2 = (ip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK); 981 if (xflags & FS_XFLAG_DAX) 982 di_flags2 |= XFS_DIFLAG2_DAX; 983 if (xflags & FS_XFLAG_COWEXTSIZE) 984 di_flags2 |= XFS_DIFLAG2_COWEXTSIZE; 985 986 ip->i_d.di_flags2 = di_flags2; 987 } 988 989 STATIC void 990 xfs_diflags_to_linux( 991 struct xfs_inode *ip) 992 { 993 struct inode *inode = VFS_I(ip); 994 unsigned int xflags = xfs_ip2xflags(ip); 995 996 if (xflags & FS_XFLAG_IMMUTABLE) 997 inode->i_flags |= S_IMMUTABLE; 998 else 999 inode->i_flags &= ~S_IMMUTABLE; 1000 if (xflags & FS_XFLAG_APPEND) 1001 inode->i_flags |= S_APPEND; 1002 else 1003 inode->i_flags &= ~S_APPEND; 1004 if (xflags & FS_XFLAG_SYNC) 1005 inode->i_flags |= S_SYNC; 1006 else 1007 inode->i_flags &= ~S_SYNC; 1008 if (xflags & FS_XFLAG_NOATIME) 1009 inode->i_flags |= S_NOATIME; 1010 else 1011 inode->i_flags &= ~S_NOATIME; 1012 if (xflags & FS_XFLAG_DAX) 1013 inode->i_flags |= S_DAX; 1014 else 1015 inode->i_flags &= ~S_DAX; 1016 1017 } 1018 1019 static int 1020 xfs_ioctl_setattr_xflags( 1021 struct xfs_trans *tp, 1022 struct xfs_inode *ip, 1023 struct fsxattr *fa) 1024 { 1025 struct xfs_mount *mp = ip->i_mount; 1026 1027 /* Can't change realtime flag if any extents are allocated. */ 1028 if ((ip->i_d.di_nextents || ip->i_delayed_blks) && 1029 XFS_IS_REALTIME_INODE(ip) != (fa->fsx_xflags & FS_XFLAG_REALTIME)) 1030 return -EINVAL; 1031 1032 /* If realtime flag is set then must have realtime device */ 1033 if (fa->fsx_xflags & FS_XFLAG_REALTIME) { 1034 if (mp->m_sb.sb_rblocks == 0 || mp->m_sb.sb_rextsize == 0 || 1035 (ip->i_d.di_extsize % mp->m_sb.sb_rextsize)) 1036 return -EINVAL; 1037 } 1038 1039 /* Clear reflink if we are actually able to set the rt flag. */ 1040 if ((fa->fsx_xflags & FS_XFLAG_REALTIME) && xfs_is_reflink_inode(ip)) 1041 ip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK; 1042 1043 /* Don't allow us to set DAX mode for a reflinked file for now. 
*/ 1044 if ((fa->fsx_xflags & FS_XFLAG_DAX) && xfs_is_reflink_inode(ip)) 1045 return -EINVAL; 1046 1047 /* 1048 * Can't modify an immutable/append-only file unless 1049 * we have appropriate permission. 1050 */ 1051 if (((ip->i_d.di_flags & (XFS_DIFLAG_IMMUTABLE | XFS_DIFLAG_APPEND)) || 1052 (fa->fsx_xflags & (FS_XFLAG_IMMUTABLE | FS_XFLAG_APPEND))) && 1053 !capable(CAP_LINUX_IMMUTABLE)) 1054 return -EPERM; 1055 1056 xfs_set_diflags(ip, fa->fsx_xflags); 1057 xfs_diflags_to_linux(ip); 1058 xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG); 1059 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); 1060 XFS_STATS_INC(mp, xs_ig_attrchg); 1061 return 0; 1062 } 1063 1064 /* 1065 * If we are changing DAX flags, we have to ensure the file is clean and any 1066 * cached objects in the address space are invalidated and removed. This 1067 * requires us to lock out other IO and page faults similar to a truncate 1068 * operation. The locks need to be held until the transaction has been committed 1069 * so that the cache invalidation is atomic with respect to the DAX flag 1070 * manipulation. 1071 */ 1072 static int 1073 xfs_ioctl_setattr_dax_invalidate( 1074 struct xfs_inode *ip, 1075 struct fsxattr *fa, 1076 int *join_flags) 1077 { 1078 struct inode *inode = VFS_I(ip); 1079 int error; 1080 1081 *join_flags = 0; 1082 1083 /* 1084 * It is only valid to set the DAX flag on regular files and 1085 * directories on filesystems where the block size is equal to the page 1086 * size. On directories it serves as an inherit hint. 1087 */ 1088 if (fa->fsx_xflags & FS_XFLAG_DAX) { 1089 if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode))) 1090 return -EINVAL; 1091 if (ip->i_mount->m_sb.sb_blocksize != PAGE_SIZE) 1092 return -EINVAL; 1093 } 1094 1095 /* If the DAX state is not changing, we have nothing to do here. 
*/ 1096 if ((fa->fsx_xflags & FS_XFLAG_DAX) && IS_DAX(inode)) 1097 return 0; 1098 if (!(fa->fsx_xflags & FS_XFLAG_DAX) && !IS_DAX(inode)) 1099 return 0; 1100 1101 /* lock, flush and invalidate mapping in preparation for flag change */ 1102 xfs_ilock(ip, XFS_MMAPLOCK_EXCL | XFS_IOLOCK_EXCL); 1103 error = filemap_write_and_wait(inode->i_mapping); 1104 if (error) 1105 goto out_unlock; 1106 error = invalidate_inode_pages2(inode->i_mapping); 1107 if (error) 1108 goto out_unlock; 1109 1110 *join_flags = XFS_MMAPLOCK_EXCL | XFS_IOLOCK_EXCL; 1111 return 0; 1112 1113 out_unlock: 1114 xfs_iunlock(ip, XFS_MMAPLOCK_EXCL | XFS_IOLOCK_EXCL); 1115 return error; 1116 1117 } 1118 1119 /* 1120 * Set up the transaction structure for the setattr operation, checking that we 1121 * have permission to do so. On success, return a clean transaction and the 1122 * inode locked exclusively ready for further operation specific checks. On 1123 * failure, return an error without modifying or locking the inode. 1124 * 1125 * The inode might already be IO locked on call. If this is the case, it is 1126 * indicated in @join_flags and we take full responsibility for ensuring they 1127 * are unlocked from now on. Hence if we have an error here, we still have to 1128 * unlock them. Otherwise, once they are joined to the transaction, they will 1129 * be unlocked on commit/cancel. 
1130 */ 1131 static struct xfs_trans * 1132 xfs_ioctl_setattr_get_trans( 1133 struct xfs_inode *ip, 1134 int join_flags) 1135 { 1136 struct xfs_mount *mp = ip->i_mount; 1137 struct xfs_trans *tp; 1138 int error = -EROFS; 1139 1140 if (mp->m_flags & XFS_MOUNT_RDONLY) 1141 goto out_unlock; 1142 error = -EIO; 1143 if (XFS_FORCED_SHUTDOWN(mp)) 1144 goto out_unlock; 1145 1146 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0, 0, 0, &tp); 1147 if (error) 1148 return ERR_PTR(error); 1149 1150 xfs_ilock(ip, XFS_ILOCK_EXCL); 1151 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL | join_flags); 1152 join_flags = 0; 1153 1154 /* 1155 * CAP_FOWNER overrides the following restrictions: 1156 * 1157 * The user ID of the calling process must be equal to the file owner 1158 * ID, except in cases where the CAP_FSETID capability is applicable. 1159 */ 1160 if (!inode_owner_or_capable(VFS_I(ip))) { 1161 error = -EPERM; 1162 goto out_cancel; 1163 } 1164 1165 if (mp->m_flags & XFS_MOUNT_WSYNC) 1166 xfs_trans_set_sync(tp); 1167 1168 return tp; 1169 1170 out_cancel: 1171 xfs_trans_cancel(tp); 1172 out_unlock: 1173 if (join_flags) 1174 xfs_iunlock(ip, join_flags); 1175 return ERR_PTR(error); 1176 } 1177 1178 /* 1179 * extent size hint validation is somewhat cumbersome. Rules are: 1180 * 1181 * 1. extent size hint is only valid for directories and regular files 1182 * 2. FS_XFLAG_EXTSIZE is only valid for regular files 1183 * 3. FS_XFLAG_EXTSZINHERIT is only valid for directories. 1184 * 4. can only be changed on regular files if no extents are allocated 1185 * 5. can be changed on directories at any time 1186 * 6. extsize hint of 0 turns off hints, clears inode flags. 1187 * 7. Extent size must be a multiple of the appropriate block size. 1188 * 8. for non-realtime files, the extent size hint must be limited 1189 * to half the AG size to avoid alignment extending the extent beyond the 1190 * limits of the AG. 
1191 */ 1192 static int 1193 xfs_ioctl_setattr_check_extsize( 1194 struct xfs_inode *ip, 1195 struct fsxattr *fa) 1196 { 1197 struct xfs_mount *mp = ip->i_mount; 1198 1199 if ((fa->fsx_xflags & FS_XFLAG_EXTSIZE) && !S_ISREG(VFS_I(ip)->i_mode)) 1200 return -EINVAL; 1201 1202 if ((fa->fsx_xflags & FS_XFLAG_EXTSZINHERIT) && 1203 !S_ISDIR(VFS_I(ip)->i_mode)) 1204 return -EINVAL; 1205 1206 if (S_ISREG(VFS_I(ip)->i_mode) && ip->i_d.di_nextents && 1207 ((ip->i_d.di_extsize << mp->m_sb.sb_blocklog) != fa->fsx_extsize)) 1208 return -EINVAL; 1209 1210 if (fa->fsx_extsize != 0) { 1211 xfs_extlen_t size; 1212 xfs_fsblock_t extsize_fsb; 1213 1214 extsize_fsb = XFS_B_TO_FSB(mp, fa->fsx_extsize); 1215 if (extsize_fsb > MAXEXTLEN) 1216 return -EINVAL; 1217 1218 if (XFS_IS_REALTIME_INODE(ip) || 1219 (fa->fsx_xflags & FS_XFLAG_REALTIME)) { 1220 size = mp->m_sb.sb_rextsize << mp->m_sb.sb_blocklog; 1221 } else { 1222 size = mp->m_sb.sb_blocksize; 1223 if (extsize_fsb > mp->m_sb.sb_agblocks / 2) 1224 return -EINVAL; 1225 } 1226 1227 if (fa->fsx_extsize % size) 1228 return -EINVAL; 1229 } else 1230 fa->fsx_xflags &= ~(FS_XFLAG_EXTSIZE | FS_XFLAG_EXTSZINHERIT); 1231 1232 return 0; 1233 } 1234 1235 /* 1236 * CoW extent size hint validation rules are: 1237 * 1238 * 1. CoW extent size hint can only be set if reflink is enabled on the fs. 1239 * The inode does not have to have any shared blocks, but it must be a v3. 1240 * 2. FS_XFLAG_COWEXTSIZE is only valid for directories and regular files; 1241 * for a directory, the hint is propagated to new files. 1242 * 3. Can be changed on files & directories at any time. 1243 * 4. CoW extsize hint of 0 turns off hints, clears inode flags. 1244 * 5. Extent size must be a multiple of the appropriate block size. 1245 * 6. The extent size hint must be limited to half the AG size to avoid 1246 * alignment extending the extent beyond the limits of the AG. 
 */
static int
xfs_ioctl_setattr_check_cowextsize(
	struct xfs_inode	*ip,
	struct fsxattr		*fa)
{
	struct xfs_mount	*mp = ip->i_mount;

	if (!(fa->fsx_xflags & FS_XFLAG_COWEXTSIZE))
		return 0;

	/* CoW hints need a reflink-capable fs and a v3 inode to store them. */
	if (!xfs_sb_version_hasreflink(&ip->i_mount->m_sb) ||
	    ip->i_d.di_version != 3)
		return -EINVAL;

	if (!S_ISREG(VFS_I(ip)->i_mode) && !S_ISDIR(VFS_I(ip)->i_mode))
		return -EINVAL;

	if (fa->fsx_cowextsize != 0) {
		xfs_extlen_t		size;
		xfs_fsblock_t		cowextsize_fsb;

		cowextsize_fsb = XFS_B_TO_FSB(mp, fa->fsx_cowextsize);
		if (cowextsize_fsb > MAXEXTLEN)
			return -EINVAL;

		size = mp->m_sb.sb_blocksize;
		if (cowextsize_fsb > mp->m_sb.sb_agblocks / 2)
			return -EINVAL;

		if (fa->fsx_cowextsize % size)
			return -EINVAL;
	} else
		/* a zero hint turns the hint off and clears the flag */
		fa->fsx_xflags &= ~FS_XFLAG_COWEXTSIZE;

	return 0;
}

/*
 * Validate the project ID in @fa against the inode and the calling
 * user namespace.
 */
static int
xfs_ioctl_setattr_check_projid(
	struct xfs_inode	*ip,
	struct fsxattr		*fa)
{
	/* Disallow 32bit project ids if projid32bit feature is not enabled. */
	if (fa->fsx_projid > (__uint16_t)-1 &&
	    !xfs_sb_version_hasprojid32bit(&ip->i_mount->m_sb))
		return -EINVAL;

	/*
	 * Project Quota ID state is only allowed to change from within the init
	 * namespace. Enforce that restriction only if we are trying to change
	 * the quota ID state. Everything else is allowed in user namespaces.
	 */
	if (current_user_ns() == &init_user_ns)
		return 0;

	if (xfs_get_projid(ip) != fa->fsx_projid)
		return -EINVAL;
	if ((fa->fsx_xflags & FS_XFLAG_PROJINHERIT) !=
	    (ip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT))
		return -EINVAL;

	return 0;
}

/*
 * Apply the fsxattr settings in @fa to the inode: validate the new values,
 * reserve quota as needed, and commit the flag, extent size hint and
 * project ID changes in a single transaction.
 */
STATIC int
xfs_ioctl_setattr(
	xfs_inode_t		*ip,
	struct fsxattr		*fa)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	struct xfs_dquot	*udqp = NULL;
	struct xfs_dquot	*pdqp = NULL;
	struct xfs_dquot	*olddquot = NULL;
	int			code;
	int			join_flags = 0;

	trace_xfs_ioctl_setattr(ip);

	/* projid checks need no transaction, so do them first */
	code = xfs_ioctl_setattr_check_projid(ip, fa);
	if (code)
		return code;

	/*
	 * If disk quotas is on, we make sure that the dquots do exist on disk,
	 * before we start any other transactions. Trying to do this later
	 * is messy. We don't care to take a readlock to look at the ids
	 * in inode here, because we can't hold it across the trans_reserve.
	 * If the IDs do change before we take the ilock, we're covered
	 * because the i_*dquot fields will get updated anyway.
	 */
	if (XFS_IS_QUOTA_ON(mp)) {
		code = xfs_qm_vop_dqalloc(ip, ip->i_d.di_uid,
					 ip->i_d.di_gid, fa->fsx_projid,
					 XFS_QMOPT_PQUOTA, &udqp, NULL, &pdqp);
		if (code)
			return code;
	}

	/*
	 * Changing DAX config may require inode locking for mapping
	 * invalidation. These need to be held all the way to transaction commit
	 * or cancel time, so need to be passed through to
	 * xfs_ioctl_setattr_get_trans() so it can apply them to the join call
	 * appropriately.
	 */
	code = xfs_ioctl_setattr_dax_invalidate(ip, fa, &join_flags);
	if (code)
		goto error_free_dquots;

	tp = xfs_ioctl_setattr_get_trans(ip, join_flags);
	if (IS_ERR(tp)) {
		code = PTR_ERR(tp);
		goto error_free_dquots;
	}


	/* reserve quota against the new project before making any changes */
	if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_PQUOTA_ON(mp) &&
	    xfs_get_projid(ip) != fa->fsx_projid) {
		code = xfs_qm_vop_chown_reserve(tp, ip, udqp, NULL, pdqp,
				capable(CAP_FOWNER) ?  XFS_QMOPT_FORCE_RES : 0);
		if (code)	/* out of quota */
			goto error_trans_cancel;
	}

	code = xfs_ioctl_setattr_check_extsize(ip, fa);
	if (code)
		goto error_trans_cancel;

	code = xfs_ioctl_setattr_check_cowextsize(ip, fa);
	if (code)
		goto error_trans_cancel;

	code = xfs_ioctl_setattr_xflags(tp, ip, fa);
	if (code)
		goto error_trans_cancel;

	/*
	 * Change file ownership.  Must be the owner or privileged.  CAP_FSETID
	 * overrides the following restrictions:
	 *
	 * The set-user-ID and set-group-ID bits of a file will be cleared upon
	 * successful return from chown()
	 */

	if ((VFS_I(ip)->i_mode & (S_ISUID|S_ISGID)) &&
	    !capable_wrt_inode_uidgid(VFS_I(ip), CAP_FSETID))
		VFS_I(ip)->i_mode &= ~(S_ISUID|S_ISGID);

	/* Change the ownerships and register project quota modifications */
	if (xfs_get_projid(ip) != fa->fsx_projid) {
		if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_PQUOTA_ON(mp)) {
			olddquot = xfs_qm_vop_chown(tp, ip,
						&ip->i_pdquot, pdqp);
		}
		ASSERT(ip->i_d.di_version > 1);
		xfs_set_projid(ip, fa->fsx_projid);
	}

	/*
	 * Only set the extent size hint if we've already determined that the
	 * extent size hint should be set on the inode. If no extent size flags
	 * are set on the inode then unconditionally clear the extent size hint.
	 */
	if (ip->i_d.di_flags & (XFS_DIFLAG_EXTSIZE | XFS_DIFLAG_EXTSZINHERIT))
		ip->i_d.di_extsize = fa->fsx_extsize >> mp->m_sb.sb_blocklog;
	else
		ip->i_d.di_extsize = 0;
	if (ip->i_d.di_version == 3 &&
	    (ip->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE))
		ip->i_d.di_cowextsize = fa->fsx_cowextsize >>
				mp->m_sb.sb_blocklog;
	else
		ip->i_d.di_cowextsize = 0;

	code = xfs_trans_commit(tp);

	/*
	 * Release any dquot(s) the inode had kept before chown.
	 */
	xfs_qm_dqrele(olddquot);
	xfs_qm_dqrele(udqp);
	xfs_qm_dqrele(pdqp);

	return code;

error_trans_cancel:
	xfs_trans_cancel(tp);
error_free_dquots:
	xfs_qm_dqrele(udqp);
	xfs_qm_dqrele(pdqp);
	return code;
}

/* FS_IOC_FSSETXATTR ioctl: copy in the fsxattr and apply it to @ip. */
STATIC int
xfs_ioc_fssetxattr(
	xfs_inode_t		*ip,
	struct file		*filp,
	void			__user *arg)
{
	struct fsxattr		fa;
	int			error;

	if (copy_from_user(&fa, arg, sizeof(fa)))
		return -EFAULT;

	error = mnt_want_write_file(filp);
	if (error)
		return error;
	error = xfs_ioctl_setattr(ip, &fa);
	mnt_drop_write_file(filp);
	return error;
}

/* FS_IOC_GETFLAGS ioctl: return the inode flags in Linux FS_*_FL form. */
STATIC int
xfs_ioc_getxflags(
	xfs_inode_t		*ip,
	void			__user *arg)
{
	unsigned int		flags;

	flags = xfs_di2lxflags(ip->i_d.di_flags);
	if (copy_to_user(arg, &flags, sizeof(flags)))
		return -EFAULT;
	return 0;
}

/*
 * FS_IOC_SETFLAGS ioctl: update the subset of Linux FS_*_FL flags that
 * XFS supports, leaving all other on-disk flags untouched.
 */
STATIC int
xfs_ioc_setxflags(
	struct xfs_inode	*ip,
	struct file		*filp,
	void			__user *arg)
{
	struct xfs_trans	*tp;
	struct fsxattr		fa;
	unsigned int		flags;
	int			join_flags = 0;
	int			error;

	if (copy_from_user(&flags, arg, sizeof(flags)))
		return -EFAULT;

	if (flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL | \
		      FS_NOATIME_FL | FS_NODUMP_FL | \
		      FS_SYNC_FL))
		return -EOPNOTSUPP;

	/* merge the requested flags into the inode's current xflags */
	fa.fsx_xflags = xfs_merge_ioc_xflags(flags, xfs_ip2xflags(ip));

	error = mnt_want_write_file(filp);
	if (error)
		return error;

	/*
	 * Changing DAX config may require inode locking for mapping
	 * invalidation. These need to be held all the way to transaction commit
	 * or cancel time, so need to be passed through to
	 * xfs_ioctl_setattr_get_trans() so it can apply them to the join call
	 * appropriately.
	 */
	error = xfs_ioctl_setattr_dax_invalidate(ip, &fa, &join_flags);
	if (error)
		goto out_drop_write;

	tp = xfs_ioctl_setattr_get_trans(ip, join_flags);
	if (IS_ERR(tp)) {
		error = PTR_ERR(tp);
		goto out_drop_write;
	}

	error = xfs_ioctl_setattr_xflags(tp, ip, &fa);
	if (error) {
		xfs_trans_cancel(tp);
		goto out_drop_write;
	}

	error = xfs_trans_commit(tp);
out_drop_write:
	mnt_drop_write_file(filp);
	return error;
}

/* Formatter for XFS_IOC_GETBMAP[A]: emit one struct getbmap record at *ap. */
STATIC int
xfs_getbmap_format(void **ap, struct getbmapx *bmv)
{
	struct getbmap __user	*base = (struct getbmap __user *)*ap;

	/* copy only getbmap portion (not getbmapx) */
	if (copy_to_user(base, bmv, sizeof(struct getbmap)))
		return -EFAULT;

	*ap += sizeof(struct getbmap);
	return 0;
}

/* XFS_IOC_GETBMAP/XFS_IOC_GETBMAPA ioctls: report data/attr fork extents. */
STATIC int
xfs_ioc_getbmap(
	struct file		*file,
	unsigned int		cmd,
	void			__user *arg)
{
	struct getbmapx		bmx = { 0 };
	int			error;

	/* struct getbmap is a strict subset of struct getbmapx. */
	if (copy_from_user(&bmx, arg, offsetof(struct getbmapx, bmv_iflags)))
		return -EFAULT;

	/* one header slot plus at least one record slot */
	if (bmx.bmv_count < 2)
		return -EINVAL;

	bmx.bmv_iflags = (cmd == XFS_IOC_GETBMAPA ? BMV_IF_ATTRFORK : 0);
	if (file->f_mode & FMODE_NOCMTIME)
		bmx.bmv_iflags |= BMV_IF_NO_DMAPI_READ;

	/* records are written directly after the header in the user buffer */
	error = xfs_getbmap(XFS_I(file_inode(file)), &bmx, xfs_getbmap_format,
			    (__force struct getbmap *)arg+1);
	if (error)
		return error;

	/* copy back header - only size of getbmap */
	if (copy_to_user(arg, &bmx, sizeof(struct getbmap)))
		return -EFAULT;
	return 0;
}

/* Formatter for XFS_IOC_GETBMAPX: emit one full struct getbmapx record. */
STATIC int
xfs_getbmapx_format(void **ap, struct getbmapx *bmv)
{
	struct getbmapx __user	*base = (struct getbmapx __user *)*ap;

	if (copy_to_user(base, bmv, sizeof(struct getbmapx)))
		return -EFAULT;

	*ap += sizeof(struct getbmapx);
	return 0;
}

/* XFS_IOC_GETBMAPX ioctl: extended extent report with caller iflags. */
STATIC int
xfs_ioc_getbmapx(
	struct xfs_inode	*ip,
	void			__user *arg)
{
	struct getbmapx		bmx;
	int			error;

	if (copy_from_user(&bmx, arg, sizeof(bmx)))
		return -EFAULT;

	/* one header slot plus at least one record slot */
	if (bmx.bmv_count < 2)
		return -EINVAL;

	if (bmx.bmv_iflags & (~BMV_IF_VALID))
		return -EINVAL;

	error = xfs_getbmap(ip, &bmx, xfs_getbmapx_format,
			    (__force struct getbmapx *)arg+1);
	if (error)
		return error;

	/* copy back header */
	if (copy_to_user(arg, &bmx, sizeof(struct getbmapx)))
		return -EFAULT;

	return 0;
}

/* State shared between xfs_ioc_getfsmap() and its record formatter. */
struct getfsmap_info {
	struct xfs_mount	*mp;		/* filesystem being mapped */
	struct fsmap_head __user *data;		/* user buffer being filled */
	unsigned int		idx;		/* next record slot to fill */
	__u32			last_flags;	/* flags of last emitted rec */
};

/* Copy one reverse-mapping record out to the user's fsmap_head buffer. */
STATIC int
xfs_getfsmap_format(struct xfs_fsmap *xfm, void *priv)
{
	struct getfsmap_info	*info = priv;
	struct fsmap		fm;

	trace_xfs_getfsmap_mapping(info->mp, xfm);

	info->last_flags = xfm->fmr_flags;
	xfs_fsmap_from_internal(&fm, xfm);
	if (copy_to_user(&info->data->fmh_recs[info->idx++], &fm,
			sizeof(struct fsmap)))
		return -EFAULT;

	return 0;
}

/* FS_IOC_GETFSMAP ioctl: report the filesystem's physical extent mappings. */
STATIC int
xfs_ioc_getfsmap(
	struct xfs_inode	*ip,
	struct fsmap_head	__user *arg)
{
	struct getfsmap_info	info = { NULL };
	struct xfs_fsmap_head	xhead = {0};
	struct fsmap_head	head;
	bool			aborted = false;
	int			error;

	if (copy_from_user(&head, arg, sizeof(struct fsmap_head)))
		return -EFAULT;
	/* reserved fields must be zero so they can be used in future */
	if (memchr_inv(head.fmh_reserved, 0, sizeof(head.fmh_reserved)) ||
	    memchr_inv(head.fmh_keys[0].fmr_reserved, 0,
		       sizeof(head.fmh_keys[0].fmr_reserved)) ||
	    memchr_inv(head.fmh_keys[1].fmr_reserved, 0,
		       sizeof(head.fmh_keys[1].fmr_reserved)))
		return -EINVAL;

	xhead.fmh_iflags = head.fmh_iflags;
	xhead.fmh_count = head.fmh_count;
	xfs_fsmap_to_internal(&xhead.fmh_keys[0], &head.fmh_keys[0]);
	xfs_fsmap_to_internal(&xhead.fmh_keys[1], &head.fmh_keys[1]);

	trace_xfs_getfsmap_low_key(ip->i_mount, &xhead.fmh_keys[0]);
	trace_xfs_getfsmap_high_key(ip->i_mount, &xhead.fmh_keys[1]);

	info.mp = ip->i_mount;
	info.data = arg;
	error = xfs_getfsmap(ip->i_mount, &xhead, xfs_getfsmap_format, &info);
	if (error == XFS_BTREE_QUERY_RANGE_ABORT) {
		/* ran out of record slots; partial results are still valid */
		error = 0;
		aborted = true;
	} else if (error)
		return error;

	/* If we didn't abort, set the "last" flag in the last fmx */
	if (!aborted && info.idx) {
		info.last_flags |= FMR_OF_LAST;
		if (copy_to_user(&info.data->fmh_recs[info.idx - 1].fmr_flags,
				&info.last_flags, sizeof(info.last_flags)))
			return -EFAULT;
	}

	/* copy back header */
	head.fmh_entries = xhead.fmh_entries;
	head.fmh_oflags = xhead.fmh_oflags;
	if (copy_to_user(arg, &head, sizeof(struct fsmap_head)))
		return -EFAULT;

	return 0;
}

/*
 * XFS_IOC_SWAPEXT: swap the extents of the two files named by the fds in
 * @sxp, after validating that both are read/write, non-append, non-swapfile
 * XFS inodes on the same mount and are not the same inode.
 */
int
xfs_ioc_swapext(
	xfs_swapext_t	*sxp)
{
	xfs_inode_t     *ip, *tip;
	struct fd	f, tmp;
	int		error = 0;

	/* Pull information for the target fd */
	f = fdget((int)sxp->sx_fdtarget);
	if (!f.file) {
		error = -EINVAL;
		goto out;
	}

	if (!(f.file->f_mode & FMODE_WRITE) ||
	    !(f.file->f_mode & FMODE_READ) ||
	    (f.file->f_flags & O_APPEND)) {
		error = -EBADF;
		goto out_put_file;
	}

	tmp = fdget((int)sxp->sx_fdtmp);
	if (!tmp.file) {
		error = -EINVAL;
		goto out_put_file;
	}

	if (!(tmp.file->f_mode & FMODE_WRITE) ||
	    !(tmp.file->f_mode & FMODE_READ) ||
	    (tmp.file->f_flags & O_APPEND)) {
		error = -EBADF;
		goto out_put_tmp_file;
	}

	if (IS_SWAPFILE(file_inode(f.file)) ||
	    IS_SWAPFILE(file_inode(tmp.file))) {
		error = -EINVAL;
		goto out_put_tmp_file;
	}

	/*
	 * We need to ensure that the fds passed in point to XFS inodes
	 * before we cast and access them as XFS structures as we have no
	 * control over what the user passes us here.
	 */
	if (f.file->f_op != &xfs_file_operations ||
	    tmp.file->f_op != &xfs_file_operations) {
		error = -EINVAL;
		goto out_put_tmp_file;
	}

	ip = XFS_I(file_inode(f.file));
	tip = XFS_I(file_inode(tmp.file));

	if (ip->i_mount != tip->i_mount) {
		error = -EINVAL;
		goto out_put_tmp_file;
	}

	if (ip->i_ino == tip->i_ino) {
		error = -EINVAL;
		goto out_put_tmp_file;
	}

	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
		error = -EIO;
		goto out_put_tmp_file;
	}

	error = xfs_swap_extents(ip, tip, sxp);

 out_put_tmp_file:
	fdput(tmp);
 out_put_file:
	fdput(f);
 out:
	return error;
}

/*
 * Note: some of the ioctl's return positive numbers as a
 * byte count indicating success, such as readlink_by_handle.
 * So we don't "sign flip" like most other routines.  This means
 * true errors need to be returned as a negative value.
 */
long
xfs_file_ioctl(
	struct file		*filp,
	unsigned int		cmd,
	unsigned long		p)
{
	struct inode		*inode = file_inode(filp);
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	void			__user *arg = (void __user *)p;
	int			error;

	trace_xfs_file_ioctl(ip);

	switch (cmd) {
	case FITRIM:
		return xfs_ioc_trim(mp, arg);

	/* preallocation / space manipulation family, all take a flock64 */
	case XFS_IOC_ALLOCSP:
	case XFS_IOC_FREESP:
	case XFS_IOC_RESVSP:
	case XFS_IOC_UNRESVSP:
	case XFS_IOC_ALLOCSP64:
	case XFS_IOC_FREESP64:
	case XFS_IOC_RESVSP64:
	case XFS_IOC_UNRESVSP64:
	case XFS_IOC_ZERO_RANGE: {
		xfs_flock64_t		bf;

		if (copy_from_user(&bf, arg, sizeof(bf)))
			return -EFAULT;
		return xfs_ioc_space(filp, cmd, &bf);
	}
	case XFS_IOC_DIOINFO: {
		struct dioattr	da;
		xfs_buftarg_t	*target =
			XFS_IS_REALTIME_INODE(ip) ?
			mp->m_rtdev_targp : mp->m_ddev_targp;

		/* direct I/O alignment/size limits for this inode's device */
		da.d_mem =  da.d_miniosz = target->bt_logical_sectorsize;
		da.d_maxiosz = INT_MAX & ~(da.d_miniosz - 1);

		if (copy_to_user(arg, &da, sizeof(da)))
			return -EFAULT;
		return 0;
	}

	case XFS_IOC_FSBULKSTAT_SINGLE:
	case XFS_IOC_FSBULKSTAT:
	case XFS_IOC_FSINUMBERS:
		return xfs_ioc_bulkstat(mp, cmd, arg);

	case XFS_IOC_FSGEOMETRY_V1:
		return xfs_ioc_fsgeometry_v1(mp, arg);

	case XFS_IOC_FSGEOMETRY:
		return xfs_ioc_fsgeometry(mp, arg);

	case XFS_IOC_GETVERSION:
		return put_user(inode->i_generation, (int __user *)arg);

	case XFS_IOC_FSGETXATTR:
		return xfs_ioc_fsgetxattr(ip, 0, arg);
	case XFS_IOC_FSGETXATTRA:
		return xfs_ioc_fsgetxattr(ip, 1, arg);
	case XFS_IOC_FSSETXATTR:
		return xfs_ioc_fssetxattr(ip, filp, arg);
	case XFS_IOC_GETXFLAGS:
		return xfs_ioc_getxflags(ip, arg);
	case XFS_IOC_SETXFLAGS:
		return xfs_ioc_setxflags(ip, filp, arg);

	case XFS_IOC_FSSETDM: {
		struct fsdmidata	dmi;

		if (copy_from_user(&dmi, arg, sizeof(dmi)))
			return -EFAULT;

		error = mnt_want_write_file(filp);
		if (error)
			return error;

		error = xfs_set_dmattrs(ip, dmi.fsd_dmevmask,
				dmi.fsd_dmstate);
		mnt_drop_write_file(filp);
		return error;
	}

	case XFS_IOC_GETBMAP:
	case XFS_IOC_GETBMAPA:
		return xfs_ioc_getbmap(filp, cmd, arg);

	case XFS_IOC_GETBMAPX:
		return xfs_ioc_getbmapx(ip, arg);

	case FS_IOC_GETFSMAP:
		return xfs_ioc_getfsmap(ip, arg);

	case XFS_IOC_FD_TO_HANDLE:
	case XFS_IOC_PATH_TO_HANDLE:
	case XFS_IOC_PATH_TO_FSHANDLE: {
		xfs_fsop_handlereq_t	hreq;

		if (copy_from_user(&hreq, arg, sizeof(hreq)))
			return -EFAULT;
		return xfs_find_handle(cmd, &hreq);
	}
	case XFS_IOC_OPEN_BY_HANDLE: {
		xfs_fsop_handlereq_t	hreq;

		if (copy_from_user(&hreq, arg, sizeof(xfs_fsop_handlereq_t)))
			return -EFAULT;
		return xfs_open_by_handle(filp, &hreq);
	}
	case XFS_IOC_FSSETDM_BY_HANDLE:
		return xfs_fssetdm_by_handle(filp, arg);

	case XFS_IOC_READLINK_BY_HANDLE: {
		xfs_fsop_handlereq_t	hreq;

		if (copy_from_user(&hreq, arg, sizeof(xfs_fsop_handlereq_t)))
			return -EFAULT;
		return xfs_readlink_by_handle(filp, &hreq);
	}
	case XFS_IOC_ATTRLIST_BY_HANDLE:
		return xfs_attrlist_by_handle(filp, arg);

	case XFS_IOC_ATTRMULTI_BY_HANDLE:
		return xfs_attrmulti_by_handle(filp, arg);

	case XFS_IOC_SWAPEXT: {
		struct xfs_swapext	sxp;

		if (copy_from_user(&sxp, arg, sizeof(xfs_swapext_t)))
			return -EFAULT;
		error = mnt_want_write_file(filp);
		if (error)
			return error;
		error = xfs_ioc_swapext(&sxp);
		mnt_drop_write_file(filp);
		return error;
	}

	case XFS_IOC_FSCOUNTS: {
		xfs_fsop_counts_t out;

		error = xfs_fs_counts(mp, &out);
		if (error)
			return error;

		if (copy_to_user(arg, &out, sizeof(out)))
			return -EFAULT;
		return 0;
	}

	case XFS_IOC_SET_RESBLKS: {
		xfs_fsop_resblks_t inout;
		__uint64_t	   in;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		if (mp->m_flags & XFS_MOUNT_RDONLY)
			return -EROFS;

		if (copy_from_user(&inout, arg, sizeof(inout)))
			return -EFAULT;

		error = mnt_want_write_file(filp);
		if (error)
			return error;

		/* input parameter is passed in resblks field of structure */
		in = inout.resblks;
		error = xfs_reserve_blocks(mp, &in, &inout);
		mnt_drop_write_file(filp);
		if (error)
			return error;

		if (copy_to_user(arg, &inout, sizeof(inout)))
			return -EFAULT;
		return 0;
	}

	case XFS_IOC_GET_RESBLKS: {
		xfs_fsop_resblks_t out;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		/* NULL input means "query only, don't change the reservation" */
		error = xfs_reserve_blocks(mp, NULL, &out);
		if (error)
			return error;

		if (copy_to_user(arg, &out, sizeof(out)))
			return -EFAULT;

		return 0;
	}

	case XFS_IOC_FSGROWFSDATA: {
		xfs_growfs_data_t in;

		if (copy_from_user(&in, arg, sizeof(in)))
			return -EFAULT;

		error = mnt_want_write_file(filp);
		if (error)
			return error;
		error = xfs_growfs_data(mp, &in);
		mnt_drop_write_file(filp);
		return error;
	}

	case XFS_IOC_FSGROWFSLOG: {
		xfs_growfs_log_t in;

		if (copy_from_user(&in, arg, sizeof(in)))
			return -EFAULT;

		error = mnt_want_write_file(filp);
		if (error)
			return error;
		error = xfs_growfs_log(mp, &in);
		mnt_drop_write_file(filp);
		return error;
	}

	case XFS_IOC_FSGROWFSRT: {
		xfs_growfs_rt_t in;

		if (copy_from_user(&in, arg, sizeof(in)))
			return -EFAULT;

		error = mnt_want_write_file(filp);
		if (error)
			return error;
		error = xfs_growfs_rt(mp, &in);
		mnt_drop_write_file(filp);
		return error;
	}

	case XFS_IOC_GOINGDOWN: {
		__uint32_t in;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		if (get_user(in, (__uint32_t __user *)arg))
			return -EFAULT;

		return xfs_fs_goingdown(mp, in);
	}

	case XFS_IOC_ERROR_INJECTION: {
		xfs_error_injection_t in;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		if (copy_from_user(&in, arg, sizeof(in)))
			return -EFAULT;

		return xfs_errortag_add(in.errtag, mp);
	}

	case XFS_IOC_ERROR_CLEARALL:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		return xfs_errortag_clearall(mp, 1);

	case XFS_IOC_FREE_EOFBLOCKS: {
		struct xfs_fs_eofblocks eofb;
		struct xfs_eofblocks keofb;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		if (mp->m_flags & XFS_MOUNT_RDONLY)
			return -EROFS;

		if (copy_from_user(&eofb, arg, sizeof(eofb)))
			return -EFAULT;

		error = xfs_fs_eofblocks_from_user(&eofb, &keofb);
		if (error)
			return error;

		return xfs_icache_free_eofblocks(mp, &keofb);
	}

	default:
		return -ENOTTY;
	}
}