/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_ioctl.h"
#include "xfs_alloc.h"
#include "xfs_rtalloc.h"
#include "xfs_itable.h"
#include "xfs_error.h"
#include "xfs_attr.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_fsops.h"
#include "xfs_discard.h"
#include "xfs_quota.h"
#include "xfs_export.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_symlink.h"
#include "xfs_trans.h"
#include "xfs_pnfs.h"
#include "xfs_acl.h"

#include <linux/capability.h>
#include <linux/dcache.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/exportfs.h>

/*
 * xfs_find_handle maps from userspace xfs_fsop_handlereq structure to
 * a file or fs handle.
 *
 * XFS_IOC_PATH_TO_FSHANDLE
 *    returns fs handle for a mount point or path within that mount point
 * XFS_IOC_FD_TO_HANDLE
 *    returns full handle for a FD opened in user space
 * XFS_IOC_PATH_TO_HANDLE
 *    returns full handle for a path
 */
int
xfs_find_handle(
	unsigned int		cmd,
	xfs_fsop_handlereq_t	*hreq)
{
	int			hsize;
	xfs_handle_t		handle;
	struct inode		*inode;
	struct fd		f = {NULL};
	struct path		path;
	int			error;
	struct xfs_inode	*ip;

	if (cmd == XFS_IOC_FD_TO_HANDLE) {
		f = fdget(hreq->fd);
		if (!f.file)
			return -EBADF;
		inode = file_inode(f.file);
	} else {
		error = user_lpath((const char __user *)hreq->path, &path);
		if (error)
			return error;
		inode = d_inode(path.dentry);
	}
	ip = XFS_I(inode);

	/*
	 * We can only generate handles for inodes residing on an XFS
	 * filesystem, and only for regular files, directories or symbolic
	 * links.
	 */
	error = -EINVAL;
	if (inode->i_sb->s_magic != XFS_SB_MAGIC)
		goto out_put;

	error = -EBADF;
	if (!S_ISREG(inode->i_mode) &&
	    !S_ISDIR(inode->i_mode) &&
	    !S_ISLNK(inode->i_mode))
		goto out_put;


	memcpy(&handle.ha_fsid, ip->i_mount->m_fixedfsid, sizeof(xfs_fsid_t));

	if (cmd == XFS_IOC_PATH_TO_FSHANDLE) {
		/*
		 * This handle only contains an fsid, zero the rest.
		 */
		memset(&handle.ha_fid, 0, sizeof(handle.ha_fid));
		hsize = sizeof(xfs_fsid_t);
	} else {
		handle.ha_fid.fid_len = sizeof(xfs_fid_t) -
					sizeof(handle.ha_fid.fid_len);
		handle.ha_fid.fid_pad = 0;
		handle.ha_fid.fid_gen = inode->i_generation;
		handle.ha_fid.fid_ino = ip->i_ino;

		hsize = XFS_HSIZE(handle);
	}

	error = -EFAULT;
	if (copy_to_user(hreq->ohandle, &handle, hsize) ||
	    copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
		goto out_put;

	error = 0;

out_put:
	if (cmd == XFS_IOC_FD_TO_HANDLE)
		fdput(f);
	else
		path_put(&path);
	return error;
}

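/*
 * Example: a minimal userspace sketch of XFS_IOC_FD_TO_HANDLE, assuming the
 * xfsprogs <xfs/xfs.h> definitions of xfs_fsop_handlereq_t and xfs_handle_t
 * and an fd already open on an XFS filesystem:
 *
 *	xfs_fsop_handlereq_t	hreq = { 0 };
 *	xfs_handle_t		handle;
 *	__s32			hlen = 0;
 *
 *	hreq.fd = fd;
 *	hreq.ohandle = &handle;
 *	hreq.ohandlen = &hlen;
 *	if (ioctl(fd, XFS_IOC_FD_TO_HANDLE, &hreq) < 0)
 *		perror("XFS_IOC_FD_TO_HANDLE");
 */
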
/*
 * No need to do permission checks on the various pathname components
 * as the handle operations are privileged.
 */
STATIC int
xfs_handle_acceptable(
	void			*context,
	struct dentry		*dentry)
{
	return 1;
}

/*
 * Convert userspace handle data into a dentry.
 */
struct dentry *
xfs_handle_to_dentry(
	struct file		*parfilp,
	void __user		*uhandle,
	u32			hlen)
{
	xfs_handle_t		handle;
	struct xfs_fid64	fid;

	/*
	 * Only allow handle opens under a directory.
	 */
	if (!S_ISDIR(file_inode(parfilp)->i_mode))
		return ERR_PTR(-ENOTDIR);

	if (hlen != sizeof(xfs_handle_t))
		return ERR_PTR(-EINVAL);
	if (copy_from_user(&handle, uhandle, hlen))
		return ERR_PTR(-EFAULT);
	if (handle.ha_fid.fid_len !=
	    sizeof(handle.ha_fid) - sizeof(handle.ha_fid.fid_len))
		return ERR_PTR(-EINVAL);

	memset(&fid, 0, sizeof(struct fid));
	fid.ino = handle.ha_fid.fid_ino;
	fid.gen = handle.ha_fid.fid_gen;

	return exportfs_decode_fh(parfilp->f_path.mnt, (struct fid *)&fid, 3,
			FILEID_INO32_GEN | XFS_FILEID_TYPE_64FLAG,
			xfs_handle_acceptable, NULL);
}

STATIC struct dentry *
xfs_handlereq_to_dentry(
	struct file		*parfilp,
	xfs_fsop_handlereq_t	*hreq)
{
	return xfs_handle_to_dentry(parfilp, hreq->ihandle, hreq->ihandlen);
}

int
xfs_open_by_handle(
	struct file		*parfilp,
	xfs_fsop_handlereq_t	*hreq)
{
	const struct cred	*cred = current_cred();
	int			error;
	int			fd;
	int			permflag;
	struct file		*filp;
	struct inode		*inode;
	struct dentry		*dentry;
	fmode_t			fmode;
	struct path		path;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	dentry = xfs_handlereq_to_dentry(parfilp, hreq);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);
	inode = d_inode(dentry);

	/* Restrict xfs_open_by_handle to directories & regular files. */
	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode))) {
		error = -EPERM;
		goto out_dput;
	}

#if BITS_PER_LONG != 32
	hreq->oflags |= O_LARGEFILE;
#endif

	permflag = hreq->oflags;
	fmode = OPEN_FMODE(permflag);
	if ((!(permflag & O_APPEND) || (permflag & O_TRUNC)) &&
	    (fmode & FMODE_WRITE) && IS_APPEND(inode)) {
		error = -EPERM;
		goto out_dput;
	}

	if ((fmode & FMODE_WRITE) && IS_IMMUTABLE(inode)) {
		error = -EACCES;
		goto out_dput;
	}

	/* Can't write directories. */
	if (S_ISDIR(inode->i_mode) && (fmode & FMODE_WRITE)) {
		error = -EISDIR;
		goto out_dput;
	}

	fd = get_unused_fd_flags(0);
	if (fd < 0) {
		error = fd;
		goto out_dput;
	}

	path.mnt = parfilp->f_path.mnt;
	path.dentry = dentry;
	filp = dentry_open(&path, hreq->oflags, cred);
	dput(dentry);
	if (IS_ERR(filp)) {
		put_unused_fd(fd);
		return PTR_ERR(filp);
	}

	if (S_ISREG(inode->i_mode)) {
		filp->f_flags |= O_NOATIME;
		filp->f_mode |= FMODE_NOCMTIME;
	}

	fd_install(fd, filp);
	return fd;

out_dput:
	dput(dentry);
	return error;
}

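/*
 * Example: a minimal userspace sketch of reopening an inode from the handle
 * obtained above with XFS_IOC_OPEN_BY_HANDLE (requires CAP_SYS_ADMIN; mntfd
 * is assumed to be an open descriptor for a directory on the same filesystem,
 * which supplies the vfsmount):
 *
 *	xfs_fsop_handlereq_t	hreq = { 0 };
 *	int			newfd;
 *
 *	hreq.ihandle = &handle;
 *	hreq.ihandlen = hlen;
 *	hreq.oflags = O_RDONLY;
 *	newfd = ioctl(mntfd, XFS_IOC_OPEN_BY_HANDLE, &hreq);
 *	if (newfd < 0)
 *		perror("XFS_IOC_OPEN_BY_HANDLE");
 */
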
int
xfs_readlink_by_handle(
	struct file		*parfilp,
	xfs_fsop_handlereq_t	*hreq)
{
	struct dentry		*dentry;
	__u32			olen;
	int			error;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	dentry = xfs_handlereq_to_dentry(parfilp, hreq);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	/* Restrict this handle operation to symlinks only. */
	if (!d_inode(dentry)->i_op->readlink) {
		error = -EINVAL;
		goto out_dput;
	}

	if (copy_from_user(&olen, hreq->ohandlen, sizeof(__u32))) {
		error = -EFAULT;
		goto out_dput;
	}

	error = d_inode(dentry)->i_op->readlink(dentry, hreq->ohandle, olen);

out_dput:
	dput(dentry);
	return error;
}

int
xfs_set_dmattrs(
	xfs_inode_t	*ip,
	u_int		evmask,
	u_int16_t	state)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_trans_t	*tp;
	int		error;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0, 0, 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);

	ip->i_d.di_dmevmask = evmask;
	ip->i_d.di_dmstate = state;

	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	error = xfs_trans_commit(tp);

	return error;
}

STATIC int
xfs_fssetdm_by_handle(
	struct file		*parfilp,
	void			__user *arg)
{
	int			error;
	struct fsdmidata	fsd;
	xfs_fsop_setdm_handlereq_t dmhreq;
	struct dentry		*dentry;

	if (!capable(CAP_MKNOD))
		return -EPERM;
	if (copy_from_user(&dmhreq, arg, sizeof(xfs_fsop_setdm_handlereq_t)))
		return -EFAULT;

	error = mnt_want_write_file(parfilp);
	if (error)
		return error;

	dentry = xfs_handlereq_to_dentry(parfilp, &dmhreq.hreq);
	if (IS_ERR(dentry)) {
		mnt_drop_write_file(parfilp);
		return PTR_ERR(dentry);
	}

	if (IS_IMMUTABLE(d_inode(dentry)) || IS_APPEND(d_inode(dentry))) {
		error = -EPERM;
		goto out;
	}

	if (copy_from_user(&fsd, dmhreq.data, sizeof(fsd))) {
		error = -EFAULT;
		goto out;
	}

	error = xfs_set_dmattrs(XFS_I(d_inode(dentry)), fsd.fsd_dmevmask,
				fsd.fsd_dmstate);

out:
	mnt_drop_write_file(parfilp);
	dput(dentry);
	return error;
}

STATIC int
xfs_attrlist_by_handle(
	struct file		*parfilp,
	void			__user *arg)
{
	int			error = -ENOMEM;
	attrlist_cursor_kern_t	*cursor;
	xfs_fsop_attrlist_handlereq_t al_hreq;
	struct dentry		*dentry;
	char			*kbuf;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (copy_from_user(&al_hreq, arg, sizeof(xfs_fsop_attrlist_handlereq_t)))
		return -EFAULT;
	if (al_hreq.buflen < sizeof(struct attrlist) ||
	    al_hreq.buflen > XFS_XATTR_LIST_MAX)
		return -EINVAL;

	/*
	 * Reject flags, only allow namespaces.
	 */
	if (al_hreq.flags & ~(ATTR_ROOT | ATTR_SECURE))
		return -EINVAL;

	dentry = xfs_handlereq_to_dentry(parfilp, &al_hreq.hreq);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	kbuf = kmem_zalloc_large(al_hreq.buflen, KM_SLEEP);
	if (!kbuf)
		goto out_dput;

	cursor = (attrlist_cursor_kern_t *)&al_hreq.pos;
	error = xfs_attr_list(XFS_I(d_inode(dentry)), kbuf, al_hreq.buflen,
			      al_hreq.flags, cursor);
	if (error)
		goto out_kfree;

	if (copy_to_user(al_hreq.buffer, kbuf, al_hreq.buflen))
		error = -EFAULT;

out_kfree:
	kmem_free(kbuf);
out_dput:
	dput(dentry);
	return error;
}

int
xfs_attrmulti_attr_get(
	struct inode		*inode,
	unsigned char		*name,
	unsigned char		__user *ubuf,
	__uint32_t		*len,
	__uint32_t		flags)
{
	unsigned char		*kbuf;
	int			error = -EFAULT;

	if (*len > XFS_XATTR_SIZE_MAX)
		return -EINVAL;
	kbuf = kmem_zalloc_large(*len, KM_SLEEP);
	if (!kbuf)
		return -ENOMEM;

	error = xfs_attr_get(XFS_I(inode), name, kbuf, (int *)len, flags);
	if (error)
		goto out_kfree;

	if (copy_to_user(ubuf, kbuf, *len))
		error = -EFAULT;

out_kfree:
	kmem_free(kbuf);
	return error;
}

int
xfs_attrmulti_attr_set(
	struct inode		*inode,
	unsigned char		*name,
	const unsigned char	__user *ubuf,
	__uint32_t		len,
	__uint32_t		flags)
{
	unsigned char		*kbuf;
	int			error;

	if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
		return -EPERM;
	if (len > XFS_XATTR_SIZE_MAX)
		return -EINVAL;

	kbuf = memdup_user(ubuf, len);
	if (IS_ERR(kbuf))
		return PTR_ERR(kbuf);

	error = xfs_attr_set(XFS_I(inode), name, kbuf, len, flags);
	if (!error)
		xfs_forget_acl(inode, name, flags);
	kfree(kbuf);
	return error;
}

int
xfs_attrmulti_attr_remove(
	struct inode		*inode,
	unsigned char		*name,
	__uint32_t		flags)
{
	int			error;

	if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
		return -EPERM;
	error = xfs_attr_remove(XFS_I(inode), name, flags);
	if (!error)
		xfs_forget_acl(inode, name, flags);
	return error;
}

STATIC int
xfs_attrmulti_by_handle(
	struct file		*parfilp,
	void			__user *arg)
{
	int			error;
	xfs_attr_multiop_t	*ops;
	xfs_fsop_attrmulti_handlereq_t am_hreq;
	struct dentry		*dentry;
	unsigned int		i, size;
	unsigned char		*attr_name;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (copy_from_user(&am_hreq, arg, sizeof(xfs_fsop_attrmulti_handlereq_t)))
		return -EFAULT;

	/* overflow check */
	if (am_hreq.opcount >= INT_MAX / sizeof(xfs_attr_multiop_t))
		return -E2BIG;

	dentry = xfs_handlereq_to_dentry(parfilp, &am_hreq.hreq);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	error = -E2BIG;
	size = am_hreq.opcount * sizeof(xfs_attr_multiop_t);
	if (!size || size > 16 * PAGE_SIZE)
		goto out_dput;

	ops = memdup_user(am_hreq.ops, size);
	if (IS_ERR(ops)) {
		error = PTR_ERR(ops);
		goto out_dput;
	}

	error = -ENOMEM;
	attr_name = kmalloc(MAXNAMELEN, GFP_KERNEL);
	if (!attr_name)
		goto out_kfree_ops;

	error = 0;
	for (i = 0; i < am_hreq.opcount; i++) {
		ops[i].am_error = strncpy_from_user((char *)attr_name,
				ops[i].am_attrname, MAXNAMELEN);
		if (ops[i].am_error == 0 || ops[i].am_error == MAXNAMELEN)
			error = -ERANGE;
		if (ops[i].am_error < 0)
			break;

		switch (ops[i].am_opcode) {
		case ATTR_OP_GET:
			ops[i].am_error = xfs_attrmulti_attr_get(
					d_inode(dentry), attr_name,
					ops[i].am_attrvalue, &ops[i].am_length,
					ops[i].am_flags);
			break;
		case ATTR_OP_SET:
			ops[i].am_error = mnt_want_write_file(parfilp);
			if (ops[i].am_error)
				break;
			ops[i].am_error = xfs_attrmulti_attr_set(
					d_inode(dentry), attr_name,
					ops[i].am_attrvalue, ops[i].am_length,
					ops[i].am_flags);
			mnt_drop_write_file(parfilp);
			break;
		case ATTR_OP_REMOVE:
			ops[i].am_error = mnt_want_write_file(parfilp);
			if (ops[i].am_error)
				break;
			ops[i].am_error = xfs_attrmulti_attr_remove(
					d_inode(dentry), attr_name,
					ops[i].am_flags);
			mnt_drop_write_file(parfilp);
			break;
		default:
			ops[i].am_error = -EINVAL;
		}
	}

	if (copy_to_user(am_hreq.ops, ops, size))
		error = -EFAULT;

	kfree(attr_name);
out_kfree_ops:
	kfree(ops);
out_dput:
	dput(dentry);
	return error;
}

int
xfs_ioc_space(
	struct file		*filp,
	unsigned int		cmd,
	xfs_flock64_t		*bf)
{
	struct inode		*inode = file_inode(filp);
	struct xfs_inode	*ip = XFS_I(inode);
	struct iattr		iattr;
	enum xfs_prealloc_flags	flags = 0;
	uint			iolock = XFS_IOLOCK_EXCL;
	int			error;

	/*
	 * Only allow the sys admin to reserve space unless
	 * unwritten extents are enabled.
	 */
	if (!xfs_sb_version_hasextflgbit(&ip->i_mount->m_sb) &&
	    !capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (inode->i_flags & (S_IMMUTABLE|S_APPEND))
		return -EPERM;

	if (!(filp->f_mode & FMODE_WRITE))
		return -EBADF;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	if (filp->f_flags & O_DSYNC)
		flags |= XFS_PREALLOC_SYNC;
	if (filp->f_mode & FMODE_NOCMTIME)
		flags |= XFS_PREALLOC_INVISIBLE;

	error = mnt_want_write_file(filp);
	if (error)
		return error;

	xfs_ilock(ip, iolock);
	error = xfs_break_layouts(inode, &iolock, false);
	if (error)
		goto out_unlock;

	xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
	iolock |= XFS_MMAPLOCK_EXCL;

	switch (bf->l_whence) {
	case 0: /*SEEK_SET*/
		break;
	case 1: /*SEEK_CUR*/
		bf->l_start += filp->f_pos;
		break;
	case 2: /*SEEK_END*/
		bf->l_start += XFS_ISIZE(ip);
		break;
	default:
		error = -EINVAL;
		goto out_unlock;
	}

	/*
	 * A length of <= 0 for resv/unresv/zero is invalid.  The length for
	 * alloc/free is ignored completely and we have no idea what userspace
	 * might have set it to, so set it to zero to allow range
	 * checks to pass.
	 */
	switch (cmd) {
	case XFS_IOC_ZERO_RANGE:
	case XFS_IOC_RESVSP:
	case XFS_IOC_RESVSP64:
	case XFS_IOC_UNRESVSP:
	case XFS_IOC_UNRESVSP64:
		if (bf->l_len <= 0) {
			error = -EINVAL;
			goto out_unlock;
		}
		break;
	default:
		bf->l_len = 0;
		break;
	}

	if (bf->l_start < 0 ||
	    bf->l_start > inode->i_sb->s_maxbytes ||
	    bf->l_start + bf->l_len < 0 ||
	    bf->l_start + bf->l_len >= inode->i_sb->s_maxbytes) {
		error = -EINVAL;
		goto out_unlock;
	}

	switch (cmd) {
	case XFS_IOC_ZERO_RANGE:
		flags |= XFS_PREALLOC_SET;
		error = xfs_zero_file_space(ip, bf->l_start, bf->l_len);
		break;
	case XFS_IOC_RESVSP:
	case XFS_IOC_RESVSP64:
		flags |= XFS_PREALLOC_SET;
		error = xfs_alloc_file_space(ip, bf->l_start, bf->l_len,
						XFS_BMAPI_PREALLOC);
		break;
	case XFS_IOC_UNRESVSP:
	case XFS_IOC_UNRESVSP64:
		error = xfs_free_file_space(ip, bf->l_start, bf->l_len);
		break;
	case XFS_IOC_ALLOCSP:
	case XFS_IOC_ALLOCSP64:
	case XFS_IOC_FREESP:
	case XFS_IOC_FREESP64:
		flags |= XFS_PREALLOC_CLEAR;
		if (bf->l_start > XFS_ISIZE(ip)) {
			error = xfs_alloc_file_space(ip, XFS_ISIZE(ip),
					bf->l_start - XFS_ISIZE(ip), 0);
			if (error)
				goto out_unlock;
		}

		iattr.ia_valid = ATTR_SIZE;
		iattr.ia_size = bf->l_start;

		error = xfs_setattr_size(ip, &iattr);
		break;
	default:
		ASSERT(0);
		error = -EINVAL;
	}

	if (error)
		goto out_unlock;

	error = xfs_update_prealloc_flags(ip, flags);

out_unlock:
	xfs_iunlock(ip, iolock);
	mnt_drop_write_file(filp);
	return error;
}

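/*
 * Example: a minimal userspace sketch of preallocating 16MB at the start of a
 * file with XFS_IOC_RESVSP64, assuming the xfsprogs <xfs/xfs.h> definition of
 * xfs_flock64_t and an fd opened for writing:
 *
 *	xfs_flock64_t	bf = { 0 };
 *
 *	bf.l_whence = SEEK_SET;
 *	bf.l_start = 0;
 *	bf.l_len = 16 * 1024 * 1024;
 *	if (ioctl(fd, XFS_IOC_RESVSP64, &bf) < 0)
 *		perror("XFS_IOC_RESVSP64");
 */
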
STATIC int
xfs_ioc_bulkstat(
	xfs_mount_t		*mp,
	unsigned int		cmd,
	void			__user *arg)
{
	xfs_fsop_bulkreq_t	bulkreq;
	int			count;	/* # of records returned */
	xfs_ino_t		inlast;	/* last inode number */
	int			done;
	int			error;

	/* done = 1 if there are more stats to get and if bulkstat */
	/* should be called again (unused here, but used in dmapi) */

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	if (copy_from_user(&bulkreq, arg, sizeof(xfs_fsop_bulkreq_t)))
		return -EFAULT;

	if (copy_from_user(&inlast, bulkreq.lastip, sizeof(__s64)))
		return -EFAULT;

	if ((count = bulkreq.icount) <= 0)
		return -EINVAL;

	if (bulkreq.ubuffer == NULL)
		return -EINVAL;

	if (cmd == XFS_IOC_FSINUMBERS)
		error = xfs_inumbers(mp, &inlast, &count,
					bulkreq.ubuffer, xfs_inumbers_fmt);
	else if (cmd == XFS_IOC_FSBULKSTAT_SINGLE)
		error = xfs_bulkstat_one(mp, inlast, bulkreq.ubuffer,
					sizeof(xfs_bstat_t), NULL, &done);
	else	/* XFS_IOC_FSBULKSTAT */
		error = xfs_bulkstat(mp, &inlast, &count, xfs_bulkstat_one,
				     sizeof(xfs_bstat_t), bulkreq.ubuffer,
				     &done);

	if (error)
		return error;

	if (bulkreq.ocount != NULL) {
		if (copy_to_user(bulkreq.lastip, &inlast,
						sizeof(xfs_ino_t)))
			return -EFAULT;

		if (copy_to_user(bulkreq.ocount, &count, sizeof(count)))
			return -EFAULT;
	}

	return 0;
}

STATIC int
xfs_ioc_fsgeometry_v1(
	xfs_mount_t		*mp,
	void			__user *arg)
{
	xfs_fsop_geom_t		fsgeo;
	int			error;

	error = xfs_fs_geometry(mp, &fsgeo, 3);
	if (error)
		return error;

	/*
	 * Caller should have passed an argument of type
	 * xfs_fsop_geom_v1_t.  This is a proper subset of the
	 * xfs_fsop_geom_t that xfs_fs_geometry() fills in.
	 */
	if (copy_to_user(arg, &fsgeo, sizeof(xfs_fsop_geom_v1_t)))
		return -EFAULT;
	return 0;
}

STATIC int
xfs_ioc_fsgeometry(
	xfs_mount_t		*mp,
	void			__user *arg)
{
	xfs_fsop_geom_t		fsgeo;
	int			error;

	error = xfs_fs_geometry(mp, &fsgeo, 4);
	if (error)
		return error;

	if (copy_to_user(arg, &fsgeo, sizeof(fsgeo)))
		return -EFAULT;
	return 0;
}

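/*
 * Example: a minimal userspace sketch of reading the filesystem geometry with
 * XFS_IOC_FSGEOMETRY, assuming the xfsprogs <xfs/xfs.h> definition of
 * xfs_fsop_geom_t and an fd open anywhere on the filesystem:
 *
 *	xfs_fsop_geom_t	geo;
 *
 *	if (ioctl(fd, XFS_IOC_FSGEOMETRY, &geo) < 0)
 *		perror("XFS_IOC_FSGEOMETRY");
 *	else
 *		printf("%u-byte blocks, %u AGs\n", geo.blocksize, geo.agcount);
 */
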
/*
 * Linux extended inode flags interface.
 */

STATIC unsigned int
xfs_merge_ioc_xflags(
	unsigned int	flags,
	unsigned int	start)
{
	unsigned int	xflags = start;

	if (flags & FS_IMMUTABLE_FL)
		xflags |= FS_XFLAG_IMMUTABLE;
	else
		xflags &= ~FS_XFLAG_IMMUTABLE;
	if (flags & FS_APPEND_FL)
		xflags |= FS_XFLAG_APPEND;
	else
		xflags &= ~FS_XFLAG_APPEND;
	if (flags & FS_SYNC_FL)
		xflags |= FS_XFLAG_SYNC;
	else
		xflags &= ~FS_XFLAG_SYNC;
	if (flags & FS_NOATIME_FL)
		xflags |= FS_XFLAG_NOATIME;
	else
		xflags &= ~FS_XFLAG_NOATIME;
	if (flags & FS_NODUMP_FL)
		xflags |= FS_XFLAG_NODUMP;
	else
		xflags &= ~FS_XFLAG_NODUMP;

	return xflags;
}

STATIC unsigned int
xfs_di2lxflags(
	__uint16_t	di_flags)
{
	unsigned int	flags = 0;

	if (di_flags & XFS_DIFLAG_IMMUTABLE)
		flags |= FS_IMMUTABLE_FL;
	if (di_flags & XFS_DIFLAG_APPEND)
		flags |= FS_APPEND_FL;
	if (di_flags & XFS_DIFLAG_SYNC)
		flags |= FS_SYNC_FL;
	if (di_flags & XFS_DIFLAG_NOATIME)
		flags |= FS_NOATIME_FL;
	if (di_flags & XFS_DIFLAG_NODUMP)
		flags |= FS_NODUMP_FL;
	return flags;
}

STATIC int
xfs_ioc_fsgetxattr(
	xfs_inode_t		*ip,
	int			attr,
	void			__user *arg)
{
	struct fsxattr		fa;

	memset(&fa, 0, sizeof(struct fsxattr));

	xfs_ilock(ip, XFS_ILOCK_SHARED);
	fa.fsx_xflags = xfs_ip2xflags(ip);
	fa.fsx_extsize = ip->i_d.di_extsize << ip->i_mount->m_sb.sb_blocklog;
	fa.fsx_projid = xfs_get_projid(ip);

	if (attr) {
		if (ip->i_afp) {
			if (ip->i_afp->if_flags & XFS_IFEXTENTS)
				fa.fsx_nextents = ip->i_afp->if_bytes /
							sizeof(xfs_bmbt_rec_t);
			else
				fa.fsx_nextents = ip->i_d.di_anextents;
		} else
			fa.fsx_nextents = 0;
	} else {
		if (ip->i_df.if_flags & XFS_IFEXTENTS)
			fa.fsx_nextents = ip->i_df.if_bytes /
						sizeof(xfs_bmbt_rec_t);
		else
			fa.fsx_nextents = ip->i_d.di_nextents;
	}
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (copy_to_user(arg, &fa, sizeof(fa)))
		return -EFAULT;
	return 0;
}

STATIC void
xfs_set_diflags(
	struct xfs_inode	*ip,
	unsigned int		xflags)
{
	unsigned int		di_flags;
	uint64_t		di_flags2;

	/* can't set PREALLOC this way, just preserve it */
	di_flags = (ip->i_d.di_flags & XFS_DIFLAG_PREALLOC);
	if (xflags & FS_XFLAG_IMMUTABLE)
		di_flags |= XFS_DIFLAG_IMMUTABLE;
	if (xflags & FS_XFLAG_APPEND)
		di_flags |= XFS_DIFLAG_APPEND;
	if (xflags & FS_XFLAG_SYNC)
		di_flags |= XFS_DIFLAG_SYNC;
	if (xflags & FS_XFLAG_NOATIME)
		di_flags |= XFS_DIFLAG_NOATIME;
	if (xflags & FS_XFLAG_NODUMP)
		di_flags |= XFS_DIFLAG_NODUMP;
	if (xflags & FS_XFLAG_NODEFRAG)
		di_flags |= XFS_DIFLAG_NODEFRAG;
	if (xflags & FS_XFLAG_FILESTREAM)
		di_flags |= XFS_DIFLAG_FILESTREAM;
	if (S_ISDIR(VFS_I(ip)->i_mode)) {
		if (xflags & FS_XFLAG_RTINHERIT)
			di_flags |= XFS_DIFLAG_RTINHERIT;
		if (xflags & FS_XFLAG_NOSYMLINKS)
			di_flags |= XFS_DIFLAG_NOSYMLINKS;
		if (xflags & FS_XFLAG_EXTSZINHERIT)
			di_flags |= XFS_DIFLAG_EXTSZINHERIT;
		if (xflags & FS_XFLAG_PROJINHERIT)
			di_flags |= XFS_DIFLAG_PROJINHERIT;
	} else if (S_ISREG(VFS_I(ip)->i_mode)) {
		if (xflags & FS_XFLAG_REALTIME)
			di_flags |= XFS_DIFLAG_REALTIME;
		if (xflags & FS_XFLAG_EXTSIZE)
			di_flags |= XFS_DIFLAG_EXTSIZE;
	}
	ip->i_d.di_flags = di_flags;

	/* diflags2 only valid for v3 inodes. */
	if (ip->i_d.di_version < 3)
		return;

	di_flags2 = 0;
	if (xflags & FS_XFLAG_DAX)
		di_flags2 |= XFS_DIFLAG2_DAX;

	ip->i_d.di_flags2 = di_flags2;

}

STATIC void
xfs_diflags_to_linux(
	struct xfs_inode	*ip)
{
	struct inode		*inode = VFS_I(ip);
	unsigned int		xflags = xfs_ip2xflags(ip);

	if (xflags & FS_XFLAG_IMMUTABLE)
		inode->i_flags |= S_IMMUTABLE;
	else
		inode->i_flags &= ~S_IMMUTABLE;
	if (xflags & FS_XFLAG_APPEND)
		inode->i_flags |= S_APPEND;
	else
		inode->i_flags &= ~S_APPEND;
	if (xflags & FS_XFLAG_SYNC)
		inode->i_flags |= S_SYNC;
	else
		inode->i_flags &= ~S_SYNC;
	if (xflags & FS_XFLAG_NOATIME)
		inode->i_flags |= S_NOATIME;
	else
		inode->i_flags &= ~S_NOATIME;
	if (xflags & FS_XFLAG_DAX)
		inode->i_flags |= S_DAX;
	else
		inode->i_flags &= ~S_DAX;

}

static int
xfs_ioctl_setattr_xflags(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct fsxattr		*fa)
{
	struct xfs_mount	*mp = ip->i_mount;

	/* Can't change realtime flag if any extents are allocated. */
	if ((ip->i_d.di_nextents || ip->i_delayed_blks) &&
	    XFS_IS_REALTIME_INODE(ip) != (fa->fsx_xflags & FS_XFLAG_REALTIME))
		return -EINVAL;

	/* If realtime flag is set then must have realtime device */
	if (fa->fsx_xflags & FS_XFLAG_REALTIME) {
		if (mp->m_sb.sb_rblocks == 0 || mp->m_sb.sb_rextsize == 0 ||
		    (ip->i_d.di_extsize % mp->m_sb.sb_rextsize))
			return -EINVAL;
	}

	/*
	 * Can't modify an immutable/append-only file unless
	 * we have appropriate permission.
	 */
	if (((ip->i_d.di_flags & (XFS_DIFLAG_IMMUTABLE | XFS_DIFLAG_APPEND)) ||
	     (fa->fsx_xflags & (FS_XFLAG_IMMUTABLE | FS_XFLAG_APPEND))) &&
	    !capable(CAP_LINUX_IMMUTABLE))
		return -EPERM;

	xfs_set_diflags(ip, fa->fsx_xflags);
	xfs_diflags_to_linux(ip);
	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	XFS_STATS_INC(mp, xs_ig_attrchg);
	return 0;
}

/*
 * If we are changing DAX flags, we have to ensure the file is clean and any
 * cached objects in the address space are invalidated and removed. This
 * requires us to lock out other IO and page faults similar to a truncate
 * operation. The locks need to be held until the transaction has been
 * committed so that the cache invalidation is atomic with respect to the DAX
 * flag manipulation.
 */
static int
xfs_ioctl_setattr_dax_invalidate(
	struct xfs_inode	*ip,
	struct fsxattr		*fa,
	int			*join_flags)
{
	struct inode		*inode = VFS_I(ip);
	int			error;

	*join_flags = 0;

	/*
	 * It is only valid to set the DAX flag on regular files and
	 * directories on filesystems where the block size is equal to the page
	 * size. On directories it serves as an inherit hint.
	 */
	if (fa->fsx_xflags & FS_XFLAG_DAX) {
		if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode)))
			return -EINVAL;
		if (ip->i_mount->m_sb.sb_blocksize != PAGE_SIZE)
			return -EINVAL;
	}

	/* If the DAX state is not changing, we have nothing to do here. */
	if ((fa->fsx_xflags & FS_XFLAG_DAX) && IS_DAX(inode))
		return 0;
	if (!(fa->fsx_xflags & FS_XFLAG_DAX) && !IS_DAX(inode))
		return 0;

	/* lock, flush and invalidate mapping in preparation for flag change */
	xfs_ilock(ip, XFS_MMAPLOCK_EXCL | XFS_IOLOCK_EXCL);
	error = filemap_write_and_wait(inode->i_mapping);
	if (error)
		goto out_unlock;
	error = invalidate_inode_pages2(inode->i_mapping);
	if (error)
		goto out_unlock;

	*join_flags = XFS_MMAPLOCK_EXCL | XFS_IOLOCK_EXCL;
	return 0;

out_unlock:
	xfs_iunlock(ip, XFS_MMAPLOCK_EXCL | XFS_IOLOCK_EXCL);
	return error;

}

/*
 * Set up the transaction structure for the setattr operation, checking that we
 * have permission to do so. On success, return a clean transaction and the
 * inode locked exclusively ready for further operation specific checks. On
 * failure, return an error without modifying or locking the inode.
 *
 * The inode might already be IO locked on call. If this is the case, it is
 * indicated in @join_flags and we take full responsibility for ensuring those
 * locks are unlocked from now on. Hence if we have an error here, we still
 * have to unlock them. Otherwise, once they are joined to the transaction,
 * they will be unlocked on commit/cancel.
 */
static struct xfs_trans *
xfs_ioctl_setattr_get_trans(
	struct xfs_inode	*ip,
	int			join_flags)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error = -EROFS;

	if (mp->m_flags & XFS_MOUNT_RDONLY)
		goto out_unlock;
	error = -EIO;
	if (XFS_FORCED_SHUTDOWN(mp))
		goto out_unlock;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0, 0, 0, &tp);
	if (error)
		return ERR_PTR(error);

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL | join_flags);
	join_flags = 0;

	/*
	 * CAP_FOWNER overrides the following restrictions:
	 *
	 * The user ID of the calling process must be equal to the file owner
	 * ID, except in cases where the CAP_FSETID capability is applicable.
	 */
	if (!inode_owner_or_capable(VFS_I(ip))) {
		error = -EPERM;
		goto out_cancel;
	}

	if (mp->m_flags & XFS_MOUNT_WSYNC)
		xfs_trans_set_sync(tp);

	return tp;

out_cancel:
	xfs_trans_cancel(tp);
out_unlock:
	if (join_flags)
		xfs_iunlock(ip, join_flags);
	return ERR_PTR(error);
}

/*
 * extent size hint validation is somewhat cumbersome. Rules are:
 *
 * 1. extent size hint is only valid for directories and regular files
 * 2. FS_XFLAG_EXTSIZE is only valid for regular files
 * 3. FS_XFLAG_EXTSZINHERIT is only valid for directories.
 * 4. can only be changed on regular files if no extents are allocated
 * 5. can be changed on directories at any time
 * 6. extsize hint of 0 turns off hints, clears inode flags.
 * 7. Extent size must be a multiple of the appropriate block size.
 * 8. for non-realtime files, the extent size hint must be limited
 *    to half the AG size to avoid alignment extending the extent beyond the
 *    limits of the AG.
 */
static int
xfs_ioctl_setattr_check_extsize(
	struct xfs_inode	*ip,
	struct fsxattr		*fa)
{
	struct xfs_mount	*mp = ip->i_mount;

	if ((fa->fsx_xflags & FS_XFLAG_EXTSIZE) && !S_ISREG(VFS_I(ip)->i_mode))
		return -EINVAL;

	if ((fa->fsx_xflags & FS_XFLAG_EXTSZINHERIT) &&
	    !S_ISDIR(VFS_I(ip)->i_mode))
		return -EINVAL;

	if (S_ISREG(VFS_I(ip)->i_mode) && ip->i_d.di_nextents &&
	    ((ip->i_d.di_extsize << mp->m_sb.sb_blocklog) != fa->fsx_extsize))
		return -EINVAL;

	if (fa->fsx_extsize != 0) {
		xfs_extlen_t	size;
		xfs_fsblock_t	extsize_fsb;

		extsize_fsb = XFS_B_TO_FSB(mp, fa->fsx_extsize);
		if (extsize_fsb > MAXEXTLEN)
			return -EINVAL;

		if (XFS_IS_REALTIME_INODE(ip) ||
		    (fa->fsx_xflags & FS_XFLAG_REALTIME)) {
			size = mp->m_sb.sb_rextsize << mp->m_sb.sb_blocklog;
		} else {
			size = mp->m_sb.sb_blocksize;
			if (extsize_fsb > mp->m_sb.sb_agblocks / 2)
				return -EINVAL;
		}

		if (fa->fsx_extsize % size)
			return -EINVAL;
	} else
		fa->fsx_xflags &= ~(FS_XFLAG_EXTSIZE | FS_XFLAG_EXTSZINHERIT);

	return 0;
}

static int
xfs_ioctl_setattr_check_projid(
	struct xfs_inode	*ip,
	struct fsxattr		*fa)
{
	/* Disallow 32bit project ids if projid32bit feature is not enabled. */
	if (fa->fsx_projid > (__uint16_t)-1 &&
	    !xfs_sb_version_hasprojid32bit(&ip->i_mount->m_sb))
		return -EINVAL;

	/*
	 * Project Quota ID state is only allowed to change from within the
	 * init namespace. Enforce that restriction only if we are trying to
	 * change the quota ID state. Everything else is allowed in user
	 * namespaces.
	 */
	if (current_user_ns() == &init_user_ns)
		return 0;

	if (xfs_get_projid(ip) != fa->fsx_projid)
		return -EINVAL;
	if ((fa->fsx_xflags & FS_XFLAG_PROJINHERIT) !=
	    (ip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT))
		return -EINVAL;

	return 0;
}

STATIC int
xfs_ioctl_setattr(
	xfs_inode_t		*ip,
	struct fsxattr		*fa)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	struct xfs_dquot	*udqp = NULL;
	struct xfs_dquot	*pdqp = NULL;
	struct xfs_dquot	*olddquot = NULL;
	int			code;
	int			join_flags = 0;

	trace_xfs_ioctl_setattr(ip);

	code = xfs_ioctl_setattr_check_projid(ip, fa);
	if (code)
		return code;

	/*
	 * If disk quotas are on, we make sure that the dquots do exist on
	 * disk, before we start any other transactions. Trying to do this
	 * later is messy. We don't care to take a readlock to look at the ids
	 * in the inode here, because we can't hold it across the
	 * trans_reserve. If the IDs do change before we take the ilock, we're
	 * covered because the i_*dquot fields will get updated anyway.
	 */
	if (XFS_IS_QUOTA_ON(mp)) {
		code = xfs_qm_vop_dqalloc(ip, ip->i_d.di_uid,
					 ip->i_d.di_gid, fa->fsx_projid,
					 XFS_QMOPT_PQUOTA, &udqp, NULL, &pdqp);
		if (code)
			return code;
	}

	/*
	 * Changing DAX config may require inode locking for mapping
	 * invalidation. Those locks need to be held all the way to transaction
	 * commit or cancel time, so they need to be passed through to
	 * xfs_ioctl_setattr_get_trans() so it can apply them to the join call
	 * appropriately.
	 */
	code = xfs_ioctl_setattr_dax_invalidate(ip, fa, &join_flags);
	if (code)
		goto error_free_dquots;

	tp = xfs_ioctl_setattr_get_trans(ip, join_flags);
	if (IS_ERR(tp)) {
		code = PTR_ERR(tp);
		goto error_free_dquots;
	}


	if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_PQUOTA_ON(mp) &&
	    xfs_get_projid(ip) != fa->fsx_projid) {
		code = xfs_qm_vop_chown_reserve(tp, ip, udqp, NULL, pdqp,
				capable(CAP_FOWNER) ? XFS_QMOPT_FORCE_RES : 0);
		if (code)	/* out of quota */
			goto error_trans_cancel;
	}

	code = xfs_ioctl_setattr_check_extsize(ip, fa);
	if (code)
		goto error_trans_cancel;

	code = xfs_ioctl_setattr_xflags(tp, ip, fa);
	if (code)
		goto error_trans_cancel;

	/*
	 * Change file ownership.  Must be the owner or privileged.  CAP_FSETID
	 * overrides the following restrictions:
	 *
	 * The set-user-ID and set-group-ID bits of a file will be cleared upon
	 * successful return from chown()
	 */

	if ((VFS_I(ip)->i_mode & (S_ISUID|S_ISGID)) &&
	    !capable_wrt_inode_uidgid(VFS_I(ip), CAP_FSETID))
		VFS_I(ip)->i_mode &= ~(S_ISUID|S_ISGID);

	/* Change the ownerships and register project quota modifications */
	if (xfs_get_projid(ip) != fa->fsx_projid) {
		if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_PQUOTA_ON(mp)) {
			olddquot = xfs_qm_vop_chown(tp, ip,
						&ip->i_pdquot, pdqp);
		}
		ASSERT(ip->i_d.di_version > 1);
		xfs_set_projid(ip, fa->fsx_projid);
	}

	/*
	 * Only set the extent size hint if we've already determined that the
	 * extent size hint should be set on the inode. If no extent size flags
	 * are set on the inode then unconditionally clear the extent size hint.
	 */
	if (ip->i_d.di_flags & (XFS_DIFLAG_EXTSIZE | XFS_DIFLAG_EXTSZINHERIT))
		ip->i_d.di_extsize = fa->fsx_extsize >> mp->m_sb.sb_blocklog;
	else
		ip->i_d.di_extsize = 0;

	code = xfs_trans_commit(tp);

	/*
	 * Release any dquot(s) the inode had kept before chown.
	 */
	xfs_qm_dqrele(olddquot);
	xfs_qm_dqrele(udqp);
	xfs_qm_dqrele(pdqp);

	return code;

error_trans_cancel:
	xfs_trans_cancel(tp);
error_free_dquots:
	xfs_qm_dqrele(udqp);
	xfs_qm_dqrele(pdqp);
	return code;
}

STATIC int
xfs_ioc_fssetxattr(
	xfs_inode_t		*ip,
	struct file		*filp,
	void			__user *arg)
{
	struct fsxattr		fa;
	int			error;

	if (copy_from_user(&fa, arg, sizeof(fa)))
		return -EFAULT;

	error = mnt_want_write_file(filp);
	if (error)
		return error;
	error = xfs_ioctl_setattr(ip, &fa);
	mnt_drop_write_file(filp);
	return error;
}

STATIC int
xfs_ioc_getxflags(
	xfs_inode_t		*ip,
	void			__user *arg)
{
	unsigned int		flags;

	flags = xfs_di2lxflags(ip->i_d.di_flags);
	if (copy_to_user(arg, &flags, sizeof(flags)))
		return -EFAULT;
	return 0;
}

STATIC int
xfs_ioc_setxflags(
	struct xfs_inode	*ip,
	struct file		*filp,
	void			__user *arg)
{
	struct xfs_trans	*tp;
	struct fsxattr		fa;
	unsigned int		flags;
	int			join_flags = 0;
	int			error;

	if (copy_from_user(&flags, arg, sizeof(flags)))
		return -EFAULT;

	if (flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL | \
		      FS_NOATIME_FL | FS_NODUMP_FL | \
		      FS_SYNC_FL))
		return -EOPNOTSUPP;

	fa.fsx_xflags = xfs_merge_ioc_xflags(flags, xfs_ip2xflags(ip));

	error = mnt_want_write_file(filp);
	if (error)
		return error;

	/*
	 * Changing DAX config may require inode locking for mapping
	 * invalidation. Those locks need to be held all the way to transaction
	 * commit or cancel time, so they need to be passed through to
	 * xfs_ioctl_setattr_get_trans() so it can apply them to the join call
	 * appropriately.
	 */
	error = xfs_ioctl_setattr_dax_invalidate(ip, &fa, &join_flags);
	if (error)
		goto out_drop_write;

	tp = xfs_ioctl_setattr_get_trans(ip, join_flags);
	if (IS_ERR(tp)) {
		error = PTR_ERR(tp);
		goto out_drop_write;
	}

	error = xfs_ioctl_setattr_xflags(tp, ip, &fa);
	if (error) {
		xfs_trans_cancel(tp);
		goto out_drop_write;
	}

	error = xfs_trans_commit(tp);
out_drop_write:
	mnt_drop_write_file(filp);
	return error;
}

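/*
 * Example: a minimal userspace sketch of setting the NODUMP bit through the
 * extended flags interface above (assumes an fd on an XFS file; the flag bits
 * are the FS_*_FL values handled by xfs_ioc_getxflags/xfs_ioc_setxflags):
 *
 *	unsigned int	flags;
 *
 *	if (ioctl(fd, XFS_IOC_GETXFLAGS, &flags) < 0)
 *		perror("XFS_IOC_GETXFLAGS");
 *	flags |= FS_NODUMP_FL;
 *	if (ioctl(fd, XFS_IOC_SETXFLAGS, &flags) < 0)
 *		perror("XFS_IOC_SETXFLAGS");
 */
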
STATIC int
xfs_getbmap_format(void **ap, struct getbmapx *bmv, int *full)
{
	struct getbmap __user	*base = (struct getbmap __user *)*ap;

	/* copy only getbmap portion (not getbmapx) */
	if (copy_to_user(base, bmv, sizeof(struct getbmap)))
		return -EFAULT;

	*ap += sizeof(struct getbmap);
	return 0;
}

STATIC int
xfs_ioc_getbmap(
	struct file		*file,
	unsigned int		cmd,
	void			__user *arg)
{
	struct getbmapx		bmx;
	int			error;

	if (copy_from_user(&bmx, arg, sizeof(struct getbmapx)))
		return -EFAULT;

	if (bmx.bmv_count < 2)
		return -EINVAL;

	bmx.bmv_iflags = (cmd == XFS_IOC_GETBMAPA ? BMV_IF_ATTRFORK : 0);
	if (file->f_mode & FMODE_NOCMTIME)
		bmx.bmv_iflags |= BMV_IF_NO_DMAPI_READ;

	error = xfs_getbmap(XFS_I(file_inode(file)), &bmx, xfs_getbmap_format,
			    (__force struct getbmap *)arg+1);
	if (error)
		return error;

	/* copy back header - only size of getbmap */
	if (copy_to_user(arg, &bmx, sizeof(struct getbmap)))
		return -EFAULT;
	return 0;
}

STATIC int
xfs_getbmapx_format(void **ap, struct getbmapx *bmv, int *full)
{
	struct getbmapx __user	*base = (struct getbmapx __user *)*ap;

	if (copy_to_user(base, bmv, sizeof(struct getbmapx)))
		return -EFAULT;

	*ap += sizeof(struct getbmapx);
	return 0;
}

STATIC int
xfs_ioc_getbmapx(
	struct xfs_inode	*ip,
	void			__user *arg)
{
	struct getbmapx		bmx;
	int			error;

	if (copy_from_user(&bmx, arg, sizeof(bmx)))
		return -EFAULT;

	if (bmx.bmv_count < 2)
		return -EINVAL;

	if (bmx.bmv_iflags & (~BMV_IF_VALID))
		return -EINVAL;

	error = xfs_getbmap(ip, &bmx, xfs_getbmapx_format,
			    (__force struct getbmapx *)arg+1);
	if (error)
		return error;

	/* copy back header */
	if (copy_to_user(arg, &bmx, sizeof(struct getbmapx)))
		return -EFAULT;

	return 0;
}

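/*
 * Example: a minimal userspace sketch of dumping a file's extents with
 * XFS_IOC_GETBMAPX, assuming struct getbmapx from <xfs/xfs.h>.  The first
 * array element is the header; bmv_count includes it, and a bmv_length of -1
 * asks for the whole file (offsets/lengths are in 512-byte units):
 *
 *	struct getbmapx	map[17];
 *	int		i;
 *
 *	memset(map, 0, sizeof(map));
 *	map[0].bmv_length = -1;
 *	map[0].bmv_count = 17;
 *	if (ioctl(fd, XFS_IOC_GETBMAPX, map) < 0)
 *		perror("XFS_IOC_GETBMAPX");
 *	for (i = 1; i <= map[0].bmv_entries; i++)
 *		printf("offset %lld, length %lld\n",
 *		       (long long)map[i].bmv_offset,
 *		       (long long)map[i].bmv_length);
 */
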
int
xfs_ioc_swapext(
	xfs_swapext_t	*sxp)
{
	xfs_inode_t	*ip, *tip;
	struct fd	f, tmp;
	int		error = 0;

	/* Pull information for the target fd */
	f = fdget((int)sxp->sx_fdtarget);
	if (!f.file) {
		error = -EINVAL;
		goto out;
	}

	if (!(f.file->f_mode & FMODE_WRITE) ||
	    !(f.file->f_mode & FMODE_READ) ||
	    (f.file->f_flags & O_APPEND)) {
		error = -EBADF;
		goto out_put_file;
	}

	tmp = fdget((int)sxp->sx_fdtmp);
	if (!tmp.file) {
		error = -EINVAL;
		goto out_put_file;
	}

	if (!(tmp.file->f_mode & FMODE_WRITE) ||
	    !(tmp.file->f_mode & FMODE_READ) ||
	    (tmp.file->f_flags & O_APPEND)) {
		error = -EBADF;
		goto out_put_tmp_file;
	}

	if (IS_SWAPFILE(file_inode(f.file)) ||
	    IS_SWAPFILE(file_inode(tmp.file))) {
		error = -EINVAL;
		goto out_put_tmp_file;
	}

	/*
	 * We need to ensure that the fds passed in point to XFS inodes
	 * before we cast and access them as XFS structures as we have no
	 * control over what the user passes us here.
	 */
	if (f.file->f_op != &xfs_file_operations ||
	    tmp.file->f_op != &xfs_file_operations) {
		error = -EINVAL;
		goto out_put_tmp_file;
	}

	ip = XFS_I(file_inode(f.file));
	tip = XFS_I(file_inode(tmp.file));

	if (ip->i_mount != tip->i_mount) {
		error = -EINVAL;
		goto out_put_tmp_file;
	}

	if (ip->i_ino == tip->i_ino) {
		error = -EINVAL;
		goto out_put_tmp_file;
	}

	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
		error = -EIO;
		goto out_put_tmp_file;
	}

	error = xfs_swap_extents(ip, tip, sxp);

out_put_tmp_file:
	fdput(tmp);
out_put_file:
	fdput(f);
out:
	return error;
}

/*
 * Note: some of the ioctls return positive numbers as a
 * byte count indicating success, such as readlink_by_handle.
 * So we don't "sign flip" like most other routines.  This means
 * true errors need to be returned as a negative value.
 */
long
xfs_file_ioctl(
	struct file		*filp,
	unsigned int		cmd,
	unsigned long		p)
{
	struct inode		*inode = file_inode(filp);
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	void			__user *arg = (void __user *)p;
	int			error;

	trace_xfs_file_ioctl(ip);

	switch (cmd) {
	case FITRIM:
		return xfs_ioc_trim(mp, arg);
	case XFS_IOC_ALLOCSP:
	case XFS_IOC_FREESP:
	case XFS_IOC_RESVSP:
	case XFS_IOC_UNRESVSP:
	case XFS_IOC_ALLOCSP64:
	case XFS_IOC_FREESP64:
	case XFS_IOC_RESVSP64:
	case XFS_IOC_UNRESVSP64:
	case XFS_IOC_ZERO_RANGE: {
		xfs_flock64_t		bf;

		if (copy_from_user(&bf, arg, sizeof(bf)))
			return -EFAULT;
		return xfs_ioc_space(filp, cmd, &bf);
	}
	case XFS_IOC_DIOINFO: {
		struct dioattr		da;
		xfs_buftarg_t		*target =
			XFS_IS_REALTIME_INODE(ip) ?
			mp->m_rtdev_targp : mp->m_ddev_targp;

		da.d_mem = da.d_miniosz = target->bt_logical_sectorsize;
		da.d_maxiosz = INT_MAX & ~(da.d_miniosz - 1);

		if (copy_to_user(arg, &da, sizeof(da)))
			return -EFAULT;
		return 0;
	}

	case XFS_IOC_FSBULKSTAT_SINGLE:
	case XFS_IOC_FSBULKSTAT:
	case XFS_IOC_FSINUMBERS:
		return xfs_ioc_bulkstat(mp, cmd, arg);

	case XFS_IOC_FSGEOMETRY_V1:
		return xfs_ioc_fsgeometry_v1(mp, arg);

	case XFS_IOC_FSGEOMETRY:
		return xfs_ioc_fsgeometry(mp, arg);

	case XFS_IOC_GETVERSION:
		return put_user(inode->i_generation, (int __user *)arg);

	case XFS_IOC_FSGETXATTR:
		return xfs_ioc_fsgetxattr(ip, 0, arg);
	case XFS_IOC_FSGETXATTRA:
		return xfs_ioc_fsgetxattr(ip, 1, arg);
	case XFS_IOC_FSSETXATTR:
		return xfs_ioc_fssetxattr(ip, filp, arg);
	case XFS_IOC_GETXFLAGS:
		return xfs_ioc_getxflags(ip, arg);
	case XFS_IOC_SETXFLAGS:
		return xfs_ioc_setxflags(ip, filp, arg);

	case XFS_IOC_FSSETDM: {
		struct fsdmidata	dmi;

		if (copy_from_user(&dmi, arg, sizeof(dmi)))
			return -EFAULT;

		error = mnt_want_write_file(filp);
		if (error)
			return error;

		error = xfs_set_dmattrs(ip, dmi.fsd_dmevmask,
				dmi.fsd_dmstate);
		mnt_drop_write_file(filp);
		return error;
	}

	case XFS_IOC_GETBMAP:
	case XFS_IOC_GETBMAPA:
		return xfs_ioc_getbmap(filp, cmd, arg);

	case XFS_IOC_GETBMAPX:
		return xfs_ioc_getbmapx(ip, arg);

	case XFS_IOC_FD_TO_HANDLE:
	case XFS_IOC_PATH_TO_HANDLE:
	case XFS_IOC_PATH_TO_FSHANDLE: {
		xfs_fsop_handlereq_t	hreq;

		if (copy_from_user(&hreq, arg, sizeof(hreq)))
			return -EFAULT;
		return xfs_find_handle(cmd, &hreq);
	}
	case XFS_IOC_OPEN_BY_HANDLE: {
		xfs_fsop_handlereq_t	hreq;

		if (copy_from_user(&hreq, arg, sizeof(xfs_fsop_handlereq_t)))
			return -EFAULT;
		return xfs_open_by_handle(filp, &hreq);
	}
	case XFS_IOC_FSSETDM_BY_HANDLE:
		return xfs_fssetdm_by_handle(filp, arg);

	case XFS_IOC_READLINK_BY_HANDLE: {
		xfs_fsop_handlereq_t	hreq;

		if (copy_from_user(&hreq, arg, sizeof(xfs_fsop_handlereq_t)))
			return -EFAULT;
		return xfs_readlink_by_handle(filp, &hreq);
	}
	case XFS_IOC_ATTRLIST_BY_HANDLE:
		return xfs_attrlist_by_handle(filp, arg);

	case XFS_IOC_ATTRMULTI_BY_HANDLE:
		return xfs_attrmulti_by_handle(filp, arg);

	case XFS_IOC_SWAPEXT: {
		struct xfs_swapext	sxp;

		if (copy_from_user(&sxp, arg, sizeof(xfs_swapext_t)))
			return -EFAULT;
		error = mnt_want_write_file(filp);
		if (error)
			return error;
		error = xfs_ioc_swapext(&sxp);
		mnt_drop_write_file(filp);
		return error;
	}

	case XFS_IOC_FSCOUNTS: {
		xfs_fsop_counts_t out;

		error = xfs_fs_counts(mp, &out);
		if (error)
			return error;

		if (copy_to_user(arg, &out, sizeof(out)))
			return -EFAULT;
		return 0;
	}

	case XFS_IOC_SET_RESBLKS: {
		xfs_fsop_resblks_t inout;
		__uint64_t	   in;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		if (mp->m_flags & XFS_MOUNT_RDONLY)
			return -EROFS;

		if (copy_from_user(&inout, arg, sizeof(inout)))
			return -EFAULT;

		error = mnt_want_write_file(filp);
		if (error)
			return error;

		/* input parameter is passed in resblks field of structure */
		in = inout.resblks;
		error = xfs_reserve_blocks(mp, &in, &inout);
		mnt_drop_write_file(filp);
		if (error)
			return error;

		if (copy_to_user(arg, &inout, sizeof(inout)))
			return -EFAULT;
		return 0;
	}

	case XFS_IOC_GET_RESBLKS: {
		xfs_fsop_resblks_t out;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		error = xfs_reserve_blocks(mp, NULL, &out);
		if (error)
			return error;

		if (copy_to_user(arg, &out, sizeof(out)))
			return -EFAULT;

		return 0;
	}

	case XFS_IOC_FSGROWFSDATA: {
		xfs_growfs_data_t in;

		if (copy_from_user(&in, arg, sizeof(in)))
			return -EFAULT;

		error = mnt_want_write_file(filp);
		if (error)
			return error;
		error = xfs_growfs_data(mp, &in);
		mnt_drop_write_file(filp);
		return error;
	}

	case XFS_IOC_FSGROWFSLOG: {
		xfs_growfs_log_t in;

		if (copy_from_user(&in, arg, sizeof(in)))
			return -EFAULT;

		error = mnt_want_write_file(filp);
		if (error)
			return error;
		error = xfs_growfs_log(mp, &in);
		mnt_drop_write_file(filp);
		return error;
	}

	case XFS_IOC_FSGROWFSRT: {
		xfs_growfs_rt_t in;

		if (copy_from_user(&in, arg, sizeof(in)))
			return -EFAULT;

		error = mnt_want_write_file(filp);
		if (error)
			return error;
		error = xfs_growfs_rt(mp, &in);
		mnt_drop_write_file(filp);
		return error;
	}

	case XFS_IOC_GOINGDOWN: {
		__uint32_t in;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		if (get_user(in, (__uint32_t __user *)arg))
			return -EFAULT;

		return xfs_fs_goingdown(mp, in);
	}

	case XFS_IOC_ERROR_INJECTION: {
		xfs_error_injection_t in;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		if (copy_from_user(&in, arg, sizeof(in)))
			return -EFAULT;

		return xfs_errortag_add(in.errtag, mp);
	}

	case XFS_IOC_ERROR_CLEARALL:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		return xfs_errortag_clearall(mp, 1);

	case XFS_IOC_FREE_EOFBLOCKS: {
		struct xfs_fs_eofblocks eofb;
		struct xfs_eofblocks keofb;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		if (mp->m_flags & XFS_MOUNT_RDONLY)
			return -EROFS;

		if (copy_from_user(&eofb, arg, sizeof(eofb)))
			return -EFAULT;

		error = xfs_fs_eofblocks_from_user(&eofb, &keofb);
		if (error)
			return error;

		return xfs_icache_free_eofblocks(mp, &keofb);
	}

	default:
		return -ENOTTY;
	}
}