// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/stat.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/blkdev.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/highuid.h>
#include <linux/fs.h>
#include <linux/namei.h>
#include <linux/security.h>
#include <linux/cred.h>
#include <linux/syscalls.h>
#include <linux/pagemap.h>
#include <linux/compat.h>

#include <linux/uaccess.h>
#include <asm/unistd.h>

#include "internal.h"
#include "mount.h"

/**
 * generic_fillattr - Fill in the basic attributes from the inode struct
 * @mnt_userns: user namespace of the mount the inode was found from
 * @inode: Inode to use as the source
 * @stat: Where to fill in the attributes
 *
 * Fill in the basic attributes in the kstat structure from data that's to be
 * found on the VFS inode structure.  This is the default if no getattr inode
 * operation is supplied.
 *
 * If the inode has been found through an idmapped mount the user namespace of
 * the vfsmount must be passed through @mnt_userns. This function will then
 * take care to map the inode according to @mnt_userns before filling in the
 * uid and gid fields. On non-idmapped mounts or if permission checking is to
 * be performed on the raw inode simply pass init_user_ns.
 */
void generic_fillattr(struct user_namespace *mnt_userns, struct inode *inode,
		      struct kstat *stat)
{
	vfsuid_t vfsuid = i_uid_into_vfsuid(mnt_userns, inode);
	vfsgid_t vfsgid = i_gid_into_vfsgid(mnt_userns, inode);

	stat->dev = inode->i_sb->s_dev;
	stat->ino = inode->i_ino;
	stat->mode = inode->i_mode;
	stat->nlink = inode->i_nlink;
	stat->uid = vfsuid_into_kuid(vfsuid);
	stat->gid = vfsgid_into_kgid(vfsgid);
	stat->rdev = inode->i_rdev;
	stat->size = i_size_read(inode);
	stat->atime = inode->i_atime;
	stat->mtime = inode->i_mtime;
	stat->ctime = inode->i_ctime;
	stat->blksize = i_blocksize(inode);
	stat->blocks = inode->i_blocks;
}
EXPORT_SYMBOL(generic_fillattr);

/**
 * generic_fill_statx_attr - Fill in the statx attributes from the inode flags
 * @inode: Inode to use as the source
 * @stat: Where to fill in the attribute flags
 *
 * Fill in the STATX_ATTR_* flags in the kstat structure for properties of the
 * inode that are published on i_flags and enforced by the VFS.
 */
void generic_fill_statx_attr(struct inode *inode, struct kstat *stat)
{
	if (inode->i_flags & S_IMMUTABLE)
		stat->attributes |= STATX_ATTR_IMMUTABLE;
	if (inode->i_flags & S_APPEND)
		stat->attributes |= STATX_ATTR_APPEND;
	stat->attributes_mask |= KSTAT_ATTR_VFS_FLAGS;
}
EXPORT_SYMBOL(generic_fill_statx_attr);

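/*
 * Illustrative sketch (not used by this file): a minimal ->getattr
 * implementation for a hypothetical filesystem could delegate the basic
 * attributes to generic_fillattr() and the VFS-enforced attribute flags to
 * generic_fill_statx_attr(), then override individual fields as needed.
 * The filesystem and function names below are made up for illustration.
 *
 *	static int foofs_getattr(struct user_namespace *mnt_userns,
 *				 const struct path *path, struct kstat *stat,
 *				 u32 request_mask, unsigned int query_flags)
 *	{
 *		struct inode *inode = d_inode(path->dentry);
 *
 *		generic_fillattr(mnt_userns, inode, stat);
 *		generic_fill_statx_attr(inode, stat);
 *		return 0;
 *	}
 */
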
/**
 * vfs_getattr_nosec - getattr without security checks
 * @path: file to get attributes from
 * @stat: structure to return attributes in
 * @request_mask: STATX_xxx flags indicating what the caller wants
 * @query_flags: Query mode (AT_STATX_SYNC_TYPE)
 *
 * Get attributes without calling security_inode_getattr.
 *
 * Currently the only caller other than vfs_getattr is internal to the
 * filehandle lookup code, which uses only the inode number and returns no
 * attributes to any user.  Any other code probably wants vfs_getattr.
 */
int vfs_getattr_nosec(const struct path *path, struct kstat *stat,
		      u32 request_mask, unsigned int query_flags)
{
	struct user_namespace *mnt_userns;
	struct inode *inode = d_backing_inode(path->dentry);

	memset(stat, 0, sizeof(*stat));
	stat->result_mask |= STATX_BASIC_STATS;
	query_flags &= AT_STATX_SYNC_TYPE;

	/* allow the fs to override these if it really wants to */
	/* SB_NOATIME means filesystem supplies dummy atime value */
	if (inode->i_sb->s_flags & SB_NOATIME)
		stat->result_mask &= ~STATX_ATIME;

	/*
	 * Note: If you add another clause to set an attribute flag, please
	 * update attributes_mask below.
	 */
	if (IS_AUTOMOUNT(inode))
		stat->attributes |= STATX_ATTR_AUTOMOUNT;

	if (IS_DAX(inode))
		stat->attributes |= STATX_ATTR_DAX;

	stat->attributes_mask |= (STATX_ATTR_AUTOMOUNT |
				  STATX_ATTR_DAX);

	mnt_userns = mnt_user_ns(path->mnt);
	if (inode->i_op->getattr)
		return inode->i_op->getattr(mnt_userns, path, stat,
					    request_mask, query_flags);

	generic_fillattr(mnt_userns, inode, stat);
	return 0;
}
EXPORT_SYMBOL(vfs_getattr_nosec);

/**
 * vfs_getattr - Get the enhanced basic attributes of a file
 * @path: The file of interest
 * @stat: Where to return the statistics
 * @request_mask: STATX_xxx flags indicating what the caller wants
 * @query_flags: Query mode (AT_STATX_SYNC_TYPE)
 *
 * Ask the filesystem for a file's attributes.  The caller must indicate in
 * request_mask and query_flags what they want.
 *
 * If the file is remote, the filesystem can be forced to update the attributes
 * from the backing store by passing AT_STATX_FORCE_SYNC in query_flags or can
 * suppress the update by passing AT_STATX_DONT_SYNC.
 *
 * Bits must have been set in request_mask to indicate which attributes the
 * caller wants retrieved.  Attributes that were not requested may still be
 * returned, but their values may be approximate and, if remote, may not have
 * been synchronised with the server.
 *
 * 0 will be returned on success, and a -ve error code if unsuccessful.
 */
int vfs_getattr(const struct path *path, struct kstat *stat,
		u32 request_mask, unsigned int query_flags)
{
	int retval;

	retval = security_inode_getattr(path);
	if (retval)
		return retval;
	return vfs_getattr_nosec(path, stat, request_mask, query_flags);
}
EXPORT_SYMBOL(vfs_getattr);

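/*
 * Illustrative sketch (not part of this file): a kernel-internal caller that
 * already holds a struct path and only needs the file size could query it
 * through vfs_getattr() as shown below; the helper name is hypothetical.
 *
 *	static int example_path_size(const struct path *path, loff_t *size)
 *	{
 *		struct kstat stat;
 *		int error;
 *
 *		error = vfs_getattr(path, &stat, STATX_SIZE,
 *				    AT_STATX_SYNC_AS_STAT);
 *		if (error)
 *			return error;
 *		*size = stat.size;
 *		return 0;
 *	}
 */
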
/**
 * vfs_fstat - Get the basic attributes by file descriptor
 * @fd: The file descriptor referring to the file of interest
 * @stat: The result structure to fill in.
 *
 * This function is a wrapper around vfs_getattr().  The main difference is
 * that it uses a file descriptor to determine the file location.
 *
 * 0 will be returned on success, and a -ve error code if unsuccessful.
 */
int vfs_fstat(int fd, struct kstat *stat)
{
	struct fd f;
	int error;

	f = fdget_raw(fd);
	if (!f.file)
		return -EBADF;
	error = vfs_getattr(&f.file->f_path, stat, STATX_BASIC_STATS, 0);
	fdput(f);
	return error;
}

int getname_statx_lookup_flags(int flags)
{
	int lookup_flags = 0;

	if (!(flags & AT_SYMLINK_NOFOLLOW))
		lookup_flags |= LOOKUP_FOLLOW;
	if (!(flags & AT_NO_AUTOMOUNT))
		lookup_flags |= LOOKUP_AUTOMOUNT;
	if (flags & AT_EMPTY_PATH)
		lookup_flags |= LOOKUP_EMPTY;

	return lookup_flags;
}

/**
 * vfs_statx - Get basic and extra attributes by filename
 * @dfd: A file descriptor representing the base dir for a relative filename
 * @filename: The name of the file of interest
 * @flags: Flags to control the query
 * @stat: The result structure to fill in.
 * @request_mask: STATX_xxx flags indicating what the caller wants
 *
 * This function is a wrapper around vfs_getattr().  The main difference is
 * that it uses a filename and base directory to determine the file location.
 * Additionally, the use of AT_SYMLINK_NOFOLLOW in flags will prevent a symlink
 * at the given name from being referenced.
 *
 * 0 will be returned on success, and a -ve error code if unsuccessful.
 */
static int vfs_statx(int dfd, struct filename *filename, int flags,
		     struct kstat *stat, u32 request_mask)
{
	struct path path;
	unsigned int lookup_flags = getname_statx_lookup_flags(flags);
	int error;

	if (flags & ~(AT_SYMLINK_NOFOLLOW | AT_NO_AUTOMOUNT | AT_EMPTY_PATH |
		      AT_STATX_SYNC_TYPE))
		return -EINVAL;

retry:
	error = filename_lookup(dfd, filename, lookup_flags, &path, NULL);
	if (error)
		goto out;

	error = vfs_getattr(&path, stat, request_mask, flags);

	stat->mnt_id = real_mount(path.mnt)->mnt_id;
	stat->result_mask |= STATX_MNT_ID;

	if (path.mnt->mnt_root == path.dentry)
		stat->attributes |= STATX_ATTR_MOUNT_ROOT;
	stat->attributes_mask |= STATX_ATTR_MOUNT_ROOT;

	/* Handle STATX_DIOALIGN for block devices. */
	if (request_mask & STATX_DIOALIGN) {
		struct inode *inode = d_backing_inode(path.dentry);

		if (S_ISBLK(inode->i_mode))
			bdev_statx_dioalign(inode, stat);
	}

	path_put(&path);
	if (retry_estale(error, lookup_flags)) {
		lookup_flags |= LOOKUP_REVAL;
		goto retry;
	}
out:
	return error;
}

int vfs_fstatat(int dfd, const char __user *filename,
		struct kstat *stat, int flags)
{
	int ret;
	int statx_flags = flags | AT_NO_AUTOMOUNT;
	struct filename *name;

	name = getname_flags(filename, getname_statx_lookup_flags(statx_flags), NULL);
	ret = vfs_statx(dfd, name, statx_flags, stat, STATX_BASIC_STATS);
	putname(name);

	return ret;
}

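/*
 * Worked example (illustrative only): vfs_lstat() reaches vfs_fstatat() with
 * AT_SYMLINK_NOFOLLOW, and vfs_fstatat() ORs in AT_NO_AUTOMOUNT, so
 *
 *	getname_statx_lookup_flags(AT_SYMLINK_NOFOLLOW | AT_NO_AUTOMOUNT) == 0
 *
 * LOOKUP_FOLLOW stays clear because AT_SYMLINK_NOFOLLOW is set,
 * LOOKUP_AUTOMOUNT because AT_NO_AUTOMOUNT is set, and LOOKUP_EMPTY because
 * AT_EMPTY_PATH is absent: a plain no-follow, no-automount walk.
 */
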
#ifdef __ARCH_WANT_OLD_STAT

/*
 * For backward compatibility?  Maybe this should be moved
 * into arch/i386 instead?
 */
static int cp_old_stat(struct kstat *stat, struct __old_kernel_stat __user * statbuf)
{
	static int warncount = 5;
	struct __old_kernel_stat tmp;

	if (warncount > 0) {
		warncount--;
		printk(KERN_WARNING "VFS: Warning: %s using old stat() call. Recompile your binary.\n",
			current->comm);
	} else if (warncount < 0) {
		/* it's laughable, but... */
		warncount = 0;
	}

	memset(&tmp, 0, sizeof(struct __old_kernel_stat));
	tmp.st_dev = old_encode_dev(stat->dev);
	tmp.st_ino = stat->ino;
	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
		return -EOVERFLOW;
	tmp.st_mode = stat->mode;
	tmp.st_nlink = stat->nlink;
	if (tmp.st_nlink != stat->nlink)
		return -EOVERFLOW;
	SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
	SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
	tmp.st_rdev = old_encode_dev(stat->rdev);
#if BITS_PER_LONG == 32
	if (stat->size > MAX_NON_LFS)
		return -EOVERFLOW;
#endif
	tmp.st_size = stat->size;
	tmp.st_atime = stat->atime.tv_sec;
	tmp.st_mtime = stat->mtime.tv_sec;
	tmp.st_ctime = stat->ctime.tv_sec;
	return copy_to_user(statbuf, &tmp, sizeof(tmp)) ? -EFAULT : 0;
}

SYSCALL_DEFINE2(stat, const char __user *, filename,
		struct __old_kernel_stat __user *, statbuf)
{
	struct kstat stat;
	int error;

	error = vfs_stat(filename, &stat);
	if (error)
		return error;

	return cp_old_stat(&stat, statbuf);
}

SYSCALL_DEFINE2(lstat, const char __user *, filename,
		struct __old_kernel_stat __user *, statbuf)
{
	struct kstat stat;
	int error;

	error = vfs_lstat(filename, &stat);
	if (error)
		return error;

	return cp_old_stat(&stat, statbuf);
}

SYSCALL_DEFINE2(fstat, unsigned int, fd, struct __old_kernel_stat __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_fstat(fd, &stat);

	if (!error)
		error = cp_old_stat(&stat, statbuf);

	return error;
}

#endif /* __ARCH_WANT_OLD_STAT */

#ifdef __ARCH_WANT_NEW_STAT

#if BITS_PER_LONG == 32
# define choose_32_64(a,b) a
#else
# define choose_32_64(a,b) b
#endif

#ifndef INIT_STRUCT_STAT_PADDING
# define INIT_STRUCT_STAT_PADDING(st) memset(&st, 0, sizeof(st))
#endif

static int cp_new_stat(struct kstat *stat, struct stat __user *statbuf)
{
	struct stat tmp;

	if (sizeof(tmp.st_dev) < 4 && !old_valid_dev(stat->dev))
		return -EOVERFLOW;
	if (sizeof(tmp.st_rdev) < 4 && !old_valid_dev(stat->rdev))
		return -EOVERFLOW;
#if BITS_PER_LONG == 32
	if (stat->size > MAX_NON_LFS)
		return -EOVERFLOW;
#endif

	INIT_STRUCT_STAT_PADDING(tmp);
	tmp.st_dev = new_encode_dev(stat->dev);
	tmp.st_ino = stat->ino;
	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
		return -EOVERFLOW;
	tmp.st_mode = stat->mode;
	tmp.st_nlink = stat->nlink;
	if (tmp.st_nlink != stat->nlink)
		return -EOVERFLOW;
	SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
	SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
	tmp.st_rdev = new_encode_dev(stat->rdev);
	tmp.st_size = stat->size;
	tmp.st_atime = stat->atime.tv_sec;
	tmp.st_mtime = stat->mtime.tv_sec;
	tmp.st_ctime = stat->ctime.tv_sec;
#ifdef STAT_HAVE_NSEC
	tmp.st_atime_nsec = stat->atime.tv_nsec;
	tmp.st_mtime_nsec = stat->mtime.tv_nsec;
	tmp.st_ctime_nsec = stat->ctime.tv_nsec;
#endif
	tmp.st_blocks = stat->blocks;
	tmp.st_blksize = stat->blksize;
	return copy_to_user(statbuf, &tmp, sizeof(tmp)) ? -EFAULT : 0;
}

SYSCALL_DEFINE2(newstat, const char __user *, filename,
		struct stat __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_stat(filename, &stat);

	if (error)
		return error;
	return cp_new_stat(&stat, statbuf);
}

SYSCALL_DEFINE2(newlstat, const char __user *, filename,
		struct stat __user *, statbuf)
{
	struct kstat stat;
	int error;

	error = vfs_lstat(filename, &stat);
	if (error)
		return error;

	return cp_new_stat(&stat, statbuf);
}

#if !defined(__ARCH_WANT_STAT64) || defined(__ARCH_WANT_SYS_NEWFSTATAT)
SYSCALL_DEFINE4(newfstatat, int, dfd, const char __user *, filename,
		struct stat __user *, statbuf, int, flag)
{
	struct kstat stat;
	int error;

	error = vfs_fstatat(dfd, filename, &stat, flag);
	if (error)
		return error;
	return cp_new_stat(&stat, statbuf);
}
#endif

SYSCALL_DEFINE2(newfstat, unsigned int, fd, struct stat __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_fstat(fd, &stat);

	if (!error)
		error = cp_new_stat(&stat, statbuf);

	return error;
}
#endif

static int do_readlinkat(int dfd, const char __user *pathname,
			 char __user *buf, int bufsiz)
{
	struct path path;
	int error;
	int empty = 0;
	unsigned int lookup_flags = LOOKUP_EMPTY;

	if (bufsiz <= 0)
		return -EINVAL;

retry:
	error = user_path_at_empty(dfd, pathname, lookup_flags, &path, &empty);
	if (!error) {
		struct inode *inode = d_backing_inode(path.dentry);

		error = empty ? -ENOENT : -EINVAL;
		/*
		 * AFS mountpoints allow readlink(2) but are not symlinks
		 */
		if (d_is_symlink(path.dentry) || inode->i_op->readlink) {
			error = security_inode_readlink(path.dentry);
			if (!error) {
				touch_atime(&path);
				error = vfs_readlink(path.dentry, buf, bufsiz);
			}
		}
		path_put(&path);
		if (retry_estale(error, lookup_flags)) {
			lookup_flags |= LOOKUP_REVAL;
			goto retry;
		}
	}
	return error;
}

SYSCALL_DEFINE4(readlinkat, int, dfd, const char __user *, pathname,
		char __user *, buf, int, bufsiz)
{
	return do_readlinkat(dfd, pathname, buf, bufsiz);
}

SYSCALL_DEFINE3(readlink, const char __user *, path, char __user *, buf,
		int, bufsiz)
{
	return do_readlinkat(AT_FDCWD, path, buf, bufsiz);
}


/* ---------- LFS-64 ----------- */
#if defined(__ARCH_WANT_STAT64) || defined(__ARCH_WANT_COMPAT_STAT64)

#ifndef INIT_STRUCT_STAT64_PADDING
# define INIT_STRUCT_STAT64_PADDING(st) memset(&st, 0, sizeof(st))
#endif

static long cp_new_stat64(struct kstat *stat, struct stat64 __user *statbuf)
{
	struct stat64 tmp;

	INIT_STRUCT_STAT64_PADDING(tmp);
#ifdef CONFIG_MIPS
	/* mips has weird padding, so we don't get 64 bits there */
	tmp.st_dev = new_encode_dev(stat->dev);
	tmp.st_rdev = new_encode_dev(stat->rdev);
#else
	tmp.st_dev = huge_encode_dev(stat->dev);
	tmp.st_rdev = huge_encode_dev(stat->rdev);
#endif
	tmp.st_ino = stat->ino;
	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
		return -EOVERFLOW;
#ifdef STAT64_HAS_BROKEN_ST_INO
	tmp.__st_ino = stat->ino;
#endif
	tmp.st_mode = stat->mode;
	tmp.st_nlink = stat->nlink;
	tmp.st_uid = from_kuid_munged(current_user_ns(), stat->uid);
	tmp.st_gid = from_kgid_munged(current_user_ns(), stat->gid);
	tmp.st_atime = stat->atime.tv_sec;
	tmp.st_atime_nsec = stat->atime.tv_nsec;
	tmp.st_mtime = stat->mtime.tv_sec;
	tmp.st_mtime_nsec = stat->mtime.tv_nsec;
	tmp.st_ctime = stat->ctime.tv_sec;
	tmp.st_ctime_nsec = stat->ctime.tv_nsec;
	tmp.st_size = stat->size;
	tmp.st_blocks = stat->blocks;
	tmp.st_blksize = stat->blksize;
	return copy_to_user(statbuf, &tmp, sizeof(tmp)) ? -EFAULT : 0;
}

SYSCALL_DEFINE2(stat64, const char __user *, filename,
		struct stat64 __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_stat(filename, &stat);

	if (!error)
		error = cp_new_stat64(&stat, statbuf);

	return error;
}

SYSCALL_DEFINE2(lstat64, const char __user *, filename,
		struct stat64 __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_lstat(filename, &stat);

	if (!error)
		error = cp_new_stat64(&stat, statbuf);

	return error;
}

SYSCALL_DEFINE2(fstat64, unsigned long, fd, struct stat64 __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_fstat(fd, &stat);

	if (!error)
		error = cp_new_stat64(&stat, statbuf);

	return error;
}

SYSCALL_DEFINE4(fstatat64, int, dfd, const char __user *, filename,
		struct stat64 __user *, statbuf, int, flag)
{
	struct kstat stat;
	int error;

	error = vfs_fstatat(dfd, filename, &stat, flag);
	if (error)
		return error;
	return cp_new_stat64(&stat, statbuf);
}
#endif /* __ARCH_WANT_STAT64 || __ARCH_WANT_COMPAT_STAT64 */

static noinline_for_stack int
cp_statx(const struct kstat *stat, struct statx __user *buffer)
{
	struct statx tmp;

	memset(&tmp, 0, sizeof(tmp));

	tmp.stx_mask = stat->result_mask;
	tmp.stx_blksize = stat->blksize;
	tmp.stx_attributes = stat->attributes;
	tmp.stx_nlink = stat->nlink;
	tmp.stx_uid = from_kuid_munged(current_user_ns(), stat->uid);
	tmp.stx_gid = from_kgid_munged(current_user_ns(), stat->gid);
	tmp.stx_mode = stat->mode;
	tmp.stx_ino = stat->ino;
	tmp.stx_size = stat->size;
	tmp.stx_blocks = stat->blocks;
	tmp.stx_attributes_mask = stat->attributes_mask;
	tmp.stx_atime.tv_sec = stat->atime.tv_sec;
	tmp.stx_atime.tv_nsec = stat->atime.tv_nsec;
	tmp.stx_btime.tv_sec = stat->btime.tv_sec;
	tmp.stx_btime.tv_nsec = stat->btime.tv_nsec;
	tmp.stx_ctime.tv_sec = stat->ctime.tv_sec;
	tmp.stx_ctime.tv_nsec = stat->ctime.tv_nsec;
	tmp.stx_mtime.tv_sec = stat->mtime.tv_sec;
	tmp.stx_mtime.tv_nsec = stat->mtime.tv_nsec;
	tmp.stx_rdev_major = MAJOR(stat->rdev);
	tmp.stx_rdev_minor = MINOR(stat->rdev);
	tmp.stx_dev_major = MAJOR(stat->dev);
	tmp.stx_dev_minor = MINOR(stat->dev);
	tmp.stx_mnt_id = stat->mnt_id;
	tmp.stx_dio_mem_align = stat->dio_mem_align;
	tmp.stx_dio_offset_align = stat->dio_offset_align;

	return copy_to_user(buffer, &tmp, sizeof(tmp)) ? -EFAULT : 0;
}

int do_statx(int dfd, struct filename *filename, unsigned int flags,
	     unsigned int mask, struct statx __user *buffer)
{
	struct kstat stat;
	int error;

	if (mask & STATX__RESERVED)
		return -EINVAL;
	if ((flags & AT_STATX_SYNC_TYPE) == AT_STATX_SYNC_TYPE)
		return -EINVAL;

	error = vfs_statx(dfd, filename, flags, &stat, mask);
	if (error)
		return error;

	return cp_statx(&stat, buffer);
}

/**
 * sys_statx - System call to get enhanced stats
 * @dfd: Base directory to pathwalk from *or* fd to stat.
 * @filename: File to stat or "" with AT_EMPTY_PATH
 * @flags: AT_* flags to control pathwalk.
 * @mask: Parts of statx struct actually required.
 * @buffer: Result buffer.
 *
 * Note that fstat() can be emulated by setting dfd to the fd of interest,
 * supplying "" as the filename and setting AT_EMPTY_PATH in the flags.
 */
SYSCALL_DEFINE5(statx,
		int, dfd, const char __user *, filename, unsigned, flags,
		unsigned int, mask,
		struct statx __user *, buffer)
{
	int ret;
	struct filename *name;

	name = getname_flags(filename, getname_statx_lookup_flags(flags), NULL);
	ret = do_statx(dfd, name, flags, mask, buffer);
	putname(name);

	return ret;
}

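/*
 * Illustrative sketch (userspace, not kernel code): emulating fstat() with
 * statx() as described in the kerneldoc above, assuming a libc that provides
 * the statx() wrapper (declared in <sys/stat.h>).
 *
 *	struct statx stx;
 *
 *	if (statx(fd, "", AT_EMPTY_PATH, STATX_BASIC_STATS, &stx) == 0)
 *		printf("size=%llu\n", (unsigned long long)stx.stx_size);
 */
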
#if defined(CONFIG_COMPAT) && defined(__ARCH_WANT_COMPAT_STAT)
static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
{
	struct compat_stat tmp;

	if (sizeof(tmp.st_dev) < 4 && !old_valid_dev(stat->dev))
		return -EOVERFLOW;
	if (sizeof(tmp.st_rdev) < 4 && !old_valid_dev(stat->rdev))
		return -EOVERFLOW;

	memset(&tmp, 0, sizeof(tmp));
	tmp.st_dev = new_encode_dev(stat->dev);
	tmp.st_ino = stat->ino;
	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
		return -EOVERFLOW;
	tmp.st_mode = stat->mode;
	tmp.st_nlink = stat->nlink;
	if (tmp.st_nlink != stat->nlink)
		return -EOVERFLOW;
	SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
	SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
	tmp.st_rdev = new_encode_dev(stat->rdev);
	if ((u64) stat->size > MAX_NON_LFS)
		return -EOVERFLOW;
	tmp.st_size = stat->size;
	tmp.st_atime = stat->atime.tv_sec;
	tmp.st_atime_nsec = stat->atime.tv_nsec;
	tmp.st_mtime = stat->mtime.tv_sec;
	tmp.st_mtime_nsec = stat->mtime.tv_nsec;
	tmp.st_ctime = stat->ctime.tv_sec;
	tmp.st_ctime_nsec = stat->ctime.tv_nsec;
	tmp.st_blocks = stat->blocks;
	tmp.st_blksize = stat->blksize;
	return copy_to_user(ubuf, &tmp, sizeof(tmp)) ? -EFAULT : 0;
}

COMPAT_SYSCALL_DEFINE2(newstat, const char __user *, filename,
		       struct compat_stat __user *, statbuf)
{
	struct kstat stat;
	int error;

	error = vfs_stat(filename, &stat);
	if (error)
		return error;
	return cp_compat_stat(&stat, statbuf);
}

COMPAT_SYSCALL_DEFINE2(newlstat, const char __user *, filename,
		       struct compat_stat __user *, statbuf)
{
	struct kstat stat;
	int error;

	error = vfs_lstat(filename, &stat);
	if (error)
		return error;
	return cp_compat_stat(&stat, statbuf);
}

#ifndef __ARCH_WANT_STAT64
COMPAT_SYSCALL_DEFINE4(newfstatat, unsigned int, dfd,
		       const char __user *, filename,
		       struct compat_stat __user *, statbuf, int, flag)
{
	struct kstat stat;
	int error;

	error = vfs_fstatat(dfd, filename, &stat, flag);
	if (error)
		return error;
	return cp_compat_stat(&stat, statbuf);
}
#endif

COMPAT_SYSCALL_DEFINE2(newfstat, unsigned int, fd,
		       struct compat_stat __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_fstat(fd, &stat);

	if (!error)
		error = cp_compat_stat(&stat, statbuf);
	return error;
}
#endif

/* The caller is responsible for sufficient locking (i.e. inode->i_lock). */
void __inode_add_bytes(struct inode *inode, loff_t bytes)
{
	inode->i_blocks += bytes >> 9;
	bytes &= 511;
	inode->i_bytes += bytes;
	if (inode->i_bytes >= 512) {
		inode->i_blocks++;
		inode->i_bytes -= 512;
	}
}
EXPORT_SYMBOL(__inode_add_bytes);

void inode_add_bytes(struct inode *inode, loff_t bytes)
{
	spin_lock(&inode->i_lock);
	__inode_add_bytes(inode, bytes);
	spin_unlock(&inode->i_lock);
}

EXPORT_SYMBOL(inode_add_bytes);

void __inode_sub_bytes(struct inode *inode, loff_t bytes)
{
	inode->i_blocks -= bytes >> 9;
	bytes &= 511;
	if (inode->i_bytes < bytes) {
		inode->i_blocks--;
		inode->i_bytes += 512;
	}
	inode->i_bytes -= bytes;
}

EXPORT_SYMBOL(__inode_sub_bytes);

void inode_sub_bytes(struct inode *inode, loff_t bytes)
{
	spin_lock(&inode->i_lock);
	__inode_sub_bytes(inode, bytes);
	spin_unlock(&inode->i_lock);
}

EXPORT_SYMBOL(inode_sub_bytes);

loff_t inode_get_bytes(struct inode *inode)
{
	loff_t ret;

	spin_lock(&inode->i_lock);
	ret = __inode_get_bytes(inode);
	spin_unlock(&inode->i_lock);
	return ret;
}

EXPORT_SYMBOL(inode_get_bytes);

void inode_set_bytes(struct inode *inode, loff_t bytes)
{
	/*
	 * The caller is responsible for sufficient locking
	 * (i.e. inode->i_lock).
	 */
	inode->i_blocks = bytes >> 9;
	inode->i_bytes = bytes & 511;
}

EXPORT_SYMBOL(inode_set_bytes);
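
/*
 * Worked example (illustrative only): the helpers above keep a byte count as
 * 512-byte blocks in i_blocks plus a sub-block remainder in i_bytes, so the
 * total charged to the inode is always (i_blocks << 9) + i_bytes, which is
 * what __inode_get_bytes() (declared in linux/fs.h) computes.  For instance,
 * inode_add_bytes(inode, 600) on a freshly zeroed pair leaves i_blocks == 1
 * and i_bytes == 88.
 */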