// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/stat.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/blkdev.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/highuid.h>
#include <linux/fs.h>
#include <linux/namei.h>
#include <linux/security.h>
#include <linux/cred.h>
#include <linux/syscalls.h>
#include <linux/pagemap.h>
#include <linux/compat.h>
#include <linux/iversion.h>

#include <linux/uaccess.h>
#include <asm/unistd.h>

#include "internal.h"
#include "mount.h"

/**
 * generic_fillattr - Fill in the basic attributes from the inode struct
 * @idmap: idmap of the mount the inode was found from
 * @request_mask: statx request_mask
 * @inode: Inode to use as the source
 * @stat: Where to fill in the attributes
 *
 * Fill in the basic attributes in the kstat structure from data that's to be
 * found on the VFS inode structure.  This is the default if no getattr inode
 * operation is supplied.
 *
 * If the inode has been found through an idmapped mount the idmap of
 * the vfsmount must be passed through @idmap. This function will then
 * take care to map the inode according to @idmap before filling in the
 * uid and gid fields. On non-idmapped mounts or if permission checking is to
 * be performed on the raw inode simply pass @nop_mnt_idmap.
 */
void generic_fillattr(struct mnt_idmap *idmap, u32 request_mask,
                      struct inode *inode, struct kstat *stat)
{
        vfsuid_t vfsuid = i_uid_into_vfsuid(idmap, inode);
        vfsgid_t vfsgid = i_gid_into_vfsgid(idmap, inode);

        stat->dev = inode->i_sb->s_dev;
        stat->ino = inode->i_ino;
        stat->mode = inode->i_mode;
        stat->nlink = inode->i_nlink;
        stat->uid = vfsuid_into_kuid(vfsuid);
        stat->gid = vfsgid_into_kgid(vfsgid);
        stat->rdev = inode->i_rdev;
        stat->size = i_size_read(inode);
        stat->atime = inode->i_atime;
        stat->mtime = inode->i_mtime;
        stat->ctime = inode_get_ctime(inode);
        stat->blksize = i_blocksize(inode);
        stat->blocks = inode->i_blocks;

        if ((request_mask & STATX_CHANGE_COOKIE) && IS_I_VERSION(inode)) {
                stat->result_mask |= STATX_CHANGE_COOKIE;
                stat->change_cookie = inode_query_iversion(inode);
        }
}
EXPORT_SYMBOL(generic_fillattr);

/**
 * generic_fill_statx_attr - Fill in the statx attributes from the inode flags
 * @inode: Inode to use as the source
 * @stat: Where to fill in the attribute flags
 *
 * Fill in the STATX_ATTR_* flags in the kstat structure for properties of the
 * inode that are published on i_flags and enforced by the VFS.
 */
void generic_fill_statx_attr(struct inode *inode, struct kstat *stat)
{
        if (inode->i_flags & S_IMMUTABLE)
                stat->attributes |= STATX_ATTR_IMMUTABLE;
        if (inode->i_flags & S_APPEND)
                stat->attributes |= STATX_ATTR_APPEND;
        stat->attributes_mask |= KSTAT_ATTR_VFS_FLAGS;
}
EXPORT_SYMBOL(generic_fill_statx_attr);
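
/*
 * Illustrative sketch, not part of the original file: a minimal ->getattr
 * implementation for a hypothetical "examplefs", showing how a filesystem
 * typically combines generic_fillattr() and generic_fill_statx_attr() before
 * overriding any fields it tracks itself.  Kept under #if 0 so it is never
 * built.
 */
#if 0
static int examplefs_getattr(struct mnt_idmap *idmap, const struct path *path,
                             struct kstat *stat, u32 request_mask,
                             unsigned int query_flags)
{
        struct inode *inode = d_inode(path->dentry);

        /* Fill the basic attributes straight from the VFS inode ... */
        generic_fillattr(idmap, request_mask, inode, stat);
        /* ... and publish the VFS-enforced flags (append, immutable). */
        generic_fill_statx_attr(inode, stat);
        return 0;
}
#endif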

/**
 * vfs_getattr_nosec - getattr without security checks
 * @path: file to get attributes from
 * @stat: structure to return attributes in
 * @request_mask: STATX_xxx flags indicating what the caller wants
 * @query_flags: Query mode (AT_STATX_SYNC_TYPE)
 *
 * Get attributes without calling security_inode_getattr.
 *
 * Currently the only caller other than vfs_getattr is internal to the
 * filehandle lookup code, which uses only the inode number and returns no
 * attributes to any user.  Any other code probably wants vfs_getattr.
 */
int vfs_getattr_nosec(const struct path *path, struct kstat *stat,
                      u32 request_mask, unsigned int query_flags)
{
        struct mnt_idmap *idmap;
        struct inode *inode = d_backing_inode(path->dentry);

        memset(stat, 0, sizeof(*stat));
        stat->result_mask |= STATX_BASIC_STATS;
        query_flags &= AT_STATX_SYNC_TYPE;

        /* allow the fs to override these if it really wants to */
        /* SB_NOATIME means filesystem supplies dummy atime value */
        if (inode->i_sb->s_flags & SB_NOATIME)
                stat->result_mask &= ~STATX_ATIME;

        /*
         * Note: If you add another clause to set an attribute flag, please
         * update attributes_mask below.
         */
        if (IS_AUTOMOUNT(inode))
                stat->attributes |= STATX_ATTR_AUTOMOUNT;

        if (IS_DAX(inode))
                stat->attributes |= STATX_ATTR_DAX;

        stat->attributes_mask |= (STATX_ATTR_AUTOMOUNT |
                                  STATX_ATTR_DAX);

        idmap = mnt_idmap(path->mnt);
        if (inode->i_op->getattr)
                return inode->i_op->getattr(idmap, path, stat,
                                            request_mask,
                                            query_flags | AT_GETATTR_NOSEC);

        generic_fillattr(idmap, request_mask, inode, stat);
        return 0;
}
EXPORT_SYMBOL(vfs_getattr_nosec);

/**
 * vfs_getattr - Get the enhanced basic attributes of a file
 * @path: The file of interest
 * @stat: Where to return the statistics
 * @request_mask: STATX_xxx flags indicating what the caller wants
 * @query_flags: Query mode (AT_STATX_SYNC_TYPE)
 *
 * Ask the filesystem for a file's attributes.  The caller must set
 * request_mask and query_flags to indicate what they want.
 *
 * If the file is remote, the filesystem can be forced to update the attributes
 * from the backing store by passing AT_STATX_FORCE_SYNC in query_flags or can
 * suppress the update by passing AT_STATX_DONT_SYNC.
 *
 * Bits must have been set in request_mask to indicate which attributes the
 * caller wants retrieved.  Any such attribute not requested may be returned
 * anyway, but the value may be approximate, and, if remote, may not have been
 * synchronised with the server.
 *
 * 0 will be returned on success, and a -ve error code if unsuccessful.
 */
int vfs_getattr(const struct path *path, struct kstat *stat,
                u32 request_mask, unsigned int query_flags)
{
        int retval;

        if (WARN_ON_ONCE(query_flags & AT_GETATTR_NOSEC))
                return -EPERM;

        retval = security_inode_getattr(path);
        if (retval)
                return retval;
        return vfs_getattr_nosec(path, stat, request_mask, query_flags);
}
EXPORT_SYMBOL(vfs_getattr);
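
/*
 * Illustrative sketch, not part of the original file: a hypothetical in-kernel
 * helper that resolves a pathname with kern_path() and queries its basic
 * attributes through vfs_getattr().  Kept under #if 0 so it is never built.
 */
#if 0
static int example_stat_kernel_path(const char *name, struct kstat *stat)
{
        struct path path;
        int error;

        error = kern_path(name, LOOKUP_FOLLOW, &path);
        if (error)
                return error;

        /* Security hooks run; STATX_BASIC_STATS asks for the stat(2) fields. */
        error = vfs_getattr(&path, stat, STATX_BASIC_STATS, 0);
        path_put(&path);
        return error;
}
#endif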

/**
 * vfs_fstat - Get the basic attributes by file descriptor
 * @fd: The file descriptor referring to the file of interest
 * @stat: The result structure to fill in.
 *
 * This function is a wrapper around vfs_getattr().  The main difference is
 * that it uses a file descriptor to determine the file location.
 *
 * 0 will be returned on success, and a -ve error code if unsuccessful.
 */
int vfs_fstat(int fd, struct kstat *stat)
{
        struct fd f;
        int error;

        f = fdget_raw(fd);
        if (!f.file)
                return -EBADF;
        error = vfs_getattr(&f.file->f_path, stat, STATX_BASIC_STATS, 0);
        fdput(f);
        return error;
}

int getname_statx_lookup_flags(int flags)
{
        int lookup_flags = 0;

        if (!(flags & AT_SYMLINK_NOFOLLOW))
                lookup_flags |= LOOKUP_FOLLOW;
        if (!(flags & AT_NO_AUTOMOUNT))
                lookup_flags |= LOOKUP_AUTOMOUNT;
        if (flags & AT_EMPTY_PATH)
                lookup_flags |= LOOKUP_EMPTY;

        return lookup_flags;
}

/**
 * vfs_statx - Get basic and extra attributes by filename
 * @dfd: A file descriptor representing the base dir for a relative filename
 * @filename: The name of the file of interest
 * @flags: Flags to control the query
 * @stat: The result structure to fill in.
 * @request_mask: STATX_xxx flags indicating what the caller wants
 *
 * This function is a wrapper around vfs_getattr().  The main difference is
 * that it uses a filename and base directory to determine the file location.
 * Additionally, the use of AT_SYMLINK_NOFOLLOW in flags will prevent a symlink
 * at the given name from being referenced.
 *
 * 0 will be returned on success, and a -ve error code if unsuccessful.
 */
static int vfs_statx(int dfd, struct filename *filename, int flags,
                     struct kstat *stat, u32 request_mask)
{
        struct path path;
        unsigned int lookup_flags = getname_statx_lookup_flags(flags);
        int error;

        if (flags & ~(AT_SYMLINK_NOFOLLOW | AT_NO_AUTOMOUNT | AT_EMPTY_PATH |
                      AT_STATX_SYNC_TYPE))
                return -EINVAL;

retry:
        error = filename_lookup(dfd, filename, lookup_flags, &path, NULL);
        if (error)
                goto out;

        error = vfs_getattr(&path, stat, request_mask, flags);

        stat->mnt_id = real_mount(path.mnt)->mnt_id;
        stat->result_mask |= STATX_MNT_ID;

        if (path.mnt->mnt_root == path.dentry)
                stat->attributes |= STATX_ATTR_MOUNT_ROOT;
        stat->attributes_mask |= STATX_ATTR_MOUNT_ROOT;

        /* Handle STATX_DIOALIGN for block devices. */
        if (request_mask & STATX_DIOALIGN) {
                struct inode *inode = d_backing_inode(path.dentry);

                if (S_ISBLK(inode->i_mode))
                        bdev_statx_dioalign(inode, stat);
        }

        path_put(&path);
        if (retry_estale(error, lookup_flags)) {
                lookup_flags |= LOOKUP_REVAL;
                goto retry;
        }
out:
        return error;
}

int vfs_fstatat(int dfd, const char __user *filename,
                struct kstat *stat, int flags)
{
        int ret;
        int statx_flags = flags | AT_NO_AUTOMOUNT;
        struct filename *name;

        /*
         * Work around glibc turning fstat() into fstatat(AT_EMPTY_PATH)
         *
         * If AT_EMPTY_PATH is set, we expect the common case to be that the
         * path really is empty, and avoid doing all the extra pathname work.
         */
        if (dfd >= 0 && flags == AT_EMPTY_PATH) {
                char c;

                ret = get_user(c, filename);
                if (unlikely(ret))
                        return ret;

                if (likely(!c))
                        return vfs_fstat(dfd, stat);
        }

        name = getname_flags(filename, getname_statx_lookup_flags(statx_flags), NULL);
        ret = vfs_statx(dfd, name, statx_flags, stat, STATX_BASIC_STATS);
        putname(name);

        return ret;
}
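
/*
 * Illustrative sketch, not part of the original file: a hypothetical userspace
 * helper showing the fast path handled above, where a glibc-style
 * fstatat(fd, "", ..., AT_EMPTY_PATH) behaves like fstat(fd, ...) without any
 * pathname work in the kernel.  May require _GNU_SOURCE for AT_EMPTY_PATH.
 * Kept under #if 0 so it is never built.
 */
#if 0
#include <fcntl.h>              /* AT_EMPTY_PATH */
#include <sys/stat.h>

static int example_fstat_via_fstatat(int fd, struct stat *st)
{
        return fstatat(fd, "", st, AT_EMPTY_PATH);
}
#endif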

#ifdef __ARCH_WANT_OLD_STAT

/*
 * For backward compatibility?  Maybe this should be moved
 * into arch/i386 instead?
 */
static int cp_old_stat(struct kstat *stat, struct __old_kernel_stat __user * statbuf)
{
        static int warncount = 5;
        struct __old_kernel_stat tmp;

        if (warncount > 0) {
                warncount--;
                printk(KERN_WARNING "VFS: Warning: %s using old stat() call. Recompile your binary.\n",
                        current->comm);
        } else if (warncount < 0) {
                /* it's laughable, but... */
                warncount = 0;
        }

        memset(&tmp, 0, sizeof(struct __old_kernel_stat));
        tmp.st_dev = old_encode_dev(stat->dev);
        tmp.st_ino = stat->ino;
        if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
                return -EOVERFLOW;
        tmp.st_mode = stat->mode;
        tmp.st_nlink = stat->nlink;
        if (tmp.st_nlink != stat->nlink)
                return -EOVERFLOW;
        SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
        SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
        tmp.st_rdev = old_encode_dev(stat->rdev);
#if BITS_PER_LONG == 32
        if (stat->size > MAX_NON_LFS)
                return -EOVERFLOW;
#endif
        tmp.st_size = stat->size;
        tmp.st_atime = stat->atime.tv_sec;
        tmp.st_mtime = stat->mtime.tv_sec;
        tmp.st_ctime = stat->ctime.tv_sec;
        return copy_to_user(statbuf, &tmp, sizeof(tmp)) ? -EFAULT : 0;
}
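
/*
 * Illustrative note, not part of the original file: old_encode_dev() packs a
 * device number as (major << 8) | minor, so e.g. MKDEV(8, 1) encodes to
 * 0x0801.  The -EOVERFLOW checks in cp_old_stat() above catch inode numbers,
 * link counts and sizes that do not fit the legacy structure.
 */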

SYSCALL_DEFINE2(stat, const char __user *, filename,
                struct __old_kernel_stat __user *, statbuf)
{
        struct kstat stat;
        int error;

        error = vfs_stat(filename, &stat);
        if (error)
                return error;

        return cp_old_stat(&stat, statbuf);
}

SYSCALL_DEFINE2(lstat, const char __user *, filename,
                struct __old_kernel_stat __user *, statbuf)
{
        struct kstat stat;
        int error;

        error = vfs_lstat(filename, &stat);
        if (error)
                return error;

        return cp_old_stat(&stat, statbuf);
}

SYSCALL_DEFINE2(fstat, unsigned int, fd, struct __old_kernel_stat __user *, statbuf)
{
        struct kstat stat;
        int error = vfs_fstat(fd, &stat);

        if (!error)
                error = cp_old_stat(&stat, statbuf);

        return error;
}

#endif /* __ARCH_WANT_OLD_STAT */

#ifdef __ARCH_WANT_NEW_STAT

#ifndef INIT_STRUCT_STAT_PADDING
# define INIT_STRUCT_STAT_PADDING(st) memset(&st, 0, sizeof(st))
#endif

static int cp_new_stat(struct kstat *stat, struct stat __user *statbuf)
{
        struct stat tmp;

        if (sizeof(tmp.st_dev) < 4 && !old_valid_dev(stat->dev))
                return -EOVERFLOW;
        if (sizeof(tmp.st_rdev) < 4 && !old_valid_dev(stat->rdev))
                return -EOVERFLOW;
#if BITS_PER_LONG == 32
        if (stat->size > MAX_NON_LFS)
                return -EOVERFLOW;
#endif

        INIT_STRUCT_STAT_PADDING(tmp);
        tmp.st_dev = new_encode_dev(stat->dev);
        tmp.st_ino = stat->ino;
        if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
                return -EOVERFLOW;
        tmp.st_mode = stat->mode;
        tmp.st_nlink = stat->nlink;
        if (tmp.st_nlink != stat->nlink)
                return -EOVERFLOW;
        SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
        SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
        tmp.st_rdev = new_encode_dev(stat->rdev);
        tmp.st_size = stat->size;
        tmp.st_atime = stat->atime.tv_sec;
        tmp.st_mtime = stat->mtime.tv_sec;
        tmp.st_ctime = stat->ctime.tv_sec;
#ifdef STAT_HAVE_NSEC
        tmp.st_atime_nsec = stat->atime.tv_nsec;
        tmp.st_mtime_nsec = stat->mtime.tv_nsec;
        tmp.st_ctime_nsec = stat->ctime.tv_nsec;
#endif
        tmp.st_blocks = stat->blocks;
        tmp.st_blksize = stat->blksize;
        return copy_to_user(statbuf, &tmp, sizeof(tmp)) ? -EFAULT : 0;
}

SYSCALL_DEFINE2(newstat, const char __user *, filename,
                struct stat __user *, statbuf)
{
        struct kstat stat;
        int error = vfs_stat(filename, &stat);

        if (error)
                return error;
        return cp_new_stat(&stat, statbuf);
}

SYSCALL_DEFINE2(newlstat, const char __user *, filename,
                struct stat __user *, statbuf)
{
        struct kstat stat;
        int error;

        error = vfs_lstat(filename, &stat);
        if (error)
                return error;

        return cp_new_stat(&stat, statbuf);
}

#if !defined(__ARCH_WANT_STAT64) || defined(__ARCH_WANT_SYS_NEWFSTATAT)
SYSCALL_DEFINE4(newfstatat, int, dfd, const char __user *, filename,
                struct stat __user *, statbuf, int, flag)
{
        struct kstat stat;
        int error;

        error = vfs_fstatat(dfd, filename, &stat, flag);
        if (error)
                return error;
        return cp_new_stat(&stat, statbuf);
}
#endif

SYSCALL_DEFINE2(newfstat, unsigned int, fd, struct stat __user *, statbuf)
{
        struct kstat stat;
        int error = vfs_fstat(fd, &stat);

        if (!error)
                error = cp_new_stat(&stat, statbuf);

        return error;
}
#endif

static int do_readlinkat(int dfd, const char __user *pathname,
                         char __user *buf, int bufsiz)
{
        struct path path;
        int error;
        int empty = 0;
        unsigned int lookup_flags = LOOKUP_EMPTY;

        if (bufsiz <= 0)
                return -EINVAL;

retry:
        error = user_path_at_empty(dfd, pathname, lookup_flags, &path, &empty);
        if (!error) {
                struct inode *inode = d_backing_inode(path.dentry);

                error = empty ? -ENOENT : -EINVAL;
                /*
                 * AFS mountpoints allow readlink(2) but are not symlinks
                 */
                if (d_is_symlink(path.dentry) || inode->i_op->readlink) {
                        error = security_inode_readlink(path.dentry);
                        if (!error) {
                                touch_atime(&path);
                                error = vfs_readlink(path.dentry, buf, bufsiz);
                        }
                }
                path_put(&path);
                if (retry_estale(error, lookup_flags)) {
                        lookup_flags |= LOOKUP_REVAL;
                        goto retry;
                }
        }
        return error;
}

SYSCALL_DEFINE4(readlinkat, int, dfd, const char __user *, pathname,
                char __user *, buf, int, bufsiz)
{
        return do_readlinkat(dfd, pathname, buf, bufsiz);
}

SYSCALL_DEFINE3(readlink, const char __user *, path, char __user *, buf,
                int, bufsiz)
{
        return do_readlinkat(AT_FDCWD, path, buf, bufsiz);
}
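
/*
 * Illustrative sketch, not part of the original file: a hypothetical userspace
 * helper around readlink(2).  The syscalls above copy at most bufsiz bytes and
 * do not NUL-terminate the result, so callers terminate it themselves.  Kept
 * under #if 0 so it is never built.
 */
#if 0
#include <unistd.h>

static ssize_t example_read_link(const char *path, char *buf, size_t bufsiz)
{
        /* Assumes bufsiz >= 1 so there is room for the terminating NUL. */
        ssize_t len = readlink(path, buf, bufsiz - 1);

        if (len >= 0)
                buf[len] = '\0';
        return len;
}
#endif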

/* ---------- LFS-64 ----------- */
#if defined(__ARCH_WANT_STAT64) || defined(__ARCH_WANT_COMPAT_STAT64)

#ifndef INIT_STRUCT_STAT64_PADDING
# define INIT_STRUCT_STAT64_PADDING(st) memset(&st, 0, sizeof(st))
#endif

static long cp_new_stat64(struct kstat *stat, struct stat64 __user *statbuf)
{
        struct stat64 tmp;

        INIT_STRUCT_STAT64_PADDING(tmp);
#ifdef CONFIG_MIPS
        /* mips has weird padding, so we don't get 64 bits there */
        tmp.st_dev = new_encode_dev(stat->dev);
        tmp.st_rdev = new_encode_dev(stat->rdev);
#else
        tmp.st_dev = huge_encode_dev(stat->dev);
        tmp.st_rdev = huge_encode_dev(stat->rdev);
#endif
        tmp.st_ino = stat->ino;
        if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
                return -EOVERFLOW;
#ifdef STAT64_HAS_BROKEN_ST_INO
        tmp.__st_ino = stat->ino;
#endif
        tmp.st_mode = stat->mode;
        tmp.st_nlink = stat->nlink;
        tmp.st_uid = from_kuid_munged(current_user_ns(), stat->uid);
        tmp.st_gid = from_kgid_munged(current_user_ns(), stat->gid);
        tmp.st_atime = stat->atime.tv_sec;
        tmp.st_atime_nsec = stat->atime.tv_nsec;
        tmp.st_mtime = stat->mtime.tv_sec;
        tmp.st_mtime_nsec = stat->mtime.tv_nsec;
        tmp.st_ctime = stat->ctime.tv_sec;
        tmp.st_ctime_nsec = stat->ctime.tv_nsec;
        tmp.st_size = stat->size;
        tmp.st_blocks = stat->blocks;
        tmp.st_blksize = stat->blksize;
        return copy_to_user(statbuf, &tmp, sizeof(tmp)) ? -EFAULT : 0;
}

SYSCALL_DEFINE2(stat64, const char __user *, filename,
                struct stat64 __user *, statbuf)
{
        struct kstat stat;
        int error = vfs_stat(filename, &stat);

        if (!error)
                error = cp_new_stat64(&stat, statbuf);

        return error;
}

SYSCALL_DEFINE2(lstat64, const char __user *, filename,
                struct stat64 __user *, statbuf)
{
        struct kstat stat;
        int error = vfs_lstat(filename, &stat);

        if (!error)
                error = cp_new_stat64(&stat, statbuf);

        return error;
}

SYSCALL_DEFINE2(fstat64, unsigned long, fd, struct stat64 __user *, statbuf)
{
        struct kstat stat;
        int error = vfs_fstat(fd, &stat);

        if (!error)
                error = cp_new_stat64(&stat, statbuf);

        return error;
}

SYSCALL_DEFINE4(fstatat64, int, dfd, const char __user *, filename,
                struct stat64 __user *, statbuf, int, flag)
{
        struct kstat stat;
        int error;

        error = vfs_fstatat(dfd, filename, &stat, flag);
        if (error)
                return error;
        return cp_new_stat64(&stat, statbuf);
}
#endif /* __ARCH_WANT_STAT64 || __ARCH_WANT_COMPAT_STAT64 */

static noinline_for_stack int
cp_statx(const struct kstat *stat, struct statx __user *buffer)
{
        struct statx tmp;

        memset(&tmp, 0, sizeof(tmp));

        /* STATX_CHANGE_COOKIE is kernel-only for now */
        tmp.stx_mask = stat->result_mask & ~STATX_CHANGE_COOKIE;
        tmp.stx_blksize = stat->blksize;
        /* STATX_ATTR_CHANGE_MONOTONIC is kernel-only for now */
        tmp.stx_attributes = stat->attributes & ~STATX_ATTR_CHANGE_MONOTONIC;
        tmp.stx_nlink = stat->nlink;
        tmp.stx_uid = from_kuid_munged(current_user_ns(), stat->uid);
        tmp.stx_gid = from_kgid_munged(current_user_ns(), stat->gid);
        tmp.stx_mode = stat->mode;
        tmp.stx_ino = stat->ino;
        tmp.stx_size = stat->size;
        tmp.stx_blocks = stat->blocks;
        tmp.stx_attributes_mask = stat->attributes_mask;
        tmp.stx_atime.tv_sec = stat->atime.tv_sec;
        tmp.stx_atime.tv_nsec = stat->atime.tv_nsec;
        tmp.stx_btime.tv_sec = stat->btime.tv_sec;
        tmp.stx_btime.tv_nsec = stat->btime.tv_nsec;
        tmp.stx_ctime.tv_sec = stat->ctime.tv_sec;
        tmp.stx_ctime.tv_nsec = stat->ctime.tv_nsec;
        tmp.stx_mtime.tv_sec = stat->mtime.tv_sec;
        tmp.stx_mtime.tv_nsec = stat->mtime.tv_nsec;
        tmp.stx_rdev_major = MAJOR(stat->rdev);
        tmp.stx_rdev_minor = MINOR(stat->rdev);
        tmp.stx_dev_major = MAJOR(stat->dev);
        tmp.stx_dev_minor = MINOR(stat->dev);
        tmp.stx_mnt_id = stat->mnt_id;
        tmp.stx_dio_mem_align = stat->dio_mem_align;
        tmp.stx_dio_offset_align = stat->dio_offset_align;

        return copy_to_user(buffer, &tmp, sizeof(tmp)) ? -EFAULT : 0;
}
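
/*
 * Illustrative sketch, not part of the original file: a hypothetical userspace
 * caller of statx(2).  The stx_mask copied out by cp_statx() above tells the
 * caller which fields were actually filled in, so a requested but unsupported
 * field (here the birth time) must be checked before use.  Kept under #if 0 so
 * it is never built.
 */
#if 0
#include <fcntl.h>
#include <sys/stat.h>
#include <stdio.h>

static void example_print_btime(const char *path)
{
        struct statx stx;

        if (statx(AT_FDCWD, path, 0, STATX_BTIME, &stx) == 0 &&
            (stx.stx_mask & STATX_BTIME))
                printf("btime: %lld\n", (long long)stx.stx_btime.tv_sec);
}
#endif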

int do_statx(int dfd, struct filename *filename, unsigned int flags,
             unsigned int mask, struct statx __user *buffer)
{
        struct kstat stat;
        int error;

        if (mask & STATX__RESERVED)
                return -EINVAL;
        /* Reject AT_STATX_FORCE_SYNC and AT_STATX_DONT_SYNC both being set. */
        if ((flags & AT_STATX_SYNC_TYPE) == AT_STATX_SYNC_TYPE)
                return -EINVAL;

        /*
         * STATX_CHANGE_COOKIE is kernel-only for now.  Ignore requests
         * from userland.
         */
        mask &= ~STATX_CHANGE_COOKIE;

        error = vfs_statx(dfd, filename, flags, &stat, mask);
        if (error)
                return error;

        return cp_statx(&stat, buffer);
}

/**
 * sys_statx - System call to get enhanced stats
 * @dfd: Base directory to pathwalk from *or* fd to stat.
 * @filename: File to stat or "" with AT_EMPTY_PATH
 * @flags: AT_* flags to control pathwalk.
 * @mask: Parts of statx struct actually required.
 * @buffer: Result buffer.
 *
 * Note that fstat() can be emulated by setting dfd to the fd of interest,
 * supplying "" as the filename and setting AT_EMPTY_PATH in the flags.
 */
SYSCALL_DEFINE5(statx,
                int, dfd, const char __user *, filename, unsigned, flags,
                unsigned int, mask,
                struct statx __user *, buffer)
{
        int ret;
        struct filename *name;

        name = getname_flags(filename, getname_statx_lookup_flags(flags), NULL);
        ret = do_statx(dfd, name, flags, mask, buffer);
        putname(name);

        return ret;
}
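
/*
 * Illustrative sketch, not part of the original file: a hypothetical userspace
 * helper showing the fstat() emulation described in the kernel-doc above,
 * using statx() with an empty path and AT_EMPTY_PATH (glibc >= 2.28 provides
 * the wrapper).  Kept under #if 0 so it is never built.
 */
#if 0
#include <fcntl.h>              /* AT_EMPTY_PATH */
#include <sys/stat.h>

static int example_fstat_via_statx(int fd, struct statx *stx)
{
        return statx(fd, "", AT_EMPTY_PATH, STATX_BASIC_STATS, stx);
}
#endif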

#if defined(CONFIG_COMPAT) && defined(__ARCH_WANT_COMPAT_STAT)
static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
{
        struct compat_stat tmp;

        if (sizeof(tmp.st_dev) < 4 && !old_valid_dev(stat->dev))
                return -EOVERFLOW;
        if (sizeof(tmp.st_rdev) < 4 && !old_valid_dev(stat->rdev))
                return -EOVERFLOW;

        memset(&tmp, 0, sizeof(tmp));
        tmp.st_dev = new_encode_dev(stat->dev);
        tmp.st_ino = stat->ino;
        if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
                return -EOVERFLOW;
        tmp.st_mode = stat->mode;
        tmp.st_nlink = stat->nlink;
        if (tmp.st_nlink != stat->nlink)
                return -EOVERFLOW;
        SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
        SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
        tmp.st_rdev = new_encode_dev(stat->rdev);
        if ((u64) stat->size > MAX_NON_LFS)
                return -EOVERFLOW;
        tmp.st_size = stat->size;
        tmp.st_atime = stat->atime.tv_sec;
        tmp.st_atime_nsec = stat->atime.tv_nsec;
        tmp.st_mtime = stat->mtime.tv_sec;
        tmp.st_mtime_nsec = stat->mtime.tv_nsec;
        tmp.st_ctime = stat->ctime.tv_sec;
        tmp.st_ctime_nsec = stat->ctime.tv_nsec;
        tmp.st_blocks = stat->blocks;
        tmp.st_blksize = stat->blksize;
        return copy_to_user(ubuf, &tmp, sizeof(tmp)) ? -EFAULT : 0;
}

COMPAT_SYSCALL_DEFINE2(newstat, const char __user *, filename,
                       struct compat_stat __user *, statbuf)
{
        struct kstat stat;
        int error;

        error = vfs_stat(filename, &stat);
        if (error)
                return error;
        return cp_compat_stat(&stat, statbuf);
}

COMPAT_SYSCALL_DEFINE2(newlstat, const char __user *, filename,
                       struct compat_stat __user *, statbuf)
{
        struct kstat stat;
        int error;

        error = vfs_lstat(filename, &stat);
        if (error)
                return error;
        return cp_compat_stat(&stat, statbuf);
}

#ifndef __ARCH_WANT_STAT64
COMPAT_SYSCALL_DEFINE4(newfstatat, unsigned int, dfd,
                       const char __user *, filename,
                       struct compat_stat __user *, statbuf, int, flag)
{
        struct kstat stat;
        int error;

        error = vfs_fstatat(dfd, filename, &stat, flag);
        if (error)
                return error;
        return cp_compat_stat(&stat, statbuf);
}
#endif

COMPAT_SYSCALL_DEFINE2(newfstat, unsigned int, fd,
                       struct compat_stat __user *, statbuf)
{
        struct kstat stat;
        int error = vfs_fstat(fd, &stat);

        if (!error)
                error = cp_compat_stat(&stat, statbuf);
        return error;
}
#endif

/* The caller is responsible for sufficient locking (i.e. inode->i_lock). */
void __inode_add_bytes(struct inode *inode, loff_t bytes)
{
        inode->i_blocks += bytes >> 9;
        bytes &= 511;
        inode->i_bytes += bytes;
        if (inode->i_bytes >= 512) {
                inode->i_blocks++;
                inode->i_bytes -= 512;
        }
}
EXPORT_SYMBOL(__inode_add_bytes);

void inode_add_bytes(struct inode *inode, loff_t bytes)
{
        spin_lock(&inode->i_lock);
        __inode_add_bytes(inode, bytes);
        spin_unlock(&inode->i_lock);
}

EXPORT_SYMBOL(inode_add_bytes);

void __inode_sub_bytes(struct inode *inode, loff_t bytes)
{
        inode->i_blocks -= bytes >> 9;
        bytes &= 511;
        if (inode->i_bytes < bytes) {
                inode->i_blocks--;
                inode->i_bytes += 512;
        }
        inode->i_bytes -= bytes;
}

EXPORT_SYMBOL(__inode_sub_bytes);

void inode_sub_bytes(struct inode *inode, loff_t bytes)
{
        spin_lock(&inode->i_lock);
        __inode_sub_bytes(inode, bytes);
        spin_unlock(&inode->i_lock);
}

EXPORT_SYMBOL(inode_sub_bytes);

loff_t inode_get_bytes(struct inode *inode)
{
        loff_t ret;

        spin_lock(&inode->i_lock);
        ret = __inode_get_bytes(inode);
        spin_unlock(&inode->i_lock);
        return ret;
}

EXPORT_SYMBOL(inode_get_bytes);

void inode_set_bytes(struct inode *inode, loff_t bytes)
{
        /* The caller is responsible for sufficient locking
         * (i.e. inode->i_lock). */
        inode->i_blocks = bytes >> 9;
        inode->i_bytes = bytes & 511;
}

EXPORT_SYMBOL(inode_set_bytes);
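
/*
 * Illustrative note, not part of the original file: the byte-accounting
 * helpers above keep i_blocks in 512-byte units and the sub-block remainder
 * in i_bytes.  For example, with i_bytes == 400, inode_add_bytes(inode, 1300)
 * adds two blocks for the whole part (1300 >> 9 == 2), and the remainder
 * 400 + 276 = 676 carries a third block, leaving i_bytes == 164.
 */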