// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/stat.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/export.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/highuid.h>
#include <linux/fs.h>
#include <linux/namei.h>
#include <linux/security.h>
#include <linux/cred.h>
#include <linux/syscalls.h>
#include <linux/pagemap.h>
#include <linux/compat.h>

#include <linux/uaccess.h>
#include <asm/unistd.h>

#include "internal.h"

/**
 * generic_fillattr - Fill in the basic attributes from the inode struct
 * @inode: Inode to use as the source
 * @stat: Where to fill in the attributes
 *
 * Fill in the basic attributes in the kstat structure from data found in the
 * VFS inode structure.  This is the default if no getattr inode operation is
 * supplied.
 */
void generic_fillattr(struct inode *inode, struct kstat *stat)
{
	stat->dev = inode->i_sb->s_dev;
	stat->ino = inode->i_ino;
	stat->mode = inode->i_mode;
	stat->nlink = inode->i_nlink;
	stat->uid = inode->i_uid;
	stat->gid = inode->i_gid;
	stat->rdev = inode->i_rdev;
	stat->size = i_size_read(inode);
	stat->atime = inode->i_atime;
	stat->mtime = inode->i_mtime;
	stat->ctime = inode->i_ctime;
	stat->blksize = i_blocksize(inode);
	stat->blocks = inode->i_blocks;
}
EXPORT_SYMBOL(generic_fillattr);
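/*
 * Typical use in a filesystem's ->getattr() is to let generic_fillattr()
 * fill in the common fields and then override whatever the filesystem
 * tracks differently.  Illustrative sketch only (foofs and FOOFS_IO_SIZE
 * are hypothetical, not part of this file):
 *
 *	static int foofs_getattr(const struct path *path, struct kstat *stat,
 *				 u32 request_mask, unsigned int query_flags)
 *	{
 *		generic_fillattr(d_inode(path->dentry), stat);
 *		stat->blksize = FOOFS_IO_SIZE;
 *		return 0;
 *	}
 */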

/**
 * vfs_getattr_nosec - getattr without security checks
 * @path: file to get attributes from
 * @stat: structure to return attributes in
 * @request_mask: STATX_xxx flags indicating what the caller wants
 * @query_flags: Query mode (KSTAT_QUERY_FLAGS)
 *
 * Get attributes without calling security_inode_getattr.
 *
 * Currently the only caller other than vfs_getattr is internal to the
 * filehandle lookup code, which uses only the inode number and returns no
 * attributes to any user.  Any other code probably wants vfs_getattr.
 */
int vfs_getattr_nosec(const struct path *path, struct kstat *stat,
		      u32 request_mask, unsigned int query_flags)
{
	struct inode *inode = d_backing_inode(path->dentry);

	memset(stat, 0, sizeof(*stat));
	stat->result_mask |= STATX_BASIC_STATS;
	request_mask &= STATX_ALL;
	query_flags &= KSTAT_QUERY_FLAGS;

	/* allow the fs to override these if it really wants to */
	if (IS_NOATIME(inode))
		stat->result_mask &= ~STATX_ATIME;
	if (IS_AUTOMOUNT(inode))
		stat->attributes |= STATX_ATTR_AUTOMOUNT;

	if (inode->i_op->getattr)
		return inode->i_op->getattr(path, stat, request_mask,
					    query_flags);

	generic_fillattr(inode, stat);
	return 0;
}
EXPORT_SYMBOL(vfs_getattr_nosec);

/**
 * vfs_getattr - Get the enhanced basic attributes of a file
 * @path: The file of interest
 * @stat: Where to return the statistics
 * @request_mask: STATX_xxx flags indicating what the caller wants
 * @query_flags: Query mode (KSTAT_QUERY_FLAGS)
 *
 * Ask the filesystem for a file's attributes.  The caller must use
 * request_mask and query_flags to indicate what they want.
 *
 * If the file is remote, the filesystem can be forced to update the attributes
 * from the backing store by passing AT_STATX_FORCE_SYNC in query_flags or can
 * suppress the update by passing AT_STATX_DONT_SYNC.
 *
 * Bits must have been set in request_mask to indicate which attributes the
 * caller wants retrieved.  Attributes that were not requested may be returned
 * anyway, but their values may be approximate and, if remote, may not have
 * been synchronised with the server.
 *
 * 0 will be returned on success, and a -ve error code if unsuccessful.
 */
int vfs_getattr(const struct path *path, struct kstat *stat,
		u32 request_mask, unsigned int query_flags)
{
	int retval;

	retval = security_inode_getattr(path);
	if (retval)
		return retval;
	return vfs_getattr_nosec(path, stat, request_mask, query_flags);
}
EXPORT_SYMBOL(vfs_getattr);

/**
 * vfs_statx_fd - Get the enhanced basic attributes by file descriptor
 * @fd: The file descriptor referring to the file of interest
 * @stat: The result structure to fill in.
 * @request_mask: STATX_xxx flags indicating what the caller wants
 * @query_flags: Query mode (KSTAT_QUERY_FLAGS)
 *
 * This function is a wrapper around vfs_getattr().  The main difference is
 * that it uses a file descriptor to determine the file location.
 *
 * 0 will be returned on success, and a -ve error code if unsuccessful.
 */
int vfs_statx_fd(unsigned int fd, struct kstat *stat,
		 u32 request_mask, unsigned int query_flags)
{
	struct fd f;
	int error = -EBADF;

	if (query_flags & ~KSTAT_QUERY_FLAGS)
		return -EINVAL;

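	/* fdget_raw(), not fdget(): allow O_PATH descriptors to be queried too. */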
	f = fdget_raw(fd);
	if (f.file) {
		error = vfs_getattr(&f.file->f_path, stat,
				    request_mask, query_flags);
		fdput(f);
	}
	return error;
}
EXPORT_SYMBOL(vfs_statx_fd);

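/*
 * Translate the AT_* flags accepted by the stat() family into LOOKUP_* flags
 * for path resolution.  Flags outside the permitted set are rejected with
 * -EINVAL.  KSTAT_QUERY_FLAGS bits are allowed through here; they are not
 * lookup flags but are consumed later by vfs_getattr().
 */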
inline unsigned vfs_stat_set_lookup_flags(unsigned *lookup_flags, int flags)
{
	if ((flags & ~(AT_SYMLINK_NOFOLLOW | AT_NO_AUTOMOUNT |
		       AT_EMPTY_PATH | KSTAT_QUERY_FLAGS)) != 0)
		return -EINVAL;

	*lookup_flags = LOOKUP_FOLLOW | LOOKUP_AUTOMOUNT;
	if (flags & AT_SYMLINK_NOFOLLOW)
		*lookup_flags &= ~LOOKUP_FOLLOW;
	if (flags & AT_NO_AUTOMOUNT)
		*lookup_flags &= ~LOOKUP_AUTOMOUNT;
	if (flags & AT_EMPTY_PATH)
		*lookup_flags |= LOOKUP_EMPTY;

	return 0;
}

/**
 * vfs_statx - Get basic and extra attributes by filename
 * @dfd: A file descriptor representing the base dir for a relative filename
 * @filename: The name of the file of interest
 * @flags: Flags to control the query
 * @stat: The result structure to fill in.
 * @request_mask: STATX_xxx flags indicating what the caller wants
 *
 * This function is a wrapper around vfs_getattr().  The main difference is
 * that it uses a filename and base directory to determine the file location.
 * Additionally, the use of AT_SYMLINK_NOFOLLOW in flags will prevent a symlink
 * at the given name from being referenced.
 *
 * 0 will be returned on success, and a -ve error code if unsuccessful.
 */
int vfs_statx(int dfd, const char __user *filename, int flags,
	      struct kstat *stat, u32 request_mask)
{
	struct path path;
	int error = -EINVAL;
	unsigned lookup_flags;

	if (vfs_stat_set_lookup_flags(&lookup_flags, flags))
		return -EINVAL;
retry:
	error = user_path_at(dfd, filename, lookup_flags, &path);
	if (error)
		goto out;

	error = vfs_getattr(&path, stat, request_mask, flags);
	path_put(&path);
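	/*
	 * -ESTALE usually means the dentry went stale under us (e.g. on NFS);
	 * retry the lookup once more with LOOKUP_REVAL to force revalidation.
	 */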
	if (retry_estale(error, lookup_flags)) {
		lookup_flags |= LOOKUP_REVAL;
		goto retry;
	}
out:
	return error;
}
EXPORT_SYMBOL(vfs_statx);


#ifdef __ARCH_WANT_OLD_STAT

/*
 * For backward compatibility?  Maybe this should be moved
 * into arch/i386 instead?
 */
static int cp_old_stat(struct kstat *stat, struct __old_kernel_stat __user * statbuf)
{
	static int warncount = 5;
	struct __old_kernel_stat tmp;

	if (warncount > 0) {
		warncount--;
		printk(KERN_WARNING "VFS: Warning: %s using old stat() call. Recompile your binary.\n",
			current->comm);
	} else if (warncount < 0) {
		/* it's laughable, but... */
		warncount = 0;
	}

	memset(&tmp, 0, sizeof(struct __old_kernel_stat));
	tmp.st_dev = old_encode_dev(stat->dev);
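	/*
	 * Store the inode number, then check that it survived: if the old
	 * userspace field is narrower than the kernel's ino, refuse to
	 * truncate silently and return -EOVERFLOW instead.
	 */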
	tmp.st_ino = stat->ino;
	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
		return -EOVERFLOW;
	tmp.st_mode = stat->mode;
	tmp.st_nlink = stat->nlink;
	if (tmp.st_nlink != stat->nlink)
		return -EOVERFLOW;
	SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
	SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
	tmp.st_rdev = old_encode_dev(stat->rdev);
#if BITS_PER_LONG == 32
	if (stat->size > MAX_NON_LFS)
		return -EOVERFLOW;
#endif
	tmp.st_size = stat->size;
	tmp.st_atime = stat->atime.tv_sec;
	tmp.st_mtime = stat->mtime.tv_sec;
	tmp.st_ctime = stat->ctime.tv_sec;
	return copy_to_user(statbuf, &tmp, sizeof(tmp)) ? -EFAULT : 0;
}

SYSCALL_DEFINE2(stat, const char __user *, filename,
		struct __old_kernel_stat __user *, statbuf)
{
	struct kstat stat;
	int error;

	error = vfs_stat(filename, &stat);
	if (error)
		return error;

	return cp_old_stat(&stat, statbuf);
}

SYSCALL_DEFINE2(lstat, const char __user *, filename,
		struct __old_kernel_stat __user *, statbuf)
{
	struct kstat stat;
	int error;

	error = vfs_lstat(filename, &stat);
	if (error)
		return error;

	return cp_old_stat(&stat, statbuf);
}

SYSCALL_DEFINE2(fstat, unsigned int, fd, struct __old_kernel_stat __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_fstat(fd, &stat);

	if (!error)
		error = cp_old_stat(&stat, statbuf);

	return error;
}

#endif /* __ARCH_WANT_OLD_STAT */

#ifdef __ARCH_WANT_NEW_STAT

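/*
 * choose_32_64() picks the 32-bit or 64-bit variant at compile time.  The
 * legacy 32-bit struct stat carries the old 16-bit dev_t encoding, so device
 * numbers must pass old_valid_dev(); the 64-bit layout uses new_encode_dev()
 * and can represent any dev_t.
 */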
#if BITS_PER_LONG == 32
#  define choose_32_64(a,b) a
#else
#  define choose_32_64(a,b) b
#endif

#define valid_dev(x)  choose_32_64(old_valid_dev(x),true)
#define encode_dev(x) choose_32_64(old_encode_dev,new_encode_dev)(x)

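/*
 * Unless the architecture provides a cheaper INIT_STRUCT_STAT_PADDING, zero
 * the whole structure so that padding and any unused fields cannot leak
 * kernel stack data to userspace.
 */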
#ifndef INIT_STRUCT_STAT_PADDING
#  define INIT_STRUCT_STAT_PADDING(st) memset(&st, 0, sizeof(st))
#endif

static int cp_new_stat(struct kstat *stat, struct stat __user *statbuf)
{
	struct stat tmp;

	if (!valid_dev(stat->dev) || !valid_dev(stat->rdev))
		return -EOVERFLOW;
#if BITS_PER_LONG == 32
	if (stat->size > MAX_NON_LFS)
		return -EOVERFLOW;
#endif

	INIT_STRUCT_STAT_PADDING(tmp);
	tmp.st_dev = encode_dev(stat->dev);
	tmp.st_ino = stat->ino;
	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
		return -EOVERFLOW;
	tmp.st_mode = stat->mode;
	tmp.st_nlink = stat->nlink;
	if (tmp.st_nlink != stat->nlink)
		return -EOVERFLOW;
	SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
	SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
	tmp.st_rdev = encode_dev(stat->rdev);
	tmp.st_size = stat->size;
	tmp.st_atime = stat->atime.tv_sec;
	tmp.st_mtime = stat->mtime.tv_sec;
	tmp.st_ctime = stat->ctime.tv_sec;
#ifdef STAT_HAVE_NSEC
	tmp.st_atime_nsec = stat->atime.tv_nsec;
	tmp.st_mtime_nsec = stat->mtime.tv_nsec;
	tmp.st_ctime_nsec = stat->ctime.tv_nsec;
#endif
	tmp.st_blocks = stat->blocks;
	tmp.st_blksize = stat->blksize;
	return copy_to_user(statbuf, &tmp, sizeof(tmp)) ? -EFAULT : 0;
}

SYSCALL_DEFINE2(newstat, const char __user *, filename,
		struct stat __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_stat(filename, &stat);

	if (error)
		return error;
	return cp_new_stat(&stat, statbuf);
}

SYSCALL_DEFINE2(newlstat, const char __user *, filename,
		struct stat __user *, statbuf)
{
	struct kstat stat;
	int error;

	error = vfs_lstat(filename, &stat);
	if (error)
		return error;

	return cp_new_stat(&stat, statbuf);
}

#if !defined(__ARCH_WANT_STAT64) || defined(__ARCH_WANT_SYS_NEWFSTATAT)
SYSCALL_DEFINE4(newfstatat, int, dfd, const char __user *, filename,
		struct stat __user *, statbuf, int, flag)
{
	struct kstat stat;
	int error;

	error = vfs_fstatat(dfd, filename, &stat, flag);
	if (error)
		return error;
	return cp_new_stat(&stat, statbuf);
}
#endif

SYSCALL_DEFINE2(newfstat, unsigned int, fd, struct stat __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_fstat(fd, &stat);

	if (!error)
		error = cp_new_stat(&stat, statbuf);

	return error;
}
#endif

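/*
 * Shared implementation of readlink(2) and readlinkat(2).  The name is
 * resolved with LOOKUP_EMPTY so that an empty path string can be detected
 * and turned into -ENOENT rather than failing the lookup outright.  The
 * target must be a symlink or an inode implementing ->readlink (e.g. AFS
 * mountpoints); after the security check the atime is touched and the link
 * body is copied to the user buffer.
 */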
static int do_readlinkat(int dfd, const char __user *pathname,
			 char __user *buf, int bufsiz)
{
	struct path path;
	int error;
	int empty = 0;
	unsigned int lookup_flags = LOOKUP_EMPTY;

	if (bufsiz <= 0)
		return -EINVAL;

retry:
	error = user_path_at_empty(dfd, pathname, lookup_flags, &path, &empty);
	if (!error) {
		struct inode *inode = d_backing_inode(path.dentry);

		error = empty ? -ENOENT : -EINVAL;
		/*
		 * AFS mountpoints allow readlink(2) but are not symlinks
		 */
		if (d_is_symlink(path.dentry) || inode->i_op->readlink) {
			error = security_inode_readlink(path.dentry);
			if (!error) {
				touch_atime(&path);
				error = vfs_readlink(path.dentry, buf, bufsiz);
			}
		}
		path_put(&path);
		if (retry_estale(error, lookup_flags)) {
			lookup_flags |= LOOKUP_REVAL;
			goto retry;
		}
	}
	return error;
}

SYSCALL_DEFINE4(readlinkat, int, dfd, const char __user *, pathname,
		char __user *, buf, int, bufsiz)
{
	return do_readlinkat(dfd, pathname, buf, bufsiz);
}

SYSCALL_DEFINE3(readlink, const char __user *, path, char __user *, buf,
		int, bufsiz)
{
	return do_readlinkat(AT_FDCWD, path, buf, bufsiz);
}


/* ---------- LFS-64 ----------- */
#if defined(__ARCH_WANT_STAT64) || defined(__ARCH_WANT_COMPAT_STAT64)

#ifndef INIT_STRUCT_STAT64_PADDING
#  define INIT_STRUCT_STAT64_PADDING(st) memset(&st, 0, sizeof(st))
#endif

static long cp_new_stat64(struct kstat *stat, struct stat64 __user *statbuf)
{
	struct stat64 tmp;

	INIT_STRUCT_STAT64_PADDING(tmp);
#ifdef CONFIG_MIPS
	/* mips has weird padding, so we don't get 64 bits there */
	tmp.st_dev = new_encode_dev(stat->dev);
	tmp.st_rdev = new_encode_dev(stat->rdev);
#else
	tmp.st_dev = huge_encode_dev(stat->dev);
	tmp.st_rdev = huge_encode_dev(stat->rdev);
#endif
	tmp.st_ino = stat->ino;
	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
		return -EOVERFLOW;
#ifdef STAT64_HAS_BROKEN_ST_INO
	tmp.__st_ino = stat->ino;
#endif
	tmp.st_mode = stat->mode;
	tmp.st_nlink = stat->nlink;
	tmp.st_uid = from_kuid_munged(current_user_ns(), stat->uid);
	tmp.st_gid = from_kgid_munged(current_user_ns(), stat->gid);
	tmp.st_atime = stat->atime.tv_sec;
	tmp.st_atime_nsec = stat->atime.tv_nsec;
	tmp.st_mtime = stat->mtime.tv_sec;
	tmp.st_mtime_nsec = stat->mtime.tv_nsec;
	tmp.st_ctime = stat->ctime.tv_sec;
	tmp.st_ctime_nsec = stat->ctime.tv_nsec;
	tmp.st_size = stat->size;
	tmp.st_blocks = stat->blocks;
	tmp.st_blksize = stat->blksize;
	return copy_to_user(statbuf, &tmp, sizeof(tmp)) ? -EFAULT : 0;
}

SYSCALL_DEFINE2(stat64, const char __user *, filename,
		struct stat64 __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_stat(filename, &stat);

	if (!error)
		error = cp_new_stat64(&stat, statbuf);

	return error;
}

SYSCALL_DEFINE2(lstat64, const char __user *, filename,
		struct stat64 __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_lstat(filename, &stat);

	if (!error)
		error = cp_new_stat64(&stat, statbuf);

	return error;
}

SYSCALL_DEFINE2(fstat64, unsigned long, fd, struct stat64 __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_fstat(fd, &stat);

	if (!error)
		error = cp_new_stat64(&stat, statbuf);

	return error;
}

SYSCALL_DEFINE4(fstatat64, int, dfd, const char __user *, filename,
		struct stat64 __user *, statbuf, int, flag)
{
	struct kstat stat;
	int error;

	error = vfs_fstatat(dfd, filename, &stat, flag);
	if (error)
		return error;
	return cp_new_stat64(&stat, statbuf);
}
#endif /* __ARCH_WANT_STAT64 || __ARCH_WANT_COMPAT_STAT64 */

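/*
 * Flatten a kstat into the userspace struct statx.  The structure is zeroed
 * first so that padding and fields we do not set read back as zero;
 * noinline_for_stack keeps the large on-stack struct statx out of the
 * caller's frame.
 */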
noinline_for_stack int
cp_statx(const struct kstat *stat, struct statx __user *buffer)
{
	struct statx tmp;

	memset(&tmp, 0, sizeof(tmp));

	tmp.stx_mask = stat->result_mask;
	tmp.stx_blksize = stat->blksize;
	tmp.stx_attributes = stat->attributes;
	tmp.stx_nlink = stat->nlink;
	tmp.stx_uid = from_kuid_munged(current_user_ns(), stat->uid);
	tmp.stx_gid = from_kgid_munged(current_user_ns(), stat->gid);
	tmp.stx_mode = stat->mode;
	tmp.stx_ino = stat->ino;
	tmp.stx_size = stat->size;
	tmp.stx_blocks = stat->blocks;
	tmp.stx_attributes_mask = stat->attributes_mask;
	tmp.stx_atime.tv_sec = stat->atime.tv_sec;
	tmp.stx_atime.tv_nsec = stat->atime.tv_nsec;
	tmp.stx_btime.tv_sec = stat->btime.tv_sec;
	tmp.stx_btime.tv_nsec = stat->btime.tv_nsec;
	tmp.stx_ctime.tv_sec = stat->ctime.tv_sec;
	tmp.stx_ctime.tv_nsec = stat->ctime.tv_nsec;
	tmp.stx_mtime.tv_sec = stat->mtime.tv_sec;
	tmp.stx_mtime.tv_nsec = stat->mtime.tv_nsec;
	tmp.stx_rdev_major = MAJOR(stat->rdev);
	tmp.stx_rdev_minor = MINOR(stat->rdev);
	tmp.stx_dev_major = MAJOR(stat->dev);
	tmp.stx_dev_minor = MINOR(stat->dev);

	return copy_to_user(buffer, &tmp, sizeof(tmp)) ? -EFAULT : 0;
}

/**
 * sys_statx - System call to get enhanced stats
 * @dfd: Base directory to pathwalk from *or* fd to stat.
 * @filename: File to stat or "" with AT_EMPTY_PATH
 * @flags: AT_* flags to control pathwalk.
 * @mask: Parts of statx struct actually required.
 * @buffer: Result buffer.
 *
 * Note that fstat() can be emulated by setting dfd to the fd of interest,
 * supplying "" as the filename and setting AT_EMPTY_PATH in the flags.
 */
SYSCALL_DEFINE5(statx,
		int, dfd, const char __user *, filename, unsigned, flags,
		unsigned int, mask,
		struct statx __user *, buffer)
{
	struct kstat stat;
	int error;

	if (mask & STATX__RESERVED)
		return -EINVAL;
	if ((flags & AT_STATX_SYNC_TYPE) == AT_STATX_SYNC_TYPE)
		return -EINVAL;

	error = vfs_statx(dfd, filename, flags, &stat, mask);
	if (error)
		return error;

	return cp_statx(&stat, buffer);
}
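
/*
 * Illustrative userspace usage (a sketch, not part of this file): a basic
 * query and the fstat()-style form described above look roughly like
 *
 *	struct statx stx;
 *
 *	statx(AT_FDCWD, "/some/path", 0,
 *	      STATX_BASIC_STATS | STATX_BTIME, &stx);
 *	statx(fd, "", AT_EMPTY_PATH, STATX_BASIC_STATS, &stx);
 */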

#ifdef CONFIG_COMPAT
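/*
 * Compat path: convert a kstat into the 32-bit struct compat_stat layout for
 * 32-bit tasks running on a 64-bit kernel, applying the same device, inode
 * number, link count and file size overflow checks as the native 32-bit
 * cp_new_stat().
 */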
static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
{
	struct compat_stat tmp;

	if (!old_valid_dev(stat->dev) || !old_valid_dev(stat->rdev))
		return -EOVERFLOW;

	memset(&tmp, 0, sizeof(tmp));
	tmp.st_dev = old_encode_dev(stat->dev);
	tmp.st_ino = stat->ino;
	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
		return -EOVERFLOW;
	tmp.st_mode = stat->mode;
	tmp.st_nlink = stat->nlink;
	if (tmp.st_nlink != stat->nlink)
		return -EOVERFLOW;
	SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
	SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
	tmp.st_rdev = old_encode_dev(stat->rdev);
	if ((u64) stat->size > MAX_NON_LFS)
		return -EOVERFLOW;
	tmp.st_size = stat->size;
	tmp.st_atime = stat->atime.tv_sec;
	tmp.st_atime_nsec = stat->atime.tv_nsec;
	tmp.st_mtime = stat->mtime.tv_sec;
	tmp.st_mtime_nsec = stat->mtime.tv_nsec;
	tmp.st_ctime = stat->ctime.tv_sec;
	tmp.st_ctime_nsec = stat->ctime.tv_nsec;
	tmp.st_blocks = stat->blocks;
	tmp.st_blksize = stat->blksize;
	return copy_to_user(ubuf, &tmp, sizeof(tmp)) ? -EFAULT : 0;
}

COMPAT_SYSCALL_DEFINE2(newstat, const char __user *, filename,
		       struct compat_stat __user *, statbuf)
{
	struct kstat stat;
	int error;

	error = vfs_stat(filename, &stat);
	if (error)
		return error;
	return cp_compat_stat(&stat, statbuf);
}

COMPAT_SYSCALL_DEFINE2(newlstat, const char __user *, filename,
		       struct compat_stat __user *, statbuf)
{
	struct kstat stat;
	int error;

	error = vfs_lstat(filename, &stat);
	if (error)
		return error;
	return cp_compat_stat(&stat, statbuf);
}

#ifndef __ARCH_WANT_STAT64
COMPAT_SYSCALL_DEFINE4(newfstatat, unsigned int, dfd,
		       const char __user *, filename,
		       struct compat_stat __user *, statbuf, int, flag)
{
	struct kstat stat;
	int error;

	error = vfs_fstatat(dfd, filename, &stat, flag);
	if (error)
		return error;
	return cp_compat_stat(&stat, statbuf);
}
#endif

COMPAT_SYSCALL_DEFINE2(newfstat, unsigned int, fd,
		       struct compat_stat __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_fstat(fd, &stat);

	if (!error)
		error = cp_compat_stat(&stat, statbuf);
	return error;
}
#endif

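/*
 * inode->i_blocks counts 512-byte units and inode->i_bytes holds the
 * remaining 0-511 bytes; the helpers below keep that pair normalised as
 * byte counts are added to or subtracted from an inode.
 */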
/* Caller is responsible for sufficient locking (i.e. inode->i_lock) */
void __inode_add_bytes(struct inode *inode, loff_t bytes)
{
	inode->i_blocks += bytes >> 9;
	bytes &= 511;
	inode->i_bytes += bytes;
	if (inode->i_bytes >= 512) {
		inode->i_blocks++;
		inode->i_bytes -= 512;
	}
}
EXPORT_SYMBOL(__inode_add_bytes);

void inode_add_bytes(struct inode *inode, loff_t bytes)
{
	spin_lock(&inode->i_lock);
	__inode_add_bytes(inode, bytes);
	spin_unlock(&inode->i_lock);
}

EXPORT_SYMBOL(inode_add_bytes);

void __inode_sub_bytes(struct inode *inode, loff_t bytes)
{
	inode->i_blocks -= bytes >> 9;
	bytes &= 511;
	if (inode->i_bytes < bytes) {
		inode->i_blocks--;
		inode->i_bytes += 512;
	}
	inode->i_bytes -= bytes;
}

EXPORT_SYMBOL(__inode_sub_bytes);

void inode_sub_bytes(struct inode *inode, loff_t bytes)
{
	spin_lock(&inode->i_lock);
	__inode_sub_bytes(inode, bytes);
	spin_unlock(&inode->i_lock);
}

EXPORT_SYMBOL(inode_sub_bytes);

loff_t inode_get_bytes(struct inode *inode)
{
	loff_t ret;

	spin_lock(&inode->i_lock);
	ret = __inode_get_bytes(inode);
	spin_unlock(&inode->i_lock);
	return ret;
}

EXPORT_SYMBOL(inode_get_bytes);

void inode_set_bytes(struct inode *inode, loff_t bytes)
{
	/* Caller is responsible for sufficient locking
	 * (i.e. inode->i_lock) */
	inode->i_blocks = bytes >> 9;
	inode->i_bytes = bytes & 511;
}

EXPORT_SYMBOL(inode_set_bytes);
