xref: /openbmc/linux/fs/stat.c (revision e7065e20)
/*
 *  linux/fs/stat.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/export.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/highuid.h>
#include <linux/fs.h>
#include <linux/namei.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/pagemap.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>

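/*
 * generic_fillattr - fill in the basic attributes from the inode struct
 *
 * Copies the attributes that are already cached in the VFS inode into
 * @stat.  Filesystems without their own ->getattr() get exactly this
 * via vfs_getattr().
 */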
void generic_fillattr(struct inode *inode, struct kstat *stat)
{
	stat->dev = inode->i_sb->s_dev;
	stat->ino = inode->i_ino;
	stat->mode = inode->i_mode;
	stat->nlink = inode->i_nlink;
	stat->uid = inode->i_uid;
	stat->gid = inode->i_gid;
	stat->rdev = inode->i_rdev;
	stat->size = i_size_read(inode);
	stat->atime = inode->i_atime;
	stat->mtime = inode->i_mtime;
	stat->ctime = inode->i_ctime;
	stat->blksize = (1 << inode->i_blkbits);
	stat->blocks = inode->i_blocks;
}

EXPORT_SYMBOL(generic_fillattr);

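/*
 * vfs_getattr - get attributes of a mount/dentry pair
 *
 * Asks the security layer first, then lets the filesystem's ->getattr()
 * method fill in @stat; falls back to generic_fillattr() when no
 * ->getattr() method is provided.
 */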
int vfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
{
	struct inode *inode = dentry->d_inode;
	int retval;

	retval = security_inode_getattr(mnt, dentry);
	if (retval)
		return retval;

	if (inode->i_op->getattr)
		return inode->i_op->getattr(mnt, dentry, stat);

	generic_fillattr(inode, stat);
	return 0;
}

EXPORT_SYMBOL(vfs_getattr);

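/*
 * vfs_fstat - get attributes of an open file
 *
 * Looks up the struct file behind @fd and stats the path it refers to;
 * returns -EBADF if the descriptor is not valid.
 */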
int vfs_fstat(unsigned int fd, struct kstat *stat)
{
	struct file *f = fget(fd);
	int error = -EBADF;

	if (f) {
		error = vfs_getattr(f->f_path.mnt, f->f_path.dentry, stat);
		fput(f);
	}
	return error;
}
EXPORT_SYMBOL(vfs_fstat);

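/*
 * vfs_fstatat - get attributes of a path relative to a directory fd
 *
 * Only AT_SYMLINK_NOFOLLOW, AT_NO_AUTOMOUNT and AT_EMPTY_PATH are valid
 * in @flag; anything else fails with -EINVAL.  The flags are translated
 * into the matching LOOKUP_* bits before the path walk.
 */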
int vfs_fstatat(int dfd, const char __user *filename, struct kstat *stat,
		int flag)
{
	struct path path;
	int error = -EINVAL;
	int lookup_flags = 0;

	if ((flag & ~(AT_SYMLINK_NOFOLLOW | AT_NO_AUTOMOUNT |
		      AT_EMPTY_PATH)) != 0)
		goto out;

	if (!(flag & AT_SYMLINK_NOFOLLOW))
		lookup_flags |= LOOKUP_FOLLOW;
	if (flag & AT_EMPTY_PATH)
		lookup_flags |= LOOKUP_EMPTY;

	error = user_path_at(dfd, filename, lookup_flags, &path);
	if (error)
		goto out;

	error = vfs_getattr(path.mnt, path.dentry, stat);
	path_put(&path);
out:
	return error;
}
EXPORT_SYMBOL(vfs_fstatat);

int vfs_stat(const char __user *name, struct kstat *stat)
{
	return vfs_fstatat(AT_FDCWD, name, stat, 0);
}
EXPORT_SYMBOL(vfs_stat);

int vfs_lstat(const char __user *name, struct kstat *stat)
{
	return vfs_fstatat(AT_FDCWD, name, stat, AT_SYMLINK_NOFOLLOW);
}
EXPORT_SYMBOL(vfs_lstat);


#ifdef __ARCH_WANT_OLD_STAT

/*
 * For backward compatibility?  Maybe this should be moved
 * into arch/i386 instead?
 */
static int cp_old_stat(struct kstat *stat, struct __old_kernel_stat __user *statbuf)
{
	static int warncount = 5;
	struct __old_kernel_stat tmp;

	if (warncount > 0) {
		warncount--;
		printk(KERN_WARNING "VFS: Warning: %s using old stat() call. Recompile your binary.\n",
			current->comm);
	} else if (warncount < 0) {
		/* it's laughable, but... */
		warncount = 0;
	}

	memset(&tmp, 0, sizeof(struct __old_kernel_stat));
	tmp.st_dev = old_encode_dev(stat->dev);
	tmp.st_ino = stat->ino;
	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
		return -EOVERFLOW;
	tmp.st_mode = stat->mode;
	tmp.st_nlink = stat->nlink;
	if (tmp.st_nlink != stat->nlink)
		return -EOVERFLOW;
	SET_UID(tmp.st_uid, stat->uid);
	SET_GID(tmp.st_gid, stat->gid);
	tmp.st_rdev = old_encode_dev(stat->rdev);
#if BITS_PER_LONG == 32
	if (stat->size > MAX_NON_LFS)
		return -EOVERFLOW;
#endif
	tmp.st_size = stat->size;
	tmp.st_atime = stat->atime.tv_sec;
	tmp.st_mtime = stat->mtime.tv_sec;
	tmp.st_ctime = stat->ctime.tv_sec;
	return copy_to_user(statbuf, &tmp, sizeof(tmp)) ? -EFAULT : 0;
}

SYSCALL_DEFINE2(stat, const char __user *, filename,
		struct __old_kernel_stat __user *, statbuf)
{
	struct kstat stat;
	int error;

	error = vfs_stat(filename, &stat);
	if (error)
		return error;

	return cp_old_stat(&stat, statbuf);
}

SYSCALL_DEFINE2(lstat, const char __user *, filename,
		struct __old_kernel_stat __user *, statbuf)
{
	struct kstat stat;
	int error;

	error = vfs_lstat(filename, &stat);
	if (error)
		return error;

	return cp_old_stat(&stat, statbuf);
}

SYSCALL_DEFINE2(fstat, unsigned int, fd, struct __old_kernel_stat __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_fstat(fd, &stat);

	if (!error)
		error = cp_old_stat(&stat, statbuf);

	return error;
}

#endif /* __ARCH_WANT_OLD_STAT */

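/*
 * Copy a kstat into the userspace 'struct stat', returning -EOVERFLOW
 * whenever a value (device numbers, inode number, link count or, on
 * 32-bit, the file size) does not fit the user-visible field.
 */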
static int cp_new_stat(struct kstat *stat, struct stat __user *statbuf)
{
	struct stat tmp;

#if BITS_PER_LONG == 32
	if (!old_valid_dev(stat->dev) || !old_valid_dev(stat->rdev))
		return -EOVERFLOW;
#else
	if (!new_valid_dev(stat->dev) || !new_valid_dev(stat->rdev))
		return -EOVERFLOW;
#endif

	memset(&tmp, 0, sizeof(tmp));
#if BITS_PER_LONG == 32
	tmp.st_dev = old_encode_dev(stat->dev);
#else
	tmp.st_dev = new_encode_dev(stat->dev);
#endif
	tmp.st_ino = stat->ino;
	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
		return -EOVERFLOW;
	tmp.st_mode = stat->mode;
	tmp.st_nlink = stat->nlink;
	if (tmp.st_nlink != stat->nlink)
		return -EOVERFLOW;
	SET_UID(tmp.st_uid, stat->uid);
	SET_GID(tmp.st_gid, stat->gid);
#if BITS_PER_LONG == 32
	tmp.st_rdev = old_encode_dev(stat->rdev);
#else
	tmp.st_rdev = new_encode_dev(stat->rdev);
#endif
#if BITS_PER_LONG == 32
	if (stat->size > MAX_NON_LFS)
		return -EOVERFLOW;
#endif
	tmp.st_size = stat->size;
	tmp.st_atime = stat->atime.tv_sec;
	tmp.st_mtime = stat->mtime.tv_sec;
	tmp.st_ctime = stat->ctime.tv_sec;
#ifdef STAT_HAVE_NSEC
	tmp.st_atime_nsec = stat->atime.tv_nsec;
	tmp.st_mtime_nsec = stat->mtime.tv_nsec;
	tmp.st_ctime_nsec = stat->ctime.tv_nsec;
#endif
	tmp.st_blocks = stat->blocks;
	tmp.st_blksize = stat->blksize;
	return copy_to_user(statbuf, &tmp, sizeof(tmp)) ? -EFAULT : 0;
}

SYSCALL_DEFINE2(newstat, const char __user *, filename,
		struct stat __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_stat(filename, &stat);

	if (error)
		return error;
	return cp_new_stat(&stat, statbuf);
}

SYSCALL_DEFINE2(newlstat, const char __user *, filename,
		struct stat __user *, statbuf)
{
	struct kstat stat;
	int error;

	error = vfs_lstat(filename, &stat);
	if (error)
		return error;

	return cp_new_stat(&stat, statbuf);
}

#if !defined(__ARCH_WANT_STAT64) || defined(__ARCH_WANT_SYS_NEWFSTATAT)
SYSCALL_DEFINE4(newfstatat, int, dfd, const char __user *, filename,
		struct stat __user *, statbuf, int, flag)
{
	struct kstat stat;
	int error;

	error = vfs_fstatat(dfd, filename, &stat, flag);
	if (error)
		return error;
	return cp_new_stat(&stat, statbuf);
}
#endif

SYSCALL_DEFINE2(newfstat, unsigned int, fd, struct stat __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_fstat(fd, &stat);

	if (!error)
		error = cp_new_stat(&stat, statbuf);

	return error;
}

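/*
 * readlinkat - read the target of a symbolic link
 *
 * The LOOKUP_EMPTY lookup lets an empty @pathname refer to @dfd itself.
 * Objects without a ->readlink() method yield -EINVAL (or -ENOENT when
 * the path was empty); otherwise atime is touched and the filesystem
 * copies the link body into @buf.
 */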
SYSCALL_DEFINE4(readlinkat, int, dfd, const char __user *, pathname,
		char __user *, buf, int, bufsiz)
{
	struct path path;
	int error;
	int empty = 0;

	if (bufsiz <= 0)
		return -EINVAL;

	error = user_path_at_empty(dfd, pathname, LOOKUP_EMPTY, &path, &empty);
	if (!error) {
		struct inode *inode = path.dentry->d_inode;

		error = empty ? -ENOENT : -EINVAL;
		if (inode->i_op->readlink) {
			error = security_inode_readlink(path.dentry);
			if (!error) {
				touch_atime(&path);
				error = inode->i_op->readlink(path.dentry,
							      buf, bufsiz);
			}
		}
		path_put(&path);
	}
	return error;
}

SYSCALL_DEFINE3(readlink, const char __user *, path, char __user *, buf,
		int, bufsiz)
{
	return sys_readlinkat(AT_FDCWD, path, buf, bufsiz);
}


/* ---------- LFS-64 ----------- */
#ifdef __ARCH_WANT_STAT64

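/*
 * Copy a kstat into the userspace 'struct stat64'.  MIPS keeps the old
 * 32-bit device number encoding because of padding in its stat64
 * layout; other architectures get the full huge_encode_dev() value.
 */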
static long cp_new_stat64(struct kstat *stat, struct stat64 __user *statbuf)
{
	struct stat64 tmp;

	memset(&tmp, 0, sizeof(struct stat64));
#ifdef CONFIG_MIPS
	/* mips has weird padding, so we don't get 64 bits there */
	if (!new_valid_dev(stat->dev) || !new_valid_dev(stat->rdev))
		return -EOVERFLOW;
	tmp.st_dev = new_encode_dev(stat->dev);
	tmp.st_rdev = new_encode_dev(stat->rdev);
#else
	tmp.st_dev = huge_encode_dev(stat->dev);
	tmp.st_rdev = huge_encode_dev(stat->rdev);
#endif
	tmp.st_ino = stat->ino;
	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
		return -EOVERFLOW;
#ifdef STAT64_HAS_BROKEN_ST_INO
	tmp.__st_ino = stat->ino;
#endif
	tmp.st_mode = stat->mode;
	tmp.st_nlink = stat->nlink;
	tmp.st_uid = stat->uid;
	tmp.st_gid = stat->gid;
	tmp.st_atime = stat->atime.tv_sec;
	tmp.st_atime_nsec = stat->atime.tv_nsec;
	tmp.st_mtime = stat->mtime.tv_sec;
	tmp.st_mtime_nsec = stat->mtime.tv_nsec;
	tmp.st_ctime = stat->ctime.tv_sec;
	tmp.st_ctime_nsec = stat->ctime.tv_nsec;
	tmp.st_size = stat->size;
	tmp.st_blocks = stat->blocks;
	tmp.st_blksize = stat->blksize;
	return copy_to_user(statbuf, &tmp, sizeof(tmp)) ? -EFAULT : 0;
}

SYSCALL_DEFINE2(stat64, const char __user *, filename,
		struct stat64 __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_stat(filename, &stat);

	if (!error)
		error = cp_new_stat64(&stat, statbuf);

	return error;
}

SYSCALL_DEFINE2(lstat64, const char __user *, filename,
		struct stat64 __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_lstat(filename, &stat);

	if (!error)
		error = cp_new_stat64(&stat, statbuf);

	return error;
}

SYSCALL_DEFINE2(fstat64, unsigned long, fd, struct stat64 __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_fstat(fd, &stat);

	if (!error)
		error = cp_new_stat64(&stat, statbuf);

	return error;
}

SYSCALL_DEFINE4(fstatat64, int, dfd, const char __user *, filename,
		struct stat64 __user *, statbuf, int, flag)
{
	struct kstat stat;
	int error;

	error = vfs_fstatat(dfd, filename, &stat, flag);
	if (error)
		return error;
	return cp_new_stat64(&stat, statbuf);
}
#endif /* __ARCH_WANT_STAT64 */

/* The caller is responsible for sufficient locking (i.e. inode->i_lock). */
void __inode_add_bytes(struct inode *inode, loff_t bytes)
{
	inode->i_blocks += bytes >> 9;
	bytes &= 511;
	inode->i_bytes += bytes;
	if (inode->i_bytes >= 512) {
		inode->i_blocks++;
		inode->i_bytes -= 512;
	}
}

void inode_add_bytes(struct inode *inode, loff_t bytes)
{
	spin_lock(&inode->i_lock);
	__inode_add_bytes(inode, bytes);
	spin_unlock(&inode->i_lock);
}

EXPORT_SYMBOL(inode_add_bytes);

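/*
 * Reverse of inode_add_bytes(): borrow one 512-byte block from i_blocks
 * when the sub-block remainder in i_bytes would go negative.
 */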
void inode_sub_bytes(struct inode *inode, loff_t bytes)
{
	spin_lock(&inode->i_lock);
	inode->i_blocks -= bytes >> 9;
	bytes &= 511;
	if (inode->i_bytes < bytes) {
		inode->i_blocks--;
		inode->i_bytes += 512;
	}
	inode->i_bytes -= bytes;
	spin_unlock(&inode->i_lock);
}

EXPORT_SYMBOL(inode_sub_bytes);

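/* Return the byte count carried in i_blocks/i_bytes, sampled under i_lock. */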
loff_t inode_get_bytes(struct inode *inode)
{
	loff_t ret;

	spin_lock(&inode->i_lock);
	ret = (((loff_t)inode->i_blocks) << 9) + inode->i_bytes;
	spin_unlock(&inode->i_lock);
	return ret;
}

EXPORT_SYMBOL(inode_get_bytes);

void inode_set_bytes(struct inode *inode, loff_t bytes)
{
	/* The caller is responsible for sufficient locking (i.e. inode->i_lock). */
	inode->i_blocks = bytes >> 9;
	inode->i_bytes = bytes & 511;
}

EXPORT_SYMBOL(inode_set_bytes);