/*
 *  linux/fs/file_table.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */

#include <linux/string.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/security.h>
#include <linux/eventpoll.h>
#include <linux/rcupdate.h>
#include <linux/mount.h>
#include <linux/capability.h>
#include <linux/cdev.h>
#include <linux/fsnotify.h>
#include <linux/sysctl.h>
#include <linux/lglock.h>
#include <linux/percpu_counter.h>
#include <linux/percpu.h>
#include <linux/ima.h>

#include <linux/atomic.h>

#include "internal.h"

/* sysctl tunables... */
struct files_stat_struct files_stat = {
	.max_files = NR_FILE
};

DECLARE_LGLOCK(files_lglock);
DEFINE_LGLOCK(files_lglock);

/* SLAB cache for file structures */
static struct kmem_cache *filp_cachep __read_mostly;

static struct percpu_counter nr_files __cacheline_aligned_in_smp;

static inline void file_free_rcu(struct rcu_head *head)
{
	struct file *f = container_of(head, struct file, f_u.fu_rcuhead);

	put_cred(f->f_cred);
	kmem_cache_free(filp_cachep, f);
}

static inline void file_free(struct file *f)
{
	percpu_counter_dec(&nr_files);
	file_check_state(f);
	call_rcu(&f->f_u.fu_rcuhead, file_free_rcu);
}

/*
 * Return the total number of open files in the system
 */
static long get_nr_files(void)
{
	return percpu_counter_read_positive(&nr_files);
}

/*
 * Return the maximum number of open files in the system
 */
unsigned long get_max_files(void)
{
	return files_stat.max_files;
}
EXPORT_SYMBOL_GPL(get_max_files);

/*
 * Handle nr_files sysctl
 */
#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
int proc_nr_files(ctl_table *table, int write,
                     void __user *buffer, size_t *lenp, loff_t *ppos)
{
	files_stat.nr_files = get_nr_files();
	return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}
#else
int proc_nr_files(ctl_table *table, int write,
                     void __user *buffer, size_t *lenp, loff_t *ppos)
{
	return -ENOSYS;
}
#endif

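/*
 * Illustration only (compiled out): roughly how a handler like
 * proc_nr_files is hooked up as a sysctl.  The real "file-nr" entry
 * lives in kernel/sysctl.c, not in this file; the table below is a
 * hedged sketch of its shape, so that reading /proc/sys/fs/file-nr
 * reports nr_files, nr_free_files and max_files.
 */
#if 0
static struct ctl_table example_fs_table[] = {
	{
		.procname	= "file-nr",
		.data		= &files_stat,
		.maxlen		= sizeof(files_stat),
		.mode		= 0444,
		.proc_handler	= proc_nr_files,
	},
	{ }
};
#endif
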
/* Find an unused file structure and return a pointer to it.
 * Returns NULL if there are no more free file structures or
 * we run out of memory.
 *
 * Be very careful using this.  You are responsible for
 * getting write access to any mount that you might assign
 * to this filp, if it is opened for write.  If this is not
 * done, you will get an imbalance in the mount's writer count
 * and a warning at __fput() time.
 */
struct file *get_empty_filp(void)
{
	const struct cred *cred = current_cred();
	static long old_max;
	struct file *f;

	/*
	 * Privileged users can go above max_files
	 */
	if (get_nr_files() >= files_stat.max_files && !capable(CAP_SYS_ADMIN)) {
		/*
		 * percpu_counters are inaccurate.  Do an expensive check before
		 * we go and fail.
		 */
		if (percpu_counter_sum_positive(&nr_files) >= files_stat.max_files)
			goto over;
	}

	f = kmem_cache_zalloc(filp_cachep, GFP_KERNEL);
	if (f == NULL)
		goto fail;

	percpu_counter_inc(&nr_files);
	f->f_cred = get_cred(cred);
	if (security_file_alloc(f))
		goto fail_sec;

	INIT_LIST_HEAD(&f->f_u.fu_list);
	atomic_long_set(&f->f_count, 1);
	rwlock_init(&f->f_owner.lock);
	spin_lock_init(&f->f_lock);
	eventpoll_init_file(f);
	/* f->f_version: 0 */
	return f;

over:
	/* Ran out of filps - report that */
	if (get_nr_files() > old_max) {
		pr_info("VFS: file-max limit %lu reached\n", get_max_files());
		old_max = get_nr_files();
	}
	goto fail;

fail_sec:
	file_free(f);
fail:
	return NULL;
}

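/*
 * Hedged usage sketch (compiled out; example_setup() is hypothetical):
 * a caller that obtains a bare filp from get_empty_filp() and bails out
 * before the file is fully set up should release it with put_filp(),
 * not fput(), since no path or f_op has been installed yet.
 */
#if 0
static struct file *example_bare_filp(void)
{
	struct file *f = get_empty_filp();

	if (!f)
		return NULL;
	if (example_setup(f) < 0) {
		put_filp(f);		/* never installed: not fput() */
		return NULL;
	}
	return f;
}
#endif
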
/**
 * alloc_file - allocate and initialize a 'struct file'
 * @path: the (dentry, vfsmount) pair for the new file
 * @mode: the mode with which the new file will be opened
 * @fop: the 'struct file_operations' for the new file
 *
 * Use this instead of get_empty_filp() to get a new
 * 'struct file': it takes care of the initialization
 * pitfalls noted above get_empty_filp(), in particular
 * the mount writer accounting for files opened for write.
 * On success the caller's references on @path are
 * transferred to the returned file.
 */
struct file *alloc_file(struct path *path, fmode_t mode,
		const struct file_operations *fop)
{
	struct file *file;

	file = get_empty_filp();
	if (!file)
		return NULL;

	file->f_path = *path;
	file->f_mapping = path->dentry->d_inode->i_mapping;
	file->f_mode = mode;
	file->f_op = fop;

	/*
	 * These mounts don't really matter in practice
	 * for r/o bind mounts.  They aren't userspace-
	 * visible.  We do this for consistency, and so
	 * that we can do debugging checks at __fput()
	 */
	if ((mode & FMODE_WRITE) && !special_file(path->dentry->d_inode->i_mode)) {
		file_take_write(file);
		WARN_ON(mnt_clone_write(path->mnt));
	}
	if ((mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ)
		i_readcount_inc(path->dentry->d_inode);
	return file;
}
EXPORT_SYMBOL(alloc_file);

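/*
 * Hedged usage sketch (compiled out; example_fops is hypothetical):
 * a typical caller pins a dentry and a vfsmount, then hands both
 * references to alloc_file() via the path.  On success the new file
 * owns them and a later fput() drops them; on failure the caller
 * must drop them itself.
 */
#if 0
static struct file *example_open_pinned(struct vfsmount *mnt,
					struct dentry *dentry)
{
	struct path path;
	struct file *file;

	path.mnt = mntget(mnt);
	path.dentry = dget(dentry);
	file = alloc_file(&path, FMODE_READ, &example_fops);
	if (!file)
		path_put(&path);	/* failure: drop our references */
	return file;
}
#endif
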
/**
 * drop_file_write_access - give up ability to write to a file
 * @file: the file to which we will stop writing
 *
 * This is a central place which will give up the ability
 * to write to @file, along with access to write through
 * its vfsmount.
 */
static void drop_file_write_access(struct file *file)
{
	struct vfsmount *mnt = file->f_path.mnt;
	struct dentry *dentry = file->f_path.dentry;
	struct inode *inode = dentry->d_inode;

	put_write_access(inode);

	if (special_file(inode->i_mode))
		return;
	if (file_check_writeable(file) != 0)
		return;
	mnt_drop_write(mnt);
	file_release_write(file);
}

/* The real guts of fput() - releasing the last reference to the file. */
static void __fput(struct file *file)
{
	struct dentry *dentry = file->f_path.dentry;
	struct vfsmount *mnt = file->f_path.mnt;
	struct inode *inode = dentry->d_inode;

	might_sleep();

	fsnotify_close(file);
	/*
	 * The function eventpoll_release() should be the first called
	 * in the file cleanup chain.
	 */
	eventpoll_release(file);
	locks_remove_flock(file);

	if (unlikely(file->f_flags & FASYNC)) {
		if (file->f_op && file->f_op->fasync)
			file->f_op->fasync(-1, file, 0);
	}
	if (file->f_op && file->f_op->release)
		file->f_op->release(inode, file);
	security_file_free(file);
	ima_file_free(file);
	if (unlikely(S_ISCHR(inode->i_mode) && inode->i_cdev != NULL &&
		     !(file->f_mode & FMODE_PATH))) {
		cdev_put(inode->i_cdev);
	}
	fops_put(file->f_op);
	put_pid(file->f_owner.pid);
	file_sb_list_del(file);
	if ((file->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ)
		i_readcount_dec(inode);
	if (file->f_mode & FMODE_WRITE)
		drop_file_write_access(file);
	file->f_path.dentry = NULL;
	file->f_path.mnt = NULL;
	file_free(file);
	dput(dentry);
	mntput(mnt);
}

void fput(struct file *file)
{
	if (atomic_long_dec_and_test(&file->f_count))
		__fput(file);
}
EXPORT_SYMBOL(fput);

struct file *fget(unsigned int fd)
{
	struct file *file;
	struct files_struct *files = current->files;

	rcu_read_lock();
	file = fcheck_files(files, fd);
	if (file) {
		/* File object ref couldn't be taken */
		if (file->f_mode & FMODE_PATH ||
		    !atomic_long_inc_not_zero(&file->f_count))
			file = NULL;
	}
	rcu_read_unlock();

	return file;
}
EXPORT_SYMBOL(fget);
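
/*
 * Hedged usage sketch (compiled out): the canonical pattern around
 * fget() is to treat NULL as -EBADF and to drop the reference with
 * fput() on every path once the file is no longer needed.
 */
#if 0
static long example_read_f_flags(unsigned int fd)
{
	struct file *file = fget(fd);
	long flags;

	if (!file)
		return -EBADF;
	flags = file->f_flags;
	fput(file);
	return flags;
}
#endif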

struct file *fget_raw(unsigned int fd)
{
	struct file *file;
	struct files_struct *files = current->files;

	rcu_read_lock();
	file = fcheck_files(files, fd);
	if (file) {
		/* File object ref couldn't be taken */
		if (!atomic_long_inc_not_zero(&file->f_count))
			file = NULL;
	}
	rcu_read_unlock();

	return file;
}
EXPORT_SYMBOL(fget_raw);

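/*
 * Hedged illustration (compiled out): O_PATH descriptors carry
 * FMODE_PATH, so fget() above refuses them while fget_raw() still
 * returns them; this helper reports which kind an fd is.
 */
#if 0
static int example_fd_is_opath(unsigned int fd)
{
	struct file *file = fget_raw(fd);
	int ret;

	if (!file)
		return -EBADF;
	ret = !!(file->f_mode & FMODE_PATH);
	fput(file);
	return ret;
}
#endif
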
/*
 * Lightweight file lookup - no refcnt increment if fd table isn't shared.
 *
 * You can use this instead of fget if you satisfy all of the following
 * conditions:
 * 1) You must call fput_light before exiting the syscall and returning control
 *    to userspace (i.e. you cannot remember the returned struct file * after
 *    returning to userspace).
 * 2) You must not call filp_close on the returned struct file * in between
 *    calls to fget_light and fput_light.
 * 3) You must not clone the current task in between the calls to fget_light
 *    and fput_light.
 *
 * The fput_needed flag returned by fget_light should be passed to the
 * corresponding fput_light (see the usage sketch after this function).
 */
struct file *fget_light(unsigned int fd, int *fput_needed)
{
	struct file *file;
	struct files_struct *files = current->files;

	*fput_needed = 0;
	if (atomic_read(&files->count) == 1) {
		file = fcheck_files(files, fd);
		if (file && (file->f_mode & FMODE_PATH))
			file = NULL;
	} else {
		rcu_read_lock();
		file = fcheck_files(files, fd);
		if (file) {
			if (!(file->f_mode & FMODE_PATH) &&
			    atomic_long_inc_not_zero(&file->f_count))
				*fput_needed = 1;
			else
				/* Didn't get the reference, someone's freed */
				file = NULL;
		}
		rcu_read_unlock();
	}

	return file;
}

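/*
 * Hedged usage sketch (compiled out): the fput_needed flag from
 * fget_light() must be handed to fput_light(), which only drops the
 * reference if one was actually taken.  This is the shape of a
 * typical syscall body.
 */
#if 0
static long example_syscall_body(unsigned int fd)
{
	int fput_needed;
	struct file *file = fget_light(fd, &fput_needed);
	long ret = -EBADF;

	if (file) {
		ret = 0;	/* ... operate on the file here ... */
		fput_light(file, fput_needed);
	}
	return ret;
}
#endif
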
struct file *fget_raw_light(unsigned int fd, int *fput_needed)
{
	struct file *file;
	struct files_struct *files = current->files;

	*fput_needed = 0;
	if (atomic_read(&files->count) == 1) {
		file = fcheck_files(files, fd);
	} else {
		rcu_read_lock();
		file = fcheck_files(files, fd);
		if (file) {
			if (atomic_long_inc_not_zero(&file->f_count))
				*fput_needed = 1;
			else
				/* Didn't get the reference, someone's freed */
				file = NULL;
		}
		rcu_read_unlock();
	}

	return file;
}

void put_filp(struct file *file)
{
	if (atomic_long_dec_and_test(&file->f_count)) {
		security_file_free(file);
		file_sb_list_del(file);
		file_free(file);
	}
}

static inline int file_list_cpu(struct file *file)
{
#ifdef CONFIG_SMP
	return file->f_sb_list_cpu;
#else
	return smp_processor_id();
#endif
}

/* helper for file_sb_list_add to reduce ifdefs */
static inline void __file_sb_list_add(struct file *file, struct super_block *sb)
{
	struct list_head *list;
#ifdef CONFIG_SMP
	int cpu;
	cpu = smp_processor_id();
	file->f_sb_list_cpu = cpu;
	list = per_cpu_ptr(sb->s_files, cpu);
#else
	list = &sb->s_files;
#endif
	list_add(&file->f_u.fu_list, list);
}

/**
 * file_sb_list_add - add a file to the sb's file list
 * @file: file to add
 * @sb: sb to add it to
 *
 * Use this function to associate a file with the superblock of the inode it
 * refers to.
 */
void file_sb_list_add(struct file *file, struct super_block *sb)
{
	lg_local_lock(files_lglock);
	__file_sb_list_add(file, sb);
	lg_local_unlock(files_lglock);
}
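
/*
 * Hedged illustration (compiled out): open-time code of this era
 * (e.g. __dentry_open() in fs/open.c) associates a new file with the
 * superblock of its inode roughly like this.
 */
#if 0
static void example_associate(struct file *f, struct inode *inode)
{
	file_sb_list_add(f, inode->i_sb);
}
#endif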

/**
 * file_sb_list_del - remove a file from the sb's file list
 * @file: file to remove
 *
 * Use this function to remove a file from its superblock.
 */
void file_sb_list_del(struct file *file)
{
	if (!list_empty(&file->f_u.fu_list)) {
		lg_local_lock_cpu(files_lglock, file_list_cpu(file));
		list_del_init(&file->f_u.fu_list);
		lg_local_unlock_cpu(files_lglock, file_list_cpu(file));
	}
}

#ifdef CONFIG_SMP

/*
 * These macros iterate all files on all CPUs for a given superblock.
 * files_lglock must be held globally.
 */
#define do_file_list_for_each_entry(__sb, __file)		\
{								\
	int i;							\
	for_each_possible_cpu(i) {				\
		struct list_head *list;				\
		list = per_cpu_ptr((__sb)->s_files, i);		\
		list_for_each_entry((__file), list, f_u.fu_list)

#define while_file_list_for_each_entry				\
	}							\
}

#else

#define do_file_list_for_each_entry(__sb, __file)		\
{								\
	struct list_head *list;					\
	list = &(__sb)->s_files;				\
	list_for_each_entry((__file), list, f_u.fu_list)

#define while_file_list_for_each_entry				\
}

#endif

/**
 *	mark_files_ro - mark all files read-only
 *	@sb: superblock in question
 *
 *	All files are marked read-only.  We don't care about files
 *	pending deletion, so this should be used in 'force' mode only.
 */
void mark_files_ro(struct super_block *sb)
{
	struct file *f;

retry:
	lg_global_lock(files_lglock);
	do_file_list_for_each_entry(sb, f) {
		struct vfsmount *mnt;
		if (!S_ISREG(f->f_path.dentry->d_inode->i_mode))
			continue;
		if (!file_count(f))
			continue;
		if (!(f->f_mode & FMODE_WRITE))
			continue;
		spin_lock(&f->f_lock);
		f->f_mode &= ~FMODE_WRITE;
		spin_unlock(&f->f_lock);
		if (file_check_writeable(f) != 0)
			continue;
		file_release_write(f);
		mnt = mntget(f->f_path.mnt);
		/* This can sleep, so we can't hold the lglock. */
		lg_global_unlock(files_lglock);
		mnt_drop_write(mnt);
		mntput(mnt);
		goto retry;
	} while_file_list_for_each_entry;
	lg_global_unlock(files_lglock);
}

void __init files_init(unsigned long mempages)
{
	unsigned long n;

	filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0,
			SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);

	/*
	 * One file with associated inode and dcache is very roughly 1K.
	 * By default, don't use more than 10% of our memory for files.
	 */

	n = (mempages * (PAGE_SIZE / 1024)) / 10;
	files_stat.max_files = max_t(unsigned long, n, NR_FILE);
	files_defer_init();
	lg_lock_init(files_lglock);
	percpu_counter_init(&nr_files, 0);
}
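
/*
 * Worked example of the sizing heuristic above (a sketch, assuming
 * 4 GiB of RAM and 4 KiB pages): mempages = 1048576, so
 * n = (1048576 * (4096 / 1024)) / 10 = 419430, and max_files becomes
 * max(419430, NR_FILE) = 419430 on such a machine.
 */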
531