/*
 *  linux/fs/file_table.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */

#include <linux/string.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/security.h>
#include <linux/cred.h>
#include <linux/eventpoll.h>
#include <linux/rcupdate.h>
#include <linux/mount.h>
#include <linux/capability.h>
#include <linux/cdev.h>
#include <linux/fsnotify.h>
#include <linux/sysctl.h>
#include <linux/percpu_counter.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/task_work.h>
#include <linux/ima.h>
#include <linux/swap.h>

#include <linux/atomic.h>

#include "internal.h"

/* sysctl tunables... */
struct files_stat_struct files_stat = {
	.max_files = NR_FILE
};

/* SLAB cache for file structures */
static struct kmem_cache *filp_cachep __read_mostly;

static struct percpu_counter nr_files __cacheline_aligned_in_smp;

static void file_free_rcu(struct rcu_head *head)
{
	struct file *f = container_of(head, struct file, f_u.fu_rcuhead);

	put_cred(f->f_cred);
	kmem_cache_free(filp_cachep, f);
}

static inline void file_free(struct file *f)
{
	percpu_counter_dec(&nr_files);
	call_rcu(&f->f_u.fu_rcuhead, file_free_rcu);
}

/*
 * Return the total number of open files in the system
 */
static long get_nr_files(void)
{
	return percpu_counter_read_positive(&nr_files);
}

/*
 * Return the maximum number of open files in the system
 */
unsigned long get_max_files(void)
{
	return files_stat.max_files;
}
EXPORT_SYMBOL_GPL(get_max_files);

/*
 * Handle nr_files sysctl
 */
#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
int proc_nr_files(struct ctl_table *table, int write,
		  void __user *buffer, size_t *lenp, loff_t *ppos)
{
	files_stat.nr_files = get_nr_files();
	return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}
#else
int proc_nr_files(struct ctl_table *table, int write,
		  void __user *buffer, size_t *lenp, loff_t *ppos)
{
	return -ENOSYS;
}
#endif
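
/*
 * Userspace-side sketch (not kernel code): the handler above backs
 * /proc/sys/fs/file-nr, so the current counters can be read with plain
 * file I/O.  The three fields are allocated file objects, allocated but
 * unused objects (0 on modern kernels), and the file-max limit.
 */
#include <stdio.h>

int main(void)
{
	unsigned long nr, nr_unused, max;
	FILE *fp = fopen("/proc/sys/fs/file-nr", "r");

	if (!fp)
		return 1;
	if (fscanf(fp, "%lu %lu %lu", &nr, &nr_unused, &max) == 3)
		printf("allocated %lu of %lu files\n", nr, max);
	fclose(fp);
	return 0;
}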

/* Find an unused file structure and return a pointer to it.
 * Returns an error pointer if some error happened, e.g. we are over the
 * file structures limit, ran out of memory or the operation is not
 * permitted.
 *
 * Be very careful using this.  You are responsible for
 * getting write access to any mount that you might assign
 * to this filp, if it is opened for write.  If this is not
 * done, you will imbalance the mount's writer count
 * and get a warning at __fput() time.
 */
struct file *get_empty_filp(void)
{
	const struct cred *cred = current_cred();
	static long old_max;
	struct file *f;
	int error;

	/*
	 * Privileged users can go above max_files
	 */
	if (get_nr_files() >= files_stat.max_files && !capable(CAP_SYS_ADMIN)) {
		/*
		 * percpu_counters are inaccurate.  Do an expensive check before
		 * we go and fail.
		 */
		if (percpu_counter_sum_positive(&nr_files) >= files_stat.max_files)
			goto over;
	}

	f = kmem_cache_zalloc(filp_cachep, GFP_KERNEL);
	if (unlikely(!f))
		return ERR_PTR(-ENOMEM);

	percpu_counter_inc(&nr_files);
	f->f_cred = get_cred(cred);
	error = security_file_alloc(f);
	if (unlikely(error)) {
		file_free(f);
		return ERR_PTR(error);
	}

	atomic_long_set(&f->f_count, 1);
	rwlock_init(&f->f_owner.lock);
	spin_lock_init(&f->f_lock);
	mutex_init(&f->f_pos_lock);
	eventpoll_init_file(f);
	/* f->f_version: 0 */
	return f;

over:
	/* Ran out of filps - report that */
	if (get_nr_files() > old_max) {
		pr_info("VFS: file-max limit %lu reached\n", get_max_files());
		old_max = get_nr_files();
	}
	return ERR_PTR(-ENFILE);
}
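
/*
 * Hypothetical caller sketch for the obligation described above (not an
 * in-tree function; "example_mark_writer" is an assumption): a filp that
 * is opened for write must carry write access that the FMODE_WRITER
 * branch of __fput() can drop again, i.e. get_write_access() on the
 * inode paired with __mnt_want_write() on the mount, as the real open
 * path does.
 */
static int example_mark_writer(struct file *f)
{
	struct inode *inode = f->f_inode;
	int err = get_write_access(inode);	/* undone by put_write_access() */

	if (err)
		return err;
	err = __mnt_want_write(f->f_path.mnt);	/* undone by __mnt_drop_write() */
	if (err) {
		put_write_access(inode);
		return err;
	}
	f->f_mode |= FMODE_WRITER;	/* tells __fput() to drop both */
	return 0;
}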

/**
 * alloc_file - allocate and initialize a 'struct file'
 *
 * @path: the (dentry, vfsmount) pair for the new file
 * @mode: the mode with which the new file will be opened
 * @fop: the 'struct file_operations' for the new file
 */
struct file *alloc_file(const struct path *path, fmode_t mode,
		const struct file_operations *fop)
{
	struct file *file;

	file = get_empty_filp();
	if (IS_ERR(file))
		return file;

	file->f_path = *path;
	file->f_inode = path->dentry->d_inode;
	file->f_mapping = path->dentry->d_inode->i_mapping;
	file->f_wb_err = filemap_sample_wb_err(file->f_mapping);
	if ((mode & FMODE_READ) &&
	     likely(fop->read || fop->read_iter))
		mode |= FMODE_CAN_READ;
	if ((mode & FMODE_WRITE) &&
	     likely(fop->write || fop->write_iter))
		mode |= FMODE_CAN_WRITE;
	file->f_mode = mode;
	file->f_op = fop;
	if ((mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ)
		i_readcount_inc(path->dentry->d_inode);
	return file;
}
EXPORT_SYMBOL(alloc_file);
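
/*
 * Hypothetical usage sketch, loosely modeled on in-kernel pseudo-files
 * such as pipes ("example_create_file" and its parameters are
 * assumptions): take references on a (dentry, vfsmount) pair, then let
 * alloc_file() build the struct file around them.
 */
static struct file *example_create_file(struct vfsmount *mnt,
					struct dentry *dentry,
					const struct file_operations *fops)
{
	struct path path;
	struct file *f;

	path.mnt = mntget(mnt);		/* alloc_file() stores the path... */
	path.dentry = dget(dentry);	/* ...so the caller donates references */

	f = alloc_file(&path, FMODE_READ, fops);
	if (IS_ERR(f))
		path_put(&path);	/* drop both references on failure */
	return f;
}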

/* the real guts of fput() - releasing the last reference to file
 */
static void __fput(struct file *file)
{
	struct dentry *dentry = file->f_path.dentry;
	struct vfsmount *mnt = file->f_path.mnt;
	struct inode *inode = file->f_inode;

	might_sleep();

	fsnotify_close(file);
	/*
	 * The function eventpoll_release() should be the first called
	 * in the file cleanup chain.
	 */
	eventpoll_release(file);
	locks_remove_file(file);

	ima_file_free(file);
	if (unlikely(file->f_flags & FASYNC)) {
		if (file->f_op->fasync)
			file->f_op->fasync(-1, file, 0);
	}
	if (file->f_op->release)
		file->f_op->release(inode, file);
	security_file_free(file);
	if (unlikely(S_ISCHR(inode->i_mode) && inode->i_cdev != NULL &&
		     !(file->f_mode & FMODE_PATH))) {
		cdev_put(inode->i_cdev);
	}
	fops_put(file->f_op);
	put_pid(file->f_owner.pid);
	if ((file->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ)
		i_readcount_dec(inode);
	if (file->f_mode & FMODE_WRITER) {
		put_write_access(inode);
		__mnt_drop_write(mnt);
	}
	file->f_path.dentry = NULL;
	file->f_path.mnt = NULL;
	file->f_inode = NULL;
	file_free(file);
	dput(dentry);
	mntput(mnt);
}
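
/*
 * Sketch of the consumer side of the f_op->release() call above
 * ("example_release"/"example_fops" are assumptions, not in-tree names):
 * a release method runs once, on the last fput() of the file, not on
 * every close(2) of a shared descriptor.
 */
static int example_release(struct inode *inode, struct file *file)
{
	kfree(file->private_data);	/* free per-open state */
	return 0;
}

static const struct file_operations example_fops = {
	.release	= example_release,
};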

static LLIST_HEAD(delayed_fput_list);
static void delayed_fput(struct work_struct *unused)
{
	struct llist_node *node = llist_del_all(&delayed_fput_list);
	struct file *f, *t;

	llist_for_each_entry_safe(f, t, node, f_u.fu_llist)
		__fput(f);
}

static void ____fput(struct callback_head *work)
{
	__fput(container_of(work, struct file, f_u.fu_rcuhead));
}

/*
 * If a kernel thread really needs the final fput() it has done to
 * complete, it should call this.  The only user right now is the boot
 * code - we *do* need to make sure our writes to binaries on initramfs
 * have not left us with an opened struct file waiting for __fput() -
 * execve() won't work without that.  Please don't add more callers
 * without very good reasons; in particular, never call this with locks
 * held and never call it from a thread that might need to do some work
 * on any kind of umount.
 */
void flush_delayed_fput(void)
{
	delayed_fput(NULL);
}
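
/*
 * Sketch of the boot-time ordering described above (the real init code
 * differs; "example_boot_step" is an assumption).  Writes done from a
 * kernel thread are finalized before anything tries to exec the result,
 * otherwise execve() could see the binary still open for write and fail
 * with -ETXTBSY.
 */
static void example_boot_step(void)
{
	/* ... initramfs unpacking writes binaries and calls fput() ... */

	flush_delayed_fput();	/* complete every pending __fput() */

	/* ... now it is safe to execve() an initramfs binary ... */
}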

static DECLARE_DELAYED_WORK(delayed_fput_work, delayed_fput);

void fput(struct file *file)
{
	if (atomic_long_dec_and_test(&file->f_count)) {
		struct task_struct *task = current;

		if (likely(!in_interrupt() && !(task->flags & PF_KTHREAD))) {
			init_task_work(&file->f_u.fu_rcuhead, ____fput);
			if (!task_work_add(task, &file->f_u.fu_rcuhead, true))
				return;
			/*
			 * After this task has run exit_task_work(),
			 * task_work_add() will fail.  Fall through to delayed
			 * fput to avoid leaking *file.
			 */
		}

		if (llist_add(&file->f_u.fu_llist, &delayed_fput_list))
			schedule_delayed_work(&delayed_fput_work, 1);
	}
}
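
/*
 * Typical reference pattern around fput(), sketched with a hypothetical
 * helper ("example_use_fd" is not in-tree): lookups by descriptor take a
 * reference with fget() and drop it with fput(); the final fput() is what
 * funnels into __fput() above.
 */
static int example_use_fd(unsigned int fd)
{
	struct file *f = fget(fd);	/* grabs a reference on success */
	int ret = -EBADF;

	if (f) {
		ret = (f->f_mode & FMODE_READ) ? 0 : -EINVAL;
		fput(f);		/* may be the final reference */
	}
	return ret;
}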

/*
 * Synchronous analog of fput(); for kernel threads that might be needed
 * in some umount() (and thus can't use flush_delayed_fput() without
 * risking deadlocks), but that need to wait for completion of __fput()
 * and know that for this specific struct file it won't involve anything
 * that would need them.  Use only if you really need it - at the very
 * least, don't blindly convert fput() by a kernel thread to this.
 */
void __fput_sync(struct file *file)
{
	if (atomic_long_dec_and_test(&file->f_count)) {
		struct task_struct *task = current;
		BUG_ON(!(task->flags & PF_KTHREAD));
		__fput(file);
	}
}

EXPORT_SYMBOL(fput);

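/*
 * Hypothetical kernel-thread sketch for __fput_sync() (not in-tree code;
 * "example_path" is an assumption): drop the last reference and have
 * __fput() run before returning, instead of the reference being queued
 * to the delayed list as plain fput() would do from PF_KTHREAD context.
 */
static int example_kthread_touch_file(const char *example_path)
{
	struct file *f = filp_open(example_path, O_RDONLY, 0);

	if (IS_ERR(f))
		return PTR_ERR(f);
	/* ... read from the file ... */
	__fput_sync(f);		/* __fput() completes before we return */
	return 0;
}
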
void put_filp(struct file *file)
{
	if (atomic_long_dec_and_test(&file->f_count)) {
		security_file_free(file);
		file_free(file);
	}
}
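
/*
 * Hypothetical error-path sketch for put_filp() ("example_half_open" is
 * an assumption): a filp from get_empty_filp() that never got a path or
 * f_op must not go through __fput(), so it is dropped with put_filp().
 */
static struct file *example_half_open(bool fail)
{
	struct file *f = get_empty_filp();

	if (IS_ERR(f))
		return f;
	if (fail) {			/* stand-in for a later setup failure */
		put_filp(f);		/* no path/f_op yet: skip __fput() */
		return ERR_PTR(-EIO);
	}
	return f;
}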

void __init files_init(void)
{
	filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0,
			SLAB_HWCACHE_ALIGN | SLAB_PANIC | SLAB_ACCOUNT, NULL);
	percpu_counter_init(&nr_files, 0, GFP_KERNEL);
}

/*
 * One file with associated inode and dcache is very roughly 1K. By default
 * do not use more than 10% of our memory for files.
 */
void __init files_maxfiles_init(void)
{
	unsigned long n;
	unsigned long memreserve = (totalram_pages - nr_free_pages()) * 3/2;

	memreserve = min(memreserve, totalram_pages - 1);
	n = ((totalram_pages - memreserve) * (PAGE_SIZE / 1024)) / 10;

	files_stat.max_files = max_t(unsigned long, n, NR_FILE);
}
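
/*
 * Worked example of the formula above, under assumed numbers
 * (8 GiB of RAM, 4 KiB pages, ~262144 pages in use at init time):
 *
 *	totalram_pages	= 2097152
 *	memreserve	= 262144 * 3/2			=  393216
 *	n		= (2097152 - 393216) * 4 / 10	=  681574
 *
 * so max_files becomes roughly 681574, and never less than NR_FILE.
 */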