xref: /openbmc/linux/fs/file_table.c (revision 1da177e4c3f41524e886b7f1b8a0c1fc7321cac2)
/*
 *  linux/fs/file_table.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */

#include <linux/string.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/smp_lock.h>
#include <linux/fs.h>
#include <linux/security.h>
#include <linux/eventpoll.h>
#include <linux/mount.h>
#include <linux/cdev.h>

/* sysctl tunables... */
struct files_stat_struct files_stat = {
	.max_files = NR_FILE
};

EXPORT_SYMBOL(files_stat); /* Needed by unix.o */

/* public. Not pretty! */
__cacheline_aligned_in_smp DEFINE_SPINLOCK(files_lock);

static DEFINE_SPINLOCK(filp_count_lock);

/* slab constructors and destructors are called from arbitrary
 * context and must be fully threaded - use a local spinlock
 * to protect files_stat.nr_files
 */
void filp_ctor(void *objp, struct kmem_cache_s *cachep, unsigned long cflags)
{
	if ((cflags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
	    SLAB_CTOR_CONSTRUCTOR) {
		unsigned long flags;
		spin_lock_irqsave(&filp_count_lock, flags);
		files_stat.nr_files++;
		spin_unlock_irqrestore(&filp_count_lock, flags);
	}
}

void filp_dtor(void *objp, struct kmem_cache_s *cachep, unsigned long dflags)
{
	unsigned long flags;
	spin_lock_irqsave(&filp_count_lock, flags);
	files_stat.nr_files--;
	spin_unlock_irqrestore(&filp_count_lock, flags);
}
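
/*
 * Sketch, not part of the original file: filp_cachep itself is created
 * elsewhere (fs/dcache.c in this tree), roughly as below.  Shown only to
 * illustrate how filp_ctor/filp_dtor are wired into the slab allocator;
 * kept under #if 0 so it does not compile here.
 */
#if 0
	filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC, filp_ctor, filp_dtor);
#endif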

static inline void file_free(struct file *f)
{
	kmem_cache_free(filp_cachep, f);
}

/* Find an unused file structure and return a pointer to it.
 * Returns NULL if there are no more free file structures or
 * we run out of memory.
 */
struct file *get_empty_filp(void)
{
	static int old_max;
	struct file *f;

	/*
	 * Privileged users can go above max_files
	 */
	if (files_stat.nr_files < files_stat.max_files ||
				capable(CAP_SYS_ADMIN)) {
		f = kmem_cache_alloc(filp_cachep, GFP_KERNEL);
		if (f) {
			memset(f, 0, sizeof(*f));
			if (security_file_alloc(f)) {
				file_free(f);
				goto fail;
			}
			eventpoll_init_file(f);
			atomic_set(&f->f_count, 1);
			f->f_uid = current->fsuid;
			f->f_gid = current->fsgid;
			rwlock_init(&f->f_owner.lock);
			/* f->f_version: 0 */
			INIT_LIST_HEAD(&f->f_list);
			f->f_maxcount = INT_MAX;
			return f;
		}
	}

	/* Ran out of filps - report that */
	if (files_stat.max_files >= old_max) {
		printk(KERN_INFO "VFS: file-max limit %d reached\n",
					files_stat.max_files);
		old_max = files_stat.max_files;
	} else {
		/* Big problems... */
		printk(KERN_WARNING "VFS: filp allocation failed\n");
	}
fail:
	return NULL;
}

EXPORT_SYMBOL(get_empty_filp);
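
/*
 * Sketch, not part of the original file: the usual allocation pattern.
 * A filp that fails later setup must be released with put_filp() (see
 * below), since it has no dentry or vfsmount yet for __fput() to drop.
 */
#if 0
	struct file *f = get_empty_filp();

	if (f == NULL)
		return -ENFILE;
	/* ... and if a later setup step fails: */
	put_filp(f);
#endif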

void fastcall fput(struct file *file)
{
	if (atomic_dec_and_test(&file->f_count))
		__fput(file);
}

EXPORT_SYMBOL(fput);

/* __fput is called from task context when aio completion releases the
 * last use of a struct file *.  Do not use otherwise.
 */
void fastcall __fput(struct file *file)
{
	struct dentry *dentry = file->f_dentry;
	struct vfsmount *mnt = file->f_vfsmnt;
	struct inode *inode = dentry->d_inode;

	might_sleep();
	/*
	 * The function eventpoll_release() should be the first called
	 * in the file cleanup chain.
	 */
	eventpoll_release(file);
	locks_remove_flock(file);

	if (file->f_op && file->f_op->release)
		file->f_op->release(inode, file);
	security_file_free(file);
	if (unlikely(inode->i_cdev != NULL))
		cdev_put(inode->i_cdev);
	fops_put(file->f_op);
	if (file->f_mode & FMODE_WRITE)
		put_write_access(inode);
	file_kill(file);
	file->f_dentry = NULL;
	file->f_vfsmnt = NULL;
	file_free(file);
	dput(dentry);
	mntput(mnt);
}
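
/*
 * Note: dentry, mnt and inode are grabbed into locals up front so that
 * dput() and mntput() can run last, after file_free(); the dentry
 * reference keeps the inode pinned while ->release() and the
 * write-access accounting still use it.
 */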

struct file fastcall *fget(unsigned int fd)
{
	struct file *file;
	struct files_struct *files = current->files;

	spin_lock(&files->file_lock);
	file = fcheck_files(files, fd);
	if (file)
		get_file(file);
	spin_unlock(&files->file_lock);
	return file;
}

EXPORT_SYMBOL(fget);
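
/*
 * Sketch, not part of the original file: the canonical fget()/fput()
 * pairing around a descriptor.  example_use_fd() is hypothetical; while
 * the f_count reference is held, the struct file cannot be freed.
 */
#if 0
static int example_use_fd(unsigned int fd)
{
	struct file *file = fget(fd);

	if (!file)
		return -EBADF;
	/* ... file->f_op, file->f_dentry etc. are safe to use here ... */
	fput(file);
	return 0;
}
#endif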

/*
 * Lightweight file lookup - no refcnt increment if fd table isn't shared.
 * You can use this only if it is guaranteed that the current task already
 * holds a refcnt to that file.  The check has to be done at fget() time
 * only, and a flag is returned to be passed to the corresponding
 * fput_light().  The fd table must not be cloned between an
 * fget_light()/fput_light() pair.
 */
struct file fastcall *fget_light(unsigned int fd, int *fput_needed)
{
	struct file *file;
	struct files_struct *files = current->files;

	*fput_needed = 0;
	if (likely((atomic_read(&files->count) == 1))) {
		file = fcheck_files(files, fd);
	} else {
		spin_lock(&files->file_lock);
		file = fcheck_files(files, fd);
		if (file) {
			get_file(file);
			*fput_needed = 1;
		}
		spin_unlock(&files->file_lock);
	}
	return file;
}
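
/*
 * Sketch, not part of the original file: the intended calling pattern.
 * example_peek_fd() is hypothetical; the fput_needed flag returned by
 * fget_light() must be handed unchanged to fput_light().
 */
#if 0
static int example_peek_fd(unsigned int fd)
{
	int fput_needed;
	struct file *file = fget_light(fd, &fput_needed);

	if (!file)
		return -EBADF;
	/* ... brief use of file; no fd-table cloning in between ... */
	fput_light(file, fput_needed);
	return 0;
}
#endif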

void put_filp(struct file *file)
{
	if (atomic_dec_and_test(&file->f_count)) {
		security_file_free(file);
		file_kill(file);
		file_free(file);
	}
}
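
/*
 * Note: put_filp() is only for a filp that never had a dentry or
 * vfsmount attached (e.g. an aborted open); a fully set up file must
 * go through fput()/__fput() so that ->release(), locks and the
 * write-access accounting all run.
 */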

void file_move(struct file *file, struct list_head *list)
{
	if (!list)
		return;
	file_list_lock();
	list_move(&file->f_list, list);
	file_list_unlock();
}

void file_kill(struct file *file)
{
	if (!list_empty(&file->f_list)) {
		file_list_lock();
		list_del_init(&file->f_list);
		file_list_unlock();
	}
}

int fs_may_remount_ro(struct super_block *sb)
{
	struct list_head *p;

	/* Check that no files are currently opened for writing. */
	file_list_lock();
	list_for_each(p, &sb->s_files) {
		struct file *file = list_entry(p, struct file, f_list);
		struct inode *inode = file->f_dentry->d_inode;

		/* File with pending delete? */
		if (inode->i_nlink == 0)
			goto too_bad;

		/* Writeable file? */
		if (S_ISREG(inode->i_mode) && (file->f_mode & FMODE_WRITE))
			goto too_bad;
	}
	file_list_unlock();
	return 1; /* Tis' cool bro. */
too_bad:
	file_list_unlock();
	return 0;
}
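
/*
 * Sketch, not part of the original file: how the remount path consults
 * this check (the real caller in this tree is do_remount_sb() in
 * fs/super.c); going read-only is refused while writers remain.
 */
#if 0
	if ((flags & MS_RDONLY) && !fs_may_remount_ro(sb))
		return -EBUSY;
#endif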

void __init files_init(unsigned long mempages)
{
	int n;
	/* One file with associated inode and dcache is very roughly 1K.
	 * By default, don't use more than 10% of our memory for files.
	 */

	n = (mempages * (PAGE_SIZE / 1024)) / 10;
	files_stat.max_files = n;
	if (files_stat.max_files < NR_FILE)
		files_stat.max_files = NR_FILE;
}
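
/*
 * Worked example, not part of the original file: with 32768 pages of
 * 4KiB each (128MiB of RAM), n = (32768 * 4) / 10 = 13107, so max_files
 * becomes 13107, above the NR_FILE floor (8192 in this tree).  On very
 * small machines the NR_FILE clamp wins instead.
 */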