/*
 *  linux/fs/file_table.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */

#include <linux/string.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/security.h>
#include <linux/eventpoll.h>
#include <linux/rcupdate.h>
#include <linux/mount.h>
#include <linux/capability.h>
#include <linux/cdev.h>
#include <linux/fsnotify.h>
#include <linux/sysctl.h>
#include <linux/percpu_counter.h>
#include <linux/ima.h>

#include <asm/atomic.h>

#include "internal.h"

/* sysctl tunables... */
struct files_stat_struct files_stat = {
        .max_files = NR_FILE
};

/* public. Not pretty! */
__cacheline_aligned_in_smp DEFINE_SPINLOCK(files_lock);

/* SLAB cache for file structures */
static struct kmem_cache *filp_cachep __read_mostly;

static struct percpu_counter nr_files __cacheline_aligned_in_smp;

static inline void file_free_rcu(struct rcu_head *head)
{
        struct file *f = container_of(head, struct file, f_u.fu_rcuhead);

        put_cred(f->f_cred);
        kmem_cache_free(filp_cachep, f);
}

static inline void file_free(struct file *f)
{
        percpu_counter_dec(&nr_files);
        file_check_state(f);
        call_rcu(&f->f_u.fu_rcuhead, file_free_rcu);
}

/*
 * Return the total number of open files in the system
 */
static int get_nr_files(void)
{
        return percpu_counter_read_positive(&nr_files);
}

/*
 * Return the maximum number of open files in the system
 */
int get_max_files(void)
{
        return files_stat.max_files;
}
EXPORT_SYMBOL_GPL(get_max_files);

/*
 * Handle nr_files sysctl
 */
#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
int proc_nr_files(ctl_table *table, int write,
                  void __user *buffer, size_t *lenp, loff_t *ppos)
{
        files_stat.nr_files = get_nr_files();
        return proc_dointvec(table, write, buffer, lenp, ppos);
}
#else
int proc_nr_files(ctl_table *table, int write,
                  void __user *buffer, size_t *lenp, loff_t *ppos)
{
        return -ENOSYS;
}
#endif
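
/*
 * For reference, this handler is wired up from the "fs" sysctl table.
 * A sketch of the table entry (the real definition lives in
 * kernel/sysctl.c; the field values below are illustrative, not a
 * verbatim copy):
 *
 *      {
 *              .procname       = "file-nr",
 *              .data           = &files_stat,
 *              .maxlen         = sizeof(files_stat),
 *              .mode           = 0444,
 *              .proc_handler   = proc_nr_files,
 *      },
 *
 * Reading /proc/sys/fs/file-nr then reports the current, free and
 * maximum file counts from files_stat via proc_dointvec().
 */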

/* Find an unused file structure and return a pointer to it.
 * Returns NULL if there are no more free file structures or
 * we run out of memory.
 *
 * Be very careful using this.  You are responsible for
 * getting write access to any mount that you might assign
 * to this filp, if it is opened for write.  If this is not
 * done, you will imbalance the mount's writer count
 * and get a warning at __fput() time.
 */
struct file *get_empty_filp(void)
{
        const struct cred *cred = current_cred();
        static int old_max;
        struct file *f;

        /*
         * Privileged users can go above max_files
         */
        if (get_nr_files() >= files_stat.max_files && !capable(CAP_SYS_ADMIN)) {
                /*
                 * percpu_counters are inaccurate.  Do an expensive check before
                 * we go and fail.
                 */
                if (percpu_counter_sum_positive(&nr_files) >= files_stat.max_files)
                        goto over;
        }

        f = kmem_cache_zalloc(filp_cachep, GFP_KERNEL);
        if (f == NULL)
                goto fail;

        percpu_counter_inc(&nr_files);
        if (security_file_alloc(f))
                goto fail_sec;

        INIT_LIST_HEAD(&f->f_u.fu_list);
        atomic_long_set(&f->f_count, 1);
        rwlock_init(&f->f_owner.lock);
        f->f_cred = get_cred(cred);
        spin_lock_init(&f->f_lock);
        eventpoll_init_file(f);
        /* f->f_version: 0 */
        return f;

over:
        /* Ran out of filps - report that */
        if (get_nr_files() > old_max) {
                printk(KERN_INFO "VFS: file-max limit %d reached\n",
                       get_max_files());
                old_max = get_nr_files();
        }
        goto fail;

fail_sec:
        file_free(f);
fail:
        return NULL;
}

/**
 * alloc_file - allocate and initialize a 'struct file'
 * @path: the (vfsmount, dentry) pair the new file will reside on
 * @mode: the mode with which the new file will be opened
 * @fop: the 'struct file_operations' for the new file
 *
 * Use this instead of get_empty_filp() to get a new 'struct file';
 * it avoids the initialization pitfalls described for
 * get_empty_filp() above.
 */
struct file *alloc_file(struct path *path, fmode_t mode,
                        const struct file_operations *fop)
{
        struct file *file;

        file = get_empty_filp();
        if (!file)
                return NULL;

        file->f_path = *path;
        file->f_mapping = path->dentry->d_inode->i_mapping;
        file->f_mode = mode;
        file->f_op = fop;

        /*
         * These mounts don't really matter in practice
         * for r/o bind mounts.  They aren't userspace-
         * visible.  We do this for consistency, and so
         * that we can do debugging checks at __fput().
         */
        if ((mode & FMODE_WRITE) && !special_file(path->dentry->d_inode->i_mode)) {
                file_take_write(file);
                WARN_ON(mnt_clone_write(path->mnt));
        }
        ima_counts_get(file);
        return file;
}
EXPORT_SYMBOL(alloc_file);
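
/*
 * A sketch of the write-access pairing described above (illustrative
 * only; the real open paths live in fs/open.c, fs/namei.c and the
 * other alloc_file() callers).  A caller opening for write takes
 * write access on the mount before attaching it to the new file:
 *
 *      error = mnt_want_write(path->mnt);
 *      if (error)
 *              return ERR_PTR(error);
 *      file = alloc_file(path, FMODE_READ | FMODE_WRITE, fops);
 *      if (!file) {
 *              mnt_drop_write(path->mnt);
 *              return ERR_PTR(-ENFILE);
 *      }
 *
 * alloc_file() clones that write access into the file via
 * mnt_clone_write(), and drop_file_write_access() releases the clone
 * again from __fput() when the last reference goes away.
 */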

void fput(struct file *file)
{
        if (atomic_long_dec_and_test(&file->f_count))
                __fput(file);
}
EXPORT_SYMBOL(fput);

/**
 * drop_file_write_access - give up ability to write to a file
 * @file: the file to which we will stop writing
 *
 * This is a central place which will give up the ability
 * to write to @file, along with access to write through
 * its vfsmount.
 */
void drop_file_write_access(struct file *file)
{
        struct vfsmount *mnt = file->f_path.mnt;
        struct dentry *dentry = file->f_path.dentry;
        struct inode *inode = dentry->d_inode;

        put_write_access(inode);

        if (special_file(inode->i_mode))
                return;
        if (file_check_writeable(file) != 0)
                return;
        mnt_drop_write(mnt);
        file_release_write(file);
}
EXPORT_SYMBOL_GPL(drop_file_write_access);

/* __fput is called from task context when aio completion releases the
 * last use of a struct file *.  Do not use otherwise.
 */
void __fput(struct file *file)
{
        struct dentry *dentry = file->f_path.dentry;
        struct vfsmount *mnt = file->f_path.mnt;
        struct inode *inode = dentry->d_inode;

        might_sleep();

        fsnotify_close(file);
        /*
         * The function eventpoll_release() should be the first called
         * in the file cleanup chain.
         */
        eventpoll_release(file);
        locks_remove_flock(file);

        if (unlikely(file->f_flags & FASYNC)) {
                if (file->f_op && file->f_op->fasync)
                        file->f_op->fasync(-1, file, 0);
        }
        if (file->f_op && file->f_op->release)
                file->f_op->release(inode, file);
        security_file_free(file);
        if (unlikely(S_ISCHR(inode->i_mode) && inode->i_cdev != NULL))
                cdev_put(inode->i_cdev);
        fops_put(file->f_op);
        put_pid(file->f_owner.pid);
        file_kill(file);
        if (file->f_mode & FMODE_WRITE)
                drop_file_write_access(file);
        file->f_path.dentry = NULL;
        file->f_path.mnt = NULL;
        file_free(file);
        dput(dentry);
        mntput(mnt);
}

struct file *fget(unsigned int fd)
{
        struct file *file;
        struct files_struct *files = current->files;

        rcu_read_lock();
        file = fcheck_files(files, fd);
        if (file) {
                if (!atomic_long_inc_not_zero(&file->f_count)) {
                        /* File object ref couldn't be taken */
                        rcu_read_unlock();
                        return NULL;
                }
        }
        rcu_read_unlock();

        return file;
}
EXPORT_SYMBOL(fget);

/*
 * Lightweight file lookup - no refcnt increment if fd table isn't shared.
 * You can use this only if it is guaranteed that the current task already
 * holds a refcnt to that file.  That check has to be done at fget() only
 * and a flag is returned to be passed to the corresponding fput_light().
 * There must be no cloning of the fd table between an fget_light/fput_light
 * pair.
 */
struct file *fget_light(unsigned int fd, int *fput_needed)
{
        struct file *file;
        struct files_struct *files = current->files;

        *fput_needed = 0;
        if (likely(atomic_read(&files->count) == 1)) {
                file = fcheck_files(files, fd);
        } else {
                rcu_read_lock();
                file = fcheck_files(files, fd);
                if (file) {
                        if (atomic_long_inc_not_zero(&file->f_count))
                                *fput_needed = 1;
                        else
                                /* Didn't get the reference, someone's freed it */
                                file = NULL;
                }
                rcu_read_unlock();
        }

        return file;
}
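
/*
 * Typical use of the lightweight pair described above (an illustrative
 * sketch; fput_light() is the static inline from <linux/file.h>):
 *
 *      int fput_needed;
 *      struct file *file;
 *
 *      file = fget_light(fd, &fput_needed);
 *      if (!file)
 *              return -EBADF;
 *      ... operate on the file ...
 *      fput_light(file, fput_needed);
 */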

/*
 * Drop a reference to a file that never made it through a full open;
 * unlike fput(), this skips the __fput() teardown (no ->release(),
 * no dput()/mntput()).
 */
void put_filp(struct file *file)
{
        if (atomic_long_dec_and_test(&file->f_count)) {
                security_file_free(file);
                file_kill(file);
                file_free(file);
        }
}

/* Move @file onto @list (typically a superblock's s_files list). */
void file_move(struct file *file, struct list_head *list)
{
        if (!list)
                return;
        file_list_lock();
        list_move(&file->f_u.fu_list, list);
        file_list_unlock();
}

/* Remove @file from whatever per-superblock list it is on. */
void file_kill(struct file *file)
{
        if (!list_empty(&file->f_u.fu_list)) {
                file_list_lock();
                list_del_init(&file->f_u.fu_list);
                file_list_unlock();
        }
}

int fs_may_remount_ro(struct super_block *sb)
{
        struct file *file;

        /* Check that no files are currently opened for writing. */
        file_list_lock();
        list_for_each_entry(file, &sb->s_files, f_u.fu_list) {
                struct inode *inode = file->f_path.dentry->d_inode;

                /* File with pending delete? */
                if (inode->i_nlink == 0)
                        goto too_bad;

                /* Writeable file? */
                if (S_ISREG(inode->i_mode) && (file->f_mode & FMODE_WRITE))
                        goto too_bad;
        }
        file_list_unlock();
        return 1; /* Tis' cool bro. */
too_bad:
        file_list_unlock();
        return 0;
}

/**
 * mark_files_ro - mark all files read-only
 * @sb: superblock in question
 *
 * All files are marked read-only.  We don't care about pending
 * delete files so this should be used in 'force' mode only.
 */
void mark_files_ro(struct super_block *sb)
{
        struct file *f;

retry:
        file_list_lock();
        list_for_each_entry(f, &sb->s_files, f_u.fu_list) {
                struct vfsmount *mnt;
                if (!S_ISREG(f->f_path.dentry->d_inode->i_mode))
                        continue;
                if (!file_count(f))
                        continue;
                if (!(f->f_mode & FMODE_WRITE))
                        continue;
                f->f_mode &= ~FMODE_WRITE;
                if (file_check_writeable(f) != 0)
                        continue;
                file_release_write(f);
                mnt = mntget(f->f_path.mnt);
                file_list_unlock();
                /*
                 * This can sleep, so we can't hold
                 * the file_list_lock() spinlock.
                 */
                mnt_drop_write(mnt);
                mntput(mnt);
                goto retry;
        }
        file_list_unlock();
}

void __init files_init(unsigned long mempages)
{
        int n;

        filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0,
                        SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);

        /*
         * One file with associated inode and dcache is very roughly 1K.
         * Per default don't use more than 10% of our memory for files.
         */

        n = (mempages * (PAGE_SIZE / 1024)) / 10;
        files_stat.max_files = n;
        if (files_stat.max_files < NR_FILE)
                files_stat.max_files = NR_FILE;
        files_defer_init();
        percpu_counter_init(&nr_files, 0);
}
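
/*
 * Worked example of the files_init() sizing above, assuming 4 KiB
 * pages: with 1 GiB of RAM, mempages = 262144 and PAGE_SIZE / 1024 = 4,
 * so n = 262144 * 4 / 10 = 104857.  At roughly 1K per open file that
 * is about 100 MB, i.e. 10% of memory; smaller machines where the
 * result would fall below NR_FILE are clamped up to that floor.
 */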