/*
 *  linux/fs/file_table.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */

#include <linux/string.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/security.h>
#include <linux/eventpoll.h>
#include <linux/rcupdate.h>
#include <linux/mount.h>
#include <linux/capability.h>
#include <linux/cdev.h>
#include <linux/fsnotify.h>
#include <linux/sysctl.h>
#include <linux/lglock.h>
#include <linux/percpu_counter.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/task_work.h>
#include <linux/ima.h>

#include <linux/atomic.h>

#include "internal.h"

/* sysctl tunables... */
struct files_stat_struct files_stat = {
        .max_files = NR_FILE
};

DEFINE_STATIC_LGLOCK(files_lglock);

/* SLAB cache for file structures */
static struct kmem_cache *filp_cachep __read_mostly;

static struct percpu_counter nr_files __cacheline_aligned_in_smp;

static void file_free_rcu(struct rcu_head *head)
{
        struct file *f = container_of(head, struct file, f_u.fu_rcuhead);

        put_cred(f->f_cred);
        kmem_cache_free(filp_cachep, f);
}

static inline void file_free(struct file *f)
{
        percpu_counter_dec(&nr_files);
        file_check_state(f);
        call_rcu(&f->f_u.fu_rcuhead, file_free_rcu);
}

/*
 * Return the total number of open files in the system
 */
static long get_nr_files(void)
{
        return percpu_counter_read_positive(&nr_files);
}

/*
 * Return the maximum number of open files in the system
 */
unsigned long get_max_files(void)
{
        return files_stat.max_files;
}
EXPORT_SYMBOL_GPL(get_max_files);

/*
 * Handle nr_files sysctl
 */
#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
int proc_nr_files(ctl_table *table, int write,
                  void __user *buffer, size_t *lenp, loff_t *ppos)
{
        files_stat.nr_files = get_nr_files();
        return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}
#else
int proc_nr_files(ctl_table *table, int write,
                  void __user *buffer, size_t *lenp, loff_t *ppos)
{
        return -ENOSYS;
}
#endif
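/*
 * For reference, a sketch of how the handler above is typically wired up
 * from the sysctl tables (the real entry lives in kernel/sysctl.c; the
 * exact field values shown here are illustrative, not authoritative):
 *
 *	{
 *		.procname	= "file-nr",
 *		.data		= &files_stat,
 *		.maxlen		= sizeof(files_stat),
 *		.mode		= 0444,
 *		.proc_handler	= proc_nr_files,
 *	},
 */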
/* Find an unused file structure and return a pointer to it.
 * Returns an error pointer if some error happened, e.g. we are over the
 * file structures limit, ran out of memory, or the operation is not
 * permitted.
 *
 * Be very careful using this.  You are responsible for
 * getting write access to any mount that you might assign
 * to this filp, if it is opened for write.  If this is not
 * done, you will imbalance the mount's writer count
 * and get a warning at __fput() time.
 */
struct file *get_empty_filp(void)
{
        const struct cred *cred = current_cred();
        static long old_max;
        struct file *f;
        int error;

        /*
         * Privileged users can go above max_files
         */
        if (get_nr_files() >= files_stat.max_files && !capable(CAP_SYS_ADMIN)) {
                /*
                 * percpu_counters are inaccurate.  Do an expensive check
                 * before we go and fail.
                 */
                if (percpu_counter_sum_positive(&nr_files) >= files_stat.max_files)
                        goto over;
        }

        f = kmem_cache_zalloc(filp_cachep, GFP_KERNEL);
        if (unlikely(!f))
                return ERR_PTR(-ENOMEM);

        percpu_counter_inc(&nr_files);
        f->f_cred = get_cred(cred);
        error = security_file_alloc(f);
        if (unlikely(error)) {
                file_free(f);
                return ERR_PTR(error);
        }

        INIT_LIST_HEAD(&f->f_u.fu_list);
        atomic_long_set(&f->f_count, 1);
        rwlock_init(&f->f_owner.lock);
        spin_lock_init(&f->f_lock);
        eventpoll_init_file(f);
        /* f->f_version: 0 */
        return f;

over:
        /* Ran out of filps - report that */
        if (get_nr_files() > old_max) {
                pr_info("VFS: file-max limit %lu reached\n", get_max_files());
                old_max = get_nr_files();
        }
        return ERR_PTR(-ENFILE);
}

/**
 * alloc_file - allocate and initialize a 'struct file'
 * @path: the (dentry, vfsmount) pair the new file will point at
 * @mode: the mode with which the new file will be opened
 * @fop: the 'struct file_operations' for the new file
 *
 * Use this instead of get_empty_filp() to get a new
 * 'struct file'.  Do so because it avoids the initialization
 * pitfalls listed for init_file().  This is the preferred
 * interface to init_file().
 *
 * If all the callers of init_file() are eliminated, its
 * code should be moved into this function.
 */
struct file *alloc_file(struct path *path, fmode_t mode,
                        const struct file_operations *fop)
{
        struct file *file;

        file = get_empty_filp();
        if (IS_ERR(file))
                return file;

        file->f_path = *path;
        file->f_inode = path->dentry->d_inode;
        file->f_mapping = path->dentry->d_inode->i_mapping;
        file->f_mode = mode;
        file->f_op = fop;

        /*
         * These mounts don't really matter in practice
         * for r/o bind mounts.  They aren't userspace-
         * visible.  We do this for consistency, and so
         * that we can do debugging checks at __fput()
         */
        if ((mode & FMODE_WRITE) && !special_file(path->dentry->d_inode->i_mode)) {
                file_take_write(file);
                WARN_ON(mnt_clone_write(path->mnt));
        }
        if ((mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ)
                i_readcount_inc(path->dentry->d_inode);
        return file;
}
EXPORT_SYMBOL(alloc_file);
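/*
 * Example (an illustrative sketch, not part of this file): a minimal
 * read-only caller of alloc_file().  The caller is assumed to already
 * hold references on path->mnt and path->dentry; alloc_file() stores
 * them in file->f_path, and __fput() drops them.  Opening with
 * FMODE_WRITE would additionally require write access to the mount,
 * as described above get_empty_filp().
 *
 *	static struct file *example_open(struct path *path,
 *					 const struct file_operations *fops)
 *	{
 *		return alloc_file(path, FMODE_READ, fops);
 *	}
 */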
/**
 * drop_file_write_access - give up ability to write to a file
 * @file: the file to which we will stop writing
 *
 * This is a central place which will give up the ability
 * to write to @file, along with access to write through
 * its vfsmount.
 */
static void drop_file_write_access(struct file *file)
{
        struct vfsmount *mnt = file->f_path.mnt;
        struct dentry *dentry = file->f_path.dentry;
        struct inode *inode = dentry->d_inode;

        put_write_access(inode);

        if (special_file(inode->i_mode))
                return;
        if (file_check_writeable(file) != 0)
                return;
        __mnt_drop_write(mnt);
        file_release_write(file);
}

/* the real guts of fput() - releasing the last reference to file
 */
static void __fput(struct file *file)
{
        struct dentry *dentry = file->f_path.dentry;
        struct vfsmount *mnt = file->f_path.mnt;
        struct inode *inode = file->f_inode;

        might_sleep();

        fsnotify_close(file);
        /*
         * The function eventpoll_release() should be the first called
         * in the file cleanup chain.
         */
        eventpoll_release(file);
        locks_remove_flock(file);

        if (unlikely(file->f_flags & FASYNC)) {
                if (file->f_op && file->f_op->fasync)
                        file->f_op->fasync(-1, file, 0);
        }
        ima_file_free(file);
        if (file->f_op && file->f_op->release)
                file->f_op->release(inode, file);
        security_file_free(file);
        if (unlikely(S_ISCHR(inode->i_mode) && inode->i_cdev != NULL &&
                     !(file->f_mode & FMODE_PATH))) {
                cdev_put(inode->i_cdev);
        }
        fops_put(file->f_op);
        put_pid(file->f_owner.pid);
        if ((file->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ)
                i_readcount_dec(inode);
        if (file->f_mode & FMODE_WRITE)
                drop_file_write_access(file);
        file->f_path.dentry = NULL;
        file->f_path.mnt = NULL;
        file->f_inode = NULL;
        file_free(file);
        dput(dentry);
        mntput(mnt);
}

static LLIST_HEAD(delayed_fput_list);
static void delayed_fput(struct work_struct *unused)
{
        struct llist_node *node = llist_del_all(&delayed_fput_list);
        struct llist_node *next;

        for (; node; node = next) {
                next = llist_next(node);
                __fput(llist_entry(node, struct file, f_u.fu_llist));
        }
}

static void ____fput(struct callback_head *work)
{
        __fput(container_of(work, struct file, f_u.fu_rcuhead));
}

/*
 * If a kernel thread really needs the final fput() it has done to
 * complete, call this.  The only user right now is the boot - we
 * *do* need to make sure our writes to binaries on initramfs have
 * not left us with an opened struct file waiting for __fput() -
 * execve() won't work without that.  Please, don't add more callers
 * without very good reasons; in particular, never call this with
 * locks held and never call it from a thread that might need to do
 * some work on any kind of umount.
 */
void flush_delayed_fput(void)
{
        delayed_fput(NULL);
}

static DECLARE_WORK(delayed_fput_work, delayed_fput);

void fput(struct file *file)
{
        if (atomic_long_dec_and_test(&file->f_count)) {
                struct task_struct *task = current;

                file_sb_list_del(file);
                if (likely(!in_interrupt() && !(task->flags & PF_KTHREAD))) {
                        init_task_work(&file->f_u.fu_rcuhead, ____fput);
                        if (!task_work_add(task, &file->f_u.fu_rcuhead, true))
                                return;
                        /*
                         * After this task has run exit_task_work(),
                         * task_work_add() will fail.  Fall through to delayed
                         * fput to avoid leaking *file.
                         */
                }

                if (llist_add(&file->f_u.fu_llist, &delayed_fput_list))
                        schedule_work(&delayed_fput_work);
        }
}
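/*
 * Example (an illustrative sketch): the canonical pairing for fput().
 * fget() takes a reference on the struct file behind a file descriptor;
 * fput() drops it, and dropping the last reference defers __fput() to
 * task_work (or, for kernel threads and interrupt context, to the
 * delayed-fput work above).
 *
 *	struct file *file = fget(fd);
 *	if (!file)
 *		return -EBADF;
 *	... operate on the file ...
 *	fput(file);
 */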
/*
 * Synchronous analog of fput(); for kernel threads that might be needed
 * in some umount() (and thus can't use flush_delayed_fput() without
 * risking deadlocks), that need to wait for completion of __fput() and
 * know that for this specific struct file it won't involve anything that
 * would need them.  Use only if you really need it - at the very least,
 * don't blindly convert fput() calls by kernel threads to this.
 */
void __fput_sync(struct file *file)
{
        if (atomic_long_dec_and_test(&file->f_count)) {
                struct task_struct *task = current;
                file_sb_list_del(file);
                BUG_ON(!(task->flags & PF_KTHREAD));
                __fput(file);
        }
}

EXPORT_SYMBOL(fput);

void put_filp(struct file *file)
{
        if (atomic_long_dec_and_test(&file->f_count)) {
                security_file_free(file);
                file_sb_list_del(file);
                file_free(file);
        }
}

static inline int file_list_cpu(struct file *file)
{
#ifdef CONFIG_SMP
        return file->f_sb_list_cpu;
#else
        return smp_processor_id();
#endif
}

/* helper for file_sb_list_add to reduce ifdefs */
static inline void __file_sb_list_add(struct file *file, struct super_block *sb)
{
        struct list_head *list;
#ifdef CONFIG_SMP
        int cpu;
        cpu = smp_processor_id();
        file->f_sb_list_cpu = cpu;
        list = per_cpu_ptr(sb->s_files, cpu);
#else
        list = &sb->s_files;
#endif
        list_add(&file->f_u.fu_list, list);
}

/**
 * file_sb_list_add - add a file to the sb's file list
 * @file: file to add
 * @sb: sb to add it to
 *
 * Use this function to associate a file with the superblock of the inode it
 * refers to.
 */
void file_sb_list_add(struct file *file, struct super_block *sb)
{
        if (likely(!(file->f_mode & FMODE_WRITE)))
                return;
        if (!S_ISREG(file_inode(file)->i_mode))
                return;
        lg_local_lock(&files_lglock);
        __file_sb_list_add(file, sb);
        lg_local_unlock(&files_lglock);
}

/**
 * file_sb_list_del - remove a file from the sb's file list
 * @file: file to remove
 *
 * Use this function to remove a file from its superblock's file list.
 */
void file_sb_list_del(struct file *file)
{
        if (!list_empty(&file->f_u.fu_list)) {
                lg_local_lock_cpu(&files_lglock, file_list_cpu(file));
                list_del_init(&file->f_u.fu_list);
                lg_local_unlock_cpu(&files_lglock, file_list_cpu(file));
        }
}

#ifdef CONFIG_SMP

/*
 * These macros iterate all files on all CPUs for a given superblock.
 * files_lglock must be held globally.
 */
#define do_file_list_for_each_entry(__sb, __file)               \
{                                                               \
        int i;                                                  \
        for_each_possible_cpu(i) {                              \
                struct list_head *list;                         \
                list = per_cpu_ptr((__sb)->s_files, i);         \
                list_for_each_entry((__file), list, f_u.fu_list)

#define while_file_list_for_each_entry                          \
        }                                                       \
}

#else

#define do_file_list_for_each_entry(__sb, __file)               \
{                                                               \
        struct list_head *list;                                 \
        list = &(__sb)->s_files;                                \
        list_for_each_entry((__file), list, f_u.fu_list)

#define while_file_list_for_each_entry                          \
}

#endif
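/*
 * Usage illustration (a sketch; mark_files_ro() below is the real user):
 * the two macros above are deliberately brace-unbalanced and must be
 * used as a bracketing pair, e.g.
 *
 *	do_file_list_for_each_entry(sb, f) {
 *		... body runs once for each file f on sb's list(s) ...
 *	} while_file_list_for_each_entry;
 *
 * which, in the SMP case, expands to a for_each_possible_cpu() loop
 * around a list_for_each_entry() walk of each per-cpu s_files list.
 */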
/**
 * mark_files_ro - mark all files read-only
 * @sb: superblock in question
 *
 * All files are marked read-only.  We don't care about files with
 * pending deletes, so this should be used in 'force' mode only.
 */
void mark_files_ro(struct super_block *sb)
{
        struct file *f;

        lg_global_lock(&files_lglock);
        do_file_list_for_each_entry(sb, f) {
                if (!file_count(f))
                        continue;
                if (!(f->f_mode & FMODE_WRITE))
                        continue;
                spin_lock(&f->f_lock);
                f->f_mode &= ~FMODE_WRITE;
                spin_unlock(&f->f_lock);
                if (file_check_writeable(f) != 0)
                        continue;
                __mnt_drop_write(f->f_path.mnt);
                file_release_write(f);
        } while_file_list_for_each_entry;
        lg_global_unlock(&files_lglock);
}

void __init files_init(unsigned long mempages)
{
        unsigned long n;

        filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0,
                        SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);

        /*
         * One file with associated inode and dcache is very roughly 1K.
         * Per default don't use more than 10% of our memory for files.
         */

        n = (mempages * (PAGE_SIZE / 1024)) / 10;
        files_stat.max_files = max_t(unsigned long, n, NR_FILE);
        files_defer_init();
        lg_lock_init(&files_lglock, "files_lglock");
        percpu_counter_init(&nr_files, 0);
}
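/*
 * Sizing example for files_init() (illustrative arithmetic, not a
 * tunable): with 4KiB pages and 1GiB of memory, mempages = 262144, so
 *
 *	n = (262144 * (4096 / 1024)) / 10 = 104857
 *
 * i.e. max_files defaults to roughly 100k files per GiB of RAM, with
 * NR_FILE as the floor.
 */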