/*
 *  linux/fs/file_table.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */

#include <linux/string.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/security.h>
#include <linux/eventpoll.h>
#include <linux/rcupdate.h>
#include <linux/mount.h>
#include <linux/capability.h>
#include <linux/cdev.h>
#include <linux/fsnotify.h>
#include <linux/sysctl.h>
#include <linux/lglock.h>
#include <linux/percpu_counter.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/task_work.h>
#include <linux/ima.h>

#include <linux/atomic.h>

#include "internal.h"

/* sysctl tunables... */
struct files_stat_struct files_stat = {
        .max_files = NR_FILE
};

DEFINE_STATIC_LGLOCK(files_lglock);

/* SLAB cache for file structures */
static struct kmem_cache *filp_cachep __read_mostly;

static struct percpu_counter nr_files __cacheline_aligned_in_smp;

static void file_free_rcu(struct rcu_head *head)
{
        struct file *f = container_of(head, struct file, f_u.fu_rcuhead);

        put_cred(f->f_cred);
        kmem_cache_free(filp_cachep, f);
}

static inline void file_free(struct file *f)
{
        percpu_counter_dec(&nr_files);
        file_check_state(f);
        call_rcu(&f->f_u.fu_rcuhead, file_free_rcu);
}

/*
 * Return the total number of open files in the system
 */
static long get_nr_files(void)
{
        return percpu_counter_read_positive(&nr_files);
}

/*
 * Return the maximum number of open files in the system
 */
unsigned long get_max_files(void)
{
        return files_stat.max_files;
}
EXPORT_SYMBOL_GPL(get_max_files);

/*
 * Handle nr_files sysctl
 */
#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
int proc_nr_files(ctl_table *table, int write,
                  void __user *buffer, size_t *lenp, loff_t *ppos)
{
        files_stat.nr_files = get_nr_files();
        return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}
#else
int proc_nr_files(ctl_table *table, int write,
                  void __user *buffer, size_t *lenp, loff_t *ppos)
{
        return -ENOSYS;
}
#endif
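/*
 * Illustrative sketch (not part of this file): file_free() defers the
 * actual kmem_cache_free() through call_rcu() so that lock-free readers
 * can still dereference a struct file they found in an fd table under
 * rcu_read_lock().  A lookup in the style of fget() in fs/file.c then
 * looks roughly like the function below; it is a simplified sketch, not
 * a verbatim copy (the real fget() also rejects FMODE_PATH files), and
 * example_fget is a hypothetical name.
 */
#if 0	/* example only, not compiled */
static struct file *example_fget(unsigned int fd)
{
        struct file *file;
        struct files_struct *files = current->files;

        rcu_read_lock();
        file = fcheck_files(files, fd);	/* RCU-protected fd table lookup */
        if (file) {
                /*
                 * call_rcu() guarantees the object is not freed under us,
                 * but it may already be on its way out: only take a
                 * reference if f_count has not yet dropped to zero.
                 */
                if (!atomic_long_inc_not_zero(&file->f_count))
                        file = NULL;
        }
        rcu_read_unlock();
        return file;
}
#endif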
/*
 * Find an unused file structure and return a pointer to it.
 * Returns an error pointer if some error happened, e.g. we are over
 * the file structures limit, run out of memory or the operation is
 * not permitted.
 *
 * Be very careful using this.  You are responsible for getting write
 * access to any mount that you might assign to this filp, if it is
 * opened for write.  If this is not done, you will imbalance the
 * mount's writer count and get a warning at __fput() time.
 */
struct file *get_empty_filp(void)
{
        const struct cred *cred = current_cred();
        static long old_max;
        struct file *f;
        int error;

        /*
         * Privileged users can go above max_files
         */
        if (get_nr_files() >= files_stat.max_files && !capable(CAP_SYS_ADMIN)) {
                /*
                 * percpu_counters are inaccurate.  Do an expensive check
                 * before we go and fail.
                 */
                if (percpu_counter_sum_positive(&nr_files) >= files_stat.max_files)
                        goto over;
        }

        f = kmem_cache_zalloc(filp_cachep, GFP_KERNEL);
        if (unlikely(!f))
                return ERR_PTR(-ENOMEM);

        percpu_counter_inc(&nr_files);
        f->f_cred = get_cred(cred);
        error = security_file_alloc(f);
        if (unlikely(error)) {
                file_free(f);
                return ERR_PTR(error);
        }

        INIT_LIST_HEAD(&f->f_u.fu_list);
        atomic_long_set(&f->f_count, 1);
        rwlock_init(&f->f_owner.lock);
        spin_lock_init(&f->f_lock);
        eventpoll_init_file(f);
        /* f->f_version: 0 */
        return f;

over:
        /* Ran out of filps - report that */
        if (get_nr_files() > old_max) {
                pr_info("VFS: file-max limit %lu reached\n", get_max_files());
                old_max = get_nr_files();
        }
        return ERR_PTR(-ENFILE);
}

/**
 * alloc_file - allocate and initialize a 'struct file'
 * @path: the (dentry, vfsmount) pair on which the file will reside
 * @mode: the mode with which the new file will be opened
 * @fop: the 'struct file_operations' for the new file
 *
 * Use this instead of get_empty_filp() to get a new
 * 'struct file'.  Do so because of the same initialization
 * pitfalls listed for init_file().  This is the preferred
 * interface to init_file().
 *
 * If all the callers of init_file() are eliminated, its
 * code should be moved into this function.
 */
struct file *alloc_file(struct path *path, fmode_t mode,
                        const struct file_operations *fop)
{
        struct file *file;

        file = get_empty_filp();
        if (IS_ERR(file))
                return file;

        file->f_path = *path;
        file->f_inode = path->dentry->d_inode;
        file->f_mapping = path->dentry->d_inode->i_mapping;
        file->f_mode = mode;
        file->f_op = fop;

        /*
         * These mounts don't really matter in practice
         * for r/o bind mounts.  They aren't userspace-
         * visible.  We do this for consistency, and so
         * that we can do debugging checks at __fput()
         */
        if ((mode & FMODE_WRITE) && !special_file(path->dentry->d_inode->i_mode)) {
                file_take_write(file);
                WARN_ON(mnt_clone_write(path->mnt));
        }
        if ((mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ)
                i_readcount_inc(path->dentry->d_inode);
        return file;
}
EXPORT_SYMBOL(alloc_file);
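/*
 * Illustrative sketch (not part of this file): a typical in-kernel user
 * of alloc_file() builds a struct path holding its own dentry and mount
 * references and hands it over, roughly as anon_inode_getfile() or
 * shmem_file_setup() do.  On success those references are owned by the
 * file and dropped at __fput() time; on failure the caller must drop
 * them itself.  The names example_open_internal, example_mnt and
 * example_fops below are hypothetical.
 */
#if 0	/* example only, not compiled */
extern struct vfsmount *example_mnt;
extern const struct file_operations example_fops;

static struct file *example_open_internal(struct dentry *dentry)
{
        struct path path;
        struct file *file;

        path.dentry = dget(dentry);
        path.mnt = mntget(example_mnt);

        /*
         * FMODE_READ only; a FMODE_WRITE caller would additionally be
         * responsible for write access on the mount, per the comment
         * above get_empty_filp().
         */
        file = alloc_file(&path, FMODE_READ, &example_fops);
        if (IS_ERR(file))
                path_put(&path);	/* alloc_file() did not consume them */
        return file;
}
#endif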
/**
 * drop_file_write_access - give up ability to write to a file
 * @file: the file to which we will stop writing
 *
 * This is a central place which will give up the ability
 * to write to @file, along with access to write through
 * its vfsmount.
 */
static void drop_file_write_access(struct file *file)
{
        struct vfsmount *mnt = file->f_path.mnt;
        struct dentry *dentry = file->f_path.dentry;
        struct inode *inode = dentry->d_inode;

        put_write_access(inode);

        if (special_file(inode->i_mode))
                return;
        if (file_check_writeable(file) != 0)
                return;
        __mnt_drop_write(mnt);
        file_release_write(file);
}

/*
 * The real guts of fput() - releasing the last reference to a file.
 */
static void __fput(struct file *file)
{
        struct dentry *dentry = file->f_path.dentry;
        struct vfsmount *mnt = file->f_path.mnt;
        struct inode *inode = dentry->d_inode;

        might_sleep();

        fsnotify_close(file);
        /*
         * The function eventpoll_release() should be the first called
         * in the file cleanup chain.
         */
        eventpoll_release(file);
        locks_remove_flock(file);

        if (unlikely(file->f_flags & FASYNC)) {
                if (file->f_op && file->f_op->fasync)
                        file->f_op->fasync(-1, file, 0);
        }
        ima_file_free(file);
        if (file->f_op && file->f_op->release)
                file->f_op->release(inode, file);
        security_file_free(file);
        if (unlikely(S_ISCHR(inode->i_mode) && inode->i_cdev != NULL &&
                     !(file->f_mode & FMODE_PATH))) {
                cdev_put(inode->i_cdev);
        }
        fops_put(file->f_op);
        put_pid(file->f_owner.pid);
        if ((file->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ)
                i_readcount_dec(inode);
        if (file->f_mode & FMODE_WRITE)
                drop_file_write_access(file);
        file->f_path.dentry = NULL;
        file->f_path.mnt = NULL;
        file->f_inode = NULL;
        file_free(file);
        dput(dentry);
        mntput(mnt);
}

static DEFINE_SPINLOCK(delayed_fput_lock);
static LIST_HEAD(delayed_fput_list);
static void delayed_fput(struct work_struct *unused)
{
        LIST_HEAD(head);
        spin_lock_irq(&delayed_fput_lock);
        list_splice_init(&delayed_fput_list, &head);
        spin_unlock_irq(&delayed_fput_lock);
        while (!list_empty(&head)) {
                struct file *f = list_first_entry(&head, struct file, f_u.fu_list);
                list_del_init(&f->f_u.fu_list);
                __fput(f);
        }
}

static void ____fput(struct callback_head *work)
{
        __fput(container_of(work, struct file, f_u.fu_rcuhead));
}

/*
 * If a kernel thread really needs the final fput() it has done to
 * complete, call this.  The only user right now is boot - we *do* need
 * to make sure our writes to binaries on initramfs have not left us
 * with open struct files waiting for __fput() - execve() won't work
 * without that.  Please don't add more callers without very good
 * reasons; in particular, never call this with locks held and never
 * call it from a thread that might need to do some work on any kind
 * of umount.
 */
void flush_delayed_fput(void)
{
        delayed_fput(NULL);
}

static DECLARE_WORK(delayed_fput_work, delayed_fput);

void fput(struct file *file)
{
        if (atomic_long_dec_and_test(&file->f_count)) {
                struct task_struct *task = current;
                file_sb_list_del(file);
                if (unlikely(in_interrupt() || task->flags & PF_KTHREAD)) {
                        unsigned long flags;
                        spin_lock_irqsave(&delayed_fput_lock, flags);
                        list_add(&file->f_u.fu_list, &delayed_fput_list);
                        schedule_work(&delayed_fput_work);
                        spin_unlock_irqrestore(&delayed_fput_lock, flags);
                        return;
                }
                init_task_work(&file->f_u.fu_rcuhead, ____fput);
                task_work_add(task, &file->f_u.fu_rcuhead, true);
        }
}
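/*
 * Illustrative sketch (not part of this file): ordinary code just pairs
 * fget()/fput() and never calls __fput() directly.  When the reference
 * dropped here is the last one, __fput() runs later - from task_work on
 * the way back to userspace, or from the delayed_fput workqueue if we
 * are in interrupt context or a kernel thread.  example_use_fd is a
 * hypothetical name.
 */
#if 0	/* example only, not compiled */
static int example_use_fd(unsigned int fd)
{
        struct file *file = fget(fd);

        if (!file)
                return -EBADF;
        /* ... operate on the file ... */
        fput(file);	/* the final __fput(), if any, is deferred */
        return 0;
}
#endif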
/*
 * Synchronous analog of fput(); for kernel threads that might be needed
 * in some umount() (and thus can't use flush_delayed_fput() without
 * risking deadlocks) and that need to wait for completion of __fput(),
 * knowing that for this specific struct file it won't involve anything
 * that would need them.  Use only if you really need it - at the very
 * least, don't blindly convert fput() calls by kernel threads to this.
 */
void __fput_sync(struct file *file)
{
        if (atomic_long_dec_and_test(&file->f_count)) {
                struct task_struct *task = current;
                file_sb_list_del(file);
                BUG_ON(!(task->flags & PF_KTHREAD));
                __fput(file);
        }
}

EXPORT_SYMBOL(fput);

void put_filp(struct file *file)
{
        if (atomic_long_dec_and_test(&file->f_count)) {
                security_file_free(file);
                file_sb_list_del(file);
                file_free(file);
        }
}

static inline int file_list_cpu(struct file *file)
{
#ifdef CONFIG_SMP
        return file->f_sb_list_cpu;
#else
        return smp_processor_id();
#endif
}

/* helper for file_sb_list_add to reduce ifdefs */
static inline void __file_sb_list_add(struct file *file, struct super_block *sb)
{
        struct list_head *list;
#ifdef CONFIG_SMP
        int cpu;
        cpu = smp_processor_id();
        file->f_sb_list_cpu = cpu;
        list = per_cpu_ptr(sb->s_files, cpu);
#else
        list = &sb->s_files;
#endif
        list_add(&file->f_u.fu_list, list);
}

/**
 * file_sb_list_add - add a file to the sb's file list
 * @file: file to add
 * @sb: sb to add it to
 *
 * Use this function to associate a file with the superblock of the
 * inode it refers to.
 */
void file_sb_list_add(struct file *file, struct super_block *sb)
{
        lg_local_lock(&files_lglock);
        __file_sb_list_add(file, sb);
        lg_local_unlock(&files_lglock);
}

/**
 * file_sb_list_del - remove a file from the sb's file list
 * @file: file to remove
 *
 * Use this function to remove a file from its superblock.
 */
void file_sb_list_del(struct file *file)
{
        if (!list_empty(&file->f_u.fu_list)) {
                lg_local_lock_cpu(&files_lglock, file_list_cpu(file));
                list_del_init(&file->f_u.fu_list);
                lg_local_unlock_cpu(&files_lglock, file_list_cpu(file));
        }
}

#ifdef CONFIG_SMP

/*
 * These macros iterate all files on all CPUs for a given superblock.
 * files_lglock must be held globally.
 */
#define do_file_list_for_each_entry(__sb, __file)		\
{								\
        int i;							\
        for_each_possible_cpu(i) {				\
                struct list_head *list;				\
                list = per_cpu_ptr((__sb)->s_files, i);		\
                list_for_each_entry((__file), list, f_u.fu_list)

#define while_file_list_for_each_entry				\
        }							\
}

#else

#define do_file_list_for_each_entry(__sb, __file)		\
{								\
        struct list_head *list;					\
        list = &(__sb)->s_files;				\
        list_for_each_entry((__file), list, f_u.fu_list)

#define while_file_list_for_each_entry				\
}

#endif
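/*
 * Illustrative sketch (not part of this file): the macro pair above is
 * meant to bracket a walk of every file on a superblock, with
 * files_lglock held globally so no CPU can modify its local list while
 * we iterate.  The counter below is hypothetical; mark_files_ro(),
 * right after this, is the real in-tree user of the pattern.
 */
#if 0	/* example only, not compiled */
static long example_count_writable(struct super_block *sb)
{
        struct file *f;
        long n = 0;

        lg_global_lock(&files_lglock);
        do_file_list_for_each_entry(sb, f) {
                if (f->f_mode & FMODE_WRITE)
                        n++;
        } while_file_list_for_each_entry;
        lg_global_unlock(&files_lglock);
        return n;
}
#endif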
/**
 * mark_files_ro - mark all files read-only
 * @sb: superblock in question
 *
 * All files are marked read-only.  We don't care about pending-delete
 * files, so this should be used in 'force' mode only.
 */
void mark_files_ro(struct super_block *sb)
{
        struct file *f;

        lg_global_lock(&files_lglock);
        do_file_list_for_each_entry(sb, f) {
                if (!S_ISREG(file_inode(f)->i_mode))
                        continue;
                if (!file_count(f))
                        continue;
                if (!(f->f_mode & FMODE_WRITE))
                        continue;
                spin_lock(&f->f_lock);
                f->f_mode &= ~FMODE_WRITE;
                spin_unlock(&f->f_lock);
                if (file_check_writeable(f) != 0)
                        continue;
                __mnt_drop_write(f->f_path.mnt);
                file_release_write(f);
        } while_file_list_for_each_entry;
        lg_global_unlock(&files_lglock);
}

void __init files_init(unsigned long mempages)
{
        unsigned long n;

        filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0,
                        SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);

        /*
         * One file with associated inode and dcache is very roughly 1K.
         * By default, don't use more than 10% of our memory for files.
         */

        n = (mempages * (PAGE_SIZE / 1024)) / 10;
        files_stat.max_files = max_t(unsigned long, n, NR_FILE);
        files_defer_init();
        lg_lock_init(&files_lglock, "files_lglock");
        percpu_counter_init(&nr_files, 0);
}
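/*
 * Worked example of the sizing above (illustrative): on a machine with
 * 4 GiB of RAM and 4 KiB pages, mempages is 1048576, so
 *
 *	n = (1048576 * (4096 / 1024)) / 10 = (1048576 * 4) / 10 = 419430
 *
 * i.e. the default file-max comes out at roughly 400k files, and is
 * never allowed to drop below NR_FILE.
 */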