xref: /openbmc/linux/fs/file.c (revision 6a6d6681ac1add9655b7ab5dd0b46b54aeb1b44f)
1  // SPDX-License-Identifier: GPL-2.0
2  /*
3   *  linux/fs/file.c
4   *
5   *  Copyright (C) 1998-1999, Stephen Tweedie and Bill Hawes
6   *
7   *  Manage the dynamic fd arrays in the process files_struct.
8   */
9  
10  #include <linux/syscalls.h>
11  #include <linux/export.h>
12  #include <linux/fs.h>
13  #include <linux/mm.h>
14  #include <linux/sched/signal.h>
15  #include <linux/slab.h>
16  #include <linux/file.h>
17  #include <linux/fdtable.h>
18  #include <linux/bitops.h>
19  #include <linux/spinlock.h>
20  #include <linux/rcupdate.h>
21  
22  unsigned int sysctl_nr_open __read_mostly = 1024*1024;
23  unsigned int sysctl_nr_open_min = BITS_PER_LONG;
24  /* our min() is unusable in constant expressions ;-/ */
25  #define __const_min(x, y) ((x) < (y) ? (x) : (y))
26  unsigned int sysctl_nr_open_max =
27  	__const_min(INT_MAX, ~(size_t)0/sizeof(void *)) & -BITS_PER_LONG;
28  
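/*
 * Free a dynamically allocated fdtable: ->fd and ->open_fds come from
 * kvmalloc (the latter also backs close_on_exec and full_fds_bits, see
 * alloc_fdtable()), the struct itself from kmalloc.
 */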
29  static void __free_fdtable(struct fdtable *fdt)
30  {
31  	kvfree(fdt->fd);
32  	kvfree(fdt->open_fds);
33  	kfree(fdt);
34  }
35  
36  static void free_fdtable_rcu(struct rcu_head *rcu)
37  {
38  	__free_fdtable(container_of(rcu, struct fdtable, rcu));
39  }
40  
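/*
 * full_fds_bits is a second-level bitmap with one bit per word of
 * open_fds (set when that word is completely allocated).  BITBIT_NR()
 * and BITBIT_SIZE() give its length in longs and bytes for a table of
 * 'nr' descriptors.
 */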
41  #define BITBIT_NR(nr)	BITS_TO_LONGS(BITS_TO_LONGS(nr))
42  #define BITBIT_SIZE(nr)	(BITBIT_NR(nr) * sizeof(long))
43  
44  /*
45   * Copy 'count' fd bits from the old table to the new table and clear the extra
46   * space if any.  This does not copy the file pointers.  Called with the files
47   * spinlock held for write.
48   */
49  static void copy_fd_bitmaps(struct fdtable *nfdt, struct fdtable *ofdt,
50  			    unsigned int count)
51  {
52  	unsigned int cpy, set;
53  
54  	cpy = count / BITS_PER_BYTE;
55  	set = (nfdt->max_fds - count) / BITS_PER_BYTE;
56  	memcpy(nfdt->open_fds, ofdt->open_fds, cpy);
57  	memset((char *)nfdt->open_fds + cpy, 0, set);
58  	memcpy(nfdt->close_on_exec, ofdt->close_on_exec, cpy);
59  	memset((char *)nfdt->close_on_exec + cpy, 0, set);
60  
61  	cpy = BITBIT_SIZE(count);
62  	set = BITBIT_SIZE(nfdt->max_fds) - cpy;
63  	memcpy(nfdt->full_fds_bits, ofdt->full_fds_bits, cpy);
64  	memset((char *)nfdt->full_fds_bits + cpy, 0, set);
65  }
66  
67  /*
68   * Copy all file descriptors from the old table to the new, expanded table and
69   * clear the extra space.  Called with the files spinlock held for write.
70   */
71  static void copy_fdtable(struct fdtable *nfdt, struct fdtable *ofdt)
72  {
73  	unsigned int cpy, set;
74  
75  	BUG_ON(nfdt->max_fds < ofdt->max_fds);
76  
77  	cpy = ofdt->max_fds * sizeof(struct file *);
78  	set = (nfdt->max_fds - ofdt->max_fds) * sizeof(struct file *);
79  	memcpy(nfdt->fd, ofdt->fd, cpy);
80  	memset((char *)nfdt->fd + cpy, 0, set);
81  
82  	copy_fd_bitmaps(nfdt, ofdt, ofdt->max_fds);
83  }
84  
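/*
 * Allocate a new fdtable; the comment below explains how the size is
 * chosen.  All three bitmaps live in one kvmalloc'ed block: open_fds and
 * close_on_exec take nr/8 bytes each, followed by the full_fds_bits
 * words.
 */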
85  static struct fdtable * alloc_fdtable(unsigned int nr)
86  {
87  	struct fdtable *fdt;
88  	void *data;
89  
90  	/*
91  	 * Figure out how many fds we actually want to support in this fdtable.
92  	 * Allocation steps are keyed to the size of the fdarray, since it
93  	 * grows far faster than any of the other dynamic data. We try to fit
94  	 * the fdarray into comfortable page-tuned chunks: starting at 1024B
95  	 * and growing in powers of two from there on.
96  	 */
97  	nr /= (1024 / sizeof(struct file *));
98  	nr = roundup_pow_of_two(nr + 1);
99  	nr *= (1024 / sizeof(struct file *));
100  	/*
101  	 * Note that this can drive nr *below* what we had passed if sysctl_nr_open
102  	 * had been set lower between the check in expand_files() and here.  Deal
103  	 * with that in caller, it's cheaper that way.
104  	 *
105  	 * We make sure that nr remains a multiple of BITS_PER_LONG - otherwise
106  	 * bitmaps handling below becomes unpleasant, to put it mildly...
107  	 */
108  	if (unlikely(nr > sysctl_nr_open))
109  		nr = ((sysctl_nr_open - 1) | (BITS_PER_LONG - 1)) + 1;
110  
111  	fdt = kmalloc(sizeof(struct fdtable), GFP_KERNEL_ACCOUNT);
112  	if (!fdt)
113  		goto out;
114  	fdt->max_fds = nr;
115  	data = kvmalloc_array(nr, sizeof(struct file *), GFP_KERNEL_ACCOUNT);
116  	if (!data)
117  		goto out_fdt;
118  	fdt->fd = data;
119  
120  	data = kvmalloc(max_t(size_t,
121  				 2 * nr / BITS_PER_BYTE + BITBIT_SIZE(nr), L1_CACHE_BYTES),
122  				 GFP_KERNEL_ACCOUNT);
123  	if (!data)
124  		goto out_arr;
125  	fdt->open_fds = data;
126  	data += nr / BITS_PER_BYTE;
127  	fdt->close_on_exec = data;
128  	data += nr / BITS_PER_BYTE;
129  	fdt->full_fds_bits = data;
130  
131  	return fdt;
132  
133  out_arr:
134  	kvfree(fdt->fd);
135  out_fdt:
136  	kfree(fdt);
137  out:
138  	return NULL;
139  }
140  
141  /*
142   * Expand the file descriptor table.
143   * This function will allocate a new fdtable and both fd array and fdset, of
144   * the given size.
145   * Return <0 error code on error; 1 on successful completion.
146   * The files->file_lock should be held on entry, and will be held on exit.
147   */
148  static int expand_fdtable(struct files_struct *files, unsigned int nr)
149  	__releases(files->file_lock)
150  	__acquires(files->file_lock)
151  {
152  	struct fdtable *new_fdt, *cur_fdt;
153  
154  	spin_unlock(&files->file_lock);
155  	new_fdt = alloc_fdtable(nr);
156  
157  	/* Make sure all __fd_install() calls have seen resize_in_progress
158  	 * or have finished their rcu_read_lock_sched() section.
159  	 */
160  	if (atomic_read(&files->count) > 1)
161  		synchronize_sched();
162  
163  	spin_lock(&files->file_lock);
164  	if (!new_fdt)
165  		return -ENOMEM;
166  	/*
167  	 * extremely unlikely race - sysctl_nr_open decreased between the check in
168  	 * caller and alloc_fdtable().  Cheaper to catch it here...
169  	 */
170  	if (unlikely(new_fdt->max_fds <= nr)) {
171  		__free_fdtable(new_fdt);
172  		return -EMFILE;
173  	}
174  	cur_fdt = files_fdtable(files);
175  	BUG_ON(nr < cur_fdt->max_fds);
176  	copy_fdtable(new_fdt, cur_fdt);
177  	rcu_assign_pointer(files->fdt, new_fdt);
178  	if (cur_fdt != &files->fdtab)
179  		call_rcu(&cur_fdt->rcu, free_fdtable_rcu);
180  	/* coupled with smp_rmb() in __fd_install() */
181  	smp_wmb();
182  	return 1;
183  }
184  
185  /*
186   * Expand files.
187   * This function will expand the file structures, if the requested size exceeds
188   * the current capacity and there is room for expansion.
189   * Return <0 error code on error; 0 when nothing done; 1 when files were
190   * expanded and execution may have blocked.
191   * The files->file_lock should be held on entry, and will be held on exit.
192   */
193  static int expand_files(struct files_struct *files, unsigned int nr)
194  	__releases(files->file_lock)
195  	__acquires(files->file_lock)
196  {
197  	struct fdtable *fdt;
198  	int expanded = 0;
199  
200  repeat:
201  	fdt = files_fdtable(files);
202  
203  	/* Do we need to expand? */
204  	if (nr < fdt->max_fds)
205  		return expanded;
206  
207  	/* Can we expand? */
208  	if (nr >= sysctl_nr_open)
209  		return -EMFILE;
210  
211  	if (unlikely(files->resize_in_progress)) {
212  		spin_unlock(&files->file_lock);
213  		expanded = 1;
214  		wait_event(files->resize_wait, !files->resize_in_progress);
215  		spin_lock(&files->file_lock);
216  		goto repeat;
217  	}
218  
219  	/* All good, so we try */
220  	files->resize_in_progress = true;
221  	expanded = expand_fdtable(files, nr);
222  	files->resize_in_progress = false;
223  
224  	wake_up_all(&files->resize_wait);
225  	return expanded;
226  }
227  
228  static inline void __set_close_on_exec(unsigned int fd, struct fdtable *fdt)
229  {
230  	__set_bit(fd, fdt->close_on_exec);
231  }
232  
233  static inline void __clear_close_on_exec(unsigned int fd, struct fdtable *fdt)
234  {
235  	if (test_bit(fd, fdt->close_on_exec))
236  		__clear_bit(fd, fdt->close_on_exec);
237  }
238  
239  static inline void __set_open_fd(unsigned int fd, struct fdtable *fdt)
240  {
241  	__set_bit(fd, fdt->open_fds);
242  	fd /= BITS_PER_LONG;
243  	if (!~fdt->open_fds[fd])
244  		__set_bit(fd, fdt->full_fds_bits);
245  }
246  
247  static inline void __clear_open_fd(unsigned int fd, struct fdtable *fdt)
248  {
249  	__clear_bit(fd, fdt->open_fds);
250  	__clear_bit(fd / BITS_PER_LONG, fdt->full_fds_bits);
251  }
252  
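/*
 * Return the number of fd slots worth copying when duplicating this
 * table: enough to cover the highest open fd, rounded up to a whole
 * word of open_fds.
 */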
253  static unsigned int count_open_files(struct fdtable *fdt)
254  {
255  	unsigned int size = fdt->max_fds;
256  	unsigned int i;
257  
258  	/* Find the last open fd */
259  	for (i = size / BITS_PER_LONG; i > 0; ) {
260  		if (fdt->open_fds[--i])
261  			break;
262  	}
263  	i = (i + 1) * BITS_PER_LONG;
264  	return i;
265  }
266  
267  /*
268   * Allocate a new files structure and copy contents from the
269   * passed in files structure.
270   * errorp will be valid only when the returned files_struct is NULL.
271   */
272  struct files_struct *dup_fd(struct files_struct *oldf, int *errorp)
273  {
274  	struct files_struct *newf;
275  	struct file **old_fds, **new_fds;
276  	unsigned int open_files, i;
277  	struct fdtable *old_fdt, *new_fdt;
278  
279  	*errorp = -ENOMEM;
280  	newf = kmem_cache_alloc(files_cachep, GFP_KERNEL);
281  	if (!newf)
282  		goto out;
283  
284  	atomic_set(&newf->count, 1);
285  
286  	spin_lock_init(&newf->file_lock);
287  	newf->resize_in_progress = false;
288  	init_waitqueue_head(&newf->resize_wait);
289  	newf->next_fd = 0;
290  	new_fdt = &newf->fdtab;
291  	new_fdt->max_fds = NR_OPEN_DEFAULT;
292  	new_fdt->close_on_exec = newf->close_on_exec_init;
293  	new_fdt->open_fds = newf->open_fds_init;
294  	new_fdt->full_fds_bits = newf->full_fds_bits_init;
295  	new_fdt->fd = &newf->fd_array[0];
296  
297  	spin_lock(&oldf->file_lock);
298  	old_fdt = files_fdtable(oldf);
299  	open_files = count_open_files(old_fdt);
300  
301  	/*
302  	 * Check whether we need to allocate a larger fd array and fd set.
303  	 */
304  	while (unlikely(open_files > new_fdt->max_fds)) {
305  		spin_unlock(&oldf->file_lock);
306  
307  		if (new_fdt != &newf->fdtab)
308  			__free_fdtable(new_fdt);
309  
310  		new_fdt = alloc_fdtable(open_files - 1);
311  		if (!new_fdt) {
312  			*errorp = -ENOMEM;
313  			goto out_release;
314  		}
315  
316  		/* beyond sysctl_nr_open; nothing to do */
317  		if (unlikely(new_fdt->max_fds < open_files)) {
318  			__free_fdtable(new_fdt);
319  			*errorp = -EMFILE;
320  			goto out_release;
321  		}
322  
323  		/*
324  		 * Reacquire the oldf lock and a pointer to its fd table;
325  		 * it may have been replaced with a new, bigger one in the
326  		 * meantime, so we need the latest pointer.
327  		 */
328  		spin_lock(&oldf->file_lock);
329  		old_fdt = files_fdtable(oldf);
330  		open_files = count_open_files(old_fdt);
331  	}
332  
333  	copy_fd_bitmaps(new_fdt, old_fdt, open_files);
334  
335  	old_fds = old_fdt->fd;
336  	new_fds = new_fdt->fd;
337  
338  	for (i = open_files; i != 0; i--) {
339  		struct file *f = *old_fds++;
340  		if (f) {
341  			get_file(f);
342  		} else {
343  			/*
344  			 * The fd may be claimed in the fd bitmap but not yet
345  			 * instantiated in the files array if a sibling thread
346  			 * is partway through open().  So make sure that this
347  			 * fd is available to the new process.
348  			 */
349  			__clear_open_fd(open_files - i, new_fdt);
350  		}
351  		rcu_assign_pointer(*new_fds++, f);
352  	}
353  	spin_unlock(&oldf->file_lock);
354  
355  	/* clear the remainder */
356  	memset(new_fds, 0, (new_fdt->max_fds - open_files) * sizeof(struct file *));
357  
358  	rcu_assign_pointer(newf->fdt, new_fdt);
359  
360  	return newf;
361  
362  out_release:
363  	kmem_cache_free(files_cachep, newf);
364  out:
365  	return NULL;
366  }
367  
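/*
 * Close every file still installed in 'files'.  Only reached from
 * put_files_struct() once the last reference is gone, so the table can
 * be walked without locks (see below).
 */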
368  static struct fdtable *close_files(struct files_struct * files)
369  {
370  	/*
371  	 * It is safe to dereference the fd table without RCU or
372  	 * ->file_lock because this is the last reference to the
373  	 * files structure.
374  	 */
375  	struct fdtable *fdt = rcu_dereference_raw(files->fdt);
376  	unsigned int i, j = 0;
377  
378  	for (;;) {
379  		unsigned long set;
380  		i = j * BITS_PER_LONG;
381  		if (i >= fdt->max_fds)
382  			break;
383  		set = fdt->open_fds[j++];
384  		while (set) {
385  			if (set & 1) {
386  				struct file * file = xchg(&fdt->fd[i], NULL);
387  				if (file) {
388  					filp_close(file, files);
389  					cond_resched();
390  				}
391  			}
392  			i++;
393  			set >>= 1;
394  		}
395  	}
396  
397  	return fdt;
398  }
399  
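/*
 * Grab a counted reference to another task's files_struct (may return
 * NULL if the task has already passed exit_files()).  Pair with
 * put_files_struct().
 */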
400  struct files_struct *get_files_struct(struct task_struct *task)
401  {
402  	struct files_struct *files;
403  
404  	task_lock(task);
405  	files = task->files;
406  	if (files)
407  		atomic_inc(&files->count);
408  	task_unlock(task);
409  
410  	return files;
411  }
412  
413  void put_files_struct(struct files_struct *files)
414  {
415  	if (atomic_dec_and_test(&files->count)) {
416  		struct fdtable *fdt = close_files(files);
417  
418  		/* free the arrays if they are not embedded */
419  		if (fdt != &files->fdtab)
420  			__free_fdtable(fdt);
421  		kmem_cache_free(files_cachep, files);
422  	}
423  }
424  
425  void reset_files_struct(struct files_struct *files)
426  {
427  	struct task_struct *tsk = current;
428  	struct files_struct *old;
429  
430  	old = tsk->files;
431  	task_lock(tsk);
432  	tsk->files = files;
433  	task_unlock(tsk);
434  	put_files_struct(old);
435  }
436  
437  void exit_files(struct task_struct *tsk)
438  {
439  	struct files_struct * files = tsk->files;
440  
441  	if (files) {
442  		task_lock(tsk);
443  		tsk->files = NULL;
444  		task_unlock(tsk);
445  		put_files_struct(files);
446  	}
447  }
448  
449  struct files_struct init_files = {
450  	.count		= ATOMIC_INIT(1),
451  	.fdt		= &init_files.fdtab,
452  	.fdtab		= {
453  		.max_fds	= NR_OPEN_DEFAULT,
454  		.fd		= &init_files.fd_array[0],
455  		.close_on_exec	= init_files.close_on_exec_init,
456  		.open_fds	= init_files.open_fds_init,
457  		.full_fds_bits	= init_files.full_fds_bits_init,
458  	},
459  	.file_lock	= __SPIN_LOCK_UNLOCKED(init_files.file_lock),
460  };
461  
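/*
 * Find the first unused fd at or above 'start'.  The full_fds_bits
 * summary bitmap lets the search skip words of open_fds that are known
 * to be completely full.
 */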
462  static unsigned int find_next_fd(struct fdtable *fdt, unsigned int start)
463  {
464  	unsigned int maxfd = fdt->max_fds;
465  	unsigned int maxbit = maxfd / BITS_PER_LONG;
466  	unsigned int bitbit = start / BITS_PER_LONG;
467  
468  	bitbit = find_next_zero_bit(fdt->full_fds_bits, maxbit, bitbit) * BITS_PER_LONG;
469  	if (bitbit > maxfd)
470  		return maxfd;
471  	if (bitbit > start)
472  		start = bitbit;
473  	return find_next_zero_bit(fdt->open_fds, maxfd, start);
474  }
475  
476  /*
477   * allocate a file descriptor, mark it busy.
478   */
479  int __alloc_fd(struct files_struct *files,
480  	       unsigned start, unsigned end, unsigned flags)
481  {
482  	unsigned int fd;
483  	int error;
484  	struct fdtable *fdt;
485  
486  	spin_lock(&files->file_lock);
487  repeat:
488  	fdt = files_fdtable(files);
489  	fd = start;
490  	if (fd < files->next_fd)
491  		fd = files->next_fd;
492  
493  	if (fd < fdt->max_fds)
494  		fd = find_next_fd(fdt, fd);
495  
496  	/*
497  	 * N.B. For clone tasks sharing a files structure, this test
498  	 * will limit the total number of files that can be opened.
499  	 */
500  	error = -EMFILE;
501  	if (fd >= end)
502  		goto out;
503  
504  	error = expand_files(files, fd);
505  	if (error < 0)
506  		goto out;
507  
508  	/*
509  	 * If we needed to expand the fs array we
510  	 * If we needed to expand the fd array we
511  	 */
512  	if (error)
513  		goto repeat;
514  
515  	if (start <= files->next_fd)
516  		files->next_fd = fd + 1;
517  
518  	__set_open_fd(fd, fdt);
519  	if (flags & O_CLOEXEC)
520  		__set_close_on_exec(fd, fdt);
521  	else
522  		__clear_close_on_exec(fd, fdt);
523  	error = fd;
524  #if 1
525  	/* Sanity check */
526  	if (rcu_access_pointer(fdt->fd[fd]) != NULL) {
527  		printk(KERN_WARNING "alloc_fd: slot %d not NULL!\n", fd);
528  		rcu_assign_pointer(fdt->fd[fd], NULL);
529  	}
530  #endif
531  
532  out:
533  	spin_unlock(&files->file_lock);
534  	return error;
535  }
536  
537  static int alloc_fd(unsigned start, unsigned flags)
538  {
539  	return __alloc_fd(current->files, start, rlimit(RLIMIT_NOFILE), flags);
540  }
541  
542  int get_unused_fd_flags(unsigned flags)
543  {
544  	return __alloc_fd(current->files, 0, rlimit(RLIMIT_NOFILE), flags);
545  }
546  EXPORT_SYMBOL(get_unused_fd_flags);
547  
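/*
 * Mark an fd free again in the bitmaps and remember it as a candidate
 * for the next allocation.  Callers must hold ->file_lock.
 */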
548  static void __put_unused_fd(struct files_struct *files, unsigned int fd)
549  {
550  	struct fdtable *fdt = files_fdtable(files);
551  	__clear_open_fd(fd, fdt);
552  	if (fd < files->next_fd)
553  		files->next_fd = fd;
554  }
555  
556  void put_unused_fd(unsigned int fd)
557  {
558  	struct files_struct *files = current->files;
559  	spin_lock(&files->file_lock);
560  	__put_unused_fd(files, fd);
561  	spin_unlock(&files->file_lock);
562  }
563  
564  EXPORT_SYMBOL(put_unused_fd);
565  
566  /*
567   * Install a file pointer in the fd array.
568   *
569   * The VFS is full of places where we drop the files lock between
570   * setting the open_fds bitmap and installing the file in the file
571   * array.  At any such point, we are vulnerable to a dup2() race
572   * installing a file in the array before us.  We need to detect this and
573   * fput() the struct file we are about to overwrite in this case.
574   *
575   * It should never happen - if we allow dup2() to do it, _really_ bad things
576   * will follow.
577   *
578   * NOTE: __fd_install() variant is really, really low-level; don't
579   * use it unless you are forced to by truly lousy API shoved down
580   * your throat.  'files' *MUST* be either current->files or obtained
581   * by get_files_struct(current) done by whoever had given it to you,
582   * or really bad things will happen.  Normally you want to use
583   * fd_install() instead.
584   */
585  
586  void __fd_install(struct files_struct *files, unsigned int fd,
587  		struct file *file)
588  {
589  	struct fdtable *fdt;
590  
591  	rcu_read_lock_sched();
592  
593  	if (unlikely(files->resize_in_progress)) {
594  		rcu_read_unlock_sched();
595  		spin_lock(&files->file_lock);
596  		fdt = files_fdtable(files);
597  		BUG_ON(fdt->fd[fd] != NULL);
598  		rcu_assign_pointer(fdt->fd[fd], file);
599  		spin_unlock(&files->file_lock);
600  		return;
601  	}
602  	/* coupled with smp_wmb() in expand_fdtable() */
603  	smp_rmb();
604  	fdt = rcu_dereference_sched(files->fdt);
605  	BUG_ON(fdt->fd[fd] != NULL);
606  	rcu_assign_pointer(fdt->fd[fd], file);
607  	rcu_read_unlock_sched();
608  }
609  
610  void fd_install(unsigned int fd, struct file *file)
611  {
612  	__fd_install(current->files, fd, file);
613  }
614  
615  EXPORT_SYMBOL(fd_install);
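
/*
 * Typical pattern for code that creates a file and hands a descriptor to
 * userspace (illustrative sketch only, not taken from this file; the
 * anon-inode call and "example_fops" are just stand-ins):
 *
 *	fd = get_unused_fd_flags(O_CLOEXEC);
 *	if (fd < 0)
 *		return fd;
 *	file = anon_inode_getfile("[example]", &example_fops, NULL, O_RDWR);
 *	if (IS_ERR(file)) {
 *		put_unused_fd(fd);
 *		return PTR_ERR(file);
 *	}
 *	fd_install(fd, file);
 *	return fd;
 *
 * fd_install() must come last: once the descriptor is visible in the
 * table, another thread sharing the files_struct may use or close it
 * immediately.
 */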
616  
617  /*
618   * The same warnings as for __alloc_fd()/__fd_install() apply here...
619   */
620  int __close_fd(struct files_struct *files, unsigned fd)
621  {
622  	struct file *file;
623  	struct fdtable *fdt;
624  
625  	spin_lock(&files->file_lock);
626  	fdt = files_fdtable(files);
627  	if (fd >= fdt->max_fds)
628  		goto out_unlock;
629  	file = fdt->fd[fd];
630  	if (!file)
631  		goto out_unlock;
632  	rcu_assign_pointer(fdt->fd[fd], NULL);
633  	__put_unused_fd(files, fd);
634  	spin_unlock(&files->file_lock);
635  	return filp_close(file, files);
636  
637  out_unlock:
638  	spin_unlock(&files->file_lock);
639  	return -EBADF;
640  }
641  EXPORT_SYMBOL(__close_fd); /* for ksys_close() */
642  
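/*
 * Close all descriptors marked close-on-exec.  The exec path calls this
 * after unsharing the files_struct (hence the note below), so dropping
 * and retaking ->file_lock around each filp_close() is safe.
 */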
643  void do_close_on_exec(struct files_struct *files)
644  {
645  	unsigned i;
646  	struct fdtable *fdt;
647  
648  	/* exec unshares first */
649  	spin_lock(&files->file_lock);
650  	for (i = 0; ; i++) {
651  		unsigned long set;
652  		unsigned fd = i * BITS_PER_LONG;
653  		fdt = files_fdtable(files);
654  		if (fd >= fdt->max_fds)
655  			break;
656  		set = fdt->close_on_exec[i];
657  		if (!set)
658  			continue;
659  		fdt->close_on_exec[i] = 0;
660  		for ( ; set ; fd++, set >>= 1) {
661  			struct file *file;
662  			if (!(set & 1))
663  				continue;
664  			file = fdt->fd[fd];
665  			if (!file)
666  				continue;
667  			rcu_assign_pointer(fdt->fd[fd], NULL);
668  			__put_unused_fd(files, fd);
669  			spin_unlock(&files->file_lock);
670  			filp_close(file, files);
671  			cond_resched();
672  			spin_lock(&files->file_lock);
673  		}
674  
675  	}
676  	spin_unlock(&files->file_lock);
677  }
678  
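/*
 * Common guts of fget(), fget_raw() and the shared-table path of
 * __fget_light(): look the fd up under RCU and take a reference unless
 * the file's f_mode intersects 'mask'.  If get_file_rcu() fails the
 * struct file is already on its way out and the slot is re-read, per the
 * dup2() atomicity comment below.
 */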
679  static struct file *__fget(unsigned int fd, fmode_t mask)
680  {
681  	struct files_struct *files = current->files;
682  	struct file *file;
683  
684  	rcu_read_lock();
685  loop:
686  	file = fcheck_files(files, fd);
687  	if (file) {
688  		/* If the file object ref couldn't be taken, the dup2()
689  		 * atomicity guarantee is the reason we loop to catch the
690  		 * new file (or NULL pointer).
691  		 */
692  		if (file->f_mode & mask)
693  			file = NULL;
694  		else if (!get_file_rcu(file))
695  			goto loop;
696  	}
697  	rcu_read_unlock();
698  
699  	return file;
700  }
701  
702  struct file *fget(unsigned int fd)
703  {
704  	return __fget(fd, FMODE_PATH);
705  }
706  EXPORT_SYMBOL(fget);
707  
708  struct file *fget_raw(unsigned int fd)
709  {
710  	return __fget(fd, 0);
711  }
712  EXPORT_SYMBOL(fget_raw);
713  
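/*
 * Illustrative use of fget()/fput() (sketch only, not from this file):
 *
 *	struct file *file = fget(fd);
 *	if (!file)
 *		return -EBADF;
 *	... operate on file ...
 *	fput(file);
 *
 * fget() refuses O_PATH files because it passes FMODE_PATH as the mask;
 * fget_raw() accepts them.
 */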
714  /*
715   * Lightweight file lookup - no refcnt increment if fd table isn't shared.
716   *
717   * You can use this instead of fget if you satisfy all of the following
718   * conditions:
719   * 1) You must call fput_light before exiting the syscall and returning control
720   *    to userspace (i.e. you cannot remember the returned struct file * after
721   *    returning to userspace).
722   * 2) You must not call filp_close on the returned struct file * in between
723   *    calls to fget_light and fput_light.
724   * 3) You must not clone the current task in between the calls to fget_light
725   *    and fput_light.
726   *
727   * The fput_needed flag returned by fget_light should be passed to the
728   * corresponding fput_light.
729   */
730  static unsigned long __fget_light(unsigned int fd, fmode_t mask)
731  {
732  	struct files_struct *files = current->files;
733  	struct file *file;
734  
735  	if (atomic_read(&files->count) == 1) {
736  		file = __fcheck_files(files, fd);
737  		if (!file || unlikely(file->f_mode & mask))
738  			return 0;
739  		return (unsigned long)file;
740  	} else {
741  		file = __fget(fd, mask);
742  		if (!file)
743  			return 0;
744  		return FDPUT_FPUT | (unsigned long)file;
745  	}
746  }
747  unsigned long __fdget(unsigned int fd)
748  {
749  	return __fget_light(fd, FMODE_PATH);
750  }
751  EXPORT_SYMBOL(__fdget);
752  
753  unsigned long __fdget_raw(unsigned int fd)
754  {
755  	return __fget_light(fd, 0);
756  }
757  
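/*
 * __fdget()/__fdget_raw() are normally reached through the fdget() and
 * fdput() wrappers in <linux/file.h>, which pack the returned word into
 * struct fd.  A typical caller (illustrative sketch only):
 *
 *	struct fd f = fdget(fd);
 *	if (!f.file)
 *		return -EBADF;
 *	... use f.file ...
 *	fdput(f);
 *
 * The low bits carry FDPUT_FPUT (and, for __fdget_pos(), FDPUT_POS_UNLOCK)
 * so fdput()/fdput_pos() know what cleanup is needed.
 */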
758  unsigned long __fdget_pos(unsigned int fd)
759  {
760  	unsigned long v = __fdget(fd);
761  	struct file *file = (struct file *)(v & ~3);
762  
763  	if (file && (file->f_mode & FMODE_ATOMIC_POS)) {
764  		if (file_count(file) > 1) {
765  			v |= FDPUT_POS_UNLOCK;
766  			mutex_lock(&file->f_pos_lock);
767  		}
768  	}
769  	return v;
770  }
771  
772  void __f_unlock_pos(struct file *f)
773  {
774  	mutex_unlock(&f->f_pos_lock);
775  }
776  
777  /*
778   * We only lock f_pos if we have threads or if the file might be
779   * shared with another process. In both cases we'll have an elevated
780   * file count (done either by fdget() or by fork()).
781   */
782  
783  void set_close_on_exec(unsigned int fd, int flag)
784  {
785  	struct files_struct *files = current->files;
786  	struct fdtable *fdt;
787  	spin_lock(&files->file_lock);
788  	fdt = files_fdtable(files);
789  	if (flag)
790  		__set_close_on_exec(fd, fdt);
791  	else
792  		__clear_close_on_exec(fd, fdt);
793  	spin_unlock(&files->file_lock);
794  }
795  
796  bool get_close_on_exec(unsigned int fd)
797  {
798  	struct files_struct *files = current->files;
799  	struct fdtable *fdt;
800  	bool res;
801  	rcu_read_lock();
802  	fdt = files_fdtable(files);
803  	res = close_on_exec(fd, fdt);
804  	rcu_read_unlock();
805  	return res;
806  }
807  
808  static int do_dup2(struct files_struct *files,
809  	struct file *file, unsigned fd, unsigned flags)
810  __releases(&files->file_lock)
811  {
812  	struct file *tofree;
813  	struct fdtable *fdt;
814  
815  	/*
816  	 * We need to detect attempts to do dup2() over an allocated but still
817  	 * not finished descriptor.  NB: OpenBSD avoids that at the price of
818  	 * extra work in their equivalent of fget() - they insert struct
819  	 * file immediately after grabbing descriptor, mark it larval if
820  	 * more work (e.g. actual opening) is needed and make sure that
821  	 * fget() treats larval files as absent.  Potentially interesting,
822  	 * but while extra work in fget() is trivial, locking implications
823  	 * and amount of surgery on open()-related paths in VFS are not.
824  	 * FreeBSD fails with -EBADF in the same situation, NetBSD "solution"
825  	 * deadlocks in rather amusing ways, AFAICS.  All of that is out of
826  	 * scope of POSIX or SUS, since neither considers shared descriptor
827  	 * tables and this condition does not arise without those.
828  	 */
829  	fdt = files_fdtable(files);
830  	tofree = fdt->fd[fd];
831  	if (!tofree && fd_is_open(fd, fdt))
832  		goto Ebusy;
833  	get_file(file);
834  	rcu_assign_pointer(fdt->fd[fd], file);
835  	__set_open_fd(fd, fdt);
836  	if (flags & O_CLOEXEC)
837  		__set_close_on_exec(fd, fdt);
838  	else
839  		__clear_close_on_exec(fd, fdt);
840  	spin_unlock(&files->file_lock);
841  
842  	if (tofree)
843  		filp_close(tofree, files);
844  
845  	return fd;
846  
847  Ebusy:
848  	spin_unlock(&files->file_lock);
849  	return -EBUSY;
850  }
851  
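/*
 * Install 'file' at descriptor 'fd', taking a reference of its own and
 * closing whatever was there before; a NULL 'file' just closes 'fd'.
 * Used e.g. by the usermode-helper code to set up std descriptors.
 */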
852  int replace_fd(unsigned fd, struct file *file, unsigned flags)
853  {
854  	int err;
855  	struct files_struct *files = current->files;
856  
857  	if (!file)
858  		return __close_fd(files, fd);
859  
860  	if (fd >= rlimit(RLIMIT_NOFILE))
861  		return -EBADF;
862  
863  	spin_lock(&files->file_lock);
864  	err = expand_files(files, fd);
865  	if (unlikely(err < 0))
866  		goto out_unlock;
867  	return do_dup2(files, file, fd, flags);
868  
869  out_unlock:
870  	spin_unlock(&files->file_lock);
871  	return err;
872  }
873  
874  static int ksys_dup3(unsigned int oldfd, unsigned int newfd, int flags)
875  {
876  	int err = -EBADF;
877  	struct file *file;
878  	struct files_struct *files = current->files;
879  
880  	if ((flags & ~O_CLOEXEC) != 0)
881  		return -EINVAL;
882  
883  	if (unlikely(oldfd == newfd))
884  		return -EINVAL;
885  
886  	if (newfd >= rlimit(RLIMIT_NOFILE))
887  		return -EBADF;
888  
889  	spin_lock(&files->file_lock);
890  	err = expand_files(files, newfd);
891  	file = fcheck(oldfd);
892  	if (unlikely(!file))
893  		goto Ebadf;
894  	if (unlikely(err < 0)) {
895  		if (err == -EMFILE)
896  			goto Ebadf;
897  		goto out_unlock;
898  	}
899  	return do_dup2(files, file, newfd, flags);
900  
901  Ebadf:
902  	err = -EBADF;
903  out_unlock:
904  	spin_unlock(&files->file_lock);
905  	return err;
906  }
907  
908  SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
909  {
910  	return ksys_dup3(oldfd, newfd, flags);
911  }
912  
913  SYSCALL_DEFINE2(dup2, unsigned int, oldfd, unsigned int, newfd)
914  {
915  	if (unlikely(newfd == oldfd)) { /* corner case */
916  		struct files_struct *files = current->files;
917  		int retval = oldfd;
918  
919  		rcu_read_lock();
920  		if (!fcheck_files(files, oldfd))
921  			retval = -EBADF;
922  		rcu_read_unlock();
923  		return retval;
924  	}
925  	return ksys_dup3(oldfd, newfd, 0);
926  }
927  
928  int ksys_dup(unsigned int fildes)
929  {
930  	int ret = -EBADF;
931  	struct file *file = fget_raw(fildes);
932  
933  	if (file) {
934  		ret = get_unused_fd_flags(0);
935  		if (ret >= 0)
936  			fd_install(ret, file);
937  		else
938  			fput(file);
939  	}
940  	return ret;
941  }
942  
943  SYSCALL_DEFINE1(dup, unsigned int, fildes)
944  {
945  	return ksys_dup(fildes);
946  }
947  
948  int f_dupfd(unsigned int from, struct file *file, unsigned flags)
949  {
950  	int err;
951  	if (from >= rlimit(RLIMIT_NOFILE))
952  		return -EINVAL;
953  	err = alloc_fd(from, flags);
954  	if (err >= 0) {
955  		get_file(file);
956  		fd_install(err, file);
957  	}
958  	return err;
959  }
960  
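/*
 * Walk installed files starting at descriptor 'n' under ->file_lock,
 * calling f() for each; stop at the first non-zero return value and
 * pass it back (0 if the walk completed).
 */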
961  int iterate_fd(struct files_struct *files, unsigned n,
962  		int (*f)(const void *, struct file *, unsigned),
963  		const void *p)
964  {
965  	struct fdtable *fdt;
966  	int res = 0;
967  	if (!files)
968  		return 0;
969  	spin_lock(&files->file_lock);
970  	for (fdt = files_fdtable(files); n < fdt->max_fds; n++) {
971  		struct file *file;
972  		file = rcu_dereference_check_fdtable(files, fdt->fd[n]);
973  		if (!file)
974  			continue;
975  		res = f(p, file, n);
976  		if (res)
977  			break;
978  	}
979  	spin_unlock(&files->file_lock);
980  	return res;
981  }
982  EXPORT_SYMBOL(iterate_fd);
983