// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/file.c
 *
 *  Copyright (C) 1998-1999, Stephen Tweedie and Bill Hawes
 *
 *  Manage the dynamic fd arrays in the process files_struct.
 */

#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/close_range.h>
#include <net/sock.h>
#include <linux/init_task.h>

#include "internal.h"

unsigned int sysctl_nr_open __read_mostly = 1024*1024;
unsigned int sysctl_nr_open_min = BITS_PER_LONG;
/* our min() is unusable in constant expressions ;-/ */
#define __const_min(x, y) ((x) < (y) ? (x) : (y))
unsigned int sysctl_nr_open_max =
	__const_min(INT_MAX, ~(size_t)0/sizeof(void *)) & -BITS_PER_LONG;
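/*
 * Illustrative worked example (not in the original source): on a 64-bit
 * build INT_MAX is the smaller of the two operands, so this evaluates to
 * INT_MAX & -64 == 0x7fffffc0 - the largest multiple of BITS_PER_LONG
 * that still fits in an int.
 */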

static void __free_fdtable(struct fdtable *fdt)
{
	kvfree(fdt->fd);
	kvfree(fdt->open_fds);
	kfree(fdt);
}

static void free_fdtable_rcu(struct rcu_head *rcu)
{
	__free_fdtable(container_of(rcu, struct fdtable, rcu));
}

#define BITBIT_NR(nr)	BITS_TO_LONGS(BITS_TO_LONGS(nr))
#define BITBIT_SIZE(nr)	(BITBIT_NR(nr) * sizeof(long))
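/*
 * Worked example (illustrative, 64-bit): for a table of 1024 fds,
 * ->open_fds needs BITS_TO_LONGS(1024) == 16 words, so the summary
 * bitmap needs BITBIT_NR(1024) == BITS_TO_LONGS(16) == 1 word
 * (BITBIT_SIZE == 8 bytes): one "this word is full" bit per
 * ->open_fds word.
 */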

#define fdt_words(fdt) ((fdt)->max_fds / BITS_PER_LONG) // words in ->open_fds
/*
 * Copy 'copy_words' worth of fd bits from the old table to the new table and
 * clear the extra space if any.  This does not copy the file pointers.
 * Called with the files spinlock held for write.
 */
static inline void copy_fd_bitmaps(struct fdtable *nfdt, struct fdtable *ofdt,
			    unsigned int copy_words)
{
	unsigned int nwords = fdt_words(nfdt);

	bitmap_copy_and_extend(nfdt->open_fds, ofdt->open_fds,
			copy_words * BITS_PER_LONG, nwords * BITS_PER_LONG);
	bitmap_copy_and_extend(nfdt->close_on_exec, ofdt->close_on_exec,
			copy_words * BITS_PER_LONG, nwords * BITS_PER_LONG);
	bitmap_copy_and_extend(nfdt->full_fds_bits, ofdt->full_fds_bits,
			copy_words, nwords);
}

/*
 * Copy all file descriptors from the old table to the new, expanded table and
 * clear the extra space.  Called with the files spinlock held for write.
 */
static void copy_fdtable(struct fdtable *nfdt, struct fdtable *ofdt)
{
	size_t cpy, set;

	BUG_ON(nfdt->max_fds < ofdt->max_fds);

	cpy = ofdt->max_fds * sizeof(struct file *);
	set = (nfdt->max_fds - ofdt->max_fds) * sizeof(struct file *);
	memcpy(nfdt->fd, ofdt->fd, cpy);
	memset((char *)nfdt->fd + cpy, 0, set);

	copy_fd_bitmaps(nfdt, ofdt, fdt_words(ofdt));
}

/*
 * Note how the fdtable bitmap allocations very much have to be a multiple of
 * BITS_PER_LONG. This is not only because we walk those things in chunks of
 * 'unsigned long' in some places, but simply because that is how the Linux
 * kernel bitmaps are defined to work: they are not "bits in an array of bytes",
 * they are very much "bits in an array of unsigned long".
 *
 * The ALIGN(nr, BITS_PER_LONG) here is for clarity: since we just multiplied
 * by that "1024/sizeof(ptr)" before, we already know there are sufficient
 * clear low bits. Clang seems to realize that, gcc ends up being confused.
 *
 * On a 128-bit machine, the ALIGN() would actually matter. In the meantime,
 * let's consider it documentation (and maybe a test-case for gcc to improve
 * its code generation ;)
 */
static struct fdtable *alloc_fdtable(unsigned int nr)
{
	struct fdtable *fdt;
	void *data;

	/*
	 * Figure out how many fds we actually want to support in this fdtable.
	 * Allocation steps are keyed to the size of the fdarray, since it
	 * grows far faster than any of the other dynamic data. We try to fit
	 * the fdarray into comfortable page-tuned chunks: starting at 1024B
	 * and growing in powers of two from there on.
	 */
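	/*
	 * Worked example (illustrative, 64-bit, so 1024B holds 128 slots):
	 * nr == 300 gives 300/128 == 2, roundup_pow_of_two(3) == 4, and
	 * 4 * 128 == 512 slots - a 4KiB fd array, already a multiple of
	 * BITS_PER_LONG.
	 */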
	nr /= (1024 / sizeof(struct file *));
	nr = roundup_pow_of_two(nr + 1);
	nr *= (1024 / sizeof(struct file *));
	nr = ALIGN(nr, BITS_PER_LONG);
	/*
	 * Note that this can drive nr *below* what we had passed if sysctl_nr_open
	 * had been set lower between the check in expand_files() and here.  Deal
	 * with that in the caller, it's cheaper that way.
	 *
	 * We make sure that nr remains a multiple of BITS_PER_LONG - otherwise
	 * bitmaps handling below becomes unpleasant, to put it mildly...
	 */
	if (unlikely(nr > sysctl_nr_open))
		nr = ((sysctl_nr_open - 1) | (BITS_PER_LONG - 1)) + 1;

	fdt = kmalloc(sizeof(struct fdtable), GFP_KERNEL_ACCOUNT);
	if (!fdt)
		goto out;
	fdt->max_fds = nr;
	data = kvmalloc_array(nr, sizeof(struct file *), GFP_KERNEL_ACCOUNT);
	if (!data)
		goto out_fdt;
	fdt->fd = data;

	data = kvmalloc(max_t(size_t,
				 2 * nr / BITS_PER_BYTE + BITBIT_SIZE(nr), L1_CACHE_BYTES),
				 GFP_KERNEL_ACCOUNT);
	if (!data)
		goto out_arr;
	fdt->open_fds = data;
	data += nr / BITS_PER_BYTE;
	fdt->close_on_exec = data;
	data += nr / BITS_PER_BYTE;
	fdt->full_fds_bits = data;

	return fdt;

out_arr:
	kvfree(fdt->fd);
out_fdt:
	kfree(fdt);
out:
	return NULL;
}

/*
 * Expand the file descriptor table.
 * This function will allocate a new fdtable and both fd array and fdset, of
 * the given size.
 * Return <0 error code on error; 1 on successful completion.
 * The files->file_lock should be held on entry, and will be held on exit.
 */
static int expand_fdtable(struct files_struct *files, unsigned int nr)
	__releases(files->file_lock)
	__acquires(files->file_lock)
{
	struct fdtable *new_fdt, *cur_fdt;

	spin_unlock(&files->file_lock);
	new_fdt = alloc_fdtable(nr);

	/*
	 * Make sure all fd_install() calls have either seen resize_in_progress
	 * or have finished their rcu_read_lock_sched() section.
	 */
	if (atomic_read(&files->count) > 1)
		synchronize_rcu();

	spin_lock(&files->file_lock);
	if (!new_fdt)
		return -ENOMEM;
	/*
	 * extremely unlikely race - sysctl_nr_open decreased between the check in
	 * the caller and alloc_fdtable().  Cheaper to catch it here...
	 */
	if (unlikely(new_fdt->max_fds <= nr)) {
		__free_fdtable(new_fdt);
		return -EMFILE;
	}
	cur_fdt = files_fdtable(files);
	BUG_ON(nr < cur_fdt->max_fds);
	copy_fdtable(new_fdt, cur_fdt);
	rcu_assign_pointer(files->fdt, new_fdt);
	if (cur_fdt != &files->fdtab)
		call_rcu(&cur_fdt->rcu, free_fdtable_rcu);
	/* coupled with smp_rmb() in fd_install() */
	smp_wmb();
	return 1;
}

/*
 * Expand files.
 * This function will expand the file structures, if the requested size exceeds
 * the current capacity and there is room for expansion.
 * Return <0 error code on error; 0 when nothing done; 1 when files were
 * expanded and execution may have blocked.
 * The files->file_lock should be held on entry, and will be held on exit.
 */
static int expand_files(struct files_struct *files, unsigned int nr)
	__releases(files->file_lock)
	__acquires(files->file_lock)
{
	struct fdtable *fdt;
	int expanded = 0;

repeat:
	fdt = files_fdtable(files);

	/* Do we need to expand? */
	if (nr < fdt->max_fds)
		return expanded;

	/* Can we expand? */
	if (nr >= sysctl_nr_open)
		return -EMFILE;

	if (unlikely(files->resize_in_progress)) {
		spin_unlock(&files->file_lock);
		expanded = 1;
		wait_event(files->resize_wait, !files->resize_in_progress);
		spin_lock(&files->file_lock);
		goto repeat;
	}

	/* All good, so we try */
	files->resize_in_progress = true;
	expanded = expand_fdtable(files, nr);
	files->resize_in_progress = false;

	wake_up_all(&files->resize_wait);
	return expanded;
}

static inline void __set_close_on_exec(unsigned int fd, struct fdtable *fdt)
{
	__set_bit(fd, fdt->close_on_exec);
}

static inline void __clear_close_on_exec(unsigned int fd, struct fdtable *fdt)
{
	if (test_bit(fd, fdt->close_on_exec))
		__clear_bit(fd, fdt->close_on_exec);
}

static inline void __set_open_fd(unsigned int fd, struct fdtable *fdt)
{
	__set_bit(fd, fdt->open_fds);
	fd /= BITS_PER_LONG;
	/* the word just filled up - flag it in the second-level bitmap */
	if (!~fdt->open_fds[fd])
		__set_bit(fd, fdt->full_fds_bits);
}

static inline void __clear_open_fd(unsigned int fd, struct fdtable *fdt)
{
	__clear_bit(fd, fdt->open_fds);
	__clear_bit(fd / BITS_PER_LONG, fdt->full_fds_bits);
}

/*
 * Note that a sane fdtable size always has to be a multiple of
 * BITS_PER_LONG, since we have bitmaps that are sized by this.
 *
 * punch_hole is optional - when close_range() is asked to unshare
 * and close, we don't need to copy descriptors in that range, so
 * a smaller cloned descriptor table might suffice if the last
 * currently opened descriptor falls into that range.
 */
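/*
 * Worked example (illustrative): with fds 0-4 and 100 open, a
 * close_range(5, ~0U, CLOSE_RANGE_UNSHARE) passes a {5, ~0U} hole that
 * covers the last open fd (100), so we re-scan below fd 5 and size the
 * clone at ALIGN(4 + 1, BITS_PER_LONG) == 64 slots instead of 128.
 */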
static unsigned int sane_fdtable_size(struct fdtable *fdt, struct fd_range *punch_hole)
{
	unsigned int last = find_last_bit(fdt->open_fds, fdt->max_fds);

	if (last == fdt->max_fds)
		return NR_OPEN_DEFAULT;
	if (punch_hole && punch_hole->to >= last && punch_hole->from <= last) {
		last = find_last_bit(fdt->open_fds, punch_hole->from);
		if (last == punch_hole->from)
			return NR_OPEN_DEFAULT;
	}
	return ALIGN(last + 1, BITS_PER_LONG);
}

/*
 * Allocate a new descriptor table and copy contents from the passed in
 * instance.  Returns a pointer to cloned table on success, ERR_PTR()
 * on failure.  For 'punch_hole' see sane_fdtable_size().
 */
struct files_struct *dup_fd(struct files_struct *oldf, struct fd_range *punch_hole)
{
	struct files_struct *newf;
	struct file **old_fds, **new_fds;
	unsigned int open_files, i;
	struct fdtable *old_fdt, *new_fdt;
	int error;

	newf = kmem_cache_alloc(files_cachep, GFP_KERNEL);
	if (!newf)
		return ERR_PTR(-ENOMEM);

	atomic_set(&newf->count, 1);

	spin_lock_init(&newf->file_lock);
	newf->resize_in_progress = false;
	init_waitqueue_head(&newf->resize_wait);
	newf->next_fd = 0;
	new_fdt = &newf->fdtab;
	new_fdt->max_fds = NR_OPEN_DEFAULT;
	new_fdt->close_on_exec = newf->close_on_exec_init;
	new_fdt->open_fds = newf->open_fds_init;
	new_fdt->full_fds_bits = newf->full_fds_bits_init;
	new_fdt->fd = &newf->fd_array[0];

	spin_lock(&oldf->file_lock);
	old_fdt = files_fdtable(oldf);
	open_files = sane_fdtable_size(old_fdt, punch_hole);

	/*
	 * Check whether we need to allocate a larger fd array and fd set.
	 */
	while (unlikely(open_files > new_fdt->max_fds)) {
		spin_unlock(&oldf->file_lock);

		if (new_fdt != &newf->fdtab)
			__free_fdtable(new_fdt);

		new_fdt = alloc_fdtable(open_files - 1);
		if (!new_fdt) {
			error = -ENOMEM;
			goto out_release;
		}

		/* beyond sysctl_nr_open; nothing to do */
		if (unlikely(new_fdt->max_fds < open_files)) {
			__free_fdtable(new_fdt);
			error = -EMFILE;
			goto out_release;
		}

		/*
		 * Reacquire the oldf lock and a pointer to its fd table; it
		 * may have grown a new, bigger fd table in the meantime, and
		 * we need the latest pointer.
		 */
		spin_lock(&oldf->file_lock);
		old_fdt = files_fdtable(oldf);
		open_files = sane_fdtable_size(old_fdt, punch_hole);
	}

	copy_fd_bitmaps(new_fdt, old_fdt, open_files / BITS_PER_LONG);

	old_fds = old_fdt->fd;
	new_fds = new_fdt->fd;

	for (i = open_files; i != 0; i--) {
		struct file *f = *old_fds++;
		if (f) {
			get_file(f);
		} else {
			/*
			 * The fd may be claimed in the fd bitmap but not yet
			 * instantiated in the files array if a sibling thread
			 * is partway through open().  So make sure that this
			 * fd is available to the new process.
			 */
			__clear_open_fd(open_files - i, new_fdt);
		}
		rcu_assign_pointer(*new_fds++, f);
	}
	spin_unlock(&oldf->file_lock);

	/* clear the remainder */
	memset(new_fds, 0, (new_fdt->max_fds - open_files) * sizeof(struct file *));

	rcu_assign_pointer(newf->fdt, new_fdt);

	return newf;

out_release:
	kmem_cache_free(files_cachep, newf);
	return ERR_PTR(error);
}

static struct fdtable *close_files(struct files_struct * files)
{
	/*
	 * It is safe to dereference the fd table without RCU or
	 * ->file_lock because this is the last reference to the
	 * files structure.
	 */
	struct fdtable *fdt = rcu_dereference_raw(files->fdt);
	unsigned int i, j = 0;

	for (;;) {
		unsigned long set;
		i = j * BITS_PER_LONG;
		if (i >= fdt->max_fds)
			break;
		set = fdt->open_fds[j++];
		while (set) {
			if (set & 1) {
				struct file * file = xchg(&fdt->fd[i], NULL);
				if (file) {
					filp_close(file, files);
					cond_resched();
				}
			}
			i++;
			set >>= 1;
		}
	}

	return fdt;
}

void put_files_struct(struct files_struct *files)
{
	if (atomic_dec_and_test(&files->count)) {
		struct fdtable *fdt = close_files(files);

		/* free the arrays if they are not embedded */
		if (fdt != &files->fdtab)
			__free_fdtable(fdt);
		kmem_cache_free(files_cachep, files);
	}
}

void exit_files(struct task_struct *tsk)
{
	struct files_struct * files = tsk->files;

	if (files) {
		task_lock(tsk);
		tsk->files = NULL;
		task_unlock(tsk);
		put_files_struct(files);
	}
}

struct files_struct init_files = {
	.count		= ATOMIC_INIT(1),
	.fdt		= &init_files.fdtab,
	.fdtab		= {
		.max_fds	= NR_OPEN_DEFAULT,
		.fd		= &init_files.fd_array[0],
		.close_on_exec	= init_files.close_on_exec_init,
		.open_fds	= init_files.open_fds_init,
		.full_fds_bits	= init_files.full_fds_bits_init,
	},
	.file_lock	= __SPIN_LOCK_UNLOCKED(init_files.file_lock),
	.resize_wait	= __WAIT_QUEUE_HEAD_INITIALIZER(init_files.resize_wait),
};

static unsigned int find_next_fd(struct fdtable *fdt, unsigned int start)
{
	unsigned int maxfd = fdt->max_fds; /* always multiple of BITS_PER_LONG */
	unsigned int maxbit = maxfd / BITS_PER_LONG;
	unsigned int bitbit = start / BITS_PER_LONG;

	bitbit = find_next_zero_bit(fdt->full_fds_bits, maxbit, bitbit) * BITS_PER_LONG;
	if (bitbit >= maxfd)
		return maxfd;
	if (bitbit > start)
		start = bitbit;
	return find_next_zero_bit(fdt->open_fds, maxfd, start);
}
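/*
 * Worked example (illustrative): if fds 0-63 are all open, word 0 of
 * ->open_fds is full, so its bit is set in ->full_fds_bits.  The first
 * find_next_zero_bit() above then skips straight to word 1 and the scan
 * of ->open_fds starts at fd 64 instead of walking 64 set bits.
 */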

/*
 * allocate a file descriptor, mark it busy.
 */
static int alloc_fd(unsigned start, unsigned end, unsigned flags)
{
	struct files_struct *files = current->files;
	unsigned int fd;
	int error;
	struct fdtable *fdt;

	spin_lock(&files->file_lock);
repeat:
	fdt = files_fdtable(files);
	fd = start;
	if (fd < files->next_fd)
		fd = files->next_fd;

	if (fd < fdt->max_fds)
		fd = find_next_fd(fdt, fd);

	/*
	 * N.B. For clone tasks sharing a files structure, this test
	 * will limit the total number of files that can be opened.
	 */
	error = -EMFILE;
	if (fd >= end)
		goto out;

	error = expand_files(files, fd);
	if (error < 0)
		goto out;

	/*
	 * If we needed to expand the fd array we
	 * might have blocked - try again.
	 */
	if (error)
		goto repeat;

	if (start <= files->next_fd)
		files->next_fd = fd + 1;

	__set_open_fd(fd, fdt);
	if (flags & O_CLOEXEC)
		__set_close_on_exec(fd, fdt);
	else
		__clear_close_on_exec(fd, fdt);
	error = fd;
#if 1
	/* Sanity check */
	if (rcu_access_pointer(fdt->fd[fd]) != NULL) {
		printk(KERN_WARNING "alloc_fd: slot %d not NULL!\n", fd);
		rcu_assign_pointer(fdt->fd[fd], NULL);
	}
#endif

out:
	spin_unlock(&files->file_lock);
	return error;
}

int __get_unused_fd_flags(unsigned flags, unsigned long nofile)
{
	return alloc_fd(0, nofile, flags);
}

int get_unused_fd_flags(unsigned flags)
{
	return __get_unused_fd_flags(flags, rlimit(RLIMIT_NOFILE));
}
EXPORT_SYMBOL(get_unused_fd_flags);

static void __put_unused_fd(struct files_struct *files, unsigned int fd)
{
	struct fdtable *fdt = files_fdtable(files);
	__clear_open_fd(fd, fdt);
	if (fd < files->next_fd)
		files->next_fd = fd;
}

void put_unused_fd(unsigned int fd)
{
	struct files_struct *files = current->files;
	spin_lock(&files->file_lock);
	__put_unused_fd(files, fd);
	spin_unlock(&files->file_lock);
}

EXPORT_SYMBOL(put_unused_fd);

/*
 * Install a file pointer in the fd array.
 *
 * The VFS is full of places where we drop the files lock between
 * setting the open_fds bitmap and installing the file in the file
 * array.  At any such point, we are vulnerable to a dup2() race
 * installing a file in the array before us.  We need to detect this and
 * fput() the struct file we are about to overwrite in this case.
 *
 * It should never happen - if we allow dup2() to do it, _really_ bad things
 * will follow.
 *
 * This consumes the "file" refcount, so callers should treat it
 * as if they had called fput(file).
 */

void fd_install(unsigned int fd, struct file *file)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;

	rcu_read_lock_sched();

	if (unlikely(files->resize_in_progress)) {
		rcu_read_unlock_sched();
		spin_lock(&files->file_lock);
		fdt = files_fdtable(files);
		BUG_ON(fdt->fd[fd] != NULL);
		rcu_assign_pointer(fdt->fd[fd], file);
		spin_unlock(&files->file_lock);
		return;
	}
	/* coupled with smp_wmb() in expand_fdtable() */
	smp_rmb();
	fdt = rcu_dereference_sched(files->fdt);
	BUG_ON(fdt->fd[fd] != NULL);
	rcu_assign_pointer(fdt->fd[fd], file);
	rcu_read_unlock_sched();
}

EXPORT_SYMBOL(fd_install);
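/*
 * A minimal sketch (assumed, not taken from this file) of the usual
 * allocate-then-publish pattern built on the helpers above, for a caller
 * donating its reference to "file":
 *
 *	int fd = get_unused_fd_flags(O_CLOEXEC);
 *	if (fd < 0) {
 *		fput(file);
 *		return fd;
 *	}
 *	fd_install(fd, file);
 *	return fd;
 *
 * A caller that fails between the two steps must release the slot with
 * put_unused_fd() instead of installing into it.
 */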

/**
 * pick_file - return file associated with fd
 * @files: file struct to retrieve file from
 * @fd: file descriptor to retrieve file for
 *
 * Context: files_lock must be held.
 *
 * Returns: The file associated with @fd (NULL if @fd is not open)
 */
static struct file *pick_file(struct files_struct *files, unsigned fd)
{
	struct fdtable *fdt = files_fdtable(files);
	struct file *file;

	if (fd >= fdt->max_fds)
		return NULL;

	fd = array_index_nospec(fd, fdt->max_fds);
	file = fdt->fd[fd];
	if (file) {
		rcu_assign_pointer(fdt->fd[fd], NULL);
		__put_unused_fd(files, fd);
	}
	return file;
}

int close_fd(unsigned fd)
{
	struct files_struct *files = current->files;
	struct file *file;

	spin_lock(&files->file_lock);
	file = pick_file(files, fd);
	spin_unlock(&files->file_lock);
	if (!file)
		return -EBADF;

	return filp_close(file, files);
}
EXPORT_SYMBOL(close_fd); /* for ksys_close() */

/**
 * last_fd - return last valid index into fd table
 * @fdt: File descriptor table.
 *
 * Context: Either rcu read lock or files_lock must be held.
 *
 * Returns: Last valid index into fdtable.
 */
static inline unsigned last_fd(struct fdtable *fdt)
{
	return fdt->max_fds - 1;
}

static inline void __range_cloexec(struct files_struct *cur_fds,
				   unsigned int fd, unsigned int max_fd)
{
	struct fdtable *fdt;

	/* make sure we're using the correct maximum value */
	spin_lock(&cur_fds->file_lock);
	fdt = files_fdtable(cur_fds);
	max_fd = min(last_fd(fdt), max_fd);
	if (fd <= max_fd)
		bitmap_set(fdt->close_on_exec, fd, max_fd - fd + 1);
	spin_unlock(&cur_fds->file_lock);
}

static inline void __range_close(struct files_struct *files, unsigned int fd,
				 unsigned int max_fd)
{
	struct file *file;
	unsigned n;

	spin_lock(&files->file_lock);
	n = last_fd(files_fdtable(files));
	max_fd = min(max_fd, n);

	for (; fd <= max_fd; fd++) {
		file = pick_file(files, fd);
		if (file) {
			spin_unlock(&files->file_lock);
			filp_close(file, files);
			cond_resched();
			spin_lock(&files->file_lock);
		} else if (need_resched()) {
			spin_unlock(&files->file_lock);
			cond_resched();
			spin_lock(&files->file_lock);
		}
	}
	spin_unlock(&files->file_lock);
}

/**
 * __close_range() - Close all file descriptors in a given range.
 *
 * @fd:     starting file descriptor to close
 * @max_fd: last file descriptor to close
 * @flags:  CLOSE_RANGE flags.
 *
 * This closes a range of file descriptors. All file descriptors
 * from @fd up to and including @max_fd are closed.
 */
int __close_range(unsigned fd, unsigned max_fd, unsigned int flags)
{
	struct task_struct *me = current;
	struct files_struct *cur_fds = me->files, *fds = NULL;

	if (flags & ~(CLOSE_RANGE_UNSHARE | CLOSE_RANGE_CLOEXEC))
		return -EINVAL;

	if (fd > max_fd)
		return -EINVAL;

	if ((flags & CLOSE_RANGE_UNSHARE) && atomic_read(&cur_fds->count) > 1) {
		struct fd_range range = {fd, max_fd}, *punch_hole = &range;

		/*
		 * If the caller requested all fds to be made cloexec we always
		 * copy all of the file descriptors since they still want to
		 * use them.
		 */
		if (flags & CLOSE_RANGE_CLOEXEC)
			punch_hole = NULL;

		fds = dup_fd(cur_fds, punch_hole);
		if (IS_ERR(fds))
			return PTR_ERR(fds);
		/*
		 * We used to share our file descriptor table, and have now
		 * created a private one, make sure we're using it below.
		 */
		swap(cur_fds, fds);
	}

	if (flags & CLOSE_RANGE_CLOEXEC)
		__range_cloexec(cur_fds, fd, max_fd);
	else
		__range_close(cur_fds, fd, max_fd);

	if (fds) {
		/*
		 * We're done closing the files we were supposed to. Time to install
		 * the new file descriptor table and drop the old one.
		 */
		task_lock(me);
		me->files = cur_fds;
		task_unlock(me);
		put_files_struct(fds);
	}

	return 0;
}
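/*
 * Userspace sketch (illustrative): a service manager can mark everything
 * above the std descriptors close-on-exec in one call,
 *
 *	close_range(3, ~0U, CLOSE_RANGE_CLOEXEC);
 *
 * or combine CLOSE_RANGE_UNSHARE to unshare the table and close the same
 * range without disturbing other threads.
 */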

/*
 * See close_fd_get_file() below, this variant assumes current->files->file_lock
 * is held.
 */
struct file *__close_fd_get_file(unsigned int fd)
{
	return pick_file(current->files, fd);
}

/*
 * Variant of close_fd() that gets a ref on the file for later fput.
 * The caller must ensure that filp_close() is called on the file.
 */
struct file *close_fd_get_file(unsigned int fd)
{
	struct files_struct *files = current->files;
	struct file *file;

	spin_lock(&files->file_lock);
	file = pick_file(files, fd);
	spin_unlock(&files->file_lock);

	return file;
}

void do_close_on_exec(struct files_struct *files)
{
	unsigned i;
	struct fdtable *fdt;

	/* exec unshares first */
	spin_lock(&files->file_lock);
	for (i = 0; ; i++) {
		unsigned long set;
		unsigned fd = i * BITS_PER_LONG;
		fdt = files_fdtable(files);
		if (fd >= fdt->max_fds)
			break;
		set = fdt->close_on_exec[i];
		if (!set)
			continue;
		fdt->close_on_exec[i] = 0;
		for ( ; set ; fd++, set >>= 1) {
			struct file *file;
			if (!(set & 1))
				continue;
			file = fdt->fd[fd];
			if (!file)
				continue;
			rcu_assign_pointer(fdt->fd[fd], NULL);
			__put_unused_fd(files, fd);
			spin_unlock(&files->file_lock);
			filp_close(file, files);
			cond_resched();
			spin_lock(&files->file_lock);
		}

	}
	spin_unlock(&files->file_lock);
}

static inline struct file *__fget_files_rcu(struct files_struct *files,
	unsigned int fd, fmode_t mask)
{
	for (;;) {
		struct file *file;
		struct fdtable *fdt = rcu_dereference_raw(files->fdt);
		struct file __rcu **fdentry;

		if (unlikely(fd >= fdt->max_fds))
			return NULL;

		fdentry = fdt->fd + array_index_nospec(fd, fdt->max_fds);
		file = rcu_dereference_raw(*fdentry);
		if (unlikely(!file))
			return NULL;

		if (unlikely(file->f_mode & mask))
			return NULL;

		/*
		 * Ok, we have a file pointer. However, because we do
		 * this all locklessly under RCU, we may be racing with
		 * that file being closed.
		 *
		 * Such a race can take two forms:
		 *
		 *  (a) the file ref already went down to zero,
		 *      and get_file_rcu() fails. Just try again:
		 */
		if (unlikely(!get_file_rcu(file)))
			continue;

		/*
		 *  (b) the file table entry has changed under us.
		 *       Note that we don't need to re-check the 'fdt->fd'
		 *       pointer having changed, because it always goes
		 *       hand-in-hand with 'fdt'.
		 *
		 * If so, we need to put our ref and try again.
		 */
		if (unlikely(rcu_dereference_raw(files->fdt) != fdt) ||
		    unlikely(rcu_dereference_raw(*fdentry) != file)) {
			fput(file);
			continue;
		}

		/*
		 * Ok, we have a ref to the file, and checked that it
		 * still exists.
		 */
		return file;
	}
}

static struct file *__fget_files(struct files_struct *files, unsigned int fd,
				 fmode_t mask)
{
	struct file *file;

	rcu_read_lock();
	file = __fget_files_rcu(files, fd, mask);
	rcu_read_unlock();

	return file;
}

static inline struct file *__fget(unsigned int fd, fmode_t mask)
{
	return __fget_files(current->files, fd, mask);
}

struct file *fget(unsigned int fd)
{
	return __fget(fd, FMODE_PATH);
}
EXPORT_SYMBOL(fget);

struct file *fget_raw(unsigned int fd)
{
	return __fget(fd, 0);
}
EXPORT_SYMBOL(fget_raw);

struct file *fget_task(struct task_struct *task, unsigned int fd)
{
	struct file *file = NULL;

	task_lock(task);
	if (task->files)
		file = __fget_files(task->files, fd, 0);
	task_unlock(task);

	return file;
}

struct file *task_lookup_fd_rcu(struct task_struct *task, unsigned int fd)
{
	/* Must be called with rcu_read_lock held */
	struct files_struct *files;
	struct file *file = NULL;

	task_lock(task);
	files = task->files;
	if (files)
		file = files_lookup_fd_rcu(files, fd);
	task_unlock(task);

	return file;
}

struct file *task_lookup_next_fd_rcu(struct task_struct *task, unsigned int *ret_fd)
{
	/* Must be called with rcu_read_lock held */
	struct files_struct *files;
	unsigned int fd = *ret_fd;
	struct file *file = NULL;

	task_lock(task);
	files = task->files;
	if (files) {
		for (; fd < files_fdtable(files)->max_fds; fd++) {
			file = files_lookup_fd_rcu(files, fd);
			if (file)
				break;
		}
	}
	task_unlock(task);
	*ret_fd = fd;
	return file;
}
EXPORT_SYMBOL(task_lookup_next_fd_rcu);

/*
 * Lightweight file lookup - no refcnt increment if fd table isn't shared.
 *
 * You can use this instead of fget if you satisfy all of the following
 * conditions:
 * 1) You must call fput_light before exiting the syscall and returning control
 *    to userspace (i.e. you cannot remember the returned struct file * after
 *    returning to userspace).
 * 2) You must not call filp_close on the returned struct file * in between
 *    calls to fget_light and fput_light.
 * 3) You must not clone the current task in between the calls to fget_light
 *    and fput_light.
 *
 * The fput_needed flag returned by fget_light should be passed to the
 * corresponding fput_light.
 */
static unsigned long __fget_light(unsigned int fd, fmode_t mask)
{
	struct files_struct *files = current->files;
	struct file *file;

	/*
	 * If another thread is concurrently calling close_fd() followed
	 * by put_files_struct(), we must not observe the old table
	 * entry combined with the new refcount - otherwise we could
	 * return a file that is concurrently being freed.
	 *
	 * atomic_read_acquire() pairs with atomic_dec_and_test() in
	 * put_files_struct().
	 */
	if (atomic_read_acquire(&files->count) == 1) {
		file = files_lookup_fd_raw(files, fd);
		if (!file || unlikely(file->f_mode & mask))
			return 0;
		return (unsigned long)file;
	} else {
		file = __fget(fd, mask);
		if (!file)
			return 0;
		return FDPUT_FPUT | (unsigned long)file;
	}
}
unsigned long __fdget(unsigned int fd)
{
	return __fget_light(fd, FMODE_PATH);
}
EXPORT_SYMBOL(__fdget);
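/*
 * A minimal sketch (assumed) of the fdget()/fdput() pairing from
 * <linux/file.h> that wraps the helpers above; do_something() is a
 * stand-in for the caller's work:
 *
 *	struct fd f = fdget(fd);
 *	if (!f.file)
 *		return -EBADF;
 *	ret = do_something(f.file);
 *	fdput(f);
 *	return ret;
 *
 * The FDPUT_FPUT bit stashed in the low bits above is what tells fdput()
 * whether a reference was actually taken.
 */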

unsigned long __fdget_raw(unsigned int fd)
{
	return __fget_light(fd, 0);
}

/*
 * Try to avoid f_pos locking. We only need it if the
 * file is marked for FMODE_ATOMIC_POS, and it can be
 * accessed multiple ways.
 *
 * Always do it for directories, because pidfd_getfd()
 * can make a file accessible even if it otherwise would
 * not be, and for directories this is a correctness
 * issue, not a "POSIX requirement".
 */
static inline bool file_needs_f_pos_lock(struct file *file)
{
	return (file->f_mode & FMODE_ATOMIC_POS) &&
		(file_count(file) > 1 || file->f_op->iterate_shared);
}

unsigned long __fdget_pos(unsigned int fd)
{
	unsigned long v = __fdget(fd);
	struct file *file = (struct file *)(v & ~3);

	if (file && file_needs_f_pos_lock(file)) {
		v |= FDPUT_POS_UNLOCK;
		mutex_lock(&file->f_pos_lock);
	}
	return v;
}

void __f_unlock_pos(struct file *f)
{
	mutex_unlock(&f->f_pos_lock);
}

/*
 * We only lock f_pos if we have threads or if the file might be
 * shared with another process. In both cases we'll have an elevated
 * file count (done either by fdget() or by fork()).
 */

void set_close_on_exec(unsigned int fd, int flag)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;
	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	if (flag)
		__set_close_on_exec(fd, fdt);
	else
		__clear_close_on_exec(fd, fdt);
	spin_unlock(&files->file_lock);
}

bool get_close_on_exec(unsigned int fd)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;
	bool res;
	rcu_read_lock();
	fdt = files_fdtable(files);
	res = close_on_exec(fd, fdt);
	rcu_read_unlock();
	return res;
}

static int do_dup2(struct files_struct *files,
	struct file *file, unsigned fd, unsigned flags)
__releases(&files->file_lock)
{
	struct file *tofree;
	struct fdtable *fdt;

	/*
	 * We need to detect attempts to do dup2() over an allocated but still
	 * not finished descriptor.  NB: OpenBSD avoids that at the price of
	 * extra work in their equivalent of fget() - they insert struct
	 * file immediately after grabbing descriptor, mark it larval if
	 * more work (e.g. actual opening) is needed and make sure that
	 * fget() treats larval files as absent.  Potentially interesting,
	 * but while extra work in fget() is trivial, locking implications
	 * and amount of surgery on open()-related paths in VFS are not.
	 * FreeBSD fails with -EBADF in the same situation, NetBSD "solution"
	 * deadlocks in rather amusing ways, AFAICS.  All of that is out of
	 * scope of POSIX or SUS, since neither considers shared descriptor
	 * tables and this condition does not arise without those.
	 */
	fdt = files_fdtable(files);
	fd = array_index_nospec(fd, fdt->max_fds);
	tofree = fdt->fd[fd];
	if (!tofree && fd_is_open(fd, fdt))
		goto Ebusy;
	get_file(file);
	rcu_assign_pointer(fdt->fd[fd], file);
	__set_open_fd(fd, fdt);
	if (flags & O_CLOEXEC)
		__set_close_on_exec(fd, fdt);
	else
		__clear_close_on_exec(fd, fdt);
	spin_unlock(&files->file_lock);

	if (tofree)
		filp_close(tofree, files);

	return fd;

Ebusy:
	spin_unlock(&files->file_lock);
	return -EBUSY;
}

int replace_fd(unsigned fd, struct file *file, unsigned flags)
{
	int err;
	struct files_struct *files = current->files;

	if (!file)
		return close_fd(fd);

	if (fd >= rlimit(RLIMIT_NOFILE))
		return -EBADF;

	spin_lock(&files->file_lock);
	err = expand_files(files, fd);
	if (unlikely(err < 0))
		goto out_unlock;
	return do_dup2(files, file, fd, flags);

out_unlock:
	spin_unlock(&files->file_lock);
	return err;
}

/**
 * __receive_fd() - Install received file into file descriptor table
 * @file: struct file that was received from another process
 * @ufd: __user pointer to write new fd number to
 * @o_flags: the O_* flags to apply to the new fd entry
 *
 * Installs a received file into the file descriptor table, with appropriate
 * checks and count updates. Optionally writes the fd number to userspace, if
 * @ufd is non-NULL.
 *
 * This helper handles its own reference counting of the incoming
 * struct file.
 *
 * Returns the newly installed fd or a negative error code on failure.
 */
int __receive_fd(struct file *file, int __user *ufd, unsigned int o_flags)
{
	int new_fd;
	int error;

	error = security_file_receive(file);
	if (error)
		return error;

	new_fd = get_unused_fd_flags(o_flags);
	if (new_fd < 0)
		return new_fd;

	if (ufd) {
		error = put_user(new_fd, ufd);
		if (error) {
			put_unused_fd(new_fd);
			return error;
		}
	}

	fd_install(new_fd, get_file(file));
	__receive_sock(file);
	return new_fd;
}

int receive_fd_replace(int new_fd, struct file *file, unsigned int o_flags)
{
	int error;

	error = security_file_receive(file);
	if (error)
		return error;
	error = replace_fd(new_fd, file, o_flags);
	if (error)
		return error;
	__receive_sock(file);
	return new_fd;
}

int receive_fd(struct file *file, unsigned int o_flags)
{
	return __receive_fd(file, NULL, o_flags);
}
EXPORT_SYMBOL_GPL(receive_fd);
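/*
 * Illustrative sketch (hypothetical caller): code receiving a file from
 * another process, e.g. an SCM_RIGHTS handler, installs it roughly like
 *
 *	int fd = receive_fd(file, O_CLOEXEC);
 *	if (fd < 0)
 *		return fd;
 */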

static int ksys_dup3(unsigned int oldfd, unsigned int newfd, int flags)
{
	int err = -EBADF;
	struct file *file;
	struct files_struct *files = current->files;

	if ((flags & ~O_CLOEXEC) != 0)
		return -EINVAL;

	if (unlikely(oldfd == newfd))
		return -EINVAL;

	if (newfd >= rlimit(RLIMIT_NOFILE))
		return -EBADF;

	spin_lock(&files->file_lock);
	err = expand_files(files, newfd);
	file = files_lookup_fd_locked(files, oldfd);
	if (unlikely(!file))
		goto Ebadf;
	if (unlikely(err < 0)) {
		if (err == -EMFILE)
			goto Ebadf;
		goto out_unlock;
	}
	return do_dup2(files, file, newfd, flags);

Ebadf:
	err = -EBADF;
out_unlock:
	spin_unlock(&files->file_lock);
	return err;
}

SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
{
	return ksys_dup3(oldfd, newfd, flags);
}

SYSCALL_DEFINE2(dup2, unsigned int, oldfd, unsigned int, newfd)
{
	if (unlikely(newfd == oldfd)) { /* corner case */
		struct files_struct *files = current->files;
		int retval = oldfd;

		rcu_read_lock();
		if (!files_lookup_fd_rcu(files, oldfd))
			retval = -EBADF;
		rcu_read_unlock();
		return retval;
	}
	return ksys_dup3(oldfd, newfd, 0);
}

SYSCALL_DEFINE1(dup, unsigned int, fildes)
{
	int ret = -EBADF;
	struct file *file = fget_raw(fildes);

	if (file) {
		ret = get_unused_fd_flags(0);
		if (ret >= 0)
			fd_install(ret, file);
		else
			fput(file);
	}
	return ret;
}

int f_dupfd(unsigned int from, struct file *file, unsigned flags)
{
	unsigned long nofile = rlimit(RLIMIT_NOFILE);
	int err;
	if (from >= nofile)
		return -EINVAL;
	err = alloc_fd(from, nofile, flags);
	if (err >= 0) {
		get_file(file);
		fd_install(err, file);
	}
	return err;
}

int iterate_fd(struct files_struct *files, unsigned n,
		int (*f)(const void *, struct file *, unsigned),
		const void *p)
{
	struct fdtable *fdt;
	int res = 0;
	if (!files)
		return 0;
	spin_lock(&files->file_lock);
	for (fdt = files_fdtable(files); n < fdt->max_fds; n++) {
		struct file *file;
		file = rcu_dereference_check_fdtable(files, fdt->fd[n]);
		if (!file)
			continue;
		res = f(p, file, n);
		if (res)
			break;
	}
	spin_unlock(&files->file_lock);
	return res;
}
EXPORT_SYMBOL(iterate_fd);