// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/file.c
 *
 *  Copyright (C) 1998-1999, Stephen Tweedie and Bill Hawes
 *
 *  Manage the dynamic fd arrays in the process files_struct.
 */

#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/close_range.h>
#include <net/sock.h>
#include <linux/init_task.h>

#include "internal.h"

unsigned int sysctl_nr_open __read_mostly = 1024*1024;
unsigned int sysctl_nr_open_min = BITS_PER_LONG;
/* our min() is unusable in constant expressions ;-/ */
#define __const_min(x, y) ((x) < (y) ? (x) : (y))
unsigned int sysctl_nr_open_max =
	__const_min(INT_MAX, ~(size_t)0/sizeof(void *)) & -BITS_PER_LONG;
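
/*
 * Worked example (illustrative, not part of the original source): on a
 * 64-bit machine ~(size_t)0 / sizeof(void *) is roughly 2^61, so the
 * INT_MAX bound wins; masking with -BITS_PER_LONG (i.e. ~63) rounds
 * 0x7fffffff down to 0x7fffffc0, keeping the limit a multiple of
 * BITS_PER_LONG as the bitmap code below requires.
 */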

static void __free_fdtable(struct fdtable *fdt)
{
	kvfree(fdt->fd);
	kvfree(fdt->open_fds);
	kfree(fdt);
}

static void free_fdtable_rcu(struct rcu_head *rcu)
{
	__free_fdtable(container_of(rcu, struct fdtable, rcu));
}

#define BITBIT_NR(nr)	BITS_TO_LONGS(BITS_TO_LONGS(nr))
#define BITBIT_SIZE(nr)	(BITBIT_NR(nr) * sizeof(long))
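
/*
 * Example (illustrative): for nr = 1024 fds on a 64-bit machine,
 * BITS_TO_LONGS(1024) = 16 words of ->open_fds, so the second-level
 * bitmap needs BITS_TO_LONGS(16) = 1 word; BITBIT_SIZE(1024) is thus
 * 8 bytes - one bit of ->full_fds_bits per word of ->open_fds.
 */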

#define fdt_words(fdt) ((fdt)->max_fds / BITS_PER_LONG) // words in ->open_fds
/*
 * Copy 'count' fd bits from the old table to the new table and clear the extra
 * space if any.  This does not copy the file pointers.  Called with the files
 * spinlock held for write.
 */
static inline void copy_fd_bitmaps(struct fdtable *nfdt, struct fdtable *ofdt,
				   unsigned int copy_words)
{
	unsigned int nwords = fdt_words(nfdt);

	bitmap_copy_and_extend(nfdt->open_fds, ofdt->open_fds,
			copy_words * BITS_PER_LONG, nwords * BITS_PER_LONG);
	bitmap_copy_and_extend(nfdt->close_on_exec, ofdt->close_on_exec,
			copy_words * BITS_PER_LONG, nwords * BITS_PER_LONG);
	bitmap_copy_and_extend(nfdt->full_fds_bits, ofdt->full_fds_bits,
			copy_words, nwords);
}

/*
 * Copy all file descriptors from the old table to the new, expanded table and
 * clear the extra space.  Called with the files spinlock held for write.
 */
static void copy_fdtable(struct fdtable *nfdt, struct fdtable *ofdt)
{
	size_t cpy, set;

	BUG_ON(nfdt->max_fds < ofdt->max_fds);

	cpy = ofdt->max_fds * sizeof(struct file *);
	set = (nfdt->max_fds - ofdt->max_fds) * sizeof(struct file *);
	memcpy(nfdt->fd, ofdt->fd, cpy);
	memset((char *)nfdt->fd + cpy, 0, set);

	copy_fd_bitmaps(nfdt, ofdt, fdt_words(ofdt));
}

/*
 * Note how the fdtable bitmap allocations very much have to be a multiple of
 * BITS_PER_LONG. This is not only because we walk those things in chunks of
 * 'unsigned long' in some places, but simply because that is how the Linux
 * kernel bitmaps are defined to work: they are not "bits in an array of bytes",
 * they are very much "bits in an array of unsigned long".
 *
 * The ALIGN(nr, BITS_PER_LONG) here is for clarity: since we just multiplied
 * by that "1024/sizeof(ptr)" before, we already know there are sufficient
 * clear low bits. Clang seems to realize that, gcc ends up being confused.
 *
 * On a 128-bit machine, the ALIGN() would actually matter. In the meantime,
 * let's consider it documentation (and maybe a test-case for gcc to improve
 * its code generation ;)
 */
static struct fdtable *alloc_fdtable(unsigned int nr)
{
	struct fdtable *fdt;
	void *data;

	/*
	 * Figure out how many fds we actually want to support in this fdtable.
	 * Allocation steps are keyed to the size of the fdarray, since it
	 * grows far faster than any of the other dynamic data. We try to fit
	 * the fdarray into comfortable page-tuned chunks: starting at 1024B
	 * and growing in powers of two from there on.
	 */
	nr /= (1024 / sizeof(struct file *));
	nr = roundup_pow_of_two(nr + 1);
	nr *= (1024 / sizeof(struct file *));
	nr = ALIGN(nr, BITS_PER_LONG);
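	/*
	 * Worked example (illustrative): on a 64-bit machine
	 * 1024 / sizeof(struct file *) is 128, so a request for nr = 300
	 * becomes 300/128 = 2, roundup_pow_of_two(3) = 4, then 4*128 = 512
	 * fds - i.e. a 4096-byte fd array - and ALIGN() leaves 512 as-is.
	 */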
	/*
	 * Note that this can drive nr *below* what we had passed if sysctl_nr_open
	 * had been set lower between the check in expand_files() and here.  Deal
	 * with that in caller, it's cheaper that way.
	 *
	 * We make sure that nr remains a multiple of BITS_PER_LONG - otherwise
	 * bitmaps handling below becomes unpleasant, to put it mildly...
	 */
	if (unlikely(nr > sysctl_nr_open))
		nr = ((sysctl_nr_open - 1) | (BITS_PER_LONG - 1)) + 1;

	fdt = kmalloc(sizeof(struct fdtable), GFP_KERNEL_ACCOUNT);
	if (!fdt)
		goto out;
	fdt->max_fds = nr;
	data = kvmalloc_array(nr, sizeof(struct file *), GFP_KERNEL_ACCOUNT);
	if (!data)
		goto out_fdt;
	fdt->fd = data;

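	/*
	 * All three bitmaps are carved out of a single allocation:
	 * nr/8 bytes of ->open_fds, then nr/8 bytes of ->close_on_exec,
	 * then BITBIT_SIZE(nr) bytes of ->full_fds_bits, with the total
	 * padded up to at least L1_CACHE_BYTES.
	 */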
	data = kvmalloc(max_t(size_t,
			      2 * nr / BITS_PER_BYTE + BITBIT_SIZE(nr), L1_CACHE_BYTES),
			      GFP_KERNEL_ACCOUNT);
	if (!data)
		goto out_arr;
	fdt->open_fds = data;
	data += nr / BITS_PER_BYTE;
	fdt->close_on_exec = data;
	data += nr / BITS_PER_BYTE;
	fdt->full_fds_bits = data;

	return fdt;

out_arr:
	kvfree(fdt->fd);
out_fdt:
	kfree(fdt);
out:
	return NULL;
}

/*
 * Expand the file descriptor table.
 * This function will allocate a new fdtable and both fd array and fdset, of
 * the given size.
 * Return <0 error code on error; 1 on successful completion.
 * The files->file_lock should be held on entry, and will be held on exit.
 */
static int expand_fdtable(struct files_struct *files, unsigned int nr)
	__releases(files->file_lock)
	__acquires(files->file_lock)
{
	struct fdtable *new_fdt, *cur_fdt;

	spin_unlock(&files->file_lock);
	new_fdt = alloc_fdtable(nr);

	/*
	 * Make sure all fd_install() callers have seen resize_in_progress
	 * or have finished their rcu_read_lock_sched() section.
	 */
	if (atomic_read(&files->count) > 1)
		synchronize_rcu();

	spin_lock(&files->file_lock);
	if (!new_fdt)
		return -ENOMEM;
	/*
	 * Extremely unlikely race - sysctl_nr_open decreased between the check
	 * in the caller and alloc_fdtable().  Cheaper to catch it here...
	 */
	if (unlikely(new_fdt->max_fds <= nr)) {
		__free_fdtable(new_fdt);
		return -EMFILE;
	}
	cur_fdt = files_fdtable(files);
	BUG_ON(nr < cur_fdt->max_fds);
	copy_fdtable(new_fdt, cur_fdt);
	rcu_assign_pointer(files->fdt, new_fdt);
	if (cur_fdt != &files->fdtab)
		call_rcu(&cur_fdt->rcu, free_fdtable_rcu);
	/* coupled with smp_rmb() in fd_install() */
	smp_wmb();
	return 1;
}
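
/*
 * A note on the resize protocol (summarizing the code above):
 * expand_files() sets ->resize_in_progress before calling
 * expand_fdtable(), so lockless fd_install() callers either observe the
 * flag and fall back to taking ->file_lock, or finish their
 * rcu_read_lock_sched() section before the synchronize_rcu() above
 * returns.  Either way, no store can hit the old array after it has
 * been copied and published via rcu_assign_pointer().
 */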

/*
 * Expand files.
 * This function will expand the file structures, if the requested size exceeds
 * the current capacity and there is room for expansion.
 * Return <0 error code on error; 0 when nothing done; 1 when files were
 * expanded and execution may have blocked.
 * The files->file_lock should be held on entry, and will be held on exit.
 */
static int expand_files(struct files_struct *files, unsigned int nr)
	__releases(files->file_lock)
	__acquires(files->file_lock)
{
	struct fdtable *fdt;
	int expanded = 0;

repeat:
	fdt = files_fdtable(files);

	/* Do we need to expand? */
	if (nr < fdt->max_fds)
		return expanded;

	/* Can we expand? */
	if (nr >= sysctl_nr_open)
		return -EMFILE;

	if (unlikely(files->resize_in_progress)) {
		spin_unlock(&files->file_lock);
		expanded = 1;
		wait_event(files->resize_wait, !files->resize_in_progress);
		spin_lock(&files->file_lock);
		goto repeat;
	}

	/* All good, so we try */
	files->resize_in_progress = true;
	expanded = expand_fdtable(files, nr);
	files->resize_in_progress = false;

	wake_up_all(&files->resize_wait);
	return expanded;
}

static inline void __set_close_on_exec(unsigned int fd, struct fdtable *fdt)
{
	__set_bit(fd, fdt->close_on_exec);
}

static inline void __clear_close_on_exec(unsigned int fd, struct fdtable *fdt)
{
	if (test_bit(fd, fdt->close_on_exec))
		__clear_bit(fd, fdt->close_on_exec);
}

static inline void __set_open_fd(unsigned int fd, struct fdtable *fdt)
{
	__set_bit(fd, fdt->open_fds);
	fd /= BITS_PER_LONG;
	if (!~fdt->open_fds[fd])
		__set_bit(fd, fdt->full_fds_bits);
}
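
/*
 * Illustration: with BITS_PER_LONG == 64, opening fd 200 sets bit 200
 * of ->open_fds; 200/64 = 3, and once open_fds[3] is all-ones
 * (!~open_fds[3]), bit 3 of ->full_fds_bits marks that whole word as
 * full so find_next_fd() can skip it.
 */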

static inline void __clear_open_fd(unsigned int fd, struct fdtable *fdt)
{
	__clear_bit(fd, fdt->open_fds);
	__clear_bit(fd / BITS_PER_LONG, fdt->full_fds_bits);
}

/*
 * Note that a sane fdtable size always has to be a multiple of
 * BITS_PER_LONG, since we have bitmaps that are sized by this.
 *
 * punch_hole is optional - when close_range() is asked to unshare
 * and close, we don't need to copy descriptors in that range, so
 * a smaller cloned descriptor table might suffice if the last
 * currently opened descriptor falls into that range.
 */
static unsigned int sane_fdtable_size(struct fdtable *fdt, struct fd_range *punch_hole)
{
	unsigned int last = find_last_bit(fdt->open_fds, fdt->max_fds);

	if (last == fdt->max_fds)
		return NR_OPEN_DEFAULT;
	if (punch_hole && punch_hole->to >= last && punch_hole->from <= last) {
		last = find_last_bit(fdt->open_fds, punch_hole->from);
		if (last == punch_hole->from)
			return NR_OPEN_DEFAULT;
	}
	return ALIGN(last + 1, BITS_PER_LONG);
}
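
/*
 * Example (illustrative): if the highest open descriptor is 70, the
 * clone needs ALIGN(71, 64) = 128 slots on a 64-bit machine; when no
 * descriptor is open at all, find_last_bit() returns max_fds and we
 * fall back to the embedded NR_OPEN_DEFAULT table.
 */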

/*
 * Allocate a new descriptor table and copy contents from the passed in
 * instance.  Returns a pointer to cloned table on success, ERR_PTR()
 * on failure.  For 'punch_hole' see sane_fdtable_size().
 */
struct files_struct *dup_fd(struct files_struct *oldf, struct fd_range *punch_hole)
{
	struct files_struct *newf;
	struct file **old_fds, **new_fds;
	unsigned int open_files, i;
	struct fdtable *old_fdt, *new_fdt;
	int error;

	newf = kmem_cache_alloc(files_cachep, GFP_KERNEL);
	if (!newf)
		return ERR_PTR(-ENOMEM);

	atomic_set(&newf->count, 1);

	spin_lock_init(&newf->file_lock);
	newf->resize_in_progress = false;
	init_waitqueue_head(&newf->resize_wait);
	newf->next_fd = 0;
	new_fdt = &newf->fdtab;
	new_fdt->max_fds = NR_OPEN_DEFAULT;
	new_fdt->close_on_exec = newf->close_on_exec_init;
	new_fdt->open_fds = newf->open_fds_init;
	new_fdt->full_fds_bits = newf->full_fds_bits_init;
	new_fdt->fd = &newf->fd_array[0];

	spin_lock(&oldf->file_lock);
	old_fdt = files_fdtable(oldf);
	open_files = sane_fdtable_size(old_fdt, punch_hole);

	/*
	 * Check whether we need to allocate a larger fd array and fd set.
	 */
	while (unlikely(open_files > new_fdt->max_fds)) {
		spin_unlock(&oldf->file_lock);

		if (new_fdt != &newf->fdtab)
			__free_fdtable(new_fdt);

		new_fdt = alloc_fdtable(open_files - 1);
		if (!new_fdt) {
			error = -ENOMEM;
			goto out_release;
		}

		/* beyond sysctl_nr_open; nothing to do */
		if (unlikely(new_fdt->max_fds < open_files)) {
			__free_fdtable(new_fdt);
			error = -EMFILE;
			goto out_release;
		}

		/*
		 * Reacquire the oldf lock and a fresh pointer to its fd table:
		 * it may have grown to a new, bigger fd table in the meantime,
		 * and we need the latest pointer.
		 */
		spin_lock(&oldf->file_lock);
		old_fdt = files_fdtable(oldf);
		open_files = sane_fdtable_size(old_fdt, punch_hole);
	}

	copy_fd_bitmaps(new_fdt, old_fdt, open_files / BITS_PER_LONG);

	old_fds = old_fdt->fd;
	new_fds = new_fdt->fd;

	/*
	 * We may be racing against fd allocation from other threads using this
	 * files_struct, despite holding ->file_lock.
	 *
	 * alloc_fd() might have already claimed a slot, while fd_install()
	 * did not populate it yet. Note the latter operates locklessly, so
	 * the file can show up as we are walking the array below.
	 *
	 * At the same time we know no files will disappear as all other
	 * operations take the lock.
	 *
	 * Instead of trying to placate userspace racing with itself, we
	 * ref the file if we see it and mark the fd slot as unused otherwise.
	 */
	for (i = open_files; i != 0; i--) {
		struct file *f = rcu_dereference_raw(*old_fds++);
		if (f) {
			get_file(f);
		} else {
			__clear_open_fd(open_files - i, new_fdt);
		}
		rcu_assign_pointer(*new_fds++, f);
	}
	spin_unlock(&oldf->file_lock);

	/* clear the remainder */
	memset(new_fds, 0, (new_fdt->max_fds - open_files) * sizeof(struct file *));

	rcu_assign_pointer(newf->fdt, new_fdt);

	return newf;

out_release:
	kmem_cache_free(files_cachep, newf);
	return ERR_PTR(error);
}

static struct fdtable *close_files(struct files_struct * files)
{
	/*
	 * It is safe to dereference the fd table without RCU or
	 * ->file_lock because this is the last reference to the
	 * files structure.
	 */
	struct fdtable *fdt = rcu_dereference_raw(files->fdt);
	unsigned int i, j = 0;

	for (;;) {
		unsigned long set;
		i = j * BITS_PER_LONG;
		if (i >= fdt->max_fds)
			break;
		set = fdt->open_fds[j++];
		while (set) {
			if (set & 1) {
				struct file * file = xchg(&fdt->fd[i], NULL);
				if (file) {
					filp_close(file, files);
					cond_resched();
				}
			}
			i++;
			set >>= 1;
		}
	}

	return fdt;
}

void put_files_struct(struct files_struct *files)
{
	if (atomic_dec_and_test(&files->count)) {
		struct fdtable *fdt = close_files(files);

		/* free the arrays if they are not embedded */
		if (fdt != &files->fdtab)
			__free_fdtable(fdt);
		kmem_cache_free(files_cachep, files);
	}
}

void exit_files(struct task_struct *tsk)
{
	struct files_struct * files = tsk->files;

	if (files) {
		task_lock(tsk);
		tsk->files = NULL;
		task_unlock(tsk);
		put_files_struct(files);
	}
}

struct files_struct init_files = {
	.count		= ATOMIC_INIT(1),
	.fdt		= &init_files.fdtab,
	.fdtab		= {
		.max_fds	= NR_OPEN_DEFAULT,
		.fd		= &init_files.fd_array[0],
		.close_on_exec	= init_files.close_on_exec_init,
		.open_fds	= init_files.open_fds_init,
		.full_fds_bits	= init_files.full_fds_bits_init,
	},
	.file_lock	= __SPIN_LOCK_UNLOCKED(init_files.file_lock),
	.resize_wait	= __WAIT_QUEUE_HEAD_INITIALIZER(init_files.resize_wait),
};

static unsigned int find_next_fd(struct fdtable *fdt, unsigned int start)
{
	unsigned int maxfd = fdt->max_fds; /* always multiple of BITS_PER_LONG */
	unsigned int maxbit = maxfd / BITS_PER_LONG;
	unsigned int bitbit = start / BITS_PER_LONG;

	bitbit = find_next_zero_bit(fdt->full_fds_bits, maxbit, bitbit) * BITS_PER_LONG;
	if (bitbit >= maxfd)
		return maxfd;
	if (bitbit > start)
		start = bitbit;
	return find_next_zero_bit(fdt->open_fds, maxfd, start);
}
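
/*
 * Two-level search, illustrated: if fds 0-63 are all open on a 64-bit
 * machine, bit 0 of ->full_fds_bits is set, so the first
 * find_next_zero_bit() skips straight to word 1 (fd 64) instead of
 * scanning 64 set bits in ->open_fds one by one.
 */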

/*
 * allocate a file descriptor, mark it busy.
 */
static int alloc_fd(unsigned start, unsigned end, unsigned flags)
{
	struct files_struct *files = current->files;
	unsigned int fd;
	int error;
	struct fdtable *fdt;

	spin_lock(&files->file_lock);
repeat:
	fdt = files_fdtable(files);
	fd = start;
	if (fd < files->next_fd)
		fd = files->next_fd;

	if (fd < fdt->max_fds)
		fd = find_next_fd(fdt, fd);

	/*
	 * N.B. For clone tasks sharing a files structure, this test
	 * will limit the total number of files that can be opened.
	 */
	error = -EMFILE;
	if (fd >= end)
		goto out;

	error = expand_files(files, fd);
	if (error < 0)
		goto out;

	/*
	 * If we needed to expand the fd array we
	 * might have blocked - try again.
	 */
	if (error)
		goto repeat;

	if (start <= files->next_fd)
		files->next_fd = fd + 1;

	__set_open_fd(fd, fdt);
	if (flags & O_CLOEXEC)
		__set_close_on_exec(fd, fdt);
	else
		__clear_close_on_exec(fd, fdt);
	error = fd;
#if 1
	/* Sanity check */
	if (rcu_access_pointer(fdt->fd[fd]) != NULL) {
		printk(KERN_WARNING "alloc_fd: slot %d not NULL!\n", fd);
		rcu_assign_pointer(fdt->fd[fd], NULL);
	}
#endif

out:
	spin_unlock(&files->file_lock);
	return error;
}

int __get_unused_fd_flags(unsigned flags, unsigned long nofile)
{
	return alloc_fd(0, nofile, flags);
}

int get_unused_fd_flags(unsigned flags)
{
	return __get_unused_fd_flags(flags, rlimit(RLIMIT_NOFILE));
}
EXPORT_SYMBOL(get_unused_fd_flags);
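
/*
 * Typical use (a minimal sketch, not taken from this file): reserve the
 * fd first, create the file, and only call fd_install() once nothing
 * can fail - after that the descriptor is visible to userspace and can
 * no longer be taken back with put_unused_fd().
 *
 *	fd = get_unused_fd_flags(O_CLOEXEC);
 *	if (fd < 0)
 *		return fd;
 *	file = anon_inode_getfile("[example]", &example_fops, priv, O_RDWR);
 *	if (IS_ERR(file)) {
 *		put_unused_fd(fd);
 *		return PTR_ERR(file);
 *	}
 *	fd_install(fd, file);
 *	return fd;
 *
 * (example_fops and priv are placeholders for the caller's own data.)
 */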

static void __put_unused_fd(struct files_struct *files, unsigned int fd)
{
	struct fdtable *fdt = files_fdtable(files);
	__clear_open_fd(fd, fdt);
	if (fd < files->next_fd)
		files->next_fd = fd;
}

void put_unused_fd(unsigned int fd)
{
	struct files_struct *files = current->files;
	spin_lock(&files->file_lock);
	__put_unused_fd(files, fd);
	spin_unlock(&files->file_lock);
}

EXPORT_SYMBOL(put_unused_fd);

/*
 * Install a file pointer in the fd array.
 *
 * The VFS is full of places where we drop the files lock between
 * setting the open_fds bitmap and installing the file in the file
 * array.  At any such point, we are vulnerable to a dup2() race
 * installing a file in the array before us.  We need to detect this and
 * fput() the struct file we are about to overwrite in this case.
 *
 * It should never happen - if we allow dup2() to do it, _really_ bad things
 * will follow.
 *
 * This consumes the "file" refcount, so callers should treat it
 * as if they had called fput(file).
 */

void fd_install(unsigned int fd, struct file *file)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;

	rcu_read_lock_sched();

	if (unlikely(files->resize_in_progress)) {
		rcu_read_unlock_sched();
		spin_lock(&files->file_lock);
		fdt = files_fdtable(files);
		BUG_ON(fdt->fd[fd] != NULL);
		rcu_assign_pointer(fdt->fd[fd], file);
		spin_unlock(&files->file_lock);
		return;
	}
	/* coupled with smp_wmb() in expand_fdtable() */
	smp_rmb();
	fdt = rcu_dereference_sched(files->fdt);
	BUG_ON(fdt->fd[fd] != NULL);
	rcu_assign_pointer(fdt->fd[fd], file);
	rcu_read_unlock_sched();
}

EXPORT_SYMBOL(fd_install);

/**
 * pick_file - return file associated with fd
 * @files: file struct to retrieve file from
 * @fd: file descriptor to retrieve file for
 *
 * Context: files_lock must be held.
 *
 * Returns: The file associated with @fd (NULL if @fd is not open)
 */
static struct file *pick_file(struct files_struct *files, unsigned fd)
{
	struct fdtable *fdt = files_fdtable(files);
	struct file *file;

	if (fd >= fdt->max_fds)
		return NULL;

	fd = array_index_nospec(fd, fdt->max_fds);
	file = rcu_dereference_raw(fdt->fd[fd]);
	if (file) {
		rcu_assign_pointer(fdt->fd[fd], NULL);
		__put_unused_fd(files, fd);
	}
	return file;
}

int close_fd(unsigned fd)
{
	struct files_struct *files = current->files;
	struct file *file;

	spin_lock(&files->file_lock);
	file = pick_file(files, fd);
	spin_unlock(&files->file_lock);
	if (!file)
		return -EBADF;

	return filp_close(file, files);
}
EXPORT_SYMBOL(close_fd); /* for ksys_close() */

/**
 * last_fd - return last valid index into fd table
 * @fdt: File descriptor table.
 *
 * Context: Either rcu read lock or files_lock must be held.
 *
 * Returns: Last valid index into fdtable.
 */
static inline unsigned last_fd(struct fdtable *fdt)
{
	return fdt->max_fds - 1;
}

static inline void __range_cloexec(struct files_struct *cur_fds,
				   unsigned int fd, unsigned int max_fd)
{
	struct fdtable *fdt;

	/* make sure we're using the correct maximum value */
	spin_lock(&cur_fds->file_lock);
	fdt = files_fdtable(cur_fds);
	max_fd = min(last_fd(fdt), max_fd);
	if (fd <= max_fd)
		bitmap_set(fdt->close_on_exec, fd, max_fd - fd + 1);
	spin_unlock(&cur_fds->file_lock);
}

static inline void __range_close(struct files_struct *files, unsigned int fd,
				 unsigned int max_fd)
{
	struct file *file;
	unsigned n;

	spin_lock(&files->file_lock);
	n = last_fd(files_fdtable(files));
	max_fd = min(max_fd, n);

	for (; fd <= max_fd; fd++) {
		file = pick_file(files, fd);
		if (file) {
			spin_unlock(&files->file_lock);
			filp_close(file, files);
			cond_resched();
			spin_lock(&files->file_lock);
		} else if (need_resched()) {
			spin_unlock(&files->file_lock);
			cond_resched();
			spin_lock(&files->file_lock);
		}
	}
	spin_unlock(&files->file_lock);
}

/**
 * __close_range() - Close all file descriptors in a given range.
 *
 * @fd:     starting file descriptor to close
 * @max_fd: last file descriptor to close
 * @flags:  CLOSE_RANGE flags.
 *
 * This closes a range of file descriptors. All file descriptors
 * from @fd up to and including @max_fd are closed.
 */
int __close_range(unsigned fd, unsigned max_fd, unsigned int flags)
{
	struct task_struct *me = current;
	struct files_struct *cur_fds = me->files, *fds = NULL;

	if (flags & ~(CLOSE_RANGE_UNSHARE | CLOSE_RANGE_CLOEXEC))
		return -EINVAL;

	if (fd > max_fd)
		return -EINVAL;

	if ((flags & CLOSE_RANGE_UNSHARE) && atomic_read(&cur_fds->count) > 1) {
		struct fd_range range = {fd, max_fd}, *punch_hole = &range;

		/*
		 * If the caller requested all fds to be made cloexec we always
		 * copy all of the file descriptors since they still want to
		 * use them.
		 */
		if (flags & CLOSE_RANGE_CLOEXEC)
			punch_hole = NULL;

		fds = dup_fd(cur_fds, punch_hole);
		if (IS_ERR(fds))
			return PTR_ERR(fds);
		/*
		 * We used to share our file descriptor table, and have now
		 * created a private one, make sure we're using it below.
		 */
		swap(cur_fds, fds);
	}

	if (flags & CLOSE_RANGE_CLOEXEC)
		__range_cloexec(cur_fds, fd, max_fd);
	else
		__range_close(cur_fds, fd, max_fd);

	if (fds) {
		/*
		 * We're done closing the files we were supposed to. Time to install
		 * the new file descriptor table and drop the old one.
		 */
		task_lock(me);
		me->files = cur_fds;
		task_unlock(me);
		put_files_struct(fds);
	}

	return 0;
}
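
/*
 * Userspace view (illustrative): after fork() a process can drop every
 * inherited descriptor above stderr with
 *
 *	close_range(3, ~0U, 0);
 *
 * and CLOSE_RANGE_UNSHARE lets it do so without disturbing a table it
 * still shares with other tasks, by switching to a private copy first.
 */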

/*
 * See close_fd_get_file() below, this variant assumes current->files->file_lock
 * is held.
 */
struct file *__close_fd_get_file(unsigned int fd)
{
	return pick_file(current->files, fd);
}

/*
 * variant of close_fd that gets a ref on the file for later fput.
 * The caller must ensure that filp_close() is called on the file.
 */
struct file *close_fd_get_file(unsigned int fd)
{
	struct files_struct *files = current->files;
	struct file *file;

	spin_lock(&files->file_lock);
	file = pick_file(files, fd);
	spin_unlock(&files->file_lock);

	return file;
}

void do_close_on_exec(struct files_struct *files)
{
	unsigned i;
	struct fdtable *fdt;

	/* exec unshares first */
	spin_lock(&files->file_lock);
	for (i = 0; ; i++) {
		unsigned long set;
		unsigned fd = i * BITS_PER_LONG;
		fdt = files_fdtable(files);
		if (fd >= fdt->max_fds)
			break;
		set = fdt->close_on_exec[i];
		if (!set)
			continue;
		fdt->close_on_exec[i] = 0;
		for ( ; set ; fd++, set >>= 1) {
			struct file *file;
			if (!(set & 1))
				continue;
			file = fdt->fd[fd];
			if (!file)
				continue;
			rcu_assign_pointer(fdt->fd[fd], NULL);
			__put_unused_fd(files, fd);
			spin_unlock(&files->file_lock);
			filp_close(file, files);
			cond_resched();
			spin_lock(&files->file_lock);
		}

	}
	spin_unlock(&files->file_lock);
}

static inline struct file *__fget_files_rcu(struct files_struct *files,
	unsigned int fd, fmode_t mask)
{
	for (;;) {
		struct file *file;
		struct fdtable *fdt = rcu_dereference_raw(files->fdt);
		struct file __rcu **fdentry;

		if (unlikely(fd >= fdt->max_fds))
			return NULL;

		fdentry = fdt->fd + array_index_nospec(fd, fdt->max_fds);
		file = rcu_dereference_raw(*fdentry);
		if (unlikely(!file))
			return NULL;

		if (unlikely(file->f_mode & mask))
			return NULL;

		/*
		 * Ok, we have a file pointer. However, because we do
		 * this all locklessly under RCU, we may be racing with
		 * that file being closed.
		 *
		 * Such a race can take two forms:
		 *
		 *  (a) the file ref already went down to zero,
		 *      and get_file_rcu() fails. Just try again:
		 */
		if (unlikely(!get_file_rcu(file)))
			continue;

		/*
		 *  (b) the file table entry has changed under us.
		 *      Note that we don't need to re-check the 'fdt->fd'
		 *      pointer having changed, because it always goes
		 *      hand-in-hand with 'fdt'.
		 *
		 * If so, we need to put our ref and try again.
		 */
		if (unlikely(rcu_dereference_raw(files->fdt) != fdt) ||
		    unlikely(rcu_dereference_raw(*fdentry) != file)) {
			fput(file);
			continue;
		}

		/*
		 * Ok, we have a ref to the file, and checked that it
		 * still exists.
		 */
		return file;
	}
}

static struct file *__fget_files(struct files_struct *files, unsigned int fd,
				 fmode_t mask)
{
	struct file *file;

	rcu_read_lock();
	file = __fget_files_rcu(files, fd, mask);
	rcu_read_unlock();

	return file;
}

static inline struct file *__fget(unsigned int fd, fmode_t mask)
{
	return __fget_files(current->files, fd, mask);
}

struct file *fget(unsigned int fd)
{
	return __fget(fd, FMODE_PATH);
}
EXPORT_SYMBOL(fget);

struct file *fget_raw(unsigned int fd)
{
	return __fget(fd, 0);
}
EXPORT_SYMBOL(fget_raw);
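
/*
 * The mask is a set of f_mode bits that disqualify a file: fget()
 * passes FMODE_PATH, so it refuses descriptors opened with O_PATH,
 * while fget_raw() passes 0 and accepts those as well.
 */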

struct file *fget_task(struct task_struct *task, unsigned int fd)
{
	struct file *file = NULL;

	task_lock(task);
	if (task->files)
		file = __fget_files(task->files, fd, 0);
	task_unlock(task);

	return file;
}

struct file *task_lookup_fd_rcu(struct task_struct *task, unsigned int fd)
{
	/* Must be called with rcu_read_lock held */
	struct files_struct *files;
	struct file *file = NULL;

	task_lock(task);
	files = task->files;
	if (files)
		file = files_lookup_fd_rcu(files, fd);
	task_unlock(task);

	return file;
}

struct file *task_lookup_next_fd_rcu(struct task_struct *task, unsigned int *ret_fd)
{
	/* Must be called with rcu_read_lock held */
	struct files_struct *files;
	unsigned int fd = *ret_fd;
	struct file *file = NULL;

	task_lock(task);
	files = task->files;
	if (files) {
		for (; fd < files_fdtable(files)->max_fds; fd++) {
			file = files_lookup_fd_rcu(files, fd);
			if (file)
				break;
		}
	}
	task_unlock(task);
	*ret_fd = fd;
	return file;
}
EXPORT_SYMBOL(task_lookup_next_fd_rcu);

/*
 * Lightweight file lookup - no refcnt increment if fd table isn't shared.
 *
 * You can use this instead of fget if you satisfy all of the following
 * conditions:
 * 1) You must call fput_light before exiting the syscall and returning control
 *    to userspace (i.e. you cannot remember the returned struct file * after
 *    returning to userspace).
 * 2) You must not call filp_close on the returned struct file * in between
 *    calls to fget_light and fput_light.
 * 3) You must not clone the current task in between the calls to fget_light
 *    and fput_light.
 *
 * The fput_needed flag returned by fget_light should be passed to the
 * corresponding fput_light.
 */
static unsigned long __fget_light(unsigned int fd, fmode_t mask)
{
	struct files_struct *files = current->files;
	struct file *file;

	/*
	 * If another thread is concurrently calling close_fd() followed
	 * by put_files_struct(), we must not observe the old table
	 * entry combined with the new refcount - otherwise we could
	 * return a file that is concurrently being freed.
	 *
	 * atomic_read_acquire() pairs with atomic_dec_and_test() in
	 * put_files_struct().
	 */
	if (atomic_read_acquire(&files->count) == 1) {
		file = files_lookup_fd_raw(files, fd);
		if (!file || unlikely(file->f_mode & mask))
			return 0;
		return (unsigned long)file;
	} else {
		file = __fget(fd, mask);
		if (!file)
			return 0;
		return FDPUT_FPUT | (unsigned long)file;
	}
}
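
/*
 * The return value packs flags into the low bits of the file pointer
 * (struct file is aligned well past 4 bytes, so they are free):
 * FDPUT_FPUT says a reference was taken and must be dropped with
 * fput(), and FDPUT_POS_UNLOCK (set in __fdget_pos() below) says
 * f_pos_lock is held.  Callers unpack the pointer with
 * (struct file *)(v & ~3), as __fdget_pos() does.
 */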
unsigned long __fdget(unsigned int fd)
{
	return __fget_light(fd, FMODE_PATH);
}
EXPORT_SYMBOL(__fdget);

unsigned long __fdget_raw(unsigned int fd)
{
	return __fget_light(fd, 0);
}

/*
 * Try to avoid f_pos locking. We only need it if the
 * file is marked for FMODE_ATOMIC_POS, and it can be
 * accessed multiple ways.
 *
 * Always do it for directories, because pidfd_getfd()
 * can make a file accessible even if it otherwise would
 * not be, and for directories this is a correctness
 * issue, not a "POSIX requirement".
 */
static inline bool file_needs_f_pos_lock(struct file *file)
{
	return (file->f_mode & FMODE_ATOMIC_POS) &&
		(file_count(file) > 1 || file->f_op->iterate_shared);
}

unsigned long __fdget_pos(unsigned int fd)
{
	unsigned long v = __fdget(fd);
	struct file *file = (struct file *)(v & ~3);

	if (file && file_needs_f_pos_lock(file)) {
		v |= FDPUT_POS_UNLOCK;
		mutex_lock(&file->f_pos_lock);
	}
	return v;
}

void __f_unlock_pos(struct file *f)
{
	mutex_unlock(&f->f_pos_lock);
}

/*
 * We only lock f_pos if we have threads or if the file might be
 * shared with another process. In both cases we'll have an elevated
 * file count (done either by fdget() or by fork()).
 */

void set_close_on_exec(unsigned int fd, int flag)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;
	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	if (flag)
		__set_close_on_exec(fd, fdt);
	else
		__clear_close_on_exec(fd, fdt);
	spin_unlock(&files->file_lock);
}

bool get_close_on_exec(unsigned int fd)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;
	bool res;
	rcu_read_lock();
	fdt = files_fdtable(files);
	res = close_on_exec(fd, fdt);
	rcu_read_unlock();
	return res;
}

static int do_dup2(struct files_struct *files,
	struct file *file, unsigned fd, unsigned flags)
__releases(&files->file_lock)
{
	struct file *tofree;
	struct fdtable *fdt;

	/*
	 * We need to detect attempts to do dup2() over an allocated but still
	 * not finished descriptor.  NB: OpenBSD avoids that at the price of
	 * extra work in their equivalent of fget() - they insert struct
	 * file immediately after grabbing descriptor, mark it larval if
	 * more work (e.g. actual opening) is needed and make sure that
	 * fget() treats larval files as absent.  Potentially interesting,
	 * but while extra work in fget() is trivial, locking implications
	 * and amount of surgery on open()-related paths in VFS are not.
	 * FreeBSD fails with -EBADF in the same situation, NetBSD "solution"
	 * deadlocks in rather amusing ways, AFAICS.  All of that is out of
	 * scope of POSIX or SUS, since neither considers shared descriptor
	 * tables and this condition does not arise without those.
	 */
	fdt = files_fdtable(files);
	fd = array_index_nospec(fd, fdt->max_fds);
	tofree = rcu_dereference_raw(fdt->fd[fd]);
	if (!tofree && fd_is_open(fd, fdt))
		goto Ebusy;
	get_file(file);
	rcu_assign_pointer(fdt->fd[fd], file);
	__set_open_fd(fd, fdt);
	if (flags & O_CLOEXEC)
		__set_close_on_exec(fd, fdt);
	else
		__clear_close_on_exec(fd, fdt);
	spin_unlock(&files->file_lock);

	if (tofree)
		filp_close(tofree, files);

	return fd;

Ebusy:
	spin_unlock(&files->file_lock);
	return -EBUSY;
}
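
/*
 * How the EBUSY case arises (illustrative): thread A's open() has
 * reserved a slot via alloc_fd() - the bit in ->open_fds is set but
 * ->fd[fd] is still NULL - when thread B calls dup2() onto that same
 * fd.  Here tofree is NULL yet fd_is_open() is true, so we refuse
 * rather than install over a descriptor that is still being set up.
 */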

int replace_fd(unsigned fd, struct file *file, unsigned flags)
{
	int err;
	struct files_struct *files = current->files;

	if (!file)
		return close_fd(fd);

	if (fd >= rlimit(RLIMIT_NOFILE))
		return -EBADF;

	spin_lock(&files->file_lock);
	err = expand_files(files, fd);
	if (unlikely(err < 0))
		goto out_unlock;
	return do_dup2(files, file, fd, flags);

out_unlock:
	spin_unlock(&files->file_lock);
	return err;
}

/**
 * __receive_fd() - Install received file into file descriptor table
 * @file: struct file that was received from another process
 * @ufd: __user pointer to write new fd number to
 * @o_flags: the O_* flags to apply to the new fd entry
 *
 * Installs a received file into the file descriptor table, with appropriate
 * checks and count updates. Optionally writes the fd number to userspace, if
 * @ufd is non-NULL.
 *
 * This helper handles its own reference counting of the incoming
 * struct file.
 *
 * Returns newly installed fd or -ve on error.
 */
int __receive_fd(struct file *file, int __user *ufd, unsigned int o_flags)
{
	int new_fd;
	int error;

	error = security_file_receive(file);
	if (error)
		return error;

	new_fd = get_unused_fd_flags(o_flags);
	if (new_fd < 0)
		return new_fd;

	if (ufd) {
		error = put_user(new_fd, ufd);
		if (error) {
			put_unused_fd(new_fd);
			return error;
		}
	}

	fd_install(new_fd, get_file(file));
	__receive_sock(file);
	return new_fd;
}

int receive_fd_replace(int new_fd, struct file *file, unsigned int o_flags)
{
	int error;

	error = security_file_receive(file);
	if (error)
		return error;
	error = replace_fd(new_fd, file, o_flags);
	if (error)
		return error;
	__receive_sock(file);
	return new_fd;
}

int receive_fd(struct file *file, unsigned int o_flags)
{
	return __receive_fd(file, NULL, o_flags);
}
EXPORT_SYMBOL_GPL(receive_fd);

static int ksys_dup3(unsigned int oldfd, unsigned int newfd, int flags)
{
	int err = -EBADF;
	struct file *file;
	struct files_struct *files = current->files;

	if ((flags & ~O_CLOEXEC) != 0)
		return -EINVAL;

	if (unlikely(oldfd == newfd))
		return -EINVAL;

	if (newfd >= rlimit(RLIMIT_NOFILE))
		return -EBADF;

	spin_lock(&files->file_lock);
	err = expand_files(files, newfd);
	file = files_lookup_fd_locked(files, oldfd);
	if (unlikely(!file))
		goto Ebadf;
	if (unlikely(err < 0)) {
		if (err == -EMFILE)
			goto Ebadf;
		goto out_unlock;
	}
	return do_dup2(files, file, newfd, flags);

Ebadf:
	err = -EBADF;
out_unlock:
	spin_unlock(&files->file_lock);
	return err;
}

SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
{
	return ksys_dup3(oldfd, newfd, flags);
}

SYSCALL_DEFINE2(dup2, unsigned int, oldfd, unsigned int, newfd)
{
	if (unlikely(newfd == oldfd)) { /* corner case */
		struct files_struct *files = current->files;
		int retval = oldfd;

		rcu_read_lock();
		if (!files_lookup_fd_rcu(files, oldfd))
			retval = -EBADF;
		rcu_read_unlock();
		return retval;
	}
	return ksys_dup3(oldfd, newfd, 0);
}

SYSCALL_DEFINE1(dup, unsigned int, fildes)
{
	int ret = -EBADF;
	struct file *file = fget_raw(fildes);

	if (file) {
		ret = get_unused_fd_flags(0);
		if (ret >= 0)
			fd_install(ret, file);
		else
			fput(file);
	}
	return ret;
}

int f_dupfd(unsigned int from, struct file *file, unsigned flags)
{
	unsigned long nofile = rlimit(RLIMIT_NOFILE);
	int err;
	if (from >= nofile)
		return -EINVAL;
	err = alloc_fd(from, nofile, flags);
	if (err >= 0) {
		get_file(file);
		fd_install(err, file);
	}
	return err;
}

int iterate_fd(struct files_struct *files, unsigned n,
		int (*f)(const void *, struct file *, unsigned),
		const void *p)
{
	struct fdtable *fdt;
	int res = 0;
	if (!files)
		return 0;
	spin_lock(&files->file_lock);
	for (fdt = files_fdtable(files); n < fdt->max_fds; n++) {
		struct file *file;
		file = rcu_dereference_check_fdtable(files, fdt->fd[n]);
		if (!file)
			continue;
		res = f(p, file, n);
		if (res)
			break;
	}
	spin_unlock(&files->file_lock);
	return res;
}
EXPORT_SYMBOL(iterate_fd);