// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/file.c
 *
 * Copyright (C) 1998-1999, Stephen Tweedie and Bill Hawes
 *
 * Manage the dynamic fd arrays in the process files_struct.
 */

#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/close_range.h>
#include <net/sock.h>

#include "internal.h"

unsigned int sysctl_nr_open __read_mostly = 1024*1024;
unsigned int sysctl_nr_open_min = BITS_PER_LONG;
/* our min() is unusable in constant expressions ;-/ */
#define __const_min(x, y) ((x) < (y) ? (x) : (y))
unsigned int sysctl_nr_open_max =
	__const_min(INT_MAX, ~(size_t)0/sizeof(void *)) & -BITS_PER_LONG;

static void __free_fdtable(struct fdtable *fdt)
{
	kvfree(fdt->fd);
	kvfree(fdt->open_fds);
	kfree(fdt);
}

static void free_fdtable_rcu(struct rcu_head *rcu)
{
	__free_fdtable(container_of(rcu, struct fdtable, rcu));
}

#define BITBIT_NR(nr)	BITS_TO_LONGS(BITS_TO_LONGS(nr))
#define BITBIT_SIZE(nr)	(BITBIT_NR(nr) * sizeof(long))
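
/*
 * Worked example of the double indirection above (illustrative, 64-bit
 * assumed): for a table with nr = 1024 slots, open_fds spans
 * BITS_TO_LONGS(1024) = 16 longs, and full_fds_bits needs one bit per
 * word of open_fds, so BITBIT_NR(1024) = BITS_TO_LONGS(16) = 1 long,
 * i.e. BITBIT_SIZE(1024) = 8 bytes.
 */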

/*
 * Copy 'count' fd bits from the old table to the new table and clear the extra
 * space if any. This does not copy the file pointers. Called with the files
 * spinlock held for write.
 */
static void copy_fd_bitmaps(struct fdtable *nfdt, struct fdtable *ofdt,
			    unsigned int count)
{
	unsigned int cpy, set;

	cpy = count / BITS_PER_BYTE;
	set = (nfdt->max_fds - count) / BITS_PER_BYTE;
	memcpy(nfdt->open_fds, ofdt->open_fds, cpy);
	memset((char *)nfdt->open_fds + cpy, 0, set);
	memcpy(nfdt->close_on_exec, ofdt->close_on_exec, cpy);
	memset((char *)nfdt->close_on_exec + cpy, 0, set);

	cpy = BITBIT_SIZE(count);
	set = BITBIT_SIZE(nfdt->max_fds) - cpy;
	memcpy(nfdt->full_fds_bits, ofdt->full_fds_bits, cpy);
	memset((char *)nfdt->full_fds_bits + cpy, 0, set);
}
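
/*
 * Example of the arithmetic above (a sketch, 64-bit assumed): growing
 * from count = 128 used bits to max_fds = 256, cpy = 128/8 = 16 bytes
 * are copied and set = (256 - 128)/8 = 16 bytes are zeroed in open_fds
 * and close_on_exec; for full_fds_bits, BITBIT_SIZE(128) = 8 bytes are
 * copied and, since BITBIT_SIZE(256) is also 8, nothing extra is
 * zeroed.
 */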

/*
 * Copy all file descriptors from the old table to the new, expanded table and
 * clear the extra space. Called with the files spinlock held for write.
 */
static void copy_fdtable(struct fdtable *nfdt, struct fdtable *ofdt)
{
	size_t cpy, set;

	BUG_ON(nfdt->max_fds < ofdt->max_fds);

	cpy = ofdt->max_fds * sizeof(struct file *);
	set = (nfdt->max_fds - ofdt->max_fds) * sizeof(struct file *);
	memcpy(nfdt->fd, ofdt->fd, cpy);
	memset((char *)nfdt->fd + cpy, 0, set);

	copy_fd_bitmaps(nfdt, ofdt, ofdt->max_fds);
}

/*
 * Note how the fdtable bitmap allocations very much have to be a multiple of
 * BITS_PER_LONG. This is not only because we walk those things in chunks of
 * 'unsigned long' in some places, but simply because that is how the Linux
 * kernel bitmaps are defined to work: they are not "bits in an array of bytes",
 * they are very much "bits in an array of unsigned long".
 *
 * The ALIGN(nr, BITS_PER_LONG) here is for clarity: since we just multiplied
 * by that "1024/sizeof(ptr)" before, we already know there are sufficient
 * clear low bits. Clang seems to realize that, gcc ends up being confused.
 *
 * On a 128-bit machine, the ALIGN() would actually matter. In the meantime,
 * let's consider it documentation (and maybe a test-case for gcc to improve
 * its code generation ;)
 */
static struct fdtable *alloc_fdtable(unsigned int nr)
{
	struct fdtable *fdt;
	void *data;

	/*
	 * Figure out how many fds we actually want to support in this fdtable.
	 * Allocation steps are keyed to the size of the fdarray, since it
	 * grows far faster than any of the other dynamic data. We try to fit
	 * the fdarray into comfortable page-tuned chunks: starting at 1024B
	 * and growing in powers of two from there on.
	 */
	nr /= (1024 / sizeof(struct file *));
	nr = roundup_pow_of_two(nr + 1);
	nr *= (1024 / sizeof(struct file *));
	nr = ALIGN(nr, BITS_PER_LONG);
	/*
	 * Note that this can drive nr *below* what we had passed if sysctl_nr_open
	 * had been set lower between the check in expand_files() and here. Deal
	 * with that in caller, it's cheaper that way.
	 *
	 * We make sure that nr remains a multiple of BITS_PER_LONG - otherwise
	 * bitmaps handling below becomes unpleasant, to put it mildly...
	 */
	if (unlikely(nr > sysctl_nr_open))
		nr = ((sysctl_nr_open - 1) | (BITS_PER_LONG - 1)) + 1;

	fdt = kmalloc(sizeof(struct fdtable), GFP_KERNEL_ACCOUNT);
	if (!fdt)
		goto out;
	fdt->max_fds = nr;
	data = kvmalloc_array(nr, sizeof(struct file *), GFP_KERNEL_ACCOUNT);
	if (!data)
		goto out_fdt;
	fdt->fd = data;

	data = kvmalloc(max_t(size_t,
			      2 * nr / BITS_PER_BYTE + BITBIT_SIZE(nr), L1_CACHE_BYTES),
			GFP_KERNEL_ACCOUNT);
	if (!data)
		goto out_arr;
	fdt->open_fds = data;
	data += nr / BITS_PER_BYTE;
	fdt->close_on_exec = data;
	data += nr / BITS_PER_BYTE;
	fdt->full_fds_bits = data;

	return fdt;

out_arr:
	kvfree(fdt->fd);
out_fdt:
	kfree(fdt);
out:
	return NULL;
}
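
/*
 * A worked example of the sizing logic above (illustrative, 64-bit
 * assumed, so 1024 / sizeof(struct file *) == 128): asking for slot
 * nr = 300 gives 300/128 = 2, roundup_pow_of_two(3) = 4, then
 * 4 * 128 = 512 slots - a 4KiB fd array, comfortably page-sized and
 * already a multiple of BITS_PER_LONG.
 */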

/*
 * Expand the file descriptor table.
 * This function will allocate a new fdtable and both fd array and fdset, of
 * the given size.
 * Return <0 error code on error; 1 on successful completion.
 * The files->file_lock should be held on entry, and will be held on exit.
 */
static int expand_fdtable(struct files_struct *files, unsigned int nr)
	__releases(files->file_lock)
	__acquires(files->file_lock)
{
	struct fdtable *new_fdt, *cur_fdt;

	spin_unlock(&files->file_lock);
	new_fdt = alloc_fdtable(nr);

	/* make sure all fd_install() have seen resize_in_progress
	 * or have finished their rcu_read_lock_sched() section.
	 */
	if (atomic_read(&files->count) > 1)
		synchronize_rcu();

	spin_lock(&files->file_lock);
	if (!new_fdt)
		return -ENOMEM;
	/*
	 * extremely unlikely race - sysctl_nr_open decreased between the check in
	 * caller and alloc_fdtable(). Cheaper to catch it here...
	 */
	if (unlikely(new_fdt->max_fds <= nr)) {
		__free_fdtable(new_fdt);
		return -EMFILE;
	}
	cur_fdt = files_fdtable(files);
	BUG_ON(nr < cur_fdt->max_fds);
	copy_fdtable(new_fdt, cur_fdt);
	rcu_assign_pointer(files->fdt, new_fdt);
	if (cur_fdt != &files->fdtab)
		call_rcu(&cur_fdt->rcu, free_fdtable_rcu);
	/* coupled with smp_rmb() in fd_install() */
	smp_wmb();
	return 1;
}

/*
 * Expand files.
 * This function will expand the file structures, if the requested size exceeds
 * the current capacity and there is room for expansion.
 * Return <0 error code on error; 0 when nothing done; 1 when files were
 * expanded and execution may have blocked.
 * The files->file_lock should be held on entry, and will be held on exit.
 */
static int expand_files(struct files_struct *files, unsigned int nr)
	__releases(files->file_lock)
	__acquires(files->file_lock)
{
	struct fdtable *fdt;
	int expanded = 0;

repeat:
	fdt = files_fdtable(files);

	/* Do we need to expand? */
	if (nr < fdt->max_fds)
		return expanded;

	/* Can we expand? */
	if (nr >= sysctl_nr_open)
		return -EMFILE;

	if (unlikely(files->resize_in_progress)) {
		spin_unlock(&files->file_lock);
		expanded = 1;
		wait_event(files->resize_wait, !files->resize_in_progress);
		spin_lock(&files->file_lock);
		goto repeat;
	}

	/* All good, so we try */
	files->resize_in_progress = true;
	expanded = expand_fdtable(files, nr);
	files->resize_in_progress = false;

	wake_up_all(&files->resize_wait);
	return expanded;
}

static inline void __set_close_on_exec(unsigned int fd, struct fdtable *fdt)
{
	__set_bit(fd, fdt->close_on_exec);
}

static inline void __clear_close_on_exec(unsigned int fd, struct fdtable *fdt)
{
	if (test_bit(fd, fdt->close_on_exec))
		__clear_bit(fd, fdt->close_on_exec);
}

static inline void __set_open_fd(unsigned int fd, struct fdtable *fdt)
{
	__set_bit(fd, fdt->open_fds);
	fd /= BITS_PER_LONG;
	if (!~fdt->open_fds[fd])
		__set_bit(fd, fdt->full_fds_bits);
}

static inline void __clear_open_fd(unsigned int fd, struct fdtable *fdt)
{
	__clear_bit(fd, fdt->open_fds);
	__clear_bit(fd / BITS_PER_LONG, fdt->full_fds_bits);
}

static unsigned int count_open_files(struct fdtable *fdt)
{
	unsigned int size = fdt->max_fds;
	unsigned int i;

	/* Find the last open fd */
	for (i = size / BITS_PER_LONG; i > 0; ) {
		if (fdt->open_fds[--i])
			break;
	}
	i = (i + 1) * BITS_PER_LONG;
	return i;
}
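
/*
 * Example (illustrative, 64-bit assumed): with max_fds = 256 and the
 * highest open fd being 100, words 3 and 2 of open_fds are zero and
 * word 1 is not, so the loop stops at i = 1 and the function returns
 * (1 + 1) * 64 = 128 - the used portion rounded up to a whole word.
 */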

/*
 * Note that a sane fdtable size always has to be a multiple of
 * BITS_PER_LONG, since we have bitmaps that are sized by this.
 *
 * 'max_fds' will normally already be properly aligned, but it
 * turns out that in the close_range() -> __close_range() ->
 * unshare_fd() -> dup_fd() -> sane_fdtable_size() we can end
 * up having a 'max_fds' value that isn't already aligned.
 *
 * Rather than make close_range() have to worry about this,
 * just make that BITS_PER_LONG alignment be part of a sane
 * fdtable size. Because that's really what it is.
 */
static unsigned int sane_fdtable_size(struct fdtable *fdt, unsigned int max_fds)
{
	unsigned int count;

	count = count_open_files(fdt);
	if (max_fds < NR_OPEN_DEFAULT)
		max_fds = NR_OPEN_DEFAULT;
	return ALIGN(min(count, max_fds), BITS_PER_LONG);
}
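
/*
 * Example (a sketch, assuming NR_OPEN_DEFAULT is BITS_PER_LONG): if
 * fds are open up to 100 (count_open_files() returns 128) but the
 * caller passed max_fds = 0, max_fds is raised to NR_OPEN_DEFAULT and
 * the result is ALIGN(min(128, 64), 64) = 64 - the embedded fdtab is
 * then big enough for the clone.
 */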

/*
 * Allocate a new files structure and copy contents from the
 * passed in files structure.
 * errorp will be valid only when the returned files_struct is NULL.
 */
struct files_struct *dup_fd(struct files_struct *oldf, unsigned int max_fds, int *errorp)
{
	struct files_struct *newf;
	struct file **old_fds, **new_fds;
	unsigned int open_files, i;
	struct fdtable *old_fdt, *new_fdt;

	*errorp = -ENOMEM;
	newf = kmem_cache_alloc(files_cachep, GFP_KERNEL);
	if (!newf)
		goto out;

	atomic_set(&newf->count, 1);

	spin_lock_init(&newf->file_lock);
	newf->resize_in_progress = false;
	init_waitqueue_head(&newf->resize_wait);
	newf->next_fd = 0;
	new_fdt = &newf->fdtab;
	new_fdt->max_fds = NR_OPEN_DEFAULT;
	new_fdt->close_on_exec = newf->close_on_exec_init;
	new_fdt->open_fds = newf->open_fds_init;
	new_fdt->full_fds_bits = newf->full_fds_bits_init;
	new_fdt->fd = &newf->fd_array[0];

	spin_lock(&oldf->file_lock);
	old_fdt = files_fdtable(oldf);
	open_files = sane_fdtable_size(old_fdt, max_fds);

	/*
	 * Check whether we need to allocate a larger fd array and fd set.
	 */
	while (unlikely(open_files > new_fdt->max_fds)) {
		spin_unlock(&oldf->file_lock);

		if (new_fdt != &newf->fdtab)
			__free_fdtable(new_fdt);

		new_fdt = alloc_fdtable(open_files - 1);
		if (!new_fdt) {
			*errorp = -ENOMEM;
			goto out_release;
		}

		/* beyond sysctl_nr_open; nothing to do */
		if (unlikely(new_fdt->max_fds < open_files)) {
			__free_fdtable(new_fdt);
			*errorp = -EMFILE;
			goto out_release;
		}

		/*
		 * Reacquire the oldf lock and a pointer to its fd table;
		 * it may have gained a new, bigger fd table in the
		 * meantime, and we need the latest pointer.
		 */
		spin_lock(&oldf->file_lock);
		old_fdt = files_fdtable(oldf);
		open_files = sane_fdtable_size(old_fdt, max_fds);
	}

	copy_fd_bitmaps(new_fdt, old_fdt, open_files);

	old_fds = old_fdt->fd;
	new_fds = new_fdt->fd;

	for (i = open_files; i != 0; i--) {
		struct file *f = *old_fds++;
		if (f) {
			get_file(f);
		} else {
			/*
			 * The fd may be claimed in the fd bitmap but not yet
			 * instantiated in the files array if a sibling thread
			 * is partway through open(). So make sure that this
			 * fd is available to the new process.
			 */
			__clear_open_fd(open_files - i, new_fdt);
		}
		rcu_assign_pointer(*new_fds++, f);
	}
	spin_unlock(&oldf->file_lock);

	/* clear the remainder */
	memset(new_fds, 0, (new_fdt->max_fds - open_files) * sizeof(struct file *));

	rcu_assign_pointer(newf->fdt, new_fdt);

	return newf;

out_release:
	kmem_cache_free(files_cachep, newf);
out:
	return NULL;
}

static struct fdtable *close_files(struct files_struct *files)
{
	/*
	 * It is safe to dereference the fd table without RCU or
	 * ->file_lock because this is the last reference to the
	 * files structure.
	 */
	struct fdtable *fdt = rcu_dereference_raw(files->fdt);
	unsigned int i, j = 0;

	for (;;) {
		unsigned long set;
		i = j * BITS_PER_LONG;
		if (i >= fdt->max_fds)
			break;
		set = fdt->open_fds[j++];
		while (set) {
			if (set & 1) {
				struct file *file = xchg(&fdt->fd[i], NULL);
				if (file) {
					filp_close(file, files);
					cond_resched();
				}
			}
			i++;
			set >>= 1;
		}
	}

	return fdt;
}

void put_files_struct(struct files_struct *files)
{
	if (atomic_dec_and_test(&files->count)) {
		struct fdtable *fdt = close_files(files);

		/* free the arrays if they are not embedded */
		if (fdt != &files->fdtab)
			__free_fdtable(fdt);
		kmem_cache_free(files_cachep, files);
	}
}

void exit_files(struct task_struct *tsk)
{
	struct files_struct *files = tsk->files;

	if (files) {
		task_lock(tsk);
		tsk->files = NULL;
		task_unlock(tsk);
		put_files_struct(files);
	}
}

struct files_struct init_files = {
	.count		= ATOMIC_INIT(1),
	.fdt		= &init_files.fdtab,
	.fdtab		= {
		.max_fds	= NR_OPEN_DEFAULT,
		.fd		= &init_files.fd_array[0],
		.close_on_exec	= init_files.close_on_exec_init,
		.open_fds	= init_files.open_fds_init,
		.full_fds_bits	= init_files.full_fds_bits_init,
	},
	.file_lock	= __SPIN_LOCK_UNLOCKED(init_files.file_lock),
	.resize_wait	= __WAIT_QUEUE_HEAD_INITIALIZER(init_files.resize_wait),
};

static unsigned int find_next_fd(struct fdtable *fdt, unsigned int start)
{
	unsigned int maxfd = fdt->max_fds;
	unsigned int maxbit = maxfd / BITS_PER_LONG;
	unsigned int bitbit = start / BITS_PER_LONG;

	bitbit = find_next_zero_bit(fdt->full_fds_bits, maxbit, bitbit) * BITS_PER_LONG;
	if (bitbit > maxfd)
		return maxfd;
	if (bitbit > start)
		start = bitbit;
	return find_next_zero_bit(fdt->open_fds, maxfd, start);
}
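
/*
 * Example of the two-level search above (illustrative, 64-bit
 * assumed): if fds 0-63 are all open, bit 0 of full_fds_bits is set,
 * so the first find_next_zero_bit() skips straight to word 1 and
 * bitbit = 64; the scan of open_fds then starts at fd 64 instead of
 * re-walking the full word, which is the whole point of the
 * second-level bitmap.
 */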

/*
 * allocate a file descriptor, mark it busy.
 */
static int alloc_fd(unsigned start, unsigned end, unsigned flags)
{
	struct files_struct *files = current->files;
	unsigned int fd;
	int error;
	struct fdtable *fdt;

	spin_lock(&files->file_lock);
repeat:
	fdt = files_fdtable(files);
	fd = start;
	if (fd < files->next_fd)
		fd = files->next_fd;

	if (fd < fdt->max_fds)
		fd = find_next_fd(fdt, fd);

	/*
	 * N.B. For clone tasks sharing a files structure, this test
	 * will limit the total number of files that can be opened.
	 */
	error = -EMFILE;
	if (fd >= end)
		goto out;

	error = expand_files(files, fd);
	if (error < 0)
		goto out;

	/*
	 * If we needed to expand the fd array we
	 * might have blocked - try again.
	 */
	if (error)
		goto repeat;

	if (start <= files->next_fd)
		files->next_fd = fd + 1;

	__set_open_fd(fd, fdt);
	if (flags & O_CLOEXEC)
		__set_close_on_exec(fd, fdt);
	else
		__clear_close_on_exec(fd, fdt);
	error = fd;
#if 1
	/* Sanity check */
	if (rcu_access_pointer(fdt->fd[fd]) != NULL) {
		printk(KERN_WARNING "alloc_fd: slot %d not NULL!\n", fd);
		rcu_assign_pointer(fdt->fd[fd], NULL);
	}
#endif

out:
	spin_unlock(&files->file_lock);
	return error;
}

int __get_unused_fd_flags(unsigned flags, unsigned long nofile)
{
	return alloc_fd(0, nofile, flags);
}

int get_unused_fd_flags(unsigned flags)
{
	return __get_unused_fd_flags(flags, rlimit(RLIMIT_NOFILE));
}
EXPORT_SYMBOL(get_unused_fd_flags);

static void __put_unused_fd(struct files_struct *files, unsigned int fd)
{
	struct fdtable *fdt = files_fdtable(files);
	__clear_open_fd(fd, fdt);
	if (fd < files->next_fd)
		files->next_fd = fd;
}

void put_unused_fd(unsigned int fd)
{
	struct files_struct *files = current->files;
	spin_lock(&files->file_lock);
	__put_unused_fd(files, fd);
	spin_unlock(&files->file_lock);
}

EXPORT_SYMBOL(put_unused_fd);

/*
 * Install a file pointer in the fd array.
 *
 * The VFS is full of places where we drop the files lock between
 * setting the open_fds bitmap and installing the file in the file
 * array. At any such point, we are vulnerable to a dup2() race
 * installing a file in the array before us. We need to detect this and
 * fput() the struct file we are about to overwrite in this case.
 *
 * It should never happen - if we allow dup2() to do it, _really_ bad things
 * will follow.
 *
 * This consumes the "file" refcount, so callers should treat it
 * as if they had called fput(file).
 */

void fd_install(unsigned int fd, struct file *file)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;

	if (WARN_ON_ONCE(unlikely(file->f_mode & FMODE_BACKING)))
		return;

	rcu_read_lock_sched();

	if (unlikely(files->resize_in_progress)) {
		rcu_read_unlock_sched();
		spin_lock(&files->file_lock);
		fdt = files_fdtable(files);
		BUG_ON(fdt->fd[fd] != NULL);
		rcu_assign_pointer(fdt->fd[fd], file);
		spin_unlock(&files->file_lock);
		return;
	}
	/* coupled with smp_wmb() in expand_fdtable() */
	smp_rmb();
	fdt = rcu_dereference_sched(files->fdt);
	BUG_ON(fdt->fd[fd] != NULL);
	rcu_assign_pointer(fdt->fd[fd], file);
	rcu_read_unlock_sched();
}

EXPORT_SYMBOL(fd_install);
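
/*
 * The canonical pairing of the helpers above, sketched here for
 * illustration (not part of this file; "example_fops" and "priv" are
 * placeholders):
 *
 *	fd = get_unused_fd_flags(O_CLOEXEC);
 *	if (fd < 0)
 *		return fd;
 *	file = anon_inode_getfile("[example]", &example_fops, priv, O_RDWR);
 *	if (IS_ERR(file)) {
 *		put_unused_fd(fd);
 *		return PTR_ERR(file);
 *	}
 *	fd_install(fd, file);
 *	return fd;
 *
 * Everything that can fail must happen before fd_install(): once the
 * file is installed, the descriptor is visible to userspace (and to
 * sibling threads) and can no longer be backed out with
 * put_unused_fd().
 */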

/**
 * file_close_fd_locked - return file associated with fd
 * @files: file struct to retrieve file from
 * @fd: file descriptor to retrieve file for
 *
 * Doesn't take a separate reference count.
 *
 * Context: files_lock must be held.
 *
 * Returns: The file associated with @fd (NULL if @fd is not open)
 */
struct file *file_close_fd_locked(struct files_struct *files, unsigned fd)
{
	struct fdtable *fdt = files_fdtable(files);
	struct file *file;

	lockdep_assert_held(&files->file_lock);

	if (fd >= fdt->max_fds)
		return NULL;

	fd = array_index_nospec(fd, fdt->max_fds);
	file = fdt->fd[fd];
	if (file) {
		rcu_assign_pointer(fdt->fd[fd], NULL);
		__put_unused_fd(files, fd);
	}
	return file;
}

int close_fd(unsigned fd)
{
	struct files_struct *files = current->files;
	struct file *file;

	spin_lock(&files->file_lock);
	file = file_close_fd_locked(files, fd);
	spin_unlock(&files->file_lock);
	if (!file)
		return -EBADF;

	return filp_close(file, files);
}
EXPORT_SYMBOL(close_fd); /* for ksys_close() */

/**
 * last_fd - return last valid index into fd table
 * @fdt: File descriptor table.
 *
 * Context: Either rcu read lock or files_lock must be held.
 *
 * Returns: Last valid index into fdtable.
 */
static inline unsigned last_fd(struct fdtable *fdt)
{
	return fdt->max_fds - 1;
}

static inline void __range_cloexec(struct files_struct *cur_fds,
				   unsigned int fd, unsigned int max_fd)
{
	struct fdtable *fdt;

	/* make sure we're using the correct maximum value */
	spin_lock(&cur_fds->file_lock);
	fdt = files_fdtable(cur_fds);
	max_fd = min(last_fd(fdt), max_fd);
	if (fd <= max_fd)
		bitmap_set(fdt->close_on_exec, fd, max_fd - fd + 1);
	spin_unlock(&cur_fds->file_lock);
}

static inline void __range_close(struct files_struct *files, unsigned int fd,
				 unsigned int max_fd)
{
	struct file *file;
	unsigned n;

	spin_lock(&files->file_lock);
	n = last_fd(files_fdtable(files));
	max_fd = min(max_fd, n);

	for (; fd <= max_fd; fd++) {
		file = file_close_fd_locked(files, fd);
		if (file) {
			spin_unlock(&files->file_lock);
			filp_close(file, files);
			cond_resched();
			spin_lock(&files->file_lock);
		} else if (need_resched()) {
			spin_unlock(&files->file_lock);
			cond_resched();
			spin_lock(&files->file_lock);
		}
	}
	spin_unlock(&files->file_lock);
}

/**
 * __close_range() - Close all file descriptors in a given range.
 *
 * @fd:     starting file descriptor to close
 * @max_fd: last file descriptor to close
 * @flags:  CLOSE_RANGE flags.
 *
 * This closes a range of file descriptors. All file descriptors
 * from @fd up to and including @max_fd are closed.
 */
int __close_range(unsigned fd, unsigned max_fd, unsigned int flags)
{
	struct task_struct *me = current;
	struct files_struct *cur_fds = me->files, *fds = NULL;

	if (flags & ~(CLOSE_RANGE_UNSHARE | CLOSE_RANGE_CLOEXEC))
		return -EINVAL;

	if (fd > max_fd)
		return -EINVAL;

	if (flags & CLOSE_RANGE_UNSHARE) {
		int ret;
		unsigned int max_unshare_fds = NR_OPEN_MAX;

		/*
		 * If the caller requested all fds to be made cloexec we always
		 * copy all of the file descriptors since they still want to
		 * use them.
		 */
		if (!(flags & CLOSE_RANGE_CLOEXEC)) {
			/*
			 * If the requested range is greater than the current
			 * maximum, we're closing everything so only copy all
			 * file descriptors beneath the lowest file descriptor.
			 */
			rcu_read_lock();
			if (max_fd >= last_fd(files_fdtable(cur_fds)))
				max_unshare_fds = fd;
			rcu_read_unlock();
		}

		ret = unshare_fd(CLONE_FILES, max_unshare_fds, &fds);
		if (ret)
			return ret;

		/*
		 * We used to share our file descriptor table, and have now
		 * created a private one, make sure we're using it below.
		 */
		if (fds)
			swap(cur_fds, fds);
	}

	if (flags & CLOSE_RANGE_CLOEXEC)
		__range_cloexec(cur_fds, fd, max_fd);
	else
		__range_close(cur_fds, fd, max_fd);

	if (fds) {
		/*
		 * We're done closing the files we were supposed to. Time to install
		 * the new file descriptor table and drop the old one.
		 */
		task_lock(me);
		me->files = cur_fds;
		task_unlock(me);
		put_files_struct(fds);
	}

	return 0;
}

/**
 * file_close_fd - return file associated with fd
 * @fd: file descriptor to retrieve file for
 *
 * Doesn't take a separate reference count.
 *
 * Returns: The file associated with @fd (NULL if @fd is not open)
 */
struct file *file_close_fd(unsigned int fd)
{
	struct files_struct *files = current->files;
	struct file *file;

	spin_lock(&files->file_lock);
	file = file_close_fd_locked(files, fd);
	spin_unlock(&files->file_lock);

	return file;
}

void do_close_on_exec(struct files_struct *files)
{
	unsigned i;
	struct fdtable *fdt;

	/* exec unshares first */
	spin_lock(&files->file_lock);
	for (i = 0; ; i++) {
		unsigned long set;
		unsigned fd = i * BITS_PER_LONG;
		fdt = files_fdtable(files);
		if (fd >= fdt->max_fds)
			break;
		set = fdt->close_on_exec[i];
		if (!set)
			continue;
		fdt->close_on_exec[i] = 0;
		for ( ; set ; fd++, set >>= 1) {
			struct file *file;
			if (!(set & 1))
				continue;
			file = fdt->fd[fd];
			if (!file)
				continue;
			rcu_assign_pointer(fdt->fd[fd], NULL);
			__put_unused_fd(files, fd);
			spin_unlock(&files->file_lock);
			filp_close(file, files);
			cond_resched();
			spin_lock(&files->file_lock);
		}
	}
	spin_unlock(&files->file_lock);
}

static struct file *__get_file_rcu(struct file __rcu **f)
{
	struct file __rcu *file;
	struct file __rcu *file_reloaded;
	struct file __rcu *file_reloaded_cmp;

	file = rcu_dereference_raw(*f);
	if (!file)
		return NULL;

	if (unlikely(!atomic_long_inc_not_zero(&file->f_count)))
		return ERR_PTR(-EAGAIN);

	file_reloaded = rcu_dereference_raw(*f);

	/*
	 * Ensure that all accesses have a dependency on the load from
	 * rcu_dereference_raw() above so we get correct ordering
	 * between reuse/allocation and the pointer check below.
	 */
	file_reloaded_cmp = file_reloaded;
	OPTIMIZER_HIDE_VAR(file_reloaded_cmp);

	/*
	 * atomic_long_inc_not_zero() above provided a full memory
	 * barrier when we acquired a reference.
	 *
	 * This is paired with the write barrier from assigning to the
	 * __rcu protected file pointer so that if that pointer still
	 * matches the current file, we know we have successfully
	 * acquired a reference to the right file.
	 *
	 * If the pointers don't match the file has been reallocated by
	 * SLAB_TYPESAFE_BY_RCU.
	 */
	if (file == file_reloaded_cmp)
		return file_reloaded;

	fput(file);
	return ERR_PTR(-EAGAIN);
}

/**
 * get_file_rcu - try to get a reference to a file under rcu
 * @f: the file to get a reference on
 *
 * This function tries to get a reference on @f carefully verifying that
 * @f hasn't been reused.
 *
 * This function should rarely have to be used and only by users who
 * understand the implications of SLAB_TYPESAFE_BY_RCU. Try to avoid it.
 *
 * Return: Returns @f with the reference count increased or NULL.
 */
struct file *get_file_rcu(struct file __rcu **f)
{
	for (;;) {
		struct file __rcu *file;

		file = __get_file_rcu(f);
		if (unlikely(!file))
			return NULL;

		if (unlikely(IS_ERR(file)))
			continue;

		return file;
	}
}
EXPORT_SYMBOL_GPL(get_file_rcu);

/**
 * get_file_active - try to get a reference to a file
 * @f: the file to get a reference on
 *
 * In contrast to get_file_rcu() the pointer itself isn't part of the
 * reference counting.
 *
 * This function should rarely have to be used and only by users who
 * understand the implications of SLAB_TYPESAFE_BY_RCU. Try to avoid it.
 *
 * Return: Returns @f with the reference count increased or NULL.
 */
struct file *get_file_active(struct file **f)
{
	struct file __rcu *file;

	rcu_read_lock();
	file = __get_file_rcu(f);
	rcu_read_unlock();
	if (IS_ERR(file))
		file = NULL;
	return file;
}
EXPORT_SYMBOL_GPL(get_file_active);

static inline struct file *__fget_files_rcu(struct files_struct *files,
					    unsigned int fd, fmode_t mask)
{
	for (;;) {
		struct file *file;
		struct fdtable *fdt = rcu_dereference_raw(files->fdt);
		struct file __rcu **fdentry;
		unsigned long nospec_mask;

		/* Mask is a 0 for invalid fds, ~0 for valid ones */
		nospec_mask = array_index_mask_nospec(fd, fdt->max_fds);

		/*
		 * fdentry points to the 'fd' offset, or fdt->fd[0].
		 * Loading from fdt->fd[0] is always safe, because the
		 * array always exists.
		 */
		fdentry = fdt->fd + (fd & nospec_mask);

		/* Do the load, then mask any invalid result */
		file = rcu_dereference_raw(*fdentry);
		file = (void *)(nospec_mask & (unsigned long)file);
		if (unlikely(!file))
			return NULL;

		/*
		 * Ok, we have a file pointer that was valid at
		 * some point, but it might have become stale since.
		 *
		 * We need to confirm it by incrementing the refcount
		 * and then check the lookup again.
		 *
		 * atomic_long_inc_not_zero() gives us a full memory
		 * barrier. We only really need an 'acquire' one to
		 * protect the loads below, but we don't have that.
		 */
		if (unlikely(!atomic_long_inc_not_zero(&file->f_count)))
			continue;

		/*
		 * Such a race can take two forms:
		 *
		 *  (a) the file ref already went down to zero and the
		 *      file hasn't been reused yet or the file count
		 *      isn't zero but the file has already been reused.
		 *
		 *  (b) the file table entry has changed under us.
		 *      Note that we don't need to re-check the 'fdt->fd'
		 *      pointer having changed, because it always goes
		 *      hand-in-hand with 'fdt'.
		 *
		 * If so, we need to put our ref and try again.
		 */
		if (unlikely(file != rcu_dereference_raw(*fdentry)) ||
		    unlikely(rcu_dereference_raw(files->fdt) != fdt)) {
			fput(file);
			continue;
		}

		/*
		 * This isn't the file we're looking for or we're not
		 * allowed to get a reference to it.
		 */
		if (unlikely(file->f_mode & mask)) {
			fput(file);
			return NULL;
		}

		/*
		 * Ok, we have a ref to the file, and checked that it
		 * still exists.
		 */
		return file;
	}
}

static struct file *__fget_files(struct files_struct *files, unsigned int fd,
				 fmode_t mask)
{
	struct file *file;

	rcu_read_lock();
	file = __fget_files_rcu(files, fd, mask);
	rcu_read_unlock();

	return file;
}

static inline struct file *__fget(unsigned int fd, fmode_t mask)
{
	return __fget_files(current->files, fd, mask);
}

struct file *fget(unsigned int fd)
{
	return __fget(fd, FMODE_PATH);
}
EXPORT_SYMBOL(fget);

struct file *fget_raw(unsigned int fd)
{
	return __fget(fd, 0);
}
EXPORT_SYMBOL(fget_raw);

struct file *fget_task(struct task_struct *task, unsigned int fd)
{
	struct file *file = NULL;

	task_lock(task);
	if (task->files)
		file = __fget_files(task->files, fd, 0);
	task_unlock(task);

	return file;
}

struct file *lookup_fdget_rcu(unsigned int fd)
{
	return __fget_files_rcu(current->files, fd, 0);
}
EXPORT_SYMBOL_GPL(lookup_fdget_rcu);

struct file *task_lookup_fdget_rcu(struct task_struct *task, unsigned int fd)
{
	/* Must be called with rcu_read_lock held */
	struct files_struct *files;
	struct file *file = NULL;

	task_lock(task);
	files = task->files;
	if (files)
		file = __fget_files_rcu(files, fd, 0);
	task_unlock(task);

	return file;
}

struct file *task_lookup_next_fdget_rcu(struct task_struct *task, unsigned int *ret_fd)
{
	/* Must be called with rcu_read_lock held */
	struct files_struct *files;
	unsigned int fd = *ret_fd;
	struct file *file = NULL;

	task_lock(task);
	files = task->files;
	if (files) {
		for (; fd < files_fdtable(files)->max_fds; fd++) {
			file = __fget_files_rcu(files, fd, 0);
			if (file)
				break;
		}
	}
	task_unlock(task);
	*ret_fd = fd;
	return file;
}
EXPORT_SYMBOL(task_lookup_next_fdget_rcu);

/*
 * Lightweight file lookup - no refcnt increment if fd table isn't shared.
 *
 * You can use this instead of fget if you satisfy all of the following
 * conditions:
 * 1) You must call fput_light before exiting the syscall and returning control
 *    to userspace (i.e. you cannot remember the returned struct file * after
 *    returning to userspace).
 * 2) You must not call filp_close on the returned struct file * in between
 *    calls to fget_light and fput_light.
 * 3) You must not clone the current task in between the calls to fget_light
 *    and fput_light.
 *
 * The fput_needed flag returned by fget_light should be passed to the
 * corresponding fput_light.
 */
static unsigned long __fget_light(unsigned int fd, fmode_t mask)
{
	struct files_struct *files = current->files;
	struct file *file;

	/*
	 * If another thread is concurrently calling close_fd() followed
	 * by put_files_struct(), we must not observe the old table
	 * entry combined with the new refcount - otherwise we could
	 * return a file that is concurrently being freed.
	 *
	 * atomic_read_acquire() pairs with atomic_dec_and_test() in
	 * put_files_struct().
	 */
	if (likely(atomic_read_acquire(&files->count) == 1)) {
		file = files_lookup_fd_raw(files, fd);
		if (!file || unlikely(file->f_mode & mask))
			return 0;
		return (unsigned long)file;
	} else {
		file = __fget_files(files, fd, mask);
		if (!file)
			return 0;
		return FDPUT_FPUT | (unsigned long)file;
	}
}
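
/*
 * How callers are expected to decode the packed value (a sketch; the
 * real consumers are the fdget()/fdput() helpers in <linux/file.h>):
 *
 *	unsigned long v = __fdget(fd);
 *	struct file *file = (struct file *)(v & ~3);
 *
 *	if (file) {
 *		... use file ...
 *		if (v & FDPUT_FPUT)
 *			fput(file);
 *	}
 *
 * In the single-threaded fast path the pointer is borrowed and no flag
 * is set, so the put is free.
 */
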
unsigned long __fdget(unsigned int fd)
{
	return __fget_light(fd, FMODE_PATH);
}
EXPORT_SYMBOL(__fdget);

unsigned long __fdget_raw(unsigned int fd)
{
	return __fget_light(fd, 0);
}

/*
 * Try to avoid f_pos locking. We only need it if the
 * file is marked for FMODE_ATOMIC_POS, and it can be
 * accessed multiple ways.
 *
 * Always do it for directories, because pidfd_getfd()
 * can make a file accessible even if it otherwise would
 * not be, and for directories this is a correctness
 * issue, not a "POSIX requirement".
 */
static inline bool file_needs_f_pos_lock(struct file *file)
{
	return (file->f_mode & FMODE_ATOMIC_POS) &&
		(file_count(file) > 1 || file->f_op->iterate_shared);
}

unsigned long __fdget_pos(unsigned int fd)
{
	unsigned long v = __fdget(fd);
	struct file *file = (struct file *)(v & ~3);

	if (file && file_needs_f_pos_lock(file)) {
		v |= FDPUT_POS_UNLOCK;
		mutex_lock(&file->f_pos_lock);
	}
	return v;
}

void __f_unlock_pos(struct file *f)
{
	mutex_unlock(&f->f_pos_lock);
}

/*
 * We only lock f_pos if we have threads or if the file might be
 * shared with another process. In both cases we'll have an elevated
 * file count (done either by fdget() or by fork()).
 */

void set_close_on_exec(unsigned int fd, int flag)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;
	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	if (flag)
		__set_close_on_exec(fd, fdt);
	else
		__clear_close_on_exec(fd, fdt);
	spin_unlock(&files->file_lock);
}

bool get_close_on_exec(unsigned int fd)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;
	bool res;
	rcu_read_lock();
	fdt = files_fdtable(files);
	res = close_on_exec(fd, fdt);
	rcu_read_unlock();
	return res;
}

static int do_dup2(struct files_struct *files,
		   struct file *file, unsigned fd, unsigned flags)
__releases(&files->file_lock)
{
	struct file *tofree;
	struct fdtable *fdt;

	/*
	 * We need to detect attempts to do dup2() over allocated but still
	 * not finished descriptor. NB: OpenBSD avoids that at the price of
	 * extra work in their equivalent of fget() - they insert struct
	 * file immediately after grabbing descriptor, mark it larval if
	 * more work (e.g. actual opening) is needed and make sure that
	 * fget() treats larval files as absent. Potentially interesting,
	 * but while extra work in fget() is trivial, locking implications
	 * and amount of surgery on open()-related paths in VFS are not.
	 * FreeBSD fails with -EBADF in the same situation, NetBSD "solution"
	 * deadlocks in rather amusing ways, AFAICS. All of that is out of
	 * scope of POSIX or SUS, since neither considers shared descriptor
	 * tables and this condition does not arise without those.
	 */
	fdt = files_fdtable(files);
	tofree = fdt->fd[fd];
	if (!tofree && fd_is_open(fd, fdt))
		goto Ebusy;
	get_file(file);
	rcu_assign_pointer(fdt->fd[fd], file);
	__set_open_fd(fd, fdt);
	if (flags & O_CLOEXEC)
		__set_close_on_exec(fd, fdt);
	else
		__clear_close_on_exec(fd, fdt);
	spin_unlock(&files->file_lock);

	if (tofree)
		filp_close(tofree, files);

	return fd;

Ebusy:
	spin_unlock(&files->file_lock);
	return -EBUSY;
}

int replace_fd(unsigned fd, struct file *file, unsigned flags)
{
	int err;
	struct files_struct *files = current->files;

	if (!file)
		return close_fd(fd);

	if (fd >= rlimit(RLIMIT_NOFILE))
		return -EBADF;

	spin_lock(&files->file_lock);
	err = expand_files(files, fd);
	if (unlikely(err < 0))
		goto out_unlock;
	return do_dup2(files, file, fd, flags);

out_unlock:
	spin_unlock(&files->file_lock);
	return err;
}

/**
 * receive_fd() - Install received file into file descriptor table
 * @file: struct file that was received from another process
 * @ufd: __user pointer to write new fd number to
 * @o_flags: the O_* flags to apply to the new fd entry
 *
 * Installs a received file into the file descriptor table, with appropriate
 * checks and count updates. Optionally writes the fd number to userspace, if
 * @ufd is non-NULL.
 *
 * This helper handles its own reference counting of the incoming
 * struct file.
 *
 * Returns: the newly installed fd number or a negative error code on failure.
 */
int receive_fd(struct file *file, int __user *ufd, unsigned int o_flags)
{
	int new_fd;
	int error;

	error = security_file_receive(file);
	if (error)
		return error;

	new_fd = get_unused_fd_flags(o_flags);
	if (new_fd < 0)
		return new_fd;

	if (ufd) {
		error = put_user(new_fd, ufd);
		if (error) {
			put_unused_fd(new_fd);
			return error;
		}
	}

	fd_install(new_fd, get_file(file));
	__receive_sock(file);
	return new_fd;
}
EXPORT_SYMBOL_GPL(receive_fd);

int receive_fd_replace(int new_fd, struct file *file, unsigned int o_flags)
{
	int error;

	error = security_file_receive(file);
	if (error)
		return error;
	error = replace_fd(new_fd, file, o_flags);
	if (error)
		return error;
	__receive_sock(file);
	return new_fd;
}

static int ksys_dup3(unsigned int oldfd, unsigned int newfd, int flags)
{
	int err = -EBADF;
	struct file *file;
	struct files_struct *files = current->files;

	if ((flags & ~O_CLOEXEC) != 0)
		return -EINVAL;

	if (unlikely(oldfd == newfd))
		return -EINVAL;

	if (newfd >= rlimit(RLIMIT_NOFILE))
		return -EBADF;

	spin_lock(&files->file_lock);
	err = expand_files(files, newfd);
	file = files_lookup_fd_locked(files, oldfd);
	if (unlikely(!file))
		goto Ebadf;
	if (unlikely(err < 0)) {
		if (err == -EMFILE)
			goto Ebadf;
		goto out_unlock;
	}
	return do_dup2(files, file, newfd, flags);

Ebadf:
	err = -EBADF;
out_unlock:
	spin_unlock(&files->file_lock);
	return err;
}

SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
{
	return ksys_dup3(oldfd, newfd, flags);
}

SYSCALL_DEFINE2(dup2, unsigned int, oldfd, unsigned int, newfd)
{
	if (unlikely(newfd == oldfd)) { /* corner case */
		struct files_struct *files = current->files;
		struct file *f;
		int retval = oldfd;

		rcu_read_lock();
		f = __fget_files_rcu(files, oldfd, 0);
		if (!f)
			retval = -EBADF;
		rcu_read_unlock();
		if (f)
			fput(f);
		return retval;
	}
	return ksys_dup3(oldfd, newfd, 0);
}

SYSCALL_DEFINE1(dup, unsigned int, fildes)
{
	int ret = -EBADF;
	struct file *file = fget_raw(fildes);

	if (file) {
		ret = get_unused_fd_flags(0);
		if (ret >= 0)
			fd_install(ret, file);
		else
			fput(file);
	}
	return ret;
}

int f_dupfd(unsigned int from, struct file *file, unsigned flags)
{
	unsigned long nofile = rlimit(RLIMIT_NOFILE);
	int err;
	if (from >= nofile)
		return -EINVAL;
	err = alloc_fd(from, nofile, flags);
	if (err >= 0) {
		get_file(file);
		fd_install(err, file);
	}
	return err;
}

int iterate_fd(struct files_struct *files, unsigned n,
	       int (*f)(const void *, struct file *, unsigned),
	       const void *p)
{
	struct fdtable *fdt;
	int res = 0;
	if (!files)
		return 0;
	spin_lock(&files->file_lock);
	for (fdt = files_fdtable(files); n < fdt->max_fds; n++) {
		struct file *file;
		file = rcu_dereference_check_fdtable(files, fdt->fd[n]);
		if (!file)
			continue;
		res = f(p, file, n);
		if (res)
			break;
	}
	spin_unlock(&files->file_lock);
	return res;
}
EXPORT_SYMBOL(iterate_fd);
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * linux/fs/file.c
4 *
5 * Copyright (C) 1998-1999, Stephen Tweedie and Bill Hawes
6 *
7 * Manage the dynamic fd arrays in the process files_struct.
8 */
9
10#include <linux/syscalls.h>
11#include <linux/export.h>
12#include <linux/fs.h>
13#include <linux/kernel.h>
14#include <linux/mm.h>
15#include <linux/sched/signal.h>
16#include <linux/slab.h>
17#include <linux/file.h>
18#include <linux/fdtable.h>
19#include <linux/bitops.h>
20#include <linux/spinlock.h>
21#include <linux/rcupdate.h>
22#include <linux/close_range.h>
23#include <linux/file_ref.h>
24#include <net/sock.h>
25#include <linux/init_task.h>
26
27#include "internal.h"
28
29/**
30 * __file_ref_put - Slowpath of file_ref_put()
31 * @ref: Pointer to the reference count
32 * @cnt: Current reference count
33 *
34 * Invoked when the reference count is outside of the valid zone.
35 *
36 * Return:
37 * True if this was the last reference with no future references
38 * possible. This signals the caller that it can safely schedule the
39 * object, which is protected by the reference counter, for
40 * deconstruction.
41 *
42 * False if there are still active references or the put() raced
43 * with a concurrent get()/put() pair. Caller is not allowed to
44 * deconstruct the protected object.
45 */
46bool __file_ref_put(file_ref_t *ref, unsigned long cnt)
47{
48 /* Did this drop the last reference? */
49 if (likely(cnt == FILE_REF_NOREF)) {
50 /*
51 * Carefully try to set the reference count to FILE_REF_DEAD.
52 *
53 * This can fail if a concurrent get() operation has
54 * elevated it again or the corresponding put() even marked
55 * it dead already. Both are valid situations and do not
56 * require a retry. If this fails the caller is not
57 * allowed to deconstruct the object.
58 */
59 if (!atomic_long_try_cmpxchg_release(&ref->refcnt, &cnt, FILE_REF_DEAD))
60 return false;
61
62 /*
63 * The caller can safely schedule the object for
64 * deconstruction. Provide acquire ordering.
65 */
66 smp_acquire__after_ctrl_dep();
67 return true;
68 }
69
70 /*
71 * If the reference count was already in the dead zone, then this
72 * put() operation is imbalanced. Warn, put the reference count back to
73 * DEAD and tell the caller to not deconstruct the object.
74 */
75 if (WARN_ONCE(cnt >= FILE_REF_RELEASED, "imbalanced put on file reference count")) {
76 atomic_long_set(&ref->refcnt, FILE_REF_DEAD);
77 return false;
78 }
79
80 /*
81 * This is a put() operation on a saturated refcount. Restore the
82 * mean saturation value and tell the caller to not deconstruct the
83 * object.
84 */
85 if (cnt > FILE_REF_MAXREF)
86 atomic_long_set(&ref->refcnt, FILE_REF_SATURATED);
87 return false;
88}
89EXPORT_SYMBOL_GPL(__file_ref_put);
90
91unsigned int sysctl_nr_open __read_mostly = 1024*1024;
92unsigned int sysctl_nr_open_min = BITS_PER_LONG;
93/* our min() is unusable in constant expressions ;-/ */
94#define __const_min(x, y) ((x) < (y) ? (x) : (y))
95unsigned int sysctl_nr_open_max =
96 __const_min(INT_MAX, ~(size_t)0/sizeof(void *)) & -BITS_PER_LONG;
97
98static void __free_fdtable(struct fdtable *fdt)
99{
100 kvfree(fdt->fd);
101 kvfree(fdt->open_fds);
102 kfree(fdt);
103}
104
105static void free_fdtable_rcu(struct rcu_head *rcu)
106{
107 __free_fdtable(container_of(rcu, struct fdtable, rcu));
108}
109
110#define BITBIT_NR(nr) BITS_TO_LONGS(BITS_TO_LONGS(nr))
111#define BITBIT_SIZE(nr) (BITBIT_NR(nr) * sizeof(long))
112
113#define fdt_words(fdt) ((fdt)->max_fds / BITS_PER_LONG) // words in ->open_fds
114/*
115 * Copy 'count' fd bits from the old table to the new table and clear the extra
116 * space if any. This does not copy the file pointers. Called with the files
117 * spinlock held for write.
118 */
119static inline void copy_fd_bitmaps(struct fdtable *nfdt, struct fdtable *ofdt,
120 unsigned int copy_words)
121{
122 unsigned int nwords = fdt_words(nfdt);
123
124 bitmap_copy_and_extend(nfdt->open_fds, ofdt->open_fds,
125 copy_words * BITS_PER_LONG, nwords * BITS_PER_LONG);
126 bitmap_copy_and_extend(nfdt->close_on_exec, ofdt->close_on_exec,
127 copy_words * BITS_PER_LONG, nwords * BITS_PER_LONG);
128 bitmap_copy_and_extend(nfdt->full_fds_bits, ofdt->full_fds_bits,
129 copy_words, nwords);
130}
131
132/*
133 * Copy all file descriptors from the old table to the new, expanded table and
134 * clear the extra space. Called with the files spinlock held for write.
135 */
136static void copy_fdtable(struct fdtable *nfdt, struct fdtable *ofdt)
137{
138 size_t cpy, set;
139
140 BUG_ON(nfdt->max_fds < ofdt->max_fds);
141
142 cpy = ofdt->max_fds * sizeof(struct file *);
143 set = (nfdt->max_fds - ofdt->max_fds) * sizeof(struct file *);
144 memcpy(nfdt->fd, ofdt->fd, cpy);
145 memset((char *)nfdt->fd + cpy, 0, set);
146
147 copy_fd_bitmaps(nfdt, ofdt, fdt_words(ofdt));
148}
149
150/*
151 * Note how the fdtable bitmap allocations very much have to be a multiple of
152 * BITS_PER_LONG. This is not only because we walk those things in chunks of
153 * 'unsigned long' in some places, but simply because that is how the Linux
154 * kernel bitmaps are defined to work: they are not "bits in an array of bytes",
155 * they are very much "bits in an array of unsigned long".
156 */
157static struct fdtable *alloc_fdtable(unsigned int slots_wanted)
158{
159 struct fdtable *fdt;
160 unsigned int nr;
161 void *data;
162
163 /*
164 * Figure out how many fds we actually want to support in this fdtable.
165 * Allocation steps are keyed to the size of the fdarray, since it
166 * grows far faster than any of the other dynamic data. We try to fit
167 * the fdarray into comfortable page-tuned chunks: starting at 1024B
168 * and growing in powers of two from there on. Since we called only
169 * with slots_wanted > BITS_PER_LONG (embedded instance in files->fdtab
170 * already gives BITS_PER_LONG slots), the above boils down to
171 * 1. use the smallest power of two large enough to give us that many
172 * slots.
173 * 2. on 32bit skip 64 and 128 - the minimal capacity we want there is
174 * 256 slots (i.e. 1Kb fd array).
175 * 3. on 64bit don't skip anything, 1Kb fd array means 128 slots there
176 * and we are never going to be asked for 64 or less.
177 */
178 if (IS_ENABLED(CONFIG_32BIT) && slots_wanted < 256)
179 nr = 256;
180 else
181 nr = roundup_pow_of_two(slots_wanted);
182 /*
183 * Note that this can drive nr *below* what we had passed if sysctl_nr_open
184 * had been set lower between the check in expand_files() and here.
185 *
186 * We make sure that nr remains a multiple of BITS_PER_LONG - otherwise
187 * bitmaps handling below becomes unpleasant, to put it mildly...
188 */
189 if (unlikely(nr > sysctl_nr_open)) {
190 nr = round_down(sysctl_nr_open, BITS_PER_LONG);
191 if (nr < slots_wanted)
192 return ERR_PTR(-EMFILE);
193 }
194
195 fdt = kmalloc(sizeof(struct fdtable), GFP_KERNEL_ACCOUNT);
196 if (!fdt)
197 goto out;
198 fdt->max_fds = nr;
199 data = kvmalloc_array(nr, sizeof(struct file *), GFP_KERNEL_ACCOUNT);
200 if (!data)
201 goto out_fdt;
202 fdt->fd = data;
203
204 data = kvmalloc(max_t(size_t,
205 2 * nr / BITS_PER_BYTE + BITBIT_SIZE(nr), L1_CACHE_BYTES),
206 GFP_KERNEL_ACCOUNT);
207 if (!data)
208 goto out_arr;
209 fdt->open_fds = data;
210 data += nr / BITS_PER_BYTE;
211 fdt->close_on_exec = data;
212 data += nr / BITS_PER_BYTE;
213 fdt->full_fds_bits = data;
214
215 return fdt;
216
217out_arr:
218 kvfree(fdt->fd);
219out_fdt:
220 kfree(fdt);
221out:
222 return ERR_PTR(-ENOMEM);
223}
224
225/*
226 * Expand the file descriptor table.
227 * This function will allocate a new fdtable and both fd array and fdset, of
228 * the given size.
229 * Return <0 error code on error; 0 on successful completion.
230 * The files->file_lock should be held on entry, and will be held on exit.
231 */
232static int expand_fdtable(struct files_struct *files, unsigned int nr)
233 __releases(files->file_lock)
234 __acquires(files->file_lock)
235{
236 struct fdtable *new_fdt, *cur_fdt;
237
238 spin_unlock(&files->file_lock);
239 new_fdt = alloc_fdtable(nr + 1);
240
241 /* make sure all fd_install() have seen resize_in_progress
242 * or have finished their rcu_read_lock_sched() section.
243 */
244 if (atomic_read(&files->count) > 1)
245 synchronize_rcu();
246
247 spin_lock(&files->file_lock);
248 if (IS_ERR(new_fdt))
249 return PTR_ERR(new_fdt);
250 cur_fdt = files_fdtable(files);
251 BUG_ON(nr < cur_fdt->max_fds);
252 copy_fdtable(new_fdt, cur_fdt);
253 rcu_assign_pointer(files->fdt, new_fdt);
254 if (cur_fdt != &files->fdtab)
255 call_rcu(&cur_fdt->rcu, free_fdtable_rcu);
256 /* coupled with smp_rmb() in fd_install() */
257 smp_wmb();
258 return 0;
259}
260
261/*
262 * Expand files.
263 * This function will expand the file structures, if the requested size exceeds
264 * the current capacity and there is room for expansion.
265 * Return <0 error code on error; 0 on success.
266 * The files->file_lock should be held on entry, and will be held on exit.
267 */
268static int expand_files(struct files_struct *files, unsigned int nr)
269 __releases(files->file_lock)
270 __acquires(files->file_lock)
271{
272 struct fdtable *fdt;
273 int error;
274
275repeat:
276 fdt = files_fdtable(files);
277
278 /* Do we need to expand? */
279 if (nr < fdt->max_fds)
280 return 0;
281
282 /* Can we expand? */
283 if (nr >= sysctl_nr_open)
284 return -EMFILE;
285
286 if (unlikely(files->resize_in_progress)) {
287 spin_unlock(&files->file_lock);
288 wait_event(files->resize_wait, !files->resize_in_progress);
289 spin_lock(&files->file_lock);
290 goto repeat;
291 }
292
293 /* All good, so we try */
294 files->resize_in_progress = true;
295 error = expand_fdtable(files, nr);
296 files->resize_in_progress = false;
297
298 wake_up_all(&files->resize_wait);
299 return error;
300}
301
302static inline void __set_close_on_exec(unsigned int fd, struct fdtable *fdt,
303 bool set)
304{
305 if (set) {
306 __set_bit(fd, fdt->close_on_exec);
307 } else {
308 if (test_bit(fd, fdt->close_on_exec))
309 __clear_bit(fd, fdt->close_on_exec);
310 }
311}
312
313static inline void __set_open_fd(unsigned int fd, struct fdtable *fdt, bool set)
314{
315 __set_bit(fd, fdt->open_fds);
316 __set_close_on_exec(fd, fdt, set);
317 fd /= BITS_PER_LONG;
318 if (!~fdt->open_fds[fd])
319 __set_bit(fd, fdt->full_fds_bits);
320}
321
322static inline void __clear_open_fd(unsigned int fd, struct fdtable *fdt)
323{
324 __clear_bit(fd, fdt->open_fds);
325 fd /= BITS_PER_LONG;
326 if (test_bit(fd, fdt->full_fds_bits))
327 __clear_bit(fd, fdt->full_fds_bits);
328}
329
330static inline bool fd_is_open(unsigned int fd, const struct fdtable *fdt)
331{
332 return test_bit(fd, fdt->open_fds);
333}
334
335/*
336 * Note that a sane fdtable size always has to be a multiple of
337 * BITS_PER_LONG, since we have bitmaps that are sized by this.
338 *
339 * punch_hole is optional - when close_range() is asked to unshare
340 * and close, we don't need to copy descriptors in that range, so
341 * a smaller cloned descriptor table might suffice if the last
342 * currently opened descriptor falls into that range.
343 */
344static unsigned int sane_fdtable_size(struct fdtable *fdt, struct fd_range *punch_hole)
345{
346 unsigned int last = find_last_bit(fdt->open_fds, fdt->max_fds);
347
348 if (last == fdt->max_fds)
349 return NR_OPEN_DEFAULT;
350 if (punch_hole && punch_hole->to >= last && punch_hole->from <= last) {
351 last = find_last_bit(fdt->open_fds, punch_hole->from);
352 if (last == punch_hole->from)
353 return NR_OPEN_DEFAULT;
354 }
355 return ALIGN(last + 1, BITS_PER_LONG);
356}
357
358/*
359 * Allocate a new descriptor table and copy contents from the passed in
360 * instance. Returns a pointer to cloned table on success, ERR_PTR()
361 * on failure. For 'punch_hole' see sane_fdtable_size().
362 */
363struct files_struct *dup_fd(struct files_struct *oldf, struct fd_range *punch_hole)
364{
365 struct files_struct *newf;
366 struct file **old_fds, **new_fds;
367 unsigned int open_files, i;
368 struct fdtable *old_fdt, *new_fdt;
369
370 newf = kmem_cache_alloc(files_cachep, GFP_KERNEL);
371 if (!newf)
372 return ERR_PTR(-ENOMEM);
373
374 atomic_set(&newf->count, 1);
375
376 spin_lock_init(&newf->file_lock);
377 newf->resize_in_progress = false;
378 init_waitqueue_head(&newf->resize_wait);
379 newf->next_fd = 0;
380 new_fdt = &newf->fdtab;
381 new_fdt->max_fds = NR_OPEN_DEFAULT;
382 new_fdt->close_on_exec = newf->close_on_exec_init;
383 new_fdt->open_fds = newf->open_fds_init;
384 new_fdt->full_fds_bits = newf->full_fds_bits_init;
385 new_fdt->fd = &newf->fd_array[0];
386
387 spin_lock(&oldf->file_lock);
388 old_fdt = files_fdtable(oldf);
389 open_files = sane_fdtable_size(old_fdt, punch_hole);
390
391 /*
392 * Check whether we need to allocate a larger fd array and fd set.
393 */
394 while (unlikely(open_files > new_fdt->max_fds)) {
395 spin_unlock(&oldf->file_lock);
396
397 if (new_fdt != &newf->fdtab)
398 __free_fdtable(new_fdt);
399
400 new_fdt = alloc_fdtable(open_files);
401 if (IS_ERR(new_fdt)) {
402 kmem_cache_free(files_cachep, newf);
403 return ERR_CAST(new_fdt);
404 }
405
406 /*
407 * Reacquire the oldf lock and a pointer to its fd table
408 * who knows it may have a new bigger fd table. We need
409 * the latest pointer.
410 */
411 spin_lock(&oldf->file_lock);
412 old_fdt = files_fdtable(oldf);
413 open_files = sane_fdtable_size(old_fdt, punch_hole);
414 }
415
416 copy_fd_bitmaps(new_fdt, old_fdt, open_files / BITS_PER_LONG);
417
418 old_fds = old_fdt->fd;
419 new_fds = new_fdt->fd;
420
421 for (i = open_files; i != 0; i--) {
422 struct file *f = *old_fds++;
423 if (f) {
424 get_file(f);
425 } else {
426 /*
427 * The fd may be claimed in the fd bitmap but not yet
428 * instantiated in the files array if a sibling thread
429 * is partway through open(). So make sure that this
430 * fd is available to the new process.
431 */
432 __clear_open_fd(open_files - i, new_fdt);
433 }
434 rcu_assign_pointer(*new_fds++, f);
435 }
436 spin_unlock(&oldf->file_lock);
437
438 /* clear the remainder */
439 memset(new_fds, 0, (new_fdt->max_fds - open_files) * sizeof(struct file *));
440
441 rcu_assign_pointer(newf->fdt, new_fdt);
442
443 return newf;
444}
445
446static struct fdtable *close_files(struct files_struct * files)
447{
448 /*
449 * It is safe to dereference the fd table without RCU or
450 * ->file_lock because this is the last reference to the
451 * files structure.
452 */
453 struct fdtable *fdt = rcu_dereference_raw(files->fdt);
454 unsigned int i, j = 0;
455
456 for (;;) {
457 unsigned long set;
458 i = j * BITS_PER_LONG;
459 if (i >= fdt->max_fds)
460 break;
461 set = fdt->open_fds[j++];
462 while (set) {
463 if (set & 1) {
464 struct file *file = fdt->fd[i];
465 if (file) {
466 filp_close(file, files);
467 cond_resched();
468 }
469 }
470 i++;
471 set >>= 1;
472 }
473 }
474
475 return fdt;
476}
477
478void put_files_struct(struct files_struct *files)
479{
480 if (atomic_dec_and_test(&files->count)) {
481 struct fdtable *fdt = close_files(files);
482
483 /* free the arrays if they are not embedded */
484 if (fdt != &files->fdtab)
485 __free_fdtable(fdt);
486 kmem_cache_free(files_cachep, files);
487 }
488}
489
490void exit_files(struct task_struct *tsk)
491{
492 struct files_struct * files = tsk->files;
493
494 if (files) {
495 task_lock(tsk);
496 tsk->files = NULL;
497 task_unlock(tsk);
498 put_files_struct(files);
499 }
500}
501
502struct files_struct init_files = {
503 .count = ATOMIC_INIT(1),
504 .fdt = &init_files.fdtab,
505 .fdtab = {
506 .max_fds = NR_OPEN_DEFAULT,
507 .fd = &init_files.fd_array[0],
508 .close_on_exec = init_files.close_on_exec_init,
509 .open_fds = init_files.open_fds_init,
510 .full_fds_bits = init_files.full_fds_bits_init,
511 },
512 .file_lock = __SPIN_LOCK_UNLOCKED(init_files.file_lock),
513 .resize_wait = __WAIT_QUEUE_HEAD_INITIALIZER(init_files.resize_wait),
514};

static unsigned int find_next_fd(struct fdtable *fdt, unsigned int start)
{
	unsigned int maxfd = fdt->max_fds; /* always multiple of BITS_PER_LONG */
	unsigned int maxbit = maxfd / BITS_PER_LONG;
	unsigned int bitbit = start / BITS_PER_LONG;
	unsigned int bit;

	/*
	 * Try to avoid looking at the second level bitmap
	 */
	bit = find_next_zero_bit(&fdt->open_fds[bitbit], BITS_PER_LONG,
				 start & (BITS_PER_LONG - 1));
	if (bit < BITS_PER_LONG)
		return bit + bitbit * BITS_PER_LONG;

	bitbit = find_next_zero_bit(fdt->full_fds_bits, maxbit, bitbit) * BITS_PER_LONG;
	if (bitbit >= maxfd)
		return maxfd;
	if (bitbit > start)
		start = bitbit;
	return find_next_zero_bit(fdt->open_fds, maxfd, start);
}
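
/*
 * Illustrative sketch (editorial, not taken from this file): each bit of
 * full_fds_bits summarizes one word of open_fds, so fully populated
 * regions can be skipped without scanning them bit by bit. With
 * BITS_PER_LONG == 64 and fds 0-63 plus fd 64 already open:
 *
 *	open_fds:      word 0 = ~0UL, word 1 = 0x1
 *	full_fds_bits: 0x1   (word 0 exhausted, word 1 still has room)
 *
 * find_next_fd(fdt, 0) finds no zero bit in word 0, jumps via
 * full_fds_bits to word 1, and returns 65.
 */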

/*
 * allocate a file descriptor, mark it busy.
 */
static int alloc_fd(unsigned start, unsigned end, unsigned flags)
{
	struct files_struct *files = current->files;
	unsigned int fd;
	int error;
	struct fdtable *fdt;

	spin_lock(&files->file_lock);
repeat:
	fdt = files_fdtable(files);
	fd = start;
	if (fd < files->next_fd)
		fd = files->next_fd;

	if (likely(fd < fdt->max_fds))
		fd = find_next_fd(fdt, fd);

	/*
	 * N.B. For clone tasks sharing a files structure, this test
	 * will limit the total number of files that can be opened.
	 */
	error = -EMFILE;
	if (unlikely(fd >= end))
		goto out;

	if (unlikely(fd >= fdt->max_fds)) {
		error = expand_files(files, fd);
		if (error < 0)
			goto out;

		goto repeat;
	}

	if (start <= files->next_fd)
		files->next_fd = fd + 1;

	__set_open_fd(fd, fdt, flags & O_CLOEXEC);
	error = fd;

out:
	spin_unlock(&files->file_lock);
	return error;
}

int __get_unused_fd_flags(unsigned flags, unsigned long nofile)
{
	return alloc_fd(0, nofile, flags);
}

int get_unused_fd_flags(unsigned flags)
{
	return __get_unused_fd_flags(flags, rlimit(RLIMIT_NOFILE));
}
EXPORT_SYMBOL(get_unused_fd_flags);
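
/*
 * Editorial usage sketch (not part of this file; "some_fops" and "priv"
 * are placeholders): callers reserve the descriptor first and only
 * publish the file once it is fully set up, releasing the slot again on
 * the error path:
 *
 *	int fd = get_unused_fd_flags(O_CLOEXEC);
 *	if (fd < 0)
 *		return fd;
 *	file = anon_inode_getfile("[example]", &some_fops, priv, O_RDWR);
 *	if (IS_ERR(file)) {
 *		put_unused_fd(fd);
 *		return PTR_ERR(file);
 *	}
 *	fd_install(fd, file);
 *	return fd;
 */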

static void __put_unused_fd(struct files_struct *files, unsigned int fd)
{
	struct fdtable *fdt = files_fdtable(files);
	__clear_open_fd(fd, fdt);
	if (fd < files->next_fd)
		files->next_fd = fd;
}

void put_unused_fd(unsigned int fd)
{
	struct files_struct *files = current->files;
	spin_lock(&files->file_lock);
	__put_unused_fd(files, fd);
	spin_unlock(&files->file_lock);
}

EXPORT_SYMBOL(put_unused_fd);

/*
 * Install a file pointer in the fd array.
 *
 * The VFS is full of places where we drop the files lock between
 * setting the open_fds bitmap and installing the file in the file
 * array. At any such point, we are vulnerable to a dup2() race
 * installing a file in the array before us. We need to detect this and
 * fput() the struct file we are about to overwrite in this case.
 *
 * It should never happen - if we allow dup2() to do it, _really_ bad things
 * will follow.
 *
 * This consumes the "file" refcount, so callers should treat it
 * as if they had called fput(file).
 */

void fd_install(unsigned int fd, struct file *file)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;

	if (WARN_ON_ONCE(unlikely(file->f_mode & FMODE_BACKING)))
		return;

	rcu_read_lock_sched();

	if (unlikely(files->resize_in_progress)) {
		rcu_read_unlock_sched();
		spin_lock(&files->file_lock);
		fdt = files_fdtable(files);
		WARN_ON(fdt->fd[fd] != NULL);
		rcu_assign_pointer(fdt->fd[fd], file);
		spin_unlock(&files->file_lock);
		return;
	}
	/* coupled with smp_wmb() in expand_fdtable() */
	smp_rmb();
	fdt = rcu_dereference_sched(files->fdt);
	BUG_ON(fdt->fd[fd] != NULL);
	rcu_assign_pointer(fdt->fd[fd], file);
	rcu_read_unlock_sched();
}

EXPORT_SYMBOL(fd_install);

/**
 * file_close_fd_locked - return file associated with fd
 * @files: file struct to retrieve file from
 * @fd: file descriptor to retrieve file for
 *
 * Doesn't take a separate reference count.
 *
 * Context: files_lock must be held.
 *
 * Returns: The file associated with @fd (NULL if @fd is not open)
 */
struct file *file_close_fd_locked(struct files_struct *files, unsigned fd)
{
	struct fdtable *fdt = files_fdtable(files);
	struct file *file;

	lockdep_assert_held(&files->file_lock);

	if (fd >= fdt->max_fds)
		return NULL;

	fd = array_index_nospec(fd, fdt->max_fds);
	file = fdt->fd[fd];
	if (file) {
		rcu_assign_pointer(fdt->fd[fd], NULL);
		__put_unused_fd(files, fd);
	}
	return file;
}

int close_fd(unsigned fd)
{
	struct files_struct *files = current->files;
	struct file *file;

	spin_lock(&files->file_lock);
	file = file_close_fd_locked(files, fd);
	spin_unlock(&files->file_lock);
	if (!file)
		return -EBADF;

	return filp_close(file, files);
}
EXPORT_SYMBOL(close_fd);

/**
 * last_fd - return last valid index into fd table
 * @fdt: File descriptor table.
 *
 * Context: Either rcu read lock or files_lock must be held.
 *
 * Returns: Last valid index into fdtable.
 */
static inline unsigned last_fd(struct fdtable *fdt)
{
	return fdt->max_fds - 1;
}

static inline void __range_cloexec(struct files_struct *cur_fds,
				   unsigned int fd, unsigned int max_fd)
{
	struct fdtable *fdt;

	/* make sure we're using the correct maximum value */
	spin_lock(&cur_fds->file_lock);
	fdt = files_fdtable(cur_fds);
	max_fd = min(last_fd(fdt), max_fd);
	if (fd <= max_fd)
		bitmap_set(fdt->close_on_exec, fd, max_fd - fd + 1);
	spin_unlock(&cur_fds->file_lock);
}

static inline void __range_close(struct files_struct *files, unsigned int fd,
				 unsigned int max_fd)
{
	struct file *file;
	unsigned n;

	spin_lock(&files->file_lock);
	n = last_fd(files_fdtable(files));
	max_fd = min(max_fd, n);

	for (; fd <= max_fd; fd++) {
		file = file_close_fd_locked(files, fd);
		if (file) {
			spin_unlock(&files->file_lock);
			filp_close(file, files);
			cond_resched();
			spin_lock(&files->file_lock);
		} else if (need_resched()) {
			spin_unlock(&files->file_lock);
			cond_resched();
			spin_lock(&files->file_lock);
		}
	}
	spin_unlock(&files->file_lock);
}

/**
 * sys_close_range() - Close all file descriptors in a given range.
 *
 * @fd: starting file descriptor to close
 * @max_fd: last file descriptor to close
 * @flags: CLOSE_RANGE flags.
 *
 * This closes a range of file descriptors. All file descriptors
 * from @fd up to and including @max_fd are closed.
 * Currently, errors from closing a given file descriptor are ignored.
 */
SYSCALL_DEFINE3(close_range, unsigned int, fd, unsigned int, max_fd,
		unsigned int, flags)
{
	struct task_struct *me = current;
	struct files_struct *cur_fds = me->files, *fds = NULL;

	if (flags & ~(CLOSE_RANGE_UNSHARE | CLOSE_RANGE_CLOEXEC))
		return -EINVAL;

	if (fd > max_fd)
		return -EINVAL;

	if ((flags & CLOSE_RANGE_UNSHARE) && atomic_read(&cur_fds->count) > 1) {
		struct fd_range range = {fd, max_fd}, *punch_hole = &range;

		/*
		 * If the caller requested all fds to be made cloexec we always
		 * copy all of the file descriptors since they still want to
		 * use them.
		 */
		if (flags & CLOSE_RANGE_CLOEXEC)
			punch_hole = NULL;

		fds = dup_fd(cur_fds, punch_hole);
		if (IS_ERR(fds))
			return PTR_ERR(fds);
		/*
		 * We used to share our file descriptor table, and have now
		 * created a private one, make sure we're using it below.
		 */
		swap(cur_fds, fds);
	}

	if (flags & CLOSE_RANGE_CLOEXEC)
		__range_cloexec(cur_fds, fd, max_fd);
	else
		__range_close(cur_fds, fd, max_fd);

	if (fds) {
		/*
		 * We're done closing the files we were supposed to. Time to
		 * install the new file descriptor table and drop the old one.
		 */
		task_lock(me);
		me->files = cur_fds;
		task_unlock(me);
		put_files_struct(fds);
	}

	return 0;
}
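
/*
 * Editorial sketch of the userspace view (illustrative, not from this
 * file): a post-fork "get rid of everything above the std descriptors"
 * sequence is the typical caller, e.g. via the libc wrapper:
 *
 *	close_range(3, ~0U, 0);
 *
 * or, to keep the fds usable until execve() but no longer:
 *
 *	close_range(3, ~0U, CLOSE_RANGE_CLOEXEC);
 */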

/**
 * file_close_fd - return file associated with fd
 * @fd: file descriptor to retrieve file for
 *
 * Doesn't take a separate reference count.
 *
 * Returns: The file associated with @fd (NULL if @fd is not open)
 */
struct file *file_close_fd(unsigned int fd)
{
	struct files_struct *files = current->files;
	struct file *file;

	spin_lock(&files->file_lock);
	file = file_close_fd_locked(files, fd);
	spin_unlock(&files->file_lock);

	return file;
}

void do_close_on_exec(struct files_struct *files)
{
	unsigned i;
	struct fdtable *fdt;

	/* exec unshares first */
	spin_lock(&files->file_lock);
	for (i = 0; ; i++) {
		unsigned long set;
		unsigned fd = i * BITS_PER_LONG;
		fdt = files_fdtable(files);
		if (fd >= fdt->max_fds)
			break;
		set = fdt->close_on_exec[i];
		if (!set)
			continue;
		fdt->close_on_exec[i] = 0;
		for ( ; set ; fd++, set >>= 1) {
			struct file *file;
			if (!(set & 1))
				continue;
			file = fdt->fd[fd];
			if (!file)
				continue;
			rcu_assign_pointer(fdt->fd[fd], NULL);
			__put_unused_fd(files, fd);
			spin_unlock(&files->file_lock);
			filp_close(file, files);
			cond_resched();
			spin_lock(&files->file_lock);
		}
	}
	spin_unlock(&files->file_lock);
}

static struct file *__get_file_rcu(struct file __rcu **f)
{
	struct file __rcu *file;
	struct file __rcu *file_reloaded;
	struct file __rcu *file_reloaded_cmp;

	file = rcu_dereference_raw(*f);
	if (!file)
		return NULL;

	if (unlikely(!file_ref_get(&file->f_ref)))
		return ERR_PTR(-EAGAIN);

	file_reloaded = rcu_dereference_raw(*f);

	/*
	 * Ensure that all accesses have a dependency on the load from
	 * rcu_dereference_raw() above so we get correct ordering
	 * between reuse/allocation and the pointer check below.
	 */
	file_reloaded_cmp = file_reloaded;
	OPTIMIZER_HIDE_VAR(file_reloaded_cmp);

	/*
	 * file_ref_get() above provided a full memory barrier when we
	 * acquired a reference.
	 *
	 * This is paired with the write barrier from assigning to the
	 * __rcu protected file pointer so that if that pointer still
	 * matches the current file, we know we have successfully
	 * acquired a reference to the right file.
	 *
	 * If the pointers don't match the file has been reallocated by
	 * SLAB_TYPESAFE_BY_RCU.
	 */
	if (file == file_reloaded_cmp)
		return file_reloaded;

	fput(file);
	return ERR_PTR(-EAGAIN);
}

/**
 * get_file_rcu - try to get a reference to a file under rcu
 * @f: the file to get a reference on
 *
 * This function tries to get a reference on @f carefully verifying that
 * @f hasn't been reused.
 *
 * This function should rarely have to be used and only by users who
 * understand the implications of SLAB_TYPESAFE_BY_RCU. Try to avoid it.
 *
 * Return: Returns @f with the reference count increased or NULL.
 */
struct file *get_file_rcu(struct file __rcu **f)
{
	for (;;) {
		struct file __rcu *file;

		file = __get_file_rcu(f);
		if (!IS_ERR(file))
			return file;
	}
}
EXPORT_SYMBOL_GPL(get_file_rcu);

/**
 * get_file_active - try to get a reference to a file
 * @f: the file to get a reference on
 *
 * In contrast to get_file_rcu() the pointer itself isn't part of the
 * reference counting.
 *
 * This function should rarely have to be used and only by users who
 * understand the implications of SLAB_TYPESAFE_BY_RCU. Try to avoid it.
 *
 * Return: Returns @f with the reference count increased or NULL.
 */
struct file *get_file_active(struct file **f)
{
	struct file __rcu *file;

	rcu_read_lock();
	file = __get_file_rcu(f);
	rcu_read_unlock();
	if (IS_ERR(file))
		file = NULL;
	return file;
}
EXPORT_SYMBOL_GPL(get_file_active);
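
/*
 * Editorial sketch (hypothetical caller, "ctx" is a placeholder): a
 * typical get_file_active() user owns a structure pointing at a file it
 * did not pin itself and must tolerate a concurrent final fput():
 *
 *	file = get_file_active(&ctx->file);
 *	if (!file)
 *		return -EBADF;
 *	... use file ...
 *	fput(file);
 */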

static inline struct file *__fget_files_rcu(struct files_struct *files,
					    unsigned int fd, fmode_t mask)
{
	for (;;) {
		struct file *file;
		struct fdtable *fdt = rcu_dereference_raw(files->fdt);
		struct file __rcu **fdentry;
		unsigned long nospec_mask;

		/* Mask is a 0 for invalid fd's, ~0 for valid ones */
		nospec_mask = array_index_mask_nospec(fd, fdt->max_fds);

		/*
		 * fdentry points to the 'fd' offset, or fdt->fd[0].
		 * Loading from fdt->fd[0] is always safe, because the
		 * array always exists.
		 */
		fdentry = fdt->fd + (fd & nospec_mask);

		/* Do the load, then mask any invalid result */
		file = rcu_dereference_raw(*fdentry);
		file = (void *)(nospec_mask & (unsigned long)file);
		if (unlikely(!file))
			return NULL;

		/*
		 * Ok, we have a file pointer that was valid at
		 * some point, but it might have become stale since.
		 *
		 * We need to confirm it by incrementing the refcount
		 * and then check the lookup again.
		 *
		 * file_ref_get() gives us a full memory barrier. We
		 * only really need an 'acquire' one to protect the
		 * loads below, but we don't have that.
		 */
		if (unlikely(!file_ref_get(&file->f_ref)))
			continue;

		/*
		 * Such a race can take two forms:
		 *
		 *  (a) the file ref already went down to zero and the
		 *      file hasn't been reused yet or the file count
		 *      isn't zero but the file has already been reused.
		 *
		 *  (b) the file table entry has changed under us.
		 *      Note that we don't need to re-check the 'fdt->fd'
		 *      pointer having changed, because it always goes
		 *      hand-in-hand with 'fdt'.
		 *
		 * If so, we need to put our ref and try again.
		 */
		if (unlikely(file != rcu_dereference_raw(*fdentry)) ||
		    unlikely(rcu_dereference_raw(files->fdt) != fdt)) {
			fput(file);
			continue;
		}

		/*
		 * This isn't the file we're looking for or we're not
		 * allowed to get a reference to it.
		 */
		if (unlikely(file->f_mode & mask)) {
			fput(file);
			return NULL;
		}

		/*
		 * Ok, we have a ref to the file, and checked that it
		 * still exists.
		 */
		return file;
	}
}

static struct file *__fget_files(struct files_struct *files, unsigned int fd,
				 fmode_t mask)
{
	struct file *file;

	rcu_read_lock();
	file = __fget_files_rcu(files, fd, mask);
	rcu_read_unlock();

	return file;
}

static inline struct file *__fget(unsigned int fd, fmode_t mask)
{
	return __fget_files(current->files, fd, mask);
}

struct file *fget(unsigned int fd)
{
	return __fget(fd, FMODE_PATH);
}
EXPORT_SYMBOL(fget);
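
/*
 * Editorial usage sketch (not part of this file): fget()/fput() bracket
 * any syscall-style use of a descriptor where the reference may be held
 * across sleeps or passed around:
 *
 *	struct file *file = fget(fd);
 *	if (!file)
 *		return -EBADF;
 *	ret = vfs_fsync(file, 0);
 *	fput(file);
 *	return ret;
 */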

struct file *fget_raw(unsigned int fd)
{
	return __fget(fd, 0);
}
EXPORT_SYMBOL(fget_raw);

struct file *fget_task(struct task_struct *task, unsigned int fd)
{
	struct file *file = NULL;

	task_lock(task);
	if (task->files)
		file = __fget_files(task->files, fd, 0);
	task_unlock(task);

	return file;
}

struct file *fget_task_next(struct task_struct *task, unsigned int *ret_fd)
{
	struct files_struct *files;
	unsigned int fd = *ret_fd;
	struct file *file = NULL;

	task_lock(task);
	files = task->files;
	if (files) {
		rcu_read_lock();
		for (; fd < files_fdtable(files)->max_fds; fd++) {
			file = __fget_files_rcu(files, fd, 0);
			if (file)
				break;
		}
		rcu_read_unlock();
	}
	task_unlock(task);
	*ret_fd = fd;
	return file;
}
EXPORT_SYMBOL(fget_task_next);

/*
 * Lightweight file lookup - no refcnt increment if fd table isn't shared.
 *
 * You can use this instead of fget if you satisfy all of the following
 * conditions:
 * 1) You must call fput_light before exiting the syscall and returning control
 *    to userspace (i.e. you cannot remember the returned struct file * after
 *    returning to userspace).
 * 2) You must not call filp_close on the returned struct file * in between
 *    calls to fget_light and fput_light.
 * 3) You must not clone the current task in between the calls to fget_light
 *    and fput_light.
 *
 * The fput_needed flag returned by fget_light should be passed to the
 * corresponding fput_light.
 *
 * (As an exception to rule 2, you can call filp_close between fget_light and
 * fput_light provided that you capture a real refcount with get_file before
 * the call to filp_close, and ensure that this real refcount is fput *after*
 * the fput_light call.)
 *
 * See also the documentation in rust/kernel/file.rs.
 */
static inline struct fd __fget_light(unsigned int fd, fmode_t mask)
{
	struct files_struct *files = current->files;
	struct file *file;

	/*
	 * If another thread is concurrently calling close_fd() followed
	 * by put_files_struct(), we must not observe the old table
	 * entry combined with the new refcount - otherwise we could
	 * return a file that is concurrently being freed.
	 *
	 * atomic_read_acquire() pairs with atomic_dec_and_test() in
	 * put_files_struct().
	 */
	if (likely(atomic_read_acquire(&files->count) == 1)) {
		file = files_lookup_fd_raw(files, fd);
		if (!file || unlikely(file->f_mode & mask))
			return EMPTY_FD;
		return BORROWED_FD(file);
	} else {
		file = __fget_files(files, fd, mask);
		if (!file)
			return EMPTY_FD;
		return CLONED_FD(file);
	}
}

struct fd fdget(unsigned int fd)
{
	return __fget_light(fd, FMODE_PATH);
}
EXPORT_SYMBOL(fdget);

struct fd fdget_raw(unsigned int fd)
{
	return __fget_light(fd, 0);
}
/*
 * Try to avoid f_pos locking. We only need it if the
 * file is marked for FMODE_ATOMIC_POS, and it can be
 * accessed multiple ways.
 *
 * Always do it for directories, because pidfd_getfd()
 * can make a file accessible even if it otherwise would
 * not be, and for directories this is a correctness
 * issue, not a "POSIX requirement".
 */
static inline bool file_needs_f_pos_lock(struct file *file)
{
	return (file->f_mode & FMODE_ATOMIC_POS) &&
		(file_count(file) > 1 || file->f_op->iterate_shared);
}

struct fd fdget_pos(unsigned int fd)
{
	struct fd f = fdget(fd);
	struct file *file = fd_file(f);

	if (file && file_needs_f_pos_lock(file)) {
		f.word |= FDPUT_POS_UNLOCK;
		mutex_lock(&file->f_pos_lock);
	}
	return f;
}

void __f_unlock_pos(struct file *f)
{
	mutex_unlock(&f->f_pos_lock);
}

/*
 * We only lock f_pos if we have threads or if the file might be
 * shared with another process. In both cases we'll have an elevated
 * file count (done either by fdget() or by fork()).
 */
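
/*
 * Editorial sketch (not from this file): read/write-style syscalls
 * bracket their position access with fdget_pos()/fdput_pos(), so the
 * f_pos_lock mutex is only taken when file_needs_f_pos_lock() says the
 * position is actually shared:
 *
 *	struct fd f = fdget_pos(fd);
 *	loff_t pos;
 *
 *	if (!fd_file(f))
 *		return -EBADF;
 *	pos = fd_file(f)->f_pos;
 *	... do the I/O at pos, then store the updated position ...
 *	fdput_pos(f);
 */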

void set_close_on_exec(unsigned int fd, int flag)
{
	struct files_struct *files = current->files;
	spin_lock(&files->file_lock);
	__set_close_on_exec(fd, files_fdtable(files), flag);
	spin_unlock(&files->file_lock);
}

bool get_close_on_exec(unsigned int fd)
{
	bool res;
	rcu_read_lock();
	res = close_on_exec(fd, current->files);
	rcu_read_unlock();
	return res;
}

static int do_dup2(struct files_struct *files,
	struct file *file, unsigned fd, unsigned flags)
__releases(&files->file_lock)
{
	struct file *tofree;
	struct fdtable *fdt;

	/*
	 * We need to detect attempts to do dup2() over allocated but still
	 * not finished descriptor. NB: OpenBSD avoids that at the price of
	 * extra work in their equivalent of fget() - they insert struct
	 * file immediately after grabbing descriptor, mark it larval if
	 * more work (e.g. actual opening) is needed and make sure that
	 * fget() treats larval files as absent. Potentially interesting,
	 * but while extra work in fget() is trivial, locking implications
	 * and amount of surgery on open()-related paths in VFS are not.
	 * FreeBSD fails with -EBADF in the same situation, NetBSD "solution"
	 * deadlocks in rather amusing ways, AFAICS. All of that is out of
	 * scope of POSIX or SUS, since neither considers shared descriptor
	 * tables and this condition does not arise without those.
	 */
	fdt = files_fdtable(files);
	fd = array_index_nospec(fd, fdt->max_fds);
	tofree = fdt->fd[fd];
	if (!tofree && fd_is_open(fd, fdt))
		goto Ebusy;
	get_file(file);
	rcu_assign_pointer(fdt->fd[fd], file);
	__set_open_fd(fd, fdt, flags & O_CLOEXEC);
	spin_unlock(&files->file_lock);

	if (tofree)
		filp_close(tofree, files);

	return fd;

Ebusy:
	spin_unlock(&files->file_lock);
	return -EBUSY;
}

int replace_fd(unsigned fd, struct file *file, unsigned flags)
{
	int err;
	struct files_struct *files = current->files;

	if (!file)
		return close_fd(fd);

	if (fd >= rlimit(RLIMIT_NOFILE))
		return -EBADF;

	spin_lock(&files->file_lock);
	err = expand_files(files, fd);
	if (unlikely(err < 0))
		goto out_unlock;
	return do_dup2(files, file, fd, flags);

out_unlock:
	spin_unlock(&files->file_lock);
	return err;
}

/**
 * receive_fd() - Install received file into file descriptor table
 * @file: struct file that was received from another process
 * @ufd: __user pointer to write new fd number to
 * @o_flags: the O_* flags to apply to the new fd entry
 *
 * Installs a received file into the file descriptor table, with appropriate
 * checks and count updates. Optionally writes the fd number to userspace, if
 * @ufd is non-NULL.
 *
 * This helper handles its own reference counting of the incoming
 * struct file.
 *
 * Returns the newly installed fd, or a negative error code on failure.
 */
int receive_fd(struct file *file, int __user *ufd, unsigned int o_flags)
{
	int new_fd;
	int error;

	error = security_file_receive(file);
	if (error)
		return error;

	new_fd = get_unused_fd_flags(o_flags);
	if (new_fd < 0)
		return new_fd;

	if (ufd) {
		error = put_user(new_fd, ufd);
		if (error) {
			put_unused_fd(new_fd);
			return error;
		}
	}

	fd_install(new_fd, get_file(file));
	__receive_sock(file);
	return new_fd;
}
EXPORT_SYMBOL_GPL(receive_fd);
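
/*
 * Editorial usage sketch (not part of this file): SCM_RIGHTS-style
 * receive paths resolve the transported struct file first and then let
 * receive_fd() do the security check, fd reservation, and installation
 * in one call:
 *
 *	err = receive_fd(file, NULL, O_CLOEXEC);
 *	if (err < 0)
 *		return err;	(no fd was installed)
 *
 * On success err is the new fd number; since receive_fd() takes its own
 * reference, the caller still owns - and must eventually fput() - the
 * reference it passed in.
 */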

int receive_fd_replace(int new_fd, struct file *file, unsigned int o_flags)
{
	int error;

	error = security_file_receive(file);
	if (error)
		return error;
	error = replace_fd(new_fd, file, o_flags);
	if (error)
		return error;
	__receive_sock(file);
	return new_fd;
}

static int ksys_dup3(unsigned int oldfd, unsigned int newfd, int flags)
{
	int err = -EBADF;
	struct file *file;
	struct files_struct *files = current->files;

	if ((flags & ~O_CLOEXEC) != 0)
		return -EINVAL;

	if (unlikely(oldfd == newfd))
		return -EINVAL;

	if (newfd >= rlimit(RLIMIT_NOFILE))
		return -EBADF;

	spin_lock(&files->file_lock);
	err = expand_files(files, newfd);
	file = files_lookup_fd_locked(files, oldfd);
	if (unlikely(!file))
		goto Ebadf;
	if (unlikely(err < 0)) {
		if (err == -EMFILE)
			goto Ebadf;
		goto out_unlock;
	}
	return do_dup2(files, file, newfd, flags);

Ebadf:
	err = -EBADF;
out_unlock:
	spin_unlock(&files->file_lock);
	return err;
}

SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
{
	return ksys_dup3(oldfd, newfd, flags);
}

SYSCALL_DEFINE2(dup2, unsigned int, oldfd, unsigned int, newfd)
{
	if (unlikely(newfd == oldfd)) { /* corner case */
		struct files_struct *files = current->files;
		struct file *f;
		int retval = oldfd;

		rcu_read_lock();
		f = __fget_files_rcu(files, oldfd, 0);
		if (!f)
			retval = -EBADF;
		rcu_read_unlock();
		if (f)
			fput(f);
		return retval;
	}
	return ksys_dup3(oldfd, newfd, 0);
}

SYSCALL_DEFINE1(dup, unsigned int, fildes)
{
	int ret = -EBADF;
	struct file *file = fget_raw(fildes);

	if (file) {
		ret = get_unused_fd_flags(0);
		if (ret >= 0)
			fd_install(ret, file);
		else
			fput(file);
	}
	return ret;
}

int f_dupfd(unsigned int from, struct file *file, unsigned flags)
{
	unsigned long nofile = rlimit(RLIMIT_NOFILE);
	int err;
	if (from >= nofile)
		return -EINVAL;
	err = alloc_fd(from, nofile, flags);
	if (err >= 0) {
		get_file(file);
		fd_install(err, file);
	}
	return err;
}

int iterate_fd(struct files_struct *files, unsigned n,
		int (*f)(const void *, struct file *, unsigned),
		const void *p)
{
	struct fdtable *fdt;
	int res = 0;
	if (!files)
		return 0;
	spin_lock(&files->file_lock);
	for (fdt = files_fdtable(files); n < fdt->max_fds; n++) {
		struct file *file;
		file = rcu_dereference_check_fdtable(files, fdt->fd[n]);
		if (!file)
			continue;
		res = f(p, file, n);
		if (res)
			break;
	}
	spin_unlock(&files->file_lock);
	return res;
}
EXPORT_SYMBOL(iterate_fd);
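
/*
 * Editorial sketch (hypothetical callback, not from this file): the walk
 * runs under file_lock and stops at the first non-zero return, which
 * also becomes iterate_fd()'s result. Returning fd + 1 keeps a hit at
 * fd 0 distinguishable from "not found":
 *
 *	static int match_file(const void *p, struct file *file, unsigned fd)
 *	{
 *		return file == p ? fd + 1 : 0;
 *	}
 *
 *	n = iterate_fd(files, 0, match_file, file);
 */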