// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/file.c
 *
 *  Copyright (C) 1998-1999, Stephen Tweedie and Bill Hawes
 *
 *  Manage the dynamic fd arrays in the process files_struct.
 */

#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>

unsigned int sysctl_nr_open __read_mostly = 1024*1024;
unsigned int sysctl_nr_open_min = BITS_PER_LONG;
/* our min() is unusable in constant expressions ;-/ */
#define __const_min(x, y) ((x) < (y) ? (x) : (y))
unsigned int sysctl_nr_open_max =
	__const_min(INT_MAX, ~(size_t)0/sizeof(void *)) & -BITS_PER_LONG;
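/*
 * The cap above keeps nr_open within both INT_MAX (fds are ints) and the
 * largest fd array addressable as a size_t worth of pointers, with
 * "& -BITS_PER_LONG" rounding the result down to a whole number of
 * bitmap words.
 */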

static void __free_fdtable(struct fdtable *fdt)
{
	kvfree(fdt->fd);
	kvfree(fdt->open_fds);
	kfree(fdt);
}

static void free_fdtable_rcu(struct rcu_head *rcu)
{
	__free_fdtable(container_of(rcu, struct fdtable, rcu));
}

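/*
 * full_fds_bits is a second-level bitmap: one bit per word of open_fds,
 * set when that word has every bit set. BITBIT_NR() is the number of
 * longs needed to cover one bit per open_fds word, and BITBIT_SIZE()
 * the same quantity in bytes.
 */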
#define BITBIT_NR(nr)	BITS_TO_LONGS(BITS_TO_LONGS(nr))
#define BITBIT_SIZE(nr)	(BITBIT_NR(nr) * sizeof(long))

/*
 * Copy 'count' fd bits from the old table to the new table and clear the extra
 * space if any.  This does not copy the file pointers.  Called with the files
 * spinlock held for write.
 */
static void copy_fd_bitmaps(struct fdtable *nfdt, struct fdtable *ofdt,
			    unsigned int count)
{
	unsigned int cpy, set;

	cpy = count / BITS_PER_BYTE;
	set = (nfdt->max_fds - count) / BITS_PER_BYTE;
	memcpy(nfdt->open_fds, ofdt->open_fds, cpy);
	memset((char *)nfdt->open_fds + cpy, 0, set);
	memcpy(nfdt->close_on_exec, ofdt->close_on_exec, cpy);
	memset((char *)nfdt->close_on_exec + cpy, 0, set);

	cpy = BITBIT_SIZE(count);
	set = BITBIT_SIZE(nfdt->max_fds) - cpy;
	memcpy(nfdt->full_fds_bits, ofdt->full_fds_bits, cpy);
	memset((char *)nfdt->full_fds_bits + cpy, 0, set);
}

/*
 * Copy all file descriptors from the old table to the new, expanded table and
 * clear the extra space.  Called with the files spinlock held for write.
 */
static void copy_fdtable(struct fdtable *nfdt, struct fdtable *ofdt)
{
	unsigned int cpy, set;

	BUG_ON(nfdt->max_fds < ofdt->max_fds);

	cpy = ofdt->max_fds * sizeof(struct file *);
	set = (nfdt->max_fds - ofdt->max_fds) * sizeof(struct file *);
	memcpy(nfdt->fd, ofdt->fd, cpy);
	memset((char *)nfdt->fd + cpy, 0, set);

	copy_fd_bitmaps(nfdt, ofdt, ofdt->max_fds);
}

static struct fdtable * alloc_fdtable(unsigned int nr)
{
	struct fdtable *fdt;
	void *data;

	/*
	 * Figure out how many fds we actually want to support in this fdtable.
	 * Allocation steps are keyed to the size of the fdarray, since it
	 * grows far faster than any of the other dynamic data. We try to fit
	 * the fdarray into comfortable page-tuned chunks: starting at 1024B
	 * and growing in powers of two from there on.
	 */
	nr /= (1024 / sizeof(struct file *));
	nr = roundup_pow_of_two(nr + 1);
	nr *= (1024 / sizeof(struct file *));
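	/*
	 * Worked example (assuming a 64-bit build with 8-byte pointers):
	 * nr = 256 gives 256/128 = 2, roundup_pow_of_two(3) = 4, then
	 * 4 * 128 = 512 slots, i.e. a 4096-byte fd array.
	 */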
	/*
	 * Note that this can drive nr *below* what we had passed if sysctl_nr_open
	 * had been set lower between the check in expand_files() and here.  Deal
	 * with that in caller, it's cheaper that way.
	 *
	 * We make sure that nr remains a multiple of BITS_PER_LONG - otherwise
	 * bitmaps handling below becomes unpleasant, to put it mildly...
	 */
	if (unlikely(nr > sysctl_nr_open))
		nr = ((sysctl_nr_open - 1) | (BITS_PER_LONG - 1)) + 1;
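	/*
	 * The expression above rounds sysctl_nr_open up to the next multiple
	 * of BITS_PER_LONG: OR-ing in (BITS_PER_LONG - 1) fills the low bits,
	 * and the -1/+1 pair makes exact multiples map to themselves.
	 */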

	fdt = kmalloc(sizeof(struct fdtable), GFP_KERNEL_ACCOUNT);
	if (!fdt)
		goto out;
	fdt->max_fds = nr;
	data = kvmalloc_array(nr, sizeof(struct file *), GFP_KERNEL_ACCOUNT);
	if (!data)
		goto out_fdt;
	fdt->fd = data;

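	/*
	 * All three bitmaps live in one allocation: nr bits of open_fds,
	 * nr bits of close_on_exec, then the full_fds_bits summary bitmap,
	 * padded up to at least a cache line.
	 */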
	data = kvmalloc(max_t(size_t,
			      2 * nr / BITS_PER_BYTE + BITBIT_SIZE(nr), L1_CACHE_BYTES),
			      GFP_KERNEL_ACCOUNT);
	if (!data)
		goto out_arr;
	fdt->open_fds = data;
	data += nr / BITS_PER_BYTE;
	fdt->close_on_exec = data;
	data += nr / BITS_PER_BYTE;
	fdt->full_fds_bits = data;

	return fdt;

out_arr:
	kvfree(fdt->fd);
out_fdt:
	kfree(fdt);
out:
	return NULL;
}

/*
 * Expand the file descriptor table.
 * This function will allocate a new fdtable and both fd array and fdset, of
 * the given size.
 * Return <0 error code on error; 1 on successful completion.
 * The files->file_lock should be held on entry, and will be held on exit.
 */
static int expand_fdtable(struct files_struct *files, unsigned int nr)
	__releases(files->file_lock)
	__acquires(files->file_lock)
{
	struct fdtable *new_fdt, *cur_fdt;

	spin_unlock(&files->file_lock);
	new_fdt = alloc_fdtable(nr);

	/*
	 * Make sure that all concurrent __fd_install() calls have either seen
	 * resize_in_progress or have finished their rcu_read_lock_sched()
	 * section.
	 */
	if (atomic_read(&files->count) > 1)
		synchronize_sched();

	spin_lock(&files->file_lock);
	if (!new_fdt)
		return -ENOMEM;
	/*
	 * extremely unlikely race - sysctl_nr_open decreased between the check in
	 * caller and alloc_fdtable().  Cheaper to catch it here...
	 */
	if (unlikely(new_fdt->max_fds <= nr)) {
		__free_fdtable(new_fdt);
		return -EMFILE;
	}
	cur_fdt = files_fdtable(files);
	BUG_ON(nr < cur_fdt->max_fds);
	copy_fdtable(new_fdt, cur_fdt);
	rcu_assign_pointer(files->fdt, new_fdt);
	if (cur_fdt != &files->fdtab)
		call_rcu(&cur_fdt->rcu, free_fdtable_rcu);
	/* coupled with smp_rmb() in __fd_install() */
	smp_wmb();
	return 1;
}

/*
 * Expand files.
 * This function will expand the file structures, if the requested size exceeds
 * the current capacity and there is room for expansion.
 * Return <0 error code on error; 0 when nothing done; 1 when files were
 * expanded and execution may have blocked.
 * The files->file_lock should be held on entry, and will be held on exit.
 */
static int expand_files(struct files_struct *files, unsigned int nr)
	__releases(files->file_lock)
	__acquires(files->file_lock)
{
	struct fdtable *fdt;
	int expanded = 0;

repeat:
	fdt = files_fdtable(files);

	/* Do we need to expand? */
	if (nr < fdt->max_fds)
		return expanded;

	/* Can we expand? */
	if (nr >= sysctl_nr_open)
		return -EMFILE;

	if (unlikely(files->resize_in_progress)) {
		spin_unlock(&files->file_lock);
		expanded = 1;
		wait_event(files->resize_wait, !files->resize_in_progress);
		spin_lock(&files->file_lock);
		goto repeat;
	}

	/* All good, so we try */
	files->resize_in_progress = true;
	expanded = expand_fdtable(files, nr);
	files->resize_in_progress = false;

	wake_up_all(&files->resize_wait);
	return expanded;
}

static inline void __set_close_on_exec(unsigned int fd, struct fdtable *fdt)
{
	__set_bit(fd, fdt->close_on_exec);
}

static inline void __clear_close_on_exec(unsigned int fd, struct fdtable *fdt)
{
	if (test_bit(fd, fdt->close_on_exec))
		__clear_bit(fd, fdt->close_on_exec);
}

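/*
 * Besides setting the fd's own bit, maintain the full_fds_bits summary:
 * once a whole word of open_fds becomes all-ones, flag it so that
 * find_next_fd() can skip it in a single test.
 */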
static inline void __set_open_fd(unsigned int fd, struct fdtable *fdt)
{
	__set_bit(fd, fdt->open_fds);
	fd /= BITS_PER_LONG;
	if (!~fdt->open_fds[fd])
		__set_bit(fd, fdt->full_fds_bits);
}

static inline void __clear_open_fd(unsigned int fd, struct fdtable *fdt)
{
	__clear_bit(fd, fdt->open_fds);
	__clear_bit(fd / BITS_PER_LONG, fdt->full_fds_bits);
}

static unsigned int count_open_files(struct fdtable *fdt)
{
	unsigned int size = fdt->max_fds;
	unsigned int i;

	/* Find the last open fd */
	for (i = size / BITS_PER_LONG; i > 0; ) {
		if (fdt->open_fds[--i])
			break;
	}
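	/*
	 * Round the result up to a whole word: the scan above is
	 * word-granular, so this may overshoot the highest open fd,
	 * but it never undershoots it.
	 */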
	i = (i + 1) * BITS_PER_LONG;
	return i;
}

/*
 * Allocate a new files structure and copy contents from the
 * passed in files structure.
 * errorp will be valid only when the returned files_struct is NULL.
 */
struct files_struct *dup_fd(struct files_struct *oldf, int *errorp)
{
	struct files_struct *newf;
	struct file **old_fds, **new_fds;
	unsigned int open_files, i;
	struct fdtable *old_fdt, *new_fdt;

	*errorp = -ENOMEM;
	newf = kmem_cache_alloc(files_cachep, GFP_KERNEL);
	if (!newf)
		goto out;

	atomic_set(&newf->count, 1);

	spin_lock_init(&newf->file_lock);
	newf->resize_in_progress = false;
	init_waitqueue_head(&newf->resize_wait);
	newf->next_fd = 0;
	new_fdt = &newf->fdtab;
	new_fdt->max_fds = NR_OPEN_DEFAULT;
	new_fdt->close_on_exec = newf->close_on_exec_init;
	new_fdt->open_fds = newf->open_fds_init;
	new_fdt->full_fds_bits = newf->full_fds_bits_init;
	new_fdt->fd = &newf->fd_array[0];

	spin_lock(&oldf->file_lock);
	old_fdt = files_fdtable(oldf);
	open_files = count_open_files(old_fdt);

	/*
	 * Check whether we need to allocate a larger fd array and fd set.
	 */
	while (unlikely(open_files > new_fdt->max_fds)) {
		spin_unlock(&oldf->file_lock);

		if (new_fdt != &newf->fdtab)
			__free_fdtable(new_fdt);

		new_fdt = alloc_fdtable(open_files - 1);
		if (!new_fdt) {
			*errorp = -ENOMEM;
			goto out_release;
		}

		/* beyond sysctl_nr_open; nothing to do */
		if (unlikely(new_fdt->max_fds < open_files)) {
			__free_fdtable(new_fdt);
			*errorp = -EMFILE;
			goto out_release;
		}

		/*
		 * Reacquire the oldf lock and a fresh pointer to its fd
		 * table; it may have grown a new, bigger fd table in the
		 * meantime, and we need the latest pointer.
		 */
		spin_lock(&oldf->file_lock);
		old_fdt = files_fdtable(oldf);
		open_files = count_open_files(old_fdt);
	}

	copy_fd_bitmaps(new_fdt, old_fdt, open_files);

	old_fds = old_fdt->fd;
	new_fds = new_fdt->fd;

	for (i = open_files; i != 0; i--) {
		struct file *f = *old_fds++;
		if (f) {
			get_file(f);
		} else {
			/*
			 * The fd may be claimed in the fd bitmap but not yet
			 * instantiated in the files array if a sibling thread
			 * is partway through open().  So make sure that this
			 * fd is available to the new process.
			 */
			__clear_open_fd(open_files - i, new_fdt);
		}
		rcu_assign_pointer(*new_fds++, f);
	}
	spin_unlock(&oldf->file_lock);

	/* clear the remainder */
	memset(new_fds, 0, (new_fdt->max_fds - open_files) * sizeof(struct file *));

	rcu_assign_pointer(newf->fdt, new_fdt);

	return newf;

out_release:
	kmem_cache_free(files_cachep, newf);
out:
	return NULL;
}

static struct fdtable *close_files(struct files_struct * files)
{
	/*
	 * It is safe to dereference the fd table without RCU or
	 * ->file_lock because this is the last reference to the
	 * files structure.
	 */
	struct fdtable *fdt = rcu_dereference_raw(files->fdt);
	unsigned int i, j = 0;

	for (;;) {
		unsigned long set;
		i = j * BITS_PER_LONG;
		if (i >= fdt->max_fds)
			break;
		set = fdt->open_fds[j++];
		while (set) {
			if (set & 1) {
				struct file * file = xchg(&fdt->fd[i], NULL);
				if (file) {
					filp_close(file, files);
					cond_resched();
				}
			}
			i++;
			set >>= 1;
		}
	}

	return fdt;
}

struct files_struct *get_files_struct(struct task_struct *task)
{
	struct files_struct *files;

	task_lock(task);
	files = task->files;
	if (files)
		atomic_inc(&files->count);
	task_unlock(task);

	return files;
}

void put_files_struct(struct files_struct *files)
{
	if (atomic_dec_and_test(&files->count)) {
		struct fdtable *fdt = close_files(files);

		/* free the arrays if they are not embedded */
		if (fdt != &files->fdtab)
			__free_fdtable(fdt);
		kmem_cache_free(files_cachep, files);
	}
}

void reset_files_struct(struct files_struct *files)
{
	struct task_struct *tsk = current;
	struct files_struct *old;

	old = tsk->files;
	task_lock(tsk);
	tsk->files = files;
	task_unlock(tsk);
	put_files_struct(old);
}

void exit_files(struct task_struct *tsk)
{
	struct files_struct * files = tsk->files;

	if (files) {
		task_lock(tsk);
		tsk->files = NULL;
		task_unlock(tsk);
		put_files_struct(files);
	}
}

struct files_struct init_files = {
	.count		= ATOMIC_INIT(1),
	.fdt		= &init_files.fdtab,
	.fdtab		= {
		.max_fds	= NR_OPEN_DEFAULT,
		.fd		= &init_files.fd_array[0],
		.close_on_exec	= init_files.close_on_exec_init,
		.open_fds	= init_files.open_fds_init,
		.full_fds_bits	= init_files.full_fds_bits_init,
	},
	.file_lock	= __SPIN_LOCK_UNLOCKED(init_files.file_lock),
};

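/*
 * Two-level search for a free fd: consult full_fds_bits first to skip
 * whole words of open_fds that are known to be completely occupied, then
 * fall back to an ordinary zero-bit search from the best starting point.
 */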
static unsigned int find_next_fd(struct fdtable *fdt, unsigned int start)
{
	unsigned int maxfd = fdt->max_fds;
	unsigned int maxbit = maxfd / BITS_PER_LONG;
	unsigned int bitbit = start / BITS_PER_LONG;

	bitbit = find_next_zero_bit(fdt->full_fds_bits, maxbit, bitbit) * BITS_PER_LONG;
	if (bitbit > maxfd)
		return maxfd;
	if (bitbit > start)
		start = bitbit;
	return find_next_zero_bit(fdt->open_fds, maxfd, start);
}

/*
 * allocate a file descriptor, mark it busy.
 */
int __alloc_fd(struct files_struct *files,
	       unsigned start, unsigned end, unsigned flags)
{
	unsigned int fd;
	int error;
	struct fdtable *fdt;

	spin_lock(&files->file_lock);
repeat:
	fdt = files_fdtable(files);
	fd = start;
	if (fd < files->next_fd)
		fd = files->next_fd;

	if (fd < fdt->max_fds)
		fd = find_next_fd(fdt, fd);

	/*
	 * N.B. For clone tasks sharing a files structure, this test
	 * will limit the total number of files that can be opened.
	 */
	error = -EMFILE;
	if (fd >= end)
		goto out;

	error = expand_files(files, fd);
	if (error < 0)
		goto out;

	/*
	 * If we needed to expand the fd array we
	 * might have blocked - try again.
	 */
	if (error)
		goto repeat;

	if (start <= files->next_fd)
		files->next_fd = fd + 1;

	__set_open_fd(fd, fdt);
	if (flags & O_CLOEXEC)
		__set_close_on_exec(fd, fdt);
	else
		__clear_close_on_exec(fd, fdt);
	error = fd;
#if 1
	/* Sanity check */
	if (rcu_access_pointer(fdt->fd[fd]) != NULL) {
		printk(KERN_WARNING "alloc_fd: slot %d not NULL!\n", fd);
		rcu_assign_pointer(fdt->fd[fd], NULL);
	}
#endif

out:
	spin_unlock(&files->file_lock);
	return error;
}

static int alloc_fd(unsigned start, unsigned flags)
{
	return __alloc_fd(current->files, start, rlimit(RLIMIT_NOFILE), flags);
}

int get_unused_fd_flags(unsigned flags)
{
	return __alloc_fd(current->files, 0, rlimit(RLIMIT_NOFILE), flags);
}
EXPORT_SYMBOL(get_unused_fd_flags);

static void __put_unused_fd(struct files_struct *files, unsigned int fd)
{
	struct fdtable *fdt = files_fdtable(files);
	__clear_open_fd(fd, fdt);
	if (fd < files->next_fd)
		files->next_fd = fd;
}

void put_unused_fd(unsigned int fd)
{
	struct files_struct *files = current->files;
	spin_lock(&files->file_lock);
	__put_unused_fd(files, fd);
	spin_unlock(&files->file_lock);
}

EXPORT_SYMBOL(put_unused_fd);

/*
 * Install a file pointer in the fd array.
 *
 * The VFS is full of places where we drop the files lock between
 * setting the open_fds bitmap and installing the file in the file
 * array.  At any such point, we are vulnerable to a dup2() race
 * installing a file in the array before us.  We need to detect this and
 * fput() the struct file we are about to overwrite in this case.
 *
 * It should never happen - if we let dup2() do it, _really_ bad things
 * will follow.
 *
 * NOTE: __fd_install() variant is really, really low-level; don't
 * use it unless you are forced to by truly lousy API shoved down
 * your throat.  'files' *MUST* be either current->files or obtained
 * by get_files_struct(current) done by whoever had given it to you,
 * or really bad things will happen.  Normally you want to use
 * fd_install() instead.
 */

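/*
 * The lockless fast path below relies on the resize protocol: expand_files()
 * sets resize_in_progress before expand_fdtable() calls synchronize_sched(),
 * so any installer inside its rcu_read_lock_sched() section either observes
 * the flag (and takes file_lock) or completes before the table is copied.
 */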
void __fd_install(struct files_struct *files, unsigned int fd,
		  struct file *file)
{
	struct fdtable *fdt;

	rcu_read_lock_sched();

	if (unlikely(files->resize_in_progress)) {
		rcu_read_unlock_sched();
		spin_lock(&files->file_lock);
		fdt = files_fdtable(files);
		BUG_ON(fdt->fd[fd] != NULL);
		rcu_assign_pointer(fdt->fd[fd], file);
		spin_unlock(&files->file_lock);
		return;
	}
	/* coupled with smp_wmb() in expand_fdtable() */
	smp_rmb();
	fdt = rcu_dereference_sched(files->fdt);
	BUG_ON(fdt->fd[fd] != NULL);
	rcu_assign_pointer(fdt->fd[fd], file);
	rcu_read_unlock_sched();
}

void fd_install(unsigned int fd, struct file *file)
{
	__fd_install(current->files, fd, file);
}

EXPORT_SYMBOL(fd_install);

/*
 * The same warnings as for __alloc_fd()/__fd_install() apply here...
 */
int __close_fd(struct files_struct *files, unsigned fd)
{
	struct file *file;
	struct fdtable *fdt;

	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	if (fd >= fdt->max_fds)
		goto out_unlock;
	file = fdt->fd[fd];
	if (!file)
		goto out_unlock;
	rcu_assign_pointer(fdt->fd[fd], NULL);
	__put_unused_fd(files, fd);
	spin_unlock(&files->file_lock);
	return filp_close(file, files);

out_unlock:
	spin_unlock(&files->file_lock);
	return -EBADF;
}
EXPORT_SYMBOL(__close_fd); /* for ksys_close() */

void do_close_on_exec(struct files_struct *files)
{
	unsigned i;
	struct fdtable *fdt;

	/* exec unshares first */
	spin_lock(&files->file_lock);
	for (i = 0; ; i++) {
		unsigned long set;
		unsigned fd = i * BITS_PER_LONG;
		fdt = files_fdtable(files);
		if (fd >= fdt->max_fds)
			break;
		set = fdt->close_on_exec[i];
		if (!set)
			continue;
		fdt->close_on_exec[i] = 0;
		for ( ; set ; fd++, set >>= 1) {
			struct file *file;
			if (!(set & 1))
				continue;
			file = fdt->fd[fd];
			if (!file)
				continue;
			rcu_assign_pointer(fdt->fd[fd], NULL);
			__put_unused_fd(files, fd);
			spin_unlock(&files->file_lock);
			filp_close(file, files);
			cond_resched();
			spin_lock(&files->file_lock);
		}

	}
	spin_unlock(&files->file_lock);
}

static struct file *__fget(unsigned int fd, fmode_t mask)
{
	struct files_struct *files = current->files;
	struct file *file;

	rcu_read_lock();
loop:
	file = fcheck_files(files, fd);
	if (file) {
		/*
		 * If the file object's refcount could not be taken (it was
		 * already on its way to being freed), the dup2() atomicity
		 * guarantee means the slot may now hold a new file (or NULL),
		 * so loop and look it up again.
		 */
		if (file->f_mode & mask)
			file = NULL;
		else if (!get_file_rcu(file))
			goto loop;
	}
	rcu_read_unlock();

	return file;
}

struct file *fget(unsigned int fd)
{
	return __fget(fd, FMODE_PATH);
}
EXPORT_SYMBOL(fget);

struct file *fget_raw(unsigned int fd)
{
	return __fget(fd, 0);
}
EXPORT_SYMBOL(fget_raw);

/*
 * Lightweight file lookup - no refcnt increment if fd table isn't shared.
 *
 * You can use this instead of fget if you satisfy all of the following
 * conditions:
 * 1) You must call fput_light before exiting the syscall and returning control
 *    to userspace (i.e. you cannot remember the returned struct file * after
 *    returning to userspace).
 * 2) You must not call filp_close on the returned struct file * in between
 *    calls to fget_light and fput_light.
 * 3) You must not clone the current task in between the calls to fget_light
 *    and fput_light.
 *
 * The fput_needed flag returned by fget_light should be passed to the
 * corresponding fput_light.
 */
static unsigned long __fget_light(unsigned int fd, fmode_t mask)
{
	struct files_struct *files = current->files;
	struct file *file;

	if (atomic_read(&files->count) == 1) {
		file = __fcheck_files(files, fd);
		if (!file || unlikely(file->f_mode & mask))
			return 0;
		return (unsigned long)file;
	} else {
		file = __fget(fd, mask);
		if (!file)
			return 0;
		return FDPUT_FPUT | (unsigned long)file;
	}
}
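/*
 * The word returned above packs the struct file pointer together with flag
 * bits: struct file is at least word-aligned, so the low two bits are free
 * to carry FDPUT_FPUT (and, in __fdget_pos(), FDPUT_POS_UNLOCK).
 */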
unsigned long __fdget(unsigned int fd)
{
	return __fget_light(fd, FMODE_PATH);
}
EXPORT_SYMBOL(__fdget);

unsigned long __fdget_raw(unsigned int fd)
{
	return __fget_light(fd, 0);
}

unsigned long __fdget_pos(unsigned int fd)
{
	unsigned long v = __fdget(fd);
	struct file *file = (struct file *)(v & ~3);

	if (file && (file->f_mode & FMODE_ATOMIC_POS)) {
		if (file_count(file) > 1) {
			v |= FDPUT_POS_UNLOCK;
			mutex_lock(&file->f_pos_lock);
		}
	}
	return v;
}

void __f_unlock_pos(struct file *f)
{
	mutex_unlock(&f->f_pos_lock);
}

/*
 * We only lock f_pos if we have threads or if the file might be
 * shared with another process. In both cases we'll have an elevated
 * file count (done either by fdget() or by fork()).
 */

void set_close_on_exec(unsigned int fd, int flag)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;
	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	if (flag)
		__set_close_on_exec(fd, fdt);
	else
		__clear_close_on_exec(fd, fdt);
	spin_unlock(&files->file_lock);
}

bool get_close_on_exec(unsigned int fd)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;
	bool res;
	rcu_read_lock();
	fdt = files_fdtable(files);
	res = close_on_exec(fd, fdt);
	rcu_read_unlock();
	return res;
}

static int do_dup2(struct files_struct *files,
	struct file *file, unsigned fd, unsigned flags)
__releases(&files->file_lock)
{
	struct file *tofree;
	struct fdtable *fdt;

	/*
	 * We need to detect attempts to do dup2() over allocated but still
	 * not finished descriptor.  NB: OpenBSD avoids that at the price of
	 * extra work in their equivalent of fget() - they insert struct
	 * file immediately after grabbing descriptor, mark it larval if
	 * more work (e.g. actual opening) is needed and make sure that
	 * fget() treats larval files as absent.  Potentially interesting,
	 * but while extra work in fget() is trivial, locking implications
	 * and amount of surgery on open()-related paths in VFS are not.
	 * FreeBSD fails with -EBADF in the same situation, NetBSD "solution"
	 * deadlocks in rather amusing ways, AFAICS.  All of that is out of
	 * scope of POSIX or SUS, since neither considers shared descriptor
	 * tables and this condition does not arise without those.
	 */
	fdt = files_fdtable(files);
	tofree = fdt->fd[fd];
	if (!tofree && fd_is_open(fd, fdt))
		goto Ebusy;
	get_file(file);
	rcu_assign_pointer(fdt->fd[fd], file);
	__set_open_fd(fd, fdt);
	if (flags & O_CLOEXEC)
		__set_close_on_exec(fd, fdt);
	else
		__clear_close_on_exec(fd, fdt);
	spin_unlock(&files->file_lock);

	if (tofree)
		filp_close(tofree, files);

	return fd;

Ebusy:
	spin_unlock(&files->file_lock);
	return -EBUSY;
}

int replace_fd(unsigned fd, struct file *file, unsigned flags)
{
	int err;
	struct files_struct *files = current->files;

	if (!file)
		return __close_fd(files, fd);

	if (fd >= rlimit(RLIMIT_NOFILE))
		return -EBADF;

	spin_lock(&files->file_lock);
	err = expand_files(files, fd);
	if (unlikely(err < 0))
		goto out_unlock;
	return do_dup2(files, file, fd, flags);

out_unlock:
	spin_unlock(&files->file_lock);
	return err;
}

static int ksys_dup3(unsigned int oldfd, unsigned int newfd, int flags)
{
	int err = -EBADF;
	struct file *file;
	struct files_struct *files = current->files;

	if ((flags & ~O_CLOEXEC) != 0)
		return -EINVAL;

	if (unlikely(oldfd == newfd))
		return -EINVAL;

	if (newfd >= rlimit(RLIMIT_NOFILE))
		return -EBADF;

	spin_lock(&files->file_lock);
	err = expand_files(files, newfd);
	file = fcheck(oldfd);
	if (unlikely(!file))
		goto Ebadf;
	if (unlikely(err < 0)) {
		if (err == -EMFILE)
			goto Ebadf;
		goto out_unlock;
	}
	return do_dup2(files, file, newfd, flags);

Ebadf:
	err = -EBADF;
out_unlock:
	spin_unlock(&files->file_lock);
	return err;
}

SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
{
	return ksys_dup3(oldfd, newfd, flags);
}

SYSCALL_DEFINE2(dup2, unsigned int, oldfd, unsigned int, newfd)
{
	if (unlikely(newfd == oldfd)) { /* corner case */
		struct files_struct *files = current->files;
		int retval = oldfd;

		rcu_read_lock();
		if (!fcheck_files(files, oldfd))
			retval = -EBADF;
		rcu_read_unlock();
		return retval;
	}
	return ksys_dup3(oldfd, newfd, 0);
}

int ksys_dup(unsigned int fildes)
{
	int ret = -EBADF;
	struct file *file = fget_raw(fildes);

	if (file) {
		ret = get_unused_fd_flags(0);
		if (ret >= 0)
			fd_install(ret, file);
		else
			fput(file);
	}
	return ret;
}

SYSCALL_DEFINE1(dup, unsigned int, fildes)
{
	return ksys_dup(fildes);
}

int f_dupfd(unsigned int from, struct file *file, unsigned flags)
{
	int err;
	if (from >= rlimit(RLIMIT_NOFILE))
		return -EINVAL;
	err = alloc_fd(from, flags);
	if (err >= 0) {
		get_file(file);
		fd_install(err, file);
	}
	return err;
}

int iterate_fd(struct files_struct *files, unsigned n,
		int (*f)(const void *, struct file *, unsigned),
		const void *p)
{
	struct fdtable *fdt;
	int res = 0;
	if (!files)
		return 0;
	spin_lock(&files->file_lock);
	for (fdt = files_fdtable(files); n < fdt->max_fds; n++) {
		struct file *file;
		file = rcu_dereference_check_fdtable(files, fdt->fd[n]);
		if (!file)
			continue;
		res = f(p, file, n);
		if (res)
			break;
	}
	spin_unlock(&files->file_lock);
	return res;
}
EXPORT_SYMBOL(iterate_fd);