/*
 * linux/fs/file.c
 *
 * Copyright (C) 1998-1999, Stephen Tweedie and Bill Hawes
 *
 * Manage the dynamic fd arrays in the process files_struct.
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/time.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>

struct fdtable_defer {
	spinlock_t lock;
	struct work_struct wq;
	struct fdtable *next;
};

int sysctl_nr_open __read_mostly = 1024*1024;
int sysctl_nr_open_min = BITS_PER_LONG;
int sysctl_nr_open_max = 1024 * 1024; /* raised later */

/*
 * We use this list to defer freeing fdtables that have vmalloc'ed
 * sets/arrays. Keeping a per-cpu list avoids embedding the work_struct
 * in the fdtable itself, which would have added 64 bytes (on i386) to
 * this per-task structure.
 */
static DEFINE_PER_CPU(struct fdtable_defer, fdtable_defer_list);

static void *alloc_fdmem(unsigned int size)
{
	/*
	 * Very large allocations can stress page reclaim, so fall back to
	 * vmalloc() if the allocation size will be considered "large" by the VM.
	 */
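	/*
	 * With the common values of 4 KiB pages and PAGE_ALLOC_COSTLY_ORDER
	 * == 3, the cutoff below is 32 KiB - i.e. fd arrays of more than
	 * 4096 slots (with 8-byte pointers) come from vmalloc().
	 */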
	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
		void *data = kmalloc(size, GFP_KERNEL|__GFP_NOWARN);
		if (data != NULL)
			return data;
	}
	return vmalloc(size);
}

static void free_fdmem(void *ptr)
{
	is_vmalloc_addr(ptr) ? vfree(ptr) : kfree(ptr);
}

static void __free_fdtable(struct fdtable *fdt)
{
	free_fdmem(fdt->fd);
	free_fdmem(fdt->open_fds);
	kfree(fdt);
}

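/*
 * Runs from the workqueue, i.e. in process context; the _bh lock below
 * matters because free_fdtable_rcu() also takes this lock, and RCU may
 * invoke that callback from softirq context on this CPU.
 */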
static void free_fdtable_work(struct work_struct *work)
{
	struct fdtable_defer *f =
		container_of(work, struct fdtable_defer, wq);
	struct fdtable *fdt;

	spin_lock_bh(&f->lock);
	fdt = f->next;
	f->next = NULL;
	spin_unlock_bh(&f->lock);
	while (fdt) {
		struct fdtable *next = fdt->next;

		__free_fdtable(fdt);
		fdt = next;
	}
}

void free_fdtable_rcu(struct rcu_head *rcu)
{
	struct fdtable *fdt = container_of(rcu, struct fdtable, rcu);
	struct fdtable_defer *fddef;

	BUG_ON(!fdt);

	if (fdt->max_fds <= NR_OPEN_DEFAULT) {
		/*
		 * This fdtable is embedded in the files structure and that
		 * structure itself is getting destroyed.
		 */
		kmem_cache_free(files_cachep,
				container_of(fdt, struct files_struct, fdtab));
		return;
	}
	if (!is_vmalloc_addr(fdt->fd) && !is_vmalloc_addr(fdt->open_fds)) {
		kfree(fdt->fd);
		kfree(fdt->open_fds);
		kfree(fdt);
	} else {
		fddef = &get_cpu_var(fdtable_defer_list);
		spin_lock(&fddef->lock);
		fdt->next = fddef->next;
		fddef->next = fdt;
		/* vmallocs are handled from the workqueue context */
		schedule_work(&fddef->wq);
		spin_unlock(&fddef->lock);
		put_cpu_var(fdtable_defer_list);
	}
}

/*
 * Copy the existing fd array and fdsets into the new, larger fdtable.
 * Called with the files spinlock held for write.
 */
static void copy_fdtable(struct fdtable *nfdt, struct fdtable *ofdt)
{
	unsigned int cpy, set;

	BUG_ON(nfdt->max_fds < ofdt->max_fds);

	cpy = ofdt->max_fds * sizeof(struct file *);
	set = (nfdt->max_fds - ofdt->max_fds) * sizeof(struct file *);
	memcpy(nfdt->fd, ofdt->fd, cpy);
	memset((char *)(nfdt->fd) + cpy, 0, set);

	cpy = ofdt->max_fds / BITS_PER_BYTE;
	set = (nfdt->max_fds - ofdt->max_fds) / BITS_PER_BYTE;
	memcpy(nfdt->open_fds, ofdt->open_fds, cpy);
	memset((char *)(nfdt->open_fds) + cpy, 0, set);
	memcpy(nfdt->close_on_exec, ofdt->close_on_exec, cpy);
	memset((char *)(nfdt->close_on_exec) + cpy, 0, set);
}

static struct fdtable *alloc_fdtable(unsigned int nr)
{
	struct fdtable *fdt;
	char *data;

	/*
	 * Figure out how many fds we actually want to support in this fdtable.
	 * Allocation steps are keyed to the size of the fdarray, since it
	 * grows far faster than any of the other dynamic data. We try to fit
	 * the fdarray into comfortable page-tuned chunks: starting at 1024B
	 * and growing in powers of two from there on.
	 */
	nr /= (1024 / sizeof(struct file *));
	nr = roundup_pow_of_two(nr + 1);
	nr *= (1024 / sizeof(struct file *));
	/*
	 * Note that this can drive nr *below* what we had passed if sysctl_nr_open
	 * had been set lower between the check in expand_files() and here. Deal
	 * with that in caller, it's cheaper that way.
	 *
	 * We make sure that nr remains a multiple of BITS_PER_LONG - otherwise
	 * bitmaps handling below becomes unpleasant, to put it mildly...
	 */
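	/*
	 * E.g. with BITS_PER_LONG == 64 and sysctl_nr_open == 100, the
	 * clamp below yields ((100 - 1) | 63) + 1 == 128, the next
	 * multiple of 64.
	 */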
	if (unlikely(nr > sysctl_nr_open))
		nr = ((sysctl_nr_open - 1) | (BITS_PER_LONG - 1)) + 1;

	fdt = kmalloc(sizeof(struct fdtable), GFP_KERNEL);
	if (!fdt)
		goto out;
	fdt->max_fds = nr;
	data = alloc_fdmem(nr * sizeof(struct file *));
	if (!data)
		goto out_fdt;
	fdt->fd = (struct file **)data;
	data = alloc_fdmem(max_t(unsigned int,
				 2 * nr / BITS_PER_BYTE, L1_CACHE_BYTES));
	if (!data)
		goto out_arr;
	fdt->open_fds = (fd_set *)data;
	data += nr / BITS_PER_BYTE;
	fdt->close_on_exec = (fd_set *)data;
	fdt->next = NULL;

	return fdt;

out_arr:
	free_fdmem(fdt->fd);
out_fdt:
	kfree(fdt);
out:
	return NULL;
}

/*
 * Expand the file descriptor table.
 * This function will allocate a new fdtable and both fd array and fdset, of
 * the given size.
 * Return <0 error code on error; 1 on successful completion.
 * The files->file_lock should be held on entry, and will be held on exit.
 */
static int expand_fdtable(struct files_struct *files, int nr)
	__releases(files->file_lock)
	__acquires(files->file_lock)
{
	struct fdtable *new_fdt, *cur_fdt;

	spin_unlock(&files->file_lock);
	new_fdt = alloc_fdtable(nr);
	spin_lock(&files->file_lock);
	if (!new_fdt)
		return -ENOMEM;
	/*
	 * extremely unlikely race - sysctl_nr_open decreased between the check in
	 * caller and alloc_fdtable(). Cheaper to catch it here...
	 */
	if (unlikely(new_fdt->max_fds <= nr)) {
		__free_fdtable(new_fdt);
		return -EMFILE;
	}
	/*
	 * Check again since another task may have expanded the fd table while
	 * we dropped the lock
	 */
	cur_fdt = files_fdtable(files);
	if (nr >= cur_fdt->max_fds) {
		/* Continue as planned */
		copy_fdtable(new_fdt, cur_fdt);
		rcu_assign_pointer(files->fdt, new_fdt);
		if (cur_fdt->max_fds > NR_OPEN_DEFAULT)
			free_fdtable(cur_fdt);
	} else {
		/* Somebody else expanded, so undo our attempt */
		__free_fdtable(new_fdt);
	}
	return 1;
}

/*
 * Expand files.
 * This function will expand the file structures, if the requested size exceeds
 * the current capacity and there is room for expansion.
 * Return <0 error code on error; 0 when nothing done; 1 when files were
 * expanded and execution may have blocked.
 * The files->file_lock should be held on entry, and will be held on exit.
 */
int expand_files(struct files_struct *files, int nr)
{
	struct fdtable *fdt;

	fdt = files_fdtable(files);

	/*
	 * N.B. For clone tasks sharing a files structure, this test
	 * will limit the total number of files that can be opened.
	 */
	if (nr >= rlimit(RLIMIT_NOFILE))
		return -EMFILE;

	/* Do we need to expand? */
	if (nr < fdt->max_fds)
		return 0;

	/* Can we expand? */
	if (nr >= sysctl_nr_open)
		return -EMFILE;

	/* All good, so we try */
	return expand_fdtable(files, nr);
}

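/*
 * Note that this reports the open-fd count rounded up to a whole bitmap
 * word (a multiple of BITS_PER_LONG), not the exact number of open
 * files; dup_fd() below relies on that when it copies and clears whole
 * bytes of the bitmaps.
 */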
static int count_open_files(struct fdtable *fdt)
{
	int size = fdt->max_fds;
	int i;

	/* Find the last open fd */
	for (i = size / (8 * sizeof(long)); i > 0; ) {
		if (fdt->open_fds->fds_bits[--i])
			break;
	}
	i = (i + 1) * 8 * sizeof(long);
	return i;
}

/*
 * Allocate a new files structure and copy contents from the
 * passed in files structure.
 * errorp will be valid only when the returned files_struct is NULL.
 */
struct files_struct *dup_fd(struct files_struct *oldf, int *errorp)
{
	struct files_struct *newf;
	struct file **old_fds, **new_fds;
	int open_files, size, i;
	struct fdtable *old_fdt, *new_fdt;

	*errorp = -ENOMEM;
	newf = kmem_cache_alloc(files_cachep, GFP_KERNEL);
	if (!newf)
		goto out;

	atomic_set(&newf->count, 1);

	spin_lock_init(&newf->file_lock);
	newf->next_fd = 0;
	new_fdt = &newf->fdtab;
	new_fdt->max_fds = NR_OPEN_DEFAULT;
	new_fdt->close_on_exec = (fd_set *)&newf->close_on_exec_init;
	new_fdt->open_fds = (fd_set *)&newf->open_fds_init;
	new_fdt->fd = &newf->fd_array[0];
	new_fdt->next = NULL;

	spin_lock(&oldf->file_lock);
	old_fdt = files_fdtable(oldf);
	open_files = count_open_files(old_fdt);

	/*
	 * Check whether we need to allocate a larger fd array and fd set.
	 */
	while (unlikely(open_files > new_fdt->max_fds)) {
		spin_unlock(&oldf->file_lock);

		if (new_fdt != &newf->fdtab)
			__free_fdtable(new_fdt);

		new_fdt = alloc_fdtable(open_files - 1);
		if (!new_fdt) {
			*errorp = -ENOMEM;
			goto out_release;
		}

		/* beyond sysctl_nr_open; nothing to do */
		if (unlikely(new_fdt->max_fds < open_files)) {
			__free_fdtable(new_fdt);
			*errorp = -EMFILE;
			goto out_release;
		}

		/*
		 * Reacquire the oldf lock and a pointer to its fd table; it
		 * may have grown a new, bigger fd table while the lock was
		 * dropped, and we need the latest pointer.
		 */
		spin_lock(&oldf->file_lock);
		old_fdt = files_fdtable(oldf);
		open_files = count_open_files(old_fdt);
	}

	old_fds = old_fdt->fd;
	new_fds = new_fdt->fd;

	memcpy(new_fdt->open_fds->fds_bits,
	       old_fdt->open_fds->fds_bits, open_files / 8);
	memcpy(new_fdt->close_on_exec->fds_bits,
	       old_fdt->close_on_exec->fds_bits, open_files / 8);

	for (i = open_files; i != 0; i--) {
		struct file *f = *old_fds++;
		if (f) {
			get_file(f);
		} else {
			/*
			 * The fd may be claimed in the fd bitmap but not yet
			 * instantiated in the files array if a sibling thread
			 * is partway through open(). So make sure that this
			 * fd is available to the new process.
			 */
			FD_CLR(open_files - i, new_fdt->open_fds);
		}
		rcu_assign_pointer(*new_fds++, f);
	}
	spin_unlock(&oldf->file_lock);

	/* compute the remainder to be cleared */
	size = (new_fdt->max_fds - open_files) * sizeof(struct file *);

	/* This is long-word aligned, thus could use an optimized version */
	memset(new_fds, 0, size);

	if (new_fdt->max_fds > open_files) {
		int left = (new_fdt->max_fds - open_files) / 8;
		int start = open_files / (8 * sizeof(unsigned long));

		memset(&new_fdt->open_fds->fds_bits[start], 0, left);
		memset(&new_fdt->close_on_exec->fds_bits[start], 0, left);
	}

	rcu_assign_pointer(newf->fdt, new_fdt);

	return newf;

out_release:
	kmem_cache_free(files_cachep, newf);
out:
	return NULL;
}

static void __devinit fdtable_defer_list_init(int cpu)
{
	struct fdtable_defer *fddef = &per_cpu(fdtable_defer_list, cpu);
	spin_lock_init(&fddef->lock);
	INIT_WORK(&fddef->wq, free_fdtable_work);
	fddef->next = NULL;
}

void __init files_defer_init(void)
{
	int i;
	for_each_possible_cpu(i)
		fdtable_defer_list_init(i);
	sysctl_nr_open_max = min((size_t)INT_MAX, ~(size_t)0/sizeof(void *)) &
			     -BITS_PER_LONG;
}

struct files_struct init_files = {
	.count = ATOMIC_INIT(1),
	.fdt = &init_files.fdtab,
	.fdtab = {
		.max_fds = NR_OPEN_DEFAULT,
		.fd = &init_files.fd_array[0],
		.close_on_exec = (fd_set *)&init_files.close_on_exec_init,
		.open_fds = (fd_set *)&init_files.open_fds_init,
	},
	.file_lock = __SPIN_LOCK_UNLOCKED(init_files.file_lock),
};

/*
 * allocate a file descriptor, mark it busy.
 */
int alloc_fd(unsigned start, unsigned flags)
{
	struct files_struct *files = current->files;
	unsigned int fd;
	int error;
	struct fdtable *fdt;

	spin_lock(&files->file_lock);
repeat:
	fdt = files_fdtable(files);
	fd = start;
	if (fd < files->next_fd)
		fd = files->next_fd;

	if (fd < fdt->max_fds)
		fd = find_next_zero_bit(fdt->open_fds->fds_bits,
					fdt->max_fds, fd);

	error = expand_files(files, fd);
	if (error < 0)
		goto out;

	/*
	 * If we needed to expand the fd array we
	 * might have blocked - try again.
	 */
	if (error)
		goto repeat;

	if (start <= files->next_fd)
		files->next_fd = fd + 1;

	FD_SET(fd, fdt->open_fds);
	if (flags & O_CLOEXEC)
		FD_SET(fd, fdt->close_on_exec);
	else
		FD_CLR(fd, fdt->close_on_exec);
	error = fd;
#if 1
	/* Sanity check */
	if (rcu_dereference_raw(fdt->fd[fd]) != NULL) {
		printk(KERN_WARNING "alloc_fd: slot %d not NULL!\n", fd);
		rcu_assign_pointer(fdt->fd[fd], NULL);
	}
#endif

out:
	spin_unlock(&files->file_lock);
	return error;
}

int get_unused_fd(void)
{
	return alloc_fd(0, 0);
}
EXPORT_SYMBOL(get_unused_fd);

// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/file.c
 *
 * Copyright (C) 1998-1999, Stephen Tweedie and Bill Hawes
 *
 * Manage the dynamic fd arrays in the process files_struct.
 */

#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>

unsigned int sysctl_nr_open __read_mostly = 1024*1024;
unsigned int sysctl_nr_open_min = BITS_PER_LONG;
/* our min() is unusable in constant expressions ;-/ */
#define __const_min(x, y) ((x) < (y) ? (x) : (y))
unsigned int sysctl_nr_open_max =
	__const_min(INT_MAX, ~(size_t)0/sizeof(void *)) & -BITS_PER_LONG;
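/*
 * On 64-bit that works out to INT_MAX & -64 == 2147483584: the largest
 * fd count that still fits in an int while keeping the table size a
 * multiple of BITS_PER_LONG.
 */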

static void __free_fdtable(struct fdtable *fdt)
{
	kvfree(fdt->fd);
	kvfree(fdt->open_fds);
	kfree(fdt);
}

static void free_fdtable_rcu(struct rcu_head *rcu)
{
	__free_fdtable(container_of(rcu, struct fdtable, rcu));
}

#define BITBIT_NR(nr)	BITS_TO_LONGS(BITS_TO_LONGS(nr))
#define BITBIT_SIZE(nr)	(BITBIT_NR(nr) * sizeof(long))
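/*
 * full_fds_bits is a second-level bitmap with one bit per word of
 * open_fds, set when that word is completely full. E.g. on 64-bit,
 * nr == 256 fds means 4 open_fds words, so BITBIT_NR(256) == 1 long
 * and BITBIT_SIZE(256) == 8 bytes.
 */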

/*
 * Copy 'count' fd bits from the old table to the new table and clear the extra
 * space if any. This does not copy the file pointers. Called with the files
 * spinlock held for write.
 */
static void copy_fd_bitmaps(struct fdtable *nfdt, struct fdtable *ofdt,
			    unsigned int count)
{
	unsigned int cpy, set;

	cpy = count / BITS_PER_BYTE;
	set = (nfdt->max_fds - count) / BITS_PER_BYTE;
	memcpy(nfdt->open_fds, ofdt->open_fds, cpy);
	memset((char *)nfdt->open_fds + cpy, 0, set);
	memcpy(nfdt->close_on_exec, ofdt->close_on_exec, cpy);
	memset((char *)nfdt->close_on_exec + cpy, 0, set);

	cpy = BITBIT_SIZE(count);
	set = BITBIT_SIZE(nfdt->max_fds) - cpy;
	memcpy(nfdt->full_fds_bits, ofdt->full_fds_bits, cpy);
	memset((char *)nfdt->full_fds_bits + cpy, 0, set);
}

/*
 * Copy all file descriptors from the old table to the new, expanded table and
 * clear the extra space. Called with the files spinlock held for write.
 */
static void copy_fdtable(struct fdtable *nfdt, struct fdtable *ofdt)
{
	unsigned int cpy, set;

	BUG_ON(nfdt->max_fds < ofdt->max_fds);

	cpy = ofdt->max_fds * sizeof(struct file *);
	set = (nfdt->max_fds - ofdt->max_fds) * sizeof(struct file *);
	memcpy(nfdt->fd, ofdt->fd, cpy);
	memset((char *)nfdt->fd + cpy, 0, set);

	copy_fd_bitmaps(nfdt, ofdt, ofdt->max_fds);
}

static struct fdtable *alloc_fdtable(unsigned int nr)
{
	struct fdtable *fdt;
	void *data;

	/*
	 * Figure out how many fds we actually want to support in this fdtable.
	 * Allocation steps are keyed to the size of the fdarray, since it
	 * grows far faster than any of the other dynamic data. We try to fit
	 * the fdarray into comfortable page-tuned chunks: starting at 1024B
	 * and growing in powers of two from there on.
	 */
	nr /= (1024 / sizeof(struct file *));
	nr = roundup_pow_of_two(nr + 1);
	nr *= (1024 / sizeof(struct file *));
	/*
	 * Note that this can drive nr *below* what we had passed if sysctl_nr_open
	 * had been set lower between the check in expand_files() and here. Deal
	 * with that in caller, it's cheaper that way.
	 *
	 * We make sure that nr remains a multiple of BITS_PER_LONG - otherwise
	 * bitmaps handling below becomes unpleasant, to put it mildly...
	 */
	if (unlikely(nr > sysctl_nr_open))
		nr = ((sysctl_nr_open - 1) | (BITS_PER_LONG - 1)) + 1;

	fdt = kmalloc(sizeof(struct fdtable), GFP_KERNEL_ACCOUNT);
	if (!fdt)
		goto out;
	fdt->max_fds = nr;
	data = kvmalloc_array(nr, sizeof(struct file *), GFP_KERNEL_ACCOUNT);
	if (!data)
		goto out_fdt;
	fdt->fd = data;

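	/*
	 * One allocation holds all three bitmaps back to back: open_fds
	 * and close_on_exec at nr / BITS_PER_BYTE bytes each, followed by
	 * full_fds_bits at BITBIT_SIZE(nr) bytes.
	 */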
	data = kvmalloc(max_t(size_t,
			      2 * nr / BITS_PER_BYTE + BITBIT_SIZE(nr), L1_CACHE_BYTES),
			GFP_KERNEL_ACCOUNT);
	if (!data)
		goto out_arr;
	fdt->open_fds = data;
	data += nr / BITS_PER_BYTE;
	fdt->close_on_exec = data;
	data += nr / BITS_PER_BYTE;
	fdt->full_fds_bits = data;

	return fdt;

out_arr:
	kvfree(fdt->fd);
out_fdt:
	kfree(fdt);
out:
	return NULL;
}

/*
 * Expand the file descriptor table.
 * This function will allocate a new fdtable and both fd array and fdset, of
 * the given size.
 * Return <0 error code on error; 1 on successful completion.
 * The files->file_lock should be held on entry, and will be held on exit.
 */
static int expand_fdtable(struct files_struct *files, unsigned int nr)
	__releases(files->file_lock)
	__acquires(files->file_lock)
{
	struct fdtable *new_fdt, *cur_fdt;

	spin_unlock(&files->file_lock);
	new_fdt = alloc_fdtable(nr);

	/* make sure all __fd_install() callers have seen resize_in_progress
	 * or have finished their rcu_read_lock_sched() section.
	 */
	if (atomic_read(&files->count) > 1)
		synchronize_rcu();
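	/* (with a single user there can be no concurrent lockless
	 * __fd_install(), so the grace period can be skipped.)
	 */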

	spin_lock(&files->file_lock);
	if (!new_fdt)
		return -ENOMEM;
	/*
	 * extremely unlikely race - sysctl_nr_open decreased between the check in
	 * caller and alloc_fdtable(). Cheaper to catch it here...
	 */
	if (unlikely(new_fdt->max_fds <= nr)) {
		__free_fdtable(new_fdt);
		return -EMFILE;
	}
	cur_fdt = files_fdtable(files);
	BUG_ON(nr < cur_fdt->max_fds);
	copy_fdtable(new_fdt, cur_fdt);
	rcu_assign_pointer(files->fdt, new_fdt);
	if (cur_fdt != &files->fdtab)
		call_rcu(&cur_fdt->rcu, free_fdtable_rcu);
	/* coupled with smp_rmb() in __fd_install() */
	smp_wmb();
	return 1;
}

/*
 * Expand files.
 * This function will expand the file structures, if the requested size exceeds
 * the current capacity and there is room for expansion.
 * Return <0 error code on error; 0 when nothing done; 1 when files were
 * expanded and execution may have blocked.
 * The files->file_lock should be held on entry, and will be held on exit.
 */
static int expand_files(struct files_struct *files, unsigned int nr)
	__releases(files->file_lock)
	__acquires(files->file_lock)
{
	struct fdtable *fdt;
	int expanded = 0;

repeat:
	fdt = files_fdtable(files);

	/* Do we need to expand? */
	if (nr < fdt->max_fds)
		return expanded;

	/* Can we expand? */
	if (nr >= sysctl_nr_open)
		return -EMFILE;

	if (unlikely(files->resize_in_progress)) {
		spin_unlock(&files->file_lock);
		expanded = 1;
		wait_event(files->resize_wait, !files->resize_in_progress);
		spin_lock(&files->file_lock);
		goto repeat;
	}

	/* All good, so we try */
	files->resize_in_progress = true;
	expanded = expand_fdtable(files, nr);
	files->resize_in_progress = false;

	wake_up_all(&files->resize_wait);
	return expanded;
}

static inline void __set_close_on_exec(unsigned int fd, struct fdtable *fdt)
{
	__set_bit(fd, fdt->close_on_exec);
}

static inline void __clear_close_on_exec(unsigned int fd, struct fdtable *fdt)
{
	if (test_bit(fd, fdt->close_on_exec))
		__clear_bit(fd, fdt->close_on_exec);
}

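/*
 * The helpers below keep full_fds_bits in sync: whenever a word of
 * open_fds becomes all-ones (e.g. fds 0-63 all open, with 64-bit longs),
 * the matching bit is set so that find_next_fd() can skip the whole word.
 */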
static inline void __set_open_fd(unsigned int fd, struct fdtable *fdt)
{
	__set_bit(fd, fdt->open_fds);
	fd /= BITS_PER_LONG;
	if (!~fdt->open_fds[fd])
		__set_bit(fd, fdt->full_fds_bits);
}

static inline void __clear_open_fd(unsigned int fd, struct fdtable *fdt)
{
	__clear_bit(fd, fdt->open_fds);
	__clear_bit(fd / BITS_PER_LONG, fdt->full_fds_bits);
}

static unsigned int count_open_files(struct fdtable *fdt)
{
	unsigned int size = fdt->max_fds;
	unsigned int i;

	/* Find the last open fd */
	for (i = size / BITS_PER_LONG; i > 0; ) {
		if (fdt->open_fds[--i])
			break;
	}
	i = (i + 1) * BITS_PER_LONG;
	return i;
}

/*
 * Allocate a new files structure and copy contents from the
 * passed in files structure.
 * errorp will be valid only when the returned files_struct is NULL.
 */
struct files_struct *dup_fd(struct files_struct *oldf, int *errorp)
{
	struct files_struct *newf;
	struct file **old_fds, **new_fds;
	unsigned int open_files, i;
	struct fdtable *old_fdt, *new_fdt;

	*errorp = -ENOMEM;
	newf = kmem_cache_alloc(files_cachep, GFP_KERNEL);
	if (!newf)
		goto out;

	atomic_set(&newf->count, 1);

	spin_lock_init(&newf->file_lock);
	newf->resize_in_progress = false;
	init_waitqueue_head(&newf->resize_wait);
	newf->next_fd = 0;
	new_fdt = &newf->fdtab;
	new_fdt->max_fds = NR_OPEN_DEFAULT;
	new_fdt->close_on_exec = newf->close_on_exec_init;
	new_fdt->open_fds = newf->open_fds_init;
	new_fdt->full_fds_bits = newf->full_fds_bits_init;
	new_fdt->fd = &newf->fd_array[0];

	spin_lock(&oldf->file_lock);
	old_fdt = files_fdtable(oldf);
	open_files = count_open_files(old_fdt);

	/*
	 * Check whether we need to allocate a larger fd array and fd set.
	 */
	while (unlikely(open_files > new_fdt->max_fds)) {
		spin_unlock(&oldf->file_lock);

		if (new_fdt != &newf->fdtab)
			__free_fdtable(new_fdt);

		new_fdt = alloc_fdtable(open_files - 1);
		if (!new_fdt) {
			*errorp = -ENOMEM;
			goto out_release;
		}

		/* beyond sysctl_nr_open; nothing to do */
		if (unlikely(new_fdt->max_fds < open_files)) {
			__free_fdtable(new_fdt);
			*errorp = -EMFILE;
			goto out_release;
		}

		/*
		 * Reacquire the oldf lock and a pointer to its fd table; it
		 * may have grown a new, bigger fd table while the lock was
		 * dropped, and we need the latest pointer.
		 */
		spin_lock(&oldf->file_lock);
		old_fdt = files_fdtable(oldf);
		open_files = count_open_files(old_fdt);
	}

	copy_fd_bitmaps(new_fdt, old_fdt, open_files);

	old_fds = old_fdt->fd;
	new_fds = new_fdt->fd;

	for (i = open_files; i != 0; i--) {
		struct file *f = *old_fds++;
		if (f) {
			get_file(f);
		} else {
			/*
			 * The fd may be claimed in the fd bitmap but not yet
			 * instantiated in the files array if a sibling thread
			 * is partway through open(). So make sure that this
			 * fd is available to the new process.
			 */
			__clear_open_fd(open_files - i, new_fdt);
		}
		rcu_assign_pointer(*new_fds++, f);
	}
	spin_unlock(&oldf->file_lock);

	/* clear the remainder */
	memset(new_fds, 0, (new_fdt->max_fds - open_files) * sizeof(struct file *));

	rcu_assign_pointer(newf->fdt, new_fdt);

	return newf;

out_release:
	kmem_cache_free(files_cachep, newf);
out:
	return NULL;
}

static struct fdtable *close_files(struct files_struct *files)
{
	/*
	 * It is safe to dereference the fd table without RCU or
	 * ->file_lock because this is the last reference to the
	 * files structure.
	 */
	struct fdtable *fdt = rcu_dereference_raw(files->fdt);
	unsigned int i, j = 0;

	for (;;) {
		unsigned long set;
		i = j * BITS_PER_LONG;
		if (i >= fdt->max_fds)
			break;
		set = fdt->open_fds[j++];
		while (set) {
			if (set & 1) {
				struct file *file = xchg(&fdt->fd[i], NULL);
				if (file) {
					filp_close(file, files);
					cond_resched();
				}
			}
			i++;
			set >>= 1;
		}
	}

	return fdt;
}

struct files_struct *get_files_struct(struct task_struct *task)
{
	struct files_struct *files;

	task_lock(task);
	files = task->files;
	if (files)
		atomic_inc(&files->count);
	task_unlock(task);

	return files;
}

void put_files_struct(struct files_struct *files)
{
	if (atomic_dec_and_test(&files->count)) {
		struct fdtable *fdt = close_files(files);

		/* free the arrays if they are not embedded */
		if (fdt != &files->fdtab)
			__free_fdtable(fdt);
		kmem_cache_free(files_cachep, files);
	}
}

void reset_files_struct(struct files_struct *files)
{
	struct task_struct *tsk = current;
	struct files_struct *old;

	old = tsk->files;
	task_lock(tsk);
	tsk->files = files;
	task_unlock(tsk);
	put_files_struct(old);
}

void exit_files(struct task_struct *tsk)
{
	struct files_struct *files = tsk->files;

	if (files) {
		task_lock(tsk);
		tsk->files = NULL;
		task_unlock(tsk);
		put_files_struct(files);
	}
}

struct files_struct init_files = {
	.count = ATOMIC_INIT(1),
	.fdt = &init_files.fdtab,
	.fdtab = {
		.max_fds = NR_OPEN_DEFAULT,
		.fd = &init_files.fd_array[0],
		.close_on_exec = init_files.close_on_exec_init,
		.open_fds = init_files.open_fds_init,
		.full_fds_bits = init_files.full_fds_bits_init,
	},
	.file_lock = __SPIN_LOCK_UNLOCKED(init_files.file_lock),
	.resize_wait = __WAIT_QUEUE_HEAD_INITIALIZER(init_files.resize_wait),
};

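/*
 * Two-level search: scan full_fds_bits for the first open_fds word that
 * is not completely full, then bit-search open_fds only from there. E.g.
 * with fds 0-199 open (64-bit longs), words 0-2 are full, so the search
 * starts at bit 192 and finds fd 200 without testing bits 0-191.
 */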
static unsigned int find_next_fd(struct fdtable *fdt, unsigned int start)
{
	unsigned int maxfd = fdt->max_fds;
	unsigned int maxbit = maxfd / BITS_PER_LONG;
	unsigned int bitbit = start / BITS_PER_LONG;

	bitbit = find_next_zero_bit(fdt->full_fds_bits, maxbit, bitbit) * BITS_PER_LONG;
	if (bitbit > maxfd)
		return maxfd;
	if (bitbit > start)
		start = bitbit;
	return find_next_zero_bit(fdt->open_fds, maxfd, start);
}

/*
 * allocate a file descriptor, mark it busy.
 */
int __alloc_fd(struct files_struct *files,
	       unsigned start, unsigned end, unsigned flags)
{
	unsigned int fd;
	int error;
	struct fdtable *fdt;

	spin_lock(&files->file_lock);
repeat:
	fdt = files_fdtable(files);
	fd = start;
	if (fd < files->next_fd)
		fd = files->next_fd;

	if (fd < fdt->max_fds)
		fd = find_next_fd(fdt, fd);

	/*
	 * N.B. For clone tasks sharing a files structure, this test
	 * will limit the total number of files that can be opened.
	 */
	error = -EMFILE;
	if (fd >= end)
		goto out;

	error = expand_files(files, fd);
	if (error < 0)
		goto out;

	/*
	 * If we needed to expand the fd array we
	 * might have blocked - try again.
	 */
	if (error)
		goto repeat;

	if (start <= files->next_fd)
		files->next_fd = fd + 1;

	__set_open_fd(fd, fdt);
	if (flags & O_CLOEXEC)
		__set_close_on_exec(fd, fdt);
	else
		__clear_close_on_exec(fd, fdt);
	error = fd;
#if 1
	/* Sanity check */
	if (rcu_access_pointer(fdt->fd[fd]) != NULL) {
		printk(KERN_WARNING "alloc_fd: slot %d not NULL!\n", fd);
		rcu_assign_pointer(fdt->fd[fd], NULL);
	}
#endif

out:
	spin_unlock(&files->file_lock);
	return error;
}

static int alloc_fd(unsigned start, unsigned flags)
{
	return __alloc_fd(current->files, start, rlimit(RLIMIT_NOFILE), flags);
}

int get_unused_fd_flags(unsigned flags)
{
	return __alloc_fd(current->files, 0, rlimit(RLIMIT_NOFILE), flags);
}
EXPORT_SYMBOL(get_unused_fd_flags);

static void __put_unused_fd(struct files_struct *files, unsigned int fd)
{
	struct fdtable *fdt = files_fdtable(files);
	__clear_open_fd(fd, fdt);
	if (fd < files->next_fd)
		files->next_fd = fd;
}

void put_unused_fd(unsigned int fd)
{
	struct files_struct *files = current->files;
	spin_lock(&files->file_lock);
	__put_unused_fd(files, fd);
	spin_unlock(&files->file_lock);
}

EXPORT_SYMBOL(put_unused_fd);

/*
 * Install a file pointer in the fd array.
 *
 * The VFS is full of places where we drop the files lock between
 * setting the open_fds bitmap and installing the file in the file
 * array. At any such point, we are vulnerable to a dup2() race
 * installing a file in the array before us. We need to detect this and
 * fput() the struct file we are about to overwrite in this case.
 *
 * It should never happen - if we allow dup2() to do it, _really_ bad things
 * will follow.
 *
 * NOTE: __fd_install() variant is really, really low-level; don't
 * use it unless you are forced to by truly lousy API shoved down
 * your throat. 'files' *MUST* be either current->files or obtained
 * by get_files_struct(current) done by whoever had given it to you,
 * or really bad things will happen. Normally you want to use
 * fd_install() instead.
 */

void __fd_install(struct files_struct *files, unsigned int fd,
		  struct file *file)
{
	struct fdtable *fdt;

	rcu_read_lock_sched();

	if (unlikely(files->resize_in_progress)) {
		rcu_read_unlock_sched();
		spin_lock(&files->file_lock);
		fdt = files_fdtable(files);
		BUG_ON(fdt->fd[fd] != NULL);
		rcu_assign_pointer(fdt->fd[fd], file);
		spin_unlock(&files->file_lock);
		return;
	}
	/* coupled with smp_wmb() in expand_fdtable() */
	smp_rmb();
	fdt = rcu_dereference_sched(files->fdt);
	BUG_ON(fdt->fd[fd] != NULL);
	rcu_assign_pointer(fdt->fd[fd], file);
	rcu_read_unlock_sched();
}

void fd_install(unsigned int fd, struct file *file)
{
	__fd_install(current->files, fd, file);
}

EXPORT_SYMBOL(fd_install);

/*
 * The same warnings as for __alloc_fd()/__fd_install() apply here...
 */
int __close_fd(struct files_struct *files, unsigned fd)
{
	struct file *file;
	struct fdtable *fdt;

	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	if (fd >= fdt->max_fds)
		goto out_unlock;
	file = fdt->fd[fd];
	if (!file)
		goto out_unlock;
	rcu_assign_pointer(fdt->fd[fd], NULL);
	__put_unused_fd(files, fd);
	spin_unlock(&files->file_lock);
	return filp_close(file, files);

out_unlock:
	spin_unlock(&files->file_lock);
	return -EBADF;
}
EXPORT_SYMBOL(__close_fd); /* for ksys_close() */

/*
 * variant of __close_fd that gets a ref on the file for later fput
 */
int __close_fd_get_file(unsigned int fd, struct file **res)
{
	struct files_struct *files = current->files;
	struct file *file;
	struct fdtable *fdt;

	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	if (fd >= fdt->max_fds)
		goto out_unlock;
	file = fdt->fd[fd];
	if (!file)
		goto out_unlock;
	rcu_assign_pointer(fdt->fd[fd], NULL);
	__put_unused_fd(files, fd);
	spin_unlock(&files->file_lock);
	get_file(file);
	*res = file;
	return filp_close(file, files);

out_unlock:
	spin_unlock(&files->file_lock);
	*res = NULL;
	return -ENOENT;
}

void do_close_on_exec(struct files_struct *files)
{
	unsigned i;
	struct fdtable *fdt;

	/* exec unshares first */
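	/*
	 * ->file_lock is dropped around filp_close() below because it can
	 * sleep, so the fdtable pointer is re-read on every word in case
	 * the table was reallocated in the meantime.
	 */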
	spin_lock(&files->file_lock);
	for (i = 0; ; i++) {
		unsigned long set;
		unsigned fd = i * BITS_PER_LONG;
		fdt = files_fdtable(files);
		if (fd >= fdt->max_fds)
			break;
		set = fdt->close_on_exec[i];
		if (!set)
			continue;
		fdt->close_on_exec[i] = 0;
		for ( ; set ; fd++, set >>= 1) {
			struct file *file;
			if (!(set & 1))
				continue;
			file = fdt->fd[fd];
			if (!file)
				continue;
			rcu_assign_pointer(fdt->fd[fd], NULL);
			__put_unused_fd(files, fd);
			spin_unlock(&files->file_lock);
			filp_close(file, files);
			cond_resched();
			spin_lock(&files->file_lock);
		}
	}
	spin_unlock(&files->file_lock);
}

static struct file *__fget(unsigned int fd, fmode_t mask, unsigned int refs)
{
	struct files_struct *files = current->files;
	struct file *file;

	rcu_read_lock();
loop:
	file = fcheck_files(files, fd);
	if (file) {
		/*
		 * If the file object's refcount couldn't be taken, loop:
		 * the dup2() atomicity guarantee means the slot may now
		 * hold a new file (or a NULL pointer).
		 */
		if (file->f_mode & mask)
			file = NULL;
		else if (!get_file_rcu_many(file, refs))
			goto loop;
	}
	rcu_read_unlock();

	return file;
}

struct file *fget_many(unsigned int fd, unsigned int refs)
{
	return __fget(fd, FMODE_PATH, refs);
}

struct file *fget(unsigned int fd)
{
	return __fget(fd, FMODE_PATH, 1);
}
EXPORT_SYMBOL(fget);

struct file *fget_raw(unsigned int fd)
{
	return __fget(fd, 0, 1);
}
EXPORT_SYMBOL(fget_raw);

/*
 * Lightweight file lookup - no refcnt increment if fd table isn't shared.
 *
 * You can use this instead of fget if you satisfy all of the following
 * conditions:
 * 1) You must call fput_light before exiting the syscall and returning control
 *    to userspace (i.e. you cannot remember the returned struct file * after
 *    returning to userspace).
 * 2) You must not call filp_close on the returned struct file * in between
 *    calls to fget_light and fput_light.
 * 3) You must not clone the current task in between the calls to fget_light
 *    and fput_light.
 *
 * The fput_needed flag returned by fget_light should be passed to the
 * corresponding fput_light.
 */
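/*
 * The value returned below packs the struct file pointer together with
 * the FDPUT_* flags in its two low bits; struct file is always at least
 * word-aligned, so those bits are otherwise zero.
 */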
static unsigned long __fget_light(unsigned int fd, fmode_t mask)
{
	struct files_struct *files = current->files;
	struct file *file;

	if (atomic_read(&files->count) == 1) {
		file = __fcheck_files(files, fd);
		if (!file || unlikely(file->f_mode & mask))
			return 0;
		return (unsigned long)file;
	} else {
		file = __fget(fd, mask, 1);
		if (!file)
			return 0;
		return FDPUT_FPUT | (unsigned long)file;
	}
}

unsigned long __fdget(unsigned int fd)
{
	return __fget_light(fd, FMODE_PATH);
}
EXPORT_SYMBOL(__fdget);

unsigned long __fdget_raw(unsigned int fd)
{
	return __fget_light(fd, 0);
}

unsigned long __fdget_pos(unsigned int fd)
{
	unsigned long v = __fdget(fd);
	struct file *file = (struct file *)(v & ~3);

	if (file && (file->f_mode & FMODE_ATOMIC_POS)) {
		if (file_count(file) > 1) {
			v |= FDPUT_POS_UNLOCK;
			mutex_lock(&file->f_pos_lock);
		}
	}
	return v;
}

void __f_unlock_pos(struct file *f)
{
	mutex_unlock(&f->f_pos_lock);
}

/*
 * We only lock f_pos if we have threads or if the file might be
 * shared with another process. In both cases we'll have an elevated
 * file count (done either by fdget() or by fork()).
 */

void set_close_on_exec(unsigned int fd, int flag)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;
	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	if (flag)
		__set_close_on_exec(fd, fdt);
	else
		__clear_close_on_exec(fd, fdt);
	spin_unlock(&files->file_lock);
}

bool get_close_on_exec(unsigned int fd)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;
	bool res;
	rcu_read_lock();
	fdt = files_fdtable(files);
	res = close_on_exec(fd, fdt);
	rcu_read_unlock();
	return res;
}

static int do_dup2(struct files_struct *files,
	struct file *file, unsigned fd, unsigned flags)
__releases(&files->file_lock)
{
	struct file *tofree;
	struct fdtable *fdt;

	/*
	 * We need to detect attempts to do dup2() over an allocated but
	 * still unfinished descriptor. NB: OpenBSD avoids that at the price
	 * of extra work in their equivalent of fget() - they insert the
	 * struct file immediately after grabbing the descriptor, mark it
	 * larval if more work (e.g. actual opening) is needed and make sure
	 * that fget() treats larval files as absent. Potentially interesting,
	 * but while extra work in fget() is trivial, locking implications
	 * and amount of surgery on open()-related paths in VFS are not.
	 * FreeBSD fails with -EBADF in the same situation, NetBSD's "solution"
	 * deadlocks in rather amusing ways, AFAICS. All of that is out of
	 * scope of POSIX or SUS, since neither considers shared descriptor
	 * tables and this condition does not arise without those.
	 */
	fdt = files_fdtable(files);
	tofree = fdt->fd[fd];
	if (!tofree && fd_is_open(fd, fdt))
		goto Ebusy;
	get_file(file);
	rcu_assign_pointer(fdt->fd[fd], file);
	__set_open_fd(fd, fdt);
	if (flags & O_CLOEXEC)
		__set_close_on_exec(fd, fdt);
	else
		__clear_close_on_exec(fd, fdt);
	spin_unlock(&files->file_lock);

	if (tofree)
		filp_close(tofree, files);

	return fd;

Ebusy:
	spin_unlock(&files->file_lock);
	return -EBUSY;
}

int replace_fd(unsigned fd, struct file *file, unsigned flags)
{
	int err;
	struct files_struct *files = current->files;

	if (!file)
		return __close_fd(files, fd);

	if (fd >= rlimit(RLIMIT_NOFILE))
		return -EBADF;

	spin_lock(&files->file_lock);
	err = expand_files(files, fd);
	if (unlikely(err < 0))
		goto out_unlock;
	return do_dup2(files, file, fd, flags);

out_unlock:
	spin_unlock(&files->file_lock);
	return err;
}

static int ksys_dup3(unsigned int oldfd, unsigned int newfd, int flags)
{
	int err = -EBADF;
	struct file *file;
	struct files_struct *files = current->files;

	if ((flags & ~O_CLOEXEC) != 0)
		return -EINVAL;

	if (unlikely(oldfd == newfd))
		return -EINVAL;

	if (newfd >= rlimit(RLIMIT_NOFILE))
		return -EBADF;

	spin_lock(&files->file_lock);
	err = expand_files(files, newfd);
	file = fcheck(oldfd);
	if (unlikely(!file))
		goto Ebadf;
	if (unlikely(err < 0)) {
		if (err == -EMFILE)
			goto Ebadf;
		goto out_unlock;
	}
	return do_dup2(files, file, newfd, flags);

Ebadf:
	err = -EBADF;
out_unlock:
	spin_unlock(&files->file_lock);
	return err;
}

SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
{
	return ksys_dup3(oldfd, newfd, flags);
}

SYSCALL_DEFINE2(dup2, unsigned int, oldfd, unsigned int, newfd)
{
	if (unlikely(newfd == oldfd)) { /* corner case */
		struct files_struct *files = current->files;
		int retval = oldfd;

		rcu_read_lock();
		if (!fcheck_files(files, oldfd))
			retval = -EBADF;
		rcu_read_unlock();
		return retval;
	}
	return ksys_dup3(oldfd, newfd, 0);
}

int ksys_dup(unsigned int fildes)
{
	int ret = -EBADF;
	struct file *file = fget_raw(fildes);

	if (file) {
		ret = get_unused_fd_flags(0);
		if (ret >= 0)
			fd_install(ret, file);
		else
			fput(file);
	}
	return ret;
}

SYSCALL_DEFINE1(dup, unsigned int, fildes)
{
	return ksys_dup(fildes);
}

int f_dupfd(unsigned int from, struct file *file, unsigned flags)
{
	int err;
	if (from >= rlimit(RLIMIT_NOFILE))
		return -EINVAL;
	err = alloc_fd(from, flags);
	if (err >= 0) {
		get_file(file);
		fd_install(err, file);
	}
	return err;
}

int iterate_fd(struct files_struct *files, unsigned n,
		int (*f)(const void *, struct file *, unsigned),
		const void *p)
{
	struct fdtable *fdt;
	int res = 0;
	if (!files)
		return 0;
	spin_lock(&files->file_lock);
	for (fdt = files_fdtable(files); n < fdt->max_fds; n++) {
		struct file *file;
		file = rcu_dereference_check_fdtable(files, fdt->fd[n]);
		if (!file)
			continue;
		res = f(p, file, n);
		if (res)
			break;
	}
	spin_unlock(&files->file_lock);
	return res;
}
EXPORT_SYMBOL(iterate_fd);