v3.1
/*
 *  linux/fs/file.c
 *
 *  Copyright (C) 1998-1999, Stephen Tweedie and Bill Hawes
 *
 *  Manage the dynamic fd arrays in the process files_struct.
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/time.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>

struct fdtable_defer {
	spinlock_t lock;
	struct work_struct wq;
	struct fdtable *next;
};

int sysctl_nr_open __read_mostly = 1024*1024;
int sysctl_nr_open_min = BITS_PER_LONG;
int sysctl_nr_open_max = 1024 * 1024; /* raised later */

/*
 * We use this list to defer freeing fdtables that have vmalloced
 * sets/arrays. By keeping a per-cpu list, we avoid having to embed
 * the work_struct in fdtable itself, which avoids a 64 byte (i386)
 * increase in this per-task structure.
 */
static DEFINE_PER_CPU(struct fdtable_defer, fdtable_defer_list);

static void *alloc_fdmem(unsigned int size)
{
	/*
	 * Very large allocations can stress page reclaim, so fall back to
	 * vmalloc() if the allocation size will be considered "large" by the VM.
	 */
	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
		void *data = kmalloc(size, GFP_KERNEL|__GFP_NOWARN);
		if (data != NULL)
			return data;
	}
	return vmalloc(size);
}
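
/*
 * Editorial note (not part of the kernel source): with 4 KiB pages and
 * PAGE_ALLOC_COSTLY_ORDER == 3, the kmalloc() path above handles requests
 * up to 32 KiB, i.e. fd arrays of up to 4096 slots on a 64-bit machine
 * (4096 * sizeof(struct file *) == 32768); anything larger goes straight
 * to vmalloc().
 */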

static void free_fdmem(void *ptr)
{
	is_vmalloc_addr(ptr) ? vfree(ptr) : kfree(ptr);
}

static void __free_fdtable(struct fdtable *fdt)
{
	free_fdmem(fdt->fd);
	free_fdmem(fdt->open_fds);
	kfree(fdt);
}

static void free_fdtable_work(struct work_struct *work)
{
	struct fdtable_defer *f =
		container_of(work, struct fdtable_defer, wq);
	struct fdtable *fdt;

	spin_lock_bh(&f->lock);
	fdt = f->next;
	f->next = NULL;
	spin_unlock_bh(&f->lock);
	while (fdt) {
		struct fdtable *next = fdt->next;

		__free_fdtable(fdt);
		fdt = next;
	}
}

void free_fdtable_rcu(struct rcu_head *rcu)
{
	struct fdtable *fdt = container_of(rcu, struct fdtable, rcu);
	struct fdtable_defer *fddef;

	BUG_ON(!fdt);

	if (fdt->max_fds <= NR_OPEN_DEFAULT) {
		/*
		 * This fdtable is embedded in the files structure and that
		 * structure itself is getting destroyed.
		 */
		kmem_cache_free(files_cachep,
				container_of(fdt, struct files_struct, fdtab));
		return;
	}
	if (!is_vmalloc_addr(fdt->fd) && !is_vmalloc_addr(fdt->open_fds)) {
		kfree(fdt->fd);
		kfree(fdt->open_fds);
		kfree(fdt);
	} else {
		fddef = &get_cpu_var(fdtable_defer_list);
		spin_lock(&fddef->lock);
		fdt->next = fddef->next;
		fddef->next = fdt;
		/* vmallocs are handled from the workqueue context */
		schedule_work(&fddef->wq);
		spin_unlock(&fddef->lock);
		put_cpu_var(fdtable_defer_list);
	}
}

/*
 * Expand the fdset in the files_struct.  Called with the files spinlock
 * held for write.
 */
static void copy_fdtable(struct fdtable *nfdt, struct fdtable *ofdt)
{
	unsigned int cpy, set;

	BUG_ON(nfdt->max_fds < ofdt->max_fds);

	cpy = ofdt->max_fds * sizeof(struct file *);
	set = (nfdt->max_fds - ofdt->max_fds) * sizeof(struct file *);
	memcpy(nfdt->fd, ofdt->fd, cpy);
	memset((char *)(nfdt->fd) + cpy, 0, set);

	cpy = ofdt->max_fds / BITS_PER_BYTE;
	set = (nfdt->max_fds - ofdt->max_fds) / BITS_PER_BYTE;
	memcpy(nfdt->open_fds, ofdt->open_fds, cpy);
	memset((char *)(nfdt->open_fds) + cpy, 0, set);
	memcpy(nfdt->close_on_exec, ofdt->close_on_exec, cpy);
	memset((char *)(nfdt->close_on_exec) + cpy, 0, set);
}
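
/*
 * Editorial example (not part of the kernel source): growing a table from
 * 256 to 1024 slots on a 64-bit machine copies cpy = 256 * 8 = 2048 bytes
 * of file pointers and zeroes set = 768 * 8 = 6144 bytes of new slots;
 * for each bitmap it copies 256/8 = 32 bytes and zeroes 768/8 = 96 bytes.
 */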

static struct fdtable * alloc_fdtable(unsigned int nr)
{
	struct fdtable *fdt;
	char *data;

	/*
	 * Figure out how many fds we actually want to support in this fdtable.
	 * Allocation steps are keyed to the size of the fdarray, since it
	 * grows far faster than any of the other dynamic data. We try to fit
	 * the fdarray into comfortable page-tuned chunks: starting at 1024B
	 * and growing in powers of two from there on.
	 */
	nr /= (1024 / sizeof(struct file *));
	nr = roundup_pow_of_two(nr + 1);
	nr *= (1024 / sizeof(struct file *));
	/*
	 * Note that this can drive nr *below* what we had passed if sysctl_nr_open
	 * had been set lower between the check in expand_files() and here.  Deal
	 * with that in caller, it's cheaper that way.
	 *
	 * We make sure that nr remains a multiple of BITS_PER_LONG - otherwise
	 * bitmaps handling below becomes unpleasant, to put it mildly...
	 */
	if (unlikely(nr > sysctl_nr_open))
		nr = ((sysctl_nr_open - 1) | (BITS_PER_LONG - 1)) + 1;

	fdt = kmalloc(sizeof(struct fdtable), GFP_KERNEL);
	if (!fdt)
		goto out;
	fdt->max_fds = nr;
	data = alloc_fdmem(nr * sizeof(struct file *));
	if (!data)
		goto out_fdt;
	fdt->fd = (struct file **)data;
	data = alloc_fdmem(max_t(unsigned int,
				 2 * nr / BITS_PER_BYTE, L1_CACHE_BYTES));
	if (!data)
		goto out_arr;
	fdt->open_fds = (fd_set *)data;
	data += nr / BITS_PER_BYTE;
	fdt->close_on_exec = (fd_set *)data;
	fdt->next = NULL;

	return fdt;

out_arr:
	free_fdmem(fdt->fd);
out_fdt:
	kfree(fdt);
out:
	return NULL;
}
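
/*
 * Editorial example (not part of the kernel source): on a 64-bit machine
 * 1024 / sizeof(struct file *) == 128, so a request for nr = 300 becomes
 * 300/128 = 2, roundup_pow_of_two(3) = 4, 4 * 128 = 512 slots - i.e. the
 * fd array lands on a 4096-byte (one page) allocation.
 */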

/*
 * Expand the file descriptor table.
 * This function will allocate a new fdtable and both fd array and fdset, of
 * the given size.
 * Return <0 error code on error; 1 on successful completion.
 * The files->file_lock should be held on entry, and will be held on exit.
 */
static int expand_fdtable(struct files_struct *files, int nr)
	__releases(files->file_lock)
	__acquires(files->file_lock)
{
	struct fdtable *new_fdt, *cur_fdt;

	spin_unlock(&files->file_lock);
	new_fdt = alloc_fdtable(nr);
	spin_lock(&files->file_lock);
	if (!new_fdt)
		return -ENOMEM;
	/*
	 * extremely unlikely race - sysctl_nr_open decreased between the check in
	 * caller and alloc_fdtable().  Cheaper to catch it here...
	 */
	if (unlikely(new_fdt->max_fds <= nr)) {
		__free_fdtable(new_fdt);
		return -EMFILE;
	}
	/*
	 * Check again since another task may have expanded the fd table while
	 * we dropped the lock
	 */
	cur_fdt = files_fdtable(files);
	if (nr >= cur_fdt->max_fds) {
		/* Continue as planned */
		copy_fdtable(new_fdt, cur_fdt);
		rcu_assign_pointer(files->fdt, new_fdt);
		if (cur_fdt->max_fds > NR_OPEN_DEFAULT)
			free_fdtable(cur_fdt);
	} else {
		/* Somebody else expanded, so undo our attempt */
		__free_fdtable(new_fdt);
	}
	return 1;
}

/*
 * Expand files.
 * This function will expand the file structures, if the requested size exceeds
 * the current capacity and there is room for expansion.
 * Return <0 error code on error; 0 when nothing done; 1 when files were
 * expanded and execution may have blocked.
 * The files->file_lock should be held on entry, and will be held on exit.
 */
int expand_files(struct files_struct *files, int nr)
{
	struct fdtable *fdt;

	fdt = files_fdtable(files);

	/*
	 * N.B. For clone tasks sharing a files structure, this test
	 * will limit the total number of files that can be opened.
	 */
	if (nr >= rlimit(RLIMIT_NOFILE))
		return -EMFILE;

	/* Do we need to expand? */
	if (nr < fdt->max_fds)
		return 0;

	/* Can we expand? */
	if (nr >= sysctl_nr_open)
		return -EMFILE;

	/* All good, so we try */
	return expand_fdtable(files, nr);
}

static int count_open_files(struct fdtable *fdt)
{
	int size = fdt->max_fds;
	int i;

	/* Find the last open fd */
	for (i = size/(8*sizeof(long)); i > 0; ) {
		if (fdt->open_fds->fds_bits[--i])
			break;
	}
	i = (i+1) * 8 * sizeof(long);
	return i;
}
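
/*
 * Editorial example (not part of the kernel source): with 64-bit longs,
 * a table whose highest open descriptor is fd 70 has a non-zero word in
 * fds_bits[1], so the loop stops at i == 1 and the function returns
 * (1 + 1) * 64 == 128 - always a multiple of BITS_PER_LONG, rounded up
 * past the last open fd.
 */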

/*
 * Allocate a new files structure and copy contents from the
 * passed in files structure.
 * errorp will be valid only when the returned files_struct is NULL.
 */
struct files_struct *dup_fd(struct files_struct *oldf, int *errorp)
{
	struct files_struct *newf;
	struct file **old_fds, **new_fds;
	int open_files, size, i;
	struct fdtable *old_fdt, *new_fdt;

	*errorp = -ENOMEM;
	newf = kmem_cache_alloc(files_cachep, GFP_KERNEL);
	if (!newf)
		goto out;

	atomic_set(&newf->count, 1);

	spin_lock_init(&newf->file_lock);
	newf->next_fd = 0;
	new_fdt = &newf->fdtab;
	new_fdt->max_fds = NR_OPEN_DEFAULT;
	new_fdt->close_on_exec = (fd_set *)&newf->close_on_exec_init;
	new_fdt->open_fds = (fd_set *)&newf->open_fds_init;
	new_fdt->fd = &newf->fd_array[0];
	new_fdt->next = NULL;

	spin_lock(&oldf->file_lock);
	old_fdt = files_fdtable(oldf);
	open_files = count_open_files(old_fdt);

	/*
	 * Check whether we need to allocate a larger fd array and fd set.
	 */
	while (unlikely(open_files > new_fdt->max_fds)) {
		spin_unlock(&oldf->file_lock);

		if (new_fdt != &newf->fdtab)
			__free_fdtable(new_fdt);

		new_fdt = alloc_fdtable(open_files - 1);
		if (!new_fdt) {
			*errorp = -ENOMEM;
			goto out_release;
		}

		/* beyond sysctl_nr_open; nothing to do */
		if (unlikely(new_fdt->max_fds < open_files)) {
			__free_fdtable(new_fdt);
			*errorp = -EMFILE;
			goto out_release;
		}

		/*
		 * Reacquire the oldf lock and a pointer to its fd table;
		 * it may have grown a new, bigger fd table in the meantime,
		 * and we need the latest pointer.
		 */
		spin_lock(&oldf->file_lock);
		old_fdt = files_fdtable(oldf);
		open_files = count_open_files(old_fdt);
	}

	old_fds = old_fdt->fd;
	new_fds = new_fdt->fd;

	memcpy(new_fdt->open_fds->fds_bits,
		old_fdt->open_fds->fds_bits, open_files/8);
	memcpy(new_fdt->close_on_exec->fds_bits,
		old_fdt->close_on_exec->fds_bits, open_files/8);

	for (i = open_files; i != 0; i--) {
		struct file *f = *old_fds++;
		if (f) {
			get_file(f);
		} else {
			/*
			 * The fd may be claimed in the fd bitmap but not yet
			 * instantiated in the files array if a sibling thread
			 * is partway through open().  So make sure that this
			 * fd is available to the new process.
			 */
			FD_CLR(open_files - i, new_fdt->open_fds);
		}
		rcu_assign_pointer(*new_fds++, f);
	}
	spin_unlock(&oldf->file_lock);

	/* compute the remainder to be cleared */
	size = (new_fdt->max_fds - open_files) * sizeof(struct file *);

	/* This is long-word aligned, thus could use an optimized version */
	memset(new_fds, 0, size);

	if (new_fdt->max_fds > open_files) {
		int left = (new_fdt->max_fds - open_files) / 8;
		int start = open_files / (8 * sizeof(unsigned long));

		memset(&new_fdt->open_fds->fds_bits[start], 0, left);
		memset(&new_fdt->close_on_exec->fds_bits[start], 0, left);
	}

	rcu_assign_pointer(newf->fdt, new_fdt);

	return newf;

out_release:
	kmem_cache_free(files_cachep, newf);
out:
	return NULL;
}

static void __devinit fdtable_defer_list_init(int cpu)
{
	struct fdtable_defer *fddef = &per_cpu(fdtable_defer_list, cpu);
	spin_lock_init(&fddef->lock);
	INIT_WORK(&fddef->wq, free_fdtable_work);
	fddef->next = NULL;
}

void __init files_defer_init(void)
{
	int i;
	for_each_possible_cpu(i)
		fdtable_defer_list_init(i);
	sysctl_nr_open_max = min((size_t)INT_MAX, ~(size_t)0/sizeof(void *)) &
			     -BITS_PER_LONG;
}

struct files_struct init_files = {
	.count		= ATOMIC_INIT(1),
	.fdt		= &init_files.fdtab,
	.fdtab		= {
		.max_fds	= NR_OPEN_DEFAULT,
		.fd		= &init_files.fd_array[0],
		.close_on_exec	= (fd_set *)&init_files.close_on_exec_init,
		.open_fds	= (fd_set *)&init_files.open_fds_init,
	},
	.file_lock	= __SPIN_LOCK_UNLOCKED(init_task.file_lock),
};

/*
 * allocate a file descriptor, mark it busy.
 */
int alloc_fd(unsigned start, unsigned flags)
{
	struct files_struct *files = current->files;
	unsigned int fd;
	int error;
	struct fdtable *fdt;

	spin_lock(&files->file_lock);
repeat:
	fdt = files_fdtable(files);
	fd = start;
	if (fd < files->next_fd)
		fd = files->next_fd;

	if (fd < fdt->max_fds)
		fd = find_next_zero_bit(fdt->open_fds->fds_bits,
					   fdt->max_fds, fd);

	error = expand_files(files, fd);
	if (error < 0)
		goto out;

	/*
	 * If we needed to expand the fd array we
	 * might have blocked - try again.
	 */
	if (error)
		goto repeat;

	if (start <= files->next_fd)
		files->next_fd = fd + 1;

	FD_SET(fd, fdt->open_fds);
	if (flags & O_CLOEXEC)
		FD_SET(fd, fdt->close_on_exec);
	else
		FD_CLR(fd, fdt->close_on_exec);
	error = fd;
#if 1
	/* Sanity check */
	if (rcu_dereference_raw(fdt->fd[fd]) != NULL) {
		printk(KERN_WARNING "alloc_fd: slot %d not NULL!\n", fd);
		rcu_assign_pointer(fdt->fd[fd], NULL);
	}
#endif

out:
	spin_unlock(&files->file_lock);
	return error;
}

int get_unused_fd(void)
{
	return alloc_fd(0, 0);
}
EXPORT_SYMBOL(get_unused_fd);
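
/*
 * Editorial sketch (not part of the kernel source): the usual consumer of
 * this allocator reserves a descriptor first and publishes the struct file
 * only once it is fully set up, e.g.:
 *
 *	int fd = get_unused_fd();
 *	if (fd < 0)
 *		return fd;
 *	fd_install(fd, filp);	// filp: a fully-opened struct file
 *	return fd;
 *
 * The gap between reserving the bitmap slot and installing the pointer is
 * exactly the window that the dup_fd() comment above has to cope with.
 */
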
v6.2
// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/file.c
 *
 *  Copyright (C) 1998-1999, Stephen Tweedie and Bill Hawes
 *
 *  Manage the dynamic fd arrays in the process files_struct.
 */

#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/close_range.h>
#include <net/sock.h>

#include "internal.h"

unsigned int sysctl_nr_open __read_mostly = 1024*1024;
unsigned int sysctl_nr_open_min = BITS_PER_LONG;
/* our min() is unusable in constant expressions ;-/ */
#define __const_min(x, y) ((x) < (y) ? (x) : (y))
unsigned int sysctl_nr_open_max =
	__const_min(INT_MAX, ~(size_t)0/sizeof(void *)) & -BITS_PER_LONG;

static void __free_fdtable(struct fdtable *fdt)
{
	kvfree(fdt->fd);
	kvfree(fdt->open_fds);
	kfree(fdt);
}

static void free_fdtable_rcu(struct rcu_head *rcu)
{
	__free_fdtable(container_of(rcu, struct fdtable, rcu));
}

#define BITBIT_NR(nr)	BITS_TO_LONGS(BITS_TO_LONGS(nr))
#define BITBIT_SIZE(nr)	(BITBIT_NR(nr) * sizeof(long))

/*
 * Copy 'count' fd bits from the old table to the new table and clear the extra
 * space if any.  This does not copy the file pointers.  Called with the files
 * spinlock held for write.
 */
static void copy_fd_bitmaps(struct fdtable *nfdt, struct fdtable *ofdt,
			    unsigned int count)
{
	unsigned int cpy, set;

	cpy = count / BITS_PER_BYTE;
	set = (nfdt->max_fds - count) / BITS_PER_BYTE;
	memcpy(nfdt->open_fds, ofdt->open_fds, cpy);
	memset((char *)nfdt->open_fds + cpy, 0, set);
	memcpy(nfdt->close_on_exec, ofdt->close_on_exec, cpy);
	memset((char *)nfdt->close_on_exec + cpy, 0, set);

	cpy = BITBIT_SIZE(count);
	set = BITBIT_SIZE(nfdt->max_fds) - cpy;
	memcpy(nfdt->full_fds_bits, ofdt->full_fds_bits, cpy);
	memset((char *)nfdt->full_fds_bits + cpy, 0, set);
}
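
/*
 * Editorial example (not part of the kernel source): full_fds_bits is a
 * second-level bitmap with one bit per word of open_fds, so on a 64-bit
 * machine a table of nr = 1024 descriptors has
 * BITBIT_NR(1024) = BITS_TO_LONGS(16) = 1 and BITBIT_SIZE(1024) = 8 bytes:
 * a single long summarising sixteen open_fds words.
 */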

/*
 * Copy all file descriptors from the old table to the new, expanded table and
 * clear the extra space.  Called with the files spinlock held for write.
 */
static void copy_fdtable(struct fdtable *nfdt, struct fdtable *ofdt)
{
	size_t cpy, set;

	BUG_ON(nfdt->max_fds < ofdt->max_fds);

	cpy = ofdt->max_fds * sizeof(struct file *);
	set = (nfdt->max_fds - ofdt->max_fds) * sizeof(struct file *);
	memcpy(nfdt->fd, ofdt->fd, cpy);
	memset((char *)nfdt->fd + cpy, 0, set);

	copy_fd_bitmaps(nfdt, ofdt, ofdt->max_fds);
}

/*
 * Note how the fdtable bitmap allocations very much have to be a multiple of
 * BITS_PER_LONG. This is not only because we walk those things in chunks of
 * 'unsigned long' in some places, but simply because that is how the Linux
 * kernel bitmaps are defined to work: they are not "bits in an array of bytes",
 * they are very much "bits in an array of unsigned long".
 *
 * The ALIGN(nr, BITS_PER_LONG) here is for clarity: since we just multiplied
 * by that "1024/sizeof(ptr)" before, we already know there are sufficient
 * clear low bits. Clang seems to realize that, gcc ends up being confused.
 *
 * On a 128-bit machine, the ALIGN() would actually matter. In the meantime,
 * let's consider it documentation (and maybe a test-case for gcc to improve
 * its code generation ;)
 */
static struct fdtable * alloc_fdtable(unsigned int nr)
{
	struct fdtable *fdt;
	void *data;

	/*
	 * Figure out how many fds we actually want to support in this fdtable.
	 * Allocation steps are keyed to the size of the fdarray, since it
	 * grows far faster than any of the other dynamic data. We try to fit
	 * the fdarray into comfortable page-tuned chunks: starting at 1024B
	 * and growing in powers of two from there on.
	 */
	nr /= (1024 / sizeof(struct file *));
	nr = roundup_pow_of_two(nr + 1);
	nr *= (1024 / sizeof(struct file *));
	nr = ALIGN(nr, BITS_PER_LONG);
	/*
	 * Note that this can drive nr *below* what we had passed if sysctl_nr_open
	 * had been set lower between the check in expand_files() and here.  Deal
	 * with that in caller, it's cheaper that way.
	 *
	 * We make sure that nr remains a multiple of BITS_PER_LONG - otherwise
	 * bitmaps handling below becomes unpleasant, to put it mildly...
	 */
	if (unlikely(nr > sysctl_nr_open))
		nr = ((sysctl_nr_open - 1) | (BITS_PER_LONG - 1)) + 1;

	fdt = kmalloc(sizeof(struct fdtable), GFP_KERNEL_ACCOUNT);
	if (!fdt)
		goto out;
	fdt->max_fds = nr;
	data = kvmalloc_array(nr, sizeof(struct file *), GFP_KERNEL_ACCOUNT);
	if (!data)
		goto out_fdt;
	fdt->fd = data;

	data = kvmalloc(max_t(size_t,
				 2 * nr / BITS_PER_BYTE + BITBIT_SIZE(nr), L1_CACHE_BYTES),
				 GFP_KERNEL_ACCOUNT);
	if (!data)
		goto out_arr;
	fdt->open_fds = data;
	data += nr / BITS_PER_BYTE;
	fdt->close_on_exec = data;
	data += nr / BITS_PER_BYTE;
	fdt->full_fds_bits = data;

	return fdt;

out_arr:
	kvfree(fdt->fd);
out_fdt:
	kfree(fdt);
out:
	return NULL;
}
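
/*
 * Editorial note (not part of the kernel source): the three bitmaps share
 * one kvmalloc() allocation, laid out back to back:
 *
 *	open_fds        nr/8 bytes
 *	close_on_exec   nr/8 bytes
 *	full_fds_bits   BITBIT_SIZE(nr) bytes
 *
 * which is why the single size expression above is
 * 2 * nr / BITS_PER_BYTE + BITBIT_SIZE(nr), padded up to L1_CACHE_BYTES.
 */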

/*
 * Expand the file descriptor table.
 * This function will allocate a new fdtable and both fd array and fdset, of
 * the given size.
 * Return <0 error code on error; 1 on successful completion.
 * The files->file_lock should be held on entry, and will be held on exit.
 */
static int expand_fdtable(struct files_struct *files, unsigned int nr)
	__releases(files->file_lock)
	__acquires(files->file_lock)
{
	struct fdtable *new_fdt, *cur_fdt;

	spin_unlock(&files->file_lock);
	new_fdt = alloc_fdtable(nr);

	/* make sure all fd_install() have seen resize_in_progress
	 * or have finished their rcu_read_lock_sched() section.
	 */
	if (atomic_read(&files->count) > 1)
		synchronize_rcu();

	spin_lock(&files->file_lock);
	if (!new_fdt)
		return -ENOMEM;
	/*
	 * extremely unlikely race - sysctl_nr_open decreased between the check in
	 * caller and alloc_fdtable().  Cheaper to catch it here...
	 */
	if (unlikely(new_fdt->max_fds <= nr)) {
		__free_fdtable(new_fdt);
		return -EMFILE;
	}
	cur_fdt = files_fdtable(files);
	BUG_ON(nr < cur_fdt->max_fds);
	copy_fdtable(new_fdt, cur_fdt);
	rcu_assign_pointer(files->fdt, new_fdt);
	if (cur_fdt != &files->fdtab)
		call_rcu(&cur_fdt->rcu, free_fdtable_rcu);
	/* coupled with smp_rmb() in fd_install() */
	smp_wmb();
	return 1;
}

/*
 * Expand files.
 * This function will expand the file structures, if the requested size exceeds
 * the current capacity and there is room for expansion.
 * Return <0 error code on error; 0 when nothing done; 1 when files were
 * expanded and execution may have blocked.
 * The files->file_lock should be held on entry, and will be held on exit.
 */
static int expand_files(struct files_struct *files, unsigned int nr)
	__releases(files->file_lock)
	__acquires(files->file_lock)
{
	struct fdtable *fdt;
	int expanded = 0;

repeat:
	fdt = files_fdtable(files);

	/* Do we need to expand? */
	if (nr < fdt->max_fds)
		return expanded;

	/* Can we expand? */
	if (nr >= sysctl_nr_open)
		return -EMFILE;

	if (unlikely(files->resize_in_progress)) {
		spin_unlock(&files->file_lock);
		expanded = 1;
		wait_event(files->resize_wait, !files->resize_in_progress);
		spin_lock(&files->file_lock);
		goto repeat;
	}

	/* All good, so we try */
	files->resize_in_progress = true;
	expanded = expand_fdtable(files, nr);
	files->resize_in_progress = false;

	wake_up_all(&files->resize_wait);
	return expanded;
}

static inline void __set_close_on_exec(unsigned int fd, struct fdtable *fdt)
{
	__set_bit(fd, fdt->close_on_exec);
}

static inline void __clear_close_on_exec(unsigned int fd, struct fdtable *fdt)
{
	if (test_bit(fd, fdt->close_on_exec))
		__clear_bit(fd, fdt->close_on_exec);
}

static inline void __set_open_fd(unsigned int fd, struct fdtable *fdt)
{
	__set_bit(fd, fdt->open_fds);
	fd /= BITS_PER_LONG;
	if (!~fdt->open_fds[fd])
		__set_bit(fd, fdt->full_fds_bits);
}

static inline void __clear_open_fd(unsigned int fd, struct fdtable *fdt)
{
	__clear_bit(fd, fdt->open_fds);
	__clear_bit(fd / BITS_PER_LONG, fdt->full_fds_bits);
}

static unsigned int count_open_files(struct fdtable *fdt)
{
	unsigned int size = fdt->max_fds;
	unsigned int i;

	/* Find the last open fd */
	for (i = size / BITS_PER_LONG; i > 0; ) {
		if (fdt->open_fds[--i])
			break;
	}
	i = (i + 1) * BITS_PER_LONG;
	return i;
}

/*
 * Note that a sane fdtable size always has to be a multiple of
 * BITS_PER_LONG, since we have bitmaps that are sized by this.
 *
 * 'max_fds' will normally already be properly aligned, but it
 * turns out that in the close_range() -> __close_range() ->
 * unshare_fd() -> dup_fd() -> sane_fdtable_size() path we can end
 * up having a 'max_fds' value that isn't already aligned.
 *
 * Rather than make close_range() have to worry about this,
 * just make that BITS_PER_LONG alignment be part of a sane
 * fdtable size. Because that's really what it is.
 */
static unsigned int sane_fdtable_size(struct fdtable *fdt, unsigned int max_fds)
{
	unsigned int count;

	count = count_open_files(fdt);
	if (max_fds < NR_OPEN_DEFAULT)
		max_fds = NR_OPEN_DEFAULT;
	return ALIGN(min(count, max_fds), BITS_PER_LONG);
}
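
/*
 * Editorial example (not part of the kernel source): if the parent's table
 * has its highest open descriptor at fd 100, count_open_files() reports 128;
 * a caller passing max_fds = 100 then gets ALIGN(min(128, 100), 64) = 128
 * on a 64-bit machine, so the child's table stays a whole number of bitmap
 * words even though the requested cap was not aligned.
 */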

/*
 * Allocate a new files structure and copy contents from the
 * passed in files structure.
 * errorp will be valid only when the returned files_struct is NULL.
 */
struct files_struct *dup_fd(struct files_struct *oldf, unsigned int max_fds, int *errorp)
{
	struct files_struct *newf;
	struct file **old_fds, **new_fds;
	unsigned int open_files, i;
	struct fdtable *old_fdt, *new_fdt;

	*errorp = -ENOMEM;
	newf = kmem_cache_alloc(files_cachep, GFP_KERNEL);
	if (!newf)
		goto out;

	atomic_set(&newf->count, 1);

	spin_lock_init(&newf->file_lock);
	newf->resize_in_progress = false;
	init_waitqueue_head(&newf->resize_wait);
	newf->next_fd = 0;
	new_fdt = &newf->fdtab;
	new_fdt->max_fds = NR_OPEN_DEFAULT;
	new_fdt->close_on_exec = newf->close_on_exec_init;
	new_fdt->open_fds = newf->open_fds_init;
	new_fdt->full_fds_bits = newf->full_fds_bits_init;
	new_fdt->fd = &newf->fd_array[0];

	spin_lock(&oldf->file_lock);
	old_fdt = files_fdtable(oldf);
	open_files = sane_fdtable_size(old_fdt, max_fds);

	/*
	 * Check whether we need to allocate a larger fd array and fd set.
	 */
	while (unlikely(open_files > new_fdt->max_fds)) {
		spin_unlock(&oldf->file_lock);

		if (new_fdt != &newf->fdtab)
			__free_fdtable(new_fdt);

		new_fdt = alloc_fdtable(open_files - 1);
		if (!new_fdt) {
			*errorp = -ENOMEM;
			goto out_release;
		}

		/* beyond sysctl_nr_open; nothing to do */
		if (unlikely(new_fdt->max_fds < open_files)) {
			__free_fdtable(new_fdt);
			*errorp = -EMFILE;
			goto out_release;
		}

		/*
		 * Reacquire the oldf lock and a pointer to its fd table;
		 * it may have grown a new, bigger fd table in the meantime,
		 * and we need the latest pointer.
		 */
		spin_lock(&oldf->file_lock);
		old_fdt = files_fdtable(oldf);
		open_files = sane_fdtable_size(old_fdt, max_fds);
	}

	copy_fd_bitmaps(new_fdt, old_fdt, open_files);

	old_fds = old_fdt->fd;
	new_fds = new_fdt->fd;

	for (i = open_files; i != 0; i--) {
		struct file *f = *old_fds++;
		if (f) {
			get_file(f);
		} else {
			/*
			 * The fd may be claimed in the fd bitmap but not yet
			 * instantiated in the files array if a sibling thread
			 * is partway through open().  So make sure that this
			 * fd is available to the new process.
			 */
			__clear_open_fd(open_files - i, new_fdt);
		}
		rcu_assign_pointer(*new_fds++, f);
	}
	spin_unlock(&oldf->file_lock);

	/* clear the remainder */
	memset(new_fds, 0, (new_fdt->max_fds - open_files) * sizeof(struct file *));

	rcu_assign_pointer(newf->fdt, new_fdt);

	return newf;

out_release:
	kmem_cache_free(files_cachep, newf);
out:
	return NULL;
}
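
/*
 * Editorial sketch (not part of this file): the classic caller is fork(),
 * where kernel/fork.c's copy_files() does, roughly:
 *
 *	newf = dup_fd(oldf, NR_OPEN_MAX, &error);
 *	if (!newf)
 *		goto out;
 *
 * while close_range(CLOSE_RANGE_UNSHARE) reaches dup_fd() with a smaller
 * max_fds so that descriptors about to be closed are not copied at all.
 */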

static struct fdtable *close_files(struct files_struct * files)
{
	/*
	 * It is safe to dereference the fd table without RCU or
	 * ->file_lock because this is the last reference to the
	 * files structure.
	 */
	struct fdtable *fdt = rcu_dereference_raw(files->fdt);
	unsigned int i, j = 0;

	for (;;) {
		unsigned long set;
		i = j * BITS_PER_LONG;
		if (i >= fdt->max_fds)
			break;
		set = fdt->open_fds[j++];
		while (set) {
			if (set & 1) {
				struct file * file = xchg(&fdt->fd[i], NULL);
				if (file) {
					filp_close(file, files);
					cond_resched();
				}
			}
			i++;
			set >>= 1;
		}
	}

	return fdt;
}

void put_files_struct(struct files_struct *files)
{
	if (atomic_dec_and_test(&files->count)) {
		struct fdtable *fdt = close_files(files);

		/* free the arrays if they are not embedded */
		if (fdt != &files->fdtab)
			__free_fdtable(fdt);
		kmem_cache_free(files_cachep, files);
	}
}

void exit_files(struct task_struct *tsk)
{
	struct files_struct * files = tsk->files;

	if (files) {
		task_lock(tsk);
		tsk->files = NULL;
		task_unlock(tsk);
		put_files_struct(files);
	}
}

struct files_struct init_files = {
	.count		= ATOMIC_INIT(1),
	.fdt		= &init_files.fdtab,
	.fdtab		= {
		.max_fds	= NR_OPEN_DEFAULT,
		.fd		= &init_files.fd_array[0],
		.close_on_exec	= init_files.close_on_exec_init,
		.open_fds	= init_files.open_fds_init,
		.full_fds_bits	= init_files.full_fds_bits_init,
	},
	.file_lock	= __SPIN_LOCK_UNLOCKED(init_files.file_lock),
	.resize_wait	= __WAIT_QUEUE_HEAD_INITIALIZER(init_files.resize_wait),
};

static unsigned int find_next_fd(struct fdtable *fdt, unsigned int start)
{
	unsigned int maxfd = fdt->max_fds;
	unsigned int maxbit = maxfd / BITS_PER_LONG;
	unsigned int bitbit = start / BITS_PER_LONG;

	bitbit = find_next_zero_bit(fdt->full_fds_bits, maxbit, bitbit) * BITS_PER_LONG;
	if (bitbit > maxfd)
		return maxfd;
	if (bitbit > start)
		start = bitbit;
	return find_next_zero_bit(fdt->open_fds, maxfd, start);
}
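
/*
 * Editorial example (not part of the kernel source): full_fds_bits lets
 * the search above skip completely-full words of open_fds. If fds 0-127
 * are all open, bits 0 and 1 of full_fds_bits are set (see
 * __set_open_fd()), so a lookup starting at 0 jumps straight to bit 2,
 * i.e. fd 128, instead of scanning 128 occupied bits one word at a time.
 */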

/*
 * allocate a file descriptor, mark it busy.
 */
static int alloc_fd(unsigned start, unsigned end, unsigned flags)
{
	struct files_struct *files = current->files;
	unsigned int fd;
	int error;
	struct fdtable *fdt;

	spin_lock(&files->file_lock);
repeat:
	fdt = files_fdtable(files);
	fd = start;
	if (fd < files->next_fd)
		fd = files->next_fd;

	if (fd < fdt->max_fds)
		fd = find_next_fd(fdt, fd);

	/*
	 * N.B. For clone tasks sharing a files structure, this test
	 * will limit the total number of files that can be opened.
	 */
	error = -EMFILE;
	if (fd >= end)
		goto out;

	error = expand_files(files, fd);
	if (error < 0)
		goto out;

	/*
	 * If we needed to expand the fd array we
	 * might have blocked - try again.
	 */
	if (error)
		goto repeat;

	if (start <= files->next_fd)
		files->next_fd = fd + 1;

	__set_open_fd(fd, fdt);
	if (flags & O_CLOEXEC)
		__set_close_on_exec(fd, fdt);
	else
		__clear_close_on_exec(fd, fdt);
	error = fd;
#if 1
	/* Sanity check */
	if (rcu_access_pointer(fdt->fd[fd]) != NULL) {
		printk(KERN_WARNING "alloc_fd: slot %d not NULL!\n", fd);
		rcu_assign_pointer(fdt->fd[fd], NULL);
	}
#endif

out:
	spin_unlock(&files->file_lock);
	return error;
}

int __get_unused_fd_flags(unsigned flags, unsigned long nofile)
{
	return alloc_fd(0, nofile, flags);
}

int get_unused_fd_flags(unsigned flags)
{
	return __get_unused_fd_flags(flags, rlimit(RLIMIT_NOFILE));
}
EXPORT_SYMBOL(get_unused_fd_flags);
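
/*
 * Editorial sketch (not part of the kernel source): the canonical pattern
 * built on this API reserves the descriptor, creates the file, and only
 * then installs it; example_fops and priv below are hypothetical,
 * anon_inode_getfile() is used for illustration:
 *
 *	fd = get_unused_fd_flags(O_CLOEXEC);
 *	if (fd < 0)
 *		return fd;
 *	file = anon_inode_getfile("[example]", &example_fops, priv, O_RDWR);
 *	if (IS_ERR(file)) {
 *		put_unused_fd(fd);
 *		return PTR_ERR(file);
 *	}
 *	fd_install(fd, file);
 *	return fd;
 */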

static void __put_unused_fd(struct files_struct *files, unsigned int fd)
{
	struct fdtable *fdt = files_fdtable(files);
	__clear_open_fd(fd, fdt);
	if (fd < files->next_fd)
		files->next_fd = fd;
}

void put_unused_fd(unsigned int fd)
{
	struct files_struct *files = current->files;
	spin_lock(&files->file_lock);
	__put_unused_fd(files, fd);
	spin_unlock(&files->file_lock);
}

EXPORT_SYMBOL(put_unused_fd);

/*
 * Install a file pointer in the fd array.
 *
 * The VFS is full of places where we drop the files lock between
 * setting the open_fds bitmap and installing the file in the file
 * array.  At any such point, we are vulnerable to a dup2() race
 * installing a file in the array before us.  We need to detect this and
 * fput() the struct file we are about to overwrite in this case.
 *
 * It should never happen - if we allow dup2() to do it, _really_ bad things
 * will follow.
 *
 * This consumes the "file" refcount, so callers should treat it
 * as if they had called fput(file).
 */

void fd_install(unsigned int fd, struct file *file)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;

	rcu_read_lock_sched();

	if (unlikely(files->resize_in_progress)) {
		rcu_read_unlock_sched();
		spin_lock(&files->file_lock);
		fdt = files_fdtable(files);
		BUG_ON(fdt->fd[fd] != NULL);
		rcu_assign_pointer(fdt->fd[fd], file);
		spin_unlock(&files->file_lock);
		return;
	}
	/* coupled with smp_wmb() in expand_fdtable() */
	smp_rmb();
	fdt = rcu_dereference_sched(files->fdt);
	BUG_ON(fdt->fd[fd] != NULL);
	rcu_assign_pointer(fdt->fd[fd], file);
	rcu_read_unlock_sched();
}

EXPORT_SYMBOL(fd_install);

/**
 * pick_file - return file associated with fd
 * @files: file struct to retrieve file from
 * @fd: file descriptor to retrieve file for
 *
 * Context: files_lock must be held.
 *
 * Returns: The file associated with @fd (NULL if @fd is not open)
 */
static struct file *pick_file(struct files_struct *files, unsigned fd)
{
	struct fdtable *fdt = files_fdtable(files);
	struct file *file;

	if (fd >= fdt->max_fds)
		return NULL;

	file = fdt->fd[fd];
	if (file) {
		rcu_assign_pointer(fdt->fd[fd], NULL);
		__put_unused_fd(files, fd);
	}
	return file;
}

int close_fd(unsigned fd)
{
	struct files_struct *files = current->files;
	struct file *file;

	spin_lock(&files->file_lock);
	file = pick_file(files, fd);
	spin_unlock(&files->file_lock);
	if (!file)
		return -EBADF;

	return filp_close(file, files);
}
EXPORT_SYMBOL(close_fd); /* for ksys_close() */

/**
 * last_fd - return last valid index into fd table
 * @fdt: file descriptor table
 *
 * Context: Either rcu read lock or files_lock must be held.
 *
 * Returns: Last valid index into fdtable.
 */
static inline unsigned last_fd(struct fdtable *fdt)
{
	return fdt->max_fds - 1;
}

static inline void __range_cloexec(struct files_struct *cur_fds,
				   unsigned int fd, unsigned int max_fd)
{
	struct fdtable *fdt;

	/* make sure we're using the correct maximum value */
	spin_lock(&cur_fds->file_lock);
	fdt = files_fdtable(cur_fds);
	max_fd = min(last_fd(fdt), max_fd);
	if (fd <= max_fd)
		bitmap_set(fdt->close_on_exec, fd, max_fd - fd + 1);
	spin_unlock(&cur_fds->file_lock);
}

static inline void __range_close(struct files_struct *cur_fds, unsigned int fd,
				 unsigned int max_fd)
{
	unsigned n;

	rcu_read_lock();
	n = last_fd(files_fdtable(cur_fds));
	rcu_read_unlock();
	max_fd = min(max_fd, n);

	while (fd <= max_fd) {
		struct file *file;

		spin_lock(&cur_fds->file_lock);
		file = pick_file(cur_fds, fd++);
		spin_unlock(&cur_fds->file_lock);

		if (file) {
			/* found a valid file to close */
			filp_close(file, cur_fds);
			cond_resched();
		}
	}
}

/**
 * __close_range() - Close all file descriptors in a given range.
 *
 * @fd:     starting file descriptor to close
 * @max_fd: last file descriptor to close
 * @flags:  CLOSE_RANGE flags
 *
 * This closes a range of file descriptors. All file descriptors
 * from @fd up to and including @max_fd are closed.
 */
int __close_range(unsigned fd, unsigned max_fd, unsigned int flags)
{
	struct task_struct *me = current;
	struct files_struct *cur_fds = me->files, *fds = NULL;

	if (flags & ~(CLOSE_RANGE_UNSHARE | CLOSE_RANGE_CLOEXEC))
		return -EINVAL;

	if (fd > max_fd)
		return -EINVAL;

	if (flags & CLOSE_RANGE_UNSHARE) {
		int ret;
		unsigned int max_unshare_fds = NR_OPEN_MAX;

		/*
		 * If the caller requested all fds to be made cloexec we always
		 * copy all of the file descriptors since they still want to
		 * use them.
		 */
		if (!(flags & CLOSE_RANGE_CLOEXEC)) {
			/*
			 * If the requested range is greater than the current
			 * maximum, we're closing everything so only copy all
			 * file descriptors beneath the lowest file descriptor.
			 */
			rcu_read_lock();
			if (max_fd >= last_fd(files_fdtable(cur_fds)))
				max_unshare_fds = fd;
			rcu_read_unlock();
		}

		ret = unshare_fd(CLONE_FILES, max_unshare_fds, &fds);
		if (ret)
			return ret;

		/*
		 * We used to share our file descriptor table, and have now
		 * created a private one, make sure we're using it below.
		 */
		if (fds)
			swap(cur_fds, fds);
	}

	if (flags & CLOSE_RANGE_CLOEXEC)
		__range_cloexec(cur_fds, fd, max_fd);
	else
		__range_close(cur_fds, fd, max_fd);

	if (fds) {
		/*
		 * We're done closing the files we were supposed to. Time to install
		 * the new file descriptor table and drop the old one.
		 */
		task_lock(me);
		me->files = cur_fds;
		task_unlock(me);
		put_files_struct(fds);
	}

	return 0;
}
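
/*
 * Editorial sketch (not part of the kernel source): from userspace this is
 * reached via the close_range(2) syscall, e.g. to mark every descriptor
 * above stderr close-on-exec before an exec:
 *
 *	close_range(3, ~0U, CLOSE_RANGE_CLOEXEC);
 *
 * or to close them outright with close_range(3, ~0U, 0).
 */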

/*
 * See close_fd_get_file() below, this variant assumes current->files->file_lock
 * is held.
 */
struct file *__close_fd_get_file(unsigned int fd)
{
	return pick_file(current->files, fd);
}

/*
 * variant of close_fd that gets a ref on the file for later fput.
 * The caller must ensure that filp_close() is called on the file.
 */
struct file *close_fd_get_file(unsigned int fd)
{
	struct files_struct *files = current->files;
	struct file *file;

	spin_lock(&files->file_lock);
	file = pick_file(files, fd);
	spin_unlock(&files->file_lock);

	return file;
}

void do_close_on_exec(struct files_struct *files)
{
	unsigned i;
	struct fdtable *fdt;

	/* exec unshares first */
	spin_lock(&files->file_lock);
	for (i = 0; ; i++) {
		unsigned long set;
		unsigned fd = i * BITS_PER_LONG;
		fdt = files_fdtable(files);
		if (fd >= fdt->max_fds)
			break;
		set = fdt->close_on_exec[i];
		if (!set)
			continue;
		fdt->close_on_exec[i] = 0;
		for ( ; set ; fd++, set >>= 1) {
			struct file *file;
			if (!(set & 1))
				continue;
			file = fdt->fd[fd];
			if (!file)
				continue;
			rcu_assign_pointer(fdt->fd[fd], NULL);
			__put_unused_fd(files, fd);
			spin_unlock(&files->file_lock);
			filp_close(file, files);
			cond_resched();
			spin_lock(&files->file_lock);
		}

	}
	spin_unlock(&files->file_lock);
}

static inline struct file *__fget_files_rcu(struct files_struct *files,
	unsigned int fd, fmode_t mask)
{
	for (;;) {
		struct file *file;
		struct fdtable *fdt = rcu_dereference_raw(files->fdt);
		struct file __rcu **fdentry;

		if (unlikely(fd >= fdt->max_fds))
			return NULL;

		fdentry = fdt->fd + array_index_nospec(fd, fdt->max_fds);
		file = rcu_dereference_raw(*fdentry);
		if (unlikely(!file))
			return NULL;

		if (unlikely(file->f_mode & mask))
			return NULL;

		/*
		 * Ok, we have a file pointer. However, because we do
		 * this all locklessly under RCU, we may be racing with
		 * that file being closed.
		 *
		 * Such a race can take two forms:
		 *
		 *  (a) the file ref already went down to zero,
		 *      and get_file_rcu() fails. Just try again:
		 */
		if (unlikely(!get_file_rcu(file)))
			continue;

		/*
		 *  (b) the file table entry has changed under us.
		 *       Note that we don't need to re-check the 'fdt->fd'
		 *       pointer having changed, because it always goes
		 *       hand-in-hand with 'fdt'.
		 *
		 * If so, we need to put our ref and try again.
		 */
		if (unlikely(rcu_dereference_raw(files->fdt) != fdt) ||
		    unlikely(rcu_dereference_raw(*fdentry) != file)) {
			fput(file);
			continue;
		}

		/*
		 * Ok, we have a ref to the file, and checked that it
		 * still exists.
		 */
		return file;
	}
}

static struct file *__fget_files(struct files_struct *files, unsigned int fd,
				 fmode_t mask)
{
	struct file *file;

	rcu_read_lock();
	file = __fget_files_rcu(files, fd, mask);
	rcu_read_unlock();

	return file;
}

static inline struct file *__fget(unsigned int fd, fmode_t mask)
{
	return __fget_files(current->files, fd, mask);
}

struct file *fget(unsigned int fd)
{
	return __fget(fd, FMODE_PATH);
}
EXPORT_SYMBOL(fget);

struct file *fget_raw(unsigned int fd)
{
	return __fget(fd, 0);
}
EXPORT_SYMBOL(fget_raw);

struct file *fget_task(struct task_struct *task, unsigned int fd)
{
	struct file *file = NULL;

	task_lock(task);
	if (task->files)
		file = __fget_files(task->files, fd, 0);
	task_unlock(task);

	return file;
}

struct file *task_lookup_fd_rcu(struct task_struct *task, unsigned int fd)
{
	/* Must be called with rcu_read_lock held */
	struct files_struct *files;
	struct file *file = NULL;

	task_lock(task);
	files = task->files;
	if (files)
		file = files_lookup_fd_rcu(files, fd);
	task_unlock(task);

	return file;
}

struct file *task_lookup_next_fd_rcu(struct task_struct *task, unsigned int *ret_fd)
{
	/* Must be called with rcu_read_lock held */
	struct files_struct *files;
	unsigned int fd = *ret_fd;
	struct file *file = NULL;

	task_lock(task);
	files = task->files;
	if (files) {
		for (; fd < files_fdtable(files)->max_fds; fd++) {
			file = files_lookup_fd_rcu(files, fd);
			if (file)
				break;
		}
	}
	task_unlock(task);
	*ret_fd = fd;
	return file;
}
EXPORT_SYMBOL(task_lookup_next_fd_rcu);

/*
 * Lightweight file lookup - no refcnt increment if fd table isn't shared.
 *
 * You can use this instead of fget if you satisfy all of the following
 * conditions:
 * 1) You must call fput_light before exiting the syscall and returning control
 *    to userspace (i.e. you cannot remember the returned struct file * after
 *    returning to userspace).
 * 2) You must not call filp_close on the returned struct file * in between
 *    calls to fget_light and fput_light.
 * 3) You must not clone the current task in between the calls to fget_light
 *    and fput_light.
 *
 * The fput_needed flag returned by fget_light should be passed to the
 * corresponding fput_light.
 */
static unsigned long __fget_light(unsigned int fd, fmode_t mask)
{
	struct files_struct *files = current->files;
	struct file *file;

	/*
	 * If another thread is concurrently calling close_fd() followed
	 * by put_files_struct(), we must not observe the old table
	 * entry combined with the new refcount - otherwise we could
	 * return a file that is concurrently being freed.
	 *
	 * atomic_read_acquire() pairs with atomic_dec_and_test() in
	 * put_files_struct().
	 */
	if (atomic_read_acquire(&files->count) == 1) {
		file = files_lookup_fd_raw(files, fd);
		if (!file || unlikely(file->f_mode & mask))
			return 0;
		return (unsigned long)file;
	} else {
		file = __fget(fd, mask);
		if (!file)
			return 0;
		return FDPUT_FPUT | (unsigned long)file;
	}
}
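
/*
 * Editorial sketch (not part of the kernel source): callers do not use
 * __fget_light() directly but go through the fdget()/fdput() wrappers from
 * <linux/file.h>, which encode the "needs fput" bit in the returned word:
 *
 *	struct fd f = fdget(fd);
 *	if (!f.file)
 *		return -EBADF;
 *	// ... use f.file ...
 *	fdput(f);
 */
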
unsigned long __fdget(unsigned int fd)
{
	return __fget_light(fd, FMODE_PATH);
}
EXPORT_SYMBOL(__fdget);

unsigned long __fdget_raw(unsigned int fd)
{
	return __fget_light(fd, 0);
}

unsigned long __fdget_pos(unsigned int fd)
{
	unsigned long v = __fdget(fd);
	struct file *file = (struct file *)(v & ~3);

	if (file && (file->f_mode & FMODE_ATOMIC_POS)) {
		if (file_count(file) > 1) {
			v |= FDPUT_POS_UNLOCK;
			mutex_lock(&file->f_pos_lock);
		}
	}
	return v;
}

void __f_unlock_pos(struct file *f)
{
	mutex_unlock(&f->f_pos_lock);
}

/*
 * We only lock f_pos if we have threads or if the file might be
 * shared with another process. In both cases we'll have an elevated
 * file count (done either by fdget() or by fork()).
 */

void set_close_on_exec(unsigned int fd, int flag)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;
	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	if (flag)
		__set_close_on_exec(fd, fdt);
	else
		__clear_close_on_exec(fd, fdt);
	spin_unlock(&files->file_lock);
}

bool get_close_on_exec(unsigned int fd)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;
	bool res;
	rcu_read_lock();
	fdt = files_fdtable(files);
	res = close_on_exec(fd, fdt);
	rcu_read_unlock();
	return res;
}

static int do_dup2(struct files_struct *files,
	struct file *file, unsigned fd, unsigned flags)
__releases(&files->file_lock)
{
	struct file *tofree;
	struct fdtable *fdt;

	/*
	 * We need to detect attempts to do dup2() over an allocated but still
	 * not finished descriptor.  NB: OpenBSD avoids that at the price of
	 * extra work in their equivalent of fget() - they insert struct
	 * file immediately after grabbing descriptor, mark it larval if
	 * more work (e.g. actual opening) is needed and make sure that
	 * fget() treats larval files as absent.  Potentially interesting,
	 * but while extra work in fget() is trivial, locking implications
	 * and amount of surgery on open()-related paths in VFS are not.
	 * FreeBSD fails with -EBADF in the same situation, NetBSD's "solution"
	 * deadlocks in rather amusing ways, AFAICS.  All of that is out of
	 * scope of POSIX or SUS, since neither considers shared descriptor
	 * tables and this condition does not arise without those.
	 */
	fdt = files_fdtable(files);
	tofree = fdt->fd[fd];
	if (!tofree && fd_is_open(fd, fdt))
		goto Ebusy;
	get_file(file);
	rcu_assign_pointer(fdt->fd[fd], file);
	__set_open_fd(fd, fdt);
	if (flags & O_CLOEXEC)
		__set_close_on_exec(fd, fdt);
	else
		__clear_close_on_exec(fd, fdt);
	spin_unlock(&files->file_lock);

	if (tofree)
		filp_close(tofree, files);

	return fd;

Ebusy:
	spin_unlock(&files->file_lock);
	return -EBUSY;
}

int replace_fd(unsigned fd, struct file *file, unsigned flags)
{
	int err;
	struct files_struct *files = current->files;

	if (!file)
		return close_fd(fd);

	if (fd >= rlimit(RLIMIT_NOFILE))
		return -EBADF;

	spin_lock(&files->file_lock);
	err = expand_files(files, fd);
	if (unlikely(err < 0))
		goto out_unlock;
	return do_dup2(files, file, fd, flags);

out_unlock:
	spin_unlock(&files->file_lock);
	return err;
}

/**
 * __receive_fd() - Install received file into file descriptor table
 * @file: struct file that was received from another process
 * @ufd: __user pointer to write new fd number to
 * @o_flags: the O_* flags to apply to the new fd entry
 *
 * Installs a received file into the file descriptor table, with appropriate
 * checks and count updates. Optionally writes the fd number to userspace, if
 * @ufd is non-NULL.
 *
 * This helper handles its own reference counting of the incoming
 * struct file.
 *
 * Returns newly installed fd or -ve on error.
 */
int __receive_fd(struct file *file, int __user *ufd, unsigned int o_flags)
{
	int new_fd;
	int error;

	error = security_file_receive(file);
	if (error)
		return error;

	new_fd = get_unused_fd_flags(o_flags);
	if (new_fd < 0)
		return new_fd;

	if (ufd) {
		error = put_user(new_fd, ufd);
		if (error) {
			put_unused_fd(new_fd);
			return error;
		}
	}

	fd_install(new_fd, get_file(file));
	__receive_sock(file);
	return new_fd;
}

int receive_fd_replace(int new_fd, struct file *file, unsigned int o_flags)
{
	int error;

	error = security_file_receive(file);
	if (error)
		return error;
	error = replace_fd(new_fd, file, o_flags);
	if (error)
		return error;
	__receive_sock(file);
	return new_fd;
}

int receive_fd(struct file *file, unsigned int o_flags)
{
	return __receive_fd(file, NULL, o_flags);
}
EXPORT_SYMBOL_GPL(receive_fd);

static int ksys_dup3(unsigned int oldfd, unsigned int newfd, int flags)
{
	int err = -EBADF;
	struct file *file;
	struct files_struct *files = current->files;

	if ((flags & ~O_CLOEXEC) != 0)
		return -EINVAL;

	if (unlikely(oldfd == newfd))
		return -EINVAL;

	if (newfd >= rlimit(RLIMIT_NOFILE))
		return -EBADF;

	spin_lock(&files->file_lock);
	err = expand_files(files, newfd);
	file = files_lookup_fd_locked(files, oldfd);
	if (unlikely(!file))
		goto Ebadf;
	if (unlikely(err < 0)) {
		if (err == -EMFILE)
			goto Ebadf;
		goto out_unlock;
	}
	return do_dup2(files, file, newfd, flags);

Ebadf:
	err = -EBADF;
out_unlock:
	spin_unlock(&files->file_lock);
	return err;
}

SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
{
	return ksys_dup3(oldfd, newfd, flags);
}

SYSCALL_DEFINE2(dup2, unsigned int, oldfd, unsigned int, newfd)
{
	if (unlikely(newfd == oldfd)) { /* corner case */
		struct files_struct *files = current->files;
		int retval = oldfd;

		rcu_read_lock();
		if (!files_lookup_fd_rcu(files, oldfd))
			retval = -EBADF;
		rcu_read_unlock();
		return retval;
	}
	return ksys_dup3(oldfd, newfd, 0);
}

SYSCALL_DEFINE1(dup, unsigned int, fildes)
{
	int ret = -EBADF;
	struct file *file = fget_raw(fildes);

	if (file) {
		ret = get_unused_fd_flags(0);
		if (ret >= 0)
			fd_install(ret, file);
		else
			fput(file);
	}
	return ret;
}

int f_dupfd(unsigned int from, struct file *file, unsigned flags)
{
	unsigned long nofile = rlimit(RLIMIT_NOFILE);
	int err;
	if (from >= nofile)
		return -EINVAL;
	err = alloc_fd(from, nofile, flags);
	if (err >= 0) {
		get_file(file);
		fd_install(err, file);
	}
	return err;
}

int iterate_fd(struct files_struct *files, unsigned n,
		int (*f)(const void *, struct file *, unsigned),
		const void *p)
{
	struct fdtable *fdt;
	int res = 0;
	if (!files)
		return 0;
	spin_lock(&files->file_lock);
	for (fdt = files_fdtable(files); n < fdt->max_fds; n++) {
		struct file *file;
		file = rcu_dereference_check_fdtable(files, fdt->fd[n]);
		if (!file)
			continue;
		res = f(p, file, n);
		if (res)
			break;
	}
	spin_unlock(&files->file_lock);
	return res;
}
EXPORT_SYMBOL(iterate_fd);