// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/fs/file_table.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */

#include <linux/string.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/security.h>
#include <linux/cred.h>
#include <linux/eventpoll.h>
#include <linux/rcupdate.h>
#include <linux/mount.h>
#include <linux/capability.h>
#include <linux/cdev.h>
#include <linux/fsnotify.h>
#include <linux/sysctl.h>
#include <linux/percpu_counter.h>
#include <linux/percpu.h>
#include <linux/task_work.h>
#include <linux/ima.h>
#include <linux/swap.h>

#include <linux/atomic.h>

#include "internal.h"

/* sysctl tunables... */
struct files_stat_struct files_stat = {
	.max_files = NR_FILE
};

/* SLAB cache for file structures */
static struct kmem_cache *filp_cachep __read_mostly;

static struct percpu_counter nr_files __cacheline_aligned_in_smp;

static void file_free_rcu(struct rcu_head *head)
{
	struct file *f = container_of(head, struct file, f_u.fu_rcuhead);

	put_cred(f->f_cred);
	kmem_cache_free(filp_cachep, f);
}

static inline void file_free(struct file *f)
{
	security_file_free(f);
	if (!(f->f_mode & FMODE_NOACCOUNT))
		percpu_counter_dec(&nr_files);
	call_rcu(&f->f_u.fu_rcuhead, file_free_rcu);
}
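
/*
 * Note: the final free is deferred through call_rcu() because lockless
 * readers (e.g. the __fget() fast path, which probes the descriptor
 * table under rcu_read_lock()) may still be examining this struct file
 * while the last reference is dropped; the grace period guarantees they
 * are done before the memory goes back to the slab.
 */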

/*
 * Return the total number of open files in the system
 */
static long get_nr_files(void)
{
	return percpu_counter_read_positive(&nr_files);
}

/*
 * Return the maximum number of open files in the system
 */
unsigned long get_max_files(void)
{
	return files_stat.max_files;
}
EXPORT_SYMBOL_GPL(get_max_files);

/*
 * Handle nr_files sysctl
 */
#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
int proc_nr_files(struct ctl_table *table, int write,
		  void *buffer, size_t *lenp, loff_t *ppos)
{
	files_stat.nr_files = get_nr_files();
	return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}
#else
int proc_nr_files(struct ctl_table *table, int write,
		  void *buffer, size_t *lenp, loff_t *ppos)
{
	return -ENOSYS;
}
#endif
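
/*
 * For context, the handler above is wired to the "fs.file-nr" sysctl.
 * The table entry lives in kernel/sysctl.c and looks roughly like the
 * sketch below (illustrative, not copied verbatim):
 *
 *	{
 *		.procname	= "file-nr",
 *		.data		= &files_stat,
 *		.maxlen		= sizeof(files_stat),
 *		.mode		= 0444,
 *		.proc_handler	= proc_nr_files,
 *	},
 */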

static struct file *__alloc_file(int flags, const struct cred *cred)
{
	struct file *f;
	int error;

	f = kmem_cache_zalloc(filp_cachep, GFP_KERNEL);
	if (unlikely(!f))
		return ERR_PTR(-ENOMEM);

	f->f_cred = get_cred(cred);
	error = security_file_alloc(f);
	if (unlikely(error)) {
		file_free_rcu(&f->f_u.fu_rcuhead);
		return ERR_PTR(error);
	}

	atomic_long_set(&f->f_count, 1);
	rwlock_init(&f->f_owner.lock);
	spin_lock_init(&f->f_lock);
	mutex_init(&f->f_pos_lock);
	eventpoll_init_file(f);
	f->f_flags = flags;
	f->f_mode = OPEN_FMODE(flags);
	/* f->f_version: 0 */

	return f;
}

/* Find an unused file structure and return a pointer to it.
 * Returns an error pointer if some error happened, e.g. we are over the
 * file structures limit, ran out of memory or the operation is not
 * permitted.
 *
 * Be very careful using this.  You are responsible for
 * getting write access to any mount that you might assign
 * to this filp, if it is opened for write.  If this is not
 * done, you will imbalance the mount's writer count
 * and get a warning at __fput() time.
 */
struct file *alloc_empty_file(int flags, const struct cred *cred)
{
	static long old_max;
	struct file *f;

	/*
	 * Privileged users can go above max_files
	 */
	if (get_nr_files() >= files_stat.max_files && !capable(CAP_SYS_ADMIN)) {
		/*
		 * percpu_counters are inaccurate.  Do an expensive check before
		 * we go and fail.
		 */
		if (percpu_counter_sum_positive(&nr_files) >= files_stat.max_files)
			goto over;
	}

	f = __alloc_file(flags, cred);
	if (!IS_ERR(f))
		percpu_counter_inc(&nr_files);

	return f;

over:
	/* Ran out of filps - report that */
	if (get_nr_files() > old_max) {
		pr_info("VFS: file-max limit %lu reached\n", get_max_files());
		old_max = get_nr_files();
	}
	return ERR_PTR(-ENFILE);
}
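
/*
 * Illustrative caller pattern (a sketch modelled on path_openat() in
 * fs/namei.c, not a verbatim copy):
 *
 *	file = alloc_empty_file(op->open_flag, current_cred());
 *	if (IS_ERR(file))
 *		return file;
 *	...walk the path and open the result into *file...
 *	if (error) {
 *		fput(file);	// safe: __fput() checks FMODE_OPENED
 *		file = ERR_PTR(error);
 *	}
 */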

/*
 * Variant of alloc_empty_file() that doesn't check and modify nr_files.
 *
 * Should not be used unless there's a very good reason to do so.
 */
struct file *alloc_empty_file_noaccount(int flags, const struct cred *cred)
{
	struct file *f = __alloc_file(flags, cred);

	if (!IS_ERR(f))
		f->f_mode |= FMODE_NOACCOUNT;

	return f;
}

/**
 * alloc_file - allocate and initialize a 'struct file'
 *
 * @path: the (dentry, vfsmount) pair for the new file
 * @flags: O_... flags with which the new file will be opened
 * @fop: the 'struct file_operations' for the new file
 */
static struct file *alloc_file(const struct path *path, int flags,
		const struct file_operations *fop)
{
	struct file *file;

	file = alloc_empty_file(flags, current_cred());
	if (IS_ERR(file))
		return file;

	file->f_path = *path;
	file->f_inode = path->dentry->d_inode;
	file->f_mapping = path->dentry->d_inode->i_mapping;
	file->f_wb_err = filemap_sample_wb_err(file->f_mapping);
	file->f_sb_err = file_sample_sb_err(file);
	if ((file->f_mode & FMODE_READ) &&
	    likely(fop->read || fop->read_iter))
		file->f_mode |= FMODE_CAN_READ;
	if ((file->f_mode & FMODE_WRITE) &&
	    likely(fop->write || fop->write_iter))
		file->f_mode |= FMODE_CAN_WRITE;
	file->f_mode |= FMODE_OPENED;
	file->f_op = fop;
	if ((file->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ)
		i_readcount_inc(path->dentry->d_inode);
	return file;
}

struct file *alloc_file_pseudo(struct inode *inode, struct vfsmount *mnt,
			       const char *name, int flags,
			       const struct file_operations *fops)
{
	static const struct dentry_operations anon_ops = {
		.d_dname = simple_dname
	};
	struct qstr this = QSTR_INIT(name, strlen(name));
	struct path path;
	struct file *file;

	path.dentry = d_alloc_pseudo(mnt->mnt_sb, &this);
	if (!path.dentry)
		return ERR_PTR(-ENOMEM);
	if (!mnt->mnt_sb->s_d_op)
		d_set_d_op(path.dentry, &anon_ops);
	path.mnt = mntget(mnt);
	d_instantiate(path.dentry, inode);
	file = alloc_file(&path, flags, fops);
	if (IS_ERR(file)) {
		ihold(inode);
		path_put(&path);
	}
	return file;
}
EXPORT_SYMBOL(alloc_file_pseudo);
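
/*
 * Example user (a sketch of what fs/pipe.c does for the write end of a
 * new pipe; exact flags may differ between versions):
 *
 *	f = alloc_file_pseudo(inode, pipe_mnt, "",
 *			      O_WRONLY | (flags & (O_NONBLOCK | O_DIRECT)),
 *			      &pipefifo_fops);
 */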

struct file *alloc_file_clone(struct file *base, int flags,
			      const struct file_operations *fops)
{
	struct file *f = alloc_file(&base->f_path, flags, fops);
	if (!IS_ERR(f)) {
		path_get(&f->f_path);
		f->f_mapping = base->f_mapping;
	}
	return f;
}
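
/*
 * Example user (sketch, same caveat as above): the read end of a pipe is
 * cloned from the write end so both share one dentry and mapping:
 *
 *	res[0] = alloc_file_clone(res[1], O_RDONLY | (flags & O_NONBLOCK),
 *				  &pipefifo_fops);
 */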

/* the real guts of fput() - releasing the last reference to file
 */
static void __fput(struct file *file)
{
	struct dentry *dentry = file->f_path.dentry;
	struct vfsmount *mnt = file->f_path.mnt;
	struct inode *inode = file->f_inode;
	fmode_t mode = file->f_mode;

	if (unlikely(!(file->f_mode & FMODE_OPENED)))
		goto out;

	might_sleep();

	fsnotify_close(file);
	/*
	 * The function eventpoll_release() should be the first one called
	 * in the file cleanup chain.
	 */
	eventpoll_release(file);
	locks_remove_file(file);

	ima_file_free(file);
	if (unlikely(file->f_flags & FASYNC)) {
		if (file->f_op->fasync)
			file->f_op->fasync(-1, file, 0);
	}
	if (file->f_op->release)
		file->f_op->release(inode, file);
	if (unlikely(S_ISCHR(inode->i_mode) && inode->i_cdev != NULL &&
		     !(mode & FMODE_PATH))) {
		cdev_put(inode->i_cdev);
	}
	fops_put(file->f_op);
	put_pid(file->f_owner.pid);
	if ((mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ)
		i_readcount_dec(inode);
	if (mode & FMODE_WRITER) {
		put_write_access(inode);
		__mnt_drop_write(mnt);
	}
	dput(dentry);
	if (unlikely(mode & FMODE_NEED_UNMOUNT))
		dissolve_on_fput(mnt);
	mntput(mnt);
out:
	file_free(file);
}
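
/*
 * Ordering note: dput() and mntput() run only after all of the file's
 * own teardown (->release(), locks, fasync) has finished, because they
 * can trigger arbitrarily heavy work - inode eviction, or even a full
 * unmount when FMODE_NEED_UNMOUNT is set.
 */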

static LLIST_HEAD(delayed_fput_list);
static void delayed_fput(struct work_struct *unused)
{
	struct llist_node *node = llist_del_all(&delayed_fput_list);
	struct file *f, *t;

	llist_for_each_entry_safe(f, t, node, f_u.fu_llist)
		__fput(f);
}

static void ____fput(struct callback_head *work)
{
	__fput(container_of(work, struct file, f_u.fu_rcuhead));
}

/*
 * If a kernel thread really needs to have the final fput() it has done
 * complete, call this.  The only user right now is the boot - we
 * *do* need to make sure our writes to binaries on initramfs have
 * not left us with opened struct file waiting for __fput() - execve()
 * won't work without that.  Please, don't add more callers without
 * very good reasons; in particular, never call this with locks
 * held and never call this from a thread that might need to do
 * some work on any kind of umount.
 */
void flush_delayed_fput(void)
{
	delayed_fput(NULL);
}
EXPORT_SYMBOL_GPL(flush_delayed_fput);

static DECLARE_DELAYED_WORK(delayed_fput_work, delayed_fput);

void fput_many(struct file *file, unsigned int refs)
{
	if (atomic_long_sub_and_test(refs, &file->f_count)) {
		struct task_struct *task = current;

		if (likely(!in_interrupt() && !(task->flags & PF_KTHREAD))) {
			init_task_work(&file->f_u.fu_rcuhead, ____fput);
			if (!task_work_add(task, &file->f_u.fu_rcuhead, true))
				return;
			/*
			 * After this task has run exit_task_work(),
			 * task_work_add() will fail.  Fall through to delayed
			 * fput to avoid leaking *file.
			 */
		}

		if (llist_add(&file->f_u.fu_llist, &delayed_fput_list))
			schedule_delayed_work(&delayed_fput_work, 1);
	}
}

void fput(struct file *file)
{
	fput_many(file, 1);
}
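
/*
 * Typical caller pattern (sketch):
 *
 *	struct file *f = fget(fd);
 *	if (!f)
 *		return -EBADF;
 *	...use f...
 *	fput(f);
 *
 * fput() itself is cheap from almost any context: for ordinary tasks the
 * heavy __fput() work is deferred to task_work and runs on the way back
 * to userspace; interrupt context and kernel threads fall back to the
 * delayed-work list instead.
 */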

/*
 * Synchronous analog of fput(); for kernel threads that might be needed
 * in some umount() (and thus can't use flush_delayed_fput() without
 * risking deadlocks), that need to wait for completion of __fput() and
 * know that for this specific struct file it won't involve anything that
 * would need them.  Use only if you really need it - at the very least,
 * don't blindly convert fput() done by a kernel thread to this.
 */
void __fput_sync(struct file *file)
{
	if (atomic_long_dec_and_test(&file->f_count)) {
		struct task_struct *task = current;
		BUG_ON(!(task->flags & PF_KTHREAD));
		__fput(file);
	}
}

EXPORT_SYMBOL(fput);

void __init files_init(void)
{
	filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0,
			SLAB_HWCACHE_ALIGN | SLAB_PANIC | SLAB_ACCOUNT, NULL);
	percpu_counter_init(&nr_files, 0, GFP_KERNEL);
}

/*
 * One file with associated inode and dcache is very roughly 1K. By
 * default, do not use more than 10% of our memory for files.
 */
void __init files_maxfiles_init(void)
{
	unsigned long n;
	unsigned long nr_pages = totalram_pages();
	unsigned long memreserve = (nr_pages - nr_free_pages()) * 3/2;

	memreserve = min(memreserve, nr_pages - 1);
	n = ((nr_pages - memreserve) * (PAGE_SIZE / 1024)) / 10;

	files_stat.max_files = max_t(unsigned long, n, NR_FILE);
}
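
/*
 * Worked example (assuming 4 KiB pages): a 16 GiB machine has
 * totalram_pages() ~= 4,194,304.  Ignoring memreserve,
 * n = 4194304 * (4096 / 1024) / 10 ~= 1,677,721 files; at roughly 1K
 * per open file that is ~1.6 GiB, i.e. the intended ~10% of RAM.
 */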