1// SPDX-License-Identifier: GPL-2.0
2#include <linux/fanotify.h>
3#include <linux/fcntl.h>
4#include <linux/file.h>
5#include <linux/fs.h>
6#include <linux/anon_inodes.h>
7#include <linux/fsnotify_backend.h>
8#include <linux/init.h>
9#include <linux/mount.h>
10#include <linux/namei.h>
11#include <linux/poll.h>
12#include <linux/security.h>
13#include <linux/syscalls.h>
14#include <linux/slab.h>
15#include <linux/types.h>
16#include <linux/uaccess.h>
17#include <linux/compat.h>
18#include <linux/sched/signal.h>
19
20#include <asm/ioctls.h>
21
22#include "../../mount.h"
23#include "../fdinfo.h"
24#include "fanotify.h"
25
26#define FANOTIFY_DEFAULT_MAX_EVENTS 16384
27#define FANOTIFY_DEFAULT_MAX_MARKS 8192
28#define FANOTIFY_DEFAULT_MAX_LISTENERS 128
29
30/*
31 * All flags that may be specified in parameter event_f_flags of fanotify_init.
32 *
33 * Internal and external open flags are stored together in field f_flags of
34 * struct file. Only external open flags shall be allowed in event_f_flags.
35 * Internal flags like FMODE_NONOTIFY, FMODE_EXEC, FMODE_NOCMTIME shall be
36 * excluded.
37 */
38#define FANOTIFY_INIT_ALL_EVENT_F_BITS ( \
39 O_ACCMODE | O_APPEND | O_NONBLOCK | \
40 __O_SYNC | O_DSYNC | O_CLOEXEC | \
41 O_LARGEFILE | O_NOATIME )
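/*
 * For example (illustrative only): passing O_RDONLY | O_CLOEXEC | O_NOATIME
 * as event_f_flags to fanotify_init() is accepted, while any flag outside
 * the set above, such as O_CREAT or O_PATH, makes fanotify_init() fail
 * with -EINVAL.
 */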
42
43extern const struct fsnotify_ops fanotify_fsnotify_ops;
44
45struct kmem_cache *fanotify_mark_cache __read_mostly;
46struct kmem_cache *fanotify_event_cachep __read_mostly;
47struct kmem_cache *fanotify_perm_event_cachep __read_mostly;
48
49/*
50 * Get an fsnotify notification event if one exists and is small
51 * enough to fit in "count". Return an error pointer if the count
52 * is not large enough.
53 *
54 * Called with the group->notification_lock held.
55 */
56static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
57 size_t count)
58{
59 assert_spin_locked(&group->notification_lock);
60
61 pr_debug("%s: group=%p count=%zd\n", __func__, group, count);
62
63 if (fsnotify_notify_queue_is_empty(group))
64 return NULL;
65
66 if (FAN_EVENT_METADATA_LEN > count)
67 return ERR_PTR(-EINVAL);
68
69 /* we held the notification_lock the whole time, so the event at the
70 * head of the queue cannot have changed since the check above */
71 return fsnotify_remove_first_event(group);
72}
73
74static int create_fd(struct fsnotify_group *group,
75 struct fanotify_event_info *event,
76 struct file **file)
77{
78 int client_fd;
79 struct file *new_file;
80
81 pr_debug("%s: group=%p event=%p\n", __func__, group, event);
82
83 client_fd = get_unused_fd_flags(group->fanotify_data.f_flags);
84 if (client_fd < 0)
85 return client_fd;
86
87 /*
88 * We need a new file handle for the userspace program so it can read
89 * even if the file was originally opened O_WRONLY. If this was an
90 * overflow event, dentry and mnt are NULL; that's fine, just don't
91 * call dentry_open().
92 */
93 if (event->path.dentry && event->path.mnt)
94 new_file = dentry_open(&event->path,
95 group->fanotify_data.f_flags | FMODE_NONOTIFY,
96 current_cred());
97 else
98 new_file = ERR_PTR(-EOVERFLOW);
99 if (IS_ERR(new_file)) {
100 /*
101 * We still send an event even if we can't open the file. This
102 * can happen, say, when tasks are gone and we try to open their
103 * /proc files, or when we try to open a WRONLY file like in
104 * sysfs. We just send the errno to userspace since there isn't
105 * much else we can do.
106 */
107 put_unused_fd(client_fd);
108 client_fd = PTR_ERR(new_file);
109 } else {
110 *file = new_file;
111 }
112
113 return client_fd;
114}
115
116static int fill_event_metadata(struct fsnotify_group *group,
117 struct fanotify_event_metadata *metadata,
118 struct fsnotify_event *fsn_event,
119 struct file **file)
120{
121 int ret = 0;
122 struct fanotify_event_info *event;
123
124 pr_debug("%s: group=%p metadata=%p event=%p\n", __func__,
125 group, metadata, fsn_event);
126
127 *file = NULL;
128 event = container_of(fsn_event, struct fanotify_event_info, fse);
129 metadata->event_len = FAN_EVENT_METADATA_LEN;
130 metadata->metadata_len = FAN_EVENT_METADATA_LEN;
131 metadata->vers = FANOTIFY_METADATA_VERSION;
132 metadata->reserved = 0;
133 metadata->mask = fsn_event->mask & FAN_ALL_OUTGOING_EVENTS;
134 metadata->pid = pid_vnr(event->tgid);
135 if (unlikely(fsn_event->mask & FAN_Q_OVERFLOW))
136 metadata->fd = FAN_NOFD;
137 else {
138 metadata->fd = create_fd(group, event, file);
139 if (metadata->fd < 0)
140 ret = metadata->fd;
141 }
142
143 return ret;
144}
145
146static struct fanotify_perm_event_info *dequeue_event(
147 struct fsnotify_group *group, int fd)
148{
149 struct fanotify_perm_event_info *event, *return_e = NULL;
150
151 spin_lock(&group->notification_lock);
152 list_for_each_entry(event, &group->fanotify_data.access_list,
153 fae.fse.list) {
154 if (event->fd != fd)
155 continue;
156
157 list_del_init(&event->fae.fse.list);
158 return_e = event;
159 break;
160 }
161 spin_unlock(&group->notification_lock);
162
163 pr_debug("%s: found return_re=%p\n", __func__, return_e);
164
165 return return_e;
166}
167
168static int process_access_response(struct fsnotify_group *group,
169 struct fanotify_response *response_struct)
170{
171 struct fanotify_perm_event_info *event;
172 int fd = response_struct->fd;
173 int response = response_struct->response;
174
175 pr_debug("%s: group=%p fd=%d response=%d\n", __func__, group,
176 fd, response);
177 /*
178 * make sure the response is valid, if invalid we do nothing and either
179 * userspace can send a valid response or we will clean it up after the
180 * timeout
181 */
182 switch (response & ~FAN_AUDIT) {
183 case FAN_ALLOW:
184 case FAN_DENY:
185 break;
186 default:
187 return -EINVAL;
188 }
189
190 if (fd < 0)
191 return -EINVAL;
192
193 if ((response & FAN_AUDIT) && !group->fanotify_data.audit)
194 return -EINVAL;
195
196 event = dequeue_event(group, fd);
197 if (!event)
198 return -ENOENT;
199
200 event->response = response;
201 wake_up(&group->fanotify_data.access_waitq);
202
203 return 0;
204}
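/*
 * Userspace view of the response protocol handled above (illustrative
 * sketch only, not kernel code; error handling omitted):
 *
 *	#include <sys/fanotify.h>
 *	#include <unistd.h>
 *
 *	void answer_event(int fanotify_fd,
 *			  const struct fanotify_event_metadata *ev)
 *	{
 *		struct fanotify_response r = {
 *			.fd = ev->fd,		// fd carried by the event we read
 *			.response = FAN_ALLOW,	// or FAN_DENY
 *		};
 *
 *		write(fanotify_fd, &r, sizeof(r));
 *		close(ev->fd);			// drop our reference to the file
 *	}
 */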
205
206static ssize_t copy_event_to_user(struct fsnotify_group *group,
207 struct fsnotify_event *event,
208 char __user *buf)
209{
210 struct fanotify_event_metadata fanotify_event_metadata;
211 struct file *f;
212 int fd, ret;
213
214 pr_debug("%s: group=%p event=%p\n", __func__, group, event);
215
216 ret = fill_event_metadata(group, &fanotify_event_metadata, event, &f);
217 if (ret < 0)
218 return ret;
219
220 fd = fanotify_event_metadata.fd;
221 ret = -EFAULT;
222 if (copy_to_user(buf, &fanotify_event_metadata,
223 fanotify_event_metadata.event_len))
224 goto out_close_fd;
225
226 if (fanotify_is_perm_event(event->mask))
227 FANOTIFY_PE(event)->fd = fd;
228
229 if (fd != FAN_NOFD)
230 fd_install(fd, f);
231 return fanotify_event_metadata.event_len;
232
233out_close_fd:
234 if (fd != FAN_NOFD) {
235 put_unused_fd(fd);
236 fput(f);
237 }
238 return ret;
239}
240
241/* fanotify userspace file descriptor functions */
242static __poll_t fanotify_poll(struct file *file, poll_table *wait)
243{
244 struct fsnotify_group *group = file->private_data;
245 __poll_t ret = 0;
246
247 poll_wait(file, &group->notification_waitq, wait);
248 spin_lock(&group->notification_lock);
249 if (!fsnotify_notify_queue_is_empty(group))
250 ret = EPOLLIN | EPOLLRDNORM;
251 spin_unlock(&group->notification_lock);
252
253 return ret;
254}
255
256static ssize_t fanotify_read(struct file *file, char __user *buf,
257 size_t count, loff_t *pos)
258{
259 struct fsnotify_group *group;
260 struct fsnotify_event *kevent;
261 char __user *start;
262 int ret;
263 DEFINE_WAIT_FUNC(wait, woken_wake_function);
264
265 start = buf;
266 group = file->private_data;
267
268 pr_debug("%s: group=%p\n", __func__, group);
269
270 add_wait_queue(&group->notification_waitq, &wait);
271 while (1) {
272 spin_lock(&group->notification_lock);
273 kevent = get_one_event(group, count);
274 spin_unlock(&group->notification_lock);
275
276 if (IS_ERR(kevent)) {
277 ret = PTR_ERR(kevent);
278 break;
279 }
280
281 if (!kevent) {
282 ret = -EAGAIN;
283 if (file->f_flags & O_NONBLOCK)
284 break;
285
286 ret = -ERESTARTSYS;
287 if (signal_pending(current))
288 break;
289
290 if (start != buf)
291 break;
292
293 wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
294 continue;
295 }
296
297 ret = copy_event_to_user(group, kevent, buf);
298 if (unlikely(ret == -EOPENSTALE)) {
299 /*
300 * We cannot report events with stale fd so drop it.
301 * Setting ret to 0 will continue the event loop and
302 * do the right thing if there are no more events to
303 * read (i.e. return bytes read, -EAGAIN or wait).
304 */
305 ret = 0;
306 }
307
308 /*
309 * Permission events get queued to wait for response. Other
310 * events can be destroyed now.
311 */
312 if (!fanotify_is_perm_event(kevent->mask)) {
313 fsnotify_destroy_event(group, kevent);
314 } else {
315 if (ret <= 0) {
316 FANOTIFY_PE(kevent)->response = FAN_DENY;
317 wake_up(&group->fanotify_data.access_waitq);
318 } else {
319 spin_lock(&group->notification_lock);
320 list_add_tail(&kevent->list,
321 &group->fanotify_data.access_list);
322 spin_unlock(&group->notification_lock);
323 }
324 }
325 if (ret < 0)
326 break;
327 buf += ret;
328 count -= ret;
329 }
330 remove_wait_queue(&group->notification_waitq, &wait);
331
332 if (start != buf && ret != -EFAULT)
333 ret = buf - start;
334 return ret;
335}
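/*
 * Userspace counterpart of the read loop above (illustrative sketch;
 * error handling omitted):
 *
 *	#include <sys/fanotify.h>
 *	#include <unistd.h>
 *
 *	char buf[4096];
 *	ssize_t len = read(fanotify_fd, buf, sizeof(buf));
 *	struct fanotify_event_metadata *ev = (void *)buf;
 *
 *	while (FAN_EVENT_OK(ev, len)) {
 *		// handle ev->mask, ev->pid, ev->fd ...
 *		if (ev->fd >= 0)
 *			close(ev->fd);
 *		ev = FAN_EVENT_NEXT(ev, len);
 *	}
 */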
336
337static ssize_t fanotify_write(struct file *file, const char __user *buf, size_t count, loff_t *pos)
338{
339 struct fanotify_response response = { .fd = -1, .response = -1 };
340 struct fsnotify_group *group;
341 int ret;
342
343 if (!IS_ENABLED(CONFIG_FANOTIFY_ACCESS_PERMISSIONS))
344 return -EINVAL;
345
346 group = file->private_data;
347
348 if (count > sizeof(response))
349 count = sizeof(response);
350
351 pr_debug("%s: group=%p count=%zu\n", __func__, group, count);
352
353 if (copy_from_user(&response, buf, count))
354 return -EFAULT;
355
356 ret = process_access_response(group, &response);
357 if (ret < 0)
358 count = ret;
359
360 return count;
361}
362
363static int fanotify_release(struct inode *ignored, struct file *file)
364{
365 struct fsnotify_group *group = file->private_data;
366 struct fanotify_perm_event_info *event, *next;
367 struct fsnotify_event *fsn_event;
368
369 /*
370 * Stop new events from arriving in the notification queue. Since
371 * userspace cannot use the fanotify fd anymore, no event can enter or
372 * leave access_list by now either.
373 */
374 fsnotify_group_stop_queueing(group);
375
376 /*
377 * Process all permission events on access_list and notification queue
378 * and simulate reply from userspace.
379 */
380 spin_lock(&group->notification_lock);
381 list_for_each_entry_safe(event, next, &group->fanotify_data.access_list,
382 fae.fse.list) {
383 pr_debug("%s: found group=%p event=%p\n", __func__, group,
384 event);
385
386 list_del_init(&event->fae.fse.list);
387 event->response = FAN_ALLOW;
388 }
389
390 /*
391 * Destroy all non-permission events. For permission events just
392 * dequeue them and set the response. They will be freed once the
393 * response is consumed and fanotify_get_response() returns.
394 */
395 while (!fsnotify_notify_queue_is_empty(group)) {
396 fsn_event = fsnotify_remove_first_event(group);
397 if (!(fsn_event->mask & FAN_ALL_PERM_EVENTS)) {
398 spin_unlock(&group->notification_lock);
399 fsnotify_destroy_event(group, fsn_event);
400 spin_lock(&group->notification_lock);
401 } else {
402 FANOTIFY_PE(fsn_event)->response = FAN_ALLOW;
403 }
404 }
405 spin_unlock(&group->notification_lock);
406
407 /* Response for all permission events is set, wake up waiters */
408 wake_up(&group->fanotify_data.access_waitq);
409
410 /* matches the fanotify_init->fsnotify_alloc_group */
411 fsnotify_destroy_group(group);
412
413 return 0;
414}
415
416static long fanotify_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
417{
418 struct fsnotify_group *group;
419 struct fsnotify_event *fsn_event;
420 void __user *p;
421 int ret = -ENOTTY;
422 size_t send_len = 0;
423
424 group = file->private_data;
425
426 p = (void __user *) arg;
427
428 switch (cmd) {
429 case FIONREAD:
430 spin_lock(&group->notification_lock);
431 list_for_each_entry(fsn_event, &group->notification_list, list)
432 send_len += FAN_EVENT_METADATA_LEN;
433 spin_unlock(&group->notification_lock);
434 ret = put_user(send_len, (int __user *) p);
435 break;
436 }
437
438 return ret;
439}
440
441static const struct file_operations fanotify_fops = {
442 .show_fdinfo = fanotify_show_fdinfo,
443 .poll = fanotify_poll,
444 .read = fanotify_read,
445 .write = fanotify_write,
446 .fasync = NULL,
447 .release = fanotify_release,
448 .unlocked_ioctl = fanotify_ioctl,
449 .compat_ioctl = fanotify_ioctl,
450 .llseek = noop_llseek,
451};
452
453static int fanotify_find_path(int dfd, const char __user *filename,
454 struct path *path, unsigned int flags)
455{
456 int ret;
457
458 pr_debug("%s: dfd=%d filename=%p flags=%x\n", __func__,
459 dfd, filename, flags);
460
461 if (filename == NULL) {
462 struct fd f = fdget(dfd);
463
464 ret = -EBADF;
465 if (!f.file)
466 goto out;
467
468 ret = -ENOTDIR;
469 if ((flags & FAN_MARK_ONLYDIR) &&
470 !(S_ISDIR(file_inode(f.file)->i_mode))) {
471 fdput(f);
472 goto out;
473 }
474
475 *path = f.file->f_path;
476 path_get(path);
477 fdput(f);
478 } else {
479 unsigned int lookup_flags = 0;
480
481 if (!(flags & FAN_MARK_DONT_FOLLOW))
482 lookup_flags |= LOOKUP_FOLLOW;
483 if (flags & FAN_MARK_ONLYDIR)
484 lookup_flags |= LOOKUP_DIRECTORY;
485
486 ret = user_path_at(dfd, filename, lookup_flags, path);
487 if (ret)
488 goto out;
489 }
490
491 /* you can only watch an inode if you have read permissions on it */
492 ret = inode_permission(path->dentry->d_inode, MAY_READ);
493 if (ret)
494 path_put(path);
495out:
496 return ret;
497}
498
499static __u32 fanotify_mark_remove_from_mask(struct fsnotify_mark *fsn_mark,
500 __u32 mask,
501 unsigned int flags,
502 int *destroy)
503{
504 __u32 oldmask = 0;
505
506 spin_lock(&fsn_mark->lock);
507 if (!(flags & FAN_MARK_IGNORED_MASK)) {
508 __u32 tmask = fsn_mark->mask & ~mask;
509
510 if (flags & FAN_MARK_ONDIR)
511 tmask &= ~FAN_ONDIR;
512
513 oldmask = fsn_mark->mask;
514 fsn_mark->mask = tmask;
515 } else {
516 __u32 tmask = fsn_mark->ignored_mask & ~mask;
517 if (flags & FAN_MARK_ONDIR)
518 tmask &= ~FAN_ONDIR;
519 fsn_mark->ignored_mask = tmask;
520 }
521 *destroy = !(fsn_mark->mask | fsn_mark->ignored_mask);
522 spin_unlock(&fsn_mark->lock);
523
524 return mask & oldmask;
525}
526
527static int fanotify_remove_vfsmount_mark(struct fsnotify_group *group,
528 struct vfsmount *mnt, __u32 mask,
529 unsigned int flags)
530{
531 struct fsnotify_mark *fsn_mark = NULL;
532 __u32 removed;
533 int destroy_mark;
534
535 mutex_lock(&group->mark_mutex);
536 fsn_mark = fsnotify_find_mark(&real_mount(mnt)->mnt_fsnotify_marks,
537 group);
538 if (!fsn_mark) {
539 mutex_unlock(&group->mark_mutex);
540 return -ENOENT;
541 }
542
543 removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags,
544 &destroy_mark);
545 if (removed & real_mount(mnt)->mnt_fsnotify_mask)
546 fsnotify_recalc_mask(real_mount(mnt)->mnt_fsnotify_marks);
547 if (destroy_mark)
548 fsnotify_detach_mark(fsn_mark);
549 mutex_unlock(&group->mark_mutex);
550 if (destroy_mark)
551 fsnotify_free_mark(fsn_mark);
552
553 fsnotify_put_mark(fsn_mark);
554 return 0;
555}
556
557static int fanotify_remove_inode_mark(struct fsnotify_group *group,
558 struct inode *inode, __u32 mask,
559 unsigned int flags)
560{
561 struct fsnotify_mark *fsn_mark = NULL;
562 __u32 removed;
563 int destroy_mark;
564
565 mutex_lock(&group->mark_mutex);
566 fsn_mark = fsnotify_find_mark(&inode->i_fsnotify_marks, group);
567 if (!fsn_mark) {
568 mutex_unlock(&group->mark_mutex);
569 return -ENOENT;
570 }
571
572 removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags,
573 &destroy_mark);
574 if (removed & inode->i_fsnotify_mask)
575 fsnotify_recalc_mask(inode->i_fsnotify_marks);
576 if (destroy_mark)
577 fsnotify_detach_mark(fsn_mark);
578 mutex_unlock(&group->mark_mutex);
579 if (destroy_mark)
580 fsnotify_free_mark(fsn_mark);
581
582 /* matches the fsnotify_find_mark() */
583 fsnotify_put_mark(fsn_mark);
584
585 return 0;
586}
587
588static __u32 fanotify_mark_add_to_mask(struct fsnotify_mark *fsn_mark,
589 __u32 mask,
590 unsigned int flags)
591{
592 __u32 oldmask = -1;
593
594 spin_lock(&fsn_mark->lock);
595 if (!(flags & FAN_MARK_IGNORED_MASK)) {
596 __u32 tmask = fsn_mark->mask | mask;
597
598 if (flags & FAN_MARK_ONDIR)
599 tmask |= FAN_ONDIR;
600
601 oldmask = fsn_mark->mask;
602 fsn_mark->mask = tmask;
603 } else {
604 __u32 tmask = fsn_mark->ignored_mask | mask;
605 if (flags & FAN_MARK_ONDIR)
606 tmask |= FAN_ONDIR;
607
608 fsn_mark->ignored_mask = tmask;
609 if (flags & FAN_MARK_IGNORED_SURV_MODIFY)
610 fsn_mark->flags |= FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY;
611 }
612 spin_unlock(&fsn_mark->lock);
613
614 return mask & ~oldmask;
615}
616
617static struct fsnotify_mark *fanotify_add_new_mark(struct fsnotify_group *group,
618 struct inode *inode,
619 struct vfsmount *mnt)
620{
621 struct fsnotify_mark *mark;
622 int ret;
623
624 if (atomic_read(&group->num_marks) > group->fanotify_data.max_marks)
625 return ERR_PTR(-ENOSPC);
626
627 mark = kmem_cache_alloc(fanotify_mark_cache, GFP_KERNEL);
628 if (!mark)
629 return ERR_PTR(-ENOMEM);
630
631 fsnotify_init_mark(mark, group);
632 ret = fsnotify_add_mark_locked(mark, inode, mnt, 0);
633 if (ret) {
634 fsnotify_put_mark(mark);
635 return ERR_PTR(ret);
636 }
637
638 return mark;
639}
640
641
642static int fanotify_add_vfsmount_mark(struct fsnotify_group *group,
643 struct vfsmount *mnt, __u32 mask,
644 unsigned int flags)
645{
646 struct fsnotify_mark *fsn_mark;
647 __u32 added;
648
649 mutex_lock(&group->mark_mutex);
650 fsn_mark = fsnotify_find_mark(&real_mount(mnt)->mnt_fsnotify_marks,
651 group);
652 if (!fsn_mark) {
653 fsn_mark = fanotify_add_new_mark(group, NULL, mnt);
654 if (IS_ERR(fsn_mark)) {
655 mutex_unlock(&group->mark_mutex);
656 return PTR_ERR(fsn_mark);
657 }
658 }
659 added = fanotify_mark_add_to_mask(fsn_mark, mask, flags);
660 if (added & ~real_mount(mnt)->mnt_fsnotify_mask)
661 fsnotify_recalc_mask(real_mount(mnt)->mnt_fsnotify_marks);
662 mutex_unlock(&group->mark_mutex);
663
664 fsnotify_put_mark(fsn_mark);
665 return 0;
666}
667
668static int fanotify_add_inode_mark(struct fsnotify_group *group,
669 struct inode *inode, __u32 mask,
670 unsigned int flags)
671{
672 struct fsnotify_mark *fsn_mark;
673 __u32 added;
674
675 pr_debug("%s: group=%p inode=%p\n", __func__, group, inode);
676
677 /*
678 * If some other task has this inode open for write we should not add
679 * an ignored mark, unless that ignored mark is supposed to survive
680 * modification changes anyway.
681 */
682 if ((flags & FAN_MARK_IGNORED_MASK) &&
683 !(flags & FAN_MARK_IGNORED_SURV_MODIFY) &&
684 (atomic_read(&inode->i_writecount) > 0))
685 return 0;
686
687 mutex_lock(&group->mark_mutex);
688 fsn_mark = fsnotify_find_mark(&inode->i_fsnotify_marks, group);
689 if (!fsn_mark) {
690 fsn_mark = fanotify_add_new_mark(group, inode, NULL);
691 if (IS_ERR(fsn_mark)) {
692 mutex_unlock(&group->mark_mutex);
693 return PTR_ERR(fsn_mark);
694 }
695 }
696 added = fanotify_mark_add_to_mask(fsn_mark, mask, flags);
697 if (added & ~inode->i_fsnotify_mask)
698 fsnotify_recalc_mask(inode->i_fsnotify_marks);
699 mutex_unlock(&group->mark_mutex);
700
701 fsnotify_put_mark(fsn_mark);
702 return 0;
703}
704
705/* fanotify syscalls */
706SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
707{
708 struct fsnotify_group *group;
709 int f_flags, fd;
710 struct user_struct *user;
711 struct fanotify_event_info *oevent;
712
713 pr_debug("%s: flags=%d event_f_flags=%d\n",
714 __func__, flags, event_f_flags);
715
716 if (!capable(CAP_SYS_ADMIN))
717 return -EPERM;
718
719#ifdef CONFIG_AUDITSYSCALL
720 if (flags & ~(FAN_ALL_INIT_FLAGS | FAN_ENABLE_AUDIT))
721#else
722 if (flags & ~FAN_ALL_INIT_FLAGS)
723#endif
724 return -EINVAL;
725
726 if (event_f_flags & ~FANOTIFY_INIT_ALL_EVENT_F_BITS)
727 return -EINVAL;
728
729 switch (event_f_flags & O_ACCMODE) {
730 case O_RDONLY:
731 case O_RDWR:
732 case O_WRONLY:
733 break;
734 default:
735 return -EINVAL;
736 }
737
738 user = get_current_user();
739 if (atomic_read(&user->fanotify_listeners) > FANOTIFY_DEFAULT_MAX_LISTENERS) {
740 free_uid(user);
741 return -EMFILE;
742 }
743
744 f_flags = O_RDWR | FMODE_NONOTIFY;
745 if (flags & FAN_CLOEXEC)
746 f_flags |= O_CLOEXEC;
747 if (flags & FAN_NONBLOCK)
748 f_flags |= O_NONBLOCK;
749
750 /* fsnotify_alloc_group takes a ref. Dropped in fanotify_release */
751 group = fsnotify_alloc_group(&fanotify_fsnotify_ops);
752 if (IS_ERR(group)) {
753 free_uid(user);
754 return PTR_ERR(group);
755 }
756
757 group->fanotify_data.user = user;
758 atomic_inc(&user->fanotify_listeners);
759
760 oevent = fanotify_alloc_event(group, NULL, FS_Q_OVERFLOW, NULL);
761 if (unlikely(!oevent)) {
762 fd = -ENOMEM;
763 goto out_destroy_group;
764 }
765 group->overflow_event = &oevent->fse;
766
767 if (force_o_largefile())
768 event_f_flags |= O_LARGEFILE;
769 group->fanotify_data.f_flags = event_f_flags;
770 init_waitqueue_head(&group->fanotify_data.access_waitq);
771 INIT_LIST_HEAD(&group->fanotify_data.access_list);
772 switch (flags & FAN_ALL_CLASS_BITS) {
773 case FAN_CLASS_NOTIF:
774 group->priority = FS_PRIO_0;
775 break;
776 case FAN_CLASS_CONTENT:
777 group->priority = FS_PRIO_1;
778 break;
779 case FAN_CLASS_PRE_CONTENT:
780 group->priority = FS_PRIO_2;
781 break;
782 default:
783 fd = -EINVAL;
784 goto out_destroy_group;
785 }
786
787 if (flags & FAN_UNLIMITED_QUEUE) {
788 fd = -EPERM;
789 if (!capable(CAP_SYS_ADMIN))
790 goto out_destroy_group;
791 group->max_events = UINT_MAX;
792 } else {
793 group->max_events = FANOTIFY_DEFAULT_MAX_EVENTS;
794 }
795
796 if (flags & FAN_UNLIMITED_MARKS) {
797 fd = -EPERM;
798 if (!capable(CAP_SYS_ADMIN))
799 goto out_destroy_group;
800 group->fanotify_data.max_marks = UINT_MAX;
801 } else {
802 group->fanotify_data.max_marks = FANOTIFY_DEFAULT_MAX_MARKS;
803 }
804
805 if (flags & FAN_ENABLE_AUDIT) {
806 fd = -EPERM;
807 if (!capable(CAP_AUDIT_WRITE))
808 goto out_destroy_group;
809 group->fanotify_data.audit = true;
810 }
811
812 fd = anon_inode_getfd("[fanotify]", &fanotify_fops, group, f_flags);
813 if (fd < 0)
814 goto out_destroy_group;
815
816 return fd;
817
818out_destroy_group:
819 fsnotify_destroy_group(group);
820 return fd;
821}
822
823static int do_fanotify_mark(int fanotify_fd, unsigned int flags, __u64 mask,
824 int dfd, const char __user *pathname)
825{
826 struct inode *inode = NULL;
827 struct vfsmount *mnt = NULL;
828 struct fsnotify_group *group;
829 struct fd f;
830 struct path path;
831 u32 valid_mask = FAN_ALL_EVENTS | FAN_EVENT_ON_CHILD;
832 int ret;
833
834 pr_debug("%s: fanotify_fd=%d flags=%x dfd=%d pathname=%p mask=%llx\n",
835 __func__, fanotify_fd, flags, dfd, pathname, mask);
836
837 /* we only use the lower 32 bits as of right now. */
838 if (mask & ((__u64)0xffffffff << 32))
839 return -EINVAL;
840
841 if (flags & ~FAN_ALL_MARK_FLAGS)
842 return -EINVAL;
843 switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE | FAN_MARK_FLUSH)) {
844 case FAN_MARK_ADD: /* fallthrough */
845 case FAN_MARK_REMOVE:
846 if (!mask)
847 return -EINVAL;
848 break;
849 case FAN_MARK_FLUSH:
850 if (flags & ~(FAN_MARK_MOUNT | FAN_MARK_FLUSH))
851 return -EINVAL;
852 break;
853 default:
854 return -EINVAL;
855 }
856
857 if (mask & FAN_ONDIR) {
858 flags |= FAN_MARK_ONDIR;
859 mask &= ~FAN_ONDIR;
860 }
861
862 if (IS_ENABLED(CONFIG_FANOTIFY_ACCESS_PERMISSIONS))
863 valid_mask |= FAN_ALL_PERM_EVENTS;
864
865 if (mask & ~valid_mask)
866 return -EINVAL;
867
868 f = fdget(fanotify_fd);
869 if (unlikely(!f.file))
870 return -EBADF;
871
872 /* verify that this is indeed an fanotify instance */
873 ret = -EINVAL;
874 if (unlikely(f.file->f_op != &fanotify_fops))
875 goto fput_and_out;
876 group = f.file->private_data;
877
878 /*
879 * group->priority == FS_PRIO_0 == FAN_CLASS_NOTIF. These are not
880 * allowed to set permission events.
881 */
882 ret = -EINVAL;
883 if (mask & FAN_ALL_PERM_EVENTS &&
884 group->priority == FS_PRIO_0)
885 goto fput_and_out;
886
887 if (flags & FAN_MARK_FLUSH) {
888 ret = 0;
889 if (flags & FAN_MARK_MOUNT)
890 fsnotify_clear_vfsmount_marks_by_group(group);
891 else
892 fsnotify_clear_inode_marks_by_group(group);
893 goto fput_and_out;
894 }
895
896 ret = fanotify_find_path(dfd, pathname, &path, flags);
897 if (ret)
898 goto fput_and_out;
899
900 /* inode held in place by reference to path; group by fget on fd */
901 if (!(flags & FAN_MARK_MOUNT))
902 inode = path.dentry->d_inode;
903 else
904 mnt = path.mnt;
905
906 /* create/update an inode mark */
907 switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE)) {
908 case FAN_MARK_ADD:
909 if (flags & FAN_MARK_MOUNT)
910 ret = fanotify_add_vfsmount_mark(group, mnt, mask, flags);
911 else
912 ret = fanotify_add_inode_mark(group, inode, mask, flags);
913 break;
914 case FAN_MARK_REMOVE:
915 if (flags & FAN_MARK_MOUNT)
916 ret = fanotify_remove_vfsmount_mark(group, mnt, mask, flags);
917 else
918 ret = fanotify_remove_inode_mark(group, inode, mask, flags);
919 break;
920 default:
921 ret = -EINVAL;
922 }
923
924 path_put(&path);
925fput_and_out:
926 fdput(f);
927 return ret;
928}
929
930SYSCALL_DEFINE5(fanotify_mark, int, fanotify_fd, unsigned int, flags,
931 __u64, mask, int, dfd,
932 const char __user *, pathname)
933{
934 return do_fanotify_mark(fanotify_fd, flags, mask, dfd, pathname);
935}
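/*
 * Minimal userspace usage of the two syscalls above (illustrative sketch
 * only; requires CAP_SYS_ADMIN, error handling omitted, the marked path
 * "/tmp" is arbitrary):
 *
 *	#include <fcntl.h>
 *	#include <sys/fanotify.h>
 *
 *	int fd = fanotify_init(FAN_CLOEXEC | FAN_CLASS_NOTIF,
 *			       O_RDONLY | O_LARGEFILE);
 *	fanotify_mark(fd, FAN_MARK_ADD | FAN_MARK_MOUNT,
 *		      FAN_OPEN | FAN_CLOSE_WRITE, AT_FDCWD, "/tmp");
 *	// read(fd, ...) then returns struct fanotify_event_metadata records
 */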
936
937#ifdef CONFIG_COMPAT
938COMPAT_SYSCALL_DEFINE6(fanotify_mark,
939 int, fanotify_fd, unsigned int, flags,
940 __u32, mask0, __u32, mask1, int, dfd,
941 const char __user *, pathname)
942{
943 return do_fanotify_mark(fanotify_fd, flags,
944#ifdef __BIG_ENDIAN
945 ((__u64)mask0 << 32) | mask1,
946#else
947 ((__u64)mask1 << 32) | mask0,
948#endif
949 dfd, pathname);
950}
951#endif
952
953/*
954 * fanotify_user_setup - Our initialization function. Note that we cannot return
955 * error because we have compiled-in VFS hooks. So an (unlikely) failure here
956 * must result in panic().
957 */
958static int __init fanotify_user_setup(void)
959{
960 fanotify_mark_cache = KMEM_CACHE(fsnotify_mark, SLAB_PANIC);
961 fanotify_event_cachep = KMEM_CACHE(fanotify_event_info, SLAB_PANIC);
962 if (IS_ENABLED(CONFIG_FANOTIFY_ACCESS_PERMISSIONS)) {
963 fanotify_perm_event_cachep =
964 KMEM_CACHE(fanotify_perm_event_info, SLAB_PANIC);
965 }
966
967 return 0;
968}
969device_initcall(fanotify_user_setup);
1// SPDX-License-Identifier: GPL-2.0
2#include <linux/fanotify.h>
3#include <linux/fcntl.h>
4#include <linux/file.h>
5#include <linux/fs.h>
6#include <linux/anon_inodes.h>
7#include <linux/fsnotify_backend.h>
8#include <linux/init.h>
9#include <linux/mount.h>
10#include <linux/namei.h>
11#include <linux/poll.h>
12#include <linux/security.h>
13#include <linux/syscalls.h>
14#include <linux/slab.h>
15#include <linux/types.h>
16#include <linux/uaccess.h>
17#include <linux/compat.h>
18#include <linux/sched/signal.h>
19#include <linux/memcontrol.h>
20#include <linux/statfs.h>
21#include <linux/exportfs.h>
22
23#include <asm/ioctls.h>
24
25#include "../../mount.h"
26#include "../fdinfo.h"
27#include "fanotify.h"
28
29#define FANOTIFY_DEFAULT_MAX_EVENTS 16384
30#define FANOTIFY_DEFAULT_MAX_MARKS 8192
31#define FANOTIFY_DEFAULT_MAX_LISTENERS 128
32
33/*
34 * All flags that may be specified in parameter event_f_flags of fanotify_init.
35 *
36 * Internal and external open flags are stored together in field f_flags of
37 * struct file. Only external open flags shall be allowed in event_f_flags.
38 * Internal flags like FMODE_NONOTIFY, FMODE_EXEC, FMODE_NOCMTIME shall be
39 * excluded.
40 */
41#define FANOTIFY_INIT_ALL_EVENT_F_BITS ( \
42 O_ACCMODE | O_APPEND | O_NONBLOCK | \
43 __O_SYNC | O_DSYNC | O_CLOEXEC | \
44 O_LARGEFILE | O_NOATIME )
45
46extern const struct fsnotify_ops fanotify_fsnotify_ops;
47
48struct kmem_cache *fanotify_mark_cache __read_mostly;
49struct kmem_cache *fanotify_event_cachep __read_mostly;
50struct kmem_cache *fanotify_perm_event_cachep __read_mostly;
51
52#define FANOTIFY_EVENT_ALIGN 4
53
54static int fanotify_event_info_len(struct fanotify_event *event)
55{
56 if (!fanotify_event_has_fid(event))
57 return 0;
58
59 return roundup(sizeof(struct fanotify_event_info_fid) +
60 sizeof(struct file_handle) + event->fh_len,
61 FANOTIFY_EVENT_ALIGN);
62}
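/*
 * Illustrative arithmetic (assuming the usual 24-byte event metadata, a
 * 12-byte struct fanotify_event_info_fid (4-byte header plus 8-byte fsid),
 * an 8-byte struct file_handle and an 8-byte opaque handle): the fid info
 * adds roundup(12 + 8 + 8, 4) = 28 bytes, so such an event occupies
 * 24 + 28 = 52 bytes of the read() buffer.
 */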
63
64/*
65 * Get an fsnotify notification event if one exists and is small
66 * enough to fit in "count". Return an error pointer if the count
67 * is not large enough. When a permission event is dequeued, its state is
68 * updated accordingly.
69 */
70static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
71 size_t count)
72{
73 size_t event_size = FAN_EVENT_METADATA_LEN;
74 struct fsnotify_event *fsn_event = NULL;
75
76 pr_debug("%s: group=%p count=%zd\n", __func__, group, count);
77
78 spin_lock(&group->notification_lock);
79 if (fsnotify_notify_queue_is_empty(group))
80 goto out;
81
82 if (FAN_GROUP_FLAG(group, FAN_REPORT_FID)) {
83 event_size += fanotify_event_info_len(
84 FANOTIFY_E(fsnotify_peek_first_event(group)));
85 }
86
87 if (event_size > count) {
88 fsn_event = ERR_PTR(-EINVAL);
89 goto out;
90 }
91 fsn_event = fsnotify_remove_first_event(group);
92 if (fanotify_is_perm_event(FANOTIFY_E(fsn_event)->mask))
93 FANOTIFY_PE(fsn_event)->state = FAN_EVENT_REPORTED;
94out:
95 spin_unlock(&group->notification_lock);
96 return fsn_event;
97}
98
99static int create_fd(struct fsnotify_group *group,
100 struct fanotify_event *event,
101 struct file **file)
102{
103 int client_fd;
104 struct file *new_file;
105
106 pr_debug("%s: group=%p event=%p\n", __func__, group, event);
107
108 client_fd = get_unused_fd_flags(group->fanotify_data.f_flags);
109 if (client_fd < 0)
110 return client_fd;
111
112 /*
113 * We need a new file handle for the userspace program so it can read
114 * even if the file was originally opened O_WRONLY. If this was an
115 * overflow event, dentry and mnt are NULL; that's fine, just don't
116 * call dentry_open().
117 */
118 if (event->path.dentry && event->path.mnt)
119 new_file = dentry_open(&event->path,
120 group->fanotify_data.f_flags | FMODE_NONOTIFY,
121 current_cred());
122 else
123 new_file = ERR_PTR(-EOVERFLOW);
124 if (IS_ERR(new_file)) {
125 /*
126 * We still send an event even if we can't open the file. This
127 * can happen, say, when tasks are gone and we try to open their
128 * /proc files, or when we try to open a WRONLY file like in
129 * sysfs. We just send the errno to userspace since there isn't
130 * much else we can do.
131 */
132 put_unused_fd(client_fd);
133 client_fd = PTR_ERR(new_file);
134 } else {
135 *file = new_file;
136 }
137
138 return client_fd;
139}
140
141/*
142 * Finish processing of permission event by setting it to ANSWERED state and
143 * drop group->notification_lock.
144 */
145static void finish_permission_event(struct fsnotify_group *group,
146 struct fanotify_perm_event *event,
147 unsigned int response)
148 __releases(&group->notification_lock)
149{
150 bool destroy = false;
151
152 assert_spin_locked(&group->notification_lock);
153 event->response = response;
154 if (event->state == FAN_EVENT_CANCELED)
155 destroy = true;
156 else
157 event->state = FAN_EVENT_ANSWERED;
158 spin_unlock(&group->notification_lock);
159 if (destroy)
160 fsnotify_destroy_event(group, &event->fae.fse);
161}
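/*
 * Permission event life cycle as seen in this file (descriptive note):
 * get_one_event() marks a dequeued permission event FAN_EVENT_REPORTED;
 * once userspace answers, the copy to userspace fails, or the group is
 * torn down, finish_permission_event() moves it to FAN_EVENT_ANSWERED.
 * If the waiter already gave up and marked it FAN_EVENT_CANCELED (done
 * outside this file), the event is freed here instead.
 */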
162
163static int process_access_response(struct fsnotify_group *group,
164 struct fanotify_response *response_struct)
165{
166 struct fanotify_perm_event *event;
167 int fd = response_struct->fd;
168 int response = response_struct->response;
169
170 pr_debug("%s: group=%p fd=%d response=%d\n", __func__, group,
171 fd, response);
172 /*
173 * make sure the response is valid, if invalid we do nothing and either
174 * userspace can send a valid response or we will clean it up after the
175 * timeout
176 */
177 switch (response & ~FAN_AUDIT) {
178 case FAN_ALLOW:
179 case FAN_DENY:
180 break;
181 default:
182 return -EINVAL;
183 }
184
185 if (fd < 0)
186 return -EINVAL;
187
188 if ((response & FAN_AUDIT) && !FAN_GROUP_FLAG(group, FAN_ENABLE_AUDIT))
189 return -EINVAL;
190
191 spin_lock(&group->notification_lock);
192 list_for_each_entry(event, &group->fanotify_data.access_list,
193 fae.fse.list) {
194 if (event->fd != fd)
195 continue;
196
197 list_del_init(&event->fae.fse.list);
198 finish_permission_event(group, event, response);
199 wake_up(&group->fanotify_data.access_waitq);
200 return 0;
201 }
202 spin_unlock(&group->notification_lock);
203
204 return -ENOENT;
205}
206
207static int copy_fid_to_user(struct fanotify_event *event, char __user *buf)
208{
209 struct fanotify_event_info_fid info = { };
210 struct file_handle handle = { };
211 unsigned char bounce[FANOTIFY_INLINE_FH_LEN], *fh;
212 size_t fh_len = event->fh_len;
213 size_t len = fanotify_event_info_len(event);
214
215 if (!len)
216 return 0;
217
218 if (WARN_ON_ONCE(len < sizeof(info) + sizeof(handle) + fh_len))
219 return -EFAULT;
220
221 /* Copy event info fid header followed by variable sized file handle */
222 info.hdr.info_type = FAN_EVENT_INFO_TYPE_FID;
223 info.hdr.len = len;
224 info.fsid = event->fid.fsid;
225 if (copy_to_user(buf, &info, sizeof(info)))
226 return -EFAULT;
227
228 buf += sizeof(info);
229 len -= sizeof(info);
230 handle.handle_type = event->fh_type;
231 handle.handle_bytes = fh_len;
232 if (copy_to_user(buf, &handle, sizeof(handle)))
233 return -EFAULT;
234
235 buf += sizeof(handle);
236 len -= sizeof(handle);
237 /*
238 * For an inline fh, copy through stack to exclude the copy from
239 * usercopy hardening protections.
240 */
241 fh = fanotify_event_fh(event);
242 if (fh_len <= FANOTIFY_INLINE_FH_LEN) {
243 memcpy(bounce, fh, fh_len);
244 fh = bounce;
245 }
246 if (copy_to_user(buf, fh, fh_len))
247 return -EFAULT;
248
249 /* Pad with 0's */
250 buf += fh_len;
251 len -= fh_len;
252 WARN_ON_ONCE(len < 0 || len >= FANOTIFY_EVENT_ALIGN);
253 if (len > 0 && clear_user(buf, len))
254 return -EFAULT;
255
256 return 0;
257}
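/*
 * Resulting buffer layout for one FAN_REPORT_FID event, as assembled by
 * copy_event_to_user() and copy_fid_to_user() below/above (descriptive
 * note, sizes vary with the file handle):
 *
 *	struct fanotify_event_metadata	(event_len covers everything below)
 *	struct fanotify_event_info_fid	(hdr.info_type = FAN_EVENT_INFO_TYPE_FID,
 *					 hdr.len = size of the fid info, fsid)
 *	struct file_handle		(handle_type, handle_bytes)
 *	unsigned char f_handle[]	(handle_bytes of opaque data)
 *	zero padding			(up to FANOTIFY_EVENT_ALIGN)
 */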
258
259static ssize_t copy_event_to_user(struct fsnotify_group *group,
260 struct fsnotify_event *fsn_event,
261 char __user *buf, size_t count)
262{
263 struct fanotify_event_metadata metadata;
264 struct fanotify_event *event;
265 struct file *f = NULL;
266 int ret, fd = FAN_NOFD;
267
268 pr_debug("%s: group=%p event=%p\n", __func__, group, fsn_event);
269
270 event = container_of(fsn_event, struct fanotify_event, fse);
271 metadata.event_len = FAN_EVENT_METADATA_LEN;
272 metadata.metadata_len = FAN_EVENT_METADATA_LEN;
273 metadata.vers = FANOTIFY_METADATA_VERSION;
274 metadata.reserved = 0;
275 metadata.mask = event->mask & FANOTIFY_OUTGOING_EVENTS;
276 metadata.pid = pid_vnr(event->pid);
277
278 if (fanotify_event_has_path(event)) {
279 fd = create_fd(group, event, &f);
280 if (fd < 0)
281 return fd;
282 } else if (fanotify_event_has_fid(event)) {
283 metadata.event_len += fanotify_event_info_len(event);
284 }
285 metadata.fd = fd;
286
287 ret = -EFAULT;
288 /*
289 * Sanity check copy size in case get_one_event() and
290 * copy_event_to_user() event_len calculations ever get out of sync.
291 */
292 if (WARN_ON_ONCE(metadata.event_len > count))
293 goto out_close_fd;
294
295 if (copy_to_user(buf, &metadata, FAN_EVENT_METADATA_LEN))
296 goto out_close_fd;
297
298 if (fanotify_is_perm_event(event->mask))
299 FANOTIFY_PE(fsn_event)->fd = fd;
300
301 if (fanotify_event_has_path(event)) {
302 fd_install(fd, f);
303 } else if (fanotify_event_has_fid(event)) {
304 ret = copy_fid_to_user(event, buf + FAN_EVENT_METADATA_LEN);
305 if (ret < 0)
306 return ret;
307 }
308
309 return metadata.event_len;
310
311out_close_fd:
312 if (fd != FAN_NOFD) {
313 put_unused_fd(fd);
314 fput(f);
315 }
316 return ret;
317}
318
319/* fanotify userspace file descriptor functions */
320static __poll_t fanotify_poll(struct file *file, poll_table *wait)
321{
322 struct fsnotify_group *group = file->private_data;
323 __poll_t ret = 0;
324
325 poll_wait(file, &group->notification_waitq, wait);
326 spin_lock(&group->notification_lock);
327 if (!fsnotify_notify_queue_is_empty(group))
328 ret = EPOLLIN | EPOLLRDNORM;
329 spin_unlock(&group->notification_lock);
330
331 return ret;
332}
333
334static ssize_t fanotify_read(struct file *file, char __user *buf,
335 size_t count, loff_t *pos)
336{
337 struct fsnotify_group *group;
338 struct fsnotify_event *kevent;
339 char __user *start;
340 int ret;
341 DEFINE_WAIT_FUNC(wait, woken_wake_function);
342
343 start = buf;
344 group = file->private_data;
345
346 pr_debug("%s: group=%p\n", __func__, group);
347
348 add_wait_queue(&group->notification_waitq, &wait);
349 while (1) {
350 kevent = get_one_event(group, count);
351 if (IS_ERR(kevent)) {
352 ret = PTR_ERR(kevent);
353 break;
354 }
355
356 if (!kevent) {
357 ret = -EAGAIN;
358 if (file->f_flags & O_NONBLOCK)
359 break;
360
361 ret = -ERESTARTSYS;
362 if (signal_pending(current))
363 break;
364
365 if (start != buf)
366 break;
367
368 wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
369 continue;
370 }
371
372 ret = copy_event_to_user(group, kevent, buf, count);
373 if (unlikely(ret == -EOPENSTALE)) {
374 /*
375 * We cannot report events with stale fd so drop it.
376 * Setting ret to 0 will continue the event loop and
377 * do the right thing if there are no more events to
378 * read (i.e. return bytes read, -EAGAIN or wait).
379 */
380 ret = 0;
381 }
382
383 /*
384 * Permission events get queued to wait for response. Other
385 * events can be destroyed now.
386 */
387 if (!fanotify_is_perm_event(FANOTIFY_E(kevent)->mask)) {
388 fsnotify_destroy_event(group, kevent);
389 } else {
390 if (ret <= 0) {
391 spin_lock(&group->notification_lock);
392 finish_permission_event(group,
393 FANOTIFY_PE(kevent), FAN_DENY);
394 wake_up(&group->fanotify_data.access_waitq);
395 } else {
396 spin_lock(&group->notification_lock);
397 list_add_tail(&kevent->list,
398 &group->fanotify_data.access_list);
399 spin_unlock(&group->notification_lock);
400 }
401 }
402 if (ret < 0)
403 break;
404 buf += ret;
405 count -= ret;
406 }
407 remove_wait_queue(&group->notification_waitq, &wait);
408
409 if (start != buf && ret != -EFAULT)
410 ret = buf - start;
411 return ret;
412}
413
414static ssize_t fanotify_write(struct file *file, const char __user *buf, size_t count, loff_t *pos)
415{
416 struct fanotify_response response = { .fd = -1, .response = -1 };
417 struct fsnotify_group *group;
418 int ret;
419
420 if (!IS_ENABLED(CONFIG_FANOTIFY_ACCESS_PERMISSIONS))
421 return -EINVAL;
422
423 group = file->private_data;
424
425 if (count > sizeof(response))
426 count = sizeof(response);
427
428 pr_debug("%s: group=%p count=%zu\n", __func__, group, count);
429
430 if (copy_from_user(&response, buf, count))
431 return -EFAULT;
432
433 ret = process_access_response(group, &response);
434 if (ret < 0)
435 count = ret;
436
437 return count;
438}
439
440static int fanotify_release(struct inode *ignored, struct file *file)
441{
442 struct fsnotify_group *group = file->private_data;
443 struct fanotify_perm_event *event;
444 struct fsnotify_event *fsn_event;
445
446 /*
447 * Stop new events from arriving in the notification queue. Since
448 * userspace cannot use the fanotify fd anymore, no event can enter or
449 * leave access_list by now either.
450 */
451 fsnotify_group_stop_queueing(group);
452
453 /*
454 * Process all permission events on access_list and notification queue
455 * and simulate reply from userspace.
456 */
457 spin_lock(&group->notification_lock);
458 while (!list_empty(&group->fanotify_data.access_list)) {
459 event = list_first_entry(&group->fanotify_data.access_list,
460 struct fanotify_perm_event, fae.fse.list);
461 list_del_init(&event->fae.fse.list);
462 finish_permission_event(group, event, FAN_ALLOW);
463 spin_lock(&group->notification_lock);
464 }
465
466 /*
467 * Destroy all non-permission events. For permission events just
468 * dequeue them and set the response. They will be freed once the
469 * response is consumed and fanotify_get_response() returns.
470 */
471 while (!fsnotify_notify_queue_is_empty(group)) {
472 fsn_event = fsnotify_remove_first_event(group);
473 if (!(FANOTIFY_E(fsn_event)->mask & FANOTIFY_PERM_EVENTS)) {
474 spin_unlock(&group->notification_lock);
475 fsnotify_destroy_event(group, fsn_event);
476 } else {
477 finish_permission_event(group, FANOTIFY_PE(fsn_event),
478 FAN_ALLOW);
479 }
480 spin_lock(&group->notification_lock);
481 }
482 spin_unlock(&group->notification_lock);
483
484 /* Response for all permission events is set, wake up waiters */
485 wake_up(&group->fanotify_data.access_waitq);
486
487 /* matches the fanotify_init->fsnotify_alloc_group */
488 fsnotify_destroy_group(group);
489
490 return 0;
491}
492
493static long fanotify_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
494{
495 struct fsnotify_group *group;
496 struct fsnotify_event *fsn_event;
497 void __user *p;
498 int ret = -ENOTTY;
499 size_t send_len = 0;
500
501 group = file->private_data;
502
503 p = (void __user *) arg;
504
505 switch (cmd) {
506 case FIONREAD:
507 spin_lock(&group->notification_lock);
508 list_for_each_entry(fsn_event, &group->notification_list, list)
509 send_len += FAN_EVENT_METADATA_LEN;
510 spin_unlock(&group->notification_lock);
511 ret = put_user(send_len, (int __user *) p);
512 break;
513 }
514
515 return ret;
516}
517
518static const struct file_operations fanotify_fops = {
519 .show_fdinfo = fanotify_show_fdinfo,
520 .poll = fanotify_poll,
521 .read = fanotify_read,
522 .write = fanotify_write,
523 .fasync = NULL,
524 .release = fanotify_release,
525 .unlocked_ioctl = fanotify_ioctl,
526 .compat_ioctl = fanotify_ioctl,
527 .llseek = noop_llseek,
528};
529
530static int fanotify_find_path(int dfd, const char __user *filename,
531 struct path *path, unsigned int flags, __u64 mask,
532 unsigned int obj_type)
533{
534 int ret;
535
536 pr_debug("%s: dfd=%d filename=%p flags=%x\n", __func__,
537 dfd, filename, flags);
538
539 if (filename == NULL) {
540 struct fd f = fdget(dfd);
541
542 ret = -EBADF;
543 if (!f.file)
544 goto out;
545
546 ret = -ENOTDIR;
547 if ((flags & FAN_MARK_ONLYDIR) &&
548 !(S_ISDIR(file_inode(f.file)->i_mode))) {
549 fdput(f);
550 goto out;
551 }
552
553 *path = f.file->f_path;
554 path_get(path);
555 fdput(f);
556 } else {
557 unsigned int lookup_flags = 0;
558
559 if (!(flags & FAN_MARK_DONT_FOLLOW))
560 lookup_flags |= LOOKUP_FOLLOW;
561 if (flags & FAN_MARK_ONLYDIR)
562 lookup_flags |= LOOKUP_DIRECTORY;
563
564 ret = user_path_at(dfd, filename, lookup_flags, path);
565 if (ret)
566 goto out;
567 }
568
569 /* you can only watch an inode if you have read permissions on it */
570 ret = inode_permission(path->dentry->d_inode, MAY_READ);
571 if (ret) {
572 path_put(path);
573 goto out;
574 }
575
576 ret = security_path_notify(path, mask, obj_type);
577 if (ret)
578 path_put(path);
579
580out:
581 return ret;
582}
583
584static __u32 fanotify_mark_remove_from_mask(struct fsnotify_mark *fsn_mark,
585 __u32 mask,
586 unsigned int flags,
587 int *destroy)
588{
589 __u32 oldmask = 0;
590
591 spin_lock(&fsn_mark->lock);
592 if (!(flags & FAN_MARK_IGNORED_MASK)) {
593 oldmask = fsn_mark->mask;
594 fsn_mark->mask &= ~mask;
595 } else {
596 fsn_mark->ignored_mask &= ~mask;
597 }
598 *destroy = !(fsn_mark->mask | fsn_mark->ignored_mask);
599 spin_unlock(&fsn_mark->lock);
600
601 return mask & oldmask;
602}
603
604static int fanotify_remove_mark(struct fsnotify_group *group,
605 fsnotify_connp_t *connp, __u32 mask,
606 unsigned int flags)
607{
608 struct fsnotify_mark *fsn_mark = NULL;
609 __u32 removed;
610 int destroy_mark;
611
612 mutex_lock(&group->mark_mutex);
613 fsn_mark = fsnotify_find_mark(connp, group);
614 if (!fsn_mark) {
615 mutex_unlock(&group->mark_mutex);
616 return -ENOENT;
617 }
618
619 removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags,
620 &destroy_mark);
621 if (removed & fsnotify_conn_mask(fsn_mark->connector))
622 fsnotify_recalc_mask(fsn_mark->connector);
623 if (destroy_mark)
624 fsnotify_detach_mark(fsn_mark);
625 mutex_unlock(&group->mark_mutex);
626 if (destroy_mark)
627 fsnotify_free_mark(fsn_mark);
628
629 /* matches the fsnotify_find_mark() */
630 fsnotify_put_mark(fsn_mark);
631 return 0;
632}
633
634static int fanotify_remove_vfsmount_mark(struct fsnotify_group *group,
635 struct vfsmount *mnt, __u32 mask,
636 unsigned int flags)
637{
638 return fanotify_remove_mark(group, &real_mount(mnt)->mnt_fsnotify_marks,
639 mask, flags);
640}
641
642static int fanotify_remove_sb_mark(struct fsnotify_group *group,
643 struct super_block *sb, __u32 mask,
644 unsigned int flags)
645{
646 return fanotify_remove_mark(group, &sb->s_fsnotify_marks, mask, flags);
647}
648
649static int fanotify_remove_inode_mark(struct fsnotify_group *group,
650 struct inode *inode, __u32 mask,
651 unsigned int flags)
652{
653 return fanotify_remove_mark(group, &inode->i_fsnotify_marks, mask,
654 flags);
655}
656
657static __u32 fanotify_mark_add_to_mask(struct fsnotify_mark *fsn_mark,
658 __u32 mask,
659 unsigned int flags)
660{
661 __u32 oldmask = -1;
662
663 spin_lock(&fsn_mark->lock);
664 if (!(flags & FAN_MARK_IGNORED_MASK)) {
665 oldmask = fsn_mark->mask;
666 fsn_mark->mask |= mask;
667 } else {
668 fsn_mark->ignored_mask |= mask;
669 if (flags & FAN_MARK_IGNORED_SURV_MODIFY)
670 fsn_mark->flags |= FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY;
671 }
672 spin_unlock(&fsn_mark->lock);
673
674 return mask & ~oldmask;
675}
676
677static struct fsnotify_mark *fanotify_add_new_mark(struct fsnotify_group *group,
678 fsnotify_connp_t *connp,
679 unsigned int type,
680 __kernel_fsid_t *fsid)
681{
682 struct fsnotify_mark *mark;
683 int ret;
684
685 if (atomic_read(&group->num_marks) > group->fanotify_data.max_marks)
686 return ERR_PTR(-ENOSPC);
687
688 mark = kmem_cache_alloc(fanotify_mark_cache, GFP_KERNEL);
689 if (!mark)
690 return ERR_PTR(-ENOMEM);
691
692 fsnotify_init_mark(mark, group);
693 ret = fsnotify_add_mark_locked(mark, connp, type, 0, fsid);
694 if (ret) {
695 fsnotify_put_mark(mark);
696 return ERR_PTR(ret);
697 }
698
699 return mark;
700}
701
702
703static int fanotify_add_mark(struct fsnotify_group *group,
704 fsnotify_connp_t *connp, unsigned int type,
705 __u32 mask, unsigned int flags,
706 __kernel_fsid_t *fsid)
707{
708 struct fsnotify_mark *fsn_mark;
709 __u32 added;
710
711 mutex_lock(&group->mark_mutex);
712 fsn_mark = fsnotify_find_mark(connp, group);
713 if (!fsn_mark) {
714 fsn_mark = fanotify_add_new_mark(group, connp, type, fsid);
715 if (IS_ERR(fsn_mark)) {
716 mutex_unlock(&group->mark_mutex);
717 return PTR_ERR(fsn_mark);
718 }
719 }
720 added = fanotify_mark_add_to_mask(fsn_mark, mask, flags);
721 if (added & ~fsnotify_conn_mask(fsn_mark->connector))
722 fsnotify_recalc_mask(fsn_mark->connector);
723 mutex_unlock(&group->mark_mutex);
724
725 fsnotify_put_mark(fsn_mark);
726 return 0;
727}
728
729static int fanotify_add_vfsmount_mark(struct fsnotify_group *group,
730 struct vfsmount *mnt, __u32 mask,
731 unsigned int flags, __kernel_fsid_t *fsid)
732{
733 return fanotify_add_mark(group, &real_mount(mnt)->mnt_fsnotify_marks,
734 FSNOTIFY_OBJ_TYPE_VFSMOUNT, mask, flags, fsid);
735}
736
737static int fanotify_add_sb_mark(struct fsnotify_group *group,
738 struct super_block *sb, __u32 mask,
739 unsigned int flags, __kernel_fsid_t *fsid)
740{
741 return fanotify_add_mark(group, &sb->s_fsnotify_marks,
742 FSNOTIFY_OBJ_TYPE_SB, mask, flags, fsid);
743}
744
745static int fanotify_add_inode_mark(struct fsnotify_group *group,
746 struct inode *inode, __u32 mask,
747 unsigned int flags, __kernel_fsid_t *fsid)
748{
749 pr_debug("%s: group=%p inode=%p\n", __func__, group, inode);
750
751 /*
752 * If some other task has this inode open for write we should not add
753 * an ignored mark, unless that ignored mark is supposed to survive
754 * modification changes anyway.
755 */
756 if ((flags & FAN_MARK_IGNORED_MASK) &&
757 !(flags & FAN_MARK_IGNORED_SURV_MODIFY) &&
758 inode_is_open_for_write(inode))
759 return 0;
760
761 return fanotify_add_mark(group, &inode->i_fsnotify_marks,
762 FSNOTIFY_OBJ_TYPE_INODE, mask, flags, fsid);
763}
764
765/* fanotify syscalls */
766SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
767{
768 struct fsnotify_group *group;
769 int f_flags, fd;
770 struct user_struct *user;
771 struct fanotify_event *oevent;
772
773 pr_debug("%s: flags=%x event_f_flags=%x\n",
774 __func__, flags, event_f_flags);
775
776 if (!capable(CAP_SYS_ADMIN))
777 return -EPERM;
778
779#ifdef CONFIG_AUDITSYSCALL
780 if (flags & ~(FANOTIFY_INIT_FLAGS | FAN_ENABLE_AUDIT))
781#else
782 if (flags & ~FANOTIFY_INIT_FLAGS)
783#endif
784 return -EINVAL;
785
786 if (event_f_flags & ~FANOTIFY_INIT_ALL_EVENT_F_BITS)
787 return -EINVAL;
788
789 switch (event_f_flags & O_ACCMODE) {
790 case O_RDONLY:
791 case O_RDWR:
792 case O_WRONLY:
793 break;
794 default:
795 return -EINVAL;
796 }
797
798 if ((flags & FAN_REPORT_FID) &&
799 (flags & FANOTIFY_CLASS_BITS) != FAN_CLASS_NOTIF)
800 return -EINVAL;
801
802 user = get_current_user();
803 if (atomic_read(&user->fanotify_listeners) > FANOTIFY_DEFAULT_MAX_LISTENERS) {
804 free_uid(user);
805 return -EMFILE;
806 }
807
808 f_flags = O_RDWR | FMODE_NONOTIFY;
809 if (flags & FAN_CLOEXEC)
810 f_flags |= O_CLOEXEC;
811 if (flags & FAN_NONBLOCK)
812 f_flags |= O_NONBLOCK;
813
814 /* fsnotify_alloc_group takes a ref. Dropped in fanotify_release */
815 group = fsnotify_alloc_group(&fanotify_fsnotify_ops);
816 if (IS_ERR(group)) {
817 free_uid(user);
818 return PTR_ERR(group);
819 }
820
821 group->fanotify_data.user = user;
822 group->fanotify_data.flags = flags;
823 atomic_inc(&user->fanotify_listeners);
824 group->memcg = get_mem_cgroup_from_mm(current->mm);
825
826 oevent = fanotify_alloc_event(group, NULL, FS_Q_OVERFLOW, NULL,
827 FSNOTIFY_EVENT_NONE, NULL);
828 if (unlikely(!oevent)) {
829 fd = -ENOMEM;
830 goto out_destroy_group;
831 }
832 group->overflow_event = &oevent->fse;
833
834 if (force_o_largefile())
835 event_f_flags |= O_LARGEFILE;
836 group->fanotify_data.f_flags = event_f_flags;
837 init_waitqueue_head(&group->fanotify_data.access_waitq);
838 INIT_LIST_HEAD(&group->fanotify_data.access_list);
839 switch (flags & FANOTIFY_CLASS_BITS) {
840 case FAN_CLASS_NOTIF:
841 group->priority = FS_PRIO_0;
842 break;
843 case FAN_CLASS_CONTENT:
844 group->priority = FS_PRIO_1;
845 break;
846 case FAN_CLASS_PRE_CONTENT:
847 group->priority = FS_PRIO_2;
848 break;
849 default:
850 fd = -EINVAL;
851 goto out_destroy_group;
852 }
853
854 if (flags & FAN_UNLIMITED_QUEUE) {
855 fd = -EPERM;
856 if (!capable(CAP_SYS_ADMIN))
857 goto out_destroy_group;
858 group->max_events = UINT_MAX;
859 } else {
860 group->max_events = FANOTIFY_DEFAULT_MAX_EVENTS;
861 }
862
863 if (flags & FAN_UNLIMITED_MARKS) {
864 fd = -EPERM;
865 if (!capable(CAP_SYS_ADMIN))
866 goto out_destroy_group;
867 group->fanotify_data.max_marks = UINT_MAX;
868 } else {
869 group->fanotify_data.max_marks = FANOTIFY_DEFAULT_MAX_MARKS;
870 }
871
872 if (flags & FAN_ENABLE_AUDIT) {
873 fd = -EPERM;
874 if (!capable(CAP_AUDIT_WRITE))
875 goto out_destroy_group;
876 }
877
878 fd = anon_inode_getfd("[fanotify]", &fanotify_fops, group, f_flags);
879 if (fd < 0)
880 goto out_destroy_group;
881
882 return fd;
883
884out_destroy_group:
885 fsnotify_destroy_group(group);
886 return fd;
887}
888
889/* Check if filesystem can encode a unique fid */
890static int fanotify_test_fid(struct path *path, __kernel_fsid_t *fsid)
891{
892 __kernel_fsid_t root_fsid;
893 int err;
894
895 /*
896 * Make sure path is not in filesystem with zero fsid (e.g. tmpfs).
897 */
898 err = vfs_get_fsid(path->dentry, fsid);
899 if (err)
900 return err;
901
902 if (!fsid->val[0] && !fsid->val[1])
903 return -ENODEV;
904
905 /*
906 * Make sure path is not inside a filesystem subvolume (e.g. btrfs)
907 * which uses a different fsid than sb root.
908 */
909 err = vfs_get_fsid(path->dentry->d_sb->s_root, &root_fsid);
910 if (err)
911 return err;
912
913 if (root_fsid.val[0] != fsid->val[0] ||
914 root_fsid.val[1] != fsid->val[1])
915 return -EXDEV;
916
917 /*
918 * We need to make sure that the file system supports at least
919 * encoding a file handle so user can use name_to_handle_at() to
920 * compare fid returned with event to the file handle of watched
921 * objects. However, name_to_handle_at() requires that the
922 * filesystem also supports decoding file handles.
923 */
924 if (!path->dentry->d_sb->s_export_op ||
925 !path->dentry->d_sb->s_export_op->fh_to_dentry)
926 return -EOPNOTSUPP;
927
928 return 0;
929}
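/*
 * Userspace side of the fid comparison mentioned above (illustrative
 * sketch; error handling omitted, the watched path is hypothetical):
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <stdlib.h>
 *
 *	struct file_handle *fh = malloc(sizeof(*fh) + MAX_HANDLE_SZ);
 *	int mount_id;
 *
 *	fh->handle_bytes = MAX_HANDLE_SZ;
 *	name_to_handle_at(AT_FDCWD, "/watched/file", fh, &mount_id, 0);
 *	// compare fh->handle_type / handle_bytes / f_handle with the
 *	// file handle embedded in struct fanotify_event_info_fid
 */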
930
931static int fanotify_events_supported(struct path *path, __u64 mask)
932{
933 /*
934 * Some filesystems such as 'proc' acquire unusual locks when opening
935 * files. For them fanotify permission events have high chances of
936 * deadlocking the system - open done when reporting fanotify event
937 * blocks on this "unusual" lock while another process holding the lock
938 * waits for fanotify permission event to be answered. Just disallow
939 * permission events for such filesystems.
940 */
941 if (mask & FANOTIFY_PERM_EVENTS &&
942 path->mnt->mnt_sb->s_type->fs_flags & FS_DISALLOW_NOTIFY_PERM)
943 return -EINVAL;
944 return 0;
945}
946
947static int do_fanotify_mark(int fanotify_fd, unsigned int flags, __u64 mask,
948 int dfd, const char __user *pathname)
949{
950 struct inode *inode = NULL;
951 struct vfsmount *mnt = NULL;
952 struct fsnotify_group *group;
953 struct fd f;
954 struct path path;
955 __kernel_fsid_t __fsid, *fsid = NULL;
956 u32 valid_mask = FANOTIFY_EVENTS | FANOTIFY_EVENT_FLAGS;
957 unsigned int mark_type = flags & FANOTIFY_MARK_TYPE_BITS;
958 unsigned int obj_type;
959 int ret;
960
961 pr_debug("%s: fanotify_fd=%d flags=%x dfd=%d pathname=%p mask=%llx\n",
962 __func__, fanotify_fd, flags, dfd, pathname, mask);
963
964 /* we only use the lower 32 bits as of right now. */
965 if (mask & ((__u64)0xffffffff << 32))
966 return -EINVAL;
967
968 if (flags & ~FANOTIFY_MARK_FLAGS)
969 return -EINVAL;
970
971 switch (mark_type) {
972 case FAN_MARK_INODE:
973 obj_type = FSNOTIFY_OBJ_TYPE_INODE;
974 break;
975 case FAN_MARK_MOUNT:
976 obj_type = FSNOTIFY_OBJ_TYPE_VFSMOUNT;
977 break;
978 case FAN_MARK_FILESYSTEM:
979 obj_type = FSNOTIFY_OBJ_TYPE_SB;
980 break;
981 default:
982 return -EINVAL;
983 }
984
985 switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE | FAN_MARK_FLUSH)) {
986 case FAN_MARK_ADD: /* fallthrough */
987 case FAN_MARK_REMOVE:
988 if (!mask)
989 return -EINVAL;
990 break;
991 case FAN_MARK_FLUSH:
992 if (flags & ~(FANOTIFY_MARK_TYPE_BITS | FAN_MARK_FLUSH))
993 return -EINVAL;
994 break;
995 default:
996 return -EINVAL;
997 }
998
999 if (IS_ENABLED(CONFIG_FANOTIFY_ACCESS_PERMISSIONS))
1000 valid_mask |= FANOTIFY_PERM_EVENTS;
1001
1002 if (mask & ~valid_mask)
1003 return -EINVAL;
1004
1005 f = fdget(fanotify_fd);
1006 if (unlikely(!f.file))
1007 return -EBADF;
1008
1009 /* verify that this is indeed an fanotify instance */
1010 ret = -EINVAL;
1011 if (unlikely(f.file->f_op != &fanotify_fops))
1012 goto fput_and_out;
1013 group = f.file->private_data;
1014
1015 /*
1016 * group->priority == FS_PRIO_0 == FAN_CLASS_NOTIF. These are not
1017 * allowed to set permission events.
1018 */
1019 ret = -EINVAL;
1020 if (mask & FANOTIFY_PERM_EVENTS &&
1021 group->priority == FS_PRIO_0)
1022 goto fput_and_out;
1023
1024 /*
1025 * Events with data type inode do not carry enough information to report
1026 * event->fd, so we do not allow setting a mask for inode events unless
1027 * the group supports reporting fid.
1028 * Inode events are not supported on a mount mark, because they do not
1029 * carry enough information (i.e. path) to be filtered by mount point.
1030 */
1031 if (mask & FANOTIFY_INODE_EVENTS &&
1032 (!FAN_GROUP_FLAG(group, FAN_REPORT_FID) ||
1033 mark_type == FAN_MARK_MOUNT))
1034 goto fput_and_out;
1035
1036 if (flags & FAN_MARK_FLUSH) {
1037 ret = 0;
1038 if (mark_type == FAN_MARK_MOUNT)
1039 fsnotify_clear_vfsmount_marks_by_group(group);
1040 else if (mark_type == FAN_MARK_FILESYSTEM)
1041 fsnotify_clear_sb_marks_by_group(group);
1042 else
1043 fsnotify_clear_inode_marks_by_group(group);
1044 goto fput_and_out;
1045 }
1046
1047 ret = fanotify_find_path(dfd, pathname, &path, flags,
1048 (mask & ALL_FSNOTIFY_EVENTS), obj_type);
1049 if (ret)
1050 goto fput_and_out;
1051
1052 if (flags & FAN_MARK_ADD) {
1053 ret = fanotify_events_supported(&path, mask);
1054 if (ret)
1055 goto path_put_and_out;
1056 }
1057
1058 if (FAN_GROUP_FLAG(group, FAN_REPORT_FID)) {
1059 ret = fanotify_test_fid(&path, &__fsid);
1060 if (ret)
1061 goto path_put_and_out;
1062
1063 fsid = &__fsid;
1064 }
1065
1066 /* inode held in place by reference to path; group by fget on fd */
1067 if (mark_type == FAN_MARK_INODE)
1068 inode = path.dentry->d_inode;
1069 else
1070 mnt = path.mnt;
1071
1072 /* create/update an inode mark */
1073 switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE)) {
1074 case FAN_MARK_ADD:
1075 if (mark_type == FAN_MARK_MOUNT)
1076 ret = fanotify_add_vfsmount_mark(group, mnt, mask,
1077 flags, fsid);
1078 else if (mark_type == FAN_MARK_FILESYSTEM)
1079 ret = fanotify_add_sb_mark(group, mnt->mnt_sb, mask,
1080 flags, fsid);
1081 else
1082 ret = fanotify_add_inode_mark(group, inode, mask,
1083 flags, fsid);
1084 break;
1085 case FAN_MARK_REMOVE:
1086 if (mark_type == FAN_MARK_MOUNT)
1087 ret = fanotify_remove_vfsmount_mark(group, mnt, mask,
1088 flags);
1089 else if (mark_type == FAN_MARK_FILESYSTEM)
1090 ret = fanotify_remove_sb_mark(group, mnt->mnt_sb, mask,
1091 flags);
1092 else
1093 ret = fanotify_remove_inode_mark(group, inode, mask,
1094 flags);
1095 break;
1096 default:
1097 ret = -EINVAL;
1098 }
1099
1100path_put_and_out:
1101 path_put(&path);
1102fput_and_out:
1103 fdput(f);
1104 return ret;
1105}
1106
1107SYSCALL_DEFINE5(fanotify_mark, int, fanotify_fd, unsigned int, flags,
1108 __u64, mask, int, dfd,
1109 const char __user *, pathname)
1110{
1111 return do_fanotify_mark(fanotify_fd, flags, mask, dfd, pathname);
1112}
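/*
 * Illustrative userspace sketch of the fid-reporting mode handled above
 * (error handling omitted; the watched path "/home" is arbitrary and the
 * directory-entry events assume a kernel providing FAN_CREATE/FAN_DELETE):
 *
 *	#include <fcntl.h>
 *	#include <sys/fanotify.h>
 *
 *	int fd = fanotify_init(FAN_CLOEXEC | FAN_CLASS_NOTIF | FAN_REPORT_FID,
 *			       O_RDONLY);
 *	fanotify_mark(fd, FAN_MARK_ADD | FAN_MARK_FILESYSTEM,
 *		      FAN_CREATE | FAN_DELETE | FAN_ONDIR, AT_FDCWD, "/home");
 *	// events now carry a struct fanotify_event_info_fid instead of an
 *	// open fd; metadata->fd is FAN_NOFD
 */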
1113
1114#ifdef CONFIG_COMPAT
1115COMPAT_SYSCALL_DEFINE6(fanotify_mark,
1116 int, fanotify_fd, unsigned int, flags,
1117 __u32, mask0, __u32, mask1, int, dfd,
1118 const char __user *, pathname)
1119{
1120 return do_fanotify_mark(fanotify_fd, flags,
1121#ifdef __BIG_ENDIAN
1122 ((__u64)mask0 << 32) | mask1,
1123#else
1124 ((__u64)mask1 << 32) | mask0,
1125#endif
1126 dfd, pathname);
1127}
1128#endif
1129
1130/*
1131 * fanotify_user_setup - Our initialization function. Note that we cannot return
1132 * error because we have compiled-in VFS hooks. So an (unlikely) failure here
1133 * must result in panic().
1134 */
1135static int __init fanotify_user_setup(void)
1136{
1137 BUILD_BUG_ON(HWEIGHT32(FANOTIFY_INIT_FLAGS) != 8);
1138 BUILD_BUG_ON(HWEIGHT32(FANOTIFY_MARK_FLAGS) != 9);
1139
1140 fanotify_mark_cache = KMEM_CACHE(fsnotify_mark,
1141 SLAB_PANIC|SLAB_ACCOUNT);
1142 fanotify_event_cachep = KMEM_CACHE(fanotify_event, SLAB_PANIC);
1143 if (IS_ENABLED(CONFIG_FANOTIFY_ACCESS_PERMISSIONS)) {
1144 fanotify_perm_event_cachep =
1145 KMEM_CACHE(fanotify_perm_event, SLAB_PANIC);
1146 }
1147
1148 return 0;
1149}
1150device_initcall(fanotify_user_setup);