// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2008 Red Hat, Inc., Eric Paris <eparis@redhat.com>
 */

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/srcu.h>
#include <linux/rculist.h>
#include <linux/wait.h>
#include <linux/memcontrol.h>

#include <linux/fsnotify_backend.h>
#include "fsnotify.h"

#include <linux/atomic.h>

/*
 * Final freeing of a group
 */
static void fsnotify_final_destroy_group(struct fsnotify_group *group)
{
	if (group->ops->free_group_priv)
		group->ops->free_group_priv(group);

	mem_cgroup_put(group->memcg);
	mutex_destroy(&group->mark_mutex);

	kfree(group);
}

/*
 * Stop queueing new events for this group. Once this function returns
 * fsnotify_add_event() will not add any new events to the group's queue.
 */
void fsnotify_group_stop_queueing(struct fsnotify_group *group)
{
	spin_lock(&group->notification_lock);
	group->shutdown = true;
	spin_unlock(&group->notification_lock);
}
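
/*
 * Illustrative sketch only (not part of this file): the queueing side is
 * expected to test the shutdown flag under the same notification_lock
 * before linking an event, so once fsnotify_group_stop_queueing() returns
 * nothing new can reach the queue. Roughly (see fs/notify/notification.c;
 * names approximate), the event is dropped instead of queued:
 *
 *	spin_lock(&group->notification_lock);
 *	if (group->shutdown) {
 *		spin_unlock(&group->notification_lock);
 *		return ret;
 *	}
 *	list_add_tail(&event->list, &group->notification_list);
 */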

/*
 * Trying to get rid of a group. Remove all marks, flush all events and release
 * the group reference.
 * Note that another thread calling fsnotify_clear_marks_by_group() may still
 * hold a ref to the group.
 */
void fsnotify_destroy_group(struct fsnotify_group *group)
{
	/*
	 * Stop queueing new events. The code below is careful enough to not
	 * require this but fanotify needs to stop queueing events even before
	 * fsnotify_destroy_group() is called and this makes the other callers
	 * of fsnotify_destroy_group() see the same behavior.
	 */
	fsnotify_group_stop_queueing(group);

	/* Clear all marks for this group and queue them for destruction */
	fsnotify_clear_marks_by_group(group, FSNOTIFY_OBJ_ALL_TYPES_MASK);

	/*
	 * Some marks can still be pinned when waiting for response from
	 * userspace. Wait for those now. fsnotify_prepare_user_wait() will
	 * not succeed now so this wait is race-free.
	 */
	wait_event(group->notification_waitq, !atomic_read(&group->user_waits));

	/*
	 * Wait until all marks get really destroyed. We could actually destroy
	 * them ourselves instead of waiting for worker to do it, however that
	 * would be racy as worker can already be processing some marks before
	 * we even entered fsnotify_destroy_group().
	 */
	fsnotify_wait_marks_destroyed();

	/*
	 * Since we have waited for fsnotify_mark_srcu in
	 * fsnotify_mark_destroy_list() there can be no outstanding event
	 * notification against this group. So clearing the notification queue
	 * of all events is reliable now.
	 */
	fsnotify_flush_notify(group);

	/*
	 * Destroy overflow event (we cannot use fsnotify_destroy_event() as
	 * that deliberately ignores overflow events).
	 */
	if (group->overflow_event)
		group->ops->free_event(group->overflow_event);

	fsnotify_put_group(group);
}

/*
 * Get reference to a group.
 */
void fsnotify_get_group(struct fsnotify_group *group)
{
	refcount_inc(&group->refcnt);
}

/*
 * Drop a reference to a group. Free it if this was the last reference.
 */
void fsnotify_put_group(struct fsnotify_group *group)
{
	if (refcount_dec_and_test(&group->refcnt))
		fsnotify_final_destroy_group(group);
}
EXPORT_SYMBOL_GPL(fsnotify_put_group);

static struct fsnotify_group *__fsnotify_alloc_group(
				const struct fsnotify_ops *ops, gfp_t gfp)
{
	struct fsnotify_group *group;

	group = kzalloc(sizeof(struct fsnotify_group), gfp);
	if (!group)
		return ERR_PTR(-ENOMEM);

	/* set to 0 when there are no external references to this group */
	refcount_set(&group->refcnt, 1);
	atomic_set(&group->user_waits, 0);

	spin_lock_init(&group->notification_lock);
	INIT_LIST_HEAD(&group->notification_list);
	init_waitqueue_head(&group->notification_waitq);
	group->max_events = UINT_MAX;

	mutex_init(&group->mark_mutex);
	INIT_LIST_HEAD(&group->marks_list);

	group->ops = ops;

	return group;
}

/*
 * Create a new fsnotify_group and hold a reference for the group returned.
 */
struct fsnotify_group *fsnotify_alloc_group(const struct fsnotify_ops *ops)
{
	return __fsnotify_alloc_group(ops, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(fsnotify_alloc_group);

/*
 * Create a new fsnotify_group whose allocations are charged to the current
 * task's memory cgroup, and hold a reference for the group returned.
 */
struct fsnotify_group *fsnotify_alloc_user_group(const struct fsnotify_ops *ops)
{
	return __fsnotify_alloc_group(ops, GFP_KERNEL_ACCOUNT);
}
EXPORT_SYMBOL_GPL(fsnotify_alloc_user_group);

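/*
 * Illustrative sketch only (not part of this file): a backend would typically
 * pair the allocation helpers above with fsnotify_destroy_group(). The ops
 * structure and error handling below are hypothetical:
 *
 *	static const struct fsnotify_ops example_fsnotify_ops = {
 *		.handle_event	= example_handle_event,
 *		.free_event	= example_free_event,
 *	};
 *
 *	group = fsnotify_alloc_user_group(&example_fsnotify_ops);
 *	if (IS_ERR(group))
 *		return PTR_ERR(group);
 *	...
 *	fsnotify_destroy_group(group);
 */
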
/*
 * Wire the group's notification queue into the fasync machinery so that
 * readers of the notification file descriptor can request SIGIO delivery.
 */
int fsnotify_fasync(int fd, struct file *file, int on)
{
	struct fsnotify_group *group = file->private_data;

	return fasync_helper(fd, file, on, &group->fsn_fa) >= 0 ? 0 : -EIO;
}
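
/*
 * Illustrative sketch only (not part of this file): fsnotify_fasync() is
 * meant to be plugged into the file_operations of a notification file
 * descriptor, roughly as the inotify/fanotify devices do. The structure and
 * helper names below are hypothetical:
 *
 *	static const struct file_operations example_notify_fops = {
 *		.poll		= example_poll,
 *		.read		= example_read,
 *		.fasync		= fsnotify_fasync,
 *		.release	= example_release,
 *	};
 */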