#include <linux/fanotify.h>
#include <linux/fdtable.h>
#include <linux/fsnotify_backend.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/kernel.h> /* UINT_MAX */
#include <linux/mount.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/wait.h>

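/*
 * Decide whether a newly generated event can be coalesced with one that is
 * already sitting on the notification queue.  Events merge only when they
 * refer to the same inode, carry the same data type and were generated by
 * the same thread group; path events additionally have to point at the same
 * mount/dentry pair.
 */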
static bool should_merge(struct fsnotify_event *old, struct fsnotify_event *new)
{
	pr_debug("%s: old=%p new=%p\n", __func__, old, new);

	if (old->to_tell == new->to_tell &&
	    old->data_type == new->data_type &&
	    old->tgid == new->tgid) {
		switch (old->data_type) {
		case (FSNOTIFY_EVENT_PATH):
			if ((old->path.mnt == new->path.mnt) &&
			    (old->path.dentry == new->path.dentry))
				return true;
			break;
		case (FSNOTIFY_EVENT_NONE):
			return true;
		default:
			BUG();
		};
	}
	return false;
}

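/*
 * Walk the notification list from the newest entry backwards looking for an
 * event that should_merge() accepts, and fold the new mask into it.  Returns
 * the event that absorbed the merge, NULL when nothing could be merged, or
 * an ERR_PTR() if cloning the queued event failed.
 */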
/* and the list better be locked by something too! */
static struct fsnotify_event *fanotify_merge(struct list_head *list,
					     struct fsnotify_event *event)
{
	struct fsnotify_event_holder *test_holder;
	struct fsnotify_event *test_event = NULL;
	struct fsnotify_event *new_event;

	pr_debug("%s: list=%p event=%p\n", __func__, list, event);

	list_for_each_entry_reverse(test_holder, list, event_list) {
		if (should_merge(test_holder->event, event)) {
			test_event = test_holder->event;
			break;
		}
	}

	if (!test_event)
		return NULL;

	fsnotify_get_event(test_event);

	/* if they are exactly the same we are done */
	if (test_event->mask == event->mask)
		return test_event;

	/*
	 * if the refcnt == 2 this is the only queue
	 * for this event and so we can update the mask
	 * in place.
	 */
	if (atomic_read(&test_event->refcnt) == 2) {
		test_event->mask |= event->mask;
		return test_event;
	}

	new_event = fsnotify_clone_event(test_event);

	/* done with test_event */
	fsnotify_put_event(test_event);

	/* couldn't allocate memory, merge was not possible */
	if (unlikely(!new_event))
		return ERR_PTR(-ENOMEM);

	/* build new event and replace it on the list */
	new_event->mask = (test_event->mask | event->mask);
	fsnotify_replace_event(test_holder, new_event);

	/* we hold a reference on new_event from clone_event */
	return new_event;
}

#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
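/*
 * Block until userspace answers a permission event, or until the group is
 * being torn down and bypass_perm is set.  FAN_ALLOW maps to 0; FAN_DENY and
 * any unrecognised response map to -EPERM.
 */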
static int fanotify_get_response_from_access(struct fsnotify_group *group,
					     struct fsnotify_event *event)
{
	int ret;

	pr_debug("%s: group=%p event=%p\n", __func__, group, event);

	wait_event(group->fanotify_data.access_waitq, event->response ||
				atomic_read(&group->fanotify_data.bypass_perm));

	if (!event->response) /* bypass_perm set */
		return 0;

	/* userspace responded, convert to something usable */
	spin_lock(&event->lock);
	switch (event->response) {
	case FAN_ALLOW:
		ret = 0;
		break;
	case FAN_DENY:
	default:
		ret = -EPERM;
	}
	event->response = 0;
	spin_unlock(&event->lock);

	pr_debug("%s: group=%p event=%p about to return ret=%d\n", __func__,
		 group, event, ret);

	return ret;
}
#endif

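/*
 * Queue an event for this group, merging it with an already queued event
 * when possible.  For permission events the call then sleeps until userspace
 * has written back a response.
 */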
static int fanotify_handle_event(struct fsnotify_group *group,
				 struct fsnotify_mark *inode_mark,
				 struct fsnotify_mark *fanotify_mark,
				 struct fsnotify_event *event)
{
	int ret = 0;
	struct fsnotify_event *notify_event = NULL;

	BUILD_BUG_ON(FAN_ACCESS != FS_ACCESS);
	BUILD_BUG_ON(FAN_MODIFY != FS_MODIFY);
	BUILD_BUG_ON(FAN_CLOSE_NOWRITE != FS_CLOSE_NOWRITE);
	BUILD_BUG_ON(FAN_CLOSE_WRITE != FS_CLOSE_WRITE);
	BUILD_BUG_ON(FAN_OPEN != FS_OPEN);
	BUILD_BUG_ON(FAN_EVENT_ON_CHILD != FS_EVENT_ON_CHILD);
	BUILD_BUG_ON(FAN_Q_OVERFLOW != FS_Q_OVERFLOW);
	BUILD_BUG_ON(FAN_OPEN_PERM != FS_OPEN_PERM);
	BUILD_BUG_ON(FAN_ACCESS_PERM != FS_ACCESS_PERM);
	BUILD_BUG_ON(FAN_ONDIR != FS_ISDIR);

	pr_debug("%s: group=%p event=%p\n", __func__, group, event);

	notify_event = fsnotify_add_notify_event(group, event, NULL, fanotify_merge);
	if (IS_ERR(notify_event))
		return PTR_ERR(notify_event);

#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	if (event->mask & FAN_ALL_PERM_EVENTS) {
		/* if we merged we need to wait on the new event */
		if (notify_event)
			event = notify_event;
		ret = fanotify_get_response_from_access(group, event);
	}
#endif

	if (notify_event)
		fsnotify_put_event(notify_event);

	return ret;
}

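/*
 * Filter hook: decide whether this group cares about the event at all.
 * Only path events on regular files and directories qualify, and the
 * combined mark masks (minus the ignored masks) must still select the
 * event.
 */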
static bool fanotify_should_send_event(struct fsnotify_group *group,
				       struct inode *to_tell,
				       struct fsnotify_mark *inode_mark,
				       struct fsnotify_mark *vfsmnt_mark,
				       __u32 event_mask, void *data, int data_type)
{
	__u32 marks_mask, marks_ignored_mask;
	struct path *path = data;

	pr_debug("%s: group=%p to_tell=%p inode_mark=%p vfsmnt_mark=%p "
		 "mask=%x data=%p data_type=%d\n", __func__, group, to_tell,
		 inode_mark, vfsmnt_mark, event_mask, data, data_type);

	/* if we don't have enough info to send an event to userspace say no */
	if (data_type != FSNOTIFY_EVENT_PATH)
		return false;

	/* sorry, fanotify only gives a damn about files and dirs */
	if (!S_ISREG(path->dentry->d_inode->i_mode) &&
	    !S_ISDIR(path->dentry->d_inode->i_mode))
		return false;

	if (inode_mark && vfsmnt_mark) {
		marks_mask = (vfsmnt_mark->mask | inode_mark->mask);
		marks_ignored_mask = (vfsmnt_mark->ignored_mask | inode_mark->ignored_mask);
	} else if (inode_mark) {
		/*
		 * if the event is for a child and this inode doesn't care about
		 * events on the child, don't send it!
		 */
		if ((event_mask & FS_EVENT_ON_CHILD) &&
		    !(inode_mark->mask & FS_EVENT_ON_CHILD))
			return false;
		marks_mask = inode_mark->mask;
		marks_ignored_mask = inode_mark->ignored_mask;
	} else if (vfsmnt_mark) {
		marks_mask = vfsmnt_mark->mask;
		marks_ignored_mask = vfsmnt_mark->ignored_mask;
	} else {
		BUG();
	}

	if (S_ISDIR(path->dentry->d_inode->i_mode) &&
	    (marks_ignored_mask & FS_ISDIR))
		return false;

	if (event_mask & marks_mask & ~marks_ignored_mask)
		return true;

	return false;
}

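/*
 * Group teardown: drop this listener from the per-user accounting that was
 * taken when the group was created.
 */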
static void fanotify_free_group_priv(struct fsnotify_group *group)
{
	struct user_struct *user;

	user = group->fanotify_data.user;
	atomic_dec(&user->fanotify_listeners);
	free_uid(user);
}

const struct fsnotify_ops fanotify_fsnotify_ops = {
	.handle_event = fanotify_handle_event,
	.should_send_event = fanotify_should_send_event,
	.free_group_priv = fanotify_free_group_priv,
	.free_event_priv = NULL,
	.freeing_mark = NULL,
};
// SPDX-License-Identifier: GPL-2.0
#include <linux/fanotify.h>
#include <linux/fdtable.h>
#include <linux/fsnotify_backend.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/kernel.h> /* UINT_MAX */
#include <linux/mount.h>
#include <linux/sched.h>
#include <linux/sched/user.h>
#include <linux/sched/signal.h>
#include <linux/types.h>
#include <linux/wait.h>
#include <linux/audit.h>
#include <linux/sched/mm.h>
#include <linux/statfs.h>

#include "fanotify.h"

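/*
 * Two queued events may be coalesced when they describe the same object:
 * same inode, same reporting pid and an identical file handle (or, for path
 * events, the same mount/dentry pair).
 */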
static bool should_merge(struct fsnotify_event *old_fsn,
			 struct fsnotify_event *new_fsn)
{
	struct fanotify_event *old, *new;

	pr_debug("%s: old=%p new=%p\n", __func__, old_fsn, new_fsn);
	old = FANOTIFY_E(old_fsn);
	new = FANOTIFY_E(new_fsn);

	if (old_fsn->inode != new_fsn->inode || old->pid != new->pid ||
	    old->fh_type != new->fh_type || old->fh_len != new->fh_len)
		return false;

	if (fanotify_event_has_path(old)) {
		return old->path.mnt == new->path.mnt &&
			old->path.dentry == new->path.dentry;
	} else if (fanotify_event_has_fid(old)) {
		/*
		 * We want to merge many dirent events in the same dir (i.e.
		 * creates/unlinks/renames), but we do not want to merge dirent
		 * events referring to subdirs with dirent events referring to
		 * non subdirs, otherwise, user won't be able to tell from a
		 * mask FAN_CREATE|FAN_DELETE|FAN_ONDIR if it describes mkdir+
		 * unlink pair or rmdir+create pair of events.
		 */
		return (old->mask & FS_ISDIR) == (new->mask & FS_ISDIR) &&
			fanotify_fid_equal(&old->fid, &new->fid, old->fh_len);
	}

	/* Do not merge events if we failed to encode fid */
	return false;
}

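/*
 * Try to merge the new event into an event already on @list.  Returns 1 if
 * the event was merged (the caller can then drop its own copy) and 0 if it
 * has to be queued as-is.
 */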
/* and the list better be locked by something too! */
static int fanotify_merge(struct list_head *list, struct fsnotify_event *event)
{
	struct fsnotify_event *test_event;
	struct fanotify_event *new;

	pr_debug("%s: list=%p event=%p\n", __func__, list, event);
	new = FANOTIFY_E(event);

	/*
	 * Don't merge a permission event with any other event so that we know
	 * the event structure we have created in fanotify_handle_event() is the
	 * one we should check for permission response.
	 */
	if (fanotify_is_perm_event(new->mask))
		return 0;

	list_for_each_entry_reverse(test_event, list, list) {
		if (should_merge(test_event, event)) {
			FANOTIFY_E(test_event)->mask |= new->mask;
			return 1;
		}
	}

	return 0;
}

/*
 * Wait for response to permission event. The function also takes care of
 * freeing the permission event (or offloads that in case the wait is canceled
 * by a signal). The function returns 0 in case access got allowed by userspace,
 * -EPERM in case userspace disallowed the access, and -ERESTARTSYS in case
 * the wait got interrupted by a signal.
 */
static int fanotify_get_response(struct fsnotify_group *group,
				 struct fanotify_perm_event *event,
				 struct fsnotify_iter_info *iter_info)
{
	int ret;

	pr_debug("%s: group=%p event=%p\n", __func__, group, event);

	ret = wait_event_killable(group->fanotify_data.access_waitq,
				  event->state == FAN_EVENT_ANSWERED);
	/* Signal pending? */
	if (ret < 0) {
		spin_lock(&group->notification_lock);
		/* Event reported to userspace and no answer yet? */
		if (event->state == FAN_EVENT_REPORTED) {
			/* Event will get freed once userspace answers to it */
			event->state = FAN_EVENT_CANCELED;
			spin_unlock(&group->notification_lock);
			return ret;
		}
		/* Event not yet reported? Just remove it. */
		if (event->state == FAN_EVENT_INIT)
			fsnotify_remove_queued_event(group, &event->fae.fse);
		/*
		 * Event may be also answered in case signal delivery raced
		 * with wakeup. In that case we have nothing to do besides
		 * freeing the event and reporting error.
		 */
		spin_unlock(&group->notification_lock);
		goto out;
	}

	/* userspace responded, convert to something usable */
	switch (event->response & ~FAN_AUDIT) {
	case FAN_ALLOW:
		ret = 0;
		break;
	case FAN_DENY:
	default:
		ret = -EPERM;
	}

	/* Check if the response should be audited */
	if (event->response & FAN_AUDIT)
		audit_fanotify(event->response & ~FAN_AUDIT);

	pr_debug("%s: group=%p event=%p about to return ret=%d\n", __func__,
		 group, event, ret);
out:
	fsnotify_destroy_event(group, &event->fae.fse);

	return ret;
}

/*
 * This function returns a mask for an event that only contains the flags
 * that have been specifically requested by the user. Flags that may have
 * been included within the event mask, but have not been explicitly
 * requested by the user, will not be present in the returned mask.
 */
static u32 fanotify_group_event_mask(struct fsnotify_group *group,
				     struct fsnotify_iter_info *iter_info,
				     u32 event_mask, const void *data,
				     int data_type)
{
	__u32 marks_mask = 0, marks_ignored_mask = 0;
	__u32 test_mask, user_mask = FANOTIFY_OUTGOING_EVENTS;
	const struct path *path = data;
	struct fsnotify_mark *mark;
	int type;

	pr_debug("%s: report_mask=%x mask=%x data=%p data_type=%d\n",
		 __func__, iter_info->report_mask, event_mask, data, data_type);

	if (!FAN_GROUP_FLAG(group, FAN_REPORT_FID)) {
		/* Do we have path to open a file descriptor? */
		if (data_type != FSNOTIFY_EVENT_PATH)
			return 0;
		/* Path type events are only relevant for files and dirs */
		if (!d_is_reg(path->dentry) && !d_can_lookup(path->dentry))
			return 0;
	}

	fsnotify_foreach_obj_type(type) {
		if (!fsnotify_iter_should_report_type(iter_info, type))
			continue;
		mark = iter_info->marks[type];
		/*
		 * If the event is for a child and this mark doesn't care about
		 * events on a child, don't send it!
		 */
		if (event_mask & FS_EVENT_ON_CHILD &&
		    (type != FSNOTIFY_OBJ_TYPE_INODE ||
		     !(mark->mask & FS_EVENT_ON_CHILD)))
			continue;

		marks_mask |= mark->mask;
		marks_ignored_mask |= mark->ignored_mask;
	}

	test_mask = event_mask & marks_mask & ~marks_ignored_mask;

	/*
	 * dirent modification events (create/delete/move) do not carry the
	 * child entry name/inode information. Instead, we report FAN_ONDIR
	 * for mkdir/rmdir so user can differentiate them from creat/unlink.
	 *
	 * For backward compatibility and consistency, do not report FAN_ONDIR
	 * to user in legacy fanotify mode (reporting fd) and report FAN_ONDIR
	 * to user in FAN_REPORT_FID mode for all event types.
	 */
	if (FAN_GROUP_FLAG(group, FAN_REPORT_FID)) {
		/* Do not report FAN_ONDIR without any event */
		if (!(test_mask & ~FAN_ONDIR))
			return 0;
	} else {
		user_mask &= ~FAN_ONDIR;
	}

	if (event_mask & FS_ISDIR &&
	    !(marks_mask & FS_ISDIR & ~marks_ignored_mask))
		return 0;

	return test_mask & user_mask;
}

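/*
 * Encode a file handle for @inode into the event's fanotify_fid.  Handles
 * larger than the inline buffer are kmalloc'ed.  Returns the file handle
 * type on success and FILEID_INVALID on failure, in which case the event
 * gets reported without an identifier.
 */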
static int fanotify_encode_fid(struct fanotify_event *event,
			       struct inode *inode, gfp_t gfp,
			       __kernel_fsid_t *fsid)
{
	struct fanotify_fid *fid = &event->fid;
	int dwords, bytes = 0;
	int err, type;

	fid->ext_fh = NULL;
	dwords = 0;
	err = -ENOENT;
	type = exportfs_encode_inode_fh(inode, NULL, &dwords, NULL);
	if (!dwords)
		goto out_err;

	bytes = dwords << 2;
	if (bytes > FANOTIFY_INLINE_FH_LEN) {
		/* Treat failure to allocate fh as failure to allocate event */
		err = -ENOMEM;
		fid->ext_fh = kmalloc(bytes, gfp);
		if (!fid->ext_fh)
			goto out_err;
	}

	type = exportfs_encode_inode_fh(inode, fanotify_fid_fh(fid, bytes),
					&dwords, NULL);
	err = -EINVAL;
	if (!type || type == FILEID_INVALID || bytes != dwords << 2)
		goto out_err;

	fid->fsid = *fsid;
	event->fh_len = bytes;

	return type;

out_err:
	pr_warn_ratelimited("fanotify: failed to encode fid (fsid=%x.%x, "
			    "type=%d, bytes=%d, err=%i)\n",
			    fsid->val[0], fsid->val[1], type, bytes, err);
	kfree(fid->ext_fh);
	fid->ext_fh = NULL;
	event->fh_len = 0;

	return FILEID_INVALID;
}

/*
 * The inode to use as identifier when reporting fid depends on the event.
 * Report the modified directory inode on dirent modification events.
 * Report the "victim" inode otherwise.
 * For example:
 * FS_ATTRIB reports the child inode even if reported on a watched parent.
 * FS_CREATE reports the modified dir inode and not the created inode.
 */
static struct inode *fanotify_fid_inode(struct inode *to_tell, u32 event_mask,
					const void *data, int data_type)
{
	if (event_mask & ALL_FSNOTIFY_DIRENT_EVENTS)
		return to_tell;
	else if (data_type == FSNOTIFY_EVENT_INODE)
		return (struct inode *)data;
	else if (data_type == FSNOTIFY_EVENT_PATH)
		return d_inode(((struct path *)data)->dentry);
	return NULL;
}

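/*
 * Allocate and initialise a fanotify event (a larger fanotify_perm_event for
 * permission events).  The allocation is charged to the listener's memcg;
 * unlimited queues additionally use __GFP_NOFAIL so that events are never
 * silently lost.
 */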
struct fanotify_event *fanotify_alloc_event(struct fsnotify_group *group,
					    struct inode *inode, u32 mask,
					    const void *data, int data_type,
					    __kernel_fsid_t *fsid)
{
	struct fanotify_event *event = NULL;
	gfp_t gfp = GFP_KERNEL_ACCOUNT;
	struct inode *id = fanotify_fid_inode(inode, mask, data, data_type);

	/*
	 * For queues with unlimited length lost events are not expected and
	 * can possibly have security implications. Avoid losing events when
	 * memory is short. For the limited size queues, avoid OOM killer in the
	 * target monitoring memcg as it may have security repercussion.
	 */
	if (group->max_events == UINT_MAX)
		gfp |= __GFP_NOFAIL;
	else
		gfp |= __GFP_RETRY_MAYFAIL;

	/* Whoever is interested in the event, pays for the allocation. */
	memalloc_use_memcg(group->memcg);

	if (fanotify_is_perm_event(mask)) {
		struct fanotify_perm_event *pevent;

		pevent = kmem_cache_alloc(fanotify_perm_event_cachep, gfp);
		if (!pevent)
			goto out;
		event = &pevent->fae;
		pevent->response = 0;
		pevent->state = FAN_EVENT_INIT;
		goto init;
	}
	event = kmem_cache_alloc(fanotify_event_cachep, gfp);
	if (!event)
		goto out;
init: __maybe_unused
	fsnotify_init_event(&event->fse, inode);
	event->mask = mask;
	if (FAN_GROUP_FLAG(group, FAN_REPORT_TID))
		event->pid = get_pid(task_pid(current));
	else
		event->pid = get_pid(task_tgid(current));
	event->fh_len = 0;
	if (id && FAN_GROUP_FLAG(group, FAN_REPORT_FID)) {
		/* Report the event without a file identifier on encode error */
		event->fh_type = fanotify_encode_fid(event, id, gfp, fsid);
	} else if (data_type == FSNOTIFY_EVENT_PATH) {
		event->fh_type = FILEID_ROOT;
		event->path = *((struct path *)data);
		path_get(&event->path);
	} else {
		event->fh_type = FILEID_INVALID;
		event->path.mnt = NULL;
		event->path.dentry = NULL;
	}
out:
	memalloc_unuse_memcg();
	return event;
}

/*
 * Get cached fsid of the filesystem containing the object from any connector.
 * All connectors are supposed to have the same fsid, but we do not verify that
 * here.
 */
static __kernel_fsid_t fanotify_get_fsid(struct fsnotify_iter_info *iter_info)
{
	int type;
	__kernel_fsid_t fsid = {};

	fsnotify_foreach_obj_type(type) {
		struct fsnotify_mark_connector *conn;

		if (!fsnotify_iter_should_report_type(iter_info, type))
			continue;

		conn = READ_ONCE(iter_info->marks[type]->connector);
		/* Mark is just getting destroyed or created? */
		if (!conn)
			continue;
		if (!(conn->flags & FSNOTIFY_CONN_FLAG_HAS_FSID))
			continue;
		/* Pairs with smp_wmb() in fsnotify_add_mark_list() */
		smp_rmb();
		fsid = conn->fsid;
		if (WARN_ON_ONCE(!fsid.val[0] && !fsid.val[1]))
			continue;
		return fsid;
	}

	return fsid;
}

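/*
 * Main event handler called from fsnotify.  It narrows the event mask to
 * what this group asked for, allocates and queues a fanotify event, and for
 * permission events waits for the verdict from userspace.
 */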
static int fanotify_handle_event(struct fsnotify_group *group,
				 struct inode *inode,
				 u32 mask, const void *data, int data_type,
				 const struct qstr *file_name, u32 cookie,
				 struct fsnotify_iter_info *iter_info)
{
	int ret = 0;
	struct fanotify_event *event;
	struct fsnotify_event *fsn_event;
	__kernel_fsid_t fsid = {};

	BUILD_BUG_ON(FAN_ACCESS != FS_ACCESS);
	BUILD_BUG_ON(FAN_MODIFY != FS_MODIFY);
	BUILD_BUG_ON(FAN_ATTRIB != FS_ATTRIB);
	BUILD_BUG_ON(FAN_CLOSE_NOWRITE != FS_CLOSE_NOWRITE);
	BUILD_BUG_ON(FAN_CLOSE_WRITE != FS_CLOSE_WRITE);
	BUILD_BUG_ON(FAN_OPEN != FS_OPEN);
	BUILD_BUG_ON(FAN_MOVED_TO != FS_MOVED_TO);
	BUILD_BUG_ON(FAN_MOVED_FROM != FS_MOVED_FROM);
	BUILD_BUG_ON(FAN_CREATE != FS_CREATE);
	BUILD_BUG_ON(FAN_DELETE != FS_DELETE);
	BUILD_BUG_ON(FAN_DELETE_SELF != FS_DELETE_SELF);
	BUILD_BUG_ON(FAN_MOVE_SELF != FS_MOVE_SELF);
	BUILD_BUG_ON(FAN_EVENT_ON_CHILD != FS_EVENT_ON_CHILD);
	BUILD_BUG_ON(FAN_Q_OVERFLOW != FS_Q_OVERFLOW);
	BUILD_BUG_ON(FAN_OPEN_PERM != FS_OPEN_PERM);
	BUILD_BUG_ON(FAN_ACCESS_PERM != FS_ACCESS_PERM);
	BUILD_BUG_ON(FAN_ONDIR != FS_ISDIR);
	BUILD_BUG_ON(FAN_OPEN_EXEC != FS_OPEN_EXEC);
	BUILD_BUG_ON(FAN_OPEN_EXEC_PERM != FS_OPEN_EXEC_PERM);

	BUILD_BUG_ON(HWEIGHT32(ALL_FANOTIFY_EVENT_BITS) != 19);

	mask = fanotify_group_event_mask(group, iter_info, mask, data,
					 data_type);
	if (!mask)
		return 0;

	pr_debug("%s: group=%p inode=%p mask=%x\n", __func__, group, inode,
		 mask);

	if (fanotify_is_perm_event(mask)) {
		/*
		 * fsnotify_prepare_user_wait() fails if we race with mark
		 * deletion. Just let the operation pass in that case.
		 */
		if (!fsnotify_prepare_user_wait(iter_info))
			return 0;
	}

	if (FAN_GROUP_FLAG(group, FAN_REPORT_FID)) {
		fsid = fanotify_get_fsid(iter_info);
		/* Racing with mark destruction or creation? */
		if (!fsid.val[0] && !fsid.val[1])
			return 0;
	}

	event = fanotify_alloc_event(group, inode, mask, data, data_type,
				     &fsid);
	ret = -ENOMEM;
	if (unlikely(!event)) {
		/*
		 * We don't queue overflow events for permission events as
		 * there the access is denied and so no event is in fact lost.
		 */
		if (!fanotify_is_perm_event(mask))
			fsnotify_queue_overflow(group);
		goto finish;
	}

	fsn_event = &event->fse;
	ret = fsnotify_add_event(group, fsn_event, fanotify_merge);
	if (ret) {
		/* Permission events shouldn't be merged */
		BUG_ON(ret == 1 && mask & FANOTIFY_PERM_EVENTS);
		/* Our event wasn't used in the end. Free it. */
		fsnotify_destroy_event(group, fsn_event);

		ret = 0;
	} else if (fanotify_is_perm_event(mask)) {
		ret = fanotify_get_response(group, FANOTIFY_PE(fsn_event),
					    iter_info);
	}
finish:
	if (fanotify_is_perm_event(mask))
		fsnotify_finish_user_wait(iter_info);

	return ret;
}

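/*
 * Undo the per-user listener accounting taken when the group was created.
 */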
static void fanotify_free_group_priv(struct fsnotify_group *group)
{
	struct user_struct *user;

	user = group->fanotify_data.user;
	atomic_dec(&user->fanotify_listeners);
	free_uid(user);
}

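/*
 * Release everything an event pinned while queued: the path reference or an
 * externally allocated file handle, plus the struct pid, then return the
 * event to its slab cache.
 */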
static void fanotify_free_event(struct fsnotify_event *fsn_event)
{
	struct fanotify_event *event;

	event = FANOTIFY_E(fsn_event);
	if (fanotify_event_has_path(event))
		path_put(&event->path);
	else if (fanotify_event_has_ext_fh(event))
		kfree(event->fid.ext_fh);
	put_pid(event->pid);
	if (fanotify_is_perm_event(event->mask)) {
		kmem_cache_free(fanotify_perm_event_cachep,
				FANOTIFY_PE(fsn_event));
		return;
	}
	kmem_cache_free(fanotify_event_cachep, event);
}

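/* Free a mark that was allocated from fanotify_mark_cache. */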
static void fanotify_free_mark(struct fsnotify_mark *fsn_mark)
{
	kmem_cache_free(fanotify_mark_cache, fsn_mark);
}

const struct fsnotify_ops fanotify_fsnotify_ops = {
	.handle_event = fanotify_handle_event,
	.free_group_priv = fanotify_free_group_priv,
	.free_event = fanotify_free_event,
	.free_mark = fanotify_free_mark,
};