v4.17
  1// SPDX-License-Identifier: GPL-2.0
  2#include <linux/fanotify.h>
  3#include <linux/fcntl.h>
  4#include <linux/file.h>
  5#include <linux/fs.h>
  6#include <linux/anon_inodes.h>
  7#include <linux/fsnotify_backend.h>
  8#include <linux/init.h>
  9#include <linux/mount.h>
 10#include <linux/namei.h>
 11#include <linux/poll.h>
 12#include <linux/security.h>
 13#include <linux/syscalls.h>
 14#include <linux/slab.h>
 15#include <linux/types.h>
 16#include <linux/uaccess.h>
 17#include <linux/compat.h>
 18#include <linux/sched/signal.h>
 19
 20#include <asm/ioctls.h>
 21
 22#include "../../mount.h"
 23#include "../fdinfo.h"
 24#include "fanotify.h"
 25
 26#define FANOTIFY_DEFAULT_MAX_EVENTS	16384
 27#define FANOTIFY_DEFAULT_MAX_MARKS	8192
 28#define FANOTIFY_DEFAULT_MAX_LISTENERS	128
 29
 30/*
 31 * All flags that may be specified in parameter event_f_flags of fanotify_init.
 32 *
 33 * Internal and external open flags are stored together in field f_flags of
 34 * struct file. Only external open flags shall be allowed in event_f_flags.
 35 * Internal flags like FMODE_NONOTIFY, FMODE_EXEC, FMODE_NOCMTIME shall be
 36 * excluded.
 37 */
 38#define	FANOTIFY_INIT_ALL_EVENT_F_BITS				( \
 39		O_ACCMODE	| O_APPEND	| O_NONBLOCK	| \
 40		__O_SYNC	| O_DSYNC	| O_CLOEXEC     | \
 41		O_LARGEFILE	| O_NOATIME	)
 42
 43extern const struct fsnotify_ops fanotify_fsnotify_ops;
 44
 45struct kmem_cache *fanotify_mark_cache __read_mostly;
 46struct kmem_cache *fanotify_event_cachep __read_mostly;
 47struct kmem_cache *fanotify_perm_event_cachep __read_mostly;
 48
 49/*
 50 * Get an fsnotify notification event if one exists and is small
 51 * enough to fit in "count". Return an error pointer if the count
 52 * is not large enough.
 53 *
 54 * Called with the group->notification_lock held.
 55 */
 56static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
 57					    size_t count)
 58{
 59	assert_spin_locked(&group->notification_lock);
 60
 61	pr_debug("%s: group=%p count=%zd\n", __func__, group, count);
 62
 63	if (fsnotify_notify_queue_is_empty(group))
 64		return NULL;
 65
 66	if (FAN_EVENT_METADATA_LEN > count)
 67		return ERR_PTR(-EINVAL);
 68
 69	/* held the notification_lock the whole time, so this is the
 70	 * same event we peeked above */
 71	return fsnotify_remove_first_event(group);
 72}
 73
 74static int create_fd(struct fsnotify_group *group,
 75		     struct fanotify_event_info *event,
 76		     struct file **file)
 77{
 78	int client_fd;
 79	struct file *new_file;
 80
 81	pr_debug("%s: group=%p event=%p\n", __func__, group, event);
 82
 83	client_fd = get_unused_fd_flags(group->fanotify_data.f_flags);
 84	if (client_fd < 0)
 85		return client_fd;
 86
 87	/*
 88	 * we need a new file handle for the userspace program so it can read even if it was
 89	 * originally opened O_WRONLY.
 90	 */
 91	/* it's possible this event was an overflow event.  in that case dentry and mnt
 92	 * are NULL;  That's fine, just don't call dentry open */
 93	if (event->path.dentry && event->path.mnt)
 94		new_file = dentry_open(&event->path,
 95				       group->fanotify_data.f_flags | FMODE_NONOTIFY,
 96				       current_cred());
 97	else
 98		new_file = ERR_PTR(-EOVERFLOW);
 99	if (IS_ERR(new_file)) {
100		/*
 101		 * We still send an event even if we can't open the file.  This
 102		 * can happen when say tasks are gone and we try to open their
 103		 * /proc files or we try to open a WRONLY file like in sysfs.
 104		 * We just send the errno to userspace since there isn't much
105		 * else we can do.
106		 */
107		put_unused_fd(client_fd);
108		client_fd = PTR_ERR(new_file);
109	} else {
110		*file = new_file;
111	}
112
113	return client_fd;
114}
115
116static int fill_event_metadata(struct fsnotify_group *group,
117			       struct fanotify_event_metadata *metadata,
118			       struct fsnotify_event *fsn_event,
119			       struct file **file)
120{
121	int ret = 0;
122	struct fanotify_event_info *event;
123
124	pr_debug("%s: group=%p metadata=%p event=%p\n", __func__,
125		 group, metadata, fsn_event);
126
127	*file = NULL;
128	event = container_of(fsn_event, struct fanotify_event_info, fse);
129	metadata->event_len = FAN_EVENT_METADATA_LEN;
130	metadata->metadata_len = FAN_EVENT_METADATA_LEN;
131	metadata->vers = FANOTIFY_METADATA_VERSION;
132	metadata->reserved = 0;
133	metadata->mask = fsn_event->mask & FAN_ALL_OUTGOING_EVENTS;
134	metadata->pid = pid_vnr(event->tgid);
135	if (unlikely(fsn_event->mask & FAN_Q_OVERFLOW))
136		metadata->fd = FAN_NOFD;
137	else {
138		metadata->fd = create_fd(group, event, file);
139		if (metadata->fd < 0)
140			ret = metadata->fd;
141	}
142
143	return ret;
144}
145
146static struct fanotify_perm_event_info *dequeue_event(
147				struct fsnotify_group *group, int fd)
148{
149	struct fanotify_perm_event_info *event, *return_e = NULL;
150
151	spin_lock(&group->notification_lock);
152	list_for_each_entry(event, &group->fanotify_data.access_list,
153			    fae.fse.list) {
154		if (event->fd != fd)
155			continue;
156
157		list_del_init(&event->fae.fse.list);
158		return_e = event;
159		break;
160	}
161	spin_unlock(&group->notification_lock);
162
 163	pr_debug("%s: found return_e=%p\n", __func__, return_e);
164
165	return return_e;
166}
167
168static int process_access_response(struct fsnotify_group *group,
169				   struct fanotify_response *response_struct)
170{
171	struct fanotify_perm_event_info *event;
172	int fd = response_struct->fd;
173	int response = response_struct->response;
174
175	pr_debug("%s: group=%p fd=%d response=%d\n", __func__, group,
176		 fd, response);
177	/*
178	 * make sure the response is valid, if invalid we do nothing and either
179	 * userspace can send a valid response or we will clean it up after the
180	 * timeout
181	 */
182	switch (response & ~FAN_AUDIT) {
183	case FAN_ALLOW:
184	case FAN_DENY:
185		break;
186	default:
187		return -EINVAL;
188	}
189
190	if (fd < 0)
191		return -EINVAL;
192
193	if ((response & FAN_AUDIT) && !group->fanotify_data.audit)
194		return -EINVAL;
195
196	event = dequeue_event(group, fd);
197	if (!event)
198		return -ENOENT;
199
200	event->response = response;
 
201	wake_up(&group->fanotify_data.access_waitq);
202
 
 
203	return 0;
204}
205
206static ssize_t copy_event_to_user(struct fsnotify_group *group,
207				  struct fsnotify_event *event,
208				  char __user *buf)
209{
210	struct fanotify_event_metadata fanotify_event_metadata;
211	struct file *f;
212	int fd, ret;
213
214	pr_debug("%s: group=%p event=%p\n", __func__, group, event);
215
216	ret = fill_event_metadata(group, &fanotify_event_metadata, event, &f);
217	if (ret < 0)
218		return ret;
219
220	fd = fanotify_event_metadata.fd;
221	ret = -EFAULT;
222	if (copy_to_user(buf, &fanotify_event_metadata,
223			 fanotify_event_metadata.event_len))
224		goto out_close_fd;
225
226	if (fanotify_is_perm_event(event->mask))
227		FANOTIFY_PE(event)->fd = fd;
228
229	if (fd != FAN_NOFD)
230		fd_install(fd, f);
231	return fanotify_event_metadata.event_len;
232
 
 
233out_close_fd:
234	if (fd != FAN_NOFD) {
235		put_unused_fd(fd);
236		fput(f);
237	}
 
238	return ret;
239}
240
 241/* fanotify userspace file descriptor functions */
242static __poll_t fanotify_poll(struct file *file, poll_table *wait)
243{
244	struct fsnotify_group *group = file->private_data;
245	__poll_t ret = 0;
246
247	poll_wait(file, &group->notification_waitq, wait);
248	spin_lock(&group->notification_lock);
249	if (!fsnotify_notify_queue_is_empty(group))
250		ret = EPOLLIN | EPOLLRDNORM;
251	spin_unlock(&group->notification_lock);
252
253	return ret;
254}
255
256static ssize_t fanotify_read(struct file *file, char __user *buf,
257			     size_t count, loff_t *pos)
258{
259	struct fsnotify_group *group;
260	struct fsnotify_event *kevent;
261	char __user *start;
262	int ret;
263	DEFINE_WAIT_FUNC(wait, woken_wake_function);
264
265	start = buf;
266	group = file->private_data;
267
268	pr_debug("%s: group=%p\n", __func__, group);
269
270	add_wait_queue(&group->notification_waitq, &wait);
271	while (1) {
272		spin_lock(&group->notification_lock);
 
 
273		kevent = get_one_event(group, count);
274		spin_unlock(&group->notification_lock);
275
276		if (IS_ERR(kevent)) {
277			ret = PTR_ERR(kevent);
278			break;
279		}
280
281		if (!kevent) {
282			ret = -EAGAIN;
283			if (file->f_flags & O_NONBLOCK)
284				break;
285
286			ret = -ERESTARTSYS;
287			if (signal_pending(current))
288				break;
289
290			if (start != buf)
291				break;
292
293			wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
294			continue;
295		}
296
297		ret = copy_event_to_user(group, kevent, buf);
298		if (unlikely(ret == -EOPENSTALE)) {
299			/*
300			 * We cannot report events with stale fd so drop it.
301			 * Setting ret to 0 will continue the event loop and
302			 * do the right thing if there are no more events to
303			 * read (i.e. return bytes read, -EAGAIN or wait).
304			 */
305			ret = 0;
306		}
307
308		/*
309		 * Permission events get queued to wait for response.  Other
310		 * events can be destroyed now.
311		 */
312		if (!fanotify_is_perm_event(kevent->mask)) {
313			fsnotify_destroy_event(group, kevent);
314		} else {
315			if (ret <= 0) {
316				FANOTIFY_PE(kevent)->response = FAN_DENY;
317				wake_up(&group->fanotify_data.access_waitq);
318			} else {
319				spin_lock(&group->notification_lock);
320				list_add_tail(&kevent->list,
321					&group->fanotify_data.access_list);
322				spin_unlock(&group->notification_lock);
323			}
324		}
325		if (ret < 0)
326			break;
327		buf += ret;
328		count -= ret;
329	}
330	remove_wait_queue(&group->notification_waitq, &wait);
331
 
332	if (start != buf && ret != -EFAULT)
333		ret = buf - start;
334	return ret;
335}
336
337static ssize_t fanotify_write(struct file *file, const char __user *buf, size_t count, loff_t *pos)
338{
 
339	struct fanotify_response response = { .fd = -1, .response = -1 };
340	struct fsnotify_group *group;
341	int ret;
342
343	if (!IS_ENABLED(CONFIG_FANOTIFY_ACCESS_PERMISSIONS))
344		return -EINVAL;
345
346	group = file->private_data;
347
348	if (count > sizeof(response))
349		count = sizeof(response);
350
351	pr_debug("%s: group=%p count=%zu\n", __func__, group, count);
352
353	if (copy_from_user(&response, buf, count))
354		return -EFAULT;
355
356	ret = process_access_response(group, &response);
357	if (ret < 0)
358		count = ret;
359
360	return count;
361}
362
363static int fanotify_release(struct inode *ignored, struct file *file)
364{
365	struct fsnotify_group *group = file->private_data;
366	struct fanotify_perm_event_info *event, *next;
367	struct fsnotify_event *fsn_event;
368
369	/*
 370	 * Stop new events from arriving in the notification queue. Since
 371	 * userspace cannot use the fanotify fd anymore, no event can enter or
372	 * leave access_list by now either.
373	 */
374	fsnotify_group_stop_queueing(group);
375
376	/*
377	 * Process all permission events on access_list and notification queue
378	 * and simulate reply from userspace.
379	 */
380	spin_lock(&group->notification_lock);
381	list_for_each_entry_safe(event, next, &group->fanotify_data.access_list,
382				 fae.fse.list) {
383		pr_debug("%s: found group=%p event=%p\n", __func__, group,
384			 event);
385
386		list_del_init(&event->fae.fse.list);
387		event->response = FAN_ALLOW;
388	}
389
390	/*
391	 * Destroy all non-permission events. For permission events just
392	 * dequeue them and set the response. They will be freed once the
393	 * response is consumed and fanotify_get_response() returns.
394	 */
395	while (!fsnotify_notify_queue_is_empty(group)) {
396		fsn_event = fsnotify_remove_first_event(group);
397		if (!(fsn_event->mask & FAN_ALL_PERM_EVENTS)) {
398			spin_unlock(&group->notification_lock);
399			fsnotify_destroy_event(group, fsn_event);
400			spin_lock(&group->notification_lock);
401		} else {
402			FANOTIFY_PE(fsn_event)->response = FAN_ALLOW;
403		}
404	}
405	spin_unlock(&group->notification_lock);
406
 407	/* Response for all permission events is set, wake up waiters */
408	wake_up(&group->fanotify_data.access_waitq);
409
410	/* matches the fanotify_init->fsnotify_alloc_group */
411	fsnotify_destroy_group(group);
412
413	return 0;
414}
415
416static long fanotify_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
417{
418	struct fsnotify_group *group;
419	struct fsnotify_event *fsn_event;
420	void __user *p;
421	int ret = -ENOTTY;
422	size_t send_len = 0;
423
424	group = file->private_data;
425
426	p = (void __user *) arg;
427
428	switch (cmd) {
429	case FIONREAD:
430		spin_lock(&group->notification_lock);
431		list_for_each_entry(fsn_event, &group->notification_list, list)
432			send_len += FAN_EVENT_METADATA_LEN;
433		spin_unlock(&group->notification_lock);
434		ret = put_user(send_len, (int __user *) p);
435		break;
436	}
437
438	return ret;
439}
440
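
fanotify_ioctl() above implements only FIONREAD, reporting how many bytes of queued event metadata are waiting to be read. A minimal userspace sketch of querying it follows; it is illustrative only, assumes CAP_SYS_ADMIN, and is not part of this file:

#include <stdio.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/fanotify.h>

int main(void)
{
	int fan_fd = fanotify_init(FAN_CLASS_NOTIF, O_RDONLY);
	int pending = 0;

	/* FIONREAD -> fanotify_ioctl(): queued events times FAN_EVENT_METADATA_LEN,
	 * i.e. the total length of the metadata records waiting to be read. */
	if (fan_fd >= 0 && ioctl(fan_fd, FIONREAD, &pending) == 0)
		printf("%d bytes of fanotify events queued\n", pending);
	return 0;
}
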
441static const struct file_operations fanotify_fops = {
442	.show_fdinfo	= fanotify_show_fdinfo,
443	.poll		= fanotify_poll,
444	.read		= fanotify_read,
445	.write		= fanotify_write,
446	.fasync		= NULL,
447	.release	= fanotify_release,
448	.unlocked_ioctl	= fanotify_ioctl,
449	.compat_ioctl	= fanotify_ioctl,
450	.llseek		= noop_llseek,
451};
452
453static int fanotify_find_path(int dfd, const char __user *filename,
454			      struct path *path, unsigned int flags)
455{
456	int ret;
457
458	pr_debug("%s: dfd=%d filename=%p flags=%x\n", __func__,
459		 dfd, filename, flags);
460
461	if (filename == NULL) {
462		struct fd f = fdget(dfd);
 
463
464		ret = -EBADF;
465		if (!f.file)
 
466			goto out;
467
468		ret = -ENOTDIR;
469		if ((flags & FAN_MARK_ONLYDIR) &&
470		    !(S_ISDIR(file_inode(f.file)->i_mode))) {
471			fdput(f);
472			goto out;
473		}
474
475		*path = f.file->f_path;
476		path_get(path);
477		fdput(f);
478	} else {
479		unsigned int lookup_flags = 0;
480
481		if (!(flags & FAN_MARK_DONT_FOLLOW))
482			lookup_flags |= LOOKUP_FOLLOW;
483		if (flags & FAN_MARK_ONLYDIR)
484			lookup_flags |= LOOKUP_DIRECTORY;
485
486		ret = user_path_at(dfd, filename, lookup_flags, path);
487		if (ret)
488			goto out;
489	}
490
491	/* you can only watch an inode if you have read permissions on it */
492	ret = inode_permission(path->dentry->d_inode, MAY_READ);
493	if (ret)
494		path_put(path);
495out:
496	return ret;
497}
498
499static __u32 fanotify_mark_remove_from_mask(struct fsnotify_mark *fsn_mark,
500					    __u32 mask,
501					    unsigned int flags,
502					    int *destroy)
503{
504	__u32 oldmask = 0;
505
506	spin_lock(&fsn_mark->lock);
507	if (!(flags & FAN_MARK_IGNORED_MASK)) {
508		__u32 tmask = fsn_mark->mask & ~mask;
509
510		if (flags & FAN_MARK_ONDIR)
511			tmask &= ~FAN_ONDIR;
512
513		oldmask = fsn_mark->mask;
514		fsn_mark->mask = tmask;
515	} else {
516		__u32 tmask = fsn_mark->ignored_mask & ~mask;
517		if (flags & FAN_MARK_ONDIR)
518			tmask &= ~FAN_ONDIR;
519		fsn_mark->ignored_mask = tmask;
520	}
521	*destroy = !(fsn_mark->mask | fsn_mark->ignored_mask);
522	spin_unlock(&fsn_mark->lock);
523
524	return mask & oldmask;
525}
526
527static int fanotify_remove_vfsmount_mark(struct fsnotify_group *group,
528					 struct vfsmount *mnt, __u32 mask,
529					 unsigned int flags)
530{
531	struct fsnotify_mark *fsn_mark = NULL;
532	__u32 removed;
533	int destroy_mark;
534
535	mutex_lock(&group->mark_mutex);
536	fsn_mark = fsnotify_find_mark(&real_mount(mnt)->mnt_fsnotify_marks,
537				      group);
538	if (!fsn_mark) {
539		mutex_unlock(&group->mark_mutex);
540		return -ENOENT;
541	}
542
543	removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags,
544						 &destroy_mark);
545	if (removed & real_mount(mnt)->mnt_fsnotify_mask)
546		fsnotify_recalc_mask(real_mount(mnt)->mnt_fsnotify_marks);
547	if (destroy_mark)
548		fsnotify_detach_mark(fsn_mark);
549	mutex_unlock(&group->mark_mutex);
550	if (destroy_mark)
551		fsnotify_free_mark(fsn_mark);
552
 
553	fsnotify_put_mark(fsn_mark);
554	return 0;
555}
556
557static int fanotify_remove_inode_mark(struct fsnotify_group *group,
558				      struct inode *inode, __u32 mask,
559				      unsigned int flags)
560{
561	struct fsnotify_mark *fsn_mark = NULL;
562	__u32 removed;
563	int destroy_mark;
564
565	mutex_lock(&group->mark_mutex);
566	fsn_mark = fsnotify_find_mark(&inode->i_fsnotify_marks, group);
567	if (!fsn_mark) {
568		mutex_unlock(&group->mark_mutex);
569		return -ENOENT;
570	}
571
572	removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags,
573						 &destroy_mark);
574	if (removed & inode->i_fsnotify_mask)
575		fsnotify_recalc_mask(inode->i_fsnotify_marks);
576	if (destroy_mark)
577		fsnotify_detach_mark(fsn_mark);
578	mutex_unlock(&group->mark_mutex);
579	if (destroy_mark)
580		fsnotify_free_mark(fsn_mark);
581
582	/* matches the fsnotify_find_mark() */
 
583	fsnotify_put_mark(fsn_mark);
 
 
584
585	return 0;
586}
587
588static __u32 fanotify_mark_add_to_mask(struct fsnotify_mark *fsn_mark,
589				       __u32 mask,
590				       unsigned int flags)
591{
592	__u32 oldmask = -1;
593
594	spin_lock(&fsn_mark->lock);
595	if (!(flags & FAN_MARK_IGNORED_MASK)) {
596		__u32 tmask = fsn_mark->mask | mask;
597
598		if (flags & FAN_MARK_ONDIR)
599			tmask |= FAN_ONDIR;
600
601		oldmask = fsn_mark->mask;
602		fsn_mark->mask = tmask;
603	} else {
604		__u32 tmask = fsn_mark->ignored_mask | mask;
605		if (flags & FAN_MARK_ONDIR)
606			tmask |= FAN_ONDIR;
607
608		fsn_mark->ignored_mask = tmask;
609		if (flags & FAN_MARK_IGNORED_SURV_MODIFY)
610			fsn_mark->flags |= FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY;
611	}
612	spin_unlock(&fsn_mark->lock);
613
614	return mask & ~oldmask;
615}
616
617static struct fsnotify_mark *fanotify_add_new_mark(struct fsnotify_group *group,
618						   struct inode *inode,
619						   struct vfsmount *mnt)
620{
621	struct fsnotify_mark *mark;
622	int ret;
623
624	if (atomic_read(&group->num_marks) > group->fanotify_data.max_marks)
625		return ERR_PTR(-ENOSPC);
626
627	mark = kmem_cache_alloc(fanotify_mark_cache, GFP_KERNEL);
628	if (!mark)
629		return ERR_PTR(-ENOMEM);
630
631	fsnotify_init_mark(mark, group);
632	ret = fsnotify_add_mark_locked(mark, inode, mnt, 0);
633	if (ret) {
634		fsnotify_put_mark(mark);
635		return ERR_PTR(ret);
636	}
637
638	return mark;
639}
640
 
 
641
642static int fanotify_add_vfsmount_mark(struct fsnotify_group *group,
643				      struct vfsmount *mnt, __u32 mask,
644				      unsigned int flags)
645{
646	struct fsnotify_mark *fsn_mark;
647	__u32 added;
 
648
649	mutex_lock(&group->mark_mutex);
650	fsn_mark = fsnotify_find_mark(&real_mount(mnt)->mnt_fsnotify_marks,
651				      group);
652	if (!fsn_mark) {
653		fsn_mark = fanotify_add_new_mark(group, NULL, mnt);
654		if (IS_ERR(fsn_mark)) {
655			mutex_unlock(&group->mark_mutex);
656			return PTR_ERR(fsn_mark);
657		}
658	}
659	added = fanotify_mark_add_to_mask(fsn_mark, mask, flags);
660	if (added & ~real_mount(mnt)->mnt_fsnotify_mask)
661		fsnotify_recalc_mask(real_mount(mnt)->mnt_fsnotify_marks);
662	mutex_unlock(&group->mark_mutex);
663
664	fsnotify_put_mark(fsn_mark);
665	return 0;
666}
667
668static int fanotify_add_inode_mark(struct fsnotify_group *group,
669				   struct inode *inode, __u32 mask,
670				   unsigned int flags)
671{
672	struct fsnotify_mark *fsn_mark;
673	__u32 added;
 
674
675	pr_debug("%s: group=%p inode=%p\n", __func__, group, inode);
676
677	/*
678	 * If some other task has this inode open for write we should not add
679	 * an ignored mark, unless that ignored mark is supposed to survive
680	 * modification changes anyway.
681	 */
682	if ((flags & FAN_MARK_IGNORED_MASK) &&
683	    !(flags & FAN_MARK_IGNORED_SURV_MODIFY) &&
684	    (atomic_read(&inode->i_writecount) > 0))
685		return 0;
686
687	mutex_lock(&group->mark_mutex);
688	fsn_mark = fsnotify_find_mark(&inode->i_fsnotify_marks, group);
689	if (!fsn_mark) {
690		fsn_mark = fanotify_add_new_mark(group, inode, NULL);
691		if (IS_ERR(fsn_mark)) {
692			mutex_unlock(&group->mark_mutex);
693			return PTR_ERR(fsn_mark);
694		}
695	}
696	added = fanotify_mark_add_to_mask(fsn_mark, mask, flags);
697	if (added & ~inode->i_fsnotify_mask)
698		fsnotify_recalc_mask(inode->i_fsnotify_marks);
699	mutex_unlock(&group->mark_mutex);
700
701	fsnotify_put_mark(fsn_mark);
702	return 0;
703}
704
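
fanotify_mark_add_to_mask() above keeps two separate bit sets on a mark, the event mask and the ignored mask, with FAN_MARK_IGNORED_SURV_MODIFY deciding whether the ignored bits survive modification of the object. A hedged userspace sketch driving both paths; the path /var/cache is an arbitrary placeholder and CAP_SYS_ADMIN is assumed:

#include <fcntl.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/fanotify.h>

int main(void)
{
	int fan_fd = fanotify_init(FAN_CLASS_NOTIF, O_RDONLY);
	if (fan_fd < 0)
		exit(1);

	/* FAN_MARK_ADD without FAN_MARK_IGNORED_MASK ends up in fsn_mark->mask
	 * via fanotify_add_inode_mark() -> fanotify_mark_add_to_mask(). */
	fanotify_mark(fan_fd, FAN_MARK_ADD,
		      FAN_OPEN | FAN_CLOSE_WRITE | FAN_EVENT_ON_CHILD,
		      AT_FDCWD, "/var/cache");

	/* With FAN_MARK_IGNORED_MASK the same helper fills fsn_mark->ignored_mask
	 * instead; SURV_MODIFY keeps the ignore in place across modifications. */
	fanotify_mark(fan_fd, FAN_MARK_ADD | FAN_MARK_IGNORED_MASK |
		      FAN_MARK_IGNORED_SURV_MODIFY,
		      FAN_OPEN, AT_FDCWD, "/var/cache");

	close(fan_fd);
	return 0;
}
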
705/* fanotify syscalls */
706SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
707{
708	struct fsnotify_group *group;
709	int f_flags, fd;
710	struct user_struct *user;
711	struct fanotify_event_info *oevent;
712
713	pr_debug("%s: flags=%d event_f_flags=%d\n",
714		__func__, flags, event_f_flags);
715
716	if (!capable(CAP_SYS_ADMIN))
717		return -EPERM;
718
719#ifdef CONFIG_AUDITSYSCALL
720	if (flags & ~(FAN_ALL_INIT_FLAGS | FAN_ENABLE_AUDIT))
721#else
722	if (flags & ~FAN_ALL_INIT_FLAGS)
723#endif
724		return -EINVAL;
725
726	if (event_f_flags & ~FANOTIFY_INIT_ALL_EVENT_F_BITS)
727		return -EINVAL;
728
729	switch (event_f_flags & O_ACCMODE) {
730	case O_RDONLY:
731	case O_RDWR:
732	case O_WRONLY:
733		break;
734	default:
735		return -EINVAL;
736	}
737
738	user = get_current_user();
739	if (atomic_read(&user->fanotify_listeners) > FANOTIFY_DEFAULT_MAX_LISTENERS) {
740		free_uid(user);
741		return -EMFILE;
742	}
743
744	f_flags = O_RDWR | FMODE_NONOTIFY;
745	if (flags & FAN_CLOEXEC)
746		f_flags |= O_CLOEXEC;
747	if (flags & FAN_NONBLOCK)
748		f_flags |= O_NONBLOCK;
749
750	/* fsnotify_alloc_group takes a ref.  Dropped in fanotify_release */
751	group = fsnotify_alloc_group(&fanotify_fsnotify_ops);
752	if (IS_ERR(group)) {
753		free_uid(user);
754		return PTR_ERR(group);
755	}
756
757	group->fanotify_data.user = user;
758	atomic_inc(&user->fanotify_listeners);
759
760	oevent = fanotify_alloc_event(group, NULL, FS_Q_OVERFLOW, NULL);
761	if (unlikely(!oevent)) {
762		fd = -ENOMEM;
763		goto out_destroy_group;
764	}
765	group->overflow_event = &oevent->fse;
766
767	if (force_o_largefile())
768		event_f_flags |= O_LARGEFILE;
769	group->fanotify_data.f_flags = event_f_flags;
 
 
770	init_waitqueue_head(&group->fanotify_data.access_waitq);
771	INIT_LIST_HEAD(&group->fanotify_data.access_list);
 
 
772	switch (flags & FAN_ALL_CLASS_BITS) {
773	case FAN_CLASS_NOTIF:
774		group->priority = FS_PRIO_0;
775		break;
776	case FAN_CLASS_CONTENT:
777		group->priority = FS_PRIO_1;
778		break;
779	case FAN_CLASS_PRE_CONTENT:
780		group->priority = FS_PRIO_2;
781		break;
782	default:
783		fd = -EINVAL;
784		goto out_destroy_group;
785	}
786
787	if (flags & FAN_UNLIMITED_QUEUE) {
788		fd = -EPERM;
789		if (!capable(CAP_SYS_ADMIN))
790			goto out_destroy_group;
791		group->max_events = UINT_MAX;
792	} else {
793		group->max_events = FANOTIFY_DEFAULT_MAX_EVENTS;
794	}
795
796	if (flags & FAN_UNLIMITED_MARKS) {
797		fd = -EPERM;
798		if (!capable(CAP_SYS_ADMIN))
799			goto out_destroy_group;
800		group->fanotify_data.max_marks = UINT_MAX;
801	} else {
802		group->fanotify_data.max_marks = FANOTIFY_DEFAULT_MAX_MARKS;
803	}
804
805	if (flags & FAN_ENABLE_AUDIT) {
806		fd = -EPERM;
807		if (!capable(CAP_AUDIT_WRITE))
808			goto out_destroy_group;
809		group->fanotify_data.audit = true;
810	}
811
812	fd = anon_inode_getfd("[fanotify]", &fanotify_fops, group, f_flags);
813	if (fd < 0)
814		goto out_destroy_group;
815
816	return fd;
817
818out_destroy_group:
819	fsnotify_destroy_group(group);
820	return fd;
821}
822
823static int do_fanotify_mark(int fanotify_fd, unsigned int flags, __u64 mask,
824			    int dfd, const char  __user *pathname)
 
825{
826	struct inode *inode = NULL;
827	struct vfsmount *mnt = NULL;
828	struct fsnotify_group *group;
829	struct fd f;
830	struct path path;
831	u32 valid_mask = FAN_ALL_EVENTS | FAN_EVENT_ON_CHILD;
832	int ret;
833
834	pr_debug("%s: fanotify_fd=%d flags=%x dfd=%d pathname=%p mask=%llx\n",
835		 __func__, fanotify_fd, flags, dfd, pathname, mask);
836
837	/* we only use the lower 32 bits as of right now. */
838	if (mask & ((__u64)0xffffffff << 32))
839		return -EINVAL;
840
841	if (flags & ~FAN_ALL_MARK_FLAGS)
842		return -EINVAL;
843	switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE | FAN_MARK_FLUSH)) {
844	case FAN_MARK_ADD:		/* fallthrough */
845	case FAN_MARK_REMOVE:
846		if (!mask)
847			return -EINVAL;
848		break;
849	case FAN_MARK_FLUSH:
850		if (flags & ~(FAN_MARK_MOUNT | FAN_MARK_FLUSH))
851			return -EINVAL;
852		break;
853	default:
854		return -EINVAL;
855	}
856
857	if (mask & FAN_ONDIR) {
858		flags |= FAN_MARK_ONDIR;
859		mask &= ~FAN_ONDIR;
860	}
861
862	if (IS_ENABLED(CONFIG_FANOTIFY_ACCESS_PERMISSIONS))
863		valid_mask |= FAN_ALL_PERM_EVENTS;
864
865	if (mask & ~valid_mask)
 
866		return -EINVAL;
867
868	f = fdget(fanotify_fd);
869	if (unlikely(!f.file))
870		return -EBADF;
871
872	/* verify that this is indeed an fanotify instance */
873	ret = -EINVAL;
874	if (unlikely(f.file->f_op != &fanotify_fops))
875		goto fput_and_out;
876	group = f.file->private_data;
877
878	/*
879	 * group->priority == FS_PRIO_0 == FAN_CLASS_NOTIF.  These are not
880	 * allowed to set permissions events.
881	 */
882	ret = -EINVAL;
883	if (mask & FAN_ALL_PERM_EVENTS &&
884	    group->priority == FS_PRIO_0)
885		goto fput_and_out;
886
887	if (flags & FAN_MARK_FLUSH) {
888		ret = 0;
889		if (flags & FAN_MARK_MOUNT)
890			fsnotify_clear_vfsmount_marks_by_group(group);
891		else
892			fsnotify_clear_inode_marks_by_group(group);
893		goto fput_and_out;
894	}
895
896	ret = fanotify_find_path(dfd, pathname, &path, flags);
897	if (ret)
898		goto fput_and_out;
899
900	/* inode held in place by reference to path; group by fget on fd */
901	if (!(flags & FAN_MARK_MOUNT))
902		inode = path.dentry->d_inode;
903	else
904		mnt = path.mnt;
905
906	/* create/update an inode mark */
907	switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE)) {
908	case FAN_MARK_ADD:
909		if (flags & FAN_MARK_MOUNT)
910			ret = fanotify_add_vfsmount_mark(group, mnt, mask, flags);
911		else
912			ret = fanotify_add_inode_mark(group, inode, mask, flags);
913		break;
914	case FAN_MARK_REMOVE:
915		if (flags & FAN_MARK_MOUNT)
916			ret = fanotify_remove_vfsmount_mark(group, mnt, mask, flags);
917		else
918			ret = fanotify_remove_inode_mark(group, inode, mask, flags);
919		break;
920	default:
921		ret = -EINVAL;
922	}
923
924	path_put(&path);
925fput_and_out:
926	fdput(f);
927	return ret;
928}
929
930SYSCALL_DEFINE5(fanotify_mark, int, fanotify_fd, unsigned int, flags,
931			      __u64, mask, int, dfd,
932			      const char  __user *, pathname)
933{
934	return do_fanotify_mark(fanotify_fd, flags, mask, dfd, pathname);
935}
936
937#ifdef CONFIG_COMPAT
938COMPAT_SYSCALL_DEFINE6(fanotify_mark,
939				int, fanotify_fd, unsigned int, flags,
940				__u32, mask0, __u32, mask1, int, dfd,
941				const char  __user *, pathname)
942{
943	return do_fanotify_mark(fanotify_fd, flags,
944#ifdef __BIG_ENDIAN
945				((__u64)mask0 << 32) | mask1,
946#else
947				((__u64)mask1 << 32) | mask0,
948#endif
949				 dfd, pathname);
950}
 
951#endif
952
953/*
954 * fanotify_user_setup - Our initialization function.  Note that we cannot return
955 * error because we have compiled-in VFS hooks.  So an (unlikely) failure here
956 * must result in panic().
957 */
958static int __init fanotify_user_setup(void)
959{
960	fanotify_mark_cache = KMEM_CACHE(fsnotify_mark, SLAB_PANIC);
961	fanotify_event_cachep = KMEM_CACHE(fanotify_event_info, SLAB_PANIC);
962	if (IS_ENABLED(CONFIG_FANOTIFY_ACCESS_PERMISSIONS)) {
963		fanotify_perm_event_cachep =
964			KMEM_CACHE(fanotify_perm_event_info, SLAB_PANIC);
965	}
966
967	return 0;
968}
969device_initcall(fanotify_user_setup);
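
Before the older v3.1 version of the same file below, here is a minimal userspace sketch of the call sequence the listing above serves: fanotify_init(2), fanotify_mark(2) and a read() loop over struct fanotify_event_metadata. It is illustrative only; the mount point /tmp and the event bits are arbitrary assumptions, CAP_SYS_ADMIN is required, and error handling is reduced to bare exits.

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/fanotify.h>

int main(void)
{
	/* fanotify_init() -> anon_inode_getfd("[fanotify]", &fanotify_fops, ...) */
	int fan_fd = fanotify_init(FAN_CLOEXEC | FAN_CLASS_NOTIF, O_RDONLY);
	if (fan_fd < 0) {
		perror("fanotify_init");
		exit(1);
	}

	/* fanotify_mark() -> fanotify_add_vfsmount_mark() because of FAN_MARK_MOUNT */
	if (fanotify_mark(fan_fd, FAN_MARK_ADD | FAN_MARK_MOUNT,
			  FAN_OPEN | FAN_CLOSE_WRITE, AT_FDCWD, "/tmp") < 0) {
		perror("fanotify_mark");
		exit(1);
	}

	/* read() -> fanotify_read() -> copy_event_to_user() */
	struct fanotify_event_metadata buf[200];
	ssize_t len = read(fan_fd, buf, sizeof(buf));
	struct fanotify_event_metadata *md = buf;

	while (FAN_EVENT_OK(md, len)) {
		printf("mask 0x%llx pid %d fd %d\n",
		       (unsigned long long)md->mask, (int)md->pid, md->fd);
		if (md->fd >= 0)
			close(md->fd);	/* descriptor produced by create_fd() */
		md = FAN_EVENT_NEXT(md, len);
	}
	return 0;
}
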
v3.1
 
  1#include <linux/fanotify.h>
  2#include <linux/fcntl.h>
  3#include <linux/file.h>
  4#include <linux/fs.h>
  5#include <linux/anon_inodes.h>
  6#include <linux/fsnotify_backend.h>
  7#include <linux/init.h>
  8#include <linux/mount.h>
  9#include <linux/namei.h>
 10#include <linux/poll.h>
 11#include <linux/security.h>
 12#include <linux/syscalls.h>
 13#include <linux/slab.h>
 14#include <linux/types.h>
 15#include <linux/uaccess.h>
 
 
 16
 17#include <asm/ioctls.h>
 18
 19#define FANOTIFY_DEFAULT_MAX_EVENTS	16384
 20#define FANOTIFY_DEFAULT_MAX_MARKS	8192
 21#define FANOTIFY_DEFAULT_MAX_LISTENERS	128
 22
 23extern const struct fsnotify_ops fanotify_fsnotify_ops;
 24
 25static struct kmem_cache *fanotify_mark_cache __read_mostly;
 26static struct kmem_cache *fanotify_response_event_cache __read_mostly;
 27
 28struct fanotify_response_event {
 29	struct list_head list;
 30	__s32 fd;
 31	struct fsnotify_event *event;
 32};
 33
 34/*
 35 * Get an fsnotify notification event if one exists and is small
 36 * enough to fit in "count". Return an error pointer if the count
 37 * is not large enough.
 38 *
 39 * Called with the group->notification_mutex held.
 40 */
 41static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
 42					    size_t count)
 43{
 44	BUG_ON(!mutex_is_locked(&group->notification_mutex));
 45
 46	pr_debug("%s: group=%p count=%zd\n", __func__, group, count);
 47
 48	if (fsnotify_notify_queue_is_empty(group))
 49		return NULL;
 50
 51	if (FAN_EVENT_METADATA_LEN > count)
 52		return ERR_PTR(-EINVAL);
 53
 54	/* held the notification_mutex the whole time, so this is the
 55	 * same event we peeked above */
 56	return fsnotify_remove_notify_event(group);
 57}
 58
 59static int create_fd(struct fsnotify_group *group, struct fsnotify_event *event)
 
 
 60{
 61	int client_fd;
 62	struct dentry *dentry;
 63	struct vfsmount *mnt;
 64	struct file *new_file;
 65
 66	pr_debug("%s: group=%p event=%p\n", __func__, group, event);
 67
 68	client_fd = get_unused_fd();
 69	if (client_fd < 0)
 70		return client_fd;
 71
 72	if (event->data_type != FSNOTIFY_EVENT_PATH) {
 73		WARN_ON(1);
 74		put_unused_fd(client_fd);
 75		return -EINVAL;
 76	}
 77
 78	/*
 79	 * we need a new file handle for the userspace program so it can read even if it was
 80	 * originally opened O_WRONLY.
 81	 */
 82	dentry = dget(event->path.dentry);
 83	mnt = mntget(event->path.mnt);
 84	/* it's possible this event was an overflow event.  in that case dentry and mnt
 85	 * are NULL;  That's fine, just don't call dentry open */
 86	if (dentry && mnt)
 87		new_file = dentry_open(dentry, mnt,
 88				       group->fanotify_data.f_flags | FMODE_NONOTIFY,
 89				       current_cred());
 90	else
 91		new_file = ERR_PTR(-EOVERFLOW);
 92	if (IS_ERR(new_file)) {
 93		/*
  94		 * We still send an event even if we can't open the file.  This
  95		 * can happen when say tasks are gone and we try to open their
  96		 * /proc files or we try to open a WRONLY file like in sysfs.
  97		 * We just send the errno to userspace since there isn't much
 98		 * else we can do.
 99		 */
100		put_unused_fd(client_fd);
101		client_fd = PTR_ERR(new_file);
102	} else {
103		fd_install(client_fd, new_file);
104	}
105
106	return client_fd;
107}
108
109static int fill_event_metadata(struct fsnotify_group *group,
110				   struct fanotify_event_metadata *metadata,
111				   struct fsnotify_event *event)
 
112{
113	int ret = 0;
 
114
115	pr_debug("%s: group=%p metadata=%p event=%p\n", __func__,
116		 group, metadata, event);
117
 
 
118	metadata->event_len = FAN_EVENT_METADATA_LEN;
119	metadata->metadata_len = FAN_EVENT_METADATA_LEN;
120	metadata->vers = FANOTIFY_METADATA_VERSION;
121	metadata->mask = event->mask & FAN_ALL_OUTGOING_EVENTS;
 
122	metadata->pid = pid_vnr(event->tgid);
123	if (unlikely(event->mask & FAN_Q_OVERFLOW))
124		metadata->fd = FAN_NOFD;
125	else {
126		metadata->fd = create_fd(group, event);
127		if (metadata->fd < 0)
128			ret = metadata->fd;
129	}
130
131	return ret;
132}
133
134#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
135static struct fanotify_response_event *dequeue_re(struct fsnotify_group *group,
136						  __s32 fd)
137{
138	struct fanotify_response_event *re, *return_re = NULL;
139
140	mutex_lock(&group->fanotify_data.access_mutex);
141	list_for_each_entry(re, &group->fanotify_data.access_list, list) {
142		if (re->fd != fd)
143			continue;
144
145		list_del_init(&re->list);
146		return_re = re;
147		break;
148	}
149	mutex_unlock(&group->fanotify_data.access_mutex);
150
151	pr_debug("%s: found return_re=%p\n", __func__, return_re);
152
153	return return_re;
154}
155
156static int process_access_response(struct fsnotify_group *group,
157				   struct fanotify_response *response_struct)
158{
159	struct fanotify_response_event *re;
160	__s32 fd = response_struct->fd;
161	__u32 response = response_struct->response;
162
163	pr_debug("%s: group=%p fd=%d response=%d\n", __func__, group,
164		 fd, response);
165	/*
166	 * make sure the response is valid, if invalid we do nothing and either
167	 * userspace can send a valid response or we will clean it up after the
168	 * timeout
169	 */
170	switch (response) {
171	case FAN_ALLOW:
172	case FAN_DENY:
173		break;
174	default:
175		return -EINVAL;
176	}
177
178	if (fd < 0)
179		return -EINVAL;
180
181	re = dequeue_re(group, fd);
182	if (!re)
183		return -ENOENT;
184
185	re->event->response = response;
186
187	wake_up(&group->fanotify_data.access_waitq);
188
189	kmem_cache_free(fanotify_response_event_cache, re);
190
191	return 0;
192}
193
194static int prepare_for_access_response(struct fsnotify_group *group,
195				       struct fsnotify_event *event,
196				       __s32 fd)
197{
198	struct fanotify_response_event *re;
199
200	if (!(event->mask & FAN_ALL_PERM_EVENTS))
201		return 0;
202
203	re = kmem_cache_alloc(fanotify_response_event_cache, GFP_KERNEL);
204	if (!re)
205		return -ENOMEM;
206
207	re->event = event;
208	re->fd = fd;
209
210	mutex_lock(&group->fanotify_data.access_mutex);
211
212	if (atomic_read(&group->fanotify_data.bypass_perm)) {
213		mutex_unlock(&group->fanotify_data.access_mutex);
214		kmem_cache_free(fanotify_response_event_cache, re);
215		event->response = FAN_ALLOW;
216		return 0;
217	}
218		
219	list_add_tail(&re->list, &group->fanotify_data.access_list);
220	mutex_unlock(&group->fanotify_data.access_mutex);
221
222	return 0;
223}
224
225static void remove_access_response(struct fsnotify_group *group,
226				   struct fsnotify_event *event,
227				   __s32 fd)
228{
229	struct fanotify_response_event *re;
230
231	if (!(event->mask & FAN_ALL_PERM_EVENTS))
232		return;
233
234	re = dequeue_re(group, fd);
235	if (!re)
236		return;
237
238	BUG_ON(re->event != event);
239
240	kmem_cache_free(fanotify_response_event_cache, re);
241
242	return;
243}
244#else
245static int prepare_for_access_response(struct fsnotify_group *group,
246				       struct fsnotify_event *event,
247				       __s32 fd)
248{
249	return 0;
250}
251
252static void remove_access_response(struct fsnotify_group *group,
253				   struct fsnotify_event *event,
254				   __s32 fd)
255{
256	return;
257}
258#endif
259
260static ssize_t copy_event_to_user(struct fsnotify_group *group,
261				  struct fsnotify_event *event,
262				  char __user *buf)
263{
264	struct fanotify_event_metadata fanotify_event_metadata;
 
265	int fd, ret;
266
267	pr_debug("%s: group=%p event=%p\n", __func__, group, event);
268
269	ret = fill_event_metadata(group, &fanotify_event_metadata, event);
270	if (ret < 0)
271		goto out;
272
273	fd = fanotify_event_metadata.fd;
274	ret = prepare_for_access_response(group, event, fd);
275	if (ret)
276		goto out_close_fd;
277
278	ret = -EFAULT;
279	if (copy_to_user(buf, &fanotify_event_metadata,
280			 fanotify_event_metadata.event_len))
281		goto out_kill_access_response;
282
 
 
283	return fanotify_event_metadata.event_len;
284
285out_kill_access_response:
286	remove_access_response(group, event, fd);
287out_close_fd:
288	if (fd != FAN_NOFD)
289		sys_close(fd);
290out:
291#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
292	if (event->mask & FAN_ALL_PERM_EVENTS) {
293		event->response = FAN_DENY;
294		wake_up(&group->fanotify_data.access_waitq);
295	}
296#endif
297	return ret;
298}
299
 300/* fanotify userspace file descriptor functions */
301static unsigned int fanotify_poll(struct file *file, poll_table *wait)
302{
303	struct fsnotify_group *group = file->private_data;
304	int ret = 0;
305
306	poll_wait(file, &group->notification_waitq, wait);
307	mutex_lock(&group->notification_mutex);
308	if (!fsnotify_notify_queue_is_empty(group))
309		ret = POLLIN | POLLRDNORM;
310	mutex_unlock(&group->notification_mutex);
311
312	return ret;
313}
314
315static ssize_t fanotify_read(struct file *file, char __user *buf,
316			     size_t count, loff_t *pos)
317{
318	struct fsnotify_group *group;
319	struct fsnotify_event *kevent;
320	char __user *start;
321	int ret;
322	DEFINE_WAIT(wait);
323
324	start = buf;
325	group = file->private_data;
326
327	pr_debug("%s: group=%p\n", __func__, group);
328
 
329	while (1) {
330		prepare_to_wait(&group->notification_waitq, &wait, TASK_INTERRUPTIBLE);
331
332		mutex_lock(&group->notification_mutex);
333		kevent = get_one_event(group, count);
334		mutex_unlock(&group->notification_mutex);
335
336		if (kevent) {
337			ret = PTR_ERR(kevent);
338			if (IS_ERR(kevent))
339				break;
340			ret = copy_event_to_user(group, kevent, buf);
341			fsnotify_put_event(kevent);
342			if (ret < 0)
343				break;
344			buf += ret;
345			count -= ret;
346			continue;
347		}
348
349		ret = -EAGAIN;
350		if (file->f_flags & O_NONBLOCK)
351			break;
352		ret = -ERESTARTSYS;
353		if (signal_pending(current))
354			break;
355
356		if (start != buf)
357			break;
358
359		schedule();
360	}
 
361
362	finish_wait(&group->notification_waitq, &wait);
363	if (start != buf && ret != -EFAULT)
364		ret = buf - start;
365	return ret;
366}
367
368static ssize_t fanotify_write(struct file *file, const char __user *buf, size_t count, loff_t *pos)
369{
370#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
371	struct fanotify_response response = { .fd = -1, .response = -1 };
372	struct fsnotify_group *group;
373	int ret;
374
375	group = file->private_data;
376
377	if (count > sizeof(response))
378		count = sizeof(response);
379
380	pr_debug("%s: group=%p count=%zu\n", __func__, group, count);
381
382	if (copy_from_user(&response, buf, count))
383		return -EFAULT;
384
385	ret = process_access_response(group, &response);
386	if (ret < 0)
387		count = ret;
388
389	return count;
390#else
391	return -EINVAL;
392#endif
393}
394
395static int fanotify_release(struct inode *ignored, struct file *file)
396{
397	struct fsnotify_group *group = file->private_data;
 
 
398
399#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
400	struct fanotify_response_event *re, *lre;
401
402	mutex_lock(&group->fanotify_data.access_mutex);
403
404	atomic_inc(&group->fanotify_data.bypass_perm);
 
 
405
406	list_for_each_entry_safe(re, lre, &group->fanotify_data.access_list, list) {
407		pr_debug("%s: found group=%p re=%p event=%p\n", __func__, group,
408			 re, re->event);
409
410		list_del_init(&re->list);
411		re->event->response = FAN_ALLOW;
412
413		kmem_cache_free(fanotify_response_event_cache, re);
414	}
415	mutex_unlock(&group->fanotify_data.access_mutex);
416
 
417	wake_up(&group->fanotify_data.access_waitq);
418#endif
419	/* matches the fanotify_init->fsnotify_alloc_group */
420	fsnotify_put_group(group);
421
422	return 0;
423}
424
425static long fanotify_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
426{
427	struct fsnotify_group *group;
428	struct fsnotify_event_holder *holder;
429	void __user *p;
430	int ret = -ENOTTY;
431	size_t send_len = 0;
432
433	group = file->private_data;
434
435	p = (void __user *) arg;
436
437	switch (cmd) {
438	case FIONREAD:
439		mutex_lock(&group->notification_mutex);
440		list_for_each_entry(holder, &group->notification_list, event_list)
441			send_len += FAN_EVENT_METADATA_LEN;
442		mutex_unlock(&group->notification_mutex);
443		ret = put_user(send_len, (int __user *) p);
444		break;
445	}
446
447	return ret;
448}
449
450static const struct file_operations fanotify_fops = {
 
451	.poll		= fanotify_poll,
452	.read		= fanotify_read,
453	.write		= fanotify_write,
454	.fasync		= NULL,
455	.release	= fanotify_release,
456	.unlocked_ioctl	= fanotify_ioctl,
457	.compat_ioctl	= fanotify_ioctl,
458	.llseek		= noop_llseek,
459};
460
461static void fanotify_free_mark(struct fsnotify_mark *fsn_mark)
462{
463	kmem_cache_free(fanotify_mark_cache, fsn_mark);
464}
465
466static int fanotify_find_path(int dfd, const char __user *filename,
467			      struct path *path, unsigned int flags)
468{
469	int ret;
470
471	pr_debug("%s: dfd=%d filename=%p flags=%x\n", __func__,
472		 dfd, filename, flags);
473
474	if (filename == NULL) {
475		struct file *file;
476		int fput_needed;
477
478		ret = -EBADF;
479		file = fget_light(dfd, &fput_needed);
480		if (!file)
481			goto out;
482
483		ret = -ENOTDIR;
484		if ((flags & FAN_MARK_ONLYDIR) &&
485		    !(S_ISDIR(file->f_path.dentry->d_inode->i_mode))) {
486			fput_light(file, fput_needed);
487			goto out;
488		}
489
490		*path = file->f_path;
491		path_get(path);
492		fput_light(file, fput_needed);
493	} else {
494		unsigned int lookup_flags = 0;
495
496		if (!(flags & FAN_MARK_DONT_FOLLOW))
497			lookup_flags |= LOOKUP_FOLLOW;
498		if (flags & FAN_MARK_ONLYDIR)
499			lookup_flags |= LOOKUP_DIRECTORY;
500
501		ret = user_path_at(dfd, filename, lookup_flags, path);
502		if (ret)
503			goto out;
504	}
505
506	/* you can only watch an inode if you have read permissions on it */
507	ret = inode_permission(path->dentry->d_inode, MAY_READ);
508	if (ret)
509		path_put(path);
510out:
511	return ret;
512}
513
514static __u32 fanotify_mark_remove_from_mask(struct fsnotify_mark *fsn_mark,
515					    __u32 mask,
516					    unsigned int flags)
 
517{
518	__u32 oldmask;
519
520	spin_lock(&fsn_mark->lock);
521	if (!(flags & FAN_MARK_IGNORED_MASK)) {
522		oldmask = fsn_mark->mask;
523		fsnotify_set_mark_mask_locked(fsn_mark, (oldmask & ~mask));
524	} else {
525		oldmask = fsn_mark->ignored_mask;
526		fsnotify_set_mark_ignored_mask_locked(fsn_mark, (oldmask & ~mask));
 
 
527	}
 
528	spin_unlock(&fsn_mark->lock);
529
530	if (!(oldmask & ~mask))
531		fsnotify_destroy_mark(fsn_mark);
532
533	return mask & oldmask;
534}
535
536static int fanotify_remove_vfsmount_mark(struct fsnotify_group *group,
537					 struct vfsmount *mnt, __u32 mask,
538					 unsigned int flags)
539{
540	struct fsnotify_mark *fsn_mark = NULL;
541	__u32 removed;
 
542
543	fsn_mark = fsnotify_find_vfsmount_mark(group, mnt);
544	if (!fsn_mark)
545		return -ENOENT;
546
547	removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags);
548	fsnotify_put_mark(fsn_mark);
549	if (removed & mnt->mnt_fsnotify_mask)
550		fsnotify_recalc_vfsmount_mask(mnt);
551
552	return 0;
553}
554
555static int fanotify_remove_inode_mark(struct fsnotify_group *group,
556				      struct inode *inode, __u32 mask,
557				      unsigned int flags)
558{
559	struct fsnotify_mark *fsn_mark = NULL;
560	__u32 removed;
 
561
562	fsn_mark = fsnotify_find_inode_mark(group, inode);
563	if (!fsn_mark)
 
 
564		return -ENOENT;
565
566	removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags);
567	/* matches the fsnotify_find_inode_mark() */
568	fsnotify_put_mark(fsn_mark);
569	if (removed & inode->i_fsnotify_mask)
570		fsnotify_recalc_inode_mask(inode);
571
572	return 0;
573}
574
575static __u32 fanotify_mark_add_to_mask(struct fsnotify_mark *fsn_mark,
576				       __u32 mask,
577				       unsigned int flags)
578{
579	__u32 oldmask = -1;
580
581	spin_lock(&fsn_mark->lock);
582	if (!(flags & FAN_MARK_IGNORED_MASK)) {
583		oldmask = fsn_mark->mask;
584		fsnotify_set_mark_mask_locked(fsn_mark, (oldmask | mask));
585	} else {
586		__u32 tmask = fsn_mark->ignored_mask | mask;
587		fsnotify_set_mark_ignored_mask_locked(fsn_mark, tmask);
588		if (flags & FAN_MARK_IGNORED_SURV_MODIFY)
589			fsn_mark->flags |= FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY;
590	}
591
592	if (!(flags & FAN_MARK_ONDIR)) {
593		__u32 tmask = fsn_mark->ignored_mask | FAN_ONDIR;
594		fsnotify_set_mark_ignored_mask_locked(fsn_mark, tmask);
595	}
596
597	spin_unlock(&fsn_mark->lock);
 
598
599	return mask & ~oldmask;
600}
601
602static int fanotify_add_vfsmount_mark(struct fsnotify_group *group,
603				      struct vfsmount *mnt, __u32 mask,
604				      unsigned int flags)
605{
606	struct fsnotify_mark *fsn_mark;
607	__u32 added;
608	int ret = 0;
609
610	fsn_mark = fsnotify_find_vfsmount_mark(group, mnt);
 
 
611	if (!fsn_mark) {
612		if (atomic_read(&group->num_marks) > group->fanotify_data.max_marks)
613			return -ENOSPC;
614
615		fsn_mark = kmem_cache_alloc(fanotify_mark_cache, GFP_KERNEL);
616		if (!fsn_mark)
617			return -ENOMEM;
618
619		fsnotify_init_mark(fsn_mark, fanotify_free_mark);
620		ret = fsnotify_add_mark(fsn_mark, group, NULL, mnt, 0);
621		if (ret)
622			goto err;
623	}
624	added = fanotify_mark_add_to_mask(fsn_mark, mask, flags);
625
626	if (added & ~mnt->mnt_fsnotify_mask)
627		fsnotify_recalc_vfsmount_mask(mnt);
628err:
629	fsnotify_put_mark(fsn_mark);
630	return ret;
631}
632
633static int fanotify_add_inode_mark(struct fsnotify_group *group,
634				   struct inode *inode, __u32 mask,
635				   unsigned int flags)
636{
637	struct fsnotify_mark *fsn_mark;
638	__u32 added;
639	int ret = 0;
640
641	pr_debug("%s: group=%p inode=%p\n", __func__, group, inode);
642
643	/*
644	 * If some other task has this inode open for write we should not add
645	 * an ignored mark, unless that ignored mark is supposed to survive
646	 * modification changes anyway.
647	 */
648	if ((flags & FAN_MARK_IGNORED_MASK) &&
649	    !(flags & FAN_MARK_IGNORED_SURV_MODIFY) &&
650	    (atomic_read(&inode->i_writecount) > 0))
651		return 0;
652
653	fsn_mark = fsnotify_find_inode_mark(group, inode);
 
654	if (!fsn_mark) {
655		if (atomic_read(&group->num_marks) > group->fanotify_data.max_marks)
656			return -ENOSPC;
657
658		fsn_mark = kmem_cache_alloc(fanotify_mark_cache, GFP_KERNEL);
659		if (!fsn_mark)
660			return -ENOMEM;
661
662		fsnotify_init_mark(fsn_mark, fanotify_free_mark);
663		ret = fsnotify_add_mark(fsn_mark, group, inode, NULL, 0);
664		if (ret)
665			goto err;
666	}
667	added = fanotify_mark_add_to_mask(fsn_mark, mask, flags);
668
669	if (added & ~inode->i_fsnotify_mask)
670		fsnotify_recalc_inode_mask(inode);
671err:
672	fsnotify_put_mark(fsn_mark);
673	return ret;
674}
675
676/* fanotify syscalls */
677SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
678{
679	struct fsnotify_group *group;
680	int f_flags, fd;
681	struct user_struct *user;
 
682
683	pr_debug("%s: flags=%d event_f_flags=%d\n",
684		__func__, flags, event_f_flags);
685
686	if (!capable(CAP_SYS_ADMIN))
687		return -EPERM;
688
689	if (flags & ~FAN_ALL_INIT_FLAGS)
 
690		return -EINVAL;
691
692	user = get_current_user();
693	if (atomic_read(&user->fanotify_listeners) > FANOTIFY_DEFAULT_MAX_LISTENERS) {
694		free_uid(user);
695		return -EMFILE;
696	}
697
698	f_flags = O_RDWR | FMODE_NONOTIFY;
699	if (flags & FAN_CLOEXEC)
700		f_flags |= O_CLOEXEC;
701	if (flags & FAN_NONBLOCK)
702		f_flags |= O_NONBLOCK;
703
704	/* fsnotify_alloc_group takes a ref.  Dropped in fanotify_release */
705	group = fsnotify_alloc_group(&fanotify_fsnotify_ops);
706	if (IS_ERR(group)) {
707		free_uid(user);
708		return PTR_ERR(group);
709	}
710
711	group->fanotify_data.user = user;
712	atomic_inc(&user->fanotify_listeners);
713
714	group->fanotify_data.f_flags = event_f_flags;
715#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
716	mutex_init(&group->fanotify_data.access_mutex);
717	init_waitqueue_head(&group->fanotify_data.access_waitq);
718	INIT_LIST_HEAD(&group->fanotify_data.access_list);
719	atomic_set(&group->fanotify_data.bypass_perm, 0);
720#endif
721	switch (flags & FAN_ALL_CLASS_BITS) {
722	case FAN_CLASS_NOTIF:
723		group->priority = FS_PRIO_0;
724		break;
725	case FAN_CLASS_CONTENT:
726		group->priority = FS_PRIO_1;
727		break;
728	case FAN_CLASS_PRE_CONTENT:
729		group->priority = FS_PRIO_2;
730		break;
731	default:
732		fd = -EINVAL;
733		goto out_put_group;
734	}
735
736	if (flags & FAN_UNLIMITED_QUEUE) {
737		fd = -EPERM;
738		if (!capable(CAP_SYS_ADMIN))
739			goto out_put_group;
740		group->max_events = UINT_MAX;
741	} else {
742		group->max_events = FANOTIFY_DEFAULT_MAX_EVENTS;
743	}
744
745	if (flags & FAN_UNLIMITED_MARKS) {
746		fd = -EPERM;
747		if (!capable(CAP_SYS_ADMIN))
748			goto out_put_group;
749		group->fanotify_data.max_marks = UINT_MAX;
750	} else {
751		group->fanotify_data.max_marks = FANOTIFY_DEFAULT_MAX_MARKS;
752	}
753
754	fd = anon_inode_getfd("[fanotify]", &fanotify_fops, group, f_flags);
755	if (fd < 0)
756		goto out_put_group;
757
758	return fd;
759
760out_put_group:
761	fsnotify_put_group(group);
762	return fd;
763}
764
765SYSCALL_DEFINE(fanotify_mark)(int fanotify_fd, unsigned int flags,
766			      __u64 mask, int dfd,
767			      const char  __user * pathname)
768{
769	struct inode *inode = NULL;
770	struct vfsmount *mnt = NULL;
771	struct fsnotify_group *group;
772	struct file *filp;
773	struct path path;
774	int ret, fput_needed;
 
775
776	pr_debug("%s: fanotify_fd=%d flags=%x dfd=%d pathname=%p mask=%llx\n",
777		 __func__, fanotify_fd, flags, dfd, pathname, mask);
778
779	/* we only use the lower 32 bits as of right now. */
780	if (mask & ((__u64)0xffffffff << 32))
781		return -EINVAL;
782
783	if (flags & ~FAN_ALL_MARK_FLAGS)
784		return -EINVAL;
785	switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE | FAN_MARK_FLUSH)) {
786	case FAN_MARK_ADD:		/* fallthrough */
787	case FAN_MARK_REMOVE:
788		if (!mask)
789			return -EINVAL;
 
790	case FAN_MARK_FLUSH:
 
 
791		break;
792	default:
793		return -EINVAL;
794	}
795
796	if (mask & FAN_ONDIR) {
797		flags |= FAN_MARK_ONDIR;
798		mask &= ~FAN_ONDIR;
799	}
800
801#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
802	if (mask & ~(FAN_ALL_EVENTS | FAN_ALL_PERM_EVENTS | FAN_EVENT_ON_CHILD))
803#else
804	if (mask & ~(FAN_ALL_EVENTS | FAN_EVENT_ON_CHILD))
805#endif
806		return -EINVAL;
807
808	filp = fget_light(fanotify_fd, &fput_needed);
809	if (unlikely(!filp))
810		return -EBADF;
811
812	/* verify that this is indeed an fanotify instance */
813	ret = -EINVAL;
814	if (unlikely(filp->f_op != &fanotify_fops))
815		goto fput_and_out;
816	group = filp->private_data;
817
818	/*
819	 * group->priority == FS_PRIO_0 == FAN_CLASS_NOTIF.  These are not
820	 * allowed to set permissions events.
821	 */
822	ret = -EINVAL;
823	if (mask & FAN_ALL_PERM_EVENTS &&
824	    group->priority == FS_PRIO_0)
825		goto fput_and_out;
826
827	ret = fanotify_find_path(dfd, pathname, &path, flags);
828	if (ret)
829		goto fput_and_out;
830
831	/* inode held in place by reference to path; group by fget on fd */
832	if (!(flags & FAN_MARK_MOUNT))
833		inode = path.dentry->d_inode;
834	else
835		mnt = path.mnt;
836
837	/* create/update an inode mark */
838	switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE | FAN_MARK_FLUSH)) {
839	case FAN_MARK_ADD:
840		if (flags & FAN_MARK_MOUNT)
841			ret = fanotify_add_vfsmount_mark(group, mnt, mask, flags);
842		else
843			ret = fanotify_add_inode_mark(group, inode, mask, flags);
844		break;
845	case FAN_MARK_REMOVE:
846		if (flags & FAN_MARK_MOUNT)
847			ret = fanotify_remove_vfsmount_mark(group, mnt, mask, flags);
848		else
849			ret = fanotify_remove_inode_mark(group, inode, mask, flags);
850		break;
851	case FAN_MARK_FLUSH:
852		if (flags & FAN_MARK_MOUNT)
853			fsnotify_clear_vfsmount_marks_by_group(group);
854		else
855			fsnotify_clear_inode_marks_by_group(group);
856		break;
857	default:
858		ret = -EINVAL;
859	}
860
861	path_put(&path);
862fput_and_out:
863	fput_light(filp, fput_needed);
864	return ret;
865}
866
867#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
868asmlinkage long SyS_fanotify_mark(long fanotify_fd, long flags, __u64 mask,
869				  long dfd, long pathname)
870{
871	return SYSC_fanotify_mark((int) fanotify_fd, (unsigned int) flags,
872				  mask, (int) dfd,
873				  (const char  __user *) pathname);
874}
875SYSCALL_ALIAS(sys_fanotify_mark, SyS_fanotify_mark);
876#endif
877
878/*
879 * fanotify_user_setup - Our initialization function.  Note that we cannot return
880 * error because we have compiled-in VFS hooks.  So an (unlikely) failure here
881 * must result in panic().
882 */
883static int __init fanotify_user_setup(void)
884{
885	fanotify_mark_cache = KMEM_CACHE(fsnotify_mark, SLAB_PANIC);
886	fanotify_response_event_cache = KMEM_CACHE(fanotify_response_event,
887						   SLAB_PANIC);
888
889	return 0;
890}
891device_initcall(fanotify_user_setup);
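
Both versions above route writes on the fanotify descriptor through fanotify_write() into process_access_response(), which is how userspace answers FAN_OPEN_PERM and FAN_ACCESS_PERM events. A hedged sketch of that round trip, assuming a kernel built with CONFIG_FANOTIFY_ACCESS_PERMISSIONS, CAP_SYS_ADMIN, and an arbitrary watched directory /mnt/data:

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/fanotify.h>

int main(void)
{
	/* Permission events need FAN_CLASS_CONTENT or FAN_CLASS_PRE_CONTENT;
	 * FAN_CLASS_NOTIF groups are rejected by the FS_PRIO_0 check above. */
	int fan_fd = fanotify_init(FAN_CLASS_CONTENT, O_RDONLY);
	if (fan_fd < 0 || fanotify_mark(fan_fd, FAN_MARK_ADD, FAN_OPEN_PERM,
					AT_FDCWD, "/mnt/data") < 0) {
		perror("fanotify");
		exit(1);
	}

	struct fanotify_event_metadata buf[200];
	for (;;) {
		ssize_t len = read(fan_fd, buf, sizeof(buf));
		struct fanotify_event_metadata *md = buf;

		while (FAN_EVENT_OK(md, len)) {
			if (md->mask & FAN_OPEN_PERM) {
				/* write() -> fanotify_write() -> process_access_response() */
				struct fanotify_response resp = {
					.fd = md->fd,
					.response = FAN_ALLOW,	/* or FAN_DENY */
				};
				write(fan_fd, &resp, sizeof(resp));
			}
			if (md->fd >= 0)
				close(md->fd);
			md = FAN_EVENT_NEXT(md, len);
		}
	}
}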