v4.17
  1/*
  2 * fs/inotify_user.c - inotify support for userspace
  3 *
  4 * Authors:
  5 *	John McCutchan	<ttb@tentacle.dhs.org>
  6 *	Robert Love	<rml@novell.com>
  7 *
  8 * Copyright (C) 2005 John McCutchan
  9 * Copyright 2006 Hewlett-Packard Development Company, L.P.
 10 *
 11 * Copyright (C) 2009 Eric Paris <Red Hat Inc>
 12 * inotify was largely rewritten to make use of the fsnotify infrastructure
 13 *
 14 * This program is free software; you can redistribute it and/or modify it
 15 * under the terms of the GNU General Public License as published by the
 16 * Free Software Foundation; either version 2, or (at your option) any
 17 * later version.
 18 *
 19 * This program is distributed in the hope that it will be useful, but
 20 * WITHOUT ANY WARRANTY; without even the implied warranty of
 21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 22 * General Public License for more details.
 23 */
 24
 25#include <linux/file.h>
 26#include <linux/fs.h> /* struct inode */
 27#include <linux/fsnotify_backend.h>
 28#include <linux/idr.h>
 29#include <linux/init.h> /* fs_initcall */
 30#include <linux/inotify.h>
 31#include <linux/kernel.h> /* roundup() */
 32#include <linux/namei.h> /* LOOKUP_FOLLOW */
 33#include <linux/sched/signal.h>
 34#include <linux/slab.h> /* struct kmem_cache */
 35#include <linux/syscalls.h>
 36#include <linux/types.h>
 37#include <linux/anon_inodes.h>
 38#include <linux/uaccess.h>
 39#include <linux/poll.h>
 40#include <linux/wait.h>
 41
 42#include "inotify.h"
 43#include "../fdinfo.h"
 44
 45#include <asm/ioctls.h>
 46
 47/* configurable via /proc/sys/fs/inotify/ */
 48static int inotify_max_queued_events __read_mostly;
 49
 50struct kmem_cache *inotify_inode_mark_cachep __read_mostly;
 51
 52#ifdef CONFIG_SYSCTL
 53
 54#include <linux/sysctl.h>
 55
 56static int zero;
 57
 58struct ctl_table inotify_table[] = {
 59	{
 60		.procname	= "max_user_instances",
 61		.data		= &init_user_ns.ucount_max[UCOUNT_INOTIFY_INSTANCES],
 62		.maxlen		= sizeof(int),
 63		.mode		= 0644,
 64		.proc_handler	= proc_dointvec_minmax,
 65		.extra1		= &zero,
 66	},
 67	{
 68		.procname	= "max_user_watches",
 69		.data		= &init_user_ns.ucount_max[UCOUNT_INOTIFY_WATCHES],
 70		.maxlen		= sizeof(int),
 71		.mode		= 0644,
 72		.proc_handler	= proc_dointvec_minmax,
 73		.extra1		= &zero,
 74	},
 75	{
 76		.procname	= "max_queued_events",
 77		.data		= &inotify_max_queued_events,
 78		.maxlen		= sizeof(int),
 79		.mode		= 0644,
 80		.proc_handler	= proc_dointvec_minmax,
 81		.extra1		= &zero
 82	},
 83	{ }
 84};
 85#endif /* CONFIG_SYSCTL */
 86
 87static inline __u32 inotify_arg_to_mask(u32 arg)
 88{
 89	__u32 mask;
 90
 91	/*
 92	 * everything should accept their own ignored, cares about children,
 93	 * and should receive events when the inode is unmounted
 94	 */
 95	mask = (FS_IN_IGNORED | FS_EVENT_ON_CHILD | FS_UNMOUNT);
 96
 97	/* mask off the flags used to open the fd */
 98	mask |= (arg & (IN_ALL_EVENTS | IN_ONESHOT | IN_EXCL_UNLINK));
 99
100	return mask;
101}
102
103static inline u32 inotify_mask_to_arg(__u32 mask)
104{
105	return mask & (IN_ALL_EVENTS | IN_ISDIR | IN_UNMOUNT | IN_IGNORED |
106		       IN_Q_OVERFLOW);
107}
108
109/* inotify userspace file descriptor functions */
110static __poll_t inotify_poll(struct file *file, poll_table *wait)
111{
112	struct fsnotify_group *group = file->private_data;
113	__poll_t ret = 0;
114
115	poll_wait(file, &group->notification_waitq, wait);
116	spin_lock(&group->notification_lock);
117	if (!fsnotify_notify_queue_is_empty(group))
118		ret = EPOLLIN | EPOLLRDNORM;
119	spin_unlock(&group->notification_lock);
120
121	return ret;
122}
123
124static int round_event_name_len(struct fsnotify_event *fsn_event)
125{
126	struct inotify_event_info *event;
127
128	event = INOTIFY_E(fsn_event);
129	if (!event->name_len)
130		return 0;
131	return roundup(event->name_len + 1, sizeof(struct inotify_event));
132}
133
134/*
135 * Get an inotify_kernel_event if one exists and is small
136 * enough to fit in "count". Return an error pointer if
137 * not large enough.
138 *
139 * Called with the group->notification_lock held.
140 */
141static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
142					    size_t count)
143{
144	size_t event_size = sizeof(struct inotify_event);
145	struct fsnotify_event *event;
146
147	if (fsnotify_notify_queue_is_empty(group))
148		return NULL;
149
150	event = fsnotify_peek_first_event(group);
151
152	pr_debug("%s: group=%p event=%p\n", __func__, group, event);
153
154	event_size += round_event_name_len(event);
155	if (event_size > count)
156		return ERR_PTR(-EINVAL);
157
158	/* held the notification_lock the whole time, so this is the
159	 * same event we peeked above */
160	fsnotify_remove_first_event(group);
161
162	return event;
163}
164
165/*
166 * Copy an event to user space, returning how much we copied.
167 *
168 * We already checked that the event size is smaller than the
169 * buffer we had in "get_one_event()" above.
170 */
171static ssize_t copy_event_to_user(struct fsnotify_group *group,
172				  struct fsnotify_event *fsn_event,
173				  char __user *buf)
174{
175	struct inotify_event inotify_event;
176	struct inotify_event_info *event;
177	size_t event_size = sizeof(struct inotify_event);
178	size_t name_len;
179	size_t pad_name_len;
180
181	pr_debug("%s: group=%p event=%p\n", __func__, group, fsn_event);
182
183	event = INOTIFY_E(fsn_event);
184	name_len = event->name_len;
185	/*
186	 * round up name length so it is a multiple of event_size
187	 * plus an extra byte for the terminating '\0'.
188	 */
189	pad_name_len = round_event_name_len(fsn_event);
190	inotify_event.len = pad_name_len;
191	inotify_event.mask = inotify_mask_to_arg(fsn_event->mask);
192	inotify_event.wd = event->wd;
193	inotify_event.cookie = event->sync_cookie;
194
195	/* send the main event */
196	if (copy_to_user(buf, &inotify_event, event_size))
197		return -EFAULT;
198
199	buf += event_size;
200
201	/*
202	 * fsnotify only stores the pathname, so here we have to send the pathname
203	 * and then pad that pathname out to a multiple of sizeof(inotify_event)
204	 * with zeros.
205	 */
206	if (pad_name_len) {
207		/* copy the path name */
208		if (copy_to_user(buf, event->name, name_len))
209			return -EFAULT;
210		buf += name_len;
211
212		/* fill userspace with 0's */
213		if (clear_user(buf, pad_name_len - name_len))
214			return -EFAULT;
215		event_size += pad_name_len;
216	}
217
218	return event_size;
219}
220
221static ssize_t inotify_read(struct file *file, char __user *buf,
222			    size_t count, loff_t *pos)
223{
224	struct fsnotify_group *group;
225	struct fsnotify_event *kevent;
226	char __user *start;
227	int ret;
228	DEFINE_WAIT_FUNC(wait, woken_wake_function);
229
230	start = buf;
231	group = file->private_data;
232
233	add_wait_queue(&group->notification_waitq, &wait);
234	while (1) {
235		spin_lock(&group->notification_lock);
236		kevent = get_one_event(group, count);
237		spin_unlock(&group->notification_lock);
238
239		pr_debug("%s: group=%p kevent=%p\n", __func__, group, kevent);
240
241		if (kevent) {
242			ret = PTR_ERR(kevent);
243			if (IS_ERR(kevent))
244				break;
245			ret = copy_event_to_user(group, kevent, buf);
246			fsnotify_destroy_event(group, kevent);
247			if (ret < 0)
248				break;
249			buf += ret;
250			count -= ret;
251			continue;
252		}
253
254		ret = -EAGAIN;
255		if (file->f_flags & O_NONBLOCK)
256			break;
257		ret = -ERESTARTSYS;
258		if (signal_pending(current))
259			break;
260
261		if (start != buf)
262			break;
263
264		wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
265	}
266	remove_wait_queue(&group->notification_waitq, &wait);
267
268	if (start != buf && ret != -EFAULT)
269		ret = buf - start;
270	return ret;
271}
272
273static int inotify_release(struct inode *ignored, struct file *file)
274{
275	struct fsnotify_group *group = file->private_data;
276
277	pr_debug("%s: group=%p\n", __func__, group);
278
279	/* free this group, matching get was inotify_init->fsnotify_obtain_group */
280	fsnotify_destroy_group(group);
281
282	return 0;
283}
284
285static long inotify_ioctl(struct file *file, unsigned int cmd,
286			  unsigned long arg)
287{
288	struct fsnotify_group *group;
289	struct fsnotify_event *fsn_event;
290	void __user *p;
291	int ret = -ENOTTY;
292	size_t send_len = 0;
293
294	group = file->private_data;
295	p = (void __user *) arg;
296
297	pr_debug("%s: group=%p cmd=%u\n", __func__, group, cmd);
298
299	switch (cmd) {
300	case FIONREAD:
301		spin_lock(&group->notification_lock);
302		list_for_each_entry(fsn_event, &group->notification_list,
303				    list) {
304			send_len += sizeof(struct inotify_event);
305			send_len += round_event_name_len(fsn_event);
306		}
307		spin_unlock(&group->notification_lock);
308		ret = put_user(send_len, (int __user *) p);
309		break;
310#ifdef CONFIG_CHECKPOINT_RESTORE
311	case INOTIFY_IOC_SETNEXTWD:
312		ret = -EINVAL;
313		if (arg >= 1 && arg <= INT_MAX) {
314			struct inotify_group_private_data *data;
315
316			data = &group->inotify_data;
317			spin_lock(&data->idr_lock);
318			idr_set_cursor(&data->idr, (unsigned int)arg);
319			spin_unlock(&data->idr_lock);
320			ret = 0;
321		}
322		break;
323#endif /* CONFIG_CHECKPOINT_RESTORE */
324	}
325
326	return ret;
327}
328
329static const struct file_operations inotify_fops = {
330	.show_fdinfo	= inotify_show_fdinfo,
331	.poll		= inotify_poll,
332	.read		= inotify_read,
333	.fasync		= fsnotify_fasync,
334	.release	= inotify_release,
335	.unlocked_ioctl	= inotify_ioctl,
336	.compat_ioctl	= inotify_ioctl,
337	.llseek		= noop_llseek,
338};
339
340
341/*
342 * find_inode - resolve a user-given path to a specific inode
343 */
344static int inotify_find_inode(const char __user *dirname, struct path *path, unsigned flags)
345{
346	int error;
347
348	error = user_path_at(AT_FDCWD, dirname, flags, path);
349	if (error)
350		return error;
351	/* you can only watch an inode if you have read permissions on it */
 352	error = inode_permission(path->dentry->d_inode, MAY_READ);
353	if (error)
 354		path_put(path);
355	return error;
356}
357
358static int inotify_add_to_idr(struct idr *idr, spinlock_t *idr_lock,
359			      struct inotify_inode_mark *i_mark)
360{
361	int ret;
362
363	idr_preload(GFP_KERNEL);
364	spin_lock(idr_lock);
365
366	ret = idr_alloc_cyclic(idr, i_mark, 1, 0, GFP_NOWAIT);
367	if (ret >= 0) {
368		/* we added the mark to the idr, take a reference */
369		i_mark->wd = ret;
370		fsnotify_get_mark(&i_mark->fsn_mark);
371	}
372
373	spin_unlock(idr_lock);
374	idr_preload_end();
375	return ret < 0 ? ret : 0;
376}
377
378static struct inotify_inode_mark *inotify_idr_find_locked(struct fsnotify_group *group,
379								int wd)
380{
381	struct idr *idr = &group->inotify_data.idr;
382	spinlock_t *idr_lock = &group->inotify_data.idr_lock;
383	struct inotify_inode_mark *i_mark;
384
385	assert_spin_locked(idr_lock);
386
387	i_mark = idr_find(idr, wd);
388	if (i_mark) {
389		struct fsnotify_mark *fsn_mark = &i_mark->fsn_mark;
390
391		fsnotify_get_mark(fsn_mark);
392		/* One ref for being in the idr, one ref we just took */
393		BUG_ON(refcount_read(&fsn_mark->refcnt) < 2);
394	}
395
396	return i_mark;
397}
398
399static struct inotify_inode_mark *inotify_idr_find(struct fsnotify_group *group,
400							 int wd)
401{
402	struct inotify_inode_mark *i_mark;
403	spinlock_t *idr_lock = &group->inotify_data.idr_lock;
404
405	spin_lock(idr_lock);
406	i_mark = inotify_idr_find_locked(group, wd);
407	spin_unlock(idr_lock);
408
409	return i_mark;
410}
411
412/*
413 * Remove the mark from the idr (if present) and drop the reference
414 * on the mark because it was in the idr.
415 */
416static void inotify_remove_from_idr(struct fsnotify_group *group,
417				    struct inotify_inode_mark *i_mark)
418{
419	struct idr *idr = &group->inotify_data.idr;
420	spinlock_t *idr_lock = &group->inotify_data.idr_lock;
421	struct inotify_inode_mark *found_i_mark = NULL;
422	int wd;
423
424	spin_lock(idr_lock);
425	wd = i_mark->wd;
426
427	/*
428	 * does this i_mark think it is in the idr?  we shouldn't get called
429	 * if it wasn't....
430	 */
431	if (wd == -1) {
432		WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p\n",
433			__func__, i_mark, i_mark->wd, i_mark->fsn_mark.group);
434		goto out;
435	}
436
437	/* Lets look in the idr to see if we find it */
438	found_i_mark = inotify_idr_find_locked(group, wd);
439	if (unlikely(!found_i_mark)) {
440		WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p\n",
441			__func__, i_mark, i_mark->wd, i_mark->fsn_mark.group);
442		goto out;
443	}
444
445	/*
 446	 * We found a mark in the idr at the right wd, but it's
447	 * not the mark we were told to remove.  eparis seriously
448	 * fucked up somewhere.
449	 */
450	if (unlikely(found_i_mark != i_mark)) {
451		WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p "
452			"found_i_mark=%p found_i_mark->wd=%d "
453			"found_i_mark->group=%p\n", __func__, i_mark,
454			i_mark->wd, i_mark->fsn_mark.group, found_i_mark,
455			found_i_mark->wd, found_i_mark->fsn_mark.group);
456		goto out;
457	}
458
459	/*
460	 * One ref for being in the idr
461	 * one ref grabbed by inotify_idr_find
462	 */
463	if (unlikely(refcount_read(&i_mark->fsn_mark.refcnt) < 2)) {
464		printk(KERN_ERR "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p\n",
465			 __func__, i_mark, i_mark->wd, i_mark->fsn_mark.group);
466		/* we can't really recover with bad ref cnting.. */
467		BUG();
468	}
469
470	idr_remove(idr, wd);
471	/* Removed from the idr, drop that ref. */
472	fsnotify_put_mark(&i_mark->fsn_mark);
473out:
474	i_mark->wd = -1;
475	spin_unlock(idr_lock);
476	/* match the ref taken by inotify_idr_find_locked() */
477	if (found_i_mark)
478		fsnotify_put_mark(&found_i_mark->fsn_mark);
479}
480
481/*
482 * Send IN_IGNORED for this wd, remove this wd from the idr.
483 */
484void inotify_ignored_and_remove_idr(struct fsnotify_mark *fsn_mark,
485				    struct fsnotify_group *group)
486{
 487	struct inotify_inode_mark *i_mark;
488
489	/* Queue ignore event for the watch */
490	inotify_handle_event(group, NULL, fsn_mark, NULL, FS_IN_IGNORED,
491			     NULL, FSNOTIFY_EVENT_NONE, NULL, 0, NULL);
492
493	i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);
494	/* remove this mark from the idr */
495	inotify_remove_from_idr(group, i_mark);
496
497	dec_inotify_watches(group->inotify_data.ucounts);
498}
499
500static int inotify_update_existing_watch(struct fsnotify_group *group,
501					 struct inode *inode,
502					 u32 arg)
503{
504	struct fsnotify_mark *fsn_mark;
505	struct inotify_inode_mark *i_mark;
506	__u32 old_mask, new_mask;
507	__u32 mask;
 508	int add = (arg & IN_MASK_ADD);
509	int ret;
510
511	mask = inotify_arg_to_mask(arg);
512
513	fsn_mark = fsnotify_find_mark(&inode->i_fsnotify_marks, group);
514	if (!fsn_mark)
 515		return -ENOENT;
516
517	i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);
518
519	spin_lock(&fsn_mark->lock);
520	old_mask = fsn_mark->mask;
521	if (add)
522		fsn_mark->mask |= mask;
523	else
524		fsn_mark->mask = mask;
525	new_mask = fsn_mark->mask;
526	spin_unlock(&fsn_mark->lock);
527
528	if (old_mask != new_mask) {
529		/* more bits in old than in new? */
530		int dropped = (old_mask & ~new_mask);
531		/* more bits in this fsn_mark than the inode's mask? */
532		int do_inode = (new_mask & ~inode->i_fsnotify_mask);
533
534		/* update the inode with this new fsn_mark */
535		if (dropped || do_inode)
536			fsnotify_recalc_mask(inode->i_fsnotify_marks);
537
538	}
539
540	/* return the wd */
 541	ret = i_mark->wd;
 542
543	/* match the get from fsnotify_find_mark() */
544	fsnotify_put_mark(fsn_mark);
545
546	return ret;
547}
548
549static int inotify_new_watch(struct fsnotify_group *group,
550			     struct inode *inode,
551			     u32 arg)
552{
553	struct inotify_inode_mark *tmp_i_mark;
554	__u32 mask;
555	int ret;
556	struct idr *idr = &group->inotify_data.idr;
557	spinlock_t *idr_lock = &group->inotify_data.idr_lock;
558
559	mask = inotify_arg_to_mask(arg);
560
561	tmp_i_mark = kmem_cache_alloc(inotify_inode_mark_cachep, GFP_KERNEL);
562	if (unlikely(!tmp_i_mark))
563		return -ENOMEM;
564
565	fsnotify_init_mark(&tmp_i_mark->fsn_mark, group);
566	tmp_i_mark->fsn_mark.mask = mask;
567	tmp_i_mark->wd = -1;
568
569	ret = inotify_add_to_idr(idr, idr_lock, tmp_i_mark);
570	if (ret)
571		goto out_err;
572
573	/* increment the number of watches the user has */
574	if (!inc_inotify_watches(group->inotify_data.ucounts)) {
575		inotify_remove_from_idr(group, tmp_i_mark);
576		ret = -ENOSPC;
577		goto out_err;
578	}
579
580	/* we are on the idr, now get on the inode */
581	ret = fsnotify_add_mark_locked(&tmp_i_mark->fsn_mark, inode, NULL, 0);
582	if (ret) {
583		/* we failed to get on the inode, get off the idr */
584		inotify_remove_from_idr(group, tmp_i_mark);
585		goto out_err;
586	}
587
588
589	/* return the watch descriptor for this new mark */
590	ret = tmp_i_mark->wd;
591
592out_err:
593	/* match the ref from fsnotify_init_mark() */
594	fsnotify_put_mark(&tmp_i_mark->fsn_mark);
595
596	return ret;
597}
598
599static int inotify_update_watch(struct fsnotify_group *group, struct inode *inode, u32 arg)
600{
601	int ret = 0;
602
603	mutex_lock(&group->mark_mutex);
 604	/* try to update an existing watch with the new arg */
605	ret = inotify_update_existing_watch(group, inode, arg);
606	/* no mark present, try to add a new one */
607	if (ret == -ENOENT)
608		ret = inotify_new_watch(group, inode, arg);
609	mutex_unlock(&group->mark_mutex);
610
611	return ret;
612}
613
614static struct fsnotify_group *inotify_new_group(unsigned int max_events)
615{
616	struct fsnotify_group *group;
617	struct inotify_event_info *oevent;
618
619	group = fsnotify_alloc_group(&inotify_fsnotify_ops);
620	if (IS_ERR(group))
621		return group;
622
623	oevent = kmalloc(sizeof(struct inotify_event_info), GFP_KERNEL);
624	if (unlikely(!oevent)) {
625		fsnotify_destroy_group(group);
626		return ERR_PTR(-ENOMEM);
627	}
628	group->overflow_event = &oevent->fse;
 629	fsnotify_init_event(group->overflow_event, NULL, FS_Q_OVERFLOW);
630	oevent->wd = -1;
631	oevent->sync_cookie = 0;
632	oevent->name_len = 0;
633
 634	group->max_events = max_events;
635
636	spin_lock_init(&group->inotify_data.idr_lock);
637	idr_init(&group->inotify_data.idr);
638	group->inotify_data.ucounts = inc_ucount(current_user_ns(),
639						 current_euid(),
640						 UCOUNT_INOTIFY_INSTANCES);
641
642	if (!group->inotify_data.ucounts) {
643		fsnotify_destroy_group(group);
644		return ERR_PTR(-EMFILE);
645	}
646
647	return group;
648}
649
650
651/* inotify syscalls */
652static int do_inotify_init(int flags)
653{
654	struct fsnotify_group *group;
655	int ret;
656
657	/* Check the IN_* constants for consistency.  */
658	BUILD_BUG_ON(IN_CLOEXEC != O_CLOEXEC);
659	BUILD_BUG_ON(IN_NONBLOCK != O_NONBLOCK);
660
661	if (flags & ~(IN_CLOEXEC | IN_NONBLOCK))
662		return -EINVAL;
663
664	/* fsnotify_obtain_group took a reference to group, we put this when we kill the file in the end */
665	group = inotify_new_group(inotify_max_queued_events);
666	if (IS_ERR(group))
667		return PTR_ERR(group);
668
669	ret = anon_inode_getfd("inotify", &inotify_fops, group,
670				  O_RDONLY | flags);
671	if (ret < 0)
672		fsnotify_destroy_group(group);
673
674	return ret;
675}
676
677SYSCALL_DEFINE1(inotify_init1, int, flags)
678{
679	return do_inotify_init(flags);
680}
681
682SYSCALL_DEFINE0(inotify_init)
683{
684	return do_inotify_init(0);
685}
686
687SYSCALL_DEFINE3(inotify_add_watch, int, fd, const char __user *, pathname,
688		u32, mask)
689{
690	struct fsnotify_group *group;
691	struct inode *inode;
692	struct path path;
693	struct fd f;
694	int ret;
695	unsigned flags = 0;
696
697	/*
698	 * We share a lot of code with fs/dnotify.  We also share
699	 * the bit layout between inotify's IN_* and the fsnotify
700	 * FS_*.  This check ensures that only the inotify IN_*
701	 * bits get passed in and set in watches/events.
702	 */
703	if (unlikely(mask & ~ALL_INOTIFY_BITS))
704		return -EINVAL;
705	/*
706	 * Require at least one valid bit set in the mask.
707	 * Without _something_ set, we would have no events to
708	 * watch for.
709	 */
710	if (unlikely(!(mask & ALL_INOTIFY_BITS)))
711		return -EINVAL;
712
713	f = fdget(fd);
714	if (unlikely(!f.file))
 715		return -EBADF;
 716
717	/* verify that this is indeed an inotify instance */
718	if (unlikely(f.file->f_op != &inotify_fops)) {
719		ret = -EINVAL;
720		goto fput_and_out;
721	}
722
723	if (!(mask & IN_DONT_FOLLOW))
724		flags |= LOOKUP_FOLLOW;
725	if (mask & IN_ONLYDIR)
726		flags |= LOOKUP_DIRECTORY;
727
 728	ret = inotify_find_inode(pathname, &path, flags);
729	if (ret)
730		goto fput_and_out;
731
732	/* inode held in place by reference to path; group by fget on fd */
733	inode = path.dentry->d_inode;
734	group = f.file->private_data;
735
736	/* create/update an inode mark */
737	ret = inotify_update_watch(group, inode, mask);
738	path_put(&path);
739fput_and_out:
740	fdput(f);
741	return ret;
742}
743
744SYSCALL_DEFINE2(inotify_rm_watch, int, fd, __s32, wd)
745{
746	struct fsnotify_group *group;
747	struct inotify_inode_mark *i_mark;
748	struct fd f;
749	int ret = 0;
750
751	f = fdget(fd);
752	if (unlikely(!f.file))
753		return -EBADF;
754
755	/* verify that this is indeed an inotify instance */
756	ret = -EINVAL;
757	if (unlikely(f.file->f_op != &inotify_fops))
758		goto out;
759
760	group = f.file->private_data;
761
762	ret = -EINVAL;
763	i_mark = inotify_idr_find(group, wd);
764	if (unlikely(!i_mark))
765		goto out;
766
767	ret = 0;
768
769	fsnotify_destroy_mark(&i_mark->fsn_mark, group);
770
771	/* match ref taken by inotify_idr_find */
772	fsnotify_put_mark(&i_mark->fsn_mark);
773
774out:
775	fdput(f);
776	return ret;
777}
778
779/*
780 * inotify_user_setup - Our initialization function.  Note that we cannot return
781 * error because we have compiled-in VFS hooks.  So an (unlikely) failure here
782 * must result in panic().
783 */
784static int __init inotify_user_setup(void)
785{
786	BUILD_BUG_ON(IN_ACCESS != FS_ACCESS);
787	BUILD_BUG_ON(IN_MODIFY != FS_MODIFY);
788	BUILD_BUG_ON(IN_ATTRIB != FS_ATTRIB);
789	BUILD_BUG_ON(IN_CLOSE_WRITE != FS_CLOSE_WRITE);
790	BUILD_BUG_ON(IN_CLOSE_NOWRITE != FS_CLOSE_NOWRITE);
791	BUILD_BUG_ON(IN_OPEN != FS_OPEN);
792	BUILD_BUG_ON(IN_MOVED_FROM != FS_MOVED_FROM);
793	BUILD_BUG_ON(IN_MOVED_TO != FS_MOVED_TO);
794	BUILD_BUG_ON(IN_CREATE != FS_CREATE);
795	BUILD_BUG_ON(IN_DELETE != FS_DELETE);
796	BUILD_BUG_ON(IN_DELETE_SELF != FS_DELETE_SELF);
797	BUILD_BUG_ON(IN_MOVE_SELF != FS_MOVE_SELF);
798	BUILD_BUG_ON(IN_UNMOUNT != FS_UNMOUNT);
799	BUILD_BUG_ON(IN_Q_OVERFLOW != FS_Q_OVERFLOW);
800	BUILD_BUG_ON(IN_IGNORED != FS_IN_IGNORED);
801	BUILD_BUG_ON(IN_EXCL_UNLINK != FS_EXCL_UNLINK);
802	BUILD_BUG_ON(IN_ISDIR != FS_ISDIR);
803	BUILD_BUG_ON(IN_ONESHOT != FS_IN_ONESHOT);
804
805	BUG_ON(hweight32(ALL_INOTIFY_BITS) != 21);
806
 807	inotify_inode_mark_cachep = KMEM_CACHE(inotify_inode_mark, SLAB_PANIC);
808
809	inotify_max_queued_events = 16384;
810	init_user_ns.ucount_max[UCOUNT_INOTIFY_INSTANCES] = 128;
811	init_user_ns.ucount_max[UCOUNT_INOTIFY_WATCHES] = 8192;
812
813	return 0;
814}
815fs_initcall(inotify_user_setup);
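
The record format produced by copy_event_to_user() above (a struct inotify_event header followed by an optional NUL-padded name whose rounded-up length is reported in ->len) is what userspace walks after each read() on the inotify descriptor. A minimal consumer sketch follows; it is illustrative only, not part of this file, and assumes nothing beyond the standard <sys/inotify.h> userspace API.

/*
 * Illustrative userspace consumer for the inotify fd implemented above.
 * Each read() returns whole records: sizeof(struct inotify_event) bytes
 * of header plus ev->len bytes of NUL-padded name (already rounded up
 * by the kernel), so the buffer can be walked record by record.
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/inotify.h>

int main(int argc, char **argv)
{
	/* Align the buffer so the struct overlay below is well-defined. */
	char buf[4096] __attribute__((aligned(__alignof__(struct inotify_event))));
	const char *dir = argc > 1 ? argv[1] : ".";
	ssize_t len;
	int fd, wd;

	fd = inotify_init1(IN_CLOEXEC);
	if (fd < 0) {
		perror("inotify_init1");
		return 1;
	}

	wd = inotify_add_watch(fd, dir, IN_CREATE | IN_DELETE | IN_MODIFY);
	if (wd < 0) {
		perror("inotify_add_watch");
		return 1;
	}

	while ((len = read(fd, buf, sizeof(buf))) > 0) {
		char *p = buf;

		while (p < buf + len) {
			struct inotify_event *ev = (struct inotify_event *)p;

			printf("wd=%d mask=%#x cookie=%u name=%s\n",
			       ev->wd, ev->mask, ev->cookie,
			       ev->len ? ev->name : "");
			/* ev->len already includes the padding bytes. */
			p += sizeof(*ev) + ev->len;
		}
	}
	return 0;
}

If the supplied buffer is smaller than the next record, get_one_event() above makes the read() fail with -EINVAL rather than splitting the record, which is why this walk never has to reassemble partial events.
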
v5.4
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2/*
  3 * fs/inotify_user.c - inotify support for userspace
  4 *
  5 * Authors:
  6 *	John McCutchan	<ttb@tentacle.dhs.org>
  7 *	Robert Love	<rml@novell.com>
  8 *
  9 * Copyright (C) 2005 John McCutchan
 10 * Copyright 2006 Hewlett-Packard Development Company, L.P.
 11 *
 12 * Copyright (C) 2009 Eric Paris <Red Hat Inc>
 13 * inotify was largely rewritten to make use of the fsnotify infrastructure
 14 */
 15
 16#include <linux/file.h>
 17#include <linux/fs.h> /* struct inode */
 18#include <linux/fsnotify_backend.h>
 19#include <linux/idr.h>
 20#include <linux/init.h> /* fs_initcall */
 21#include <linux/inotify.h>
 22#include <linux/kernel.h> /* roundup() */
 23#include <linux/namei.h> /* LOOKUP_FOLLOW */
 24#include <linux/sched/signal.h>
 25#include <linux/slab.h> /* struct kmem_cache */
 26#include <linux/syscalls.h>
 27#include <linux/types.h>
 28#include <linux/anon_inodes.h>
 29#include <linux/uaccess.h>
 30#include <linux/poll.h>
 31#include <linux/wait.h>
 32#include <linux/memcontrol.h>
 33#include <linux/security.h>
 34
 35#include "inotify.h"
 36#include "../fdinfo.h"
 37
 38#include <asm/ioctls.h>
 39
 40/* configurable via /proc/sys/fs/inotify/ */
 41static int inotify_max_queued_events __read_mostly;
 42
 43struct kmem_cache *inotify_inode_mark_cachep __read_mostly;
 44
 45#ifdef CONFIG_SYSCTL
 46
 47#include <linux/sysctl.h>
 48
 49struct ctl_table inotify_table[] = {
 50	{
 51		.procname	= "max_user_instances",
 52		.data		= &init_user_ns.ucount_max[UCOUNT_INOTIFY_INSTANCES],
 53		.maxlen		= sizeof(int),
 54		.mode		= 0644,
 55		.proc_handler	= proc_dointvec_minmax,
 56		.extra1		= SYSCTL_ZERO,
 57	},
 58	{
 59		.procname	= "max_user_watches",
 60		.data		= &init_user_ns.ucount_max[UCOUNT_INOTIFY_WATCHES],
 61		.maxlen		= sizeof(int),
 62		.mode		= 0644,
 63		.proc_handler	= proc_dointvec_minmax,
 64		.extra1		= SYSCTL_ZERO,
 65	},
 66	{
 67		.procname	= "max_queued_events",
 68		.data		= &inotify_max_queued_events,
 69		.maxlen		= sizeof(int),
 70		.mode		= 0644,
 71		.proc_handler	= proc_dointvec_minmax,
 72		.extra1		= SYSCTL_ZERO
 73	},
 74	{ }
 75};
 76#endif /* CONFIG_SYSCTL */
 77
 78static inline __u32 inotify_arg_to_mask(u32 arg)
 79{
 80	__u32 mask;
 81
 82	/*
 83	 * everything should accept their own ignored, cares about children,
 84	 * and should receive events when the inode is unmounted
 85	 */
 86	mask = (FS_IN_IGNORED | FS_EVENT_ON_CHILD | FS_UNMOUNT);
 87
 88	/* mask off the flags used to open the fd */
 89	mask |= (arg & (IN_ALL_EVENTS | IN_ONESHOT | IN_EXCL_UNLINK));
 90
 91	return mask;
 92}
 93
 94static inline u32 inotify_mask_to_arg(__u32 mask)
 95{
 96	return mask & (IN_ALL_EVENTS | IN_ISDIR | IN_UNMOUNT | IN_IGNORED |
 97		       IN_Q_OVERFLOW);
 98}
 99
100/* inotify userspace file descriptor functions */
101static __poll_t inotify_poll(struct file *file, poll_table *wait)
102{
103	struct fsnotify_group *group = file->private_data;
104	__poll_t ret = 0;
105
106	poll_wait(file, &group->notification_waitq, wait);
107	spin_lock(&group->notification_lock);
108	if (!fsnotify_notify_queue_is_empty(group))
109		ret = EPOLLIN | EPOLLRDNORM;
110	spin_unlock(&group->notification_lock);
111
112	return ret;
113}
114
115static int round_event_name_len(struct fsnotify_event *fsn_event)
116{
117	struct inotify_event_info *event;
118
119	event = INOTIFY_E(fsn_event);
120	if (!event->name_len)
121		return 0;
122	return roundup(event->name_len + 1, sizeof(struct inotify_event));
123}
124
125/*
126 * Get an inotify_kernel_event if one exists and is small
127 * enough to fit in "count". Return an error pointer if
128 * not large enough.
129 *
130 * Called with the group->notification_lock held.
131 */
132static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
133					    size_t count)
134{
135	size_t event_size = sizeof(struct inotify_event);
136	struct fsnotify_event *event;
137
138	if (fsnotify_notify_queue_is_empty(group))
139		return NULL;
140
141	event = fsnotify_peek_first_event(group);
142
143	pr_debug("%s: group=%p event=%p\n", __func__, group, event);
144
145	event_size += round_event_name_len(event);
146	if (event_size > count)
147		return ERR_PTR(-EINVAL);
148
149	/* held the notification_lock the whole time, so this is the
150	 * same event we peeked above */
151	fsnotify_remove_first_event(group);
152
153	return event;
154}
155
156/*
157 * Copy an event to user space, returning how much we copied.
158 *
159 * We already checked that the event size is smaller than the
160 * buffer we had in "get_one_event()" above.
161 */
162static ssize_t copy_event_to_user(struct fsnotify_group *group,
163				  struct fsnotify_event *fsn_event,
164				  char __user *buf)
165{
166	struct inotify_event inotify_event;
167	struct inotify_event_info *event;
168	size_t event_size = sizeof(struct inotify_event);
169	size_t name_len;
170	size_t pad_name_len;
171
172	pr_debug("%s: group=%p event=%p\n", __func__, group, fsn_event);
173
174	event = INOTIFY_E(fsn_event);
175	name_len = event->name_len;
176	/*
177	 * round up name length so it is a multiple of event_size
178	 * plus an extra byte for the terminating '\0'.
179	 */
180	pad_name_len = round_event_name_len(fsn_event);
181	inotify_event.len = pad_name_len;
182	inotify_event.mask = inotify_mask_to_arg(event->mask);
183	inotify_event.wd = event->wd;
184	inotify_event.cookie = event->sync_cookie;
185
186	/* send the main event */
187	if (copy_to_user(buf, &inotify_event, event_size))
188		return -EFAULT;
189
190	buf += event_size;
191
192	/*
193	 * fsnotify only stores the pathname, so here we have to send the pathname
194	 * and then pad that pathname out to a multiple of sizeof(inotify_event)
195	 * with zeros.
196	 */
197	if (pad_name_len) {
198		/* copy the path name */
199		if (copy_to_user(buf, event->name, name_len))
200			return -EFAULT;
201		buf += name_len;
202
203		/* fill userspace with 0's */
204		if (clear_user(buf, pad_name_len - name_len))
205			return -EFAULT;
206		event_size += pad_name_len;
207	}
208
209	return event_size;
210}
211
212static ssize_t inotify_read(struct file *file, char __user *buf,
213			    size_t count, loff_t *pos)
214{
215	struct fsnotify_group *group;
216	struct fsnotify_event *kevent;
217	char __user *start;
218	int ret;
219	DEFINE_WAIT_FUNC(wait, woken_wake_function);
220
221	start = buf;
222	group = file->private_data;
223
224	add_wait_queue(&group->notification_waitq, &wait);
225	while (1) {
226		spin_lock(&group->notification_lock);
227		kevent = get_one_event(group, count);
228		spin_unlock(&group->notification_lock);
229
230		pr_debug("%s: group=%p kevent=%p\n", __func__, group, kevent);
231
232		if (kevent) {
233			ret = PTR_ERR(kevent);
234			if (IS_ERR(kevent))
235				break;
236			ret = copy_event_to_user(group, kevent, buf);
237			fsnotify_destroy_event(group, kevent);
238			if (ret < 0)
239				break;
240			buf += ret;
241			count -= ret;
242			continue;
243		}
244
245		ret = -EAGAIN;
246		if (file->f_flags & O_NONBLOCK)
247			break;
248		ret = -ERESTARTSYS;
249		if (signal_pending(current))
250			break;
251
252		if (start != buf)
253			break;
254
255		wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
256	}
257	remove_wait_queue(&group->notification_waitq, &wait);
258
259	if (start != buf && ret != -EFAULT)
260		ret = buf - start;
261	return ret;
262}
263
264static int inotify_release(struct inode *ignored, struct file *file)
265{
266	struct fsnotify_group *group = file->private_data;
267
268	pr_debug("%s: group=%p\n", __func__, group);
269
270	/* free this group, matching get was inotify_init->fsnotify_obtain_group */
271	fsnotify_destroy_group(group);
272
273	return 0;
274}
275
276static long inotify_ioctl(struct file *file, unsigned int cmd,
277			  unsigned long arg)
278{
279	struct fsnotify_group *group;
280	struct fsnotify_event *fsn_event;
281	void __user *p;
282	int ret = -ENOTTY;
283	size_t send_len = 0;
284
285	group = file->private_data;
286	p = (void __user *) arg;
287
288	pr_debug("%s: group=%p cmd=%u\n", __func__, group, cmd);
289
290	switch (cmd) {
291	case FIONREAD:
292		spin_lock(&group->notification_lock);
293		list_for_each_entry(fsn_event, &group->notification_list,
294				    list) {
295			send_len += sizeof(struct inotify_event);
296			send_len += round_event_name_len(fsn_event);
297		}
298		spin_unlock(&group->notification_lock);
299		ret = put_user(send_len, (int __user *) p);
300		break;
301#ifdef CONFIG_CHECKPOINT_RESTORE
302	case INOTIFY_IOC_SETNEXTWD:
303		ret = -EINVAL;
304		if (arg >= 1 && arg <= INT_MAX) {
305			struct inotify_group_private_data *data;
306
307			data = &group->inotify_data;
308			spin_lock(&data->idr_lock);
309			idr_set_cursor(&data->idr, (unsigned int)arg);
310			spin_unlock(&data->idr_lock);
311			ret = 0;
312		}
313		break;
314#endif /* CONFIG_CHECKPOINT_RESTORE */
315	}
316
317	return ret;
318}
319
320static const struct file_operations inotify_fops = {
321	.show_fdinfo	= inotify_show_fdinfo,
322	.poll		= inotify_poll,
323	.read		= inotify_read,
324	.fasync		= fsnotify_fasync,
325	.release	= inotify_release,
326	.unlocked_ioctl	= inotify_ioctl,
327	.compat_ioctl	= inotify_ioctl,
328	.llseek		= noop_llseek,
329};
330
331
332/*
333 * find_inode - resolve a user-given path to a specific inode
334 */
335static int inotify_find_inode(const char __user *dirname, struct path *path,
336						unsigned int flags, __u64 mask)
337{
338	int error;
339
340	error = user_path_at(AT_FDCWD, dirname, flags, path);
341	if (error)
342		return error;
343	/* you can only watch an inode if you have read permissions on it */
344	error = inode_permission(path->dentry->d_inode, MAY_READ);
345	if (error) {
346		path_put(path);
347		return error;
348	}
349	error = security_path_notify(path, mask,
350				FSNOTIFY_OBJ_TYPE_INODE);
351	if (error)
352		path_put(path);
353
354	return error;
355}
356
357static int inotify_add_to_idr(struct idr *idr, spinlock_t *idr_lock,
358			      struct inotify_inode_mark *i_mark)
359{
360	int ret;
361
362	idr_preload(GFP_KERNEL);
363	spin_lock(idr_lock);
364
365	ret = idr_alloc_cyclic(idr, i_mark, 1, 0, GFP_NOWAIT);
366	if (ret >= 0) {
367		/* we added the mark to the idr, take a reference */
368		i_mark->wd = ret;
369		fsnotify_get_mark(&i_mark->fsn_mark);
370	}
371
372	spin_unlock(idr_lock);
373	idr_preload_end();
374	return ret < 0 ? ret : 0;
375}
376
377static struct inotify_inode_mark *inotify_idr_find_locked(struct fsnotify_group *group,
378								int wd)
379{
380	struct idr *idr = &group->inotify_data.idr;
381	spinlock_t *idr_lock = &group->inotify_data.idr_lock;
382	struct inotify_inode_mark *i_mark;
383
384	assert_spin_locked(idr_lock);
385
386	i_mark = idr_find(idr, wd);
387	if (i_mark) {
388		struct fsnotify_mark *fsn_mark = &i_mark->fsn_mark;
389
390		fsnotify_get_mark(fsn_mark);
391		/* One ref for being in the idr, one ref we just took */
392		BUG_ON(refcount_read(&fsn_mark->refcnt) < 2);
393	}
394
395	return i_mark;
396}
397
398static struct inotify_inode_mark *inotify_idr_find(struct fsnotify_group *group,
399							 int wd)
400{
401	struct inotify_inode_mark *i_mark;
402	spinlock_t *idr_lock = &group->inotify_data.idr_lock;
403
404	spin_lock(idr_lock);
405	i_mark = inotify_idr_find_locked(group, wd);
406	spin_unlock(idr_lock);
407
408	return i_mark;
409}
410
411/*
412 * Remove the mark from the idr (if present) and drop the reference
413 * on the mark because it was in the idr.
414 */
415static void inotify_remove_from_idr(struct fsnotify_group *group,
416				    struct inotify_inode_mark *i_mark)
417{
418	struct idr *idr = &group->inotify_data.idr;
419	spinlock_t *idr_lock = &group->inotify_data.idr_lock;
420	struct inotify_inode_mark *found_i_mark = NULL;
421	int wd;
422
423	spin_lock(idr_lock);
424	wd = i_mark->wd;
425
426	/*
427	 * does this i_mark think it is in the idr?  we shouldn't get called
428	 * if it wasn't....
429	 */
430	if (wd == -1) {
431		WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p\n",
432			__func__, i_mark, i_mark->wd, i_mark->fsn_mark.group);
433		goto out;
434	}
435
436	/* Lets look in the idr to see if we find it */
437	found_i_mark = inotify_idr_find_locked(group, wd);
438	if (unlikely(!found_i_mark)) {
439		WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p\n",
440			__func__, i_mark, i_mark->wd, i_mark->fsn_mark.group);
441		goto out;
442	}
443
444	/*
 445	 * We found a mark in the idr at the right wd, but it's
446	 * not the mark we were told to remove.  eparis seriously
447	 * fucked up somewhere.
448	 */
449	if (unlikely(found_i_mark != i_mark)) {
450		WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p "
451			"found_i_mark=%p found_i_mark->wd=%d "
452			"found_i_mark->group=%p\n", __func__, i_mark,
453			i_mark->wd, i_mark->fsn_mark.group, found_i_mark,
454			found_i_mark->wd, found_i_mark->fsn_mark.group);
455		goto out;
456	}
457
458	/*
459	 * One ref for being in the idr
460	 * one ref grabbed by inotify_idr_find
461	 */
462	if (unlikely(refcount_read(&i_mark->fsn_mark.refcnt) < 2)) {
463		printk(KERN_ERR "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p\n",
464			 __func__, i_mark, i_mark->wd, i_mark->fsn_mark.group);
465		/* we can't really recover with bad ref cnting.. */
466		BUG();
467	}
468
469	idr_remove(idr, wd);
470	/* Removed from the idr, drop that ref. */
471	fsnotify_put_mark(&i_mark->fsn_mark);
472out:
473	i_mark->wd = -1;
474	spin_unlock(idr_lock);
475	/* match the ref taken by inotify_idr_find_locked() */
476	if (found_i_mark)
477		fsnotify_put_mark(&found_i_mark->fsn_mark);
478}
479
480/*
481 * Send IN_IGNORED for this wd, remove this wd from the idr.
482 */
483void inotify_ignored_and_remove_idr(struct fsnotify_mark *fsn_mark,
484				    struct fsnotify_group *group)
485{
486	struct inotify_inode_mark *i_mark;
487	struct fsnotify_iter_info iter_info = { };
488
489	fsnotify_iter_set_report_type_mark(&iter_info, FSNOTIFY_OBJ_TYPE_INODE,
490					   fsn_mark);
491
492	/* Queue ignore event for the watch */
493	inotify_handle_event(group, NULL, FS_IN_IGNORED, NULL,
494			     FSNOTIFY_EVENT_NONE, NULL, 0, &iter_info);
495
496	i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);
497	/* remove this mark from the idr */
498	inotify_remove_from_idr(group, i_mark);
499
500	dec_inotify_watches(group->inotify_data.ucounts);
501}
502
503static int inotify_update_existing_watch(struct fsnotify_group *group,
504					 struct inode *inode,
505					 u32 arg)
506{
507	struct fsnotify_mark *fsn_mark;
508	struct inotify_inode_mark *i_mark;
509	__u32 old_mask, new_mask;
510	__u32 mask;
511	int add = (arg & IN_MASK_ADD);
512	int create = (arg & IN_MASK_CREATE);
513	int ret;
514
515	mask = inotify_arg_to_mask(arg);
516
517	fsn_mark = fsnotify_find_mark(&inode->i_fsnotify_marks, group);
518	if (!fsn_mark)
519		return -ENOENT;
520	else if (create) {
521		ret = -EEXIST;
522		goto out;
523	}
524
525	i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);
526
527	spin_lock(&fsn_mark->lock);
528	old_mask = fsn_mark->mask;
529	if (add)
530		fsn_mark->mask |= mask;
531	else
532		fsn_mark->mask = mask;
533	new_mask = fsn_mark->mask;
534	spin_unlock(&fsn_mark->lock);
535
536	if (old_mask != new_mask) {
537		/* more bits in old than in new? */
538		int dropped = (old_mask & ~new_mask);
539		/* more bits in this fsn_mark than the inode's mask? */
540		int do_inode = (new_mask & ~inode->i_fsnotify_mask);
541
542		/* update the inode with this new fsn_mark */
543		if (dropped || do_inode)
544			fsnotify_recalc_mask(inode->i_fsnotify_marks);
545
546	}
547
548	/* return the wd */
549	ret = i_mark->wd;
550
551out:
552	/* match the get from fsnotify_find_mark() */
553	fsnotify_put_mark(fsn_mark);
554
555	return ret;
556}
557
558static int inotify_new_watch(struct fsnotify_group *group,
559			     struct inode *inode,
560			     u32 arg)
561{
562	struct inotify_inode_mark *tmp_i_mark;
563	__u32 mask;
564	int ret;
565	struct idr *idr = &group->inotify_data.idr;
566	spinlock_t *idr_lock = &group->inotify_data.idr_lock;
567
568	mask = inotify_arg_to_mask(arg);
569
570	tmp_i_mark = kmem_cache_alloc(inotify_inode_mark_cachep, GFP_KERNEL);
571	if (unlikely(!tmp_i_mark))
572		return -ENOMEM;
573
574	fsnotify_init_mark(&tmp_i_mark->fsn_mark, group);
575	tmp_i_mark->fsn_mark.mask = mask;
576	tmp_i_mark->wd = -1;
577
578	ret = inotify_add_to_idr(idr, idr_lock, tmp_i_mark);
579	if (ret)
580		goto out_err;
581
582	/* increment the number of watches the user has */
583	if (!inc_inotify_watches(group->inotify_data.ucounts)) {
584		inotify_remove_from_idr(group, tmp_i_mark);
585		ret = -ENOSPC;
586		goto out_err;
587	}
588
589	/* we are on the idr, now get on the inode */
590	ret = fsnotify_add_inode_mark_locked(&tmp_i_mark->fsn_mark, inode, 0);
591	if (ret) {
592		/* we failed to get on the inode, get off the idr */
593		inotify_remove_from_idr(group, tmp_i_mark);
594		goto out_err;
595	}
596
597
598	/* return the watch descriptor for this new mark */
599	ret = tmp_i_mark->wd;
600
601out_err:
602	/* match the ref from fsnotify_init_mark() */
603	fsnotify_put_mark(&tmp_i_mark->fsn_mark);
604
605	return ret;
606}
607
608static int inotify_update_watch(struct fsnotify_group *group, struct inode *inode, u32 arg)
609{
610	int ret = 0;
611
612	mutex_lock(&group->mark_mutex);
 613	/* try to update an existing watch with the new arg */
614	ret = inotify_update_existing_watch(group, inode, arg);
615	/* no mark present, try to add a new one */
616	if (ret == -ENOENT)
617		ret = inotify_new_watch(group, inode, arg);
618	mutex_unlock(&group->mark_mutex);
619
620	return ret;
621}
622
623static struct fsnotify_group *inotify_new_group(unsigned int max_events)
624{
625	struct fsnotify_group *group;
626	struct inotify_event_info *oevent;
627
628	group = fsnotify_alloc_group(&inotify_fsnotify_ops);
629	if (IS_ERR(group))
630		return group;
631
632	oevent = kmalloc(sizeof(struct inotify_event_info), GFP_KERNEL);
633	if (unlikely(!oevent)) {
634		fsnotify_destroy_group(group);
635		return ERR_PTR(-ENOMEM);
636	}
637	group->overflow_event = &oevent->fse;
638	fsnotify_init_event(group->overflow_event, NULL);
639	oevent->mask = FS_Q_OVERFLOW;
640	oevent->wd = -1;
641	oevent->sync_cookie = 0;
642	oevent->name_len = 0;
643
644	group->max_events = max_events;
645	group->memcg = get_mem_cgroup_from_mm(current->mm);
646
647	spin_lock_init(&group->inotify_data.idr_lock);
648	idr_init(&group->inotify_data.idr);
649	group->inotify_data.ucounts = inc_ucount(current_user_ns(),
650						 current_euid(),
651						 UCOUNT_INOTIFY_INSTANCES);
652
653	if (!group->inotify_data.ucounts) {
654		fsnotify_destroy_group(group);
655		return ERR_PTR(-EMFILE);
656	}
657
658	return group;
659}
660
661
662/* inotify syscalls */
663static int do_inotify_init(int flags)
664{
665	struct fsnotify_group *group;
666	int ret;
667
668	/* Check the IN_* constants for consistency.  */
669	BUILD_BUG_ON(IN_CLOEXEC != O_CLOEXEC);
670	BUILD_BUG_ON(IN_NONBLOCK != O_NONBLOCK);
671
672	if (flags & ~(IN_CLOEXEC | IN_NONBLOCK))
673		return -EINVAL;
674
675	/* fsnotify_obtain_group took a reference to group, we put this when we kill the file in the end */
676	group = inotify_new_group(inotify_max_queued_events);
677	if (IS_ERR(group))
678		return PTR_ERR(group);
679
680	ret = anon_inode_getfd("inotify", &inotify_fops, group,
681				  O_RDONLY | flags);
682	if (ret < 0)
683		fsnotify_destroy_group(group);
684
685	return ret;
686}
687
688SYSCALL_DEFINE1(inotify_init1, int, flags)
689{
690	return do_inotify_init(flags);
691}
692
693SYSCALL_DEFINE0(inotify_init)
694{
695	return do_inotify_init(0);
696}
697
698SYSCALL_DEFINE3(inotify_add_watch, int, fd, const char __user *, pathname,
699		u32, mask)
700{
701	struct fsnotify_group *group;
702	struct inode *inode;
703	struct path path;
704	struct fd f;
705	int ret;
706	unsigned flags = 0;
707
708	/*
709	 * We share a lot of code with fs/dnotify.  We also share
710	 * the bit layout between inotify's IN_* and the fsnotify
711	 * FS_*.  This check ensures that only the inotify IN_*
712	 * bits get passed in and set in watches/events.
713	 */
714	if (unlikely(mask & ~ALL_INOTIFY_BITS))
715		return -EINVAL;
716	/*
717	 * Require at least one valid bit set in the mask.
718	 * Without _something_ set, we would have no events to
719	 * watch for.
720	 */
721	if (unlikely(!(mask & ALL_INOTIFY_BITS)))
722		return -EINVAL;
723
724	f = fdget(fd);
725	if (unlikely(!f.file))
726		return -EBADF;
727
728	/* IN_MASK_ADD and IN_MASK_CREATE don't make sense together */
729	if (unlikely((mask & IN_MASK_ADD) && (mask & IN_MASK_CREATE))) {
730		ret = -EINVAL;
731		goto fput_and_out;
732	}
733
734	/* verify that this is indeed an inotify instance */
735	if (unlikely(f.file->f_op != &inotify_fops)) {
736		ret = -EINVAL;
737		goto fput_and_out;
738	}
739
740	if (!(mask & IN_DONT_FOLLOW))
741		flags |= LOOKUP_FOLLOW;
742	if (mask & IN_ONLYDIR)
743		flags |= LOOKUP_DIRECTORY;
744
745	ret = inotify_find_inode(pathname, &path, flags,
746			(mask & IN_ALL_EVENTS));
747	if (ret)
748		goto fput_and_out;
749
750	/* inode held in place by reference to path; group by fget on fd */
751	inode = path.dentry->d_inode;
752	group = f.file->private_data;
753
754	/* create/update an inode mark */
755	ret = inotify_update_watch(group, inode, mask);
756	path_put(&path);
757fput_and_out:
758	fdput(f);
759	return ret;
760}
761
762SYSCALL_DEFINE2(inotify_rm_watch, int, fd, __s32, wd)
763{
764	struct fsnotify_group *group;
765	struct inotify_inode_mark *i_mark;
766	struct fd f;
767	int ret = 0;
768
769	f = fdget(fd);
770	if (unlikely(!f.file))
771		return -EBADF;
772
773	/* verify that this is indeed an inotify instance */
774	ret = -EINVAL;
775	if (unlikely(f.file->f_op != &inotify_fops))
776		goto out;
777
778	group = f.file->private_data;
779
780	ret = -EINVAL;
781	i_mark = inotify_idr_find(group, wd);
782	if (unlikely(!i_mark))
783		goto out;
784
785	ret = 0;
786
787	fsnotify_destroy_mark(&i_mark->fsn_mark, group);
788
789	/* match ref taken by inotify_idr_find */
790	fsnotify_put_mark(&i_mark->fsn_mark);
791
792out:
793	fdput(f);
794	return ret;
795}
796
797/*
798 * inotify_user_setup - Our initialization function.  Note that we cannot return
799 * error because we have compiled-in VFS hooks.  So an (unlikely) failure here
800 * must result in panic().
801 */
802static int __init inotify_user_setup(void)
803{
804	BUILD_BUG_ON(IN_ACCESS != FS_ACCESS);
805	BUILD_BUG_ON(IN_MODIFY != FS_MODIFY);
806	BUILD_BUG_ON(IN_ATTRIB != FS_ATTRIB);
807	BUILD_BUG_ON(IN_CLOSE_WRITE != FS_CLOSE_WRITE);
808	BUILD_BUG_ON(IN_CLOSE_NOWRITE != FS_CLOSE_NOWRITE);
809	BUILD_BUG_ON(IN_OPEN != FS_OPEN);
810	BUILD_BUG_ON(IN_MOVED_FROM != FS_MOVED_FROM);
811	BUILD_BUG_ON(IN_MOVED_TO != FS_MOVED_TO);
812	BUILD_BUG_ON(IN_CREATE != FS_CREATE);
813	BUILD_BUG_ON(IN_DELETE != FS_DELETE);
814	BUILD_BUG_ON(IN_DELETE_SELF != FS_DELETE_SELF);
815	BUILD_BUG_ON(IN_MOVE_SELF != FS_MOVE_SELF);
816	BUILD_BUG_ON(IN_UNMOUNT != FS_UNMOUNT);
817	BUILD_BUG_ON(IN_Q_OVERFLOW != FS_Q_OVERFLOW);
818	BUILD_BUG_ON(IN_IGNORED != FS_IN_IGNORED);
819	BUILD_BUG_ON(IN_EXCL_UNLINK != FS_EXCL_UNLINK);
820	BUILD_BUG_ON(IN_ISDIR != FS_ISDIR);
821	BUILD_BUG_ON(IN_ONESHOT != FS_IN_ONESHOT);
822
823	BUILD_BUG_ON(HWEIGHT32(ALL_INOTIFY_BITS) != 22);
824
825	inotify_inode_mark_cachep = KMEM_CACHE(inotify_inode_mark,
826					       SLAB_PANIC|SLAB_ACCOUNT);
827
828	inotify_max_queued_events = 16384;
829	init_user_ns.ucount_max[UCOUNT_INOTIFY_INSTANCES] = 128;
830	init_user_ns.ucount_max[UCOUNT_INOTIFY_WATCHES] = 8192;
831
832	return 0;
833}
834fs_initcall(inotify_user_setup);
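
Compared with the v4.17 listing, the v5.4 code adds IN_MASK_CREATE handling: inotify_update_existing_watch() returns -EEXIST when IN_MASK_CREATE is set and a mark already exists, and inotify_add_watch() rejects IN_MASK_ADD combined with IN_MASK_CREATE. A short illustrative userspace sketch of that behaviour follows (not part of this file; the fallback #define mirrors the value in include/uapi/linux/inotify.h for older libc headers).

/*
 * Illustrative only: exercises the IN_MASK_CREATE semantics added to
 * inotify_update_existing_watch() and sys_inotify_add_watch() above.
 */
#include <errno.h>
#include <stdio.h>
#include <sys/inotify.h>

#ifndef IN_MASK_CREATE
#define IN_MASK_CREATE 0x10000000	/* value from include/uapi/linux/inotify.h */
#endif

int main(void)
{
	int fd = inotify_init1(IN_CLOEXEC);
	int wd;

	if (fd < 0)
		return 1;

	/* First add: no mark exists yet, so IN_MASK_CREATE succeeds. */
	wd = inotify_add_watch(fd, "/tmp", IN_CREATE | IN_MASK_CREATE);
	printf("first add: wd=%d\n", wd);

	/* Second add with IN_MASK_CREATE finds the existing mark and the
	 * kernel refuses to modify it, returning EEXIST. */
	wd = inotify_add_watch(fd, "/tmp", IN_DELETE | IN_MASK_CREATE);
	if (wd < 0 && errno == EEXIST)
		printf("second add: EEXIST (watch already present)\n");

	/* IN_MASK_ADD and IN_MASK_CREATE together are rejected up front. */
	wd = inotify_add_watch(fd, "/tmp", IN_CREATE | IN_MASK_ADD | IN_MASK_CREATE);
	if (wd < 0 && errno == EINVAL)
		printf("IN_MASK_ADD|IN_MASK_CREATE: EINVAL\n");

	return 0;
}
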