/*
 *  Copyright (C) 2008 Red Hat, Inc., Eric Paris <eparis@redhat.com>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>

#include <linux/atomic.h>

#include <linux/fsnotify_backend.h>
#include "fsnotify.h"

#include "../internal.h"

/*
 * Recalculate the mask of events relevant to a given inode.  The caller
 * must hold inode->i_lock.
 */
static void fsnotify_recalc_inode_mask_locked(struct inode *inode)
{
	struct fsnotify_mark *mark;
	__u32 new_mask = 0;

	assert_spin_locked(&inode->i_lock);

	hlist_for_each_entry(mark, &inode->i_fsnotify_marks, i.i_list)
		new_mask |= mark->mask;
	inode->i_fsnotify_mask = new_mask;
}

/*
 * Recalculate the inode->i_fsnotify_mask, or the mask of all FS_* event types
 * any notifier is interested in hearing for this inode.
 */
void fsnotify_recalc_inode_mask(struct inode *inode)
{
	spin_lock(&inode->i_lock);
	fsnotify_recalc_inode_mask_locked(inode);
	spin_unlock(&inode->i_lock);

	__fsnotify_update_child_dentry_flags(inode);
}
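
/*
 * Example (illustrative sketch, not part of the original file): the inode
 * mask is simply the OR of every attached mark's mask.  With two
 * hypothetical marks already on the inode:
 *
 *	mark_a->mask = FS_MODIFY;
 *	mark_b->mask = FS_CREATE | FS_DELETE;
 *	fsnotify_recalc_inode_mask(inode);
 *
 * inode->i_fsnotify_mask now reads FS_MODIFY | FS_CREATE | FS_DELETE, so
 * the event path can test a single word to see whether anyone cares at all.
 */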

void fsnotify_destroy_inode_mark(struct fsnotify_mark *mark)
{
	struct inode *inode = mark->i.inode;

	BUG_ON(!mutex_is_locked(&mark->group->mark_mutex));
	assert_spin_locked(&mark->lock);

	spin_lock(&inode->i_lock);

	hlist_del_init_rcu(&mark->i.i_list);
	mark->i.inode = NULL;

	/*
	 * This mark is now off the inode->i_fsnotify_marks list and we
	 * hold inode->i_lock, so this is the perfect time to update
	 * inode->i_fsnotify_mask.
	 */
	fsnotify_recalc_inode_mask_locked(inode);

	spin_unlock(&inode->i_lock);
}
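
/*
 * Note (added context, hedged): the normal way to reach the function above
 * is fsnotify_destroy_mark() in mark.c, which takes group->mark_mutex and
 * mark->lock before dispatching to the per-object teardown; that is why
 * both locks are asserted held on entry.
 */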

/*
 * Given an inode, destroy all of the marks associated with that inode.
 */
void fsnotify_clear_marks_by_inode(struct inode *inode)
{
	struct fsnotify_mark *mark, *lmark;
	struct hlist_node *n;
	LIST_HEAD(free_list);

	spin_lock(&inode->i_lock);
	hlist_for_each_entry_safe(mark, n, &inode->i_fsnotify_marks, i.i_list) {
		list_add(&mark->i.free_i_list, &free_list);
		hlist_del_init_rcu(&mark->i.i_list);
		fsnotify_get_mark(mark);
	}
	spin_unlock(&inode->i_lock);
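
	/*
	 * The marks were moved onto a private list under i_lock because
	 * fsnotify_destroy_mark() below takes group->mark_mutex, which can
	 * sleep and therefore must not be acquired under the i_lock
	 * spinlock.
	 */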
	list_for_each_entry_safe(mark, lmark, &free_list, i.free_i_list) {
		struct fsnotify_group *group;

		spin_lock(&mark->lock);
		fsnotify_get_group(mark->group);
		group = mark->group;
		spin_unlock(&mark->lock);

		fsnotify_destroy_mark(mark, group);
		fsnotify_put_mark(mark);
		fsnotify_put_group(group);
	}
}

/*
 * Given a group, clear all of the inode marks associated with that group.
 */
void fsnotify_clear_inode_marks_by_group(struct fsnotify_group *group)
{
	fsnotify_clear_marks_by_group_flags(group, FSNOTIFY_MARK_FLAG_INODE);
}

/*
 * Given a group and inode, find the mark associated with that combination.
 * If found, take a reference to that mark and return it; else return NULL.
 */
static struct fsnotify_mark *fsnotify_find_inode_mark_locked(
		struct fsnotify_group *group,
		struct inode *inode)
{
	struct fsnotify_mark *mark;

	assert_spin_locked(&inode->i_lock);

	hlist_for_each_entry(mark, &inode->i_fsnotify_marks, i.i_list) {
		if (mark->group == group) {
			fsnotify_get_mark(mark);
			return mark;
		}
	}
	return NULL;
}

/*
 * Given a group and inode, find the mark associated with that combination.
 * If found, take a reference to that mark and return it; else return NULL.
 */
struct fsnotify_mark *fsnotify_find_inode_mark(struct fsnotify_group *group,
					       struct inode *inode)
{
	struct fsnotify_mark *mark;

	spin_lock(&inode->i_lock);
	mark = fsnotify_find_inode_mark_locked(group, inode);
	spin_unlock(&inode->i_lock);

	return mark;
}
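
/*
 * Example (illustrative sketch, not part of the original file): a backend
 * updating an existing watch must drop the reference this helper takes.
 * "my_group" is a hypothetical group pointer; real callers go through
 * fsnotify_set_mark_mask_locked() so that inode pinning stays in sync:
 *
 *	struct fsnotify_mark *mark;
 *
 *	mark = fsnotify_find_inode_mark(my_group, inode);
 *	if (mark) {
 *		spin_lock(&mark->lock);
 *		fsnotify_set_mark_mask_locked(mark, mark->mask | FS_MODIFY);
 *		spin_unlock(&mark->lock);
 *		fsnotify_recalc_inode_mask(inode);
 *		fsnotify_put_mark(mark);
 *	}
 */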

/*
 * If we are setting a mark mask on an inode mark we should pin the inode
 * in memory.
 */
void fsnotify_set_inode_mark_mask_locked(struct fsnotify_mark *mark,
					 __u32 mask)
{
	struct inode *inode;

	assert_spin_locked(&mark->lock);

	if (mask &&
	    mark->i.inode &&
	    !(mark->flags & FSNOTIFY_MARK_FLAG_OBJECT_PINNED)) {
		mark->flags |= FSNOTIFY_MARK_FLAG_OBJECT_PINNED;
		inode = igrab(mark->i.inode);
		/*
		 * We shouldn't be able to get here if the inode wasn't
		 * already safely held in memory, but BUG() in case that
		 * assumption is ever wrong.
		 */
		BUG_ON(!inode);
	}
}
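
/*
 * Note (added context, hedged): the igrab() above gives the mark its own
 * reference on the inode, so a watched inode cannot be evicted from the
 * icache while a non-zero mask is set; the matching iput() happens on the
 * mark-destruction path in mark.c when FSNOTIFY_MARK_FLAG_OBJECT_PINNED
 * is seen.
 */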

/*
 * Attach an initialized mark to a given inode.
 * These marks are used by the fsnotify backend to determine which
 * event types should be delivered to which group and for which inodes.  These
 * marks are ordered according to priority, highest number first, and then by
 * the group's location in memory.
 */
int fsnotify_add_inode_mark(struct fsnotify_mark *mark,
			    struct fsnotify_group *group, struct inode *inode,
			    int allow_dups)
{
	struct fsnotify_mark *lmark, *last = NULL;
	int ret = 0;

	mark->flags |= FSNOTIFY_MARK_FLAG_INODE;

	BUG_ON(!mutex_is_locked(&group->mark_mutex));
	assert_spin_locked(&mark->lock);

	spin_lock(&inode->i_lock);

	mark->i.inode = inode;

	/* is mark the first mark? */
	if (hlist_empty(&inode->i_fsnotify_marks)) {
		hlist_add_head_rcu(&mark->i.i_list, &inode->i_fsnotify_marks);
		goto out;
	}

	/* should mark be in the middle of the current list? */
	hlist_for_each_entry(lmark, &inode->i_fsnotify_marks, i.i_list) {
		last = lmark;

		if ((lmark->group == group) && !allow_dups) {
			ret = -EEXIST;
			goto out;
		}

		if (mark->group->priority < lmark->group->priority)
			continue;

		if ((mark->group->priority == lmark->group->priority) &&
		    (mark->group < lmark->group))
			continue;

		hlist_add_before_rcu(&mark->i.i_list, &lmark->i.i_list);
		goto out;
	}

	BUG_ON(last == NULL);
	/* mark should be the last entry.  last is the current last entry */
	hlist_add_after_rcu(&last->i.i_list, &mark->i.i_list);
out:
	fsnotify_recalc_inode_mask_locked(inode);
	spin_unlock(&inode->i_lock);

	return ret;
}
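
/*
 * Example (illustrative sketch, not part of the original file): backends
 * do not call fsnotify_add_inode_mark() directly; they go through
 * fsnotify_add_mark(), which acquires group->mark_mutex and mark->lock
 * before dispatching here.  A simplified inotify-style attach, where
 * "my_group" and "my_free_mark" are hypothetical:
 *
 *	fsnotify_init_mark(mark, my_free_mark);
 *	mark->mask = FS_MODIFY | FS_DELETE_SELF;
 *	ret = fsnotify_add_mark(mark, my_group, inode, NULL, 0);
 */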

/**
 * fsnotify_unmount_inodes - an sb is unmounting; handle any watched inodes.
 * @list: list of inodes being unmounted (sb->s_inodes)
 *
 * Called during unmount with no locks held, so needs to be safe against
 * concurrent modifiers. We temporarily drop inode_sb_list_lock and CAN block.
 */
void fsnotify_unmount_inodes(struct list_head *list)
{
	struct inode *inode, *next_i, *need_iput = NULL;

	spin_lock(&inode_sb_list_lock);
	list_for_each_entry_safe(inode, next_i, list, i_sb_list) {
		struct inode *need_iput_tmp;

		/*
		 * We cannot __iget() an inode in state I_FREEING,
		 * I_WILL_FREE, or I_NEW which is fine because by that point
		 * the inode cannot have any associated watches.
		 */
		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) {
			spin_unlock(&inode->i_lock);
			continue;
		}

		/*
		 * If i_count is zero, the inode cannot have any watches and
		 * doing an __iget/iput with MS_ACTIVE clear would actually
		 * evict all inodes with zero i_count from icache which is
		 * unnecessarily violent and may in fact be illegal to do.
		 */
		if (!atomic_read(&inode->i_count)) {
			spin_unlock(&inode->i_lock);
			continue;
		}

		need_iput_tmp = need_iput;
		need_iput = NULL;

		/* In case fsnotify_inode_delete() drops a reference. */
		if (inode != need_iput_tmp)
			__iget(inode);
		else
			need_iput_tmp = NULL;
		spin_unlock(&inode->i_lock);

		/* In case the dropping of a reference would nuke next_i. */
		if ((&next_i->i_sb_list != list) &&
		    atomic_read(&next_i->i_count)) {
			spin_lock(&next_i->i_lock);
			if (!(next_i->i_state & (I_FREEING | I_WILL_FREE))) {
				__iget(next_i);
				need_iput = next_i;
			}
			spin_unlock(&next_i->i_lock);
		}

		/*
		 * We can safely drop inode_sb_list_lock here because we hold
		 * references on both inode and next_i.  Also no new inodes
		 * will be added since the umount has begun.
		 */
		spin_unlock(&inode_sb_list_lock);

		if (need_iput_tmp)
			iput(need_iput_tmp);

		/* for each watch, send FS_UNMOUNT and then remove it */
		fsnotify(inode, FS_UNMOUNT, inode, FSNOTIFY_EVENT_INODE, NULL, 0);

		fsnotify_inode_delete(inode);

		iput(inode);

		spin_lock(&inode_sb_list_lock);
	}
	spin_unlock(&inode_sb_list_lock);
}
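
/*
 * Note (added context, hedged): in kernels of this vintage the caller is
 * generic_shutdown_super() in fs/super.c, which passes &sb->s_inodes, so
 * every watcher sees an FS_UNMOUNT event before its marks are torn down
 * along with the filesystem.
 */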