v3.1
/*
 *  Copyright (C) 2008 Red Hat, Inc., Eric Paris <eparis@redhat.com>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * fsnotify inode mark locking/lifetime/refcounting
 *
 * REFCNT:
 * The mark->refcnt tells how many "things" in the kernel currently are
 * referencing this object.  The object typically will live inside the kernel
 * with a refcnt of 2, one for each list it is on (i_list, g_list).  Any task
 * which can find this object holding the appropriate locks, can take a reference
 * and the object itself is guaranteed to survive until the reference is dropped.
 *
 * LOCKING:
 * There are 3 spinlocks involved with fsnotify inode marks and they MUST
 * be taken in order as follows:
 *
 * mark->lock
 * group->mark_lock
 * inode->i_lock
 *
 * mark->lock protects 2 things, mark->group and mark->inode.  You must hold
 * that lock to dereference either of these things (they could be NULL even with
 * the lock)
 *
 * group->mark_lock protects the marks_list anchored inside a given group
 * and each mark is hooked via the g_list.  It also sorta protects the
 * free_g_list, which when used is anchored by a private list on the stack of the
 * task which held the group->mark_lock.
 *
 * inode->i_lock protects the i_fsnotify_marks list anchored inside a
 * given inode and each mark is hooked via the i_list. (and sorta the
 * free_i_list)
 *
 *
 * LIFETIME:
 * Inode marks survive between when they are added to an inode and when their
 * refcnt==0.
 *
 * The inode mark can be cleared for a number of different reasons including:
 * - The inode is unlinked for the last time.  (fsnotify_inode_remove)
 * - The inode is being evicted from cache. (fsnotify_inode_delete)
 * - The fs the inode is on is unmounted.  (fsnotify_inode_delete/fsnotify_unmount_inodes)
 * - Something explicitly requests that it be removed.  (fsnotify_destroy_mark)
 * - The fsnotify_group associated with the mark is going away and all such marks
 *   need to be cleaned up. (fsnotify_clear_marks_by_group)
 *
 * Worst case we are given an inode and need to clean up all the marks on that
 * inode.  We take i_lock and walk the i_fsnotify_marks safely.  For each
 * mark on the list we take a reference (so the mark can't disappear under us).
 * We remove that mark from the inode's list of marks and we add this mark to a
 * private list anchored on the stack using i_free_list.  At this point we no
 * longer fear anything finding the mark using the inode's list of marks.
 *
 * We can safely and locklessly run the private list on the stack of everything
 * we just unattached from the original inode.  For each mark on the private list
 * we grab the mark->lock and can thus dereference mark->group and mark->inode.  If
 * we see the group and inode are not NULL we take those locks.  Now holding all
 * 3 locks we can completely remove the mark from other tasks finding it in the
 * future.  Remember, 10 things might already be referencing this mark, but they
 * better be holding a ref.  We drop the reference we took before we unhooked it
 * from the inode.  When the ref hits 0 we can free the mark.
 *
 * Very similarly for freeing by group, except we use free_g_list.
 *
 * This has the very interesting property of being able to run concurrently with
 * any (or all) of the other directions.
 */

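/*
 * Editor's illustrative sketch, not part of the original file: the REFCNT
 * rule above in practice.  A task that finds a mark on an inode's list
 * while holding inode->i_lock takes a reference before dropping the lock,
 * and puts that reference when it is done (placeholders in angle brackets):
 *
 *	spin_lock(&inode->i_lock);
 *	mark = <a mark found on inode->i_fsnotify_marks>;
 *	fsnotify_get_mark(mark);	<- mark can't disappear under us now
 *	spin_unlock(&inode->i_lock);
 *	<use mark->group, mark->mask, ...>
 *	fsnotify_put_mark(mark);	<- may be the final put
 */
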
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/srcu.h>

#include <linux/atomic.h>

#include <linux/fsnotify_backend.h>
#include "fsnotify.h"

struct srcu_struct fsnotify_mark_srcu;
static DEFINE_SPINLOCK(destroy_lock);
static LIST_HEAD(destroy_list);
static DECLARE_WAIT_QUEUE_HEAD(destroy_waitq);

/* take a reference; the caller must already be keeping the mark alive (see REFCNT above) */
void fsnotify_get_mark(struct fsnotify_mark *mark)
{
	atomic_inc(&mark->refcnt);
}

/* drop a reference; the final put frees the mark via its ->free_mark callback */
void fsnotify_put_mark(struct fsnotify_mark *mark)
{
	if (atomic_dec_and_test(&mark->refcnt))
		mark->free_mark(mark);
}

/*
 * Any time a mark is getting freed we end up here.
 * The caller had better be holding a reference to this mark so we don't actually
 * do the final put under the mark->lock.
 */
void fsnotify_destroy_mark(struct fsnotify_mark *mark)
{
	struct fsnotify_group *group;
	struct inode *inode = NULL;

	spin_lock(&mark->lock);

	group = mark->group;

	/* something else already called this function on this mark */
	if (!(mark->flags & FSNOTIFY_MARK_FLAG_ALIVE)) {
		spin_unlock(&mark->lock);
		return;
	}

	mark->flags &= ~FSNOTIFY_MARK_FLAG_ALIVE;

	/* 1 from caller and 1 for being on i_list/g_list */
	BUG_ON(atomic_read(&mark->refcnt) < 2);

	spin_lock(&group->mark_lock);

	if (mark->flags & FSNOTIFY_MARK_FLAG_INODE) {
		inode = mark->i.inode;
		fsnotify_destroy_inode_mark(mark);
	} else if (mark->flags & FSNOTIFY_MARK_FLAG_VFSMOUNT)
		fsnotify_destroy_vfsmount_mark(mark);
	else
		BUG();

	list_del_init(&mark->g_list);

	spin_unlock(&group->mark_lock);
	spin_unlock(&mark->lock);

	spin_lock(&destroy_lock);
	list_add(&mark->destroy_list, &destroy_list);
	spin_unlock(&destroy_lock);
	wake_up(&destroy_waitq);

	/*
	 * Some groups like to know that marks are being freed.  This is a
	 * callback to the group function to let it know that this mark
	 * is being freed.
	 */
	if (group->ops->freeing_mark)
		group->ops->freeing_mark(mark, group);

	/*
	 * __fsnotify_update_child_dentry_flags(inode);
	 *
	 * I really want to call that, but we can't, we have no idea if the inode
	 * still exists the second we drop the mark->lock.
	 *
	 * The next time an event arrives at this inode from one of its children
	 * __fsnotify_parent will see that the inode doesn't care about its
	 * children and will update all of these flags then.  So really this
	 * is just a lazy update (and could be a perf win...)
	 */

	if (inode && (mark->flags & FSNOTIFY_MARK_FLAG_OBJECT_PINNED))
		iput(inode);

	/*
	 * it's possible that this group tried to destroy itself, but this
	 * mark was simultaneously being freed by the inode.  If that's the
	 * case, we finish freeing the group here.
	 */
	if (unlikely(atomic_dec_and_test(&group->num_marks)))
		fsnotify_final_destroy_group(group);
}

void fsnotify_set_mark_mask_locked(struct fsnotify_mark *mark, __u32 mask)
{
	assert_spin_locked(&mark->lock);

	mark->mask = mask;

	if (mark->flags & FSNOTIFY_MARK_FLAG_INODE)
		fsnotify_set_inode_mark_mask_locked(mark, mask);
}

void fsnotify_set_mark_ignored_mask_locked(struct fsnotify_mark *mark, __u32 mask)
{
	assert_spin_locked(&mark->lock);

	mark->ignored_mask = mask;
}

/*
 * Attach an initialized mark to a given group and fs object.
 * These marks may be used by the fsnotify backend to determine which
 * event types should be delivered to which group.
 */
int fsnotify_add_mark(struct fsnotify_mark *mark,
		      struct fsnotify_group *group, struct inode *inode,
		      struct vfsmount *mnt, int allow_dups)
{
	int ret = 0;

	BUG_ON(inode && mnt);
	BUG_ON(!inode && !mnt);

	/*
	 * LOCKING ORDER!!!!
	 * mark->lock
	 * group->mark_lock
	 * inode->i_lock
	 */
	spin_lock(&mark->lock);
	spin_lock(&group->mark_lock);

	mark->flags |= FSNOTIFY_MARK_FLAG_ALIVE;

	mark->group = group;
	list_add(&mark->g_list, &group->marks_list);
	atomic_inc(&group->num_marks);
	fsnotify_get_mark(mark); /* for i_list and g_list */

	if (inode) {
		ret = fsnotify_add_inode_mark(mark, group, inode, allow_dups);
		if (ret)
			goto err;
	} else if (mnt) {
		ret = fsnotify_add_vfsmount_mark(mark, group, mnt, allow_dups);
		if (ret)
			goto err;
	} else {
		BUG();
	}

	spin_unlock(&group->mark_lock);

	/* this will pin the object if appropriate */
	fsnotify_set_mark_mask_locked(mark, mark->mask);

	spin_unlock(&mark->lock);

	if (inode)
		__fsnotify_update_child_dentry_flags(inode);

	return ret;
err:
	mark->flags &= ~FSNOTIFY_MARK_FLAG_ALIVE;
	list_del_init(&mark->g_list);
	mark->group = NULL;
	atomic_dec(&group->num_marks);

	spin_unlock(&group->mark_lock);
	spin_unlock(&mark->lock);

	spin_lock(&destroy_lock);
	list_add(&mark->destroy_list, &destroy_list);
	spin_unlock(&destroy_lock);
	wake_up(&destroy_waitq);

	return ret;
}
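
/*
 * Editor's illustrative sketch, not part of the original file: how a
 * backend would typically drive the API above to watch an inode.  The
 * cache name and free callback are placeholders, and error handling is
 * elided:
 *
 *	struct fsnotify_mark *mark;
 *
 *	mark = kmem_cache_alloc(<mark_cachep>, GFP_KERNEL);
 *	fsnotify_init_mark(mark, <free_mark_fn>);	refcnt is now 1
 *	mark->mask = FS_CREATE | FS_DELETE;		consulted by fsnotify_add_mark()
 *	ret = fsnotify_add_mark(mark, group, inode, NULL, 0);
 *
 *	... later, to tear it down explicitly ...
 *
 *	fsnotify_destroy_mark(mark);	unhook from inode and group
 *	fsnotify_put_mark(mark);	drop the allocation reference
 */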

/*
 * clear any marks in a group in which mark->flags & flags is true
 */
void fsnotify_clear_marks_by_group_flags(struct fsnotify_group *group,
					 unsigned int flags)
{
	struct fsnotify_mark *lmark, *mark;
	LIST_HEAD(free_list);

	spin_lock(&group->mark_lock);
	list_for_each_entry_safe(mark, lmark, &group->marks_list, g_list) {
		if (mark->flags & flags) {
			list_add(&mark->free_g_list, &free_list);
			list_del_init(&mark->g_list);
			fsnotify_get_mark(mark);
		}
	}
	spin_unlock(&group->mark_lock);

	list_for_each_entry_safe(mark, lmark, &free_list, free_g_list) {
		fsnotify_destroy_mark(mark);
		fsnotify_put_mark(mark);
	}
}

/*
 * Given a group, destroy all of the marks associated with that group.
 */
void fsnotify_clear_marks_by_group(struct fsnotify_group *group)
{
	fsnotify_clear_marks_by_group_flags(group, (unsigned int)-1);
}

void fsnotify_duplicate_mark(struct fsnotify_mark *new, struct fsnotify_mark *old)
{
	assert_spin_locked(&old->lock);
	new->i.inode = old->i.inode;
	new->m.mnt = old->m.mnt;
	new->group = old->group;
	new->mask = old->mask;
	new->free_mark = old->free_mark;
}

/*
 * Nothing fancy, just initialize lists and locks and counters.
 */
void fsnotify_init_mark(struct fsnotify_mark *mark,
			void (*free_mark)(struct fsnotify_mark *mark))
{
	memset(mark, 0, sizeof(*mark));
	spin_lock_init(&mark->lock);
	atomic_set(&mark->refcnt, 1);
	mark->free_mark = free_mark;
}

static int fsnotify_mark_destroy(void *ignored)
{
	struct fsnotify_mark *mark, *next;
	LIST_HEAD(private_destroy_list);

	for (;;) {
		spin_lock(&destroy_lock);
		/* exchange the list head */
		list_replace_init(&destroy_list, &private_destroy_list);
		spin_unlock(&destroy_lock);

		/* wait for any SRCU readers still walking mark lists before the final puts below */
		synchronize_srcu(&fsnotify_mark_srcu);

		list_for_each_entry_safe(mark, next, &private_destroy_list, destroy_list) {
			list_del_init(&mark->destroy_list);
			fsnotify_put_mark(mark);
		}

		wait_event_interruptible(destroy_waitq, !list_empty(&destroy_list));
	}

	return 0;
}
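
/*
 * Editor's illustrative sketch, not part of the original file: the
 * two-phase teardown driven by this thread, as a timeline:
 *
 *	fsnotify_destroy_mark(mark)
 *		unhooks mark from its inode/group lists
 *		list_add(&mark->destroy_list, &destroy_list)
 *		wake_up(&destroy_waitq)
 *	fsnotify_mark_destroy()
 *		list_replace_init(&destroy_list, &private_destroy_list)
 *		synchronize_srcu(&fsnotify_mark_srcu)	readers drain here
 *		fsnotify_put_mark(mark)			final put -> ->free_mark()
 */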

static int __init fsnotify_mark_init(void)
{
	struct task_struct *thread;

	thread = kthread_run(fsnotify_mark_destroy, NULL,
			     "fsnotify_mark");
	if (IS_ERR(thread))
		panic("unable to start fsnotify mark destruction thread.");

	return 0;
}
device_initcall(fsnotify_mark_init);
v3.5.6
/*
 *  Copyright (C) 2008 Red Hat, Inc., Eric Paris <eparis@redhat.com>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * fsnotify inode mark locking/lifetime/refcounting
 *
 * REFCNT:
 * The mark->refcnt tells how many "things" in the kernel currently are
 * referencing this object.  The object typically will live inside the kernel
 * with a refcnt of 2, one for each list it is on (i_list, g_list).  Any task
 * which can find this object holding the appropriate locks, can take a reference
 * and the object itself is guaranteed to survive until the reference is dropped.
 *
 * LOCKING:
 * There are 3 spinlocks involved with fsnotify inode marks and they MUST
 * be taken in order as follows:
 *
 * mark->lock
 * group->mark_lock
 * inode->i_lock
 *
 * mark->lock protects 2 things, mark->group and mark->inode.  You must hold
 * that lock to dereference either of these things (they could be NULL even with
 * the lock)
 *
 * group->mark_lock protects the marks_list anchored inside a given group
 * and each mark is hooked via the g_list.  It also sorta protects the
 * free_g_list, which when used is anchored by a private list on the stack of the
 * task which held the group->mark_lock.
 *
 * inode->i_lock protects the i_fsnotify_marks list anchored inside a
 * given inode and each mark is hooked via the i_list. (and sorta the
 * free_i_list)
 *
 *
 * LIFETIME:
 * Inode marks survive between when they are added to an inode and when their
 * refcnt==0.
 *
 * The inode mark can be cleared for a number of different reasons including:
 * - The inode is unlinked for the last time.  (fsnotify_inode_remove)
 * - The inode is being evicted from cache. (fsnotify_inode_delete)
 * - The fs the inode is on is unmounted.  (fsnotify_inode_delete/fsnotify_unmount_inodes)
 * - Something explicitly requests that it be removed.  (fsnotify_destroy_mark)
 * - The fsnotify_group associated with the mark is going away and all such marks
 *   need to be cleaned up. (fsnotify_clear_marks_by_group)
 *
 * Worst case we are given an inode and need to clean up all the marks on that
 * inode.  We take i_lock and walk the i_fsnotify_marks safely.  For each
 * mark on the list we take a reference (so the mark can't disappear under us).
 * We remove that mark from the inode's list of marks and we add this mark to a
 * private list anchored on the stack using i_free_list.  At this point we no
 * longer fear anything finding the mark using the inode's list of marks.
 *
 * We can safely and locklessly run the private list on the stack of everything
 * we just unattached from the original inode.  For each mark on the private list
 * we grab the mark->lock and can thus dereference mark->group and mark->inode.  If
 * we see the group and inode are not NULL we take those locks.  Now holding all
 * 3 locks we can completely remove the mark from other tasks finding it in the
 * future.  Remember, 10 things might already be referencing this mark, but they
 * better be holding a ref.  We drop the reference we took before we unhooked it
 * from the inode.  When the ref hits 0 we can free the mark.
 *
 * Very similarly for freeing by group, except we use free_g_list.
 *
 * This has the very interesting property of being able to run concurrently with
 * any (or all) of the other directions.
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/srcu.h>

#include <linux/atomic.h>

#include <linux/fsnotify_backend.h>
#include "fsnotify.h"

struct srcu_struct fsnotify_mark_srcu;
static DEFINE_SPINLOCK(destroy_lock);
static LIST_HEAD(destroy_list);
static DECLARE_WAIT_QUEUE_HEAD(destroy_waitq);

/* take a reference; the caller must already be keeping the mark alive (see REFCNT above) */
void fsnotify_get_mark(struct fsnotify_mark *mark)
{
	atomic_inc(&mark->refcnt);
}

/* drop a reference; the final put frees the mark via its ->free_mark callback */
void fsnotify_put_mark(struct fsnotify_mark *mark)
{
	if (atomic_dec_and_test(&mark->refcnt))
		mark->free_mark(mark);
}

/*
 * Any time a mark is getting freed we end up here.
 * The caller had better be holding a reference to this mark so we don't actually
 * do the final put under the mark->lock.
 */
void fsnotify_destroy_mark(struct fsnotify_mark *mark)
{
	struct fsnotify_group *group;
	struct inode *inode = NULL;

	spin_lock(&mark->lock);

	group = mark->group;

	/* something else already called this function on this mark */
	if (!(mark->flags & FSNOTIFY_MARK_FLAG_ALIVE)) {
		spin_unlock(&mark->lock);
		return;
	}

	mark->flags &= ~FSNOTIFY_MARK_FLAG_ALIVE;

	spin_lock(&group->mark_lock);

	if (mark->flags & FSNOTIFY_MARK_FLAG_INODE) {
		inode = mark->i.inode;
		fsnotify_destroy_inode_mark(mark);
	} else if (mark->flags & FSNOTIFY_MARK_FLAG_VFSMOUNT)
		fsnotify_destroy_vfsmount_mark(mark);
	else
		BUG();

	list_del_init(&mark->g_list);

	spin_unlock(&group->mark_lock);
	spin_unlock(&mark->lock);

	spin_lock(&destroy_lock);
	list_add(&mark->destroy_list, &destroy_list);
	spin_unlock(&destroy_lock);
	wake_up(&destroy_waitq);

	/*
	 * Some groups like to know that marks are being freed.  This is a
	 * callback to the group function to let it know that this mark
	 * is being freed.
	 */
	if (group->ops->freeing_mark)
		group->ops->freeing_mark(mark, group);

	/*
	 * __fsnotify_update_child_dentry_flags(inode);
	 *
	 * I really want to call that, but we can't, we have no idea if the inode
	 * still exists the second we drop the mark->lock.
	 *
	 * The next time an event arrives at this inode from one of its children
	 * __fsnotify_parent will see that the inode doesn't care about its
	 * children and will update all of these flags then.  So really this
	 * is just a lazy update (and could be a perf win...)
	 */

	if (inode && (mark->flags & FSNOTIFY_MARK_FLAG_OBJECT_PINNED))
		iput(inode);

	/*
	 * We don't necessarily have a ref on mark from the caller, so the above
	 * iput may have already destroyed it.  Don't touch it from now on.
	 */

	/*
	 * it's possible that this group tried to destroy itself, but this
	 * mark was simultaneously being freed by the inode.  If that's the
	 * case, we finish freeing the group here.
	 */
	if (unlikely(atomic_dec_and_test(&group->num_marks)))
		fsnotify_final_destroy_group(group);
}

void fsnotify_set_mark_mask_locked(struct fsnotify_mark *mark, __u32 mask)
{
	assert_spin_locked(&mark->lock);

	mark->mask = mask;

	if (mark->flags & FSNOTIFY_MARK_FLAG_INODE)
		fsnotify_set_inode_mark_mask_locked(mark, mask);
}

void fsnotify_set_mark_ignored_mask_locked(struct fsnotify_mark *mark, __u32 mask)
{
	assert_spin_locked(&mark->lock);

	mark->ignored_mask = mask;
}

/*
 * Attach an initialized mark to a given group and fs object.
 * These marks may be used by the fsnotify backend to determine which
 * event types should be delivered to which group.
 */
int fsnotify_add_mark(struct fsnotify_mark *mark,
		      struct fsnotify_group *group, struct inode *inode,
		      struct vfsmount *mnt, int allow_dups)
{
	int ret = 0;

	BUG_ON(inode && mnt);
	BUG_ON(!inode && !mnt);

	/*
	 * LOCKING ORDER!!!!
	 * mark->lock
	 * group->mark_lock
	 * inode->i_lock
	 */
	spin_lock(&mark->lock);
	spin_lock(&group->mark_lock);

	mark->flags |= FSNOTIFY_MARK_FLAG_ALIVE;

	mark->group = group;
	list_add(&mark->g_list, &group->marks_list);
	atomic_inc(&group->num_marks);
	fsnotify_get_mark(mark); /* for i_list and g_list */

	if (inode) {
		ret = fsnotify_add_inode_mark(mark, group, inode, allow_dups);
		if (ret)
			goto err;
	} else if (mnt) {
		ret = fsnotify_add_vfsmount_mark(mark, group, mnt, allow_dups);
		if (ret)
			goto err;
	} else {
		BUG();
	}

	spin_unlock(&group->mark_lock);

	/* this will pin the object if appropriate */
	fsnotify_set_mark_mask_locked(mark, mark->mask);

	spin_unlock(&mark->lock);

	if (inode)
		__fsnotify_update_child_dentry_flags(inode);

	return ret;
err:
	mark->flags &= ~FSNOTIFY_MARK_FLAG_ALIVE;
	list_del_init(&mark->g_list);
	mark->group = NULL;
	atomic_dec(&group->num_marks);

	spin_unlock(&group->mark_lock);
	spin_unlock(&mark->lock);

	spin_lock(&destroy_lock);
	list_add(&mark->destroy_list, &destroy_list);
	spin_unlock(&destroy_lock);
	wake_up(&destroy_waitq);

	return ret;
}

/*
 * clear any marks in a group in which mark->flags & flags is true
 */
void fsnotify_clear_marks_by_group_flags(struct fsnotify_group *group,
					 unsigned int flags)
{
	struct fsnotify_mark *lmark, *mark;
	LIST_HEAD(free_list);

	spin_lock(&group->mark_lock);
	list_for_each_entry_safe(mark, lmark, &group->marks_list, g_list) {
		if (mark->flags & flags) {
			list_add(&mark->free_g_list, &free_list);
			list_del_init(&mark->g_list);
			fsnotify_get_mark(mark);
		}
	}
	spin_unlock(&group->mark_lock);

	list_for_each_entry_safe(mark, lmark, &free_list, free_g_list) {
		fsnotify_destroy_mark(mark);
		fsnotify_put_mark(mark);
	}
}

/*
 * Given a group, destroy all of the marks associated with that group.
 */
void fsnotify_clear_marks_by_group(struct fsnotify_group *group)
{
	fsnotify_clear_marks_by_group_flags(group, (unsigned int)-1);
}

void fsnotify_duplicate_mark(struct fsnotify_mark *new, struct fsnotify_mark *old)
{
	assert_spin_locked(&old->lock);
	new->i.inode = old->i.inode;
	new->m.mnt = old->m.mnt;
	new->group = old->group;
	new->mask = old->mask;
	new->free_mark = old->free_mark;
}

/*
 * Nothing fancy, just initialize lists and locks and counters.
 */
void fsnotify_init_mark(struct fsnotify_mark *mark,
			void (*free_mark)(struct fsnotify_mark *mark))
{
	memset(mark, 0, sizeof(*mark));
	spin_lock_init(&mark->lock);
	atomic_set(&mark->refcnt, 1);
	mark->free_mark = free_mark;
}

static int fsnotify_mark_destroy(void *ignored)
{
	struct fsnotify_mark *mark, *next;
	LIST_HEAD(private_destroy_list);

	for (;;) {
		spin_lock(&destroy_lock);
		/* exchange the list head */
		list_replace_init(&destroy_list, &private_destroy_list);
		spin_unlock(&destroy_lock);

		/* wait for any SRCU readers still walking mark lists before the final puts below */
		synchronize_srcu(&fsnotify_mark_srcu);

		list_for_each_entry_safe(mark, next, &private_destroy_list, destroy_list) {
			list_del_init(&mark->destroy_list);
			fsnotify_put_mark(mark);
		}

		wait_event_interruptible(destroy_waitq, !list_empty(&destroy_list));
	}

	return 0;
}

static int __init fsnotify_mark_init(void)
{
	struct task_struct *thread;

	thread = kthread_run(fsnotify_mark_destroy, NULL,
			     "fsnotify_mark");
	if (IS_ERR(thread))
		panic("unable to start fsnotify mark destruction thread.");

	return 0;
}
device_initcall(fsnotify_mark_init);
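
For context, a minimal self-contained userspace sketch (an editor's addition, not part of either listing) showing this machinery from the other side: every inotify watch is backed by one fsnotify mark, created through the fsnotify_init_mark()/fsnotify_add_mark() path above and torn down through fsnotify_destroy_mark() when the watch is removed. It compiles with any C99 compiler on Linux.

#include <stdio.h>
#include <unistd.h>
#include <sys/inotify.h>

int main(int argc, char *argv[])
{
	/* buffer aligned for struct inotify_event records */
	char buf[4096] __attribute__((aligned(__alignof__(struct inotify_event))));
	ssize_t len;
	int fd, wd;

	fd = inotify_init1(0);
	if (fd < 0) {
		perror("inotify_init1");
		return 1;
	}

	/* kernel side: allocate a mark, fsnotify_init_mark(), set the mask,
	 * then fsnotify_add_mark() hooks it onto the directory's inode */
	wd = inotify_add_watch(fd, argc > 1 ? argv[1] : ".",
			       IN_CREATE | IN_DELETE);
	if (wd < 0) {
		perror("inotify_add_watch");
		return 1;
	}

	len = read(fd, buf, sizeof(buf));	/* blocks until an event arrives */
	if (len <= 0)
		return 1;

	for (char *p = buf; p < buf + len;) {
		const struct inotify_event *ev = (const void *)p;

		printf("mask=0x%x name=%s\n", ev->mask,
		       ev->len ? ev->name : "(none)");
		p += sizeof(*ev) + ev->len;
	}

	/* kernel side: fsnotify_destroy_mark() unhooks the mark and queues it
	 * on destroy_list; the "fsnotify_mark" kthread frees it after
	 * synchronize_srcu() */
	inotify_rm_watch(fd, wd);
	close(fd);
	return 0;
}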