v4.6 (kernel/audit_tree.c)
  1#include "audit.h"
  2#include <linux/fsnotify_backend.h>
  3#include <linux/namei.h>
  4#include <linux/mount.h>
  5#include <linux/kthread.h>
  6#include <linux/slab.h>
  7
  8struct audit_tree;
  9struct audit_chunk;
 10
 11struct audit_tree {
 12	atomic_t count;
 13	int goner;
 14	struct audit_chunk *root;
 15	struct list_head chunks;
 16	struct list_head rules;
 17	struct list_head list;
 18	struct list_head same_root;
 19	struct rcu_head head;
 20	char pathname[];
 21};
 22
 23struct audit_chunk {
 24	struct list_head hash;
 25	struct fsnotify_mark mark;
 26	struct list_head trees;		/* with root here */
 27	int dead;
 28	int count;
 29	atomic_long_t refs;
 30	struct rcu_head head;
 31	struct node {
 32		struct list_head list;
 33		struct audit_tree *owner;
 34		unsigned index;		/* index; upper bit indicates 'will prune' */
 35	} owners[];
 36};
 37
 38static LIST_HEAD(tree_list);
 39static LIST_HEAD(prune_list);
 40static struct task_struct *prune_thread;
 41
 42/*
 43 * One struct chunk is attached to each inode of interest.
 44 * We replace struct chunk on tagging/untagging.
 45 * Rules have pointer to struct audit_tree.
 46 * Rules have struct list_head rlist forming a list of rules over
 47 * the same tree.
 48 * References to struct chunk are collected at audit_inode{,_child}()
 49 * time and used in AUDIT_TREE rule matching.
 50 * These references are dropped at the same time we are calling
 51 * audit_free_names(), etc.
 52 *
 53 * Cyclic lists galore:
 54 * tree.chunks anchors chunk.owners[].list			hash_lock
 55 * tree.rules anchors rule.rlist				audit_filter_mutex
 56 * chunk.trees anchors tree.same_root				hash_lock
 57 * chunk.hash is a hash with middle bits of watch.inode as
 58 * a hash function.						RCU, hash_lock
 59 *
 60 * tree is refcounted; one reference for "some rules on rules_list refer to
 61 * it", one for each chunk with pointer to it.
 62 *
 63 * chunk is refcounted by embedded fsnotify_mark + .refs (non-zero refcount
 64 * of watch contributes 1 to .refs).
 65 *
 66 * node.index allows us to get from node.list to the containing chunk.
 67 * MSB of that sucker is stolen to mark taggings that we might have to
 68 * revert - several operations have very unpleasant cleanup logic and
 69 * that makes a difference.  Some.
 70 */
 71
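
The node.index trick described in the comment above can be shown in isolation. Below is a minimal userspace sketch, not kernel code: the struct names are simplified stand-ins, and it only demonstrates how stripping the MSB flag from node.index steps back to owners[0] and from there to the enclosing chunk, exactly as find_chunk() does further down.

#include <stddef.h>
#include <stdio.h>

struct node { unsigned index; };	/* stand-in for struct node */
struct chunk {				/* stand-in for struct audit_chunk */
	int count;
	struct node owners[4];
};

static struct chunk *node_to_chunk(struct node *p)
{
	unsigned index = p->index & ~(1U << 31);	/* strip 'will prune' bit */

	p -= index;					/* step back to owners[0] */
	return (struct chunk *)((char *)p - offsetof(struct chunk, owners[0]));
}

int main(void)
{
	struct chunk c = { .count = 4 };
	int i;

	for (i = 0; i < 4; i++)
		c.owners[i].index = i;
	c.owners[2].index |= 1U << 31;	/* flagged entries still resolve */

	for (i = 0; i < 4; i++)
		printf("owners[%d] -> %p (chunk at %p)\n",
		       i, (void *)node_to_chunk(&c.owners[i]), (void *)&c);
	return 0;
}
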
 72static struct fsnotify_group *audit_tree_group;
 73
 74static struct audit_tree *alloc_tree(const char *s)
 75{
 76	struct audit_tree *tree;
 77
 78	tree = kmalloc(sizeof(struct audit_tree) + strlen(s) + 1, GFP_KERNEL);
 79	if (tree) {
 80		atomic_set(&tree->count, 1);
 81		tree->goner = 0;
 82		INIT_LIST_HEAD(&tree->chunks);
 83		INIT_LIST_HEAD(&tree->rules);
 84		INIT_LIST_HEAD(&tree->list);
 85		INIT_LIST_HEAD(&tree->same_root);
 86		tree->root = NULL;
 87		strcpy(tree->pathname, s);
 88	}
 89	return tree;
 90}
 91
 92static inline void get_tree(struct audit_tree *tree)
 93{
 94	atomic_inc(&tree->count);
 95}
 96
 97static inline void put_tree(struct audit_tree *tree)
 98{
 99	if (atomic_dec_and_test(&tree->count))
100		kfree_rcu(tree, head);
101}
102
103/* to avoid bringing the entire thing into audit.h */
104const char *audit_tree_path(struct audit_tree *tree)
105{
106	return tree->pathname;
107}
108
109static void free_chunk(struct audit_chunk *chunk)
110{
111	int i;
112
113	for (i = 0; i < chunk->count; i++) {
114		if (chunk->owners[i].owner)
115			put_tree(chunk->owners[i].owner);
116	}
117	kfree(chunk);
118}
119
120void audit_put_chunk(struct audit_chunk *chunk)
121{
122	if (atomic_long_dec_and_test(&chunk->refs))
123		free_chunk(chunk);
124}
125
126static void __put_chunk(struct rcu_head *rcu)
127{
128	struct audit_chunk *chunk = container_of(rcu, struct audit_chunk, head);
129	audit_put_chunk(chunk);
130}
131
132static void audit_tree_destroy_watch(struct fsnotify_mark *entry)
133{
134	struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark);
135	call_rcu(&chunk->head, __put_chunk);
136}
137
138static struct audit_chunk *alloc_chunk(int count)
139{
140	struct audit_chunk *chunk;
141	size_t size;
142	int i;
143
144	size = offsetof(struct audit_chunk, owners) + count * sizeof(struct node);
145	chunk = kzalloc(size, GFP_KERNEL);
146	if (!chunk)
147		return NULL;
148
149	INIT_LIST_HEAD(&chunk->hash);
150	INIT_LIST_HEAD(&chunk->trees);
151	chunk->count = count;
152	atomic_long_set(&chunk->refs, 1);
153	for (i = 0; i < count; i++) {
154		INIT_LIST_HEAD(&chunk->owners[i].list);
155		chunk->owners[i].index = i;
156	}
157	fsnotify_init_mark(&chunk->mark, audit_tree_destroy_watch);
158	chunk->mark.mask = FS_IN_IGNORED;
159	return chunk;
160}
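
alloc_chunk() sizes the allocation with offsetof() so that the flexible owners[] array gets exactly count elements. A minimal userspace sketch of the same idiom (the struct and names here are illustrative, not the kernel's):

#include <stdlib.h>
#include <stddef.h>

struct vec {
	int count;
	int data[];		/* flexible array member, like owners[] */
};

static struct vec *vec_alloc(int count)
{
	/* header plus count trailing elements, zeroed as kzalloc() would */
	struct vec *v = calloc(1, offsetof(struct vec, data) +
				  count * sizeof(int));

	if (v)
		v->count = count;
	return v;
}

int main(void)
{
	struct vec *v = vec_alloc(8);

	if (v)
		v->data[7] = 42;	/* last element is within the allocation */
	free(v);
	return 0;
}
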
161
162enum {HASH_SIZE = 128};
163static struct list_head chunk_hash_heads[HASH_SIZE];
164static __cacheline_aligned_in_smp DEFINE_SPINLOCK(hash_lock);
165
166static inline struct list_head *chunk_hash(const struct inode *inode)
167{
168	unsigned long n = (unsigned long)inode / L1_CACHE_BYTES;
169	return chunk_hash_heads + n % HASH_SIZE;
170}
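
A sketch of the bucket computation: dividing the inode's address by L1_CACHE_BYTES drops the low bits, which are constant for cache-aligned allocations, so the modulo keys off the middle bits, as the "Cyclic lists galore" comment says. Standalone, with an assumed 64-byte cache line and a fabricated address:

#include <stdio.h>

#define L1_CACHE_BYTES	64	/* assumption; arch-dependent in the kernel */
#define HASH_SIZE	128

static unsigned bucket(unsigned long inode_addr)
{
	unsigned long n = inode_addr / L1_CACHE_BYTES;

	return n % HASH_SIZE;
}

int main(void)
{
	unsigned long addr = 0x04a3c740UL;	/* fabricated inode address */

	printf("bucket = %u\n", bucket(addr));	/* middle bits pick the bucket */
	return 0;
}
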
171
172/* hash_lock & entry->lock is held by caller */
173static void insert_hash(struct audit_chunk *chunk)
174{
175	struct fsnotify_mark *entry = &chunk->mark;
176	struct list_head *list;
177
178	if (!entry->inode)
179		return;
180	list = chunk_hash(entry->inode);
181	list_add_rcu(&chunk->hash, list);
182}
183
184/* called under rcu_read_lock */
185struct audit_chunk *audit_tree_lookup(const struct inode *inode)
186{
187	struct list_head *list = chunk_hash(inode);
188	struct audit_chunk *p;
189
190	list_for_each_entry_rcu(p, list, hash) {
191		/* mark.inode may have gone NULL, but who cares? */
192		if (p->mark.inode == inode) {
193			atomic_long_inc(&p->refs);
194			return p;
195		}
196	}
197	return NULL;
198}
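
The comment above fixes the calling convention: the caller holds rcu_read_lock(), and a non-NULL return carries a reference taken via atomic_long_inc(&p->refs). A hedged sketch of a hypothetical caller, not a quotation of the real call sites in auditsc.c:

	/* hypothetical caller; 'inode' and 'tree' are assumed in scope */
	struct audit_chunk *chunk;

	rcu_read_lock();
	chunk = audit_tree_lookup(inode);	/* grabs a ref on success */
	rcu_read_unlock();
	if (chunk) {
		if (audit_tree_match(chunk, tree))
			; /* ... note the match ... */
		audit_put_chunk(chunk);		/* drop the lookup's ref */
	}
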
199
200bool audit_tree_match(struct audit_chunk *chunk, struct audit_tree *tree)
201{
202	int n;
203	for (n = 0; n < chunk->count; n++)
204		if (chunk->owners[n].owner == tree)
205			return true;
206	return false;
207}
208
209/* tagging and untagging inodes with trees */
210
211static struct audit_chunk *find_chunk(struct node *p)
212{
213	int index = p->index & ~(1U<<31);
214	p -= index;
215	return container_of(p, struct audit_chunk, owners[0]);
216}
217
218static void untag_chunk(struct node *p)
219{
220	struct audit_chunk *chunk = find_chunk(p);
221	struct fsnotify_mark *entry = &chunk->mark;
222	struct audit_chunk *new = NULL;
223	struct audit_tree *owner;
224	int size = chunk->count - 1;
225	int i, j;
226
227	fsnotify_get_mark(entry);
228
229	spin_unlock(&hash_lock);
230
231	if (size)
232		new = alloc_chunk(size);
233
234	spin_lock(&entry->lock);
235	if (chunk->dead || !entry->inode) {
236		spin_unlock(&entry->lock);
237		if (new)
238			free_chunk(new);
239		goto out;
240	}
241
242	owner = p->owner;
243
244	if (!size) {
245		chunk->dead = 1;
246		spin_lock(&hash_lock);
247		list_del_init(&chunk->trees);
248		if (owner->root == chunk)
249			owner->root = NULL;
250		list_del_init(&p->list);
251		list_del_rcu(&chunk->hash);
252		spin_unlock(&hash_lock);
253		spin_unlock(&entry->lock);
254		fsnotify_destroy_mark(entry, audit_tree_group);
255		goto out;
256	}
257
258	if (!new)
259		goto Fallback;
260
261	fsnotify_duplicate_mark(&new->mark, entry);
262	if (fsnotify_add_mark(&new->mark, new->mark.group, new->mark.inode, NULL, 1)) {
263		fsnotify_put_mark(&new->mark);
264		goto Fallback;
265	}
266
267	chunk->dead = 1;
268	spin_lock(&hash_lock);
269	list_replace_init(&chunk->trees, &new->trees);
270	if (owner->root == chunk) {
271		list_del_init(&owner->same_root);
272		owner->root = NULL;
273	}
274
275	for (i = j = 0; j <= size; i++, j++) {
276		struct audit_tree *s;
277		if (&chunk->owners[j] == p) {
278			list_del_init(&p->list);
279			i--;
280			continue;
281		}
282		s = chunk->owners[j].owner;
283		new->owners[i].owner = s;
284		new->owners[i].index = chunk->owners[j].index - j + i;
285		if (!s) /* result of earlier fallback */
286			continue;
287		get_tree(s);
288		list_replace_init(&chunk->owners[j].list, &new->owners[i].list);
289	}
290
291	list_replace_rcu(&chunk->hash, &new->hash);
292	list_for_each_entry(owner, &new->trees, same_root)
293		owner->root = new;
294	spin_unlock(&hash_lock);
295	spin_unlock(&entry->lock);
296	fsnotify_destroy_mark(entry, audit_tree_group);
297	fsnotify_put_mark(&new->mark);	/* drop initial reference */
298	goto out;
299
300Fallback:
301	// do the best we can
302	spin_lock(&hash_lock);
303	if (owner->root == chunk) {
304		list_del_init(&owner->same_root);
305		owner->root = NULL;
306	}
307	list_del_init(&p->list);
308	p->owner = NULL;
309	put_tree(owner);
310	spin_unlock(&hash_lock);
311	spin_unlock(&entry->lock);
312out:
313	fsnotify_put_mark(entry);
314	spin_lock(&hash_lock);
315}
316
317static int create_chunk(struct inode *inode, struct audit_tree *tree)
318{
319	struct fsnotify_mark *entry;
320	struct audit_chunk *chunk = alloc_chunk(1);
321	if (!chunk)
322		return -ENOMEM;
323
324	entry = &chunk->mark;
325	if (fsnotify_add_mark(entry, audit_tree_group, inode, NULL, 0)) {
326		fsnotify_put_mark(entry);
327		return -ENOSPC;
328	}
329
330	spin_lock(&entry->lock);
331	spin_lock(&hash_lock);
332	if (tree->goner) {
333		spin_unlock(&hash_lock);
334		chunk->dead = 1;
335		spin_unlock(&entry->lock);
336		fsnotify_destroy_mark(entry, audit_tree_group);
337		fsnotify_put_mark(entry);
338		return 0;
339	}
340	chunk->owners[0].index = (1U << 31);
341	chunk->owners[0].owner = tree;
342	get_tree(tree);
343	list_add(&chunk->owners[0].list, &tree->chunks);
344	if (!tree->root) {
345		tree->root = chunk;
346		list_add(&tree->same_root, &chunk->trees);
347	}
348	insert_hash(chunk);
349	spin_unlock(&hash_lock);
350	spin_unlock(&entry->lock);
351	fsnotify_put_mark(entry);	/* drop initial reference */
352	return 0;
353}
354
355/* the first tagged inode becomes root of tree */
356static int tag_chunk(struct inode *inode, struct audit_tree *tree)
357{
358	struct fsnotify_mark *old_entry, *chunk_entry;
359	struct audit_tree *owner;
360	struct audit_chunk *chunk, *old;
361	struct node *p;
362	int n;
363
364	old_entry = fsnotify_find_inode_mark(audit_tree_group, inode);
365	if (!old_entry)
366		return create_chunk(inode, tree);
367
368	old = container_of(old_entry, struct audit_chunk, mark);
369
370	/* are we already there? */
371	spin_lock(&hash_lock);
372	for (n = 0; n < old->count; n++) {
373		if (old->owners[n].owner == tree) {
374			spin_unlock(&hash_lock);
375			fsnotify_put_mark(old_entry);
376			return 0;
377		}
378	}
379	spin_unlock(&hash_lock);
380
381	chunk = alloc_chunk(old->count + 1);
382	if (!chunk) {
383		fsnotify_put_mark(old_entry);
384		return -ENOMEM;
385	}
386
387	chunk_entry = &chunk->mark;
388
389	spin_lock(&old_entry->lock);
390	if (!old_entry->inode) {
391		/* old_entry is being shot, let's just lie */
392		spin_unlock(&old_entry->lock);
393		fsnotify_put_mark(old_entry);
394		free_chunk(chunk);
395		return -ENOENT;
396	}
397
398	fsnotify_duplicate_mark(chunk_entry, old_entry);
399	if (fsnotify_add_mark(chunk_entry, chunk_entry->group, chunk_entry->inode, NULL, 1)) {
400		spin_unlock(&old_entry->lock);
401		fsnotify_put_mark(chunk_entry);
402		fsnotify_put_mark(old_entry);
403		return -ENOSPC;
404	}
405
406	/* even though we hold old_entry->lock, this is safe since chunk_entry->lock could NEVER have been grabbed before */
407	spin_lock(&chunk_entry->lock);
408	spin_lock(&hash_lock);
409
410	/* we now hold old_entry->lock, chunk_entry->lock, and hash_lock */
411	if (tree->goner) {
412		spin_unlock(&hash_lock);
413		chunk->dead = 1;
414		spin_unlock(&chunk_entry->lock);
415		spin_unlock(&old_entry->lock);
416
417		fsnotify_destroy_mark(chunk_entry, audit_tree_group);
418
419		fsnotify_put_mark(chunk_entry);
420		fsnotify_put_mark(old_entry);
421		return 0;
422	}
423	list_replace_init(&old->trees, &chunk->trees);
424	for (n = 0, p = chunk->owners; n < old->count; n++, p++) {
425		struct audit_tree *s = old->owners[n].owner;
426		p->owner = s;
427		p->index = old->owners[n].index;
428		if (!s) /* result of fallback in untag */
429			continue;
430		get_tree(s);
431		list_replace_init(&old->owners[n].list, &p->list);
432	}
433	p->index = (chunk->count - 1) | (1U<<31);
434	p->owner = tree;
435	get_tree(tree);
436	list_add(&p->list, &tree->chunks);
437	list_replace_rcu(&old->hash, &chunk->hash);
438	list_for_each_entry(owner, &chunk->trees, same_root)
439		owner->root = chunk;
440	old->dead = 1;
441	if (!tree->root) {
442		tree->root = chunk;
443		list_add(&tree->same_root, &chunk->trees);
444	}
445	spin_unlock(&hash_lock);
446	spin_unlock(&chunk_entry->lock);
447	spin_unlock(&old_entry->lock);
448	fsnotify_destroy_mark(old_entry, audit_tree_group);
449	fsnotify_put_mark(chunk_entry);	/* drop initial reference */
450	fsnotify_put_mark(old_entry); /* pairs with fsnotify_find_inode_mark() */
451	return 0;
452}
453
454static void audit_tree_log_remove_rule(struct audit_krule *rule)
455{
456	struct audit_buffer *ab;
457
458	ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
459	if (unlikely(!ab))
460		return;
461	audit_log_format(ab, "op=");
462	audit_log_string(ab, "remove_rule");
463	audit_log_format(ab, " dir=");
464	audit_log_untrustedstring(ab, rule->tree->pathname);
465	audit_log_key(ab, rule->filterkey);
466	audit_log_format(ab, " list=%d res=1", rule->listnr);
467	audit_log_end(ab);
468}
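
Pieced together from the format calls above, the CONFIG_CHANGE record body would come out roughly as follows; this is an illustrative reconstruction (assuming an unkeyed rule watching /etc on the exit filter list, where AUDIT_FILTER_EXIT is 4), not a captured log:

	op="remove_rule" dir="/etc" key=(null) list=4 res=1
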
469
470static void kill_rules(struct audit_tree *tree)
471{
472	struct audit_krule *rule, *next;
473	struct audit_entry *entry;
474
475	list_for_each_entry_safe(rule, next, &tree->rules, rlist) {
476		entry = container_of(rule, struct audit_entry, rule);
477
478		list_del_init(&rule->rlist);
479		if (rule->tree) {
480			/* not a half-baked one */
481			audit_tree_log_remove_rule(rule);
482			if (entry->rule.exe)
483				audit_remove_mark(entry->rule.exe);
484			rule->tree = NULL;
485			list_del_rcu(&entry->list);
486			list_del(&entry->rule.list);
487			call_rcu(&entry->rcu, audit_free_rule_rcu);
488		}
489	}
490}
491
492/*
493 * finish killing struct audit_tree
494 */
495static void prune_one(struct audit_tree *victim)
496{
497	spin_lock(&hash_lock);
498	while (!list_empty(&victim->chunks)) {
499		struct node *p;
500
501		p = list_entry(victim->chunks.next, struct node, list);
502
503		untag_chunk(p);
504	}
505	spin_unlock(&hash_lock);
506	put_tree(victim);
507}
508
509/* trim the uncommitted chunks from tree */
510
511static void trim_marked(struct audit_tree *tree)
512{
513	struct list_head *p, *q;
514	spin_lock(&hash_lock);
515	if (tree->goner) {
516		spin_unlock(&hash_lock);
517		return;
518	}
519	/* reorder */
520	for (p = tree->chunks.next; p != &tree->chunks; p = q) {
521		struct node *node = list_entry(p, struct node, list);
522		q = p->next;
523		if (node->index & (1U<<31)) {
524			list_del_init(p);
525			list_add(p, &tree->chunks);
526		}
527	}
528
529	while (!list_empty(&tree->chunks)) {
530		struct node *node;
531
532		node = list_entry(tree->chunks.next, struct node, list);
533
534		/* have we run out of marked? */
535		if (!(node->index & (1U<<31)))
536			break;
537
538		untag_chunk(node);
539	}
540	if (!tree->root && !tree->goner) {
541		tree->goner = 1;
542		spin_unlock(&hash_lock);
543		mutex_lock(&audit_filter_mutex);
544		kill_rules(tree);
545		list_del_init(&tree->list);
546		mutex_unlock(&audit_filter_mutex);
547		prune_one(tree);
548	} else {
549		spin_unlock(&hash_lock);
550	}
551}
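
The two loops above implement a pattern worth seeing on its own: pass one moves every MSB-flagged node to the head of the list, pass two pops from the head until it meets an unflagged node, so exactly the flagged chunks get untagged. A self-contained userspace sketch with a minimal list.h-style circular list (all names here are illustrative):

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void list_init(struct list_head *h) { h->next = h->prev = h; }

static void list_del_init(struct list_head *e)
{
	e->prev->next = e->next;
	e->next->prev = e->prev;
	list_init(e);
}

static void list_add(struct list_head *e, struct list_head *head)
{
	e->next = head->next;
	e->prev = head;
	head->next->prev = e;
	head->next = e;
}

struct demo_node { struct list_head list; unsigned index; };

int main(void)
{
	struct list_head chunks;
	struct demo_node n[5];
	struct list_head *p, *q;
	int i;

	list_init(&chunks);
	for (i = 0; i < 5; i++) {
		n[i].index = i;
		if (i == 1 || i == 3)
			n[i].index |= 1U << 31;		/* 'will prune' flag */
		list_add(&n[i].list, &chunks);
	}

	/* pass 1: move flagged nodes to the head, as trim_marked() does */
	for (p = chunks.next; p != &chunks; p = q) {
		struct demo_node *node = (struct demo_node *)p;
		q = p->next;
		if (node->index & (1U << 31)) {
			list_del_init(p);
			list_add(p, &chunks);
		}
	}

	/* pass 2: pop from the head until the first unflagged node */
	while (chunks.next != &chunks) {
		struct demo_node *node = (struct demo_node *)chunks.next;

		if (!(node->index & (1U << 31)))
			break;
		printf("untag node %u\n", node->index & ~(1U << 31));
		list_del_init(&node->list);
	}
	return 0;
}
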
552
553static void audit_schedule_prune(void);
554
555/* called with audit_filter_mutex */
556int audit_remove_tree_rule(struct audit_krule *rule)
557{
558	struct audit_tree *tree;
559	tree = rule->tree;
560	if (tree) {
561		spin_lock(&hash_lock);
562		list_del_init(&rule->rlist);
563		if (list_empty(&tree->rules) && !tree->goner) {
564			tree->root = NULL;
565			list_del_init(&tree->same_root);
566			tree->goner = 1;
567			list_move(&tree->list, &prune_list);
568			rule->tree = NULL;
569			spin_unlock(&hash_lock);
570			audit_schedule_prune();
571			return 1;
572		}
573		rule->tree = NULL;
574		spin_unlock(&hash_lock);
575		return 1;
576	}
577	return 0;
578}
579
580static int compare_root(struct vfsmount *mnt, void *arg)
581{
582	return d_backing_inode(mnt->mnt_root) == arg;
583}
584
585void audit_trim_trees(void)
586{
587	struct list_head cursor;
588
589	mutex_lock(&audit_filter_mutex);
590	list_add(&cursor, &tree_list);
591	while (cursor.next != &tree_list) {
592		struct audit_tree *tree;
593		struct path path;
594		struct vfsmount *root_mnt;
595		struct node *node;
596		int err;
597
598		tree = container_of(cursor.next, struct audit_tree, list);
599		get_tree(tree);
600		list_del(&cursor);
601		list_add(&cursor, &tree->list);
602		mutex_unlock(&audit_filter_mutex);
603
604		err = kern_path(tree->pathname, 0, &path);
605		if (err)
606			goto skip_it;
607
608		root_mnt = collect_mounts(&path);
609		path_put(&path);
610		if (IS_ERR(root_mnt))
611			goto skip_it;
612
613		spin_lock(&hash_lock);
614		list_for_each_entry(node, &tree->chunks, list) {
615			struct audit_chunk *chunk = find_chunk(node);
616			/* this could be NULL if the watch is dying elsewhere... */
617			struct inode *inode = chunk->mark.inode;
618			node->index |= 1U<<31;
619			if (iterate_mounts(compare_root, inode, root_mnt))
620				node->index &= ~(1U<<31);
621		}
622		spin_unlock(&hash_lock);
623		trim_marked(tree);
624		drop_collected_mounts(root_mnt);
625skip_it:
626		put_tree(tree);
627		mutex_lock(&audit_filter_mutex);
628	}
629	list_del(&cursor);
630	mutex_unlock(&audit_filter_mutex);
631}
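
audit_trim_trees() above (and audit_tag_tree() below) walk tree_list through a dummy list_head used as a cursor: the cursor is re-spliced after each element just before audit_filter_mutex is dropped, so when the mutex is retaken the walk resumes from a stable position even if the list changed meanwhile. A distilled kernel-style sketch of the idiom (a fragment, names illustrative):

	struct list_head cursor;

	mutex_lock(&audit_filter_mutex);
	list_add(&cursor, &tree_list);			/* cursor at the head */
	while (cursor.next != &tree_list) {
		struct audit_tree *tree =
			container_of(cursor.next, struct audit_tree, list);

		get_tree(tree);				/* pin the element */
		list_del(&cursor);
		list_add(&cursor, &tree->list);		/* park cursor after it */
		mutex_unlock(&audit_filter_mutex);

		/* ... sleepable work on tree; concurrent list changes are
		 * fine because the cursor preserves our position ... */

		put_tree(tree);
		mutex_lock(&audit_filter_mutex);
	}
	list_del(&cursor);
	mutex_unlock(&audit_filter_mutex);
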
632
633int audit_make_tree(struct audit_krule *rule, char *pathname, u32 op)
634{
635
636	if (pathname[0] != '/' ||
637	    rule->listnr != AUDIT_FILTER_EXIT ||
638	    op != Audit_equal ||
639	    rule->inode_f || rule->watch || rule->tree)
640		return -EINVAL;
641	rule->tree = alloc_tree(pathname);
642	if (!rule->tree)
643		return -ENOMEM;
644	return 0;
645}
646
647void audit_put_tree(struct audit_tree *tree)
648{
649	put_tree(tree);
650}
651
652static int tag_mount(struct vfsmount *mnt, void *arg)
653{
654	return tag_chunk(d_backing_inode(mnt->mnt_root), arg);
655}
656
657/*
658 * That gets run when evict_chunk() ends up needing to kill audit_tree.
659 * Runs from a separate thread.
660 */
661static int prune_tree_thread(void *unused)
662{
663	for (;;) {
664		set_current_state(TASK_INTERRUPTIBLE);
665		if (list_empty(&prune_list))
666			schedule();
667		__set_current_state(TASK_RUNNING);
668
669		mutex_lock(&audit_cmd_mutex);
670		mutex_lock(&audit_filter_mutex);
671
672		while (!list_empty(&prune_list)) {
673			struct audit_tree *victim;
674
675			victim = list_entry(prune_list.next,
676					struct audit_tree, list);
677			list_del_init(&victim->list);
678
679			mutex_unlock(&audit_filter_mutex);
680
681			prune_one(victim);
682
683			mutex_lock(&audit_filter_mutex);
684		}
685
686		mutex_unlock(&audit_filter_mutex);
687		mutex_unlock(&audit_cmd_mutex);
688	}
689	return 0;
690}
691
692static int audit_launch_prune(void)
693{
694	if (prune_thread)
695		return 0;
696	prune_thread = kthread_create(prune_tree_thread, NULL,
697				"audit_prune_tree");
698	if (IS_ERR(prune_thread)) {
699		pr_err("cannot start thread audit_prune_tree");
700		prune_thread = NULL;
701		return -ENOMEM;
702	} else {
703		wake_up_process(prune_thread);
704		return 0;
705	}
706}
707
708/* called with audit_filter_mutex */
709int audit_add_tree_rule(struct audit_krule *rule)
710{
711	struct audit_tree *seed = rule->tree, *tree;
712	struct path path;
713	struct vfsmount *mnt;
714	int err;
715
716	rule->tree = NULL;
717	list_for_each_entry(tree, &tree_list, list) {
718		if (!strcmp(seed->pathname, tree->pathname)) {
719			put_tree(seed);
720			rule->tree = tree;
721			list_add(&rule->rlist, &tree->rules);
722			return 0;
723		}
724	}
725	tree = seed;
726	list_add(&tree->list, &tree_list);
727	list_add(&rule->rlist, &tree->rules);
728	/* do not set rule->tree yet */
729	mutex_unlock(&audit_filter_mutex);
730
731	if (unlikely(!prune_thread)) {
732		err = audit_launch_prune();
733		if (err)
734			goto Err;
735	}
736
737	err = kern_path(tree->pathname, 0, &path);
738	if (err)
739		goto Err;
740	mnt = collect_mounts(&path);
741	path_put(&path);
742	if (IS_ERR(mnt)) {
743		err = PTR_ERR(mnt);
744		goto Err;
745	}
746
747	get_tree(tree);
748	err = iterate_mounts(tag_mount, tree, mnt);
749	drop_collected_mounts(mnt);
750
751	if (!err) {
752		struct node *node;
753		spin_lock(&hash_lock);
754		list_for_each_entry(node, &tree->chunks, list)
755			node->index &= ~(1U<<31);
756		spin_unlock(&hash_lock);
757	} else {
758		trim_marked(tree);
759		goto Err;
760	}
761
762	mutex_lock(&audit_filter_mutex);
763	if (list_empty(&rule->rlist)) {
764		put_tree(tree);
765		return -ENOENT;
766	}
767	rule->tree = tree;
768	put_tree(tree);
769
770	return 0;
771Err:
772	mutex_lock(&audit_filter_mutex);
773	list_del_init(&tree->list);
774	list_del_init(&tree->rules);
775	put_tree(tree);
776	return err;
777}
778
779int audit_tag_tree(char *old, char *new)
780{
781	struct list_head cursor, barrier;
782	int failed = 0;
783	struct path path1, path2;
784	struct vfsmount *tagged;
785	int err;
786
787	err = kern_path(new, 0, &path2);
788	if (err)
789		return err;
790	tagged = collect_mounts(&path2);
791	path_put(&path2);
792	if (IS_ERR(tagged))
793		return PTR_ERR(tagged);
794
795	err = kern_path(old, 0, &path1);
796	if (err) {
797		drop_collected_mounts(tagged);
798		return err;
799	}
800
801	mutex_lock(&audit_filter_mutex);
802	list_add(&barrier, &tree_list);
803	list_add(&cursor, &barrier);
804
805	while (cursor.next != &tree_list) {
806		struct audit_tree *tree;
807		int good_one = 0;
808
809		tree = container_of(cursor.next, struct audit_tree, list);
810		get_tree(tree);
811		list_del(&cursor);
812		list_add(&cursor, &tree->list);
813		mutex_unlock(&audit_filter_mutex);
814
815		err = kern_path(tree->pathname, 0, &path2);
816		if (!err) {
817			good_one = path_is_under(&path1, &path2);
818			path_put(&path2);
819		}
820
821		if (!good_one) {
822			put_tree(tree);
823			mutex_lock(&audit_filter_mutex);
824			continue;
825		}
826
827		failed = iterate_mounts(tag_mount, tree, tagged);
828		if (failed) {
829			put_tree(tree);
830			mutex_lock(&audit_filter_mutex);
831			break;
832		}
833
834		mutex_lock(&audit_filter_mutex);
835		spin_lock(&hash_lock);
836		if (!tree->goner) {
837			list_del(&tree->list);
838			list_add(&tree->list, &tree_list);
839		}
840		spin_unlock(&hash_lock);
841		put_tree(tree);
842	}
843
844	while (barrier.prev != &tree_list) {
845		struct audit_tree *tree;
846
847		tree = container_of(barrier.prev, struct audit_tree, list);
848		get_tree(tree);
849		list_del(&tree->list);
850		list_add(&tree->list, &barrier);
851		mutex_unlock(&audit_filter_mutex);
852
853		if (!failed) {
854			struct node *node;
855			spin_lock(&hash_lock);
856			list_for_each_entry(node, &tree->chunks, list)
857				node->index &= ~(1U<<31);
858			spin_unlock(&hash_lock);
859		} else {
860			trim_marked(tree);
861		}
862
863		put_tree(tree);
864		mutex_lock(&audit_filter_mutex);
865	}
866	list_del(&barrier);
867	list_del(&cursor);
868	mutex_unlock(&audit_filter_mutex);
869	path_put(&path1);
870	drop_collected_mounts(tagged);
871	return failed;
872}
873
874
875static void audit_schedule_prune(void)
876{
877	wake_up_process(prune_thread);
878}
879
880/*
881 * ... and that one is done if evict_chunk() decides to delay until the end
882 * of syscall.  Runs synchronously.
883 */
884void audit_kill_trees(struct list_head *list)
885{
886	mutex_lock(&audit_cmd_mutex);
887	mutex_lock(&audit_filter_mutex);
888
889	while (!list_empty(list)) {
890		struct audit_tree *victim;
891
892		victim = list_entry(list->next, struct audit_tree, list);
893		kill_rules(victim);
894		list_del_init(&victim->list);
895
896		mutex_unlock(&audit_filter_mutex);
897
898		prune_one(victim);
899
900		mutex_lock(&audit_filter_mutex);
901	}
902
903	mutex_unlock(&audit_filter_mutex);
904	mutex_unlock(&audit_cmd_mutex);
905}
906
907/*
908 *  Here comes the stuff asynchronous to auditctl operations
909 */
910
911static void evict_chunk(struct audit_chunk *chunk)
912{
913	struct audit_tree *owner;
914	struct list_head *postponed = audit_killed_trees();
915	int need_prune = 0;
916	int n;
917
918	if (chunk->dead)
919		return;
920
921	chunk->dead = 1;
922	mutex_lock(&audit_filter_mutex);
923	spin_lock(&hash_lock);
924	while (!list_empty(&chunk->trees)) {
925		owner = list_entry(chunk->trees.next,
926				   struct audit_tree, same_root);
927		owner->goner = 1;
928		owner->root = NULL;
929		list_del_init(&owner->same_root);
930		spin_unlock(&hash_lock);
931		if (!postponed) {
932			kill_rules(owner);
933			list_move(&owner->list, &prune_list);
934			need_prune = 1;
935		} else {
936			list_move(&owner->list, postponed);
937		}
938		spin_lock(&hash_lock);
939	}
940	list_del_rcu(&chunk->hash);
941	for (n = 0; n < chunk->count; n++)
942		list_del_init(&chunk->owners[n].list);
943	spin_unlock(&hash_lock);
944	mutex_unlock(&audit_filter_mutex);
945	if (need_prune)
946		audit_schedule_prune();
947}
948
949static int audit_tree_handle_event(struct fsnotify_group *group,
950				   struct inode *to_tell,
951				   struct fsnotify_mark *inode_mark,
952				   struct fsnotify_mark *vfsmount_mark,
953				   u32 mask, void *data, int data_type,
954				   const unsigned char *file_name, u32 cookie)
955{
956	return 0;
957}
958
959static void audit_tree_freeing_mark(struct fsnotify_mark *entry, struct fsnotify_group *group)
960{
961	struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark);
962
963	evict_chunk(chunk);
964
965	/*
966	 * We are guaranteed to have at least one reference to the mark from
967	 * either the inode or the caller of fsnotify_destroy_mark().
968	 */
969	BUG_ON(atomic_read(&entry->refcnt) < 1);
970}
971
972static const struct fsnotify_ops audit_tree_ops = {
973	.handle_event = audit_tree_handle_event,
974	.freeing_mark = audit_tree_freeing_mark,
975};
976
977static int __init audit_tree_init(void)
978{
979	int i;
980
981	audit_tree_group = fsnotify_alloc_group(&audit_tree_ops);
982	if (IS_ERR(audit_tree_group))
983		audit_panic("cannot initialize fsnotify group for rectree watches");
984
985	for (i = 0; i < HASH_SIZE; i++)
986		INIT_LIST_HEAD(&chunk_hash_heads[i]);
987
988	return 0;
989}
990__initcall(audit_tree_init);
v3.15 (kernel/audit_tree.c)
  1#include "audit.h"
  2#include <linux/fsnotify_backend.h>
  3#include <linux/namei.h>
  4#include <linux/mount.h>
  5#include <linux/kthread.h>
  6#include <linux/slab.h>
  7
  8struct audit_tree;
  9struct audit_chunk;
 10
 11struct audit_tree {
 12	atomic_t count;
 13	int goner;
 14	struct audit_chunk *root;
 15	struct list_head chunks;
 16	struct list_head rules;
 17	struct list_head list;
 18	struct list_head same_root;
 19	struct rcu_head head;
 20	char pathname[];
 21};
 22
 23struct audit_chunk {
 24	struct list_head hash;
 25	struct fsnotify_mark mark;
 26	struct list_head trees;		/* with root here */
 27	int dead;
 28	int count;
 29	atomic_long_t refs;
 30	struct rcu_head head;
 31	struct node {
 32		struct list_head list;
 33		struct audit_tree *owner;
 34		unsigned index;		/* index; upper bit indicates 'will prune' */
 35	} owners[];
 36};
 37
 38static LIST_HEAD(tree_list);
 39static LIST_HEAD(prune_list);
 40
 41/*
 42 * One struct chunk is attached to each inode of interest.
 43 * We replace struct chunk on tagging/untagging.
 44 * Rules have pointer to struct audit_tree.
 45 * Rules have struct list_head rlist forming a list of rules over
 46 * the same tree.
 47 * References to struct chunk are collected at audit_inode{,_child}()
 48 * time and used in AUDIT_TREE rule matching.
 49 * These references are dropped at the same time we are calling
 50 * audit_free_names(), etc.
 51 *
 52 * Cyclic lists galore:
 53 * tree.chunks anchors chunk.owners[].list			hash_lock
 54 * tree.rules anchors rule.rlist				audit_filter_mutex
 55 * chunk.trees anchors tree.same_root				hash_lock
 56 * chunk.hash is a hash with middle bits of watch.inode as
 57 * a hash function.						RCU, hash_lock
 58 *
 59 * tree is refcounted; one reference for "some rules on rules_list refer to
 60 * it", one for each chunk with pointer to it.
 61 *
 62 * chunk is refcounted by embedded fsnotify_mark + .refs (non-zero refcount
 63 * of watch contributes 1 to .refs).
 64 *
 65 * node.index allows us to get from node.list to the containing chunk.
 66 * MSB of that sucker is stolen to mark taggings that we might have to
 67 * revert - several operations have very unpleasant cleanup logic and
 68 * that makes a difference.  Some.
 69 */
 70
 71static struct fsnotify_group *audit_tree_group;
 72
 73static struct audit_tree *alloc_tree(const char *s)
 74{
 75	struct audit_tree *tree;
 76
 77	tree = kmalloc(sizeof(struct audit_tree) + strlen(s) + 1, GFP_KERNEL);
 78	if (tree) {
 79		atomic_set(&tree->count, 1);
 80		tree->goner = 0;
 81		INIT_LIST_HEAD(&tree->chunks);
 82		INIT_LIST_HEAD(&tree->rules);
 83		INIT_LIST_HEAD(&tree->list);
 84		INIT_LIST_HEAD(&tree->same_root);
 85		tree->root = NULL;
 86		strcpy(tree->pathname, s);
 87	}
 88	return tree;
 89}
 90
 91static inline void get_tree(struct audit_tree *tree)
 92{
 93	atomic_inc(&tree->count);
 94}
 95
 96static inline void put_tree(struct audit_tree *tree)
 97{
 98	if (atomic_dec_and_test(&tree->count))
 99		kfree_rcu(tree, head);
100}
101
102/* to avoid bringing the entire thing into audit.h */
103const char *audit_tree_path(struct audit_tree *tree)
104{
105	return tree->pathname;
106}
107
108static void free_chunk(struct audit_chunk *chunk)
109{
110	int i;
111
112	for (i = 0; i < chunk->count; i++) {
113		if (chunk->owners[i].owner)
114			put_tree(chunk->owners[i].owner);
115	}
116	kfree(chunk);
117}
118
119void audit_put_chunk(struct audit_chunk *chunk)
120{
121	if (atomic_long_dec_and_test(&chunk->refs))
122		free_chunk(chunk);
123}
124
125static void __put_chunk(struct rcu_head *rcu)
126{
127	struct audit_chunk *chunk = container_of(rcu, struct audit_chunk, head);
128	audit_put_chunk(chunk);
129}
130
131static void audit_tree_destroy_watch(struct fsnotify_mark *entry)
132{
133	struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark);
134	call_rcu(&chunk->head, __put_chunk);
135}
136
137static struct audit_chunk *alloc_chunk(int count)
138{
139	struct audit_chunk *chunk;
140	size_t size;
141	int i;
142
143	size = offsetof(struct audit_chunk, owners) + count * sizeof(struct node);
144	chunk = kzalloc(size, GFP_KERNEL);
145	if (!chunk)
146		return NULL;
147
148	INIT_LIST_HEAD(&chunk->hash);
149	INIT_LIST_HEAD(&chunk->trees);
150	chunk->count = count;
151	atomic_long_set(&chunk->refs, 1);
152	for (i = 0; i < count; i++) {
153		INIT_LIST_HEAD(&chunk->owners[i].list);
154		chunk->owners[i].index = i;
155	}
156	fsnotify_init_mark(&chunk->mark, audit_tree_destroy_watch);
157	return chunk;
158}
159
160enum {HASH_SIZE = 128};
161static struct list_head chunk_hash_heads[HASH_SIZE];
162static __cacheline_aligned_in_smp DEFINE_SPINLOCK(hash_lock);
163
164static inline struct list_head *chunk_hash(const struct inode *inode)
165{
166	unsigned long n = (unsigned long)inode / L1_CACHE_BYTES;
167	return chunk_hash_heads + n % HASH_SIZE;
168}
169
170/* hash_lock & entry->lock is held by caller */
171static void insert_hash(struct audit_chunk *chunk)
172{
173	struct fsnotify_mark *entry = &chunk->mark;
174	struct list_head *list;
175
176	if (!entry->i.inode)
177		return;
178	list = chunk_hash(entry->i.inode);
179	list_add_rcu(&chunk->hash, list);
180}
181
182/* called under rcu_read_lock */
183struct audit_chunk *audit_tree_lookup(const struct inode *inode)
184{
185	struct list_head *list = chunk_hash(inode);
186	struct audit_chunk *p;
187
188	list_for_each_entry_rcu(p, list, hash) {
189		/* mark.inode may have gone NULL, but who cares? */
190		if (p->mark.i.inode == inode) {
191			atomic_long_inc(&p->refs);
192			return p;
193		}
194	}
195	return NULL;
196}
197
198int audit_tree_match(struct audit_chunk *chunk, struct audit_tree *tree)
199{
200	int n;
201	for (n = 0; n < chunk->count; n++)
202		if (chunk->owners[n].owner == tree)
203			return 1;
204	return 0;
205}
206
207/* tagging and untagging inodes with trees */
208
209static struct audit_chunk *find_chunk(struct node *p)
210{
211	int index = p->index & ~(1U<<31);
212	p -= index;
213	return container_of(p, struct audit_chunk, owners[0]);
214}
215
216static void untag_chunk(struct node *p)
217{
218	struct audit_chunk *chunk = find_chunk(p);
219	struct fsnotify_mark *entry = &chunk->mark;
220	struct audit_chunk *new = NULL;
221	struct audit_tree *owner;
222	int size = chunk->count - 1;
223	int i, j;
224
225	fsnotify_get_mark(entry);
226
227	spin_unlock(&hash_lock);
228
229	if (size)
230		new = alloc_chunk(size);
231
232	spin_lock(&entry->lock);
233	if (chunk->dead || !entry->i.inode) {
234		spin_unlock(&entry->lock);
235		if (new)
236			free_chunk(new);
237		goto out;
238	}
239
240	owner = p->owner;
241
242	if (!size) {
243		chunk->dead = 1;
244		spin_lock(&hash_lock);
245		list_del_init(&chunk->trees);
246		if (owner->root == chunk)
247			owner->root = NULL;
248		list_del_init(&p->list);
249		list_del_rcu(&chunk->hash);
250		spin_unlock(&hash_lock);
251		spin_unlock(&entry->lock);
252		fsnotify_destroy_mark(entry, audit_tree_group);
253		goto out;
254	}
255
256	if (!new)
257		goto Fallback;
258
259	fsnotify_duplicate_mark(&new->mark, entry);
260	if (fsnotify_add_mark(&new->mark, new->mark.group, new->mark.i.inode, NULL, 1)) {
261		fsnotify_put_mark(&new->mark);
262		goto Fallback;
263	}
264
265	chunk->dead = 1;
266	spin_lock(&hash_lock);
267	list_replace_init(&chunk->trees, &new->trees);
268	if (owner->root == chunk) {
269		list_del_init(&owner->same_root);
270		owner->root = NULL;
271	}
272
273	for (i = j = 0; j <= size; i++, j++) {
274		struct audit_tree *s;
275		if (&chunk->owners[j] == p) {
276			list_del_init(&p->list);
277			i--;
278			continue;
279		}
280		s = chunk->owners[j].owner;
281		new->owners[i].owner = s;
282		new->owners[i].index = chunk->owners[j].index - j + i;
283		if (!s) /* result of earlier fallback */
284			continue;
285		get_tree(s);
286		list_replace_init(&chunk->owners[j].list, &new->owners[i].list);
287	}
288
289	list_replace_rcu(&chunk->hash, &new->hash);
290	list_for_each_entry(owner, &new->trees, same_root)
291		owner->root = new;
292	spin_unlock(&hash_lock);
293	spin_unlock(&entry->lock);
294	fsnotify_destroy_mark(entry, audit_tree_group);
295	fsnotify_put_mark(&new->mark);	/* drop initial reference */
296	goto out;
297
298Fallback:
299	// do the best we can
300	spin_lock(&hash_lock);
301	if (owner->root == chunk) {
302		list_del_init(&owner->same_root);
303		owner->root = NULL;
304	}
305	list_del_init(&p->list);
306	p->owner = NULL;
307	put_tree(owner);
308	spin_unlock(&hash_lock);
309	spin_unlock(&entry->lock);
310out:
311	fsnotify_put_mark(entry);
312	spin_lock(&hash_lock);
313}
314
315static int create_chunk(struct inode *inode, struct audit_tree *tree)
316{
317	struct fsnotify_mark *entry;
318	struct audit_chunk *chunk = alloc_chunk(1);
319	if (!chunk)
320		return -ENOMEM;
321
322	entry = &chunk->mark;
323	if (fsnotify_add_mark(entry, audit_tree_group, inode, NULL, 0)) {
324		fsnotify_put_mark(entry);
325		return -ENOSPC;
326	}
327
328	spin_lock(&entry->lock);
329	spin_lock(&hash_lock);
330	if (tree->goner) {
331		spin_unlock(&hash_lock);
332		chunk->dead = 1;
333		spin_unlock(&entry->lock);
334		fsnotify_destroy_mark(entry, audit_tree_group);
335		fsnotify_put_mark(entry);
336		return 0;
337	}
338	chunk->owners[0].index = (1U << 31);
339	chunk->owners[0].owner = tree;
340	get_tree(tree);
341	list_add(&chunk->owners[0].list, &tree->chunks);
342	if (!tree->root) {
343		tree->root = chunk;
344		list_add(&tree->same_root, &chunk->trees);
345	}
346	insert_hash(chunk);
347	spin_unlock(&hash_lock);
348	spin_unlock(&entry->lock);
349	fsnotify_put_mark(entry);	/* drop initial reference */
350	return 0;
351}
352
353/* the first tagged inode becomes root of tree */
354static int tag_chunk(struct inode *inode, struct audit_tree *tree)
355{
356	struct fsnotify_mark *old_entry, *chunk_entry;
357	struct audit_tree *owner;
358	struct audit_chunk *chunk, *old;
359	struct node *p;
360	int n;
361
362	old_entry = fsnotify_find_inode_mark(audit_tree_group, inode);
363	if (!old_entry)
364		return create_chunk(inode, tree);
365
366	old = container_of(old_entry, struct audit_chunk, mark);
367
368	/* are we already there? */
369	spin_lock(&hash_lock);
370	for (n = 0; n < old->count; n++) {
371		if (old->owners[n].owner == tree) {
372			spin_unlock(&hash_lock);
373			fsnotify_put_mark(old_entry);
374			return 0;
375		}
376	}
377	spin_unlock(&hash_lock);
378
379	chunk = alloc_chunk(old->count + 1);
380	if (!chunk) {
381		fsnotify_put_mark(old_entry);
382		return -ENOMEM;
383	}
384
385	chunk_entry = &chunk->mark;
386
387	spin_lock(&old_entry->lock);
388	if (!old_entry->i.inode) {
389		/* old_entry is being shot, let's just lie */
390		spin_unlock(&old_entry->lock);
391		fsnotify_put_mark(old_entry);
392		free_chunk(chunk);
393		return -ENOENT;
394	}
395
396	fsnotify_duplicate_mark(chunk_entry, old_entry);
397	if (fsnotify_add_mark(chunk_entry, chunk_entry->group, chunk_entry->i.inode, NULL, 1)) {
398		spin_unlock(&old_entry->lock);
399		fsnotify_put_mark(chunk_entry);
400		fsnotify_put_mark(old_entry);
401		return -ENOSPC;
402	}
403
404	/* even though we hold old_entry->lock, this is safe since chunk_entry->lock could NEVER have been grabbed before */
405	spin_lock(&chunk_entry->lock);
406	spin_lock(&hash_lock);
407
408	/* we now hold old_entry->lock, chunk_entry->lock, and hash_lock */
409	if (tree->goner) {
410		spin_unlock(&hash_lock);
411		chunk->dead = 1;
412		spin_unlock(&chunk_entry->lock);
413		spin_unlock(&old_entry->lock);
414
415		fsnotify_destroy_mark(chunk_entry, audit_tree_group);
416
417		fsnotify_put_mark(chunk_entry);
418		fsnotify_put_mark(old_entry);
419		return 0;
420	}
421	list_replace_init(&old->trees, &chunk->trees);
422	for (n = 0, p = chunk->owners; n < old->count; n++, p++) {
423		struct audit_tree *s = old->owners[n].owner;
424		p->owner = s;
425		p->index = old->owners[n].index;
426		if (!s) /* result of fallback in untag */
427			continue;
428		get_tree(s);
429		list_replace_init(&old->owners[n].list, &p->list);
430	}
431	p->index = (chunk->count - 1) | (1U<<31);
432	p->owner = tree;
433	get_tree(tree);
434	list_add(&p->list, &tree->chunks);
435	list_replace_rcu(&old->hash, &chunk->hash);
436	list_for_each_entry(owner, &chunk->trees, same_root)
437		owner->root = chunk;
438	old->dead = 1;
439	if (!tree->root) {
440		tree->root = chunk;
441		list_add(&tree->same_root, &chunk->trees);
442	}
443	spin_unlock(&hash_lock);
444	spin_unlock(&chunk_entry->lock);
445	spin_unlock(&old_entry->lock);
446	fsnotify_destroy_mark(old_entry, audit_tree_group);
447	fsnotify_put_mark(chunk_entry);	/* drop initial reference */
448	fsnotify_put_mark(old_entry); /* pairs with fsnotify_find_inode_mark() */
449	return 0;
450}
451
452static void audit_log_remove_rule(struct audit_krule *rule)
453{
454	struct audit_buffer *ab;
455
456	ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
457	if (unlikely(!ab))
458		return;
459	audit_log_format(ab, "op=");
460	audit_log_string(ab, "remove rule");
461	audit_log_format(ab, " dir=");
462	audit_log_untrustedstring(ab, rule->tree->pathname);
463	audit_log_key(ab, rule->filterkey);
464	audit_log_format(ab, " list=%d res=1", rule->listnr);
465	audit_log_end(ab);
466}
467
468static void kill_rules(struct audit_tree *tree)
469{
470	struct audit_krule *rule, *next;
471	struct audit_entry *entry;
472
473	list_for_each_entry_safe(rule, next, &tree->rules, rlist) {
474		entry = container_of(rule, struct audit_entry, rule);
475
476		list_del_init(&rule->rlist);
477		if (rule->tree) {
478			/* not a half-baked one */
479			audit_log_remove_rule(rule);
480			rule->tree = NULL;
481			list_del_rcu(&entry->list);
482			list_del(&entry->rule.list);
483			call_rcu(&entry->rcu, audit_free_rule_rcu);
484		}
485	}
486}
487
488/*
489 * finish killing struct audit_tree
490 */
491static void prune_one(struct audit_tree *victim)
492{
493	spin_lock(&hash_lock);
494	while (!list_empty(&victim->chunks)) {
495		struct node *p;
496
497		p = list_entry(victim->chunks.next, struct node, list);
498
499		untag_chunk(p);
500	}
501	spin_unlock(&hash_lock);
502	put_tree(victim);
503}
504
505/* trim the uncommitted chunks from tree */
506
507static void trim_marked(struct audit_tree *tree)
508{
509	struct list_head *p, *q;
510	spin_lock(&hash_lock);
511	if (tree->goner) {
512		spin_unlock(&hash_lock);
513		return;
514	}
515	/* reorder */
516	for (p = tree->chunks.next; p != &tree->chunks; p = q) {
517		struct node *node = list_entry(p, struct node, list);
518		q = p->next;
519		if (node->index & (1U<<31)) {
520			list_del_init(p);
521			list_add(p, &tree->chunks);
522		}
523	}
524
525	while (!list_empty(&tree->chunks)) {
526		struct node *node;
527
528		node = list_entry(tree->chunks.next, struct node, list);
529
530		/* have we run out of marked? */
531		if (!(node->index & (1U<<31)))
532			break;
533
534		untag_chunk(node);
535	}
536	if (!tree->root && !tree->goner) {
537		tree->goner = 1;
538		spin_unlock(&hash_lock);
539		mutex_lock(&audit_filter_mutex);
540		kill_rules(tree);
541		list_del_init(&tree->list);
542		mutex_unlock(&audit_filter_mutex);
543		prune_one(tree);
544	} else {
545		spin_unlock(&hash_lock);
546	}
547}
548
549static void audit_schedule_prune(void);
550
551/* called with audit_filter_mutex */
552int audit_remove_tree_rule(struct audit_krule *rule)
553{
554	struct audit_tree *tree;
555	tree = rule->tree;
556	if (tree) {
557		spin_lock(&hash_lock);
558		list_del_init(&rule->rlist);
559		if (list_empty(&tree->rules) && !tree->goner) {
560			tree->root = NULL;
561			list_del_init(&tree->same_root);
562			tree->goner = 1;
563			list_move(&tree->list, &prune_list);
564			rule->tree = NULL;
565			spin_unlock(&hash_lock);
566			audit_schedule_prune();
567			return 1;
568		}
569		rule->tree = NULL;
570		spin_unlock(&hash_lock);
571		return 1;
572	}
573	return 0;
574}
575
576static int compare_root(struct vfsmount *mnt, void *arg)
577{
578	return mnt->mnt_root->d_inode == arg;
579}
580
581void audit_trim_trees(void)
582{
583	struct list_head cursor;
584
585	mutex_lock(&audit_filter_mutex);
586	list_add(&cursor, &tree_list);
587	while (cursor.next != &tree_list) {
588		struct audit_tree *tree;
589		struct path path;
590		struct vfsmount *root_mnt;
591		struct node *node;
592		int err;
593
594		tree = container_of(cursor.next, struct audit_tree, list);
595		get_tree(tree);
596		list_del(&cursor);
597		list_add(&cursor, &tree->list);
598		mutex_unlock(&audit_filter_mutex);
599
600		err = kern_path(tree->pathname, 0, &path);
601		if (err)
602			goto skip_it;
603
604		root_mnt = collect_mounts(&path);
605		path_put(&path);
606		if (IS_ERR(root_mnt))
607			goto skip_it;
608
609		spin_lock(&hash_lock);
610		list_for_each_entry(node, &tree->chunks, list) {
611			struct audit_chunk *chunk = find_chunk(node);
612			/* this could be NULL if the watch is dying elsewhere... */
613			struct inode *inode = chunk->mark.i.inode;
614			node->index |= 1U<<31;
615			if (iterate_mounts(compare_root, inode, root_mnt))
616				node->index &= ~(1U<<31);
617		}
618		spin_unlock(&hash_lock);
619		trim_marked(tree);
620		drop_collected_mounts(root_mnt);
621skip_it:
622		put_tree(tree);
623		mutex_lock(&audit_filter_mutex);
624	}
625	list_del(&cursor);
626	mutex_unlock(&audit_filter_mutex);
627}
628
629int audit_make_tree(struct audit_krule *rule, char *pathname, u32 op)
630{
631
632	if (pathname[0] != '/' ||
633	    rule->listnr != AUDIT_FILTER_EXIT ||
634	    op != Audit_equal ||
635	    rule->inode_f || rule->watch || rule->tree)
636		return -EINVAL;
637	rule->tree = alloc_tree(pathname);
638	if (!rule->tree)
639		return -ENOMEM;
640	return 0;
641}
642
643void audit_put_tree(struct audit_tree *tree)
644{
645	put_tree(tree);
646}
647
648static int tag_mount(struct vfsmount *mnt, void *arg)
649{
650	return tag_chunk(mnt->mnt_root->d_inode, arg);
651}
652
653/* called with audit_filter_mutex */
654int audit_add_tree_rule(struct audit_krule *rule)
655{
656	struct audit_tree *seed = rule->tree, *tree;
657	struct path path;
658	struct vfsmount *mnt;
659	int err;
660
661	rule->tree = NULL;
662	list_for_each_entry(tree, &tree_list, list) {
663		if (!strcmp(seed->pathname, tree->pathname)) {
664			put_tree(seed);
665			rule->tree = tree;
666			list_add(&rule->rlist, &tree->rules);
667			return 0;
668		}
669	}
670	tree = seed;
671	list_add(&tree->list, &tree_list);
672	list_add(&rule->rlist, &tree->rules);
673	/* do not set rule->tree yet */
674	mutex_unlock(&audit_filter_mutex);
675
676	err = kern_path(tree->pathname, 0, &path);
677	if (err)
678		goto Err;
679	mnt = collect_mounts(&path);
680	path_put(&path);
681	if (IS_ERR(mnt)) {
682		err = PTR_ERR(mnt);
683		goto Err;
684	}
685
686	get_tree(tree);
687	err = iterate_mounts(tag_mount, tree, mnt);
688	drop_collected_mounts(mnt);
689
690	if (!err) {
691		struct node *node;
692		spin_lock(&hash_lock);
693		list_for_each_entry(node, &tree->chunks, list)
694			node->index &= ~(1U<<31);
695		spin_unlock(&hash_lock);
696	} else {
697		trim_marked(tree);
698		goto Err;
699	}
700
701	mutex_lock(&audit_filter_mutex);
702	if (list_empty(&rule->rlist)) {
703		put_tree(tree);
704		return -ENOENT;
705	}
706	rule->tree = tree;
707	put_tree(tree);
708
709	return 0;
710Err:
711	mutex_lock(&audit_filter_mutex);
712	list_del_init(&tree->list);
713	list_del_init(&tree->rules);
714	put_tree(tree);
715	return err;
716}
717
718int audit_tag_tree(char *old, char *new)
719{
720	struct list_head cursor, barrier;
721	int failed = 0;
722	struct path path1, path2;
723	struct vfsmount *tagged;
724	int err;
725
726	err = kern_path(new, 0, &path2);
727	if (err)
728		return err;
729	tagged = collect_mounts(&path2);
730	path_put(&path2);
731	if (IS_ERR(tagged))
732		return PTR_ERR(tagged);
733
734	err = kern_path(old, 0, &path1);
735	if (err) {
736		drop_collected_mounts(tagged);
737		return err;
738	}
739
740	mutex_lock(&audit_filter_mutex);
741	list_add(&barrier, &tree_list);
742	list_add(&cursor, &barrier);
743
744	while (cursor.next != &tree_list) {
745		struct audit_tree *tree;
746		int good_one = 0;
747
748		tree = container_of(cursor.next, struct audit_tree, list);
749		get_tree(tree);
750		list_del(&cursor);
751		list_add(&cursor, &tree->list);
752		mutex_unlock(&audit_filter_mutex);
753
754		err = kern_path(tree->pathname, 0, &path2);
755		if (!err) {
756			good_one = path_is_under(&path1, &path2);
757			path_put(&path2);
758		}
759
760		if (!good_one) {
761			put_tree(tree);
762			mutex_lock(&audit_filter_mutex);
763			continue;
764		}
765
766		failed = iterate_mounts(tag_mount, tree, tagged);
767		if (failed) {
768			put_tree(tree);
769			mutex_lock(&audit_filter_mutex);
770			break;
771		}
772
773		mutex_lock(&audit_filter_mutex);
774		spin_lock(&hash_lock);
775		if (!tree->goner) {
776			list_del(&tree->list);
777			list_add(&tree->list, &tree_list);
778		}
779		spin_unlock(&hash_lock);
780		put_tree(tree);
781	}
782
783	while (barrier.prev != &tree_list) {
784		struct audit_tree *tree;
785
786		tree = container_of(barrier.prev, struct audit_tree, list);
787		get_tree(tree);
788		list_del(&tree->list);
789		list_add(&tree->list, &barrier);
790		mutex_unlock(&audit_filter_mutex);
791
792		if (!failed) {
793			struct node *node;
794			spin_lock(&hash_lock);
795			list_for_each_entry(node, &tree->chunks, list)
796				node->index &= ~(1U<<31);
797			spin_unlock(&hash_lock);
798		} else {
799			trim_marked(tree);
800		}
801
802		put_tree(tree);
803		mutex_lock(&audit_filter_mutex);
804	}
805	list_del(&barrier);
806	list_del(&cursor);
807	mutex_unlock(&audit_filter_mutex);
808	path_put(&path1);
809	drop_collected_mounts(tagged);
810	return failed;
811}
812
813/*
814 * That gets run when evict_chunk() ends up needing to kill audit_tree.
815 * Runs from a separate thread.
816 */
817static int prune_tree_thread(void *unused)
818{
819	mutex_lock(&audit_cmd_mutex);
820	mutex_lock(&audit_filter_mutex);
821
822	while (!list_empty(&prune_list)) {
823		struct audit_tree *victim;
824
825		victim = list_entry(prune_list.next, struct audit_tree, list);
826		list_del_init(&victim->list);
827
828		mutex_unlock(&audit_filter_mutex);
829
830		prune_one(victim);
831
832		mutex_lock(&audit_filter_mutex);
833	}
834
835	mutex_unlock(&audit_filter_mutex);
836	mutex_unlock(&audit_cmd_mutex);
837	return 0;
838}
839
840static void audit_schedule_prune(void)
841{
842	kthread_run(prune_tree_thread, NULL, "audit_prune_tree");
843}
844
845/*
846 * ... and that one is done if evict_chunk() decides to delay until the end
847 * of syscall.  Runs synchronously.
848 */
849void audit_kill_trees(struct list_head *list)
850{
851	mutex_lock(&audit_cmd_mutex);
852	mutex_lock(&audit_filter_mutex);
853
854	while (!list_empty(list)) {
855		struct audit_tree *victim;
856
857		victim = list_entry(list->next, struct audit_tree, list);
858		kill_rules(victim);
859		list_del_init(&victim->list);
860
861		mutex_unlock(&audit_filter_mutex);
862
863		prune_one(victim);
864
865		mutex_lock(&audit_filter_mutex);
866	}
867
868	mutex_unlock(&audit_filter_mutex);
869	mutex_unlock(&audit_cmd_mutex);
870}
871
872/*
873 *  Here comes the stuff asynchronous to auditctl operations
874 */
875
876static void evict_chunk(struct audit_chunk *chunk)
877{
878	struct audit_tree *owner;
879	struct list_head *postponed = audit_killed_trees();
880	int need_prune = 0;
881	int n;
882
883	if (chunk->dead)
884		return;
885
886	chunk->dead = 1;
887	mutex_lock(&audit_filter_mutex);
888	spin_lock(&hash_lock);
889	while (!list_empty(&chunk->trees)) {
890		owner = list_entry(chunk->trees.next,
891				   struct audit_tree, same_root);
892		owner->goner = 1;
893		owner->root = NULL;
894		list_del_init(&owner->same_root);
895		spin_unlock(&hash_lock);
896		if (!postponed) {
897			kill_rules(owner);
898			list_move(&owner->list, &prune_list);
899			need_prune = 1;
900		} else {
901			list_move(&owner->list, postponed);
902		}
903		spin_lock(&hash_lock);
904	}
905	list_del_rcu(&chunk->hash);
906	for (n = 0; n < chunk->count; n++)
907		list_del_init(&chunk->owners[n].list);
908	spin_unlock(&hash_lock);
909	if (need_prune)
910		audit_schedule_prune();
911	mutex_unlock(&audit_filter_mutex);
912}
913
914static int audit_tree_handle_event(struct fsnotify_group *group,
915				   struct inode *to_tell,
916				   struct fsnotify_mark *inode_mark,
917				   struct fsnotify_mark *vfsmount_mark,
918				   u32 mask, void *data, int data_type,
919				   const unsigned char *file_name, u32 cookie)
920{
921	return 0;
922}
923
924static void audit_tree_freeing_mark(struct fsnotify_mark *entry, struct fsnotify_group *group)
925{
926	struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark);
927
928	evict_chunk(chunk);
929
930	/*
931	 * We are guaranteed to have at least one reference to the mark from
932	 * either the inode or the caller of fsnotify_destroy_mark().
933	 */
934	BUG_ON(atomic_read(&entry->refcnt) < 1);
935}
936
937static const struct fsnotify_ops audit_tree_ops = {
938	.handle_event = audit_tree_handle_event,
939	.freeing_mark = audit_tree_freeing_mark,
940};
941
942static int __init audit_tree_init(void)
943{
944	int i;
945
946	audit_tree_group = fsnotify_alloc_group(&audit_tree_ops);
947	if (IS_ERR(audit_tree_group))
948		audit_panic("cannot initialize fsnotify group for rectree watches");
949
950	for (i = 0; i < HASH_SIZE; i++)
951		INIT_LIST_HEAD(&chunk_hash_heads[i]);
952
953	return 0;
954}
955__initcall(audit_tree_init);