Linux Audit: kernel/audit_tree.c
v3.15
 
  1#include "audit.h"
  2#include <linux/fsnotify_backend.h>
  3#include <linux/namei.h>
  4#include <linux/mount.h>
  5#include <linux/kthread.h>
  6#include <linux/slab.h>
  7
  8struct audit_tree;
  9struct audit_chunk;
 10
 11struct audit_tree {
 12	atomic_t count;
 13	int goner;
 14	struct audit_chunk *root;
 15	struct list_head chunks;
 16	struct list_head rules;
 17	struct list_head list;
 18	struct list_head same_root;
 19	struct rcu_head head;
 20	char pathname[];
 21};
 22
 23struct audit_chunk {
 24	struct list_head hash;
 25	struct fsnotify_mark mark;
 26	struct list_head trees;		/* with root here */
 27	int dead;
 28	int count;
 29	atomic_long_t refs;
 30	struct rcu_head head;
 31	struct node {
 32		struct list_head list;
 33		struct audit_tree *owner;
 34		unsigned index;		/* index; upper bit indicates 'will prune' */
 35	} owners[];
 36};
 37
 38static LIST_HEAD(tree_list);
 39static LIST_HEAD(prune_list);
 40
 41/*
 42 * One struct chunk is attached to each inode of interest.
 43 * We replace struct chunk on tagging/untagging.
 44 * Rules have pointer to struct audit_tree.
 45 * Rules have struct list_head rlist forming a list of rules over
 46 * the same tree.
 47 * References to struct chunk are collected at audit_inode{,_child}()
 48 * time and used in AUDIT_TREE rule matching.
 49 * These references are dropped at the same time we are calling
 50 * audit_free_names(), etc.
 51 *
 52 * Cyclic lists galore:
 53 * tree.chunks anchors chunk.owners[].list			hash_lock
 54 * tree.rules anchors rule.rlist				audit_filter_mutex
 55 * chunk.trees anchors tree.same_root				hash_lock
 56 * chunk.hash is a hash with middle bits of watch.inode as
 57 * a hash function.						RCU, hash_lock
 58 *
 59 * tree is refcounted; one reference for "some rules on rules_list refer to
 60 * it", one for each chunk with pointer to it.
 61 *
 62 * chunk is refcounted by embedded fsnotify_mark + .refs (non-zero refcount
 63 * of watch contributes 1 to .refs).
 64 *
 65 * node.index allows to get from node.list to containing chunk.
 66 * MSB of that sucker is stolen to mark taggings that we might have to
 67 * revert - several operations have very unpleasant cleanup logics and
 68 * that makes a difference.  Some.
 69 */
 70
 71static struct fsnotify_group *audit_tree_group;
 72
 73static struct audit_tree *alloc_tree(const char *s)
 74{
 75	struct audit_tree *tree;
 76
 77	tree = kmalloc(sizeof(struct audit_tree) + strlen(s) + 1, GFP_KERNEL);
 78	if (tree) {
 79		atomic_set(&tree->count, 1);
 80		tree->goner = 0;
 81		INIT_LIST_HEAD(&tree->chunks);
 82		INIT_LIST_HEAD(&tree->rules);
 83		INIT_LIST_HEAD(&tree->list);
 84		INIT_LIST_HEAD(&tree->same_root);
 85		tree->root = NULL;
 86		strcpy(tree->pathname, s);
 87	}
 88	return tree;
 89}
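
A minimal userspace sketch (not part of the kernel source) of the allocation pattern alloc_tree() uses: a header struct plus a C99 flexible array member, sized in a single allocation. All names below are illustrative.

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	struct demo_tree {
		int count;
		char pathname[];	/* flexible array member */
	};

	static struct demo_tree *demo_alloc_tree(const char *s)
	{
		/* one allocation covers the header and the NUL-terminated path */
		struct demo_tree *t = malloc(sizeof(*t) + strlen(s) + 1);

		if (t) {
			t->count = 1;
			strcpy(t->pathname, s);
		}
		return t;
	}

	int main(void)
	{
		struct demo_tree *t = demo_alloc_tree("/var/log");

		if (!t)
			return 1;
		printf("%s\n", t->pathname);
		free(t);
		return 0;
	}
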
 90
 91static inline void get_tree(struct audit_tree *tree)
 92{
 93	atomic_inc(&tree->count);
 94}
 95
 96static inline void put_tree(struct audit_tree *tree)
 97{
 98	if (atomic_dec_and_test(&tree->count))
 99		kfree_rcu(tree, head);
100}
101
102/* to avoid bringing the entire thing in audit.h */
103const char *audit_tree_path(struct audit_tree *tree)
104{
105	return tree->pathname;
106}
107
108static void free_chunk(struct audit_chunk *chunk)
109{
110	int i;
111
112	for (i = 0; i < chunk->count; i++) {
113		if (chunk->owners[i].owner)
114			put_tree(chunk->owners[i].owner);
115	}
116	kfree(chunk);
117}
118
119void audit_put_chunk(struct audit_chunk *chunk)
120{
121	if (atomic_long_dec_and_test(&chunk->refs))
122		free_chunk(chunk);
123}
124
125static void __put_chunk(struct rcu_head *rcu)
126{
127	struct audit_chunk *chunk = container_of(rcu, struct audit_chunk, head);
128	audit_put_chunk(chunk);
129}
130
131static void audit_tree_destroy_watch(struct fsnotify_mark *entry)
132{
133	struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark);
134	call_rcu(&chunk->head, __put_chunk);
135}
136
137static struct audit_chunk *alloc_chunk(int count)
138{
139	struct audit_chunk *chunk;
140	size_t size;
141	int i;
142
143	size = offsetof(struct audit_chunk, owners) + count * sizeof(struct node);
144	chunk = kzalloc(size, GFP_KERNEL);
145	if (!chunk)
146		return NULL;
147
148	INIT_LIST_HEAD(&chunk->hash);
149	INIT_LIST_HEAD(&chunk->trees);
150	chunk->count = count;
151	atomic_long_set(&chunk->refs, 1);
152	for (i = 0; i < count; i++) {
153		INIT_LIST_HEAD(&chunk->owners[i].list);
154		chunk->owners[i].index = i;
155	}
156	fsnotify_init_mark(&chunk->mark, audit_tree_destroy_watch);
157	return chunk;
158}
159
160enum {HASH_SIZE = 128};
161static struct list_head chunk_hash_heads[HASH_SIZE];
162static __cacheline_aligned_in_smp DEFINE_SPINLOCK(hash_lock);
163
164static inline struct list_head *chunk_hash(const struct inode *inode)
165{
166	unsigned long n = (unsigned long)inode / L1_CACHE_BYTES;
167	return chunk_hash_heads + n % HASH_SIZE;
168}
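
The division by L1_CACHE_BYTES before the modulus is what the comment block above means by hashing on the "middle bits" of the inode: inodes come from aligned slab caches, so the low-order address bits are almost always zero and would collapse everything into a few buckets. A standalone sketch of the same computation, with assumed constants:

	#include <stdio.h>

	#define DEMO_L1_CACHE_BYTES 64	/* assumed cache-line size */
	#define DEMO_HASH_SIZE 128

	static unsigned long demo_chunk_hash(const void *inode)
	{
		/* divide out the alignment bits, then index the bucket array */
		unsigned long n = (unsigned long)inode / DEMO_L1_CACHE_BYTES;
		return n % DEMO_HASH_SIZE;
	}

	int main(void)
	{
		long a, b;

		printf("%lu %lu\n", demo_chunk_hash(&a), demo_chunk_hash(&b));
		return 0;
	}
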
169
170/* hash_lock & entry->lock is held by caller */
171static void insert_hash(struct audit_chunk *chunk)
172{
173	struct fsnotify_mark *entry = &chunk->mark;
174	struct list_head *list;
175
176	if (!entry->i.inode)
177		return;
178	list = chunk_hash(entry->i.inode);
179	list_add_rcu(&chunk->hash, list);
180}
181
182/* called under rcu_read_lock */
183struct audit_chunk *audit_tree_lookup(const struct inode *inode)
184{
185	struct list_head *list = chunk_hash(inode);
186	struct audit_chunk *p;
187
188	list_for_each_entry_rcu(p, list, hash) {
189		/* mark.inode may have gone NULL, but who cares? */
190		if (p->mark.i.inode == inode) {
191			atomic_long_inc(&p->refs);
192			return p;
193		}
194	}
195	return NULL;
196}
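
A kernel-style sketch (hypothetical caller, for illustration only) of how audit_tree_lookup() is meant to be used: the list walk is valid only under rcu_read_lock(), and the atomic_long_inc() of ->refs is what lets the caller keep the chunk after leaving the read-side section; the reference is dropped later with audit_put_chunk().

	static struct audit_chunk *demo_grab_chunk(const struct inode *inode)
	{
		struct audit_chunk *chunk;

		rcu_read_lock();
		chunk = audit_tree_lookup(inode);	/* bumps ->refs on success */
		rcu_read_unlock();
		/* caller now owns a reference; drop with audit_put_chunk() */
		return chunk;
	}
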
197
198int audit_tree_match(struct audit_chunk *chunk, struct audit_tree *tree)
199{
200	int n;
201	for (n = 0; n < chunk->count; n++)
202		if (chunk->owners[n].owner == tree)
203			return 1;
204	return 0;
205}
206
207/* tagging and untagging inodes with trees */
208
209static struct audit_chunk *find_chunk(struct node *p)
210{
211	int index = p->index & ~(1U<<31);
212	p -= index;
213	return container_of(p, struct audit_chunk, owners[0]);
214}
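
find_chunk() is the concrete form of the node.index trick described in the big comment above: the low 31 bits of index give the slot inside owners[], the MSB is the "will prune" tag, and plain pointer arithmetic recovers the enclosing chunk. A standalone userspace sketch of the same recovery, with illustrative names:

	#include <stdio.h>
	#include <stddef.h>
	#include <stdlib.h>

	struct demo_node {
		unsigned index;		/* low bits: slot; MSB: 'will prune' tag */
	};

	struct demo_chunk {
		int count;
		struct demo_node owners[];
	};

	static struct demo_chunk *demo_find_chunk(struct demo_node *p)
	{
		unsigned index = p->index & ~(1U << 31);	/* strip the tag bit */

		p -= index;					/* back to owners[0] */
		return (struct demo_chunk *)((char *)p -
				offsetof(struct demo_chunk, owners[0]));
	}

	int main(void)
	{
		int i, n = 3;
		struct demo_chunk *c = malloc(offsetof(struct demo_chunk, owners) +
					      n * sizeof(struct demo_node));

		if (!c)
			return 1;
		c->count = n;
		for (i = 0; i < n; i++)
			c->owners[i].index = i | (1U << 31);	/* tagged slots */
		printf("%d\n", demo_find_chunk(&c->owners[2]) == c);	/* prints 1 */
		free(c);
		return 0;
	}
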
215
216static void untag_chunk(struct node *p)
217{
218	struct audit_chunk *chunk = find_chunk(p);
219	struct fsnotify_mark *entry = &chunk->mark;
220	struct audit_chunk *new = NULL;
221	struct audit_tree *owner;
222	int size = chunk->count - 1;
223	int i, j;
224
225	fsnotify_get_mark(entry);
226
227	spin_unlock(&hash_lock);
228
229	if (size)
230		new = alloc_chunk(size);
231
232	spin_lock(&entry->lock);
233	if (chunk->dead || !entry->i.inode) {
234		spin_unlock(&entry->lock);
235		if (new)
236			free_chunk(new);
237		goto out;
238	}
239
240	owner = p->owner;
241
242	if (!size) {
243		chunk->dead = 1;
244		spin_lock(&hash_lock);
245		list_del_init(&chunk->trees);
246		if (owner->root == chunk)
247			owner->root = NULL;
248		list_del_init(&p->list);
249		list_del_rcu(&chunk->hash);
250		spin_unlock(&hash_lock);
251		spin_unlock(&entry->lock);
252		fsnotify_destroy_mark(entry, audit_tree_group);
253		goto out;
254	}
255
256	if (!new)
257		goto Fallback;
258
259	fsnotify_duplicate_mark(&new->mark, entry);
260	if (fsnotify_add_mark(&new->mark, new->mark.group, new->mark.i.inode, NULL, 1)) {
261		fsnotify_put_mark(&new->mark);
262		goto Fallback;
263	}
264
265	chunk->dead = 1;
266	spin_lock(&hash_lock);
267	list_replace_init(&chunk->trees, &new->trees);
268	if (owner->root == chunk) {
269		list_del_init(&owner->same_root);
270		owner->root = NULL;
271	}
272
273	for (i = j = 0; j <= size; i++, j++) {
274		struct audit_tree *s;
275		if (&chunk->owners[j] == p) {
276			list_del_init(&p->list);
277			i--;
278			continue;
279		}
280		s = chunk->owners[j].owner;
281		new->owners[i].owner = s;
282		new->owners[i].index = chunk->owners[j].index - j + i;
283		if (!s) /* result of earlier fallback */
284			continue;
285		get_tree(s);
286		list_replace_init(&chunk->owners[j].list, &new->owners[i].list);
287	}
288
289	list_replace_rcu(&chunk->hash, &new->hash);
290	list_for_each_entry(owner, &new->trees, same_root)
291		owner->root = new;
292	spin_unlock(&hash_lock);
293	spin_unlock(&entry->lock);
294	fsnotify_destroy_mark(entry, audit_tree_group);
295	fsnotify_put_mark(&new->mark);	/* drop initial reference */
296	goto out;
297
298Fallback:
299	// do the best we can
300	spin_lock(&hash_lock);
301	if (owner->root == chunk) {
302		list_del_init(&owner->same_root);
303		owner->root = NULL;
304	}
305	list_del_init(&p->list);
306	p->owner = NULL;
307	put_tree(owner);
308	spin_unlock(&hash_lock);
309	spin_unlock(&entry->lock);
310out:
311	fsnotify_put_mark(entry);
312	spin_lock(&hash_lock);
313}
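
Note the asymmetry at the end of untag_chunk(): it is entered with hash_lock held, drops it internally (it has to allocate and take the mark's lock), and re-acquires it before returning. That invariant is what lets callers keep a simple drain loop; prune_one() below has exactly this shape. A sketch of the caller contract, with an illustrative name:

	static void demo_drain(struct audit_tree *victim)
	{
		spin_lock(&hash_lock);
		while (!list_empty(&victim->chunks)) {
			struct node *p = list_entry(victim->chunks.next,
						    struct node, list);

			untag_chunk(p);	/* drops and re-takes hash_lock */
		}
		spin_unlock(&hash_lock);
	}
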
314
315static int create_chunk(struct inode *inode, struct audit_tree *tree)
316{
317	struct fsnotify_mark *entry;
318	struct audit_chunk *chunk = alloc_chunk(1);
319	if (!chunk)
320		return -ENOMEM;
321
322	entry = &chunk->mark;
323	if (fsnotify_add_mark(entry, audit_tree_group, inode, NULL, 0)) {
324		fsnotify_put_mark(entry);
325		return -ENOSPC;
326	}
327
328	spin_lock(&entry->lock);
329	spin_lock(&hash_lock);
330	if (tree->goner) {
331		spin_unlock(&hash_lock);
332		chunk->dead = 1;
333		spin_unlock(&entry->lock);
334		fsnotify_destroy_mark(entry, audit_tree_group);
335		fsnotify_put_mark(entry);
336		return 0;
337	}
338	chunk->owners[0].index = (1U << 31);
339	chunk->owners[0].owner = tree;
340	get_tree(tree);
341	list_add(&chunk->owners[0].list, &tree->chunks);
342	if (!tree->root) {
343		tree->root = chunk;
344		list_add(&tree->same_root, &chunk->trees);
345	}
346	insert_hash(chunk);
347	spin_unlock(&hash_lock);
348	spin_unlock(&entry->lock);
349	fsnotify_put_mark(entry);	/* drop initial reference */
350	return 0;
351}
352
353/* the first tagged inode becomes root of tree */
354static int tag_chunk(struct inode *inode, struct audit_tree *tree)
355{
356	struct fsnotify_mark *old_entry, *chunk_entry;
357	struct audit_tree *owner;
358	struct audit_chunk *chunk, *old;
359	struct node *p;
360	int n;
361
362	old_entry = fsnotify_find_inode_mark(audit_tree_group, inode);
363	if (!old_entry)
364		return create_chunk(inode, tree);
365
366	old = container_of(old_entry, struct audit_chunk, mark);
367
368	/* are we already there? */
369	spin_lock(&hash_lock);
370	for (n = 0; n < old->count; n++) {
371		if (old->owners[n].owner == tree) {
372			spin_unlock(&hash_lock);
373			fsnotify_put_mark(old_entry);
374			return 0;
375		}
376	}
377	spin_unlock(&hash_lock);
378
379	chunk = alloc_chunk(old->count + 1);
380	if (!chunk) {
381		fsnotify_put_mark(old_entry);
382		return -ENOMEM;
383	}
384
385	chunk_entry = &chunk->mark;
386
387	spin_lock(&old_entry->lock);
388	if (!old_entry->i.inode) {
 389		/* old_entry is being shot, let's just lie */
390		spin_unlock(&old_entry->lock);
391		fsnotify_put_mark(old_entry);
392		free_chunk(chunk);
393		return -ENOENT;
394	}
395
396	fsnotify_duplicate_mark(chunk_entry, old_entry);
397	if (fsnotify_add_mark(chunk_entry, chunk_entry->group, chunk_entry->i.inode, NULL, 1)) {
398		spin_unlock(&old_entry->lock);
399		fsnotify_put_mark(chunk_entry);
400		fsnotify_put_mark(old_entry);
401		return -ENOSPC;
402	}
403
404	/* even though we hold old_entry->lock, this is safe since chunk_entry->lock could NEVER have been grabbed before */
405	spin_lock(&chunk_entry->lock);
406	spin_lock(&hash_lock);
407
408	/* we now hold old_entry->lock, chunk_entry->lock, and hash_lock */
409	if (tree->goner) {
410		spin_unlock(&hash_lock);
411		chunk->dead = 1;
412		spin_unlock(&chunk_entry->lock);
413		spin_unlock(&old_entry->lock);
414
415		fsnotify_destroy_mark(chunk_entry, audit_tree_group);
416
417		fsnotify_put_mark(chunk_entry);
418		fsnotify_put_mark(old_entry);
419		return 0;
420	}
421	list_replace_init(&old->trees, &chunk->trees);
422	for (n = 0, p = chunk->owners; n < old->count; n++, p++) {
423		struct audit_tree *s = old->owners[n].owner;
424		p->owner = s;
425		p->index = old->owners[n].index;
426		if (!s) /* result of fallback in untag */
427			continue;
428		get_tree(s);
429		list_replace_init(&old->owners[n].list, &p->list);
430	}
431	p->index = (chunk->count - 1) | (1U<<31);
432	p->owner = tree;
433	get_tree(tree);
434	list_add(&p->list, &tree->chunks);
435	list_replace_rcu(&old->hash, &chunk->hash);
436	list_for_each_entry(owner, &chunk->trees, same_root)
437		owner->root = chunk;
438	old->dead = 1;
439	if (!tree->root) {
440		tree->root = chunk;
441		list_add(&tree->same_root, &chunk->trees);
442	}
443	spin_unlock(&hash_lock);
444	spin_unlock(&chunk_entry->lock);
445	spin_unlock(&old_entry->lock);
446	fsnotify_destroy_mark(old_entry, audit_tree_group);
447	fsnotify_put_mark(chunk_entry);	/* drop initial reference */
448	fsnotify_put_mark(old_entry); /* pair to fsnotify_find mark_entry */
449	return 0;
450}
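
The "could NEVER have been grabbed before" comment in tag_chunk() above is doing real work: taking chunk_entry->lock while already holding old_entry->lock is deadlock-safe only because the new mark has just been created and is not yet reachable by any other CPU, so no conflicting lock order involving it can exist. The general shape of that idiom, as a hedged sketch with hypothetical names:

	static void demo_nest_private_lock(struct fsnotify_mark *published,
					   struct fsnotify_mark *fresh)
	{
		spin_lock(&published->lock);
		/* 'fresh' is still private to this thread, so nesting its
		 * lock inside an already-published one defines the only
		 * ordering that exists */
		spin_lock(&fresh->lock);
		/* ... migrate state from published to fresh ... */
		spin_unlock(&fresh->lock);
		spin_unlock(&published->lock);
	}
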
451
452static void audit_log_remove_rule(struct audit_krule *rule)
453{
454	struct audit_buffer *ab;
455
456	ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
457	if (unlikely(!ab))
458		return;
459	audit_log_format(ab, "op=");
460	audit_log_string(ab, "remove rule");
461	audit_log_format(ab, " dir=");
462	audit_log_untrustedstring(ab, rule->tree->pathname);
463	audit_log_key(ab, rule->filterkey);
464	audit_log_format(ab, " list=%d res=1", rule->listnr);
465	audit_log_end(ab);
466}
467
468static void kill_rules(struct audit_tree *tree)
469{
470	struct audit_krule *rule, *next;
471	struct audit_entry *entry;
472
473	list_for_each_entry_safe(rule, next, &tree->rules, rlist) {
474		entry = container_of(rule, struct audit_entry, rule);
475
476		list_del_init(&rule->rlist);
477		if (rule->tree) {
478			/* not a half-baked one */
479			audit_log_remove_rule(rule);
480			rule->tree = NULL;
481			list_del_rcu(&entry->list);
482			list_del(&entry->rule.list);
483			call_rcu(&entry->rcu, audit_free_rule_rcu);
484		}
485	}
486}
487
488/*
489 * finish killing struct audit_tree
490 */
491static void prune_one(struct audit_tree *victim)
492{
493	spin_lock(&hash_lock);
494	while (!list_empty(&victim->chunks)) {
495		struct node *p;
496
497		p = list_entry(victim->chunks.next, struct node, list);
498
499		untag_chunk(p);
500	}
501	spin_unlock(&hash_lock);
502	put_tree(victim);
503}
504
505/* trim the uncommitted chunks from tree */
506
507static void trim_marked(struct audit_tree *tree)
508{
509	struct list_head *p, *q;
510	spin_lock(&hash_lock);
511	if (tree->goner) {
512		spin_unlock(&hash_lock);
513		return;
514	}
515	/* reorder */
516	for (p = tree->chunks.next; p != &tree->chunks; p = q) {
517		struct node *node = list_entry(p, struct node, list);
518		q = p->next;
519		if (node->index & (1U<<31)) {
520			list_del_init(p);
521			list_add(p, &tree->chunks);
522		}
523	}
524
525	while (!list_empty(&tree->chunks)) {
526		struct node *node;
527
528		node = list_entry(tree->chunks.next, struct node, list);
529
530		/* have we run out of marked? */
531		if (!(node->index & (1U<<31)))
532			break;
533
534		untag_chunk(node);
535	}
536	if (!tree->root && !tree->goner) {
537		tree->goner = 1;
538		spin_unlock(&hash_lock);
539		mutex_lock(&audit_filter_mutex);
540		kill_rules(tree);
541		list_del_init(&tree->list);
542		mutex_unlock(&audit_filter_mutex);
543		prune_one(tree);
544	} else {
545		spin_unlock(&hash_lock);
546	}
547}
548
549static void audit_schedule_prune(void);
550
551/* called with audit_filter_mutex */
552int audit_remove_tree_rule(struct audit_krule *rule)
553{
554	struct audit_tree *tree;
555	tree = rule->tree;
556	if (tree) {
557		spin_lock(&hash_lock);
558		list_del_init(&rule->rlist);
559		if (list_empty(&tree->rules) && !tree->goner) {
560			tree->root = NULL;
561			list_del_init(&tree->same_root);
562			tree->goner = 1;
563			list_move(&tree->list, &prune_list);
564			rule->tree = NULL;
565			spin_unlock(&hash_lock);
566			audit_schedule_prune();
567			return 1;
568		}
569		rule->tree = NULL;
570		spin_unlock(&hash_lock);
571		return 1;
572	}
573	return 0;
574}
575
576static int compare_root(struct vfsmount *mnt, void *arg)
577{
578	return mnt->mnt_root->d_inode == arg;
579}
580
581void audit_trim_trees(void)
582{
583	struct list_head cursor;
584
585	mutex_lock(&audit_filter_mutex);
586	list_add(&cursor, &tree_list);
587	while (cursor.next != &tree_list) {
588		struct audit_tree *tree;
589		struct path path;
590		struct vfsmount *root_mnt;
591		struct node *node;
592		int err;
593
594		tree = container_of(cursor.next, struct audit_tree, list);
595		get_tree(tree);
596		list_del(&cursor);
597		list_add(&cursor, &tree->list);
598		mutex_unlock(&audit_filter_mutex);
599
600		err = kern_path(tree->pathname, 0, &path);
601		if (err)
602			goto skip_it;
603
604		root_mnt = collect_mounts(&path);
605		path_put(&path);
606		if (IS_ERR(root_mnt))
607			goto skip_it;
608
609		spin_lock(&hash_lock);
610		list_for_each_entry(node, &tree->chunks, list) {
611			struct audit_chunk *chunk = find_chunk(node);
 612			/* this could be NULL if the watch is dying elsewhere... */
613			struct inode *inode = chunk->mark.i.inode;
614			node->index |= 1U<<31;
615			if (iterate_mounts(compare_root, inode, root_mnt))
616				node->index &= ~(1U<<31);
617		}
618		spin_unlock(&hash_lock);
619		trim_marked(tree);
620		drop_collected_mounts(root_mnt);
621skip_it:
622		put_tree(tree);
623		mutex_lock(&audit_filter_mutex);
624	}
625	list_del(&cursor);
626	mutex_unlock(&audit_filter_mutex);
627}
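
audit_trim_trees() above (and audit_tag_tree() further down) both walk tree_list with a dummy list_head used as a cursor: the cursor is re-linked right after the tree currently being processed, so audit_filter_mutex can be dropped for the sleepy per-tree work and the walk resumes from the cursor once the mutex is re-taken, even if other trees were added or removed in the meantime. The bare idiom, as an illustrative sketch:

	static void demo_cursor_walk(void)
	{
		struct list_head cursor;

		mutex_lock(&audit_filter_mutex);
		list_add(&cursor, &tree_list);
		while (cursor.next != &tree_list) {
			struct audit_tree *tree;

			tree = container_of(cursor.next, struct audit_tree, list);
			get_tree(tree);			/* pin across the unlock */
			list_del(&cursor);
			list_add(&cursor, &tree->list);	/* resume point */
			mutex_unlock(&audit_filter_mutex);

			/* ... per-tree work that may sleep goes here ... */

			put_tree(tree);
			mutex_lock(&audit_filter_mutex);
		}
		list_del(&cursor);
		mutex_unlock(&audit_filter_mutex);
	}
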
628
629int audit_make_tree(struct audit_krule *rule, char *pathname, u32 op)
630{
631
632	if (pathname[0] != '/' ||
633	    rule->listnr != AUDIT_FILTER_EXIT ||
634	    op != Audit_equal ||
635	    rule->inode_f || rule->watch || rule->tree)
636		return -EINVAL;
637	rule->tree = alloc_tree(pathname);
638	if (!rule->tree)
639		return -ENOMEM;
640	return 0;
641}
642
643void audit_put_tree(struct audit_tree *tree)
644{
645	put_tree(tree);
646}
647
648static int tag_mount(struct vfsmount *mnt, void *arg)
649{
650	return tag_chunk(mnt->mnt_root->d_inode, arg);
651}
652
653/* called with audit_filter_mutex */
654int audit_add_tree_rule(struct audit_krule *rule)
655{
656	struct audit_tree *seed = rule->tree, *tree;
657	struct path path;
658	struct vfsmount *mnt;
659	int err;
660
661	rule->tree = NULL;
662	list_for_each_entry(tree, &tree_list, list) {
663		if (!strcmp(seed->pathname, tree->pathname)) {
664			put_tree(seed);
665			rule->tree = tree;
666			list_add(&rule->rlist, &tree->rules);
667			return 0;
668		}
669	}
670	tree = seed;
671	list_add(&tree->list, &tree_list);
672	list_add(&rule->rlist, &tree->rules);
673	/* do not set rule->tree yet */
674	mutex_unlock(&audit_filter_mutex);
675
676	err = kern_path(tree->pathname, 0, &path);
677	if (err)
678		goto Err;
679	mnt = collect_mounts(&path);
680	path_put(&path);
681	if (IS_ERR(mnt)) {
682		err = PTR_ERR(mnt);
683		goto Err;
684	}
685
686	get_tree(tree);
687	err = iterate_mounts(tag_mount, tree, mnt);
688	drop_collected_mounts(mnt);
689
690	if (!err) {
691		struct node *node;
692		spin_lock(&hash_lock);
693		list_for_each_entry(node, &tree->chunks, list)
694			node->index &= ~(1U<<31);
695		spin_unlock(&hash_lock);
696	} else {
697		trim_marked(tree);
698		goto Err;
699	}
700
701	mutex_lock(&audit_filter_mutex);
702	if (list_empty(&rule->rlist)) {
703		put_tree(tree);
704		return -ENOENT;
705	}
706	rule->tree = tree;
707	put_tree(tree);
708
709	return 0;
710Err:
711	mutex_lock(&audit_filter_mutex);
712	list_del_init(&tree->list);
713	list_del_init(&tree->rules);
714	put_tree(tree);
715	return err;
716}
717
718int audit_tag_tree(char *old, char *new)
719{
720	struct list_head cursor, barrier;
721	int failed = 0;
722	struct path path1, path2;
723	struct vfsmount *tagged;
724	int err;
725
726	err = kern_path(new, 0, &path2);
727	if (err)
728		return err;
729	tagged = collect_mounts(&path2);
730	path_put(&path2);
731	if (IS_ERR(tagged))
732		return PTR_ERR(tagged);
733
734	err = kern_path(old, 0, &path1);
735	if (err) {
736		drop_collected_mounts(tagged);
737		return err;
738	}
739
740	mutex_lock(&audit_filter_mutex);
741	list_add(&barrier, &tree_list);
742	list_add(&cursor, &barrier);
743
744	while (cursor.next != &tree_list) {
745		struct audit_tree *tree;
746		int good_one = 0;
747
748		tree = container_of(cursor.next, struct audit_tree, list);
749		get_tree(tree);
750		list_del(&cursor);
751		list_add(&cursor, &tree->list);
752		mutex_unlock(&audit_filter_mutex);
753
754		err = kern_path(tree->pathname, 0, &path2);
755		if (!err) {
756			good_one = path_is_under(&path1, &path2);
757			path_put(&path2);
758		}
759
760		if (!good_one) {
761			put_tree(tree);
762			mutex_lock(&audit_filter_mutex);
763			continue;
764		}
765
766		failed = iterate_mounts(tag_mount, tree, tagged);
767		if (failed) {
768			put_tree(tree);
769			mutex_lock(&audit_filter_mutex);
770			break;
771		}
772
773		mutex_lock(&audit_filter_mutex);
774		spin_lock(&hash_lock);
775		if (!tree->goner) {
776			list_del(&tree->list);
777			list_add(&tree->list, &tree_list);
778		}
779		spin_unlock(&hash_lock);
780		put_tree(tree);
781	}
782
783	while (barrier.prev != &tree_list) {
784		struct audit_tree *tree;
785
786		tree = container_of(barrier.prev, struct audit_tree, list);
787		get_tree(tree);
788		list_del(&tree->list);
789		list_add(&tree->list, &barrier);
790		mutex_unlock(&audit_filter_mutex);
791
792		if (!failed) {
793			struct node *node;
794			spin_lock(&hash_lock);
795			list_for_each_entry(node, &tree->chunks, list)
796				node->index &= ~(1U<<31);
797			spin_unlock(&hash_lock);
798		} else {
799			trim_marked(tree);
800		}
801
802		put_tree(tree);
803		mutex_lock(&audit_filter_mutex);
804	}
805	list_del(&barrier);
806	list_del(&cursor);
807	mutex_unlock(&audit_filter_mutex);
808	path_put(&path1);
809	drop_collected_mounts(tagged);
810	return failed;
811}
812
813/*
814 * That gets run when evict_chunk() ends up needing to kill audit_tree.
815 * Runs from a separate thread.
816 */
817static int prune_tree_thread(void *unused)
818{
819	mutex_lock(&audit_cmd_mutex);
820	mutex_lock(&audit_filter_mutex);
821
822	while (!list_empty(&prune_list)) {
823		struct audit_tree *victim;
824
825		victim = list_entry(prune_list.next, struct audit_tree, list);
826		list_del_init(&victim->list);
827
828		mutex_unlock(&audit_filter_mutex);
829
830		prune_one(victim);
831
832		mutex_lock(&audit_filter_mutex);
833	}
834
835	mutex_unlock(&audit_filter_mutex);
836	mutex_unlock(&audit_cmd_mutex);
837	return 0;
838}
839
840static void audit_schedule_prune(void)
841{
842	kthread_run(prune_tree_thread, NULL, "audit_prune_tree");
843}
844
845/*
846 * ... and that one is done if evict_chunk() decides to delay until the end
847 * of syscall.  Runs synchronously.
848 */
849void audit_kill_trees(struct list_head *list)
850{
851	mutex_lock(&audit_cmd_mutex);
852	mutex_lock(&audit_filter_mutex);
853
854	while (!list_empty(list)) {
855		struct audit_tree *victim;
856
857		victim = list_entry(list->next, struct audit_tree, list);
858		kill_rules(victim);
859		list_del_init(&victim->list);
860
861		mutex_unlock(&audit_filter_mutex);
862
863		prune_one(victim);
864
865		mutex_lock(&audit_filter_mutex);
866	}
867
868	mutex_unlock(&audit_filter_mutex);
869	mutex_unlock(&audit_cmd_mutex);
870}
871
872/*
873 *  Here comes the stuff asynchronous to auditctl operations
874 */
875
876static void evict_chunk(struct audit_chunk *chunk)
877{
878	struct audit_tree *owner;
879	struct list_head *postponed = audit_killed_trees();
880	int need_prune = 0;
881	int n;
882
883	if (chunk->dead)
884		return;
885
886	chunk->dead = 1;
887	mutex_lock(&audit_filter_mutex);
888	spin_lock(&hash_lock);
889	while (!list_empty(&chunk->trees)) {
890		owner = list_entry(chunk->trees.next,
891				   struct audit_tree, same_root);
892		owner->goner = 1;
893		owner->root = NULL;
894		list_del_init(&owner->same_root);
895		spin_unlock(&hash_lock);
896		if (!postponed) {
897			kill_rules(owner);
898			list_move(&owner->list, &prune_list);
899			need_prune = 1;
900		} else {
901			list_move(&owner->list, postponed);
902		}
903		spin_lock(&hash_lock);
904	}
905	list_del_rcu(&chunk->hash);
906	for (n = 0; n < chunk->count; n++)
907		list_del_init(&chunk->owners[n].list);
908	spin_unlock(&hash_lock);
909	if (need_prune)
910		audit_schedule_prune();
911	mutex_unlock(&audit_filter_mutex);
912}
913
914static int audit_tree_handle_event(struct fsnotify_group *group,
915				   struct inode *to_tell,
916				   struct fsnotify_mark *inode_mark,
917				   struct fsnotify_mark *vfsmount_mark,
918				   u32 mask, void *data, int data_type,
919				   const unsigned char *file_name, u32 cookie)
920{
921	return 0;
922}
923
924static void audit_tree_freeing_mark(struct fsnotify_mark *entry, struct fsnotify_group *group)
925{
926	struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark);
927
928	evict_chunk(chunk);
929
930	/*
931	 * We are guaranteed to have at least one reference to the mark from
932	 * either the inode or the caller of fsnotify_destroy_mark().
933	 */
934	BUG_ON(atomic_read(&entry->refcnt) < 1);
935}
936
937static const struct fsnotify_ops audit_tree_ops = {
938	.handle_event = audit_tree_handle_event,
939	.freeing_mark = audit_tree_freeing_mark,
940};
941
942static int __init audit_tree_init(void)
943{
944	int i;
945
946	audit_tree_group = fsnotify_alloc_group(&audit_tree_ops);
947	if (IS_ERR(audit_tree_group))
948		audit_panic("cannot initialize fsnotify group for rectree watches");
949
950	for (i = 0; i < HASH_SIZE; i++)
951		INIT_LIST_HEAD(&chunk_hash_heads[i]);
952
953	return 0;
954}
955__initcall(audit_tree_init);
v4.17
   1// SPDX-License-Identifier: GPL-2.0
   2#include "audit.h"
   3#include <linux/fsnotify_backend.h>
   4#include <linux/namei.h>
   5#include <linux/mount.h>
   6#include <linux/kthread.h>
   7#include <linux/refcount.h>
   8#include <linux/slab.h>
   9
  10struct audit_tree;
  11struct audit_chunk;
  12
  13struct audit_tree {
  14	refcount_t count;
  15	int goner;
  16	struct audit_chunk *root;
  17	struct list_head chunks;
  18	struct list_head rules;
  19	struct list_head list;
  20	struct list_head same_root;
  21	struct rcu_head head;
  22	char pathname[];
  23};
  24
  25struct audit_chunk {
  26	struct list_head hash;
  27	struct fsnotify_mark mark;
  28	struct list_head trees;		/* with root here */
  29	int dead;
  30	int count;
  31	atomic_long_t refs;
  32	struct rcu_head head;
  33	struct node {
  34		struct list_head list;
  35		struct audit_tree *owner;
  36		unsigned index;		/* index; upper bit indicates 'will prune' */
  37	} owners[];
  38};
  39
  40static LIST_HEAD(tree_list);
  41static LIST_HEAD(prune_list);
  42static struct task_struct *prune_thread;
  43
  44/*
  45 * One struct chunk is attached to each inode of interest.
  46 * We replace struct chunk on tagging/untagging.
  47 * Rules have pointer to struct audit_tree.
  48 * Rules have struct list_head rlist forming a list of rules over
  49 * the same tree.
  50 * References to struct chunk are collected at audit_inode{,_child}()
  51 * time and used in AUDIT_TREE rule matching.
  52 * These references are dropped at the same time we are calling
  53 * audit_free_names(), etc.
  54 *
  55 * Cyclic lists galore:
  56 * tree.chunks anchors chunk.owners[].list			hash_lock
  57 * tree.rules anchors rule.rlist				audit_filter_mutex
  58 * chunk.trees anchors tree.same_root				hash_lock
  59 * chunk.hash is a hash with middle bits of watch.inode as
  60 * a hash function.						RCU, hash_lock
  61 *
  62 * tree is refcounted; one reference for "some rules on rules_list refer to
  63 * it", one for each chunk with pointer to it.
  64 *
  65 * chunk is refcounted by embedded fsnotify_mark + .refs (non-zero refcount
  66 * of watch contributes 1 to .refs).
  67 *
  68 * node.index allows to get from node.list to containing chunk.
  69 * MSB of that sucker is stolen to mark taggings that we might have to
  70 * revert - several operations have very unpleasant cleanup logics and
  71 * that makes a difference.  Some.
  72 */
  73
  74static struct fsnotify_group *audit_tree_group;
  75
  76static struct audit_tree *alloc_tree(const char *s)
  77{
  78	struct audit_tree *tree;
  79
  80	tree = kmalloc(sizeof(struct audit_tree) + strlen(s) + 1, GFP_KERNEL);
  81	if (tree) {
  82		refcount_set(&tree->count, 1);
  83		tree->goner = 0;
  84		INIT_LIST_HEAD(&tree->chunks);
  85		INIT_LIST_HEAD(&tree->rules);
  86		INIT_LIST_HEAD(&tree->list);
  87		INIT_LIST_HEAD(&tree->same_root);
  88		tree->root = NULL;
  89		strcpy(tree->pathname, s);
  90	}
  91	return tree;
  92}
  93
  94static inline void get_tree(struct audit_tree *tree)
  95{
  96	refcount_inc(&tree->count);
  97}
  98
  99static inline void put_tree(struct audit_tree *tree)
 100{
 101	if (refcount_dec_and_test(&tree->count))
 102		kfree_rcu(tree, head);
 103}
 104
 105/* to avoid bringing the entire thing in audit.h */
 106const char *audit_tree_path(struct audit_tree *tree)
 107{
 108	return tree->pathname;
 109}
 110
 111static void free_chunk(struct audit_chunk *chunk)
 112{
 113	int i;
 114
 115	for (i = 0; i < chunk->count; i++) {
 116		if (chunk->owners[i].owner)
 117			put_tree(chunk->owners[i].owner);
 118	}
 119	kfree(chunk);
 120}
 121
 122void audit_put_chunk(struct audit_chunk *chunk)
 123{
 124	if (atomic_long_dec_and_test(&chunk->refs))
 125		free_chunk(chunk);
 126}
 127
 128static void __put_chunk(struct rcu_head *rcu)
 129{
 130	struct audit_chunk *chunk = container_of(rcu, struct audit_chunk, head);
 131	audit_put_chunk(chunk);
 132}
 133
 134static void audit_tree_destroy_watch(struct fsnotify_mark *entry)
 135{
 136	struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark);
 137	call_rcu(&chunk->head, __put_chunk);
 138}
 139
 140static struct audit_chunk *alloc_chunk(int count)
 141{
 142	struct audit_chunk *chunk;
 143	size_t size;
 144	int i;
 145
 146	size = offsetof(struct audit_chunk, owners) + count * sizeof(struct node);
 147	chunk = kzalloc(size, GFP_KERNEL);
 148	if (!chunk)
 149		return NULL;
 150
 151	INIT_LIST_HEAD(&chunk->hash);
 152	INIT_LIST_HEAD(&chunk->trees);
 153	chunk->count = count;
 154	atomic_long_set(&chunk->refs, 1);
 155	for (i = 0; i < count; i++) {
 156		INIT_LIST_HEAD(&chunk->owners[i].list);
 157		chunk->owners[i].index = i;
 158	}
 159	fsnotify_init_mark(&chunk->mark, audit_tree_group);
 160	chunk->mark.mask = FS_IN_IGNORED;
 161	return chunk;
 162}
 163
 164enum {HASH_SIZE = 128};
 165static struct list_head chunk_hash_heads[HASH_SIZE];
 166static __cacheline_aligned_in_smp DEFINE_SPINLOCK(hash_lock);
 167
 168/* Function to return search key in our hash from inode. */
 169static unsigned long inode_to_key(const struct inode *inode)
 170{
 171	return (unsigned long)inode;
 172}
 173
 174/*
 175 * Function to return search key in our hash from chunk. Key 0 is special and
 176 * should never be present in the hash.
 177 */
 178static unsigned long chunk_to_key(struct audit_chunk *chunk)
 179{
 180	/*
 181	 * We have a reference to the mark so it should be attached to a
 182	 * connector.
 183	 */
 184	if (WARN_ON_ONCE(!chunk->mark.connector))
 185		return 0;
 186	return (unsigned long)chunk->mark.connector->inode;
 187}
 188
 189static inline struct list_head *chunk_hash(unsigned long key)
 190{
 191	unsigned long n = key / L1_CACHE_BYTES;
 192	return chunk_hash_heads + n % HASH_SIZE;
 193}
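
In this version the hash is keyed by an unsigned long rather than by the raw inode pointer held in the mark: inode_to_key() is used on the lookup side and chunk_to_key() (which reads mark.connector->inode, with key 0 reserved for a detached mark) on the insertion side, so the two sides necessarily agree on the bucket. A hypothetical consistency check, for illustration only:

	static bool demo_same_bucket(struct audit_chunk *chunk,
				     const struct inode *inode)
	{
		/* both sides hash the same key, so the buckets match
		 * whenever the chunk is attached to this inode */
		return chunk_hash(chunk_to_key(chunk)) ==
		       chunk_hash(inode_to_key(inode));
	}
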
 194
 195/* hash_lock & entry->lock is held by caller */
 196static void insert_hash(struct audit_chunk *chunk)
 197{
 198	unsigned long key = chunk_to_key(chunk);
 199	struct list_head *list;
 200
 201	if (!(chunk->mark.flags & FSNOTIFY_MARK_FLAG_ATTACHED))
 202		return;
 203	list = chunk_hash(key);
 204	list_add_rcu(&chunk->hash, list);
 205}
 206
 207/* called under rcu_read_lock */
 208struct audit_chunk *audit_tree_lookup(const struct inode *inode)
 209{
 210	unsigned long key = inode_to_key(inode);
 211	struct list_head *list = chunk_hash(key);
 212	struct audit_chunk *p;
 213
 214	list_for_each_entry_rcu(p, list, hash) {
 215		if (chunk_to_key(p) == key) {
 216			atomic_long_inc(&p->refs);
 217			return p;
 218		}
 219	}
 220	return NULL;
 221}
 222
 223bool audit_tree_match(struct audit_chunk *chunk, struct audit_tree *tree)
 224{
 225	int n;
 226	for (n = 0; n < chunk->count; n++)
 227		if (chunk->owners[n].owner == tree)
 228			return true;
 229	return false;
 230}
 231
 232/* tagging and untagging inodes with trees */
 233
 234static struct audit_chunk *find_chunk(struct node *p)
 235{
 236	int index = p->index & ~(1U<<31);
 237	p -= index;
 238	return container_of(p, struct audit_chunk, owners[0]);
 239}
 240
 241static void untag_chunk(struct node *p)
 242{
 243	struct audit_chunk *chunk = find_chunk(p);
 244	struct fsnotify_mark *entry = &chunk->mark;
 245	struct audit_chunk *new = NULL;
 246	struct audit_tree *owner;
 247	int size = chunk->count - 1;
 248	int i, j;
 249
 250	fsnotify_get_mark(entry);
 251
 252	spin_unlock(&hash_lock);
 253
 254	if (size)
 255		new = alloc_chunk(size);
 256
 257	mutex_lock(&entry->group->mark_mutex);
 258	spin_lock(&entry->lock);
 259	/*
 260	 * mark_mutex protects mark from getting detached and thus also from
 261	 * mark->connector->inode getting NULL.
 262	 */
 263	if (chunk->dead || !(entry->flags & FSNOTIFY_MARK_FLAG_ATTACHED)) {
 264		spin_unlock(&entry->lock);
 265		mutex_unlock(&entry->group->mark_mutex);
 266		if (new)
 267			fsnotify_put_mark(&new->mark);
 268		goto out;
 269	}
 270
 271	owner = p->owner;
 272
 273	if (!size) {
 274		chunk->dead = 1;
 275		spin_lock(&hash_lock);
 276		list_del_init(&chunk->trees);
 277		if (owner->root == chunk)
 278			owner->root = NULL;
 279		list_del_init(&p->list);
 280		list_del_rcu(&chunk->hash);
 281		spin_unlock(&hash_lock);
 282		spin_unlock(&entry->lock);
 283		mutex_unlock(&entry->group->mark_mutex);
 284		fsnotify_destroy_mark(entry, audit_tree_group);
 285		goto out;
 286	}
 287
 288	if (!new)
 289		goto Fallback;
 290
 291	if (fsnotify_add_mark_locked(&new->mark, entry->connector->inode,
 292				     NULL, 1)) {
 293		fsnotify_put_mark(&new->mark);
 294		goto Fallback;
 295	}
 296
 297	chunk->dead = 1;
 298	spin_lock(&hash_lock);
 299	list_replace_init(&chunk->trees, &new->trees);
 300	if (owner->root == chunk) {
 301		list_del_init(&owner->same_root);
 302		owner->root = NULL;
 303	}
 304
 305	for (i = j = 0; j <= size; i++, j++) {
 306		struct audit_tree *s;
 307		if (&chunk->owners[j] == p) {
 308			list_del_init(&p->list);
 309			i--;
 310			continue;
 311		}
 312		s = chunk->owners[j].owner;
 313		new->owners[i].owner = s;
 314		new->owners[i].index = chunk->owners[j].index - j + i;
 315		if (!s) /* result of earlier fallback */
 316			continue;
 317		get_tree(s);
 318		list_replace_init(&chunk->owners[j].list, &new->owners[i].list);
 319	}
 320
 321	list_replace_rcu(&chunk->hash, &new->hash);
 322	list_for_each_entry(owner, &new->trees, same_root)
 323		owner->root = new;
 324	spin_unlock(&hash_lock);
 325	spin_unlock(&entry->lock);
 326	mutex_unlock(&entry->group->mark_mutex);
 327	fsnotify_destroy_mark(entry, audit_tree_group);
 328	fsnotify_put_mark(&new->mark);	/* drop initial reference */
 329	goto out;
 330
 331Fallback:
 332	// do the best we can
 333	spin_lock(&hash_lock);
 334	if (owner->root == chunk) {
 335		list_del_init(&owner->same_root);
 336		owner->root = NULL;
 337	}
 338	list_del_init(&p->list);
 339	p->owner = NULL;
 340	put_tree(owner);
 341	spin_unlock(&hash_lock);
 342	spin_unlock(&entry->lock);
 343	mutex_unlock(&entry->group->mark_mutex);
 344out:
 345	fsnotify_put_mark(entry);
 346	spin_lock(&hash_lock);
 347}
 348
 349static int create_chunk(struct inode *inode, struct audit_tree *tree)
 350{
 351	struct fsnotify_mark *entry;
 352	struct audit_chunk *chunk = alloc_chunk(1);
 353	if (!chunk)
 354		return -ENOMEM;
 355
 356	entry = &chunk->mark;
 357	if (fsnotify_add_mark(entry, inode, NULL, 0)) {
 358		fsnotify_put_mark(entry);
 359		return -ENOSPC;
 360	}
 361
 362	spin_lock(&entry->lock);
 363	spin_lock(&hash_lock);
 364	if (tree->goner) {
 365		spin_unlock(&hash_lock);
 366		chunk->dead = 1;
 367		spin_unlock(&entry->lock);
 368		fsnotify_destroy_mark(entry, audit_tree_group);
 369		fsnotify_put_mark(entry);
 370		return 0;
 371	}
 372	chunk->owners[0].index = (1U << 31);
 373	chunk->owners[0].owner = tree;
 374	get_tree(tree);
 375	list_add(&chunk->owners[0].list, &tree->chunks);
 376	if (!tree->root) {
 377		tree->root = chunk;
 378		list_add(&tree->same_root, &chunk->trees);
 379	}
 380	insert_hash(chunk);
 381	spin_unlock(&hash_lock);
 382	spin_unlock(&entry->lock);
 383	fsnotify_put_mark(entry);	/* drop initial reference */
 384	return 0;
 385}
 386
 387/* the first tagged inode becomes root of tree */
 388static int tag_chunk(struct inode *inode, struct audit_tree *tree)
 389{
 390	struct fsnotify_mark *old_entry, *chunk_entry;
 391	struct audit_tree *owner;
 392	struct audit_chunk *chunk, *old;
 393	struct node *p;
 394	int n;
 395
 396	old_entry = fsnotify_find_mark(&inode->i_fsnotify_marks,
 397				       audit_tree_group);
 398	if (!old_entry)
 399		return create_chunk(inode, tree);
 400
 401	old = container_of(old_entry, struct audit_chunk, mark);
 402
 403	/* are we already there? */
 404	spin_lock(&hash_lock);
 405	for (n = 0; n < old->count; n++) {
 406		if (old->owners[n].owner == tree) {
 407			spin_unlock(&hash_lock);
 408			fsnotify_put_mark(old_entry);
 409			return 0;
 410		}
 411	}
 412	spin_unlock(&hash_lock);
 413
 414	chunk = alloc_chunk(old->count + 1);
 415	if (!chunk) {
 416		fsnotify_put_mark(old_entry);
 417		return -ENOMEM;
 418	}
 419
 420	chunk_entry = &chunk->mark;
 421
 422	mutex_lock(&old_entry->group->mark_mutex);
 423	spin_lock(&old_entry->lock);
 424	/*
 425	 * mark_mutex protects mark from getting detached and thus also from
 426	 * mark->connector->inode getting NULL.
 427	 */
 428	if (!(old_entry->flags & FSNOTIFY_MARK_FLAG_ATTACHED)) {
  429	/* old_entry is being shot, let's just lie */
 430		spin_unlock(&old_entry->lock);
 431		mutex_unlock(&old_entry->group->mark_mutex);
 432		fsnotify_put_mark(old_entry);
 433		fsnotify_put_mark(&chunk->mark);
 434		return -ENOENT;
 435	}
 436
 437	if (fsnotify_add_mark_locked(chunk_entry,
 438			     old_entry->connector->inode, NULL, 1)) {
 439		spin_unlock(&old_entry->lock);
 440		mutex_unlock(&old_entry->group->mark_mutex);
 441		fsnotify_put_mark(chunk_entry);
 442		fsnotify_put_mark(old_entry);
 443		return -ENOSPC;
 444	}
 445
 446	/* even though we hold old_entry->lock, this is safe since chunk_entry->lock could NEVER have been grabbed before */
 447	spin_lock(&chunk_entry->lock);
 448	spin_lock(&hash_lock);
 449
 450	/* we now hold old_entry->lock, chunk_entry->lock, and hash_lock */
 451	if (tree->goner) {
 452		spin_unlock(&hash_lock);
 453		chunk->dead = 1;
 454		spin_unlock(&chunk_entry->lock);
 455		spin_unlock(&old_entry->lock);
 456		mutex_unlock(&old_entry->group->mark_mutex);
 457
 458		fsnotify_destroy_mark(chunk_entry, audit_tree_group);
 459
 460		fsnotify_put_mark(chunk_entry);
 461		fsnotify_put_mark(old_entry);
 462		return 0;
 463	}
 464	list_replace_init(&old->trees, &chunk->trees);
 465	for (n = 0, p = chunk->owners; n < old->count; n++, p++) {
 466		struct audit_tree *s = old->owners[n].owner;
 467		p->owner = s;
 468		p->index = old->owners[n].index;
 469		if (!s) /* result of fallback in untag */
 470			continue;
 471		get_tree(s);
 472		list_replace_init(&old->owners[n].list, &p->list);
 473	}
 474	p->index = (chunk->count - 1) | (1U<<31);
 475	p->owner = tree;
 476	get_tree(tree);
 477	list_add(&p->list, &tree->chunks);
 478	list_replace_rcu(&old->hash, &chunk->hash);
 479	list_for_each_entry(owner, &chunk->trees, same_root)
 480		owner->root = chunk;
 481	old->dead = 1;
 482	if (!tree->root) {
 483		tree->root = chunk;
 484		list_add(&tree->same_root, &chunk->trees);
 485	}
 486	spin_unlock(&hash_lock);
 487	spin_unlock(&chunk_entry->lock);
 488	spin_unlock(&old_entry->lock);
 489	mutex_unlock(&old_entry->group->mark_mutex);
 490	fsnotify_destroy_mark(old_entry, audit_tree_group);
 491	fsnotify_put_mark(chunk_entry);	/* drop initial reference */
 492	fsnotify_put_mark(old_entry); /* pair to fsnotify_find mark_entry */
 493	return 0;
 494}
 495
 496static void audit_tree_log_remove_rule(struct audit_krule *rule)
 497{
 498	struct audit_buffer *ab;
 499
 500	ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
 501	if (unlikely(!ab))
 502		return;
 503	audit_log_format(ab, "op=remove_rule");
 504	audit_log_format(ab, " dir=");
 505	audit_log_untrustedstring(ab, rule->tree->pathname);
 506	audit_log_key(ab, rule->filterkey);
 507	audit_log_format(ab, " list=%d res=1", rule->listnr);
 508	audit_log_end(ab);
 509}
 510
 511static void kill_rules(struct audit_tree *tree)
 512{
 513	struct audit_krule *rule, *next;
 514	struct audit_entry *entry;
 515
 516	list_for_each_entry_safe(rule, next, &tree->rules, rlist) {
 517		entry = container_of(rule, struct audit_entry, rule);
 518
 519		list_del_init(&rule->rlist);
 520		if (rule->tree) {
 521			/* not a half-baked one */
 522			audit_tree_log_remove_rule(rule);
 523			if (entry->rule.exe)
 524				audit_remove_mark(entry->rule.exe);
 525			rule->tree = NULL;
 526			list_del_rcu(&entry->list);
 527			list_del(&entry->rule.list);
 528			call_rcu(&entry->rcu, audit_free_rule_rcu);
 529		}
 530	}
 531}
 532
 533/*
 534 * finish killing struct audit_tree
 535 */
 536static void prune_one(struct audit_tree *victim)
 537{
 538	spin_lock(&hash_lock);
 539	while (!list_empty(&victim->chunks)) {
 540		struct node *p;
 541
 542		p = list_entry(victim->chunks.next, struct node, list);
 543
 544		untag_chunk(p);
 545	}
 546	spin_unlock(&hash_lock);
 547	put_tree(victim);
 548}
 549
 550/* trim the uncommitted chunks from tree */
 551
 552static void trim_marked(struct audit_tree *tree)
 553{
 554	struct list_head *p, *q;
 555	spin_lock(&hash_lock);
 556	if (tree->goner) {
 557		spin_unlock(&hash_lock);
 558		return;
 559	}
 560	/* reorder */
 561	for (p = tree->chunks.next; p != &tree->chunks; p = q) {
 562		struct node *node = list_entry(p, struct node, list);
 563		q = p->next;
 564		if (node->index & (1U<<31)) {
 565			list_del_init(p);
 566			list_add(p, &tree->chunks);
 567		}
 568	}
 569
 570	while (!list_empty(&tree->chunks)) {
 571		struct node *node;
 572
 573		node = list_entry(tree->chunks.next, struct node, list);
 574
 575		/* have we run out of marked? */
 576		if (!(node->index & (1U<<31)))
 577			break;
 578
 579		untag_chunk(node);
 580	}
 581	if (!tree->root && !tree->goner) {
 582		tree->goner = 1;
 583		spin_unlock(&hash_lock);
 584		mutex_lock(&audit_filter_mutex);
 585		kill_rules(tree);
 586		list_del_init(&tree->list);
 587		mutex_unlock(&audit_filter_mutex);
 588		prune_one(tree);
 589	} else {
 590		spin_unlock(&hash_lock);
 591	}
 592}
 593
 594static void audit_schedule_prune(void);
 595
 596/* called with audit_filter_mutex */
 597int audit_remove_tree_rule(struct audit_krule *rule)
 598{
 599	struct audit_tree *tree;
 600	tree = rule->tree;
 601	if (tree) {
 602		spin_lock(&hash_lock);
 603		list_del_init(&rule->rlist);
 604		if (list_empty(&tree->rules) && !tree->goner) {
 605			tree->root = NULL;
 606			list_del_init(&tree->same_root);
 607			tree->goner = 1;
 608			list_move(&tree->list, &prune_list);
 609			rule->tree = NULL;
 610			spin_unlock(&hash_lock);
 611			audit_schedule_prune();
 612			return 1;
 613		}
 614		rule->tree = NULL;
 615		spin_unlock(&hash_lock);
 616		return 1;
 617	}
 618	return 0;
 619}
 620
 621static int compare_root(struct vfsmount *mnt, void *arg)
 622{
 623	return inode_to_key(d_backing_inode(mnt->mnt_root)) ==
 624	       (unsigned long)arg;
 625}
 626
 627void audit_trim_trees(void)
 628{
 629	struct list_head cursor;
 630
 631	mutex_lock(&audit_filter_mutex);
 632	list_add(&cursor, &tree_list);
 633	while (cursor.next != &tree_list) {
 634		struct audit_tree *tree;
 635		struct path path;
 636		struct vfsmount *root_mnt;
 637		struct node *node;
 638		int err;
 639
 640		tree = container_of(cursor.next, struct audit_tree, list);
 641		get_tree(tree);
 642		list_del(&cursor);
 643		list_add(&cursor, &tree->list);
 644		mutex_unlock(&audit_filter_mutex);
 645
 646		err = kern_path(tree->pathname, 0, &path);
 647		if (err)
 648			goto skip_it;
 649
 650		root_mnt = collect_mounts(&path);
 651		path_put(&path);
 652		if (IS_ERR(root_mnt))
 653			goto skip_it;
 654
 655		spin_lock(&hash_lock);
 656		list_for_each_entry(node, &tree->chunks, list) {
 657			struct audit_chunk *chunk = find_chunk(node);
  658			/* this could be NULL if the watch is dying elsewhere... */
 659			node->index |= 1U<<31;
 660			if (iterate_mounts(compare_root,
 661					   (void *)chunk_to_key(chunk),
 662					   root_mnt))
 663				node->index &= ~(1U<<31);
 664		}
 665		spin_unlock(&hash_lock);
 666		trim_marked(tree);
 667		drop_collected_mounts(root_mnt);
 668skip_it:
 669		put_tree(tree);
 670		mutex_lock(&audit_filter_mutex);
 671	}
 672	list_del(&cursor);
 673	mutex_unlock(&audit_filter_mutex);
 674}
 675
 676int audit_make_tree(struct audit_krule *rule, char *pathname, u32 op)
 677{
 678
 679	if (pathname[0] != '/' ||
 680	    rule->listnr != AUDIT_FILTER_EXIT ||
 681	    op != Audit_equal ||
 682	    rule->inode_f || rule->watch || rule->tree)
 683		return -EINVAL;
 684	rule->tree = alloc_tree(pathname);
 685	if (!rule->tree)
 686		return -ENOMEM;
 687	return 0;
 688}
 689
 690void audit_put_tree(struct audit_tree *tree)
 691{
 692	put_tree(tree);
 693}
 694
 695static int tag_mount(struct vfsmount *mnt, void *arg)
 696{
 697	return tag_chunk(d_backing_inode(mnt->mnt_root), arg);
 698}
 699
 700/*
 701 * That gets run when evict_chunk() ends up needing to kill audit_tree.
 702 * Runs from a separate thread.
 703 */
 704static int prune_tree_thread(void *unused)
 705{
 706	for (;;) {
 707		if (list_empty(&prune_list)) {
 708			set_current_state(TASK_INTERRUPTIBLE);
 709			schedule();
 710		}
 711
 712		audit_ctl_lock();
 713		mutex_lock(&audit_filter_mutex);
 714
 715		while (!list_empty(&prune_list)) {
 716			struct audit_tree *victim;
 717
 718			victim = list_entry(prune_list.next,
 719					struct audit_tree, list);
 720			list_del_init(&victim->list);
 721
 722			mutex_unlock(&audit_filter_mutex);
 723
 724			prune_one(victim);
 725
 726			mutex_lock(&audit_filter_mutex);
 727		}
 728
 729		mutex_unlock(&audit_filter_mutex);
 730		audit_ctl_unlock();
 731	}
 732	return 0;
 733}
 734
 735static int audit_launch_prune(void)
 736{
 737	if (prune_thread)
 738		return 0;
 739	prune_thread = kthread_run(prune_tree_thread, NULL,
 740				"audit_prune_tree");
 741	if (IS_ERR(prune_thread)) {
 742		pr_err("cannot start thread audit_prune_tree");
 743		prune_thread = NULL;
 744		return -ENOMEM;
 745	}
 746	return 0;
 747}
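
Compared with v3.15, which spawned a fresh kthread for every prune, this version keeps one long-lived prune_thread and pokes it with wake_up_process() (see audit_schedule_prune() below). The sleep in prune_tree_thread() is safe against lost wakeups: if wake_up_process() lands between the list_empty() check and schedule(), it flips the task back to TASK_RUNNING, so schedule() returns immediately instead of parking. A minimal sketch of the handshake, with hypothetical names:

	static struct task_struct *demo_thread;

	static int demo_worker(void *unused)
	{
		for (;;) {
			if (list_empty(&prune_list)) {
				set_current_state(TASK_INTERRUPTIBLE);
				schedule();	/* park until woken */
			}
			/* ... drain prune_list under the proper locks ... */
		}
		return 0;
	}

	static int demo_launch(void)
	{
		demo_thread = kthread_run(demo_worker, NULL, "demo_prune");
		return IS_ERR(demo_thread) ? PTR_ERR(demo_thread) : 0;
	}

	static void demo_kick(void)
	{
		wake_up_process(demo_thread);	/* no-op if already running */
	}
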
 748
 749/* called with audit_filter_mutex */
 750int audit_add_tree_rule(struct audit_krule *rule)
 751{
 752	struct audit_tree *seed = rule->tree, *tree;
 753	struct path path;
 754	struct vfsmount *mnt;
 755	int err;
 756
 757	rule->tree = NULL;
 758	list_for_each_entry(tree, &tree_list, list) {
 759		if (!strcmp(seed->pathname, tree->pathname)) {
 760			put_tree(seed);
 761			rule->tree = tree;
 762			list_add(&rule->rlist, &tree->rules);
 763			return 0;
 764		}
 765	}
 766	tree = seed;
 767	list_add(&tree->list, &tree_list);
 768	list_add(&rule->rlist, &tree->rules);
 769	/* do not set rule->tree yet */
 770	mutex_unlock(&audit_filter_mutex);
 771
 772	if (unlikely(!prune_thread)) {
 773		err = audit_launch_prune();
 774		if (err)
 775			goto Err;
 776	}
 777
 778	err = kern_path(tree->pathname, 0, &path);
 779	if (err)
 780		goto Err;
 781	mnt = collect_mounts(&path);
 782	path_put(&path);
 783	if (IS_ERR(mnt)) {
 784		err = PTR_ERR(mnt);
 785		goto Err;
 786	}
 787
 788	get_tree(tree);
 789	err = iterate_mounts(tag_mount, tree, mnt);
 790	drop_collected_mounts(mnt);
 791
 792	if (!err) {
 793		struct node *node;
 794		spin_lock(&hash_lock);
 795		list_for_each_entry(node, &tree->chunks, list)
 796			node->index &= ~(1U<<31);
 797		spin_unlock(&hash_lock);
 798	} else {
 799		trim_marked(tree);
 800		goto Err;
 801	}
 802
 803	mutex_lock(&audit_filter_mutex);
 804	if (list_empty(&rule->rlist)) {
 805		put_tree(tree);
 806		return -ENOENT;
 807	}
 808	rule->tree = tree;
 809	put_tree(tree);
 810
 811	return 0;
 812Err:
 813	mutex_lock(&audit_filter_mutex);
 814	list_del_init(&tree->list);
 815	list_del_init(&tree->rules);
 816	put_tree(tree);
 817	return err;
 818}
 819
 820int audit_tag_tree(char *old, char *new)
 821{
 822	struct list_head cursor, barrier;
 823	int failed = 0;
 824	struct path path1, path2;
 825	struct vfsmount *tagged;
 826	int err;
 827
 828	err = kern_path(new, 0, &path2);
 829	if (err)
 830		return err;
 831	tagged = collect_mounts(&path2);
 832	path_put(&path2);
 833	if (IS_ERR(tagged))
 834		return PTR_ERR(tagged);
 835
 836	err = kern_path(old, 0, &path1);
 837	if (err) {
 838		drop_collected_mounts(tagged);
 839		return err;
 840	}
 841
 842	mutex_lock(&audit_filter_mutex);
 843	list_add(&barrier, &tree_list);
 844	list_add(&cursor, &barrier);
 845
 846	while (cursor.next != &tree_list) {
 847		struct audit_tree *tree;
 848		int good_one = 0;
 849
 850		tree = container_of(cursor.next, struct audit_tree, list);
 851		get_tree(tree);
 852		list_del(&cursor);
 853		list_add(&cursor, &tree->list);
 854		mutex_unlock(&audit_filter_mutex);
 855
 856		err = kern_path(tree->pathname, 0, &path2);
 857		if (!err) {
 858			good_one = path_is_under(&path1, &path2);
 859			path_put(&path2);
 860		}
 861
 862		if (!good_one) {
 863			put_tree(tree);
 864			mutex_lock(&audit_filter_mutex);
 865			continue;
 866		}
 867
 868		failed = iterate_mounts(tag_mount, tree, tagged);
 869		if (failed) {
 870			put_tree(tree);
 871			mutex_lock(&audit_filter_mutex);
 872			break;
 873		}
 874
 875		mutex_lock(&audit_filter_mutex);
 876		spin_lock(&hash_lock);
 877		if (!tree->goner) {
 878			list_del(&tree->list);
 879			list_add(&tree->list, &tree_list);
 880		}
 881		spin_unlock(&hash_lock);
 882		put_tree(tree);
 883	}
 884
 885	while (barrier.prev != &tree_list) {
 886		struct audit_tree *tree;
 887
 888		tree = container_of(barrier.prev, struct audit_tree, list);
 889		get_tree(tree);
 890		list_del(&tree->list);
 891		list_add(&tree->list, &barrier);
 892		mutex_unlock(&audit_filter_mutex);
 893
 894		if (!failed) {
 895			struct node *node;
 896			spin_lock(&hash_lock);
 897			list_for_each_entry(node, &tree->chunks, list)
 898				node->index &= ~(1U<<31);
 899			spin_unlock(&hash_lock);
 900		} else {
 901			trim_marked(tree);
 902		}
 903
 904		put_tree(tree);
 905		mutex_lock(&audit_filter_mutex);
 906	}
 907	list_del(&barrier);
 908	list_del(&cursor);
 909	mutex_unlock(&audit_filter_mutex);
 910	path_put(&path1);
 911	drop_collected_mounts(tagged);
 912	return failed;
 913}
 914
 915
 916static void audit_schedule_prune(void)
 917{
 918	wake_up_process(prune_thread);
 919}
 920
 921/*
 922 * ... and that one is done if evict_chunk() decides to delay until the end
 923 * of syscall.  Runs synchronously.
 924 */
 925void audit_kill_trees(struct list_head *list)
 926{
 927	audit_ctl_lock();
 928	mutex_lock(&audit_filter_mutex);
 929
 930	while (!list_empty(list)) {
 931		struct audit_tree *victim;
 932
 933		victim = list_entry(list->next, struct audit_tree, list);
 934		kill_rules(victim);
 935		list_del_init(&victim->list);
 936
 937		mutex_unlock(&audit_filter_mutex);
 938
 939		prune_one(victim);
 940
 941		mutex_lock(&audit_filter_mutex);
 942	}
 943
 944	mutex_unlock(&audit_filter_mutex);
 945	audit_ctl_unlock();
 946}
 947
 948/*
 949 *  Here comes the stuff asynchronous to auditctl operations
 950 */
 951
 952static void evict_chunk(struct audit_chunk *chunk)
 953{
 954	struct audit_tree *owner;
 955	struct list_head *postponed = audit_killed_trees();
 956	int need_prune = 0;
 957	int n;
 958
 959	if (chunk->dead)
 960		return;
 961
 962	chunk->dead = 1;
 963	mutex_lock(&audit_filter_mutex);
 964	spin_lock(&hash_lock);
 965	while (!list_empty(&chunk->trees)) {
 966		owner = list_entry(chunk->trees.next,
 967				   struct audit_tree, same_root);
 968		owner->goner = 1;
 969		owner->root = NULL;
 970		list_del_init(&owner->same_root);
 971		spin_unlock(&hash_lock);
 972		if (!postponed) {
 973			kill_rules(owner);
 974			list_move(&owner->list, &prune_list);
 975			need_prune = 1;
 976		} else {
 977			list_move(&owner->list, postponed);
 978		}
 979		spin_lock(&hash_lock);
 980	}
 981	list_del_rcu(&chunk->hash);
 982	for (n = 0; n < chunk->count; n++)
 983		list_del_init(&chunk->owners[n].list);
 984	spin_unlock(&hash_lock);
 985	mutex_unlock(&audit_filter_mutex);
 986	if (need_prune)
 987		audit_schedule_prune();
 988}
 989
 990static int audit_tree_handle_event(struct fsnotify_group *group,
 991				   struct inode *to_tell,
 992				   struct fsnotify_mark *inode_mark,
 993				   struct fsnotify_mark *vfsmount_mark,
 994				   u32 mask, const void *data, int data_type,
 995				   const unsigned char *file_name, u32 cookie,
 996				   struct fsnotify_iter_info *iter_info)
 997{
 998	return 0;
 999}
1000
1001static void audit_tree_freeing_mark(struct fsnotify_mark *entry, struct fsnotify_group *group)
1002{
1003	struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark);
1004
1005	evict_chunk(chunk);
1006
1007	/*
1008	 * We are guaranteed to have at least one reference to the mark from
1009	 * either the inode or the caller of fsnotify_destroy_mark().
1010	 */
1011	BUG_ON(refcount_read(&entry->refcnt) < 1);
1012}
1013
1014static const struct fsnotify_ops audit_tree_ops = {
1015	.handle_event = audit_tree_handle_event,
1016	.freeing_mark = audit_tree_freeing_mark,
1017	.free_mark = audit_tree_destroy_watch,
1018};
1019
1020static int __init audit_tree_init(void)
1021{
1022	int i;
1023
1024	audit_tree_group = fsnotify_alloc_group(&audit_tree_ops);
1025	if (IS_ERR(audit_tree_group))
1026		audit_panic("cannot initialize fsnotify group for rectree watches");
1027
1028	for (i = 0; i < HASH_SIZE; i++)
1029		INIT_LIST_HEAD(&chunk_hash_heads[i]);
1030
1031	return 0;
1032}
1033__initcall(audit_tree_init);