// SPDX-License-Identifier: GPL-2.0
#include "audit.h"
#include <linux/fsnotify_backend.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/kthread.h>
#include <linux/refcount.h>
#include <linux/slab.h>

struct audit_tree;
struct audit_chunk;

struct audit_tree {
	refcount_t count;
	int goner;
	struct audit_chunk *root;
	struct list_head chunks;
	struct list_head rules;
	struct list_head list;
	struct list_head same_root;
	struct rcu_head head;
	char pathname[];
};

struct audit_chunk {
	struct list_head hash;
	unsigned long key;
	struct fsnotify_mark *mark;
	struct list_head trees;		/* with root here */
	int count;
	atomic_long_t refs;
	struct rcu_head head;
	struct node {
		struct list_head list;
		struct audit_tree *owner;
		unsigned index;		/* index; upper bit indicates 'will prune' */
	} owners[];
};

struct audit_tree_mark {
	struct fsnotify_mark mark;
	struct audit_chunk *chunk;
};

static LIST_HEAD(tree_list);
static LIST_HEAD(prune_list);
static struct task_struct *prune_thread;

/*
 * One struct chunk is attached to each inode of interest through
 * audit_tree_mark (fsnotify mark). We replace struct chunk on tagging /
 * untagging; the mark is stable as long as there is a chunk attached. The
 * association between mark and chunk is protected by hash_lock and
 * audit_tree_group->mark_mutex. Thus as long as we hold
 * audit_tree_group->mark_mutex and check that the mark is alive via the
 * FSNOTIFY_MARK_FLAG_ATTACHED flag, we are sure the mark points to
 * the current chunk.
 *
 * Rules have a pointer to struct audit_tree.
 * Rules have struct list_head rlist forming a list of rules over
 * the same tree.
 * References to struct chunk are collected at audit_inode{,_child}()
 * time and used in AUDIT_TREE rule matching.
 * These references are dropped at the same time we are calling
 * audit_free_names(), etc.
 *
 * Cyclic lists galore:
 * tree.chunks anchors chunk.owners[].list		hash_lock
 * tree.rules anchors rule.rlist			audit_filter_mutex
 * chunk.trees anchors tree.same_root			hash_lock
 * chunk.hash is a hash with middle bits of watch.inode as
 * a hash function.					RCU, hash_lock
 *
 * tree is refcounted; one reference for "some rules on rules_list refer to
 * it", one for each chunk with pointer to it.
 *
 * chunk is refcounted by embedded .refs. Mark associated with the chunk holds
 * one chunk reference. This reference is dropped either when a mark is going
 * to be freed (corresponding inode goes away) or when chunk attached to the
 * mark gets replaced. This reference must be dropped using
 * audit_mark_put_chunk() to make sure the reference is dropped only after RCU
 * grace period as it protects RCU readers of the hash table.
 *
 * node.index allows us to get from node.list to the containing chunk.
 * MSB of that sucker is stolen to mark taggings that we might have to
 * revert - several operations have very unpleasant cleanup logics and
 * that makes a difference. Some.
 */

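/*
 * A minimal sketch of the read side described above, assuming a caller on
 * the audit_inode{,_child}() / rule-matching path (names hypothetical):
 *
 *	rcu_read_lock();
 *	chunk = audit_tree_lookup(inode);
 *	rcu_read_unlock();
 *	if (chunk) {
 *		matched = audit_tree_match(chunk, rule->tree);
 *		audit_put_chunk(chunk);		(drops the lookup reference)
 *	}
 */
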
static struct fsnotify_group *audit_tree_group;
static struct kmem_cache *audit_tree_mark_cachep __read_mostly;

static struct audit_tree *alloc_tree(const char *s)
{
	struct audit_tree *tree;

	tree = kmalloc(sizeof(struct audit_tree) + strlen(s) + 1, GFP_KERNEL);
	if (tree) {
		refcount_set(&tree->count, 1);
		tree->goner = 0;
		INIT_LIST_HEAD(&tree->chunks);
		INIT_LIST_HEAD(&tree->rules);
		INIT_LIST_HEAD(&tree->list);
		INIT_LIST_HEAD(&tree->same_root);
		tree->root = NULL;
		strcpy(tree->pathname, s);
	}
	return tree;
}

static inline void get_tree(struct audit_tree *tree)
{
	refcount_inc(&tree->count);
}

static inline void put_tree(struct audit_tree *tree)
{
	if (refcount_dec_and_test(&tree->count))
		kfree_rcu(tree, head);
}

/* to avoid bringing the entire thing into audit.h */
const char *audit_tree_path(struct audit_tree *tree)
{
	return tree->pathname;
}

static void free_chunk(struct audit_chunk *chunk)
{
	int i;

	for (i = 0; i < chunk->count; i++) {
		if (chunk->owners[i].owner)
			put_tree(chunk->owners[i].owner);
	}
	kfree(chunk);
}

void audit_put_chunk(struct audit_chunk *chunk)
{
	if (atomic_long_dec_and_test(&chunk->refs))
		free_chunk(chunk);
}

static void __put_chunk(struct rcu_head *rcu)
{
	struct audit_chunk *chunk = container_of(rcu, struct audit_chunk, head);
	audit_put_chunk(chunk);
}

/*
 * Drop reference to the chunk that was held by the mark. This is the reference
 * that gets dropped after we've removed the chunk from the hash table and we
 * use it to make sure chunk cannot be freed before RCU grace period expires.
 */
static void audit_mark_put_chunk(struct audit_chunk *chunk)
{
	call_rcu(&chunk->head, __put_chunk);
}

static inline struct audit_tree_mark *audit_mark(struct fsnotify_mark *mark)
{
	return container_of(mark, struct audit_tree_mark, mark);
}

static struct audit_chunk *mark_chunk(struct fsnotify_mark *mark)
{
	return audit_mark(mark)->chunk;
}

static void audit_tree_destroy_watch(struct fsnotify_mark *mark)
{
	kmem_cache_free(audit_tree_mark_cachep, audit_mark(mark));
}

static struct fsnotify_mark *alloc_mark(void)
{
	struct audit_tree_mark *amark;

	amark = kmem_cache_zalloc(audit_tree_mark_cachep, GFP_KERNEL);
	if (!amark)
		return NULL;
	fsnotify_init_mark(&amark->mark, audit_tree_group);
	amark->mark.mask = FS_IN_IGNORED;
	return &amark->mark;
}

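/*
 * A chunk is sized for its owners[] flexible array up front; each slot's
 * index is initialized to its array position so find_chunk() can recover
 * the chunk from any node by pointer arithmetic.
 */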
static struct audit_chunk *alloc_chunk(int count)
{
	struct audit_chunk *chunk;
	size_t size;
	int i;

	size = offsetof(struct audit_chunk, owners) + count * sizeof(struct node);
	chunk = kzalloc(size, GFP_KERNEL);
	if (!chunk)
		return NULL;

	INIT_LIST_HEAD(&chunk->hash);
	INIT_LIST_HEAD(&chunk->trees);
	chunk->count = count;
	atomic_long_set(&chunk->refs, 1);
	for (i = 0; i < count; i++) {
		INIT_LIST_HEAD(&chunk->owners[i].list);
		chunk->owners[i].index = i;
	}
	return chunk;
}

enum {HASH_SIZE = 128};
static struct list_head chunk_hash_heads[HASH_SIZE];
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(hash_lock);

/* Function to return search key in our hash from inode. */
static unsigned long inode_to_key(const struct inode *inode)
{
	/* Use address pointed to by connector->obj as the key */
	return (unsigned long)&inode->i_fsnotify_marks;
}

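/*
 * The key is a kernel address, so its low bits carry little entropy;
 * dividing by L1_CACHE_BYTES discards them before reducing modulo
 * HASH_SIZE.
 */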
static inline struct list_head *chunk_hash(unsigned long key)
{
	unsigned long n = key / L1_CACHE_BYTES;
	return chunk_hash_heads + n % HASH_SIZE;
}

/* hash_lock & mark->group->mark_mutex are held by the caller */
static void insert_hash(struct audit_chunk *chunk)
{
	struct list_head *list;

	/*
	 * Make sure chunk is fully initialized before making it visible in the
	 * hash. Pairs with a data dependency barrier in READ_ONCE() in
	 * audit_tree_lookup().
	 */
	smp_wmb();
	WARN_ON_ONCE(!chunk->key);
	list = chunk_hash(chunk->key);
	list_add_rcu(&chunk->hash, list);
}

/* called under rcu_read_lock */
struct audit_chunk *audit_tree_lookup(const struct inode *inode)
{
	unsigned long key = inode_to_key(inode);
	struct list_head *list = chunk_hash(key);
	struct audit_chunk *p;

	list_for_each_entry_rcu(p, list, hash) {
		/*
		 * We use a data dependency barrier in READ_ONCE() to make sure
		 * the chunk we see is fully initialized.
		 */
		if (READ_ONCE(p->key) == key) {
			atomic_long_inc(&p->refs);
			return p;
		}
	}
	return NULL;
}

bool audit_tree_match(struct audit_chunk *chunk, struct audit_tree *tree)
{
	int n;
	for (n = 0; n < chunk->count; n++)
		if (chunk->owners[n].owner == tree)
			return true;
	return false;
}

/* tagging and untagging inodes with trees */

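/*
 * node.index (sans the MSB) is the node's position in owners[], so walking
 * back that many elements lands on owners[0], from which container_of()
 * recovers the enclosing chunk.
 */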
static struct audit_chunk *find_chunk(struct node *p)
{
	int index = p->index & ~(1U<<31);
	p -= index;
	return container_of(p, struct audit_chunk, owners[0]);
}

static void replace_mark_chunk(struct fsnotify_mark *mark,
			       struct audit_chunk *chunk)
{
	struct audit_chunk *old;

	assert_spin_locked(&hash_lock);
	old = mark_chunk(mark);
	audit_mark(mark)->chunk = chunk;
	if (chunk)
		chunk->mark = mark;
	if (old)
		old->mark = NULL;
}

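/*
 * Copy the live owners of @old into @new (compacting out empty slots),
 * repoint the mark and the trees rooted here, and finally swap the chunks
 * in the hash so RCU readers switch over atomically.
 */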
static void replace_chunk(struct audit_chunk *new, struct audit_chunk *old)
{
	struct audit_tree *owner;
	int i, j;

	new->key = old->key;
	list_splice_init(&old->trees, &new->trees);
	list_for_each_entry(owner, &new->trees, same_root)
		owner->root = new;
	for (i = j = 0; j < old->count; i++, j++) {
		if (!old->owners[j].owner) {
			i--;
			continue;
		}
		owner = old->owners[j].owner;
		new->owners[i].owner = owner;
		new->owners[i].index = old->owners[j].index - j + i;
		get_tree(owner);
		list_replace_init(&old->owners[j].list, &new->owners[i].list);
	}
	replace_mark_chunk(old->mark, new);
	/*
	 * Make sure chunk is fully initialized before making it visible in the
	 * hash. Pairs with a data dependency barrier in READ_ONCE() in
	 * audit_tree_lookup().
	 */
	smp_wmb();
	list_replace_rcu(&old->hash, &new->hash);
}

static void remove_chunk_node(struct audit_chunk *chunk, struct node *p)
{
	struct audit_tree *owner = p->owner;

	if (owner->root == chunk) {
		list_del_init(&owner->same_root);
		owner->root = NULL;
	}
	list_del_init(&p->list);
	p->owner = NULL;
	put_tree(owner);
}

static int chunk_count_trees(struct audit_chunk *chunk)
{
	int i;
	int ret = 0;

	for (i = 0; i < chunk->count; i++)
		if (chunk->owners[i].owner)
			ret++;
	return ret;
}

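/*
 * Remove one tree's node from a chunk: shrink the chunk if other trees
 * still reference it, or tear down the mark and chunk entirely when this
 * was the last owner.
 */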
static void untag_chunk(struct audit_chunk *chunk, struct fsnotify_mark *mark)
{
	struct audit_chunk *new;
	int size;

	mutex_lock(&audit_tree_group->mark_mutex);
	/*
	 * mark_mutex stabilizes the chunk attached to the mark, so we can
	 * check whether it changed while we had dropped hash_lock.
	 */
	if (!(mark->flags & FSNOTIFY_MARK_FLAG_ATTACHED) ||
	    mark_chunk(mark) != chunk)
		goto out_mutex;

	size = chunk_count_trees(chunk);
	if (!size) {
		spin_lock(&hash_lock);
		list_del_init(&chunk->trees);
		list_del_rcu(&chunk->hash);
		replace_mark_chunk(mark, NULL);
		spin_unlock(&hash_lock);
		fsnotify_detach_mark(mark);
		mutex_unlock(&audit_tree_group->mark_mutex);
		audit_mark_put_chunk(chunk);
		fsnotify_free_mark(mark);
		return;
	}

	new = alloc_chunk(size);
	if (!new)
		goto out_mutex;

	spin_lock(&hash_lock);
	/*
	 * This has to go last when updating chunk as once replace_chunk() is
	 * called, new RCU readers can see the new chunk.
	 */
	replace_chunk(new, chunk);
	spin_unlock(&hash_lock);
	mutex_unlock(&audit_tree_group->mark_mutex);
	audit_mark_put_chunk(chunk);
	return;

out_mutex:
	mutex_unlock(&audit_tree_group->mark_mutex);
}

/* Call with group->mark_mutex held, releases it */
static int create_chunk(struct inode *inode, struct audit_tree *tree)
{
	struct fsnotify_mark *mark;
	struct audit_chunk *chunk = alloc_chunk(1);

	if (!chunk) {
		mutex_unlock(&audit_tree_group->mark_mutex);
		return -ENOMEM;
	}

	mark = alloc_mark();
	if (!mark) {
		mutex_unlock(&audit_tree_group->mark_mutex);
		kfree(chunk);
		return -ENOMEM;
	}

	if (fsnotify_add_inode_mark_locked(mark, inode, 0)) {
		mutex_unlock(&audit_tree_group->mark_mutex);
		fsnotify_put_mark(mark);
		kfree(chunk);
		return -ENOSPC;
	}

	spin_lock(&hash_lock);
	if (tree->goner) {
		spin_unlock(&hash_lock);
		fsnotify_detach_mark(mark);
		mutex_unlock(&audit_tree_group->mark_mutex);
		fsnotify_free_mark(mark);
		fsnotify_put_mark(mark);
		kfree(chunk);
		return 0;
	}
	replace_mark_chunk(mark, chunk);
	chunk->owners[0].index = (1U << 31);
	chunk->owners[0].owner = tree;
	get_tree(tree);
	list_add(&chunk->owners[0].list, &tree->chunks);
	if (!tree->root) {
		tree->root = chunk;
		list_add(&tree->same_root, &chunk->trees);
	}
	chunk->key = inode_to_key(inode);
	/*
	 * Inserting into the hash table has to go last as once we do that RCU
	 * readers can see the chunk.
	 */
	insert_hash(chunk);
	spin_unlock(&hash_lock);
	mutex_unlock(&audit_tree_group->mark_mutex);
	/*
	 * Drop our initial reference. When the mark we point to is getting
	 * freed, we get notified through the ->freeing_mark callback and
	 * clean up the chunk pointing to this mark.
	 */
	fsnotify_put_mark(mark);
	return 0;
}

/* the first tagged inode becomes root of tree */
static int tag_chunk(struct inode *inode, struct audit_tree *tree)
{
	struct fsnotify_mark *mark;
	struct audit_chunk *chunk, *old;
	struct node *p;
	int n;

	mutex_lock(&audit_tree_group->mark_mutex);
	mark = fsnotify_find_mark(&inode->i_fsnotify_marks, audit_tree_group);
	if (!mark)
		return create_chunk(inode, tree);

	/*
	 * The mark found is guaranteed to be attached, and mark_mutex protects
	 * it from getting detached; thus a chunk is guaranteed to be attached
	 * to the mark.
	 */
	/* are we already there? */
	spin_lock(&hash_lock);
	old = mark_chunk(mark);
	for (n = 0; n < old->count; n++) {
		if (old->owners[n].owner == tree) {
			spin_unlock(&hash_lock);
			mutex_unlock(&audit_tree_group->mark_mutex);
			fsnotify_put_mark(mark);
			return 0;
		}
	}
	spin_unlock(&hash_lock);

	chunk = alloc_chunk(old->count + 1);
	if (!chunk) {
		mutex_unlock(&audit_tree_group->mark_mutex);
		fsnotify_put_mark(mark);
		return -ENOMEM;
	}

	spin_lock(&hash_lock);
	if (tree->goner) {
		spin_unlock(&hash_lock);
		mutex_unlock(&audit_tree_group->mark_mutex);
		fsnotify_put_mark(mark);
		kfree(chunk);
		return 0;
	}
	p = &chunk->owners[chunk->count - 1];
	p->index = (chunk->count - 1) | (1U<<31);
	p->owner = tree;
	get_tree(tree);
	list_add(&p->list, &tree->chunks);
	if (!tree->root) {
		tree->root = chunk;
		list_add(&tree->same_root, &chunk->trees);
	}
	/*
	 * This has to go last when updating chunk as once replace_chunk() is
	 * called, new RCU readers can see the new chunk.
	 */
	replace_chunk(chunk, old);
	spin_unlock(&hash_lock);
	mutex_unlock(&audit_tree_group->mark_mutex);
	fsnotify_put_mark(mark); /* pair to fsnotify_find_mark */
	audit_mark_put_chunk(old);

	return 0;
}

static void audit_tree_log_remove_rule(struct audit_context *context,
				       struct audit_krule *rule)
{
	struct audit_buffer *ab;

	if (!audit_enabled)
		return;
	ab = audit_log_start(context, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
	if (unlikely(!ab))
		return;
	audit_log_format(ab, "op=remove_rule dir=");
	audit_log_untrustedstring(ab, rule->tree->pathname);
	audit_log_key(ab, rule->filterkey);
	audit_log_format(ab, " list=%d res=1", rule->listnr);
	audit_log_end(ab);
}

static void kill_rules(struct audit_context *context, struct audit_tree *tree)
{
	struct audit_krule *rule, *next;
	struct audit_entry *entry;

	list_for_each_entry_safe(rule, next, &tree->rules, rlist) {
		entry = container_of(rule, struct audit_entry, rule);

		list_del_init(&rule->rlist);
		if (rule->tree) {
			/* not a half-baked one */
			audit_tree_log_remove_rule(context, rule);
			if (entry->rule.exe)
				audit_remove_mark(entry->rule.exe);
			rule->tree = NULL;
			list_del_rcu(&entry->list);
			list_del(&entry->rule.list);
			call_rcu(&entry->rcu, audit_free_rule_rcu);
		}
	}
}

/*
 * Remove tree from chunks. If 'tagged' is set, remove tree only from tagged
 * chunks. The function expects tagged chunks are all at the beginning of the
 * chunks list.
 */
static void prune_tree_chunks(struct audit_tree *victim, bool tagged)
{
	spin_lock(&hash_lock);
	while (!list_empty(&victim->chunks)) {
		struct node *p;
		struct audit_chunk *chunk;
		struct fsnotify_mark *mark;

		p = list_first_entry(&victim->chunks, struct node, list);
		/* have we run out of marked? */
		if (tagged && !(p->index & (1U<<31)))
			break;
		chunk = find_chunk(p);
		mark = chunk->mark;
		remove_chunk_node(chunk, p);
		/* Racing with audit_tree_freeing_mark()? */
		if (!mark)
			continue;
		fsnotify_get_mark(mark);
		spin_unlock(&hash_lock);

		untag_chunk(chunk, mark);
		fsnotify_put_mark(mark);

		spin_lock(&hash_lock);
	}
	spin_unlock(&hash_lock);
	put_tree(victim);
}

/*
 * finish killing struct audit_tree
 */
static void prune_one(struct audit_tree *victim)
{
	prune_tree_chunks(victim, false);
}

/* trim the uncommitted chunks from tree */

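/*
 * Nodes still carrying the 'will prune' bit are first moved to the front
 * of tree->chunks, then prune_tree_chunks(tree, true) strips exactly those
 * tagged entries.
 */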
static void trim_marked(struct audit_tree *tree)
{
	struct list_head *p, *q;
	spin_lock(&hash_lock);
	if (tree->goner) {
		spin_unlock(&hash_lock);
		return;
	}
	/* reorder */
	for (p = tree->chunks.next; p != &tree->chunks; p = q) {
		struct node *node = list_entry(p, struct node, list);
		q = p->next;
		if (node->index & (1U<<31)) {
			list_del_init(p);
			list_add(p, &tree->chunks);
		}
	}
	spin_unlock(&hash_lock);

	prune_tree_chunks(tree, true);

	spin_lock(&hash_lock);
	if (!tree->root && !tree->goner) {
		tree->goner = 1;
		spin_unlock(&hash_lock);
		mutex_lock(&audit_filter_mutex);
		kill_rules(audit_context(), tree);
		list_del_init(&tree->list);
		mutex_unlock(&audit_filter_mutex);
		prune_one(tree);
	} else {
		spin_unlock(&hash_lock);
	}
}

static void audit_schedule_prune(void);

/* called with audit_filter_mutex */
int audit_remove_tree_rule(struct audit_krule *rule)
{
	struct audit_tree *tree;
	tree = rule->tree;
	if (tree) {
		spin_lock(&hash_lock);
		list_del_init(&rule->rlist);
		if (list_empty(&tree->rules) && !tree->goner) {
			tree->root = NULL;
			list_del_init(&tree->same_root);
			tree->goner = 1;
			list_move(&tree->list, &prune_list);
			rule->tree = NULL;
			spin_unlock(&hash_lock);
			audit_schedule_prune();
			return 1;
		}
		rule->tree = NULL;
		spin_unlock(&hash_lock);
		return 1;
	}
	return 0;
}

static int compare_root(struct vfsmount *mnt, void *arg)
{
	return inode_to_key(d_backing_inode(mnt->mnt_root)) ==
	       (unsigned long)arg;
}

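/*
 * Walk tree_list with a dummy 'cursor' entry so audit_filter_mutex can be
 * dropped while each tree is re-resolved; the cursor keeps our place even
 * if neighbouring trees are removed in the meantime.
 */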
void audit_trim_trees(void)
{
	struct list_head cursor;

	mutex_lock(&audit_filter_mutex);
	list_add(&cursor, &tree_list);
	while (cursor.next != &tree_list) {
		struct audit_tree *tree;
		struct path path;
		struct vfsmount *root_mnt;
		struct node *node;
		int err;

		tree = container_of(cursor.next, struct audit_tree, list);
		get_tree(tree);
		list_del(&cursor);
		list_add(&cursor, &tree->list);
		mutex_unlock(&audit_filter_mutex);

		err = kern_path(tree->pathname, 0, &path);
		if (err)
			goto skip_it;

		root_mnt = collect_mounts(&path);
		path_put(&path);
		if (IS_ERR(root_mnt))
			goto skip_it;

		spin_lock(&hash_lock);
		list_for_each_entry(node, &tree->chunks, list) {
			struct audit_chunk *chunk = find_chunk(node);
			/* this could be NULL if the watch is dying elsewhere... */
			node->index |= 1U<<31;
			if (iterate_mounts(compare_root,
					   (void *)(chunk->key),
					   root_mnt))
				node->index &= ~(1U<<31);
		}
		spin_unlock(&hash_lock);
		trim_marked(tree);
		drop_collected_mounts(root_mnt);
skip_it:
		put_tree(tree);
		mutex_lock(&audit_filter_mutex);
	}
	list_del(&cursor);
	mutex_unlock(&audit_filter_mutex);
}

int audit_make_tree(struct audit_krule *rule, char *pathname, u32 op)
{

	if (pathname[0] != '/' ||
	    rule->listnr != AUDIT_FILTER_EXIT ||
	    op != Audit_equal ||
	    rule->inode_f || rule->watch || rule->tree)
		return -EINVAL;
	rule->tree = alloc_tree(pathname);
	if (!rule->tree)
		return -ENOMEM;
	return 0;
}

void audit_put_tree(struct audit_tree *tree)
{
	put_tree(tree);
}

static int tag_mount(struct vfsmount *mnt, void *arg)
{
	return tag_chunk(d_backing_inode(mnt->mnt_root), arg);
}

/*
 * That gets run when evict_chunk() ends up needing to kill audit_tree.
 * Runs from a separate thread.
 */
static int prune_tree_thread(void *unused)
{
	for (;;) {
		if (list_empty(&prune_list)) {
			set_current_state(TASK_INTERRUPTIBLE);
			schedule();
		}

		audit_ctl_lock();
		mutex_lock(&audit_filter_mutex);

		while (!list_empty(&prune_list)) {
			struct audit_tree *victim;

			victim = list_entry(prune_list.next,
					struct audit_tree, list);
			list_del_init(&victim->list);

			mutex_unlock(&audit_filter_mutex);

			prune_one(victim);

			mutex_lock(&audit_filter_mutex);
		}

		mutex_unlock(&audit_filter_mutex);
		audit_ctl_unlock();
	}
	return 0;
}

static int audit_launch_prune(void)
{
	if (prune_thread)
		return 0;
	prune_thread = kthread_run(prune_tree_thread, NULL,
				   "audit_prune_tree");
	if (IS_ERR(prune_thread)) {
		pr_err("cannot start thread audit_prune_tree\n");
		prune_thread = NULL;
		return -ENOMEM;
	}
	return 0;
}

/* called with audit_filter_mutex */
int audit_add_tree_rule(struct audit_krule *rule)
{
	struct audit_tree *seed = rule->tree, *tree;
	struct path path;
	struct vfsmount *mnt;
	int err;

	rule->tree = NULL;
	list_for_each_entry(tree, &tree_list, list) {
		if (!strcmp(seed->pathname, tree->pathname)) {
			put_tree(seed);
			rule->tree = tree;
			list_add(&rule->rlist, &tree->rules);
			return 0;
		}
	}
	tree = seed;
	list_add(&tree->list, &tree_list);
	list_add(&rule->rlist, &tree->rules);
	/* do not set rule->tree yet */
	mutex_unlock(&audit_filter_mutex);

	if (unlikely(!prune_thread)) {
		err = audit_launch_prune();
		if (err)
			goto Err;
	}

	err = kern_path(tree->pathname, 0, &path);
	if (err)
		goto Err;
	mnt = collect_mounts(&path);
	path_put(&path);
	if (IS_ERR(mnt)) {
		err = PTR_ERR(mnt);
		goto Err;
	}

	get_tree(tree);
	err = iterate_mounts(tag_mount, tree, mnt);
	drop_collected_mounts(mnt);

	if (!err) {
		struct node *node;
		spin_lock(&hash_lock);
		list_for_each_entry(node, &tree->chunks, list)
			node->index &= ~(1U<<31);
		spin_unlock(&hash_lock);
	} else {
		trim_marked(tree);
		goto Err;
	}

	mutex_lock(&audit_filter_mutex);
	if (list_empty(&rule->rlist)) {
		put_tree(tree);
		return -ENOENT;
	}
	rule->tree = tree;
	put_tree(tree);

	return 0;
Err:
	mutex_lock(&audit_filter_mutex);
	list_del_init(&tree->list);
	list_del_init(&tree->rules);
	put_tree(tree);
	return err;
}

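/*
 * Tag every tree whose path lies under 'old' with the mounts collected
 * from 'new'. Processed trees are requeued ahead of 'barrier'; the second
 * loop walks only those, committing the new tags on success or trimming
 * them out on failure.
 */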
int audit_tag_tree(char *old, char *new)
{
	struct list_head cursor, barrier;
	int failed = 0;
	struct path path1, path2;
	struct vfsmount *tagged;
	int err;

	err = kern_path(new, 0, &path2);
	if (err)
		return err;
	tagged = collect_mounts(&path2);
	path_put(&path2);
	if (IS_ERR(tagged))
		return PTR_ERR(tagged);

	err = kern_path(old, 0, &path1);
	if (err) {
		drop_collected_mounts(tagged);
		return err;
	}

	mutex_lock(&audit_filter_mutex);
	list_add(&barrier, &tree_list);
	list_add(&cursor, &barrier);

	while (cursor.next != &tree_list) {
		struct audit_tree *tree;
		int good_one = 0;

		tree = container_of(cursor.next, struct audit_tree, list);
		get_tree(tree);
		list_del(&cursor);
		list_add(&cursor, &tree->list);
		mutex_unlock(&audit_filter_mutex);

		err = kern_path(tree->pathname, 0, &path2);
		if (!err) {
			good_one = path_is_under(&path1, &path2);
			path_put(&path2);
		}

		if (!good_one) {
			put_tree(tree);
			mutex_lock(&audit_filter_mutex);
			continue;
		}

		failed = iterate_mounts(tag_mount, tree, tagged);
		if (failed) {
			put_tree(tree);
			mutex_lock(&audit_filter_mutex);
			break;
		}

		mutex_lock(&audit_filter_mutex);
		spin_lock(&hash_lock);
		if (!tree->goner) {
			list_del(&tree->list);
			list_add(&tree->list, &tree_list);
		}
		spin_unlock(&hash_lock);
		put_tree(tree);
	}

	while (barrier.prev != &tree_list) {
		struct audit_tree *tree;

		tree = container_of(barrier.prev, struct audit_tree, list);
		get_tree(tree);
		list_del(&tree->list);
		list_add(&tree->list, &barrier);
		mutex_unlock(&audit_filter_mutex);

		if (!failed) {
			struct node *node;
			spin_lock(&hash_lock);
			list_for_each_entry(node, &tree->chunks, list)
				node->index &= ~(1U<<31);
			spin_unlock(&hash_lock);
		} else {
			trim_marked(tree);
		}

		put_tree(tree);
		mutex_lock(&audit_filter_mutex);
	}
	list_del(&barrier);
	list_del(&cursor);
	mutex_unlock(&audit_filter_mutex);
	path_put(&path1);
	drop_collected_mounts(tagged);
	return failed;
}

static void audit_schedule_prune(void)
{
	wake_up_process(prune_thread);
}

/*
 * ... and that one is done if evict_chunk() decides to delay until the end
 * of syscall. Runs synchronously.
 */
void audit_kill_trees(struct audit_context *context)
{
	struct list_head *list = &context->killed_trees;

	audit_ctl_lock();
	mutex_lock(&audit_filter_mutex);

	while (!list_empty(list)) {
		struct audit_tree *victim;

		victim = list_entry(list->next, struct audit_tree, list);
		kill_rules(context, victim);
		list_del_init(&victim->list);

		mutex_unlock(&audit_filter_mutex);

		prune_one(victim);

		mutex_lock(&audit_filter_mutex);
	}

	mutex_unlock(&audit_filter_mutex);
	audit_ctl_unlock();
}

/*
 * Here comes the stuff asynchronous to auditctl operations
 */

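/*
 * Called via audit_tree_freeing_mark() when the watched inode is going
 * away: detach every owning tree, queue them for pruning (or hand them to
 * the syscall-exit path via audit_killed_trees()), and unhash the chunk.
 */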
static void evict_chunk(struct audit_chunk *chunk)
{
	struct audit_tree *owner;
	struct list_head *postponed = audit_killed_trees();
	int need_prune = 0;
	int n;

	mutex_lock(&audit_filter_mutex);
	spin_lock(&hash_lock);
	while (!list_empty(&chunk->trees)) {
		owner = list_entry(chunk->trees.next,
				   struct audit_tree, same_root);
		owner->goner = 1;
		owner->root = NULL;
		list_del_init(&owner->same_root);
		spin_unlock(&hash_lock);
		if (!postponed) {
			kill_rules(audit_context(), owner);
			list_move(&owner->list, &prune_list);
			need_prune = 1;
		} else {
			list_move(&owner->list, postponed);
		}
		spin_lock(&hash_lock);
	}
	list_del_rcu(&chunk->hash);
	for (n = 0; n < chunk->count; n++)
		list_del_init(&chunk->owners[n].list);
	spin_unlock(&hash_lock);
	mutex_unlock(&audit_filter_mutex);
	if (need_prune)
		audit_schedule_prune();
}

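/*
 * Events themselves are ignored; the mark exists only so that
 * ->freeing_mark fires when the inode is evicted (mask is FS_IN_IGNORED).
 */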
static int audit_tree_handle_event(struct fsnotify_group *group,
				   struct inode *to_tell,
				   u32 mask, const void *data, int data_type,
				   const struct qstr *file_name, u32 cookie,
				   struct fsnotify_iter_info *iter_info)
{
	return 0;
}

static void audit_tree_freeing_mark(struct fsnotify_mark *mark,
				    struct fsnotify_group *group)
{
	struct audit_chunk *chunk;

	mutex_lock(&mark->group->mark_mutex);
	spin_lock(&hash_lock);
	chunk = mark_chunk(mark);
	replace_mark_chunk(mark, NULL);
	spin_unlock(&hash_lock);
	mutex_unlock(&mark->group->mark_mutex);
	if (chunk) {
		evict_chunk(chunk);
		audit_mark_put_chunk(chunk);
	}

	/*
	 * We are guaranteed to have at least one reference to the mark from
	 * either the inode or the caller of fsnotify_destroy_mark().
	 */
	BUG_ON(refcount_read(&mark->refcnt) < 1);
}

static const struct fsnotify_ops audit_tree_ops = {
	.handle_event = audit_tree_handle_event,
	.freeing_mark = audit_tree_freeing_mark,
	.free_mark = audit_tree_destroy_watch,
};

static int __init audit_tree_init(void)
{
	int i;

	audit_tree_mark_cachep = KMEM_CACHE(audit_tree_mark, SLAB_PANIC);

	audit_tree_group = fsnotify_alloc_group(&audit_tree_ops);
	if (IS_ERR(audit_tree_group))
		audit_panic("cannot initialize fsnotify group for rectree watches");

	for (i = 0; i < HASH_SIZE; i++)
		INIT_LIST_HEAD(&chunk_hash_heads[i]);

	return 0;
}
__initcall(audit_tree_init);