// SPDX-License-Identifier: GPL-2.0
#include "audit.h"
#include <linux/fsnotify_backend.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/kthread.h>
#include <linux/refcount.h>
#include <linux/slab.h>

struct audit_tree;
struct audit_chunk;

struct audit_tree {
        refcount_t count;
        int goner;
        struct audit_chunk *root;
        struct list_head chunks;
        struct list_head rules;
        struct list_head list;
        struct list_head same_root;
        struct rcu_head head;
        char pathname[];
};

struct audit_chunk {
        struct list_head hash;
        unsigned long key;
        struct fsnotify_mark *mark;
        struct list_head trees;         /* with root here */
        int count;
        atomic_long_t refs;
        struct rcu_head head;
        struct node {
                struct list_head list;
                struct audit_tree *owner;
                unsigned index;         /* index; upper bit indicates 'will prune' */
        } owners[];
};

struct audit_tree_mark {
        struct fsnotify_mark mark;
        struct audit_chunk *chunk;
};

static LIST_HEAD(tree_list);
static LIST_HEAD(prune_list);
static struct task_struct *prune_thread;

/*
 * One struct chunk is attached to each inode of interest through
 * audit_tree_mark (fsnotify mark). We replace struct chunk on tagging /
 * untagging; the mark is stable as long as there is a chunk attached. The
 * association between mark and chunk is protected by hash_lock and
 * audit_tree_group->mark_mutex. Thus as long as we hold
 * audit_tree_group->mark_mutex and check that the mark is alive via the
 * FSNOTIFY_MARK_FLAG_ATTACHED flag, we are sure the mark points to
 * the current chunk.
 *
 * Rules have a pointer to struct audit_tree.
 * Rules have struct list_head rlist forming a list of rules over
 * the same tree.
 * References to struct chunk are collected at audit_inode{,_child}()
 * time and used in AUDIT_TREE rule matching.
 * These references are dropped at the same time we are calling
 * audit_free_names(), etc.
 *
 * Cyclic lists galore:
 * tree.chunks anchors chunk.owners[].list                hash_lock
 * tree.rules anchors rule.rlist                          audit_filter_mutex
 * chunk.trees anchors tree.same_root                     hash_lock
 * chunk.hash is a hash table keyed by inode_to_key(inode),
 * with its middle bits as the hash function.             RCU, hash_lock
 *
 * tree is refcounted; one reference for "some rules on rules_list refer to
 * it", one for each chunk with pointer to it.
 *
 * chunk is refcounted by embedded .refs. The mark associated with the chunk
 * holds one chunk reference. This reference is dropped either when a mark is
 * going to be freed (corresponding inode goes away) or when chunk attached to
 * the mark gets replaced. This reference must be dropped using
 * audit_mark_put_chunk() to make sure the reference is dropped only after RCU
 * grace period as it protects RCU readers of the hash table.
 *
 * node.index allows us to get from node.list to the containing chunk.
 * MSB of that sucker is stolen to mark taggings that we might have to
 * revert - several operations have very unpleasant cleanup logics and
 * that makes a difference. Some.
 */
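
/*
 * Worked example: a chunk watching one inode on behalf of three trees has
 * count == 3 and owners[] slots 0, 1, 2. A tagging that may still have to
 * be reverted carries the extra bit, e.g. owners[1].index == (1 | 1U<<31);
 * masking that bit off recovers the array position, which is what lets
 * find_chunk() below step from a struct node back to its audit_chunk.
 */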

static struct fsnotify_group *audit_tree_group;
static struct kmem_cache *audit_tree_mark_cachep __read_mostly;

static struct audit_tree *alloc_tree(const char *s)
{
        struct audit_tree *tree;

        tree = kmalloc(sizeof(struct audit_tree) + strlen(s) + 1, GFP_KERNEL);
        if (tree) {
                refcount_set(&tree->count, 1);
                tree->goner = 0;
                INIT_LIST_HEAD(&tree->chunks);
                INIT_LIST_HEAD(&tree->rules);
                INIT_LIST_HEAD(&tree->list);
                INIT_LIST_HEAD(&tree->same_root);
                tree->root = NULL;
                strcpy(tree->pathname, s);
        }
        return tree;
}

static inline void get_tree(struct audit_tree *tree)
{
        refcount_inc(&tree->count);
}

static inline void put_tree(struct audit_tree *tree)
{
        if (refcount_dec_and_test(&tree->count))
                kfree_rcu(tree, head);
}

/* to avoid bringing the entire thing in audit.h */
const char *audit_tree_path(struct audit_tree *tree)
{
        return tree->pathname;
}

static void free_chunk(struct audit_chunk *chunk)
{
        int i;

        for (i = 0; i < chunk->count; i++) {
                if (chunk->owners[i].owner)
                        put_tree(chunk->owners[i].owner);
        }
        kfree(chunk);
}

void audit_put_chunk(struct audit_chunk *chunk)
{
        if (atomic_long_dec_and_test(&chunk->refs))
                free_chunk(chunk);
}

static void __put_chunk(struct rcu_head *rcu)
{
        struct audit_chunk *chunk = container_of(rcu, struct audit_chunk, head);
        audit_put_chunk(chunk);
}

/*
 * Drop reference to the chunk that was held by the mark. This is the reference
 * that gets dropped after we've removed the chunk from the hash table and we
 * use it to make sure chunk cannot be freed before RCU grace period expires.
 */
static void audit_mark_put_chunk(struct audit_chunk *chunk)
{
        call_rcu(&chunk->head, __put_chunk);
}

static inline struct audit_tree_mark *audit_mark(struct fsnotify_mark *mark)
{
        return container_of(mark, struct audit_tree_mark, mark);
}

static struct audit_chunk *mark_chunk(struct fsnotify_mark *mark)
{
        return audit_mark(mark)->chunk;
}

static void audit_tree_destroy_watch(struct fsnotify_mark *mark)
{
        kmem_cache_free(audit_tree_mark_cachep, audit_mark(mark));
}

static struct fsnotify_mark *alloc_mark(void)
{
        struct audit_tree_mark *amark;

        amark = kmem_cache_zalloc(audit_tree_mark_cachep, GFP_KERNEL);
        if (!amark)
                return NULL;
        fsnotify_init_mark(&amark->mark, audit_tree_group);
        amark->mark.mask = FS_IN_IGNORED;
        return &amark->mark;
}

static struct audit_chunk *alloc_chunk(int count)
{
        struct audit_chunk *chunk;
        int i;

        chunk = kzalloc(struct_size(chunk, owners, count), GFP_KERNEL);
        if (!chunk)
                return NULL;

        INIT_LIST_HEAD(&chunk->hash);
        INIT_LIST_HEAD(&chunk->trees);
        chunk->count = count;
        atomic_long_set(&chunk->refs, 1);
        for (i = 0; i < count; i++) {
                INIT_LIST_HEAD(&chunk->owners[i].list);
                chunk->owners[i].index = i;
        }
        return chunk;
}
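
/*
 * Note: struct_size(chunk, owners, count) evaluates to the size of struct
 * audit_chunk plus count * sizeof(struct node), saturating instead of
 * wrapping on overflow, so a chunk and its flexible owners[] array come
 * from a single allocation.
 */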

enum {HASH_SIZE = 128};
static struct list_head chunk_hash_heads[HASH_SIZE];
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(hash_lock);

/* Function to return search key in our hash from inode. */
static unsigned long inode_to_key(const struct inode *inode)
{
        /* Use address pointed to by connector->obj as the key */
        return (unsigned long)&inode->i_fsnotify_marks;
}

static inline struct list_head *chunk_hash(unsigned long key)
{
        unsigned long n = key / L1_CACHE_BYTES;
        return chunk_hash_heads + n % HASH_SIZE;
}
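
/*
 * For illustration (hypothetical key value): with L1_CACHE_BYTES == 64, a
 * key of 0x9040 gives n == 0x241 and bucket n % 128 == 0x41. Dividing by
 * the cache line size first discards the low bits, which carry little
 * entropy because every key is the address of an i_fsnotify_marks field
 * inside a slab-allocated inode.
 */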

/* hash_lock & mark->group->mark_mutex is held by caller */
static void insert_hash(struct audit_chunk *chunk)
{
        struct list_head *list;

        /*
         * Make sure chunk is fully initialized before making it visible in the
         * hash. Pairs with a data dependency barrier in READ_ONCE() in
         * audit_tree_lookup().
         */
        smp_wmb();
        WARN_ON_ONCE(!chunk->key);
        list = chunk_hash(chunk->key);
        list_add_rcu(&chunk->hash, list);
}

/* called under rcu_read_lock */
struct audit_chunk *audit_tree_lookup(const struct inode *inode)
{
        unsigned long key = inode_to_key(inode);
        struct list_head *list = chunk_hash(key);
        struct audit_chunk *p;

        list_for_each_entry_rcu(p, list, hash) {
                /*
                 * We use a data dependency barrier in READ_ONCE() to make sure
                 * the chunk we see is fully initialized.
                 */
                if (READ_ONCE(p->key) == key) {
                        atomic_long_inc(&p->refs);
                        return p;
                }
        }
        return NULL;
}
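
/*
 * Usage sketch (illustrative, given some struct audit_tree *tree):
 *
 *      rcu_read_lock();
 *      chunk = audit_tree_lookup(inode);
 *      rcu_read_unlock();
 *      if (chunk) {
 *              if (audit_tree_match(chunk, tree))
 *                      ...
 *              audit_put_chunk(chunk);
 *      }
 *
 * The atomic_long_inc(&p->refs) taken under RCU keeps the chunk alive past
 * rcu_read_unlock(); the reference is dropped with audit_put_chunk(). This
 * is the pattern behind the audit_inode{,_child}() references mentioned in
 * the header comment.
 */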

bool audit_tree_match(struct audit_chunk *chunk, struct audit_tree *tree)
{
        int n;
        for (n = 0; n < chunk->count; n++)
                if (chunk->owners[n].owner == tree)
                        return true;
        return false;
}

/* tagging and untagging inodes with trees */

static struct audit_chunk *find_chunk(struct node *p)
{
        int index = p->index & ~(1U<<31);
        p -= index;
        return container_of(p, struct audit_chunk, owners[0]);
}
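
/*
 * Worked example: given p == &chunk->owners[2] with p->index set to
 * (2 | 1U<<31), the mask clears the 'will prune' bit, p -= 2 lands on
 * &chunk->owners[0], and container_of() then recovers the enclosing
 * struct audit_chunk.
 */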

static void replace_mark_chunk(struct fsnotify_mark *mark,
                               struct audit_chunk *chunk)
{
        struct audit_chunk *old;

        assert_spin_locked(&hash_lock);
        old = mark_chunk(mark);
        audit_mark(mark)->chunk = chunk;
        if (chunk)
                chunk->mark = mark;
        if (old)
                old->mark = NULL;
}

static void replace_chunk(struct audit_chunk *new, struct audit_chunk *old)
{
        struct audit_tree *owner;
        int i, j;

        new->key = old->key;
        list_splice_init(&old->trees, &new->trees);
        list_for_each_entry(owner, &new->trees, same_root)
                owner->root = new;
        for (i = j = 0; j < old->count; i++, j++) {
                /* skip slots whose tree was already removed */
                if (!old->owners[j].owner) {
                        i--;
                        continue;
                }
                owner = old->owners[j].owner;
                new->owners[i].owner = owner;
                new->owners[i].index = old->owners[j].index - j + i;
                get_tree(owner);
                list_replace_init(&old->owners[j].list, &new->owners[i].list);
        }
        replace_mark_chunk(old->mark, new);
        /*
         * Make sure chunk is fully initialized before making it visible in the
         * hash. Pairs with a data dependency barrier in READ_ONCE() in
         * audit_tree_lookup().
         */
        smp_wmb();
        list_replace_rcu(&old->hash, &new->hash);
}
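
/*
 * Renumbering example: shrinking a three-slot chunk whose owners[1] was
 * already emptied copies owners[0] and owners[2] into slots 0 and 1 of the
 * new chunk; for j == 2, i == 1 the new index becomes old index - 2 + 1,
 * preserving the invariant that index (minus the MSB tag) equals the array
 * position find_chunk() relies on.
 */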

static void remove_chunk_node(struct audit_chunk *chunk, struct node *p)
{
        struct audit_tree *owner = p->owner;

        if (owner->root == chunk) {
                list_del_init(&owner->same_root);
                owner->root = NULL;
        }
        list_del_init(&p->list);
        p->owner = NULL;
        put_tree(owner);
}

static int chunk_count_trees(struct audit_chunk *chunk)
{
        int i;
        int ret = 0;

        for (i = 0; i < chunk->count; i++)
                if (chunk->owners[i].owner)
                        ret++;
        return ret;
}

static void untag_chunk(struct audit_chunk *chunk, struct fsnotify_mark *mark)
{
        struct audit_chunk *new;
        int size;

        mutex_lock(&audit_tree_group->mark_mutex);
        /*
         * mark_mutex stabilizes chunk attached to the mark so we can check
         * whether it didn't change while we've dropped hash_lock.
         */
        if (!(mark->flags & FSNOTIFY_MARK_FLAG_ATTACHED) ||
            mark_chunk(mark) != chunk)
                goto out_mutex;

        size = chunk_count_trees(chunk);
        if (!size) {
                spin_lock(&hash_lock);
                list_del_init(&chunk->trees);
                list_del_rcu(&chunk->hash);
                replace_mark_chunk(mark, NULL);
                spin_unlock(&hash_lock);
                fsnotify_detach_mark(mark);
                mutex_unlock(&audit_tree_group->mark_mutex);
                audit_mark_put_chunk(chunk);
                fsnotify_free_mark(mark);
                return;
        }

        new = alloc_chunk(size);
        if (!new)
                goto out_mutex;

        spin_lock(&hash_lock);
        /*
         * This has to go last when updating chunk as once replace_chunk() is
         * called, new RCU readers can see the new chunk.
         */
        replace_chunk(new, chunk);
        spin_unlock(&hash_lock);
        mutex_unlock(&audit_tree_group->mark_mutex);
        audit_mark_put_chunk(chunk);
        return;

out_mutex:
        mutex_unlock(&audit_tree_group->mark_mutex);
}
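
/*
 * Locking note: the order here and in the taggers below is
 * audit_tree_group->mark_mutex first, then hash_lock. A caller such as
 * prune_tree_chunks() therefore drops hash_lock (holding a mark reference
 * so the mark cannot go away) before calling untag_chunk(), and the
 * FSNOTIFY_MARK_FLAG_ATTACHED / mark_chunk() re-check above catches any
 * replacement of the chunk that happened in that window.
 */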

/* Call with group->mark_mutex held, releases it */
static int create_chunk(struct inode *inode, struct audit_tree *tree)
{
        struct fsnotify_mark *mark;
        struct audit_chunk *chunk = alloc_chunk(1);

        if (!chunk) {
                mutex_unlock(&audit_tree_group->mark_mutex);
                return -ENOMEM;
        }

        mark = alloc_mark();
        if (!mark) {
                mutex_unlock(&audit_tree_group->mark_mutex);
                kfree(chunk);
                return -ENOMEM;
        }

        if (fsnotify_add_inode_mark_locked(mark, inode, 0)) {
                mutex_unlock(&audit_tree_group->mark_mutex);
                fsnotify_put_mark(mark);
                kfree(chunk);
                return -ENOSPC;
        }

        spin_lock(&hash_lock);
        if (tree->goner) {
                spin_unlock(&hash_lock);
                fsnotify_detach_mark(mark);
                mutex_unlock(&audit_tree_group->mark_mutex);
                fsnotify_free_mark(mark);
                fsnotify_put_mark(mark);
                kfree(chunk);
                return 0;
        }
        replace_mark_chunk(mark, chunk);
        chunk->owners[0].index = (1U << 31);
        chunk->owners[0].owner = tree;
        get_tree(tree);
        list_add(&chunk->owners[0].list, &tree->chunks);
        if (!tree->root) {
                tree->root = chunk;
                list_add(&tree->same_root, &chunk->trees);
        }
        chunk->key = inode_to_key(inode);
        /*
         * Inserting into the hash table has to go last as once we do that RCU
         * readers can see the chunk.
         */
        insert_hash(chunk);
        spin_unlock(&hash_lock);
        mutex_unlock(&audit_tree_group->mark_mutex);
        /*
         * Drop our initial reference. When mark we point to is getting freed,
         * we get notification through ->freeing_mark callback and cleanup
         * chunk pointing to this mark.
         */
        fsnotify_put_mark(mark);
        return 0;
}

/* the first tagged inode becomes root of tree */
static int tag_chunk(struct inode *inode, struct audit_tree *tree)
{
        struct fsnotify_mark *mark;
        struct audit_chunk *chunk, *old;
        struct node *p;
        int n;

        mutex_lock(&audit_tree_group->mark_mutex);
        mark = fsnotify_find_mark(&inode->i_fsnotify_marks, audit_tree_group);
        if (!mark)
                return create_chunk(inode, tree);

        /*
         * Found mark is guaranteed to be attached and mark_mutex protects mark
         * from getting detached and thus it makes sure there is chunk attached
         * to the mark.
         */
        /* are we already there? */
        spin_lock(&hash_lock);
        old = mark_chunk(mark);
        for (n = 0; n < old->count; n++) {
                if (old->owners[n].owner == tree) {
                        spin_unlock(&hash_lock);
                        mutex_unlock(&audit_tree_group->mark_mutex);
                        fsnotify_put_mark(mark);
                        return 0;
                }
        }
        spin_unlock(&hash_lock);

        chunk = alloc_chunk(old->count + 1);
        if (!chunk) {
                mutex_unlock(&audit_tree_group->mark_mutex);
                fsnotify_put_mark(mark);
                return -ENOMEM;
        }

        spin_lock(&hash_lock);
        if (tree->goner) {
                spin_unlock(&hash_lock);
                mutex_unlock(&audit_tree_group->mark_mutex);
                fsnotify_put_mark(mark);
                kfree(chunk);
                return 0;
        }
        p = &chunk->owners[chunk->count - 1];
        p->index = (chunk->count - 1) | (1U<<31);
        p->owner = tree;
        get_tree(tree);
        list_add(&p->list, &tree->chunks);
        if (!tree->root) {
                tree->root = chunk;
                list_add(&tree->same_root, &chunk->trees);
        }
        /*
         * This has to go last when updating chunk as once replace_chunk() is
         * called, new RCU readers can see the new chunk.
         */
        replace_chunk(chunk, old);
        spin_unlock(&hash_lock);
        mutex_unlock(&audit_tree_group->mark_mutex);
        fsnotify_put_mark(mark); /* pair to fsnotify_find_mark */
        audit_mark_put_chunk(old);

        return 0;
}

static void audit_tree_log_remove_rule(struct audit_context *context,
                                       struct audit_krule *rule)
{
        struct audit_buffer *ab;

        if (!audit_enabled)
                return;
        ab = audit_log_start(context, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
        if (unlikely(!ab))
                return;
        audit_log_format(ab, "op=remove_rule dir=");
        audit_log_untrustedstring(ab, rule->tree->pathname);
        audit_log_key(ab, rule->filterkey);
        audit_log_format(ab, " list=%d res=1", rule->listnr);
        audit_log_end(ab);
}
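
/*
 * The resulting record looks roughly like this (field values hypothetical):
 *
 *      type=CONFIG_CHANGE ... op=remove_rule dir="/etc/rsyslog.d"
 *      key="log-config" list=4 res=1
 *
 * with list=4 being AUDIT_FILTER_EXIT; audit_log_key() emits key=(null)
 * when the rule has no filter key.
 */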

static void kill_rules(struct audit_context *context, struct audit_tree *tree)
{
        struct audit_krule *rule, *next;
        struct audit_entry *entry;

        list_for_each_entry_safe(rule, next, &tree->rules, rlist) {
                entry = container_of(rule, struct audit_entry, rule);

                list_del_init(&rule->rlist);
                if (rule->tree) {
                        /* not a half-baked one */
                        audit_tree_log_remove_rule(context, rule);
                        if (entry->rule.exe)
                                audit_remove_mark(entry->rule.exe);
                        rule->tree = NULL;
                        list_del_rcu(&entry->list);
                        list_del(&entry->rule.list);
                        call_rcu(&entry->rcu, audit_free_rule_rcu);
                }
        }
}

/*
 * Remove tree from chunks. If 'tagged' is set, remove tree only from tagged
 * chunks. The function expects tagged chunks are all at the beginning of the
 * chunks list.
 */
static void prune_tree_chunks(struct audit_tree *victim, bool tagged)
{
        spin_lock(&hash_lock);
        while (!list_empty(&victim->chunks)) {
                struct node *p;
                struct audit_chunk *chunk;
                struct fsnotify_mark *mark;

                p = list_first_entry(&victim->chunks, struct node, list);
                /* have we run out of marked? */
                if (tagged && !(p->index & (1U<<31)))
                        break;
                chunk = find_chunk(p);
                mark = chunk->mark;
                remove_chunk_node(chunk, p);
                /* Racing with audit_tree_freeing_mark()? */
                if (!mark)
                        continue;
                fsnotify_get_mark(mark);
                spin_unlock(&hash_lock);

                untag_chunk(chunk, mark);
                fsnotify_put_mark(mark);

                spin_lock(&hash_lock);
        }
        spin_unlock(&hash_lock);
}

/*
 * finish killing struct audit_tree
 */
static void prune_one(struct audit_tree *victim)
{
        prune_tree_chunks(victim, false);
        put_tree(victim);
}

/* trim the uncommitted chunks from tree */

static void trim_marked(struct audit_tree *tree)
{
        struct list_head *p, *q;
        spin_lock(&hash_lock);
        if (tree->goner) {
                spin_unlock(&hash_lock);
                return;
        }
        /* reorder */
        for (p = tree->chunks.next; p != &tree->chunks; p = q) {
                struct node *node = list_entry(p, struct node, list);
                q = p->next;
                if (node->index & (1U<<31)) {
                        list_del_init(p);
                        list_add(p, &tree->chunks);
                }
        }
        spin_unlock(&hash_lock);

        prune_tree_chunks(tree, true);

        spin_lock(&hash_lock);
        if (!tree->root && !tree->goner) {
                tree->goner = 1;
                spin_unlock(&hash_lock);
                mutex_lock(&audit_filter_mutex);
                kill_rules(audit_context(), tree);
                list_del_init(&tree->list);
                mutex_unlock(&audit_filter_mutex);
                prune_one(tree);
        } else {
                spin_unlock(&hash_lock);
        }
}
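
/*
 * Reorder example: if tree->chunks holds [A, B*, C*], where * marks a set
 * 1U<<31 bit, the loop above re-adds each starred node at the head, giving
 * [C*, B*, A]. The tagged nodes thus form a prefix of the list, which is
 * the layout prune_tree_chunks(tree, true) depends on to stop at the first
 * untagged node.
 */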

static void audit_schedule_prune(void);

/* called with audit_filter_mutex */
int audit_remove_tree_rule(struct audit_krule *rule)
{
        struct audit_tree *tree;
        tree = rule->tree;
        if (tree) {
                spin_lock(&hash_lock);
                list_del_init(&rule->rlist);
                if (list_empty(&tree->rules) && !tree->goner) {
                        tree->root = NULL;
                        list_del_init(&tree->same_root);
                        tree->goner = 1;
                        list_move(&tree->list, &prune_list);
                        rule->tree = NULL;
                        spin_unlock(&hash_lock);
                        audit_schedule_prune();
                        return 1;
                }
                rule->tree = NULL;
                spin_unlock(&hash_lock);
                return 1;
        }
        return 0;
}

static int compare_root(struct vfsmount *mnt, void *arg)
{
        return inode_to_key(d_backing_inode(mnt->mnt_root)) ==
               (unsigned long)arg;
}

void audit_trim_trees(void)
{
        struct list_head cursor;

        mutex_lock(&audit_filter_mutex);
        list_add(&cursor, &tree_list);
        while (cursor.next != &tree_list) {
                struct audit_tree *tree;
                struct path path;
                struct vfsmount *root_mnt;
                struct node *node;
                int err;

                tree = container_of(cursor.next, struct audit_tree, list);
                get_tree(tree);
                list_move(&cursor, &tree->list);
                mutex_unlock(&audit_filter_mutex);

                err = kern_path(tree->pathname, 0, &path);
                if (err)
                        goto skip_it;

                root_mnt = collect_mounts(&path);
                path_put(&path);
                if (IS_ERR(root_mnt))
                        goto skip_it;

                spin_lock(&hash_lock);
                list_for_each_entry(node, &tree->chunks, list) {
                        struct audit_chunk *chunk = find_chunk(node);
                        /* this could be NULL if the watch is dying elsewhere... */
                        node->index |= 1U<<31;
                        if (iterate_mounts(compare_root,
                                           (void *)(chunk->key),
                                           root_mnt))
                                node->index &= ~(1U<<31);
                }
                spin_unlock(&hash_lock);
                trim_marked(tree);
                drop_collected_mounts(root_mnt);
skip_it:
                put_tree(tree);
                mutex_lock(&audit_filter_mutex);
        }
        list_del(&cursor);
        mutex_unlock(&audit_filter_mutex);
}

int audit_make_tree(struct audit_krule *rule, char *pathname, u32 op)
{
        if (pathname[0] != '/' ||
            rule->listnr != AUDIT_FILTER_EXIT ||
            op != Audit_equal ||
            rule->inode_f || rule->watch || rule->tree)
                return -EINVAL;
        rule->tree = alloc_tree(pathname);
        if (!rule->tree)
                return -ENOMEM;
        return 0;
}
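
/*
 * For illustration (hypothetical rule): something like
 *
 *      auditctl -a always,exit -F dir=/usr/local -F key=swtools
 *
 * arrives here with pathname "/usr/local", listnr == AUDIT_FILTER_EXIT and
 * op == Audit_equal. A relative path, another filter list, or a rule that
 * already carries an inode, watch or tree is rejected with -EINVAL before
 * any tree is allocated.
 */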

void audit_put_tree(struct audit_tree *tree)
{
        put_tree(tree);
}

static int tag_mount(struct vfsmount *mnt, void *arg)
{
        return tag_chunk(d_backing_inode(mnt->mnt_root), arg);
}

/*
 * That gets run when evict_chunk() ends up needing to kill audit_tree.
 * Runs from a separate thread.
 */
static int prune_tree_thread(void *unused)
{
        for (;;) {
                if (list_empty(&prune_list)) {
                        set_current_state(TASK_INTERRUPTIBLE);
                        schedule();
                }

                audit_ctl_lock();
                mutex_lock(&audit_filter_mutex);

                while (!list_empty(&prune_list)) {
                        struct audit_tree *victim;

                        victim = list_entry(prune_list.next,
                                        struct audit_tree, list);
                        list_del_init(&victim->list);

                        mutex_unlock(&audit_filter_mutex);

                        prune_one(victim);

                        mutex_lock(&audit_filter_mutex);
                }

                mutex_unlock(&audit_filter_mutex);
                audit_ctl_unlock();
        }
        return 0;
}

static int audit_launch_prune(void)
{
        if (prune_thread)
                return 0;
        prune_thread = kthread_run(prune_tree_thread, NULL,
                                   "audit_prune_tree");
        if (IS_ERR(prune_thread)) {
                pr_err("cannot start thread audit_prune_tree\n");
                prune_thread = NULL;
                return -ENOMEM;
        }
        return 0;
}

/* called with audit_filter_mutex */
int audit_add_tree_rule(struct audit_krule *rule)
{
        struct audit_tree *seed = rule->tree, *tree;
        struct path path;
        struct vfsmount *mnt;
        int err;

        rule->tree = NULL;
        list_for_each_entry(tree, &tree_list, list) {
                if (!strcmp(seed->pathname, tree->pathname)) {
                        put_tree(seed);
                        rule->tree = tree;
                        list_add(&rule->rlist, &tree->rules);
                        return 0;
                }
        }
        tree = seed;
        list_add(&tree->list, &tree_list);
        list_add(&rule->rlist, &tree->rules);
        /* do not set rule->tree yet */
        mutex_unlock(&audit_filter_mutex);

        if (unlikely(!prune_thread)) {
                err = audit_launch_prune();
                if (err)
                        goto Err;
        }

        err = kern_path(tree->pathname, 0, &path);
        if (err)
                goto Err;
        mnt = collect_mounts(&path);
        path_put(&path);
        if (IS_ERR(mnt)) {
                err = PTR_ERR(mnt);
                goto Err;
        }

        get_tree(tree);
        err = iterate_mounts(tag_mount, tree, mnt);
        drop_collected_mounts(mnt);

        if (!err) {
                struct node *node;
                spin_lock(&hash_lock);
                list_for_each_entry(node, &tree->chunks, list)
                        node->index &= ~(1U<<31);
                spin_unlock(&hash_lock);
        } else {
                trim_marked(tree);
                goto Err;
        }

        mutex_lock(&audit_filter_mutex);
        if (list_empty(&rule->rlist)) {
                put_tree(tree);
                return -ENOENT;
        }
        rule->tree = tree;
        put_tree(tree);

        return 0;
Err:
        mutex_lock(&audit_filter_mutex);
        list_del_init(&tree->list);
        list_del_init(&tree->rules);
        put_tree(tree);
        return err;
}

int audit_tag_tree(char *old, char *new)
{
        struct list_head cursor, barrier;
        int failed = 0;
        struct path path1, path2;
        struct vfsmount *tagged;
        int err;

        err = kern_path(new, 0, &path2);
        if (err)
                return err;
        tagged = collect_mounts(&path2);
        path_put(&path2);
        if (IS_ERR(tagged))
                return PTR_ERR(tagged);

        err = kern_path(old, 0, &path1);
        if (err) {
                drop_collected_mounts(tagged);
                return err;
        }

        mutex_lock(&audit_filter_mutex);
        list_add(&barrier, &tree_list);
        list_add(&cursor, &barrier);

        while (cursor.next != &tree_list) {
                struct audit_tree *tree;
                int good_one = 0;

                tree = container_of(cursor.next, struct audit_tree, list);
                get_tree(tree);
                list_move(&cursor, &tree->list);
                mutex_unlock(&audit_filter_mutex);

                err = kern_path(tree->pathname, 0, &path2);
                if (!err) {
                        good_one = path_is_under(&path1, &path2);
                        path_put(&path2);
                }

                if (!good_one) {
                        put_tree(tree);
                        mutex_lock(&audit_filter_mutex);
                        continue;
                }

                failed = iterate_mounts(tag_mount, tree, tagged);
                if (failed) {
                        put_tree(tree);
                        mutex_lock(&audit_filter_mutex);
                        break;
                }

                mutex_lock(&audit_filter_mutex);
                spin_lock(&hash_lock);
                if (!tree->goner) {
                        list_move(&tree->list, &tree_list);
                }
                spin_unlock(&hash_lock);
                put_tree(tree);
        }

        while (barrier.prev != &tree_list) {
                struct audit_tree *tree;

                tree = container_of(barrier.prev, struct audit_tree, list);
                get_tree(tree);
                list_move(&tree->list, &barrier);
                mutex_unlock(&audit_filter_mutex);

                if (!failed) {
                        struct node *node;
                        spin_lock(&hash_lock);
                        list_for_each_entry(node, &tree->chunks, list)
                                node->index &= ~(1U<<31);
                        spin_unlock(&hash_lock);
                } else {
                        trim_marked(tree);
                }

                put_tree(tree);
                mutex_lock(&audit_filter_mutex);
        }
        list_del(&barrier);
        list_del(&cursor);
        mutex_unlock(&audit_filter_mutex);
        path_put(&path1);
        drop_collected_mounts(tagged);
        return failed;
}
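
/*
 * A note on the cursor/barrier idiom above: both are dummy nodes threaded
 * through tree_list. 'cursor' remembers the iteration position across the
 * points where audit_filter_mutex is dropped, while every successfully
 * retagged tree is moved in front of 'barrier'. The second loop then walks
 * exactly those trees, committing the taggings (clearing the MSB) when all
 * mounts were tagged, or trimming them with trim_marked() on failure.
 */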

static void audit_schedule_prune(void)
{
        wake_up_process(prune_thread);
}

/*
 * ... and that one is done if evict_chunk() decides to delay until the end
 * of syscall. Runs synchronously.
 */
void audit_kill_trees(struct audit_context *context)
{
        struct list_head *list = &context->killed_trees;

        audit_ctl_lock();
        mutex_lock(&audit_filter_mutex);

        while (!list_empty(list)) {
                struct audit_tree *victim;

                victim = list_entry(list->next, struct audit_tree, list);
                kill_rules(context, victim);
                list_del_init(&victim->list);

                mutex_unlock(&audit_filter_mutex);

                prune_one(victim);

                mutex_lock(&audit_filter_mutex);
        }

        mutex_unlock(&audit_filter_mutex);
        audit_ctl_unlock();
}

/*
 * Here comes the stuff asynchronous to auditctl operations
 */

static void evict_chunk(struct audit_chunk *chunk)
{
        struct audit_tree *owner;
        struct list_head *postponed = audit_killed_trees();
        int need_prune = 0;
        int n;

        mutex_lock(&audit_filter_mutex);
        spin_lock(&hash_lock);
        while (!list_empty(&chunk->trees)) {
                owner = list_entry(chunk->trees.next,
                                   struct audit_tree, same_root);
                owner->goner = 1;
                owner->root = NULL;
                list_del_init(&owner->same_root);
                spin_unlock(&hash_lock);
                if (!postponed) {
                        kill_rules(audit_context(), owner);
                        list_move(&owner->list, &prune_list);
                        need_prune = 1;
                } else {
                        list_move(&owner->list, postponed);
                }
                spin_lock(&hash_lock);
        }
        list_del_rcu(&chunk->hash);
        for (n = 0; n < chunk->count; n++)
                list_del_init(&chunk->owners[n].list);
        spin_unlock(&hash_lock);
        mutex_unlock(&audit_filter_mutex);
        if (need_prune)
                audit_schedule_prune();
}

static int audit_tree_handle_event(struct fsnotify_mark *mark, u32 mask,
                                   struct inode *inode, struct inode *dir,
                                   const struct qstr *file_name, u32 cookie)
{
        return 0;
}

static void audit_tree_freeing_mark(struct fsnotify_mark *mark,
                                    struct fsnotify_group *group)
{
        struct audit_chunk *chunk;

        mutex_lock(&mark->group->mark_mutex);
        spin_lock(&hash_lock);
        chunk = mark_chunk(mark);
        replace_mark_chunk(mark, NULL);
        spin_unlock(&hash_lock);
        mutex_unlock(&mark->group->mark_mutex);
        if (chunk) {
                evict_chunk(chunk);
                audit_mark_put_chunk(chunk);
        }

        /*
         * We are guaranteed to have at least one reference to the mark from
         * either the inode or the caller of fsnotify_destroy_mark().
         */
        BUG_ON(refcount_read(&mark->refcnt) < 1);
}

static const struct fsnotify_ops audit_tree_ops = {
        .handle_inode_event = audit_tree_handle_event,
        .freeing_mark = audit_tree_freeing_mark,
        .free_mark = audit_tree_destroy_watch,
};

static int __init audit_tree_init(void)
{
        int i;

        audit_tree_mark_cachep = KMEM_CACHE(audit_tree_mark, SLAB_PANIC);

        audit_tree_group = fsnotify_alloc_group(&audit_tree_ops);
        if (IS_ERR(audit_tree_group))
                audit_panic("cannot initialize fsnotify group for rectree watches");

        for (i = 0; i < HASH_SIZE; i++)
                INIT_LIST_HEAD(&chunk_hash_heads[i]);

        return 0;
}
__initcall(audit_tree_init);