v5.4 (kernel/audit_tree.c)
   1// SPDX-License-Identifier: GPL-2.0
   2#include "audit.h"
   3#include <linux/fsnotify_backend.h>
   4#include <linux/namei.h>
   5#include <linux/mount.h>
   6#include <linux/kthread.h>
   7#include <linux/refcount.h>
   8#include <linux/slab.h>
   9
  10struct audit_tree;
  11struct audit_chunk;
  12
  13struct audit_tree {
  14	refcount_t count;
  15	int goner;
  16	struct audit_chunk *root;
  17	struct list_head chunks;
  18	struct list_head rules;
  19	struct list_head list;
  20	struct list_head same_root;
  21	struct rcu_head head;
  22	char pathname[];
  23};
  24
  25struct audit_chunk {
  26	struct list_head hash;
  27	unsigned long key;
  28	struct fsnotify_mark *mark;
  29	struct list_head trees;		/* with root here */
  30	int count;
  31	atomic_long_t refs;
  32	struct rcu_head head;
  33	struct node {
  34		struct list_head list;
  35		struct audit_tree *owner;
  36		unsigned index;		/* index; upper bit indicates 'will prune' */
  37	} owners[];
  38};
  39
  40struct audit_tree_mark {
  41	struct fsnotify_mark mark;
  42	struct audit_chunk *chunk;
  43};
  44
  45static LIST_HEAD(tree_list);
  46static LIST_HEAD(prune_list);
  47static struct task_struct *prune_thread;
  48
  49/*
  50 * One struct chunk is attached to each inode of interest through
  51 * audit_tree_mark (fsnotify mark). We replace struct chunk on tagging /
  52 * untagging, the mark is stable as long as there is chunk attached. The
  53 * association between mark and chunk is protected by hash_lock and
  54 * audit_tree_group->mark_mutex. Thus as long as we hold
  55 * audit_tree_group->mark_mutex and check that the mark is alive by
  56 * FSNOTIFY_MARK_FLAG_ATTACHED flag check, we are sure the mark points to
  57 * the current chunk.
  58 *
  59 * Rules have pointer to struct audit_tree.
  60 * Rules have struct list_head rlist forming a list of rules over
  61 * the same tree.
  62 * References to struct chunk are collected at audit_inode{,_child}()
  63 * time and used in AUDIT_TREE rule matching.
  64 * These references are dropped at the same time we are calling
  65 * audit_free_names(), etc.
  66 *
  67 * Cyclic lists galore:
  68 * tree.chunks anchors chunk.owners[].list			hash_lock
  69 * tree.rules anchors rule.rlist				audit_filter_mutex
  70 * chunk.trees anchors tree.same_root				hash_lock
  71 * chunk.hash is a hash with middle bits of watch.inode as
  72 * a hash function.						RCU, hash_lock
  73 *
  74 * tree is refcounted; one reference for "some rules on rules_list refer to
  75 * it", one for each chunk with pointer to it.
  76 *
  77 * chunk is refcounted by embedded .refs. Mark associated with the chunk holds
  78 * one chunk reference. This reference is dropped either when a mark is going
  79 * to be freed (corresponding inode goes away) or when chunk attached to the
  80 * mark gets replaced. This reference must be dropped using
  81 * audit_mark_put_chunk() to make sure the reference is dropped only after RCU
  82 * grace period as it protects RCU readers of the hash table.
  83 *
   84 * node.index allows us to get from node.list to the containing chunk.
  85 * MSB of that sucker is stolen to mark taggings that we might have to
  86 * revert - several operations have very unpleasant cleanup logics and
  87 * that makes a difference.  Some.
  88 */
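
Editor's aside: the index encoding described above is load-bearing for find_chunk() further down in this file. node.index packs the slot position within chunk->owners[] into the low 31 bits and steals bit 31 as the "will prune" tag, so a bare struct node pointer is enough to recover both the slot and the owning chunk by pointer arithmetic. A minimal userspace sketch of that recovery (illustrative only; the struct layout is hypothetical and the kernel does the last step with container_of()):

#include <stddef.h>
#include <stdio.h>

struct node { unsigned index; };	/* low 31 bits: slot; bit 31: 'will prune' */
struct chunk { int count; struct node owners[4]; };

int main(void)
{
	struct chunk c = { .count = 4 };
	unsigned i;

	for (i = 0; i < 4; i++)
		c.owners[i].index = i;
	c.owners[2].index |= 1U << 31;		/* tag slot 2 for pruning */

	/* Recover slot and chunk from a bare node pointer, as find_chunk() does. */
	struct node *p = &c.owners[2];
	unsigned slot = p->index & ~(1U << 31);	/* strip the tag bit */
	struct chunk *owner = (struct chunk *)((char *)(p - slot) -
					       offsetof(struct chunk, owners));

	printf("slot=%u tagged=%d recovered=%s\n", slot,
	       !!(p->index & (1U << 31)), owner == &c ? "yes" : "no");
	return 0;
}
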
  89
  90static struct fsnotify_group *audit_tree_group;
  91static struct kmem_cache *audit_tree_mark_cachep __read_mostly;
  92
  93static struct audit_tree *alloc_tree(const char *s)
  94{
  95	struct audit_tree *tree;
  96
  97	tree = kmalloc(sizeof(struct audit_tree) + strlen(s) + 1, GFP_KERNEL);
  98	if (tree) {
  99		refcount_set(&tree->count, 1);
 100		tree->goner = 0;
 101		INIT_LIST_HEAD(&tree->chunks);
 102		INIT_LIST_HEAD(&tree->rules);
 103		INIT_LIST_HEAD(&tree->list);
 104		INIT_LIST_HEAD(&tree->same_root);
 105		tree->root = NULL;
 106		strcpy(tree->pathname, s);
 107	}
 108	return tree;
 109}
 110
 111static inline void get_tree(struct audit_tree *tree)
 112{
 113	refcount_inc(&tree->count);
 114}
 115
 116static inline void put_tree(struct audit_tree *tree)
 117{
 118	if (refcount_dec_and_test(&tree->count))
 119		kfree_rcu(tree, head);
 120}
 121
 122/* to avoid bringing the entire thing in audit.h */
 123const char *audit_tree_path(struct audit_tree *tree)
 124{
 125	return tree->pathname;
 126}
 127
 128static void free_chunk(struct audit_chunk *chunk)
 129{
 130	int i;
 131
 132	for (i = 0; i < chunk->count; i++) {
 133		if (chunk->owners[i].owner)
 134			put_tree(chunk->owners[i].owner);
 135	}
 136	kfree(chunk);
 137}
 138
 139void audit_put_chunk(struct audit_chunk *chunk)
 140{
 141	if (atomic_long_dec_and_test(&chunk->refs))
 142		free_chunk(chunk);
 143}
 144
 145static void __put_chunk(struct rcu_head *rcu)
 146{
 147	struct audit_chunk *chunk = container_of(rcu, struct audit_chunk, head);
 148	audit_put_chunk(chunk);
 149}
 150
 151/*
 152 * Drop reference to the chunk that was held by the mark. This is the reference
 153 * that gets dropped after we've removed the chunk from the hash table and we
 154 * use it to make sure chunk cannot be freed before RCU grace period expires.
 155 */
 156static void audit_mark_put_chunk(struct audit_chunk *chunk)
 157{
 158	call_rcu(&chunk->head, __put_chunk);
 159}
 160
 161static inline struct audit_tree_mark *audit_mark(struct fsnotify_mark *mark)
 162{
 163	return container_of(mark, struct audit_tree_mark, mark);
 164}
 165
 166static struct audit_chunk *mark_chunk(struct fsnotify_mark *mark)
 167{
 168	return audit_mark(mark)->chunk;
 169}
 170
 171static void audit_tree_destroy_watch(struct fsnotify_mark *mark)
 172{
 173	kmem_cache_free(audit_tree_mark_cachep, audit_mark(mark));
 174}
 175
 176static struct fsnotify_mark *alloc_mark(void)
 177{
 178	struct audit_tree_mark *amark;
 179
 180	amark = kmem_cache_zalloc(audit_tree_mark_cachep, GFP_KERNEL);
 181	if (!amark)
 182		return NULL;
 183	fsnotify_init_mark(&amark->mark, audit_tree_group);
 184	amark->mark.mask = FS_IN_IGNORED;
 185	return &amark->mark;
 186}
 187
 188static struct audit_chunk *alloc_chunk(int count)
 189{
 190	struct audit_chunk *chunk;
 191	size_t size;
 192	int i;
 193
 194	size = offsetof(struct audit_chunk, owners) + count * sizeof(struct node);
 195	chunk = kzalloc(size, GFP_KERNEL);
 196	if (!chunk)
 197		return NULL;
 198
 199	INIT_LIST_HEAD(&chunk->hash);
 200	INIT_LIST_HEAD(&chunk->trees);
 201	chunk->count = count;
 202	atomic_long_set(&chunk->refs, 1);
 203	for (i = 0; i < count; i++) {
 204		INIT_LIST_HEAD(&chunk->owners[i].list);
 205		chunk->owners[i].index = i;
 206	}
 207	return chunk;
 208}
 209
 210enum {HASH_SIZE = 128};
 211static struct list_head chunk_hash_heads[HASH_SIZE];
 212static __cacheline_aligned_in_smp DEFINE_SPINLOCK(hash_lock);
 213
 214/* Function to return search key in our hash from inode. */
 215static unsigned long inode_to_key(const struct inode *inode)
 216{
 217	/* Use address pointed to by connector->obj as the key */
 218	return (unsigned long)&inode->i_fsnotify_marks;
 219}
 220
 221static inline struct list_head *chunk_hash(unsigned long key)
 222{
 223	unsigned long n = key / L1_CACHE_BYTES;
 224	return chunk_hash_heads + n % HASH_SIZE;
 225}
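
A note on the bucket math in chunk_hash() above: the key is a kernel address (in this version, the address of the inode's i_fsnotify_marks field), so its low bits are mostly alignment zeros. Dividing by L1_CACHE_BYTES discards them, letting the "middle bits" the header comment mentions select one of the 128 buckets. A hedged userspace sketch, assuming a 64-byte cache line (the real value is arch-dependent):

#define L1_CACHE_BYTES	64	/* assumption for illustration */
#define HASH_SIZE	128

static inline unsigned long bucket_of(unsigned long key)
{
	/* Same computation as chunk_hash(): drop alignment bits, mod buckets. */
	return (key / L1_CACHE_BYTES) % HASH_SIZE;
}

/*
 * Two cache-line-aligned keys 64 bytes apart land in adjacent buckets:
 * bucket_of(0xffff888000001000UL) == 64, bucket_of(0xffff888000001040UL) == 65.
 */
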
 226
 227/* hash_lock & mark->group->mark_mutex is held by caller */
 228static void insert_hash(struct audit_chunk *chunk)
 229{
 230	struct list_head *list;
 231
 232	/*
 233	 * Make sure chunk is fully initialized before making it visible in the
 234	 * hash. Pairs with a data dependency barrier in READ_ONCE() in
 235	 * audit_tree_lookup().
 236	 */
 237	smp_wmb();
 238	WARN_ON_ONCE(!chunk->key);
 239	list = chunk_hash(chunk->key);
 240	list_add_rcu(&chunk->hash, list);
 241}
 242
 243/* called under rcu_read_lock */
 244struct audit_chunk *audit_tree_lookup(const struct inode *inode)
 245{
 246	unsigned long key = inode_to_key(inode);
 247	struct list_head *list = chunk_hash(key);
 248	struct audit_chunk *p;
 249
 250	list_for_each_entry_rcu(p, list, hash) {
 251		/*
 252		 * We use a data dependency barrier in READ_ONCE() to make sure
 253		 * the chunk we see is fully initialized.
 254		 */
 255		if (READ_ONCE(p->key) == key) {
 256			atomic_long_inc(&p->refs);
 257			return p;
 258		}
 259	}
 260	return NULL;
 261}
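
The smp_wmb() in insert_hash()/replace_chunk() and the READ_ONCE() in the lookup above form the usual RCU publish pattern: fully initialize the chunk, then make it reachable, so any reader that finds the new hash entry also sees a valid chunk->key. A hedged C11 analogue of the ordering idea, using userspace atomics rather than the kernel primitives:

#include <stdatomic.h>
#include <stddef.h>

struct obj { unsigned long key; };
static _Atomic(struct obj *) published;

/* Writer: fill in all fields first, then publish with release ordering. */
static void publish(struct obj *o, unsigned long key)
{
	o->key = key;	/* the role of setting chunk->key before list_add_rcu() */
	atomic_store_explicit(&published, o, memory_order_release);
}

/* Reader: the acquire load guarantees the published fields are visible. */
static struct obj *lookup(unsigned long key)
{
	struct obj *o = atomic_load_explicit(&published, memory_order_acquire);
	return (o && o->key == key) ? o : NULL;
}
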
 262
 263bool audit_tree_match(struct audit_chunk *chunk, struct audit_tree *tree)
 264{
 265	int n;
 266	for (n = 0; n < chunk->count; n++)
 267		if (chunk->owners[n].owner == tree)
 268			return true;
 269	return false;
 270}
 271
 272/* tagging and untagging inodes with trees */
 273
 274static struct audit_chunk *find_chunk(struct node *p)
 275{
 276	int index = p->index & ~(1U<<31);
 277	p -= index;
 278	return container_of(p, struct audit_chunk, owners[0]);
 279}
 280
 281static void replace_mark_chunk(struct fsnotify_mark *mark,
 282			       struct audit_chunk *chunk)
 283{
 284	struct audit_chunk *old;
 285
 286	assert_spin_locked(&hash_lock);
 287	old = mark_chunk(mark);
 288	audit_mark(mark)->chunk = chunk;
 289	if (chunk)
 290		chunk->mark = mark;
 291	if (old)
 292		old->mark = NULL;
 293}
 294
 295static void replace_chunk(struct audit_chunk *new, struct audit_chunk *old)
 296{
 297	struct audit_tree *owner;
 298	int i, j;
 299
 300	new->key = old->key;
 301	list_splice_init(&old->trees, &new->trees);
 302	list_for_each_entry(owner, &new->trees, same_root)
 303		owner->root = new;
 304	for (i = j = 0; j < old->count; i++, j++) {
 305		if (!old->owners[j].owner) {
 306			i--;
 307			continue;
 308		}
 309		owner = old->owners[j].owner;
 310		new->owners[i].owner = owner;
 311		new->owners[i].index = old->owners[j].index - j + i;
 312		if (!owner) /* result of earlier fallback */
 313			continue;
 314		get_tree(owner);
 315		list_replace_init(&old->owners[j].list, &new->owners[i].list);
 316	}
 317	replace_mark_chunk(old->mark, new);
 318	/*
 319	 * Make sure chunk is fully initialized before making it visible in the
 320	 * hash. Pairs with a data dependency barrier in READ_ONCE() in
 321	 * audit_tree_lookup().
 322	 */
 323	smp_wmb();
 324	list_replace_rcu(&old->hash, &new->hash);
 325}
 326
 327static void remove_chunk_node(struct audit_chunk *chunk, struct node *p)
 328{
 329	struct audit_tree *owner = p->owner;
 330
 331	if (owner->root == chunk) {
 332		list_del_init(&owner->same_root);
 333		owner->root = NULL;
 334	}
 335	list_del_init(&p->list);
 336	p->owner = NULL;
 337	put_tree(owner);
 338}
 339
 340static int chunk_count_trees(struct audit_chunk *chunk)
 341{
 342	int i;
 343	int ret = 0;
 344
 345	for (i = 0; i < chunk->count; i++)
 346		if (chunk->owners[i].owner)
 347			ret++;
 348	return ret;
 349}
 350
 351static void untag_chunk(struct audit_chunk *chunk, struct fsnotify_mark *mark)
 352{
 353	struct audit_chunk *new;
 354	int size;
 355
 356	mutex_lock(&audit_tree_group->mark_mutex);
 357	/*
 358	 * mark_mutex stabilizes chunk attached to the mark so we can check
 359	 * whether it didn't change while we've dropped hash_lock.
 360	 */
 361	if (!(mark->flags & FSNOTIFY_MARK_FLAG_ATTACHED) ||
 362	    mark_chunk(mark) != chunk)
 363		goto out_mutex;
 364
 365	size = chunk_count_trees(chunk);
 366	if (!size) {
 367		spin_lock(&hash_lock);
 368		list_del_init(&chunk->trees);
 369		list_del_rcu(&chunk->hash);
 370		replace_mark_chunk(mark, NULL);
 371		spin_unlock(&hash_lock);
 372		fsnotify_detach_mark(mark);
 373		mutex_unlock(&audit_tree_group->mark_mutex);
 374		audit_mark_put_chunk(chunk);
 375		fsnotify_free_mark(mark);
 376		return;
 377	}
 378
 379	new = alloc_chunk(size);
 380	if (!new)
 381		goto out_mutex;
 382
 383	spin_lock(&hash_lock);
 384	/*
 385	 * This has to go last when updating chunk as once replace_chunk() is
 386	 * called, new RCU readers can see the new chunk.
 387	 */
 388	replace_chunk(new, chunk);
 389	spin_unlock(&hash_lock);
 390	mutex_unlock(&audit_tree_group->mark_mutex);
 391	audit_mark_put_chunk(chunk);
 392	return;
 393
 394out_mutex:
 395	mutex_unlock(&audit_tree_group->mark_mutex);
 396}
 397
 398/* Call with group->mark_mutex held, releases it */
 399static int create_chunk(struct inode *inode, struct audit_tree *tree)
 400{
 401	struct fsnotify_mark *mark;
 402	struct audit_chunk *chunk = alloc_chunk(1);
 403
 404	if (!chunk) {
 405		mutex_unlock(&audit_tree_group->mark_mutex);
 406		return -ENOMEM;
 407	}
 408
 409	mark = alloc_mark();
 410	if (!mark) {
 411		mutex_unlock(&audit_tree_group->mark_mutex);
 412		kfree(chunk);
 413		return -ENOMEM;
 414	}
 415
 416	if (fsnotify_add_inode_mark_locked(mark, inode, 0)) {
 417		mutex_unlock(&audit_tree_group->mark_mutex);
 418		fsnotify_put_mark(mark);
 419		kfree(chunk);
 420		return -ENOSPC;
 421	}
 422
 423	spin_lock(&hash_lock);
 424	if (tree->goner) {
 425		spin_unlock(&hash_lock);
 426		fsnotify_detach_mark(mark);
 427		mutex_unlock(&audit_tree_group->mark_mutex);
 428		fsnotify_free_mark(mark);
 429		fsnotify_put_mark(mark);
 430		kfree(chunk);
 431		return 0;
 432	}
 433	replace_mark_chunk(mark, chunk);
 434	chunk->owners[0].index = (1U << 31);
 435	chunk->owners[0].owner = tree;
 436	get_tree(tree);
 437	list_add(&chunk->owners[0].list, &tree->chunks);
 438	if (!tree->root) {
 439		tree->root = chunk;
 440		list_add(&tree->same_root, &chunk->trees);
 441	}
 442	chunk->key = inode_to_key(inode);
 443	/*
 444	 * Inserting into the hash table has to go last as once we do that RCU
 445	 * readers can see the chunk.
 446	 */
 447	insert_hash(chunk);
 448	spin_unlock(&hash_lock);
 449	mutex_unlock(&audit_tree_group->mark_mutex);
 450	/*
 451	 * Drop our initial reference. When mark we point to is getting freed,
 452	 * we get notification through ->freeing_mark callback and cleanup
 453	 * chunk pointing to this mark.
 454	 */
 455	fsnotify_put_mark(mark);
 456	return 0;
 457}
 458
 459/* the first tagged inode becomes root of tree */
 460static int tag_chunk(struct inode *inode, struct audit_tree *tree)
 461{
 462	struct fsnotify_mark *mark;
 463	struct audit_chunk *chunk, *old;
 464	struct node *p;
 465	int n;
 466
 467	mutex_lock(&audit_tree_group->mark_mutex);
 468	mark = fsnotify_find_mark(&inode->i_fsnotify_marks, audit_tree_group);
 469	if (!mark)
 470		return create_chunk(inode, tree);
 471
 472	/*
 473	 * Found mark is guaranteed to be attached and mark_mutex protects mark
 474	 * from getting detached and thus it makes sure there is chunk attached
 475	 * to the mark.
 476	 */
 477	/* are we already there? */
 478	spin_lock(&hash_lock);
 479	old = mark_chunk(mark);
 480	for (n = 0; n < old->count; n++) {
 481		if (old->owners[n].owner == tree) {
 482			spin_unlock(&hash_lock);
 483			mutex_unlock(&audit_tree_group->mark_mutex);
 484			fsnotify_put_mark(mark);
 485			return 0;
 486		}
 487	}
 488	spin_unlock(&hash_lock);
 489
 490	chunk = alloc_chunk(old->count + 1);
 491	if (!chunk) {
 492		mutex_unlock(&audit_tree_group->mark_mutex);
 493		fsnotify_put_mark(mark);
 494		return -ENOMEM;
 495	}
 496
 497	spin_lock(&hash_lock);
 498	if (tree->goner) {
 499		spin_unlock(&hash_lock);
 500		mutex_unlock(&audit_tree_group->mark_mutex);
 501		fsnotify_put_mark(mark);
 502		kfree(chunk);
 503		return 0;
 504	}
 505	p = &chunk->owners[chunk->count - 1];
 506	p->index = (chunk->count - 1) | (1U<<31);
 507	p->owner = tree;
 508	get_tree(tree);
 509	list_add(&p->list, &tree->chunks);
 510	if (!tree->root) {
 511		tree->root = chunk;
 512		list_add(&tree->same_root, &chunk->trees);
 513	}
 514	/*
 515	 * This has to go last when updating chunk as once replace_chunk() is
 516	 * called, new RCU readers can see the new chunk.
 517	 */
 518	replace_chunk(chunk, old);
 519	spin_unlock(&hash_lock);
 520	mutex_unlock(&audit_tree_group->mark_mutex);
 521	fsnotify_put_mark(mark); /* pair to fsnotify_find_mark */
 522	audit_mark_put_chunk(old);
 523
 524	return 0;
 525}
 526
 527static void audit_tree_log_remove_rule(struct audit_context *context,
 528				       struct audit_krule *rule)
 529{
 530	struct audit_buffer *ab;
 531
 532	if (!audit_enabled)
 533		return;
 534	ab = audit_log_start(context, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
 535	if (unlikely(!ab))
 536		return;
 537	audit_log_format(ab, "op=remove_rule dir=");
 538	audit_log_untrustedstring(ab, rule->tree->pathname);
 539	audit_log_key(ab, rule->filterkey);
 540	audit_log_format(ab, " list=%d res=1", rule->listnr);
 541	audit_log_end(ab);
 542}
 543
 544static void kill_rules(struct audit_context *context, struct audit_tree *tree)
 545{
 546	struct audit_krule *rule, *next;
 547	struct audit_entry *entry;
 548
 549	list_for_each_entry_safe(rule, next, &tree->rules, rlist) {
 550		entry = container_of(rule, struct audit_entry, rule);
 551
 552		list_del_init(&rule->rlist);
 553		if (rule->tree) {
 554			/* not a half-baked one */
 555			audit_tree_log_remove_rule(context, rule);
 556			if (entry->rule.exe)
 557				audit_remove_mark(entry->rule.exe);
 558			rule->tree = NULL;
 559			list_del_rcu(&entry->list);
 560			list_del(&entry->rule.list);
 561			call_rcu(&entry->rcu, audit_free_rule_rcu);
 562		}
 563	}
 564}
 565
 566/*
 567 * Remove tree from chunks. If 'tagged' is set, remove tree only from tagged
 568 * chunks. The function expects tagged chunks are all at the beginning of the
 569 * chunks list.
 570 */
 571static void prune_tree_chunks(struct audit_tree *victim, bool tagged)
 572{
 573	spin_lock(&hash_lock);
 574	while (!list_empty(&victim->chunks)) {
 575		struct node *p;
 576		struct audit_chunk *chunk;
 577		struct fsnotify_mark *mark;
 578
 579		p = list_first_entry(&victim->chunks, struct node, list);
 580		/* have we run out of marked? */
 581		if (tagged && !(p->index & (1U<<31)))
 582			break;
 583		chunk = find_chunk(p);
 584		mark = chunk->mark;
 585		remove_chunk_node(chunk, p);
 586		/* Racing with audit_tree_freeing_mark()? */
 587		if (!mark)
 588			continue;
 589		fsnotify_get_mark(mark);
 590		spin_unlock(&hash_lock);
 591
 592		untag_chunk(chunk, mark);
 593		fsnotify_put_mark(mark);
 594
 595		spin_lock(&hash_lock);
 596	}
 597	spin_unlock(&hash_lock);
 598	put_tree(victim);
 599}
 600
 601/*
 602 * finish killing struct audit_tree
 603 */
 604static void prune_one(struct audit_tree *victim)
 605{
 606	prune_tree_chunks(victim, false);
 607}
 608
 609/* trim the uncommitted chunks from tree */
 610
 611static void trim_marked(struct audit_tree *tree)
 612{
 613	struct list_head *p, *q;
 614	spin_lock(&hash_lock);
 615	if (tree->goner) {
 616		spin_unlock(&hash_lock);
 617		return;
 618	}
 619	/* reorder */
 620	for (p = tree->chunks.next; p != &tree->chunks; p = q) {
 621		struct node *node = list_entry(p, struct node, list);
 622		q = p->next;
 623		if (node->index & (1U<<31)) {
 624			list_del_init(p);
 625			list_add(p, &tree->chunks);
 626		}
 627	}
 628	spin_unlock(&hash_lock);
 629
 630	prune_tree_chunks(tree, true);
 631
 632	spin_lock(&hash_lock);
 633	if (!tree->root && !tree->goner) {
 634		tree->goner = 1;
 635		spin_unlock(&hash_lock);
 636		mutex_lock(&audit_filter_mutex);
 637		kill_rules(audit_context(), tree);
 638		list_del_init(&tree->list);
 639		mutex_unlock(&audit_filter_mutex);
 640		prune_one(tree);
 641	} else {
 642		spin_unlock(&hash_lock);
 643	}
 644}
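
The "reorder" loop in trim_marked() above is a one-pass partition: re-adding every tagged node at the list head leaves all tagged entries in front, which is exactly the precondition prune_tree_chunks(tree, true) relies on when it stops at the first untagged node. A self-contained userspace sketch of the same trick, with a minimal doubly linked list standing in for the kernel's list_head helpers:

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void list_init(struct list_head *h) { h->next = h->prev = h; }

static void list_del(struct list_head *e)
{
	e->prev->next = e->next;
	e->next->prev = e->prev;
}

static void list_add(struct list_head *e, struct list_head *head)
{
	e->next = head->next;
	e->prev = head;
	head->next->prev = e;
	head->next = e;
}

struct tnode { struct list_head list; unsigned index; };

int main(void)
{
	struct list_head chunks;
	struct tnode n[4] = { { .index = 0 }, { .index = 1 | 1U << 31 },
			      { .index = 2 }, { .index = 3 | 1U << 31 } };
	struct list_head *p, *q;
	int i;

	list_init(&chunks);
	for (i = 3; i >= 0; i--)	/* builds the list in order 0,1,2,3 */
		list_add(&n[i].list, &chunks);

	/* The reorder pass from trim_marked(): move tagged nodes to the head. */
	for (p = chunks.next; p != &chunks; p = q) {
		struct tnode *nd = (struct tnode *)p;	/* list is the first member */
		q = p->next;
		if (nd->index & (1U << 31)) {
			list_del(p);
			list_add(p, &chunks);
		}
	}

	for (p = chunks.next; p != &chunks; p = p->next)
		printf("%u ", ((struct tnode *)p)->index & ~(1U << 31));
	printf("\n");	/* prints "3 1 0 2": tagged nodes now lead the list */
	return 0;
}
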
 645
 646static void audit_schedule_prune(void);
 647
 648/* called with audit_filter_mutex */
 649int audit_remove_tree_rule(struct audit_krule *rule)
 650{
 651	struct audit_tree *tree;
 652	tree = rule->tree;
 653	if (tree) {
 654		spin_lock(&hash_lock);
 655		list_del_init(&rule->rlist);
 656		if (list_empty(&tree->rules) && !tree->goner) {
 657			tree->root = NULL;
 658			list_del_init(&tree->same_root);
 659			tree->goner = 1;
 660			list_move(&tree->list, &prune_list);
 661			rule->tree = NULL;
 662			spin_unlock(&hash_lock);
 663			audit_schedule_prune();
 664			return 1;
 665		}
 666		rule->tree = NULL;
 667		spin_unlock(&hash_lock);
 668		return 1;
 669	}
 670	return 0;
 671}
 672
 673static int compare_root(struct vfsmount *mnt, void *arg)
 674{
 675	return inode_to_key(d_backing_inode(mnt->mnt_root)) ==
 676	       (unsigned long)arg;
 677}
 678
 679void audit_trim_trees(void)
 680{
 681	struct list_head cursor;
 682
 683	mutex_lock(&audit_filter_mutex);
 684	list_add(&cursor, &tree_list);
 685	while (cursor.next != &tree_list) {
 686		struct audit_tree *tree;
 687		struct path path;
 688		struct vfsmount *root_mnt;
 689		struct node *node;
 690		int err;
 691
 692		tree = container_of(cursor.next, struct audit_tree, list);
 693		get_tree(tree);
 694		list_del(&cursor);
 695		list_add(&cursor, &tree->list);
 696		mutex_unlock(&audit_filter_mutex);
 697
 698		err = kern_path(tree->pathname, 0, &path);
 699		if (err)
 700			goto skip_it;
 701
 702		root_mnt = collect_mounts(&path);
 703		path_put(&path);
 704		if (IS_ERR(root_mnt))
 705			goto skip_it;
 706
 707		spin_lock(&hash_lock);
 708		list_for_each_entry(node, &tree->chunks, list) {
 709			struct audit_chunk *chunk = find_chunk(node);
  710			/* this could be NULL if the watch is dying elsewhere... */
 711			node->index |= 1U<<31;
 712			if (iterate_mounts(compare_root,
 713					   (void *)(chunk->key),
 714					   root_mnt))
 715				node->index &= ~(1U<<31);
 716		}
 717		spin_unlock(&hash_lock);
 718		trim_marked(tree);
 719		drop_collected_mounts(root_mnt);
 720skip_it:
 721		put_tree(tree);
 722		mutex_lock(&audit_filter_mutex);
 723	}
 724	list_del(&cursor);
 725	mutex_unlock(&audit_filter_mutex);
 726}
 727
 728int audit_make_tree(struct audit_krule *rule, char *pathname, u32 op)
 729{
 730
 731	if (pathname[0] != '/' ||
 732	    rule->listnr != AUDIT_FILTER_EXIT ||
 733	    op != Audit_equal ||
 734	    rule->inode_f || rule->watch || rule->tree)
 735		return -EINVAL;
 736	rule->tree = alloc_tree(pathname);
 737	if (!rule->tree)
 738		return -ENOMEM;
 739	return 0;
 740}
 741
 742void audit_put_tree(struct audit_tree *tree)
 743{
 744	put_tree(tree);
 745}
 746
 747static int tag_mount(struct vfsmount *mnt, void *arg)
 748{
 749	return tag_chunk(d_backing_inode(mnt->mnt_root), arg);
 750}
 751
 752/*
 753 * That gets run when evict_chunk() ends up needing to kill audit_tree.
 754 * Runs from a separate thread.
 755 */
 756static int prune_tree_thread(void *unused)
 757{
 758	for (;;) {
 759		if (list_empty(&prune_list)) {
 760			set_current_state(TASK_INTERRUPTIBLE);
 761			schedule();
 762		}
 763
 764		audit_ctl_lock();
 765		mutex_lock(&audit_filter_mutex);
 766
 767		while (!list_empty(&prune_list)) {
 768			struct audit_tree *victim;
 769
 770			victim = list_entry(prune_list.next,
 771					struct audit_tree, list);
 772			list_del_init(&victim->list);
 773
 774			mutex_unlock(&audit_filter_mutex);
 775
 776			prune_one(victim);
 777
 778			mutex_lock(&audit_filter_mutex);
 779		}
 780
 781		mutex_unlock(&audit_filter_mutex);
 782		audit_ctl_unlock();
 783	}
 784	return 0;
 785}
 786
 787static int audit_launch_prune(void)
 788{
 789	if (prune_thread)
 790		return 0;
 791	prune_thread = kthread_run(prune_tree_thread, NULL,
 792				"audit_prune_tree");
 793	if (IS_ERR(prune_thread)) {
 794		pr_err("cannot start thread audit_prune_tree");
 795		prune_thread = NULL;
 796		return -ENOMEM;
 797	}
 798	return 0;
 799}
 800
 801/* called with audit_filter_mutex */
 802int audit_add_tree_rule(struct audit_krule *rule)
 803{
 804	struct audit_tree *seed = rule->tree, *tree;
 805	struct path path;
 806	struct vfsmount *mnt;
 807	int err;
 808
 809	rule->tree = NULL;
 810	list_for_each_entry(tree, &tree_list, list) {
 811		if (!strcmp(seed->pathname, tree->pathname)) {
 812			put_tree(seed);
 813			rule->tree = tree;
 814			list_add(&rule->rlist, &tree->rules);
 815			return 0;
 816		}
 817	}
 818	tree = seed;
 819	list_add(&tree->list, &tree_list);
 820	list_add(&rule->rlist, &tree->rules);
 821	/* do not set rule->tree yet */
 822	mutex_unlock(&audit_filter_mutex);
 823
 824	if (unlikely(!prune_thread)) {
 825		err = audit_launch_prune();
 826		if (err)
 827			goto Err;
 828	}
 829
 830	err = kern_path(tree->pathname, 0, &path);
 831	if (err)
 832		goto Err;
 833	mnt = collect_mounts(&path);
 834	path_put(&path);
 835	if (IS_ERR(mnt)) {
 836		err = PTR_ERR(mnt);
 837		goto Err;
 838	}
 839
 840	get_tree(tree);
 841	err = iterate_mounts(tag_mount, tree, mnt);
 842	drop_collected_mounts(mnt);
 843
 844	if (!err) {
 845		struct node *node;
 846		spin_lock(&hash_lock);
 847		list_for_each_entry(node, &tree->chunks, list)
 848			node->index &= ~(1U<<31);
 849		spin_unlock(&hash_lock);
 850	} else {
 851		trim_marked(tree);
 852		goto Err;
 853	}
 854
 855	mutex_lock(&audit_filter_mutex);
 856	if (list_empty(&rule->rlist)) {
 857		put_tree(tree);
 858		return -ENOENT;
 859	}
 860	rule->tree = tree;
 861	put_tree(tree);
 862
 863	return 0;
 864Err:
 865	mutex_lock(&audit_filter_mutex);
 866	list_del_init(&tree->list);
 867	list_del_init(&tree->rules);
 868	put_tree(tree);
 869	return err;
 870}
 871
 872int audit_tag_tree(char *old, char *new)
 873{
 874	struct list_head cursor, barrier;
 875	int failed = 0;
 876	struct path path1, path2;
 877	struct vfsmount *tagged;
 878	int err;
 879
 880	err = kern_path(new, 0, &path2);
 881	if (err)
 882		return err;
 883	tagged = collect_mounts(&path2);
 884	path_put(&path2);
 885	if (IS_ERR(tagged))
 886		return PTR_ERR(tagged);
 887
 888	err = kern_path(old, 0, &path1);
 889	if (err) {
 890		drop_collected_mounts(tagged);
 891		return err;
 892	}
 893
 894	mutex_lock(&audit_filter_mutex);
 895	list_add(&barrier, &tree_list);
 896	list_add(&cursor, &barrier);
 897
 898	while (cursor.next != &tree_list) {
 899		struct audit_tree *tree;
 900		int good_one = 0;
 901
 902		tree = container_of(cursor.next, struct audit_tree, list);
 903		get_tree(tree);
 904		list_del(&cursor);
 905		list_add(&cursor, &tree->list);
 906		mutex_unlock(&audit_filter_mutex);
 907
 908		err = kern_path(tree->pathname, 0, &path2);
 909		if (!err) {
 910			good_one = path_is_under(&path1, &path2);
 911			path_put(&path2);
 912		}
 913
 914		if (!good_one) {
 915			put_tree(tree);
 916			mutex_lock(&audit_filter_mutex);
 917			continue;
 918		}
 919
 920		failed = iterate_mounts(tag_mount, tree, tagged);
 921		if (failed) {
 922			put_tree(tree);
 923			mutex_lock(&audit_filter_mutex);
 924			break;
 925		}
 926
 927		mutex_lock(&audit_filter_mutex);
 928		spin_lock(&hash_lock);
 929		if (!tree->goner) {
 930			list_del(&tree->list);
 931			list_add(&tree->list, &tree_list);
 932		}
 933		spin_unlock(&hash_lock);
 934		put_tree(tree);
 935	}
 936
 937	while (barrier.prev != &tree_list) {
 938		struct audit_tree *tree;
 939
 940		tree = container_of(barrier.prev, struct audit_tree, list);
 941		get_tree(tree);
 942		list_del(&tree->list);
 943		list_add(&tree->list, &barrier);
 944		mutex_unlock(&audit_filter_mutex);
 945
 946		if (!failed) {
 947			struct node *node;
 948			spin_lock(&hash_lock);
 949			list_for_each_entry(node, &tree->chunks, list)
 950				node->index &= ~(1U<<31);
 951			spin_unlock(&hash_lock);
 952		} else {
 953			trim_marked(tree);
 954		}
 955
 956		put_tree(tree);
 957		mutex_lock(&audit_filter_mutex);
 958	}
 959	list_del(&barrier);
 960	list_del(&cursor);
 961	mutex_unlock(&audit_filter_mutex);
 962	path_put(&path1);
 963	drop_collected_mounts(tagged);
 964	return failed;
 965}
 966
 967
 968static void audit_schedule_prune(void)
 969{
 970	wake_up_process(prune_thread);
 971}
 972
 973/*
 974 * ... and that one is done if evict_chunk() decides to delay until the end
 975 * of syscall.  Runs synchronously.
 976 */
 977void audit_kill_trees(struct audit_context *context)
 978{
 979	struct list_head *list = &context->killed_trees;
 980
 981	audit_ctl_lock();
 982	mutex_lock(&audit_filter_mutex);
 983
 984	while (!list_empty(list)) {
 985		struct audit_tree *victim;
 986
 987		victim = list_entry(list->next, struct audit_tree, list);
 988		kill_rules(context, victim);
 989		list_del_init(&victim->list);
 990
 991		mutex_unlock(&audit_filter_mutex);
 992
 993		prune_one(victim);
 994
 995		mutex_lock(&audit_filter_mutex);
 996	}
 997
 998	mutex_unlock(&audit_filter_mutex);
 999	audit_ctl_unlock();
1000}
1001
1002/*
1003 *  Here comes the stuff asynchronous to auditctl operations
1004 */
1005
1006static void evict_chunk(struct audit_chunk *chunk)
1007{
1008	struct audit_tree *owner;
1009	struct list_head *postponed = audit_killed_trees();
1010	int need_prune = 0;
1011	int n;
1012
1013	mutex_lock(&audit_filter_mutex);
1014	spin_lock(&hash_lock);
1015	while (!list_empty(&chunk->trees)) {
1016		owner = list_entry(chunk->trees.next,
1017				   struct audit_tree, same_root);
1018		owner->goner = 1;
1019		owner->root = NULL;
1020		list_del_init(&owner->same_root);
1021		spin_unlock(&hash_lock);
1022		if (!postponed) {
1023			kill_rules(audit_context(), owner);
1024			list_move(&owner->list, &prune_list);
1025			need_prune = 1;
1026		} else {
1027			list_move(&owner->list, postponed);
1028		}
1029		spin_lock(&hash_lock);
1030	}
1031	list_del_rcu(&chunk->hash);
1032	for (n = 0; n < chunk->count; n++)
1033		list_del_init(&chunk->owners[n].list);
1034	spin_unlock(&hash_lock);
1035	mutex_unlock(&audit_filter_mutex);
1036	if (need_prune)
1037		audit_schedule_prune();
1038}
1039
1040static int audit_tree_handle_event(struct fsnotify_group *group,
1041				   struct inode *to_tell,
1042				   u32 mask, const void *data, int data_type,
1043				   const struct qstr *file_name, u32 cookie,
1044				   struct fsnotify_iter_info *iter_info)
1045{
1046	return 0;
1047}
1048
1049static void audit_tree_freeing_mark(struct fsnotify_mark *mark,
1050				    struct fsnotify_group *group)
1051{
1052	struct audit_chunk *chunk;
1053
1054	mutex_lock(&mark->group->mark_mutex);
1055	spin_lock(&hash_lock);
1056	chunk = mark_chunk(mark);
1057	replace_mark_chunk(mark, NULL);
1058	spin_unlock(&hash_lock);
1059	mutex_unlock(&mark->group->mark_mutex);
1060	if (chunk) {
1061		evict_chunk(chunk);
1062		audit_mark_put_chunk(chunk);
1063	}
1064
1065	/*
1066	 * We are guaranteed to have at least one reference to the mark from
1067	 * either the inode or the caller of fsnotify_destroy_mark().
1068	 */
1069	BUG_ON(refcount_read(&mark->refcnt) < 1);
1070}
1071
1072static const struct fsnotify_ops audit_tree_ops = {
1073	.handle_event = audit_tree_handle_event,
1074	.freeing_mark = audit_tree_freeing_mark,
1075	.free_mark = audit_tree_destroy_watch,
1076};
1077
1078static int __init audit_tree_init(void)
1079{
1080	int i;
1081
1082	audit_tree_mark_cachep = KMEM_CACHE(audit_tree_mark, SLAB_PANIC);
1083
1084	audit_tree_group = fsnotify_alloc_group(&audit_tree_ops);
1085	if (IS_ERR(audit_tree_group))
1086		audit_panic("cannot initialize fsnotify group for rectree watches");
1087
1088	for (i = 0; i < HASH_SIZE; i++)
1089		INIT_LIST_HEAD(&chunk_hash_heads[i]);
1090
1091	return 0;
1092}
1093__initcall(audit_tree_init);
v4.17 (kernel/audit_tree.c)
   1// SPDX-License-Identifier: GPL-2.0
   2#include "audit.h"
   3#include <linux/fsnotify_backend.h>
   4#include <linux/namei.h>
   5#include <linux/mount.h>
   6#include <linux/kthread.h>
   7#include <linux/refcount.h>
   8#include <linux/slab.h>
   9
  10struct audit_tree;
  11struct audit_chunk;
  12
  13struct audit_tree {
  14	refcount_t count;
  15	int goner;
  16	struct audit_chunk *root;
  17	struct list_head chunks;
  18	struct list_head rules;
  19	struct list_head list;
  20	struct list_head same_root;
  21	struct rcu_head head;
  22	char pathname[];
  23};
  24
  25struct audit_chunk {
  26	struct list_head hash;
  27	struct fsnotify_mark mark;
  28	struct list_head trees;		/* with root here */
  29	int dead;
  30	int count;
  31	atomic_long_t refs;
  32	struct rcu_head head;
  33	struct node {
  34		struct list_head list;
  35		struct audit_tree *owner;
  36		unsigned index;		/* index; upper bit indicates 'will prune' */
  37	} owners[];
  38};
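
Structural difference from v5.4 worth flagging: here the fsnotify_mark is embedded directly in struct audit_chunk (there is no struct audit_tree_mark and no cached chunk->key), so a chunk is recovered from its mark by subtracting the member offset, and replacing a chunk means destroying and re-adding a mark. A hedged sketch of the recovery step, with container_of() expanded for illustration (mark_to_chunk is a hypothetical helper name; offsetof comes from <stddef.h>):

static inline struct audit_chunk *mark_to_chunk(struct fsnotify_mark *entry)
{
	/* What container_of(entry, struct audit_chunk, mark) boils down to. */
	return (struct audit_chunk *)((char *)entry -
				      offsetof(struct audit_chunk, mark));
}
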
  39
  40static LIST_HEAD(tree_list);
  41static LIST_HEAD(prune_list);
  42static struct task_struct *prune_thread;
  43
  44/*
  45 * One struct chunk is attached to each inode of interest.
  46 * We replace struct chunk on tagging/untagging.
  47 * Rules have pointer to struct audit_tree.
  48 * Rules have struct list_head rlist forming a list of rules over
  49 * the same tree.
  50 * References to struct chunk are collected at audit_inode{,_child}()
  51 * time and used in AUDIT_TREE rule matching.
  52 * These references are dropped at the same time we are calling
  53 * audit_free_names(), etc.
  54 *
  55 * Cyclic lists galore:
  56 * tree.chunks anchors chunk.owners[].list			hash_lock
  57 * tree.rules anchors rule.rlist				audit_filter_mutex
  58 * chunk.trees anchors tree.same_root				hash_lock
  59 * chunk.hash is a hash with middle bits of watch.inode as
  60 * a hash function.						RCU, hash_lock
  61 *
  62 * tree is refcounted; one reference for "some rules on rules_list refer to
  63 * it", one for each chunk with pointer to it.
  64 *
  65 * chunk is refcounted by embedded fsnotify_mark + .refs (non-zero refcount
  66 * of watch contributes 1 to .refs).
  67 *
   68 * node.index allows us to get from node.list to the containing chunk.
  69 * MSB of that sucker is stolen to mark taggings that we might have to
  70 * revert - several operations have very unpleasant cleanup logics and
  71 * that makes a difference.  Some.
  72 */
  73
  74static struct fsnotify_group *audit_tree_group;
  75
  76static struct audit_tree *alloc_tree(const char *s)
  77{
  78	struct audit_tree *tree;
  79
  80	tree = kmalloc(sizeof(struct audit_tree) + strlen(s) + 1, GFP_KERNEL);
  81	if (tree) {
  82		refcount_set(&tree->count, 1);
  83		tree->goner = 0;
  84		INIT_LIST_HEAD(&tree->chunks);
  85		INIT_LIST_HEAD(&tree->rules);
  86		INIT_LIST_HEAD(&tree->list);
  87		INIT_LIST_HEAD(&tree->same_root);
  88		tree->root = NULL;
  89		strcpy(tree->pathname, s);
  90	}
  91	return tree;
  92}
  93
  94static inline void get_tree(struct audit_tree *tree)
  95{
  96	refcount_inc(&tree->count);
  97}
  98
  99static inline void put_tree(struct audit_tree *tree)
 100{
 101	if (refcount_dec_and_test(&tree->count))
 102		kfree_rcu(tree, head);
 103}
 104
 105/* to avoid bringing the entire thing in audit.h */
 106const char *audit_tree_path(struct audit_tree *tree)
 107{
 108	return tree->pathname;
 109}
 110
 111static void free_chunk(struct audit_chunk *chunk)
 112{
 113	int i;
 114
 115	for (i = 0; i < chunk->count; i++) {
 116		if (chunk->owners[i].owner)
 117			put_tree(chunk->owners[i].owner);
 118	}
 119	kfree(chunk);
 120}
 121
 122void audit_put_chunk(struct audit_chunk *chunk)
 123{
 124	if (atomic_long_dec_and_test(&chunk->refs))
 125		free_chunk(chunk);
 126}
 127
 128static void __put_chunk(struct rcu_head *rcu)
 129{
 130	struct audit_chunk *chunk = container_of(rcu, struct audit_chunk, head);
 131	audit_put_chunk(chunk);
 132}
 133
 134static void audit_tree_destroy_watch(struct fsnotify_mark *entry)
 135{
 136	struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark);
 137	call_rcu(&chunk->head, __put_chunk);
 138}
 139
 140static struct audit_chunk *alloc_chunk(int count)
 141{
 142	struct audit_chunk *chunk;
 143	size_t size;
 144	int i;
 145
 146	size = offsetof(struct audit_chunk, owners) + count * sizeof(struct node);
 147	chunk = kzalloc(size, GFP_KERNEL);
 148	if (!chunk)
 149		return NULL;
 150
 151	INIT_LIST_HEAD(&chunk->hash);
 152	INIT_LIST_HEAD(&chunk->trees);
 153	chunk->count = count;
 154	atomic_long_set(&chunk->refs, 1);
 155	for (i = 0; i < count; i++) {
 156		INIT_LIST_HEAD(&chunk->owners[i].list);
 157		chunk->owners[i].index = i;
 158	}
 159	fsnotify_init_mark(&chunk->mark, audit_tree_group);
 160	chunk->mark.mask = FS_IN_IGNORED;
 161	return chunk;
 162}
 163
 164enum {HASH_SIZE = 128};
 165static struct list_head chunk_hash_heads[HASH_SIZE];
 166static __cacheline_aligned_in_smp DEFINE_SPINLOCK(hash_lock);
 167
 168/* Function to return search key in our hash from inode. */
 169static unsigned long inode_to_key(const struct inode *inode)
 170{
 171	return (unsigned long)inode;
 172}
 173
 174/*
 175 * Function to return search key in our hash from chunk. Key 0 is special and
 176 * should never be present in the hash.
 177 */
 178static unsigned long chunk_to_key(struct audit_chunk *chunk)
 179{
 180	/*
 181	 * We have a reference to the mark so it should be attached to a
 182	 * connector.
 183	 */
 184	if (WARN_ON_ONCE(!chunk->mark.connector))
 185		return 0;
 186	return (unsigned long)chunk->mark.connector->inode;
 187}
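
This is the other keying difference from v5.4: here the hash key is the inode pointer itself, re-derived through mark.connector->inode on every chunk_to_key() call, whereas v5.4 caches the address of the inode's i_fsnotify_marks field in chunk->key at insert time. A hedged side-by-side sketch (field names as in the respective listings above):

/* v4.17: key is the inode address, recomputed via the mark's connector. */
static unsigned long key_v4_17(const struct audit_chunk *chunk)
{
	return (unsigned long)chunk->mark.connector->inode;
}

/* v5.4: key is the address of the inode's mark list, cached in chunk->key. */
static unsigned long key_v5_4(const struct inode *inode)
{
	return (unsigned long)&inode->i_fsnotify_marks;
}
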
 188
 189static inline struct list_head *chunk_hash(unsigned long key)
 190{
 191	unsigned long n = key / L1_CACHE_BYTES;
 192	return chunk_hash_heads + n % HASH_SIZE;
 193}
 194
 195/* hash_lock & entry->lock is held by caller */
 196static void insert_hash(struct audit_chunk *chunk)
 197{
 198	unsigned long key = chunk_to_key(chunk);
 199	struct list_head *list;
 200
 201	if (!(chunk->mark.flags & FSNOTIFY_MARK_FLAG_ATTACHED))
 202		return;
 203	list = chunk_hash(key);
 204	list_add_rcu(&chunk->hash, list);
 205}
 206
 207/* called under rcu_read_lock */
 208struct audit_chunk *audit_tree_lookup(const struct inode *inode)
 209{
 210	unsigned long key = inode_to_key(inode);
 211	struct list_head *list = chunk_hash(key);
 212	struct audit_chunk *p;
 213
 214	list_for_each_entry_rcu(p, list, hash) {
 215		if (chunk_to_key(p) == key) {
 216			atomic_long_inc(&p->refs);
 217			return p;
 218		}
 219	}
 220	return NULL;
 221}
 222
 223bool audit_tree_match(struct audit_chunk *chunk, struct audit_tree *tree)
 224{
 225	int n;
 226	for (n = 0; n < chunk->count; n++)
 227		if (chunk->owners[n].owner == tree)
 228			return true;
 229	return false;
 230}
 231
 232/* tagging and untagging inodes with trees */
 233
 234static struct audit_chunk *find_chunk(struct node *p)
 235{
 236	int index = p->index & ~(1U<<31);
 237	p -= index;
 238	return container_of(p, struct audit_chunk, owners[0]);
 239}
 240
 241static void untag_chunk(struct node *p)
 242{
 243	struct audit_chunk *chunk = find_chunk(p);
 244	struct fsnotify_mark *entry = &chunk->mark;
 245	struct audit_chunk *new = NULL;
 246	struct audit_tree *owner;
 247	int size = chunk->count - 1;
 248	int i, j;
 249
 250	fsnotify_get_mark(entry);
 251
 252	spin_unlock(&hash_lock);
 253
 254	if (size)
 255		new = alloc_chunk(size);
 256
 257	mutex_lock(&entry->group->mark_mutex);
 258	spin_lock(&entry->lock);
 259	/*
 260	 * mark_mutex protects mark from getting detached and thus also from
 261	 * mark->connector->inode getting NULL.
 262	 */
 263	if (chunk->dead || !(entry->flags & FSNOTIFY_MARK_FLAG_ATTACHED)) {
 264		spin_unlock(&entry->lock);
 265		mutex_unlock(&entry->group->mark_mutex);
 266		if (new)
 267			fsnotify_put_mark(&new->mark);
 268		goto out;
 269	}
 270
 271	owner = p->owner;
 272
 273	if (!size) {
 274		chunk->dead = 1;
 275		spin_lock(&hash_lock);
 276		list_del_init(&chunk->trees);
 277		if (owner->root == chunk)
 278			owner->root = NULL;
 279		list_del_init(&p->list);
 280		list_del_rcu(&chunk->hash);
 281		spin_unlock(&hash_lock);
 282		spin_unlock(&entry->lock);
 283		mutex_unlock(&entry->group->mark_mutex);
 284		fsnotify_destroy_mark(entry, audit_tree_group);
 285		goto out;
 286	}
 287
 288	if (!new)
 289		goto Fallback;
 290
 291	if (fsnotify_add_mark_locked(&new->mark, entry->connector->inode,
 292				     NULL, 1)) {
 293		fsnotify_put_mark(&new->mark);
 294		goto Fallback;
 295	}
 296
 297	chunk->dead = 1;
 298	spin_lock(&hash_lock);
 299	list_replace_init(&chunk->trees, &new->trees);
 300	if (owner->root == chunk) {
 301		list_del_init(&owner->same_root);
 302		owner->root = NULL;
 303	}
 304
 305	for (i = j = 0; j <= size; i++, j++) {
 306		struct audit_tree *s;
 307		if (&chunk->owners[j] == p) {
 308			list_del_init(&p->list);
 309			i--;
 310			continue;
 311		}
 312		s = chunk->owners[j].owner;
 313		new->owners[i].owner = s;
 314		new->owners[i].index = chunk->owners[j].index - j + i;
 315		if (!s) /* result of earlier fallback */
 316			continue;
 317		get_tree(s);
 318		list_replace_init(&chunk->owners[j].list, &new->owners[i].list);
 319	}
 320
 321	list_replace_rcu(&chunk->hash, &new->hash);
 322	list_for_each_entry(owner, &new->trees, same_root)
 323		owner->root = new;
 324	spin_unlock(&hash_lock);
 325	spin_unlock(&entry->lock);
 326	mutex_unlock(&entry->group->mark_mutex);
 327	fsnotify_destroy_mark(entry, audit_tree_group);
 328	fsnotify_put_mark(&new->mark);	/* drop initial reference */
 329	goto out;
 330
 331Fallback:
 332	// do the best we can
 333	spin_lock(&hash_lock);
 334	if (owner->root == chunk) {
 335		list_del_init(&owner->same_root);
 336		owner->root = NULL;
 337	}
 338	list_del_init(&p->list);
 339	p->owner = NULL;
 340	put_tree(owner);
 341	spin_unlock(&hash_lock);
 342	spin_unlock(&entry->lock);
 343	mutex_unlock(&entry->group->mark_mutex);
 344out:
 345	fsnotify_put_mark(entry);
 346	spin_lock(&hash_lock);
 347}
 348
 349static int create_chunk(struct inode *inode, struct audit_tree *tree)
 350{
 351	struct fsnotify_mark *entry;
 352	struct audit_chunk *chunk = alloc_chunk(1);
 353	if (!chunk)
 354		return -ENOMEM;
 355
 356	entry = &chunk->mark;
 357	if (fsnotify_add_mark(entry, inode, NULL, 0)) {
 358		fsnotify_put_mark(entry);
 359		return -ENOSPC;
 360	}
 361
 362	spin_lock(&entry->lock);
 363	spin_lock(&hash_lock);
 364	if (tree->goner) {
 365		spin_unlock(&hash_lock);
 366		chunk->dead = 1;
 367		spin_unlock(&entry->lock);
 368		fsnotify_destroy_mark(entry, audit_tree_group);
 369		fsnotify_put_mark(entry);
 370		return 0;
 371	}
 372	chunk->owners[0].index = (1U << 31);
 373	chunk->owners[0].owner = tree;
 374	get_tree(tree);
 375	list_add(&chunk->owners[0].list, &tree->chunks);
 376	if (!tree->root) {
 377		tree->root = chunk;
 378		list_add(&tree->same_root, &chunk->trees);
 379	}
 380	insert_hash(chunk);
 381	spin_unlock(&hash_lock);
 382	spin_unlock(&entry->lock);
 383	fsnotify_put_mark(entry);	/* drop initial reference */
 384	return 0;
 385}
 386
 387/* the first tagged inode becomes root of tree */
 388static int tag_chunk(struct inode *inode, struct audit_tree *tree)
 389{
 390	struct fsnotify_mark *old_entry, *chunk_entry;
 391	struct audit_tree *owner;
 392	struct audit_chunk *chunk, *old;
 393	struct node *p;
 394	int n;
 395
 396	old_entry = fsnotify_find_mark(&inode->i_fsnotify_marks,
 397				       audit_tree_group);
 398	if (!old_entry)
 399		return create_chunk(inode, tree);
 400
 401	old = container_of(old_entry, struct audit_chunk, mark);
 402
 403	/* are we already there? */
 404	spin_lock(&hash_lock);
 405	for (n = 0; n < old->count; n++) {
 406		if (old->owners[n].owner == tree) {
 407			spin_unlock(&hash_lock);
 408			fsnotify_put_mark(old_entry);
 409			return 0;
 410		}
 411	}
 412	spin_unlock(&hash_lock);
 413
 414	chunk = alloc_chunk(old->count + 1);
 415	if (!chunk) {
 416		fsnotify_put_mark(old_entry);
 417		return -ENOMEM;
 418	}
 419
 420	chunk_entry = &chunk->mark;
 421
 422	mutex_lock(&old_entry->group->mark_mutex);
 423	spin_lock(&old_entry->lock);
 424	/*
 425	 * mark_mutex protects mark from getting detached and thus also from
 426	 * mark->connector->inode getting NULL.
 427	 */
 428	if (!(old_entry->flags & FSNOTIFY_MARK_FLAG_ATTACHED)) {
  429		/* old_entry is being shot, let's just lie */
 430		spin_unlock(&old_entry->lock);
 431		mutex_unlock(&old_entry->group->mark_mutex);
 432		fsnotify_put_mark(old_entry);
 433		fsnotify_put_mark(&chunk->mark);
 434		return -ENOENT;
 435	}
 436
 437	if (fsnotify_add_mark_locked(chunk_entry,
 438			     old_entry->connector->inode, NULL, 1)) {
 439		spin_unlock(&old_entry->lock);
 440		mutex_unlock(&old_entry->group->mark_mutex);
 441		fsnotify_put_mark(chunk_entry);
 442		fsnotify_put_mark(old_entry);
 443		return -ENOSPC;
 444	}
 445
 446	/* even though we hold old_entry->lock, this is safe since chunk_entry->lock could NEVER have been grabbed before */
 447	spin_lock(&chunk_entry->lock);
 448	spin_lock(&hash_lock);
 449
 450	/* we now hold old_entry->lock, chunk_entry->lock, and hash_lock */
 451	if (tree->goner) {
 452		spin_unlock(&hash_lock);
 453		chunk->dead = 1;
 454		spin_unlock(&chunk_entry->lock);
 455		spin_unlock(&old_entry->lock);
 456		mutex_unlock(&old_entry->group->mark_mutex);
 457
 458		fsnotify_destroy_mark(chunk_entry, audit_tree_group);
 459
 460		fsnotify_put_mark(chunk_entry);
 461		fsnotify_put_mark(old_entry);
 462		return 0;
 463	}
 464	list_replace_init(&old->trees, &chunk->trees);
 465	for (n = 0, p = chunk->owners; n < old->count; n++, p++) {
 466		struct audit_tree *s = old->owners[n].owner;
 467		p->owner = s;
 468		p->index = old->owners[n].index;
 469		if (!s) /* result of fallback in untag */
 470			continue;
 471		get_tree(s);
 472		list_replace_init(&old->owners[n].list, &p->list);
 473	}
 474	p->index = (chunk->count - 1) | (1U<<31);
 475	p->owner = tree;
 476	get_tree(tree);
 477	list_add(&p->list, &tree->chunks);
 478	list_replace_rcu(&old->hash, &chunk->hash);
 479	list_for_each_entry(owner, &chunk->trees, same_root)
 480		owner->root = chunk;
 481	old->dead = 1;
 482	if (!tree->root) {
 483		tree->root = chunk;
 484		list_add(&tree->same_root, &chunk->trees);
 485	}
 486	spin_unlock(&hash_lock);
 487	spin_unlock(&chunk_entry->lock);
 488	spin_unlock(&old_entry->lock);
 489	mutex_unlock(&old_entry->group->mark_mutex);
 490	fsnotify_destroy_mark(old_entry, audit_tree_group);
 491	fsnotify_put_mark(chunk_entry);	/* drop initial reference */
 492	fsnotify_put_mark(old_entry); /* pair to fsnotify_find mark_entry */
 493	return 0;
 494}
 495
 496static void audit_tree_log_remove_rule(struct audit_krule *rule)
 497{
 498	struct audit_buffer *ab;
 499
 500	ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
 501	if (unlikely(!ab))
 502		return;
 503	audit_log_format(ab, "op=remove_rule");
 504	audit_log_format(ab, " dir=");
 505	audit_log_untrustedstring(ab, rule->tree->pathname);
 506	audit_log_key(ab, rule->filterkey);
 507	audit_log_format(ab, " list=%d res=1", rule->listnr);
 508	audit_log_end(ab);
 509}
 510
 511static void kill_rules(struct audit_tree *tree)
 512{
 513	struct audit_krule *rule, *next;
 514	struct audit_entry *entry;
 515
 516	list_for_each_entry_safe(rule, next, &tree->rules, rlist) {
 517		entry = container_of(rule, struct audit_entry, rule);
 518
 519		list_del_init(&rule->rlist);
 520		if (rule->tree) {
 521			/* not a half-baked one */
 522			audit_tree_log_remove_rule(rule);
 523			if (entry->rule.exe)
 524				audit_remove_mark(entry->rule.exe);
 525			rule->tree = NULL;
 526			list_del_rcu(&entry->list);
 527			list_del(&entry->rule.list);
 528			call_rcu(&entry->rcu, audit_free_rule_rcu);
 529		}
 530	}
 531}
 532
 533/*
 534 * finish killing struct audit_tree
 535 */
 536static void prune_one(struct audit_tree *victim)
 537{
 538	spin_lock(&hash_lock);
 539	while (!list_empty(&victim->chunks)) {
 540		struct node *p;
 541
 542		p = list_entry(victim->chunks.next, struct node, list);
 543
 544		untag_chunk(p);
 545	}
 546	spin_unlock(&hash_lock);
 547	put_tree(victim);
 548}
 549
 550/* trim the uncommitted chunks from tree */
 551
 552static void trim_marked(struct audit_tree *tree)
 553{
 554	struct list_head *p, *q;
 555	spin_lock(&hash_lock);
 556	if (tree->goner) {
 557		spin_unlock(&hash_lock);
 558		return;
 559	}
 560	/* reorder */
 561	for (p = tree->chunks.next; p != &tree->chunks; p = q) {
 562		struct node *node = list_entry(p, struct node, list);
 563		q = p->next;
 564		if (node->index & (1U<<31)) {
 565			list_del_init(p);
 566			list_add(p, &tree->chunks);
 567		}
 568	}
 569
 570	while (!list_empty(&tree->chunks)) {
 571		struct node *node;
 572
 573		node = list_entry(tree->chunks.next, struct node, list);
 574
 575		/* have we run out of marked? */
 576		if (!(node->index & (1U<<31)))
 577			break;
 578
 579		untag_chunk(node);
 580	}
 581	if (!tree->root && !tree->goner) {
 582		tree->goner = 1;
 583		spin_unlock(&hash_lock);
 584		mutex_lock(&audit_filter_mutex);
 585		kill_rules(tree);
 586		list_del_init(&tree->list);
 587		mutex_unlock(&audit_filter_mutex);
 588		prune_one(tree);
 589	} else {
 590		spin_unlock(&hash_lock);
 591	}
 592}
 593
 594static void audit_schedule_prune(void);
 595
 596/* called with audit_filter_mutex */
 597int audit_remove_tree_rule(struct audit_krule *rule)
 598{
 599	struct audit_tree *tree;
 600	tree = rule->tree;
 601	if (tree) {
 602		spin_lock(&hash_lock);
 603		list_del_init(&rule->rlist);
 604		if (list_empty(&tree->rules) && !tree->goner) {
 605			tree->root = NULL;
 606			list_del_init(&tree->same_root);
 607			tree->goner = 1;
 608			list_move(&tree->list, &prune_list);
 609			rule->tree = NULL;
 610			spin_unlock(&hash_lock);
 611			audit_schedule_prune();
 612			return 1;
 613		}
 614		rule->tree = NULL;
 615		spin_unlock(&hash_lock);
 616		return 1;
 617	}
 618	return 0;
 619}
 620
 621static int compare_root(struct vfsmount *mnt, void *arg)
 622{
 623	return inode_to_key(d_backing_inode(mnt->mnt_root)) ==
 624	       (unsigned long)arg;
 625}
 626
 627void audit_trim_trees(void)
 628{
 629	struct list_head cursor;
 630
 631	mutex_lock(&audit_filter_mutex);
 632	list_add(&cursor, &tree_list);
 633	while (cursor.next != &tree_list) {
 634		struct audit_tree *tree;
 635		struct path path;
 636		struct vfsmount *root_mnt;
 637		struct node *node;
 638		int err;
 639
 640		tree = container_of(cursor.next, struct audit_tree, list);
 641		get_tree(tree);
 642		list_del(&cursor);
 643		list_add(&cursor, &tree->list);
 644		mutex_unlock(&audit_filter_mutex);
 645
 646		err = kern_path(tree->pathname, 0, &path);
 647		if (err)
 648			goto skip_it;
 649
 650		root_mnt = collect_mounts(&path);
 651		path_put(&path);
 652		if (IS_ERR(root_mnt))
 653			goto skip_it;
 654
 655		spin_lock(&hash_lock);
 656		list_for_each_entry(node, &tree->chunks, list) {
 657			struct audit_chunk *chunk = find_chunk(node);
  658			/* this could be NULL if the watch is dying elsewhere... */
 659			node->index |= 1U<<31;
 660			if (iterate_mounts(compare_root,
 661					   (void *)chunk_to_key(chunk),
 662					   root_mnt))
 663				node->index &= ~(1U<<31);
 664		}
 665		spin_unlock(&hash_lock);
 666		trim_marked(tree);
 667		drop_collected_mounts(root_mnt);
 668skip_it:
 669		put_tree(tree);
 670		mutex_lock(&audit_filter_mutex);
 671	}
 672	list_del(&cursor);
 673	mutex_unlock(&audit_filter_mutex);
 674}
 675
 676int audit_make_tree(struct audit_krule *rule, char *pathname, u32 op)
 677{
 678
 679	if (pathname[0] != '/' ||
 680	    rule->listnr != AUDIT_FILTER_EXIT ||
 681	    op != Audit_equal ||
 682	    rule->inode_f || rule->watch || rule->tree)
 683		return -EINVAL;
 684	rule->tree = alloc_tree(pathname);
 685	if (!rule->tree)
 686		return -ENOMEM;
 687	return 0;
 688}
 689
 690void audit_put_tree(struct audit_tree *tree)
 691{
 692	put_tree(tree);
 693}
 694
 695static int tag_mount(struct vfsmount *mnt, void *arg)
 696{
 697	return tag_chunk(d_backing_inode(mnt->mnt_root), arg);
 698}
 699
 700/*
 701 * That gets run when evict_chunk() ends up needing to kill audit_tree.
 702 * Runs from a separate thread.
 703 */
 704static int prune_tree_thread(void *unused)
 705{
 706	for (;;) {
 707		if (list_empty(&prune_list)) {
 708			set_current_state(TASK_INTERRUPTIBLE);
 709			schedule();
 710		}
 711
 712		audit_ctl_lock();
 713		mutex_lock(&audit_filter_mutex);
 714
 715		while (!list_empty(&prune_list)) {
 716			struct audit_tree *victim;
 717
 718			victim = list_entry(prune_list.next,
 719					struct audit_tree, list);
 720			list_del_init(&victim->list);
 721
 722			mutex_unlock(&audit_filter_mutex);
 723
 724			prune_one(victim);
 725
 726			mutex_lock(&audit_filter_mutex);
 727		}
 728
 729		mutex_unlock(&audit_filter_mutex);
 730		audit_ctl_unlock();
 731	}
 732	return 0;
 733}
 734
 735static int audit_launch_prune(void)
 736{
 737	if (prune_thread)
 738		return 0;
 739	prune_thread = kthread_run(prune_tree_thread, NULL,
 740				"audit_prune_tree");
 741	if (IS_ERR(prune_thread)) {
 742		pr_err("cannot start thread audit_prune_tree");
 743		prune_thread = NULL;
 744		return -ENOMEM;
 745	}
 746	return 0;
 747}
 748
 749/* called with audit_filter_mutex */
 750int audit_add_tree_rule(struct audit_krule *rule)
 751{
 752	struct audit_tree *seed = rule->tree, *tree;
 753	struct path path;
 754	struct vfsmount *mnt;
 755	int err;
 756
 757	rule->tree = NULL;
 758	list_for_each_entry(tree, &tree_list, list) {
 759		if (!strcmp(seed->pathname, tree->pathname)) {
 760			put_tree(seed);
 761			rule->tree = tree;
 762			list_add(&rule->rlist, &tree->rules);
 763			return 0;
 764		}
 765	}
 766	tree = seed;
 767	list_add(&tree->list, &tree_list);
 768	list_add(&rule->rlist, &tree->rules);
 769	/* do not set rule->tree yet */
 770	mutex_unlock(&audit_filter_mutex);
 771
 772	if (unlikely(!prune_thread)) {
 773		err = audit_launch_prune();
 774		if (err)
 775			goto Err;
 776	}
 777
 778	err = kern_path(tree->pathname, 0, &path);
 779	if (err)
 780		goto Err;
 781	mnt = collect_mounts(&path);
 782	path_put(&path);
 783	if (IS_ERR(mnt)) {
 784		err = PTR_ERR(mnt);
 785		goto Err;
 786	}
 787
 788	get_tree(tree);
 789	err = iterate_mounts(tag_mount, tree, mnt);
 790	drop_collected_mounts(mnt);
 791
 792	if (!err) {
 793		struct node *node;
 794		spin_lock(&hash_lock);
 795		list_for_each_entry(node, &tree->chunks, list)
 796			node->index &= ~(1U<<31);
 797		spin_unlock(&hash_lock);
 798	} else {
 799		trim_marked(tree);
 800		goto Err;
 801	}
 802
 803	mutex_lock(&audit_filter_mutex);
 804	if (list_empty(&rule->rlist)) {
 805		put_tree(tree);
 806		return -ENOENT;
 807	}
 808	rule->tree = tree;
 809	put_tree(tree);
 810
 811	return 0;
 812Err:
 813	mutex_lock(&audit_filter_mutex);
 814	list_del_init(&tree->list);
 815	list_del_init(&tree->rules);
 816	put_tree(tree);
 817	return err;
 818}
 819
 820int audit_tag_tree(char *old, char *new)
 821{
 822	struct list_head cursor, barrier;
 823	int failed = 0;
 824	struct path path1, path2;
 825	struct vfsmount *tagged;
 826	int err;
 827
 828	err = kern_path(new, 0, &path2);
 829	if (err)
 830		return err;
 831	tagged = collect_mounts(&path2);
 832	path_put(&path2);
 833	if (IS_ERR(tagged))
 834		return PTR_ERR(tagged);
 835
 836	err = kern_path(old, 0, &path1);
 837	if (err) {
 838		drop_collected_mounts(tagged);
 839		return err;
 840	}
 841
 842	mutex_lock(&audit_filter_mutex);
 843	list_add(&barrier, &tree_list);
 844	list_add(&cursor, &barrier);
 845
 846	while (cursor.next != &tree_list) {
 847		struct audit_tree *tree;
 848		int good_one = 0;
 849
 850		tree = container_of(cursor.next, struct audit_tree, list);
 851		get_tree(tree);
 852		list_del(&cursor);
 853		list_add(&cursor, &tree->list);
 854		mutex_unlock(&audit_filter_mutex);
 855
 856		err = kern_path(tree->pathname, 0, &path2);
 857		if (!err) {
 858			good_one = path_is_under(&path1, &path2);
 859			path_put(&path2);
 860		}
 861
 862		if (!good_one) {
 863			put_tree(tree);
 864			mutex_lock(&audit_filter_mutex);
 865			continue;
 866		}
 867
 868		failed = iterate_mounts(tag_mount, tree, tagged);
 869		if (failed) {
 870			put_tree(tree);
 871			mutex_lock(&audit_filter_mutex);
 872			break;
 873		}
 874
 875		mutex_lock(&audit_filter_mutex);
 876		spin_lock(&hash_lock);
 877		if (!tree->goner) {
 878			list_del(&tree->list);
 879			list_add(&tree->list, &tree_list);
 880		}
 881		spin_unlock(&hash_lock);
 882		put_tree(tree);
 883	}
 884
 885	while (barrier.prev != &tree_list) {
 886		struct audit_tree *tree;
 887
 888		tree = container_of(barrier.prev, struct audit_tree, list);
 889		get_tree(tree);
 890		list_del(&tree->list);
 891		list_add(&tree->list, &barrier);
 892		mutex_unlock(&audit_filter_mutex);
 893
 894		if (!failed) {
 895			struct node *node;
 896			spin_lock(&hash_lock);
 897			list_for_each_entry(node, &tree->chunks, list)
 898				node->index &= ~(1U<<31);
 899			spin_unlock(&hash_lock);
 900		} else {
 901			trim_marked(tree);
 902		}
 903
 904		put_tree(tree);
 905		mutex_lock(&audit_filter_mutex);
 906	}
 907	list_del(&barrier);
 908	list_del(&cursor);
 909	mutex_unlock(&audit_filter_mutex);
 910	path_put(&path1);
 911	drop_collected_mounts(tagged);
 912	return failed;
 913}
 914
 915
 916static void audit_schedule_prune(void)
 917{
 918	wake_up_process(prune_thread);
 919}
 920
 921/*
 922 * ... and that one is done if evict_chunk() decides to delay until the end
 923 * of syscall.  Runs synchronously.
 924 */
 925void audit_kill_trees(struct list_head *list)
 926{
 927	audit_ctl_lock();
 928	mutex_lock(&audit_filter_mutex);
 929
 930	while (!list_empty(list)) {
 931		struct audit_tree *victim;
 932
 933		victim = list_entry(list->next, struct audit_tree, list);
 934		kill_rules(victim);
 935		list_del_init(&victim->list);
 936
 937		mutex_unlock(&audit_filter_mutex);
 938
 939		prune_one(victim);
 940
 941		mutex_lock(&audit_filter_mutex);
 942	}
 943
 944	mutex_unlock(&audit_filter_mutex);
 945	audit_ctl_unlock();
 946}
 947
 948/*
 949 *  Here comes the stuff asynchronous to auditctl operations
 950 */
 951
 952static void evict_chunk(struct audit_chunk *chunk)
 953{
 954	struct audit_tree *owner;
 955	struct list_head *postponed = audit_killed_trees();
 956	int need_prune = 0;
 957	int n;
 958
 959	if (chunk->dead)
 960		return;
 961
 962	chunk->dead = 1;
 963	mutex_lock(&audit_filter_mutex);
 964	spin_lock(&hash_lock);
 965	while (!list_empty(&chunk->trees)) {
 966		owner = list_entry(chunk->trees.next,
 967				   struct audit_tree, same_root);
 968		owner->goner = 1;
 969		owner->root = NULL;
 970		list_del_init(&owner->same_root);
 971		spin_unlock(&hash_lock);
 972		if (!postponed) {
 973			kill_rules(owner);
 974			list_move(&owner->list, &prune_list);
 975			need_prune = 1;
 976		} else {
 977			list_move(&owner->list, postponed);
 978		}
 979		spin_lock(&hash_lock);
 980	}
 981	list_del_rcu(&chunk->hash);
 982	for (n = 0; n < chunk->count; n++)
 983		list_del_init(&chunk->owners[n].list);
 984	spin_unlock(&hash_lock);
 985	mutex_unlock(&audit_filter_mutex);
 986	if (need_prune)
 987		audit_schedule_prune();
 988}
 989
 990static int audit_tree_handle_event(struct fsnotify_group *group,
 991				   struct inode *to_tell,
 992				   struct fsnotify_mark *inode_mark,
 993				   struct fsnotify_mark *vfsmount_mark,
 994				   u32 mask, const void *data, int data_type,
 995				   const unsigned char *file_name, u32 cookie,
 996				   struct fsnotify_iter_info *iter_info)
 997{
 998	return 0;
 999}
1000
1001static void audit_tree_freeing_mark(struct fsnotify_mark *entry, struct fsnotify_group *group)
1002{
1003	struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark);
1004
1005	evict_chunk(chunk);
1006
1007	/*
1008	 * We are guaranteed to have at least one reference to the mark from
1009	 * either the inode or the caller of fsnotify_destroy_mark().
1010	 */
1011	BUG_ON(refcount_read(&entry->refcnt) < 1);
1012}
1013
1014static const struct fsnotify_ops audit_tree_ops = {
1015	.handle_event = audit_tree_handle_event,
1016	.freeing_mark = audit_tree_freeing_mark,
1017	.free_mark = audit_tree_destroy_watch,
1018};
1019
1020static int __init audit_tree_init(void)
1021{
1022	int i;
1023
1024	audit_tree_group = fsnotify_alloc_group(&audit_tree_ops);
1025	if (IS_ERR(audit_tree_group))
1026		audit_panic("cannot initialize fsnotify group for rectree watches");
1027
1028	for (i = 0; i < HASH_SIZE; i++)
1029		INIT_LIST_HEAD(&chunk_hash_heads[i]);
1030
1031	return 0;
1032}
1033__initcall(audit_tree_init);