Linux Audit

v5.4
   1// SPDX-License-Identifier: GPL-2.0
   2#include "audit.h"
   3#include <linux/fsnotify_backend.h>
   4#include <linux/namei.h>
   5#include <linux/mount.h>
   6#include <linux/kthread.h>
   7#include <linux/refcount.h>
   8#include <linux/slab.h>
   9
  10struct audit_tree;
  11struct audit_chunk;
  12
  13struct audit_tree {
  14	refcount_t count;
  15	int goner;
  16	struct audit_chunk *root;
  17	struct list_head chunks;
  18	struct list_head rules;
  19	struct list_head list;
  20	struct list_head same_root;
  21	struct rcu_head head;
  22	char pathname[];
  23};
  24
  25struct audit_chunk {
  26	struct list_head hash;
  27	unsigned long key;
  28	struct fsnotify_mark *mark;
  29	struct list_head trees;		/* with root here */
  30	int count;
  31	atomic_long_t refs;
  32	struct rcu_head head;
  33	struct node {
  34		struct list_head list;
  35		struct audit_tree *owner;
  36		unsigned index;		/* index; upper bit indicates 'will prune' */
  37	} owners[];
  38};
  39
  40struct audit_tree_mark {
  41	struct fsnotify_mark mark;
  42	struct audit_chunk *chunk;
  43};
  44
  45static LIST_HEAD(tree_list);
  46static LIST_HEAD(prune_list);
  47static struct task_struct *prune_thread;
  48
  49/*
  50 * One struct chunk is attached to each inode of interest through
  51 * audit_tree_mark (fsnotify mark). We replace struct chunk on tagging /
  52 * untagging; the mark is stable as long as there is a chunk attached. The
  53 * association between mark and chunk is protected by hash_lock and
  54 * audit_tree_group->mark_mutex. Thus as long as we hold
  55 * audit_tree_group->mark_mutex and check that the mark is alive via the
  56 * FSNOTIFY_MARK_FLAG_ATTACHED flag, we are sure the mark points to
  57 * the current chunk.
  58 *
  59 * Rules have pointer to struct audit_tree.
  60 * Rules have struct list_head rlist forming a list of rules over
  61 * the same tree.
  62 * References to struct chunk are collected at audit_inode{,_child}()
  63 * time and used in AUDIT_TREE rule matching.
  64 * These references are dropped at the same time we are calling
  65 * audit_free_names(), etc.
  66 *
  67 * Cyclic lists galore:
  68 * tree.chunks anchors chunk.owners[].list			hash_lock
  69 * tree.rules anchors rule.rlist				audit_filter_mutex
  70 * chunk.trees anchors tree.same_root				hash_lock
  71 * chunk.hash is a hash with middle bits of watch.inode as
  72 * a hash function.						RCU, hash_lock
  73 *
  74 * tree is refcounted; one reference for "some rules on rules_list refer to
  75 * it", one for each chunk with pointer to it.
  76 *
  77 * chunk is refcounted by embedded .refs. Mark associated with the chunk holds
  78 * one chunk reference. This reference is dropped either when a mark is going
  79 * to be freed (corresponding inode goes away) or when chunk attached to the
  80 * mark gets replaced. This reference must be dropped using
  81 * audit_mark_put_chunk() to make sure the reference is dropped only after RCU
  82 * grace period as it protects RCU readers of the hash table.
  83 *
  84 * node.index allows us to get from node.list to the containing chunk.
  85 * MSB of that sucker is stolen to mark taggings that we might have to
  86 * revert - several operations have very unpleasant cleanup logics and
  87 * that makes a difference.  Some.
  88 */
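
/*
 * A minimal sketch of the node.index encoding described above (the helper
 * names are illustrative, not part of this file): the low 31 bits give the
 * slot of a node within chunk->owners[], and the MSB is the 'will prune'
 * tag. find_chunk() below inverts exactly this packing.
 */
#if 0
static inline unsigned int node_slot(const struct node *p)
{
	return p->index & ~(1U << 31);	/* strip the 'will prune' bit */
}

static inline int node_marked_for_prune(const struct node *p)
{
	return !!(p->index & (1U << 31));
}
#endif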
  89
  90static struct fsnotify_group *audit_tree_group;
  91static struct kmem_cache *audit_tree_mark_cachep __read_mostly;
  92
  93static struct audit_tree *alloc_tree(const char *s)
  94{
  95	struct audit_tree *tree;
  96
  97	tree = kmalloc(sizeof(struct audit_tree) + strlen(s) + 1, GFP_KERNEL);
  98	if (tree) {
  99		refcount_set(&tree->count, 1);
 100		tree->goner = 0;
 101		INIT_LIST_HEAD(&tree->chunks);
 102		INIT_LIST_HEAD(&tree->rules);
 103		INIT_LIST_HEAD(&tree->list);
 104		INIT_LIST_HEAD(&tree->same_root);
 105		tree->root = NULL;
 106		strcpy(tree->pathname, s);
 107	}
 108	return tree;
 109}
 110
 111static inline void get_tree(struct audit_tree *tree)
 112{
 113	refcount_inc(&tree->count);
 114}
 115
 116static inline void put_tree(struct audit_tree *tree)
 117{
 118	if (refcount_dec_and_test(&tree->count))
 119		kfree_rcu(tree, head);
 120}
 121
 122/* to avoid bringing the entire thing into audit.h */
 123const char *audit_tree_path(struct audit_tree *tree)
 124{
 125	return tree->pathname;
 126}
 127
 128static void free_chunk(struct audit_chunk *chunk)
 129{
 130	int i;
 131
 132	for (i = 0; i < chunk->count; i++) {
 133		if (chunk->owners[i].owner)
 134			put_tree(chunk->owners[i].owner);
 135	}
 136	kfree(chunk);
 137}
 138
 139void audit_put_chunk(struct audit_chunk *chunk)
 140{
 141	if (atomic_long_dec_and_test(&chunk->refs))
 142		free_chunk(chunk);
 143}
 144
 145static void __put_chunk(struct rcu_head *rcu)
 146{
 147	struct audit_chunk *chunk = container_of(rcu, struct audit_chunk, head);
 148	audit_put_chunk(chunk);
 149}
 150
 151/*
 152 * Drop reference to the chunk that was held by the mark. This is the reference
 153 * that gets dropped after we've removed the chunk from the hash table and we
 154 * use it to make sure the chunk cannot be freed before the RCU grace period expires.
 155 */
 156static void audit_mark_put_chunk(struct audit_chunk *chunk)
 157{
 158	call_rcu(&chunk->head, __put_chunk);
 159}
 160
 161static inline struct audit_tree_mark *audit_mark(struct fsnotify_mark *mark)
 162{
 163	return container_of(mark, struct audit_tree_mark, mark);
 164}
 165
 166static struct audit_chunk *mark_chunk(struct fsnotify_mark *mark)
 167{
 168	return audit_mark(mark)->chunk;
 169}
 170
 171static void audit_tree_destroy_watch(struct fsnotify_mark *mark)
 172{
 173	kmem_cache_free(audit_tree_mark_cachep, audit_mark(mark));
 174}
 175
 176static struct fsnotify_mark *alloc_mark(void)
 177{
 178	struct audit_tree_mark *amark;
 179
 180	amark = kmem_cache_zalloc(audit_tree_mark_cachep, GFP_KERNEL);
 181	if (!amark)
 182		return NULL;
 183	fsnotify_init_mark(&amark->mark, audit_tree_group);
 184	amark->mark.mask = FS_IN_IGNORED;
 185	return &amark->mark;
 186}
 187
 188static struct audit_chunk *alloc_chunk(int count)
 189{
 190	struct audit_chunk *chunk;
 191	size_t size;
 192	int i;
 193
 194	size = offsetof(struct audit_chunk, owners) + count * sizeof(struct node);
 195	chunk = kzalloc(size, GFP_KERNEL);
 196	if (!chunk)
 197		return NULL;
 198
 199	INIT_LIST_HEAD(&chunk->hash);
 200	INIT_LIST_HEAD(&chunk->trees);
 201	chunk->count = count;
 202	atomic_long_set(&chunk->refs, 1);
 203	for (i = 0; i < count; i++) {
 204		INIT_LIST_HEAD(&chunk->owners[i].list);
 205		chunk->owners[i].index = i;
 206	}
 207	return chunk;
 208}
 209
 210enum {HASH_SIZE = 128};
 211static struct list_head chunk_hash_heads[HASH_SIZE];
 212static __cacheline_aligned_in_smp DEFINE_SPINLOCK(hash_lock);
 213
 214/* Function to return search key in our hash from inode. */
 215static unsigned long inode_to_key(const struct inode *inode)
 216{
 217	/* Use address pointed to by connector->obj as the key */
 218	return (unsigned long)&inode->i_fsnotify_marks;
 219}
 220
 221static inline struct list_head *chunk_hash(unsigned long key)
 222{
 223	unsigned long n = key / L1_CACHE_BYTES;
 224	return chunk_hash_heads + n % HASH_SIZE;
 225}
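
/*
 * A worked example of the bucketing above (numbers are hypothetical): with
 * L1_CACHE_BYTES == 64 and HASH_SIZE == 128, a key of 0x9040 lands in bucket
 * (0x9040 / 64) % 128 == 65. Dividing by L1_CACHE_BYTES first discards the
 * low, low-entropy bits of the address-derived key before the modulus picks
 * one of the 128 buckets.
 */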
 226
 227/* hash_lock & mark->group->mark_mutex are held by caller */
 228static void insert_hash(struct audit_chunk *chunk)
 229{
 230	struct list_head *list;
 231
 232	/*
 233	 * Make sure chunk is fully initialized before making it visible in the
 234	 * hash. Pairs with a data dependency barrier in READ_ONCE() in
 235	 * audit_tree_lookup().
 236	 */
 237	smp_wmb();
 238	WARN_ON_ONCE(!chunk->key);
 239	list = chunk_hash(chunk->key);
 240	list_add_rcu(&chunk->hash, list);
 241}
 242
 243/* called under rcu_read_lock */
 244struct audit_chunk *audit_tree_lookup(const struct inode *inode)
 245{
 246	unsigned long key = inode_to_key(inode);
 247	struct list_head *list = chunk_hash(key);
 248	struct audit_chunk *p;
 249
 250	list_for_each_entry_rcu(p, list, hash) {
 251		/*
 252		 * We use a data dependency barrier in READ_ONCE() to make sure
 253		 * the chunk we see is fully initialized.
 254		 */
 255		if (READ_ONCE(p->key) == key) {
 256			atomic_long_inc(&p->refs);
 257			return p;
 258		}
 259	}
 260	return NULL;
 261}
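
/*
 * The publish/consume pairing used above, spelled out (a descriptive note,
 * not additional kernel code): the writer fully initializes the chunk,
 * issues smp_wmb(), and only then links it into its bucket with
 * list_add_rcu() or list_replace_rcu(); the reader's READ_ONCE(p->key),
 * together with the address dependency on p, ensures it never dereferences
 * a chunk whose initialization it cannot yet observe.
 */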
 262
 263bool audit_tree_match(struct audit_chunk *chunk, struct audit_tree *tree)
 264{
 265	int n;
 266	for (n = 0; n < chunk->count; n++)
 267		if (chunk->owners[n].owner == tree)
 268			return true;
 269	return false;
 270}
 271
 272/* tagging and untagging inodes with trees */
 273
 274static struct audit_chunk *find_chunk(struct node *p)
 275{
 276	int index = p->index & ~(1U<<31);
 277	p -= index;
 278	return container_of(p, struct audit_chunk, owners[0]);
 279}
 280
 281static void replace_mark_chunk(struct fsnotify_mark *mark,
 282			       struct audit_chunk *chunk)
 283{
 284	struct audit_chunk *old;
 285
 286	assert_spin_locked(&hash_lock);
 287	old = mark_chunk(mark);
 288	audit_mark(mark)->chunk = chunk;
 289	if (chunk)
 290		chunk->mark = mark;
 291	if (old)
 292		old->mark = NULL;
 293}
 294
 295static void replace_chunk(struct audit_chunk *new, struct audit_chunk *old)
 296{
 297	struct audit_tree *owner;
 298	int i, j;
 299
 300	new->key = old->key;
 301	list_splice_init(&old->trees, &new->trees);
 302	list_for_each_entry(owner, &new->trees, same_root)
 303		owner->root = new;
 304	for (i = j = 0; j < old->count; i++, j++) {
 305		if (!old->owners[j].owner) {
 306			i--;
 307			continue;
 308		}
 309		owner = old->owners[j].owner;
 310		new->owners[i].owner = owner;
 311		new->owners[i].index = old->owners[j].index - j + i;
 312		if (!owner) /* result of earlier fallback */
 313			continue;
 314		get_tree(owner);
 315		list_replace_init(&old->owners[j].list, &new->owners[i].list);
 316	}
 317	replace_mark_chunk(old->mark, new);
 318	/*
 319	 * Make sure chunk is fully initialized before making it visible in the
 320	 * hash. Pairs with a data dependency barrier in READ_ONCE() in
 321	 * audit_tree_lookup().
 322	 */
 323	smp_wmb();
 324	list_replace_rcu(&old->hash, &new->hash);
 325}
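
/*
 * A worked example of the renumbering in replace_chunk() above (values are
 * hypothetical): if the old chunk had three owners and slot 1 has lost its
 * owner, the surviving slots 0 and 2 become new slots 0 and 1. Because the
 * low bits of node.index equal the slot position, "old index - j + i"
 * rewrites just those bits, while the MSB 'will prune' tag carries over
 * unchanged.
 */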
 326
 327static void remove_chunk_node(struct audit_chunk *chunk, struct node *p)
 328{
 329	struct audit_tree *owner = p->owner;
 330
 331	if (owner->root == chunk) {
 332		list_del_init(&owner->same_root);
 333		owner->root = NULL;
 334	}
 335	list_del_init(&p->list);
 336	p->owner = NULL;
 337	put_tree(owner);
 338}
 339
 340static int chunk_count_trees(struct audit_chunk *chunk)
 341{
 342	int i;
 343	int ret = 0;
 344
 345	for (i = 0; i < chunk->count; i++)
 346		if (chunk->owners[i].owner)
 347			ret++;
 348	return ret;
 349}
 350
 351static void untag_chunk(struct audit_chunk *chunk, struct fsnotify_mark *mark)
 352{
 353	struct audit_chunk *new;
 354	int size;
 355
 356	mutex_lock(&audit_tree_group->mark_mutex);
 357	/*
 358	 * mark_mutex stabilizes the chunk attached to the mark, so we can check
 359	 * that it did not change while we had dropped hash_lock.
 360	 */
 361	if (!(mark->flags & FSNOTIFY_MARK_FLAG_ATTACHED) ||
 362	    mark_chunk(mark) != chunk)
 363		goto out_mutex;
 364
 365	size = chunk_count_trees(chunk);
 366	if (!size) {
 367		spin_lock(&hash_lock);
 368		list_del_init(&chunk->trees);
 369		list_del_rcu(&chunk->hash);
 370		replace_mark_chunk(mark, NULL);
 371		spin_unlock(&hash_lock);
 372		fsnotify_detach_mark(mark);
 373		mutex_unlock(&audit_tree_group->mark_mutex);
 374		audit_mark_put_chunk(chunk);
 375		fsnotify_free_mark(mark);
 376		return;
 377	}
 378
 379	new = alloc_chunk(size);
 380	if (!new)
 381		goto out_mutex;
 382
 383	spin_lock(&hash_lock);
 384	/*
 385	 * This has to go last when updating chunk as once replace_chunk() is
 386	 * called, new RCU readers can see the new chunk.
 387	 */
 388	replace_chunk(new, chunk);
 389	spin_unlock(&hash_lock);
 390	mutex_unlock(&audit_tree_group->mark_mutex);
 391	audit_mark_put_chunk(chunk);
 392	return;
 393
 394out_mutex:
 395	mutex_unlock(&audit_tree_group->mark_mutex);
 396}
 397
 398/* Call with group->mark_mutex held, releases it */
 399static int create_chunk(struct inode *inode, struct audit_tree *tree)
 400{
 401	struct fsnotify_mark *mark;
 402	struct audit_chunk *chunk = alloc_chunk(1);
 403
 404	if (!chunk) {
 405		mutex_unlock(&audit_tree_group->mark_mutex);
 406		return -ENOMEM;
 407	}
 408
 409	mark = alloc_mark();
 410	if (!mark) {
 411		mutex_unlock(&audit_tree_group->mark_mutex);
 412		kfree(chunk);
 413		return -ENOMEM;
 414	}
 415
 416	if (fsnotify_add_inode_mark_locked(mark, inode, 0)) {
 417		mutex_unlock(&audit_tree_group->mark_mutex);
 418		fsnotify_put_mark(mark);
 419		kfree(chunk);
 420		return -ENOSPC;
 421	}
 422
 423	spin_lock(&hash_lock);
 424	if (tree->goner) {
 425		spin_unlock(&hash_lock);
 426		fsnotify_detach_mark(mark);
 427		mutex_unlock(&audit_tree_group->mark_mutex);
 428		fsnotify_free_mark(mark);
 429		fsnotify_put_mark(mark);
 430		kfree(chunk);
 431		return 0;
 432	}
 433	replace_mark_chunk(mark, chunk);
 434	chunk->owners[0].index = (1U << 31);
 435	chunk->owners[0].owner = tree;
 436	get_tree(tree);
 437	list_add(&chunk->owners[0].list, &tree->chunks);
 438	if (!tree->root) {
 439		tree->root = chunk;
 440		list_add(&tree->same_root, &chunk->trees);
 441	}
 442	chunk->key = inode_to_key(inode);
 443	/*
 444	 * Inserting into the hash table has to go last as once we do that RCU
 445	 * readers can see the chunk.
 446	 */
 447	insert_hash(chunk);
 448	spin_unlock(&hash_lock);
 449	mutex_unlock(&audit_tree_group->mark_mutex);
 450	/*
 451	 * Drop our initial reference. When the mark we point to is getting freed,
 452	 * we get a notification through the ->freeing_mark callback and clean up
 453	 * the chunk pointing to this mark.
 454	 */
 455	fsnotify_put_mark(mark);
 456	return 0;
 457}
 458
 459/* the first tagged inode becomes root of tree */
 460static int tag_chunk(struct inode *inode, struct audit_tree *tree)
 461{
 462	struct fsnotify_mark *mark;
 463	struct audit_chunk *chunk, *old;
 464	struct node *p;
 465	int n;
 466
 467	mutex_lock(&audit_tree_group->mark_mutex);
 468	mark = fsnotify_find_mark(&inode->i_fsnotify_marks, audit_tree_group);
 469	if (!mark)
 470		return create_chunk(inode, tree);
 471
 472	/*
 473	 * The found mark is guaranteed to be attached, and mark_mutex protects it
 474	 * from getting detached, thus making sure there is a chunk attached
 475	 * to the mark.
 476	 */
 477	/* are we already there? */
 478	spin_lock(&hash_lock);
 479	old = mark_chunk(mark);
 480	for (n = 0; n < old->count; n++) {
 481		if (old->owners[n].owner == tree) {
 482			spin_unlock(&hash_lock);
 483			mutex_unlock(&audit_tree_group->mark_mutex);
 484			fsnotify_put_mark(mark);
 485			return 0;
 486		}
 487	}
 488	spin_unlock(&hash_lock);
 489
 490	chunk = alloc_chunk(old->count + 1);
 491	if (!chunk) {
 492		mutex_unlock(&audit_tree_group->mark_mutex);
 493		fsnotify_put_mark(mark);
 494		return -ENOMEM;
 495	}
 496
 497	spin_lock(&hash_lock);
 498	if (tree->goner) {
 499		spin_unlock(&hash_lock);
 500		mutex_unlock(&audit_tree_group->mark_mutex);
 501		fsnotify_put_mark(mark);
 502		kfree(chunk);
 503		return 0;
 504	}
 505	p = &chunk->owners[chunk->count - 1];
 506	p->index = (chunk->count - 1) | (1U<<31);
 507	p->owner = tree;
 508	get_tree(tree);
 509	list_add(&p->list, &tree->chunks);
 510	if (!tree->root) {
 511		tree->root = chunk;
 512		list_add(&tree->same_root, &chunk->trees);
 513	}
 514	/*
 515	 * This has to go last when updating chunk as once replace_chunk() is
 516	 * called, new RCU readers can see the new chunk.
 517	 */
 518	replace_chunk(chunk, old);
 519	spin_unlock(&hash_lock);
 520	mutex_unlock(&audit_tree_group->mark_mutex);
 521	fsnotify_put_mark(mark); /* pair to fsnotify_find_mark */
 522	audit_mark_put_chunk(old);
 523
 524	return 0;
 525}
 526
 527static void audit_tree_log_remove_rule(struct audit_context *context,
 528				       struct audit_krule *rule)
 529{
 530	struct audit_buffer *ab;
 531
 532	if (!audit_enabled)
 533		return;
 534	ab = audit_log_start(context, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
 535	if (unlikely(!ab))
 536		return;
 537	audit_log_format(ab, "op=remove_rule dir=");
 538	audit_log_untrustedstring(ab, rule->tree->pathname);
 539	audit_log_key(ab, rule->filterkey);
 540	audit_log_format(ab, " list=%d res=1", rule->listnr);
 541	audit_log_end(ab);
 542}
 543
 544static void kill_rules(struct audit_context *context, struct audit_tree *tree)
 545{
 546	struct audit_krule *rule, *next;
 547	struct audit_entry *entry;
 548
 549	list_for_each_entry_safe(rule, next, &tree->rules, rlist) {
 550		entry = container_of(rule, struct audit_entry, rule);
 551
 552		list_del_init(&rule->rlist);
 553		if (rule->tree) {
 554			/* not a half-baked one */
 555			audit_tree_log_remove_rule(context, rule);
 556			if (entry->rule.exe)
 557				audit_remove_mark(entry->rule.exe);
 558			rule->tree = NULL;
 559			list_del_rcu(&entry->list);
 560			list_del(&entry->rule.list);
 561			call_rcu(&entry->rcu, audit_free_rule_rcu);
 562		}
 563	}
 564}
 565
 566/*
 567 * Remove tree from chunks. If 'tagged' is set, remove tree only from tagged
 568 * chunks. The function expects that tagged chunks are all at the beginning of the
 569 * chunks list.
 570 */
 571static void prune_tree_chunks(struct audit_tree *victim, bool tagged)
 572{
 573	spin_lock(&hash_lock);
 574	while (!list_empty(&victim->chunks)) {
 575		struct node *p;
 576		struct audit_chunk *chunk;
 577		struct fsnotify_mark *mark;
 578
 579		p = list_first_entry(&victim->chunks, struct node, list);
 580		/* have we run out of marked? */
 581		if (tagged && !(p->index & (1U<<31)))
 582			break;
 583		chunk = find_chunk(p);
 584		mark = chunk->mark;
 585		remove_chunk_node(chunk, p);
 586		/* Racing with audit_tree_freeing_mark()? */
 587		if (!mark)
 588			continue;
 589		fsnotify_get_mark(mark);
 590		spin_unlock(&hash_lock);
 591
 592		untag_chunk(chunk, mark);
 593		fsnotify_put_mark(mark);
 594
 595		spin_lock(&hash_lock);
 596	}
 597	spin_unlock(&hash_lock);
 598	put_tree(victim);
 599}
 600
 601/*
 602 * finish killing struct audit_tree
 603 */
 604static void prune_one(struct audit_tree *victim)
 605{
 606	prune_tree_chunks(victim, false);
 607}
 608
 609/* trim the uncommitted chunks from tree */
 610
 611static void trim_marked(struct audit_tree *tree)
 612{
 613	struct list_head *p, *q;
 614	spin_lock(&hash_lock);
 615	if (tree->goner) {
 616		spin_unlock(&hash_lock);
 617		return;
 618	}
 619	/* reorder */
 620	for (p = tree->chunks.next; p != &tree->chunks; p = q) {
 621		struct node *node = list_entry(p, struct node, list);
 622		q = p->next;
 623		if (node->index & (1U<<31)) {
 624			list_del_init(p);
 625			list_add(p, &tree->chunks);
 626		}
 627	}
 628	spin_unlock(&hash_lock);
 629
 630	prune_tree_chunks(tree, true);
 631
 632	spin_lock(&hash_lock);
 633	if (!tree->root && !tree->goner) {
 634		tree->goner = 1;
 635		spin_unlock(&hash_lock);
 636		mutex_lock(&audit_filter_mutex);
 637		kill_rules(audit_context(), tree);
 638		list_del_init(&tree->list);
 639		mutex_unlock(&audit_filter_mutex);
 640		prune_one(tree);
 641	} else {
 642		spin_unlock(&hash_lock);
 643	}
 644}
 645
 646static void audit_schedule_prune(void);
 647
 648/* called with audit_filter_mutex */
 649int audit_remove_tree_rule(struct audit_krule *rule)
 650{
 651	struct audit_tree *tree;
 652	tree = rule->tree;
 653	if (tree) {
 654		spin_lock(&hash_lock);
 655		list_del_init(&rule->rlist);
 656		if (list_empty(&tree->rules) && !tree->goner) {
 657			tree->root = NULL;
 658			list_del_init(&tree->same_root);
 659			tree->goner = 1;
 660			list_move(&tree->list, &prune_list);
 661			rule->tree = NULL;
 662			spin_unlock(&hash_lock);
 663			audit_schedule_prune();
 664			return 1;
 665		}
 666		rule->tree = NULL;
 667		spin_unlock(&hash_lock);
 668		return 1;
 669	}
 670	return 0;
 671}
 672
 673static int compare_root(struct vfsmount *mnt, void *arg)
 674{
 675	return inode_to_key(d_backing_inode(mnt->mnt_root)) ==
 676	       (unsigned long)arg;
 677}
 678
 679void audit_trim_trees(void)
 680{
 681	struct list_head cursor;
 682
 683	mutex_lock(&audit_filter_mutex);
 684	list_add(&cursor, &tree_list);
 685	while (cursor.next != &tree_list) {
 686		struct audit_tree *tree;
 687		struct path path;
 688		struct vfsmount *root_mnt;
 689		struct node *node;
 690		int err;
 691
 692		tree = container_of(cursor.next, struct audit_tree, list);
 693		get_tree(tree);
 694		list_del(&cursor);
 695		list_add(&cursor, &tree->list);
 696		mutex_unlock(&audit_filter_mutex);
 697
 698		err = kern_path(tree->pathname, 0, &path);
 699		if (err)
 700			goto skip_it;
 701
 702		root_mnt = collect_mounts(&path);
 703		path_put(&path);
 704		if (IS_ERR(root_mnt))
 705			goto skip_it;
 706
 707		spin_lock(&hash_lock);
 708		list_for_each_entry(node, &tree->chunks, list) {
 709			struct audit_chunk *chunk = find_chunk(node);
 710			/* this could be NULL if the watch is dying elsewhere... */
 711			node->index |= 1U<<31;
 712			if (iterate_mounts(compare_root,
 713					   (void *)(chunk->key),
 714					   root_mnt))
 715				node->index &= ~(1U<<31);
 716		}
 717		spin_unlock(&hash_lock);
 718		trim_marked(tree);
 719		drop_collected_mounts(root_mnt);
 720skip_it:
 721		put_tree(tree);
 722		mutex_lock(&audit_filter_mutex);
 723	}
 724	list_del(&cursor);
 725	mutex_unlock(&audit_filter_mutex);
 726}
 727
 728int audit_make_tree(struct audit_krule *rule, char *pathname, u32 op)
 729{
 730
 731	if (pathname[0] != '/' ||
 732	    rule->listnr != AUDIT_FILTER_EXIT ||
 733	    op != Audit_equal ||
 734	    rule->inode_f || rule->watch || rule->tree)
 735		return -EINVAL;
 736	rule->tree = alloc_tree(pathname);
 737	if (!rule->tree)
 738		return -ENOMEM;
 739	return 0;
 740}
 741
 742void audit_put_tree(struct audit_tree *tree)
 743{
 744	put_tree(tree);
 745}
 746
 747static int tag_mount(struct vfsmount *mnt, void *arg)
 748{
 749	return tag_chunk(d_backing_inode(mnt->mnt_root), arg);
 750}
 751
 752/*
 753 * That gets run when evict_chunk() ends up needing to kill audit_tree.
 754 * Runs from a separate thread.
 755 */
 756static int prune_tree_thread(void *unused)
 757{
 758	for (;;) {
 759		if (list_empty(&prune_list)) {
 760			set_current_state(TASK_INTERRUPTIBLE);
 761			schedule();
 762		}
 763
 764		audit_ctl_lock();
 765		mutex_lock(&audit_filter_mutex);
 766
 767		while (!list_empty(&prune_list)) {
 768			struct audit_tree *victim;
 769
 770			victim = list_entry(prune_list.next,
 771					struct audit_tree, list);
 772			list_del_init(&victim->list);
 773
 774			mutex_unlock(&audit_filter_mutex);
 775
 776			prune_one(victim);
 777
 778			mutex_lock(&audit_filter_mutex);
 779		}
 780
 781		mutex_unlock(&audit_filter_mutex);
 782		audit_ctl_unlock();
 783	}
 784	return 0;
 785}
 786
 787static int audit_launch_prune(void)
 788{
 789	if (prune_thread)
 790		return 0;
 791	prune_thread = kthread_run(prune_tree_thread, NULL,
 792				"audit_prune_tree");
 793	if (IS_ERR(prune_thread)) {
 794		pr_err("cannot start thread audit_prune_tree\n");
 795		prune_thread = NULL;
 796		return -ENOMEM;
 797	}
 798	return 0;
 799}
 800
 801/* called with audit_filter_mutex */
 802int audit_add_tree_rule(struct audit_krule *rule)
 803{
 804	struct audit_tree *seed = rule->tree, *tree;
 805	struct path path;
 806	struct vfsmount *mnt;
 807	int err;
 808
 809	rule->tree = NULL;
 810	list_for_each_entry(tree, &tree_list, list) {
 811		if (!strcmp(seed->pathname, tree->pathname)) {
 812			put_tree(seed);
 813			rule->tree = tree;
 814			list_add(&rule->rlist, &tree->rules);
 815			return 0;
 816		}
 817	}
 818	tree = seed;
 819	list_add(&tree->list, &tree_list);
 820	list_add(&rule->rlist, &tree->rules);
 821	/* do not set rule->tree yet */
 822	mutex_unlock(&audit_filter_mutex);
 823
 824	if (unlikely(!prune_thread)) {
 825		err = audit_launch_prune();
 826		if (err)
 827			goto Err;
 828	}
 829
 830	err = kern_path(tree->pathname, 0, &path);
 831	if (err)
 832		goto Err;
 833	mnt = collect_mounts(&path);
 834	path_put(&path);
 835	if (IS_ERR(mnt)) {
 836		err = PTR_ERR(mnt);
 837		goto Err;
 838	}
 839
 840	get_tree(tree);
 841	err = iterate_mounts(tag_mount, tree, mnt);
 842	drop_collected_mounts(mnt);
 843
 844	if (!err) {
 845		struct node *node;
 846		spin_lock(&hash_lock);
 847		list_for_each_entry(node, &tree->chunks, list)
 848			node->index &= ~(1U<<31);
 849		spin_unlock(&hash_lock);
 850	} else {
 851		trim_marked(tree);
 852		goto Err;
 853	}
 854
 855	mutex_lock(&audit_filter_mutex);
 856	if (list_empty(&rule->rlist)) {
 857		put_tree(tree);
 858		return -ENOENT;
 859	}
 860	rule->tree = tree;
 861	put_tree(tree);
 862
 863	return 0;
 864Err:
 865	mutex_lock(&audit_filter_mutex);
 866	list_del_init(&tree->list);
 867	list_del_init(&tree->rules);
 868	put_tree(tree);
 869	return err;
 870}
 871
 872int audit_tag_tree(char *old, char *new)
 873{
 874	struct list_head cursor, barrier;
 875	int failed = 0;
 876	struct path path1, path2;
 877	struct vfsmount *tagged;
 878	int err;
 879
 880	err = kern_path(new, 0, &path2);
 881	if (err)
 882		return err;
 883	tagged = collect_mounts(&path2);
 884	path_put(&path2);
 885	if (IS_ERR(tagged))
 886		return PTR_ERR(tagged);
 887
 888	err = kern_path(old, 0, &path1);
 889	if (err) {
 890		drop_collected_mounts(tagged);
 891		return err;
 892	}
 893
 894	mutex_lock(&audit_filter_mutex);
 895	list_add(&barrier, &tree_list);
 896	list_add(&cursor, &barrier);
 897
 898	while (cursor.next != &tree_list) {
 899		struct audit_tree *tree;
 900		int good_one = 0;
 901
 902		tree = container_of(cursor.next, struct audit_tree, list);
 903		get_tree(tree);
 904		list_del(&cursor);
 905		list_add(&cursor, &tree->list);
 906		mutex_unlock(&audit_filter_mutex);
 907
 908		err = kern_path(tree->pathname, 0, &path2);
 909		if (!err) {
 910			good_one = path_is_under(&path1, &path2);
 911			path_put(&path2);
 912		}
 913
 914		if (!good_one) {
 915			put_tree(tree);
 916			mutex_lock(&audit_filter_mutex);
 917			continue;
 918		}
 919
 920		failed = iterate_mounts(tag_mount, tree, tagged);
 921		if (failed) {
 922			put_tree(tree);
 923			mutex_lock(&audit_filter_mutex);
 924			break;
 925		}
 926
 927		mutex_lock(&audit_filter_mutex);
 928		spin_lock(&hash_lock);
 929		if (!tree->goner) {
 930			list_del(&tree->list);
 931			list_add(&tree->list, &tree_list);
 932		}
 933		spin_unlock(&hash_lock);
 934		put_tree(tree);
 935	}
 936
 937	while (barrier.prev != &tree_list) {
 938		struct audit_tree *tree;
 939
 940		tree = container_of(barrier.prev, struct audit_tree, list);
 941		get_tree(tree);
 942		list_del(&tree->list);
 943		list_add(&tree->list, &barrier);
 944		mutex_unlock(&audit_filter_mutex);
 945
 946		if (!failed) {
 947			struct node *node;
 948			spin_lock(&hash_lock);
 949			list_for_each_entry(node, &tree->chunks, list)
 950				node->index &= ~(1U<<31);
 951			spin_unlock(&hash_lock);
 952		} else {
 953			trim_marked(tree);
 954		}
 955
 956		put_tree(tree);
 957		mutex_lock(&audit_filter_mutex);
 958	}
 959	list_del(&barrier);
 960	list_del(&cursor);
 961	mutex_unlock(&audit_filter_mutex);
 962	path_put(&path1);
 963	drop_collected_mounts(tagged);
 964	return failed;
 965}
 966
 967
 968static void audit_schedule_prune(void)
 969{
 970	wake_up_process(prune_thread);
 971}
 972
 973/*
 974 * ... and that one is done if evict_chunk() decides to delay until the end
 975 * of syscall.  Runs synchronously.
 976 */
 977void audit_kill_trees(struct audit_context *context)
 978{
 979	struct list_head *list = &context->killed_trees;
 980
 981	audit_ctl_lock();
 982	mutex_lock(&audit_filter_mutex);
 983
 984	while (!list_empty(list)) {
 985		struct audit_tree *victim;
 986
 987		victim = list_entry(list->next, struct audit_tree, list);
 988		kill_rules(context, victim);
 989		list_del_init(&victim->list);
 990
 991		mutex_unlock(&audit_filter_mutex);
 992
 993		prune_one(victim);
 994
 995		mutex_lock(&audit_filter_mutex);
 996	}
 997
 998	mutex_unlock(&audit_filter_mutex);
 999	audit_ctl_unlock();
1000}
1001
1002/*
1003 *  Here comes the stuff asynchronous to auditctl operations
1004 */
1005
1006static void evict_chunk(struct audit_chunk *chunk)
1007{
1008	struct audit_tree *owner;
1009	struct list_head *postponed = audit_killed_trees();
1010	int need_prune = 0;
1011	int n;
1012
1013	mutex_lock(&audit_filter_mutex);
1014	spin_lock(&hash_lock);
1015	while (!list_empty(&chunk->trees)) {
1016		owner = list_entry(chunk->trees.next,
1017				   struct audit_tree, same_root);
1018		owner->goner = 1;
1019		owner->root = NULL;
1020		list_del_init(&owner->same_root);
1021		spin_unlock(&hash_lock);
1022		if (!postponed) {
1023			kill_rules(audit_context(), owner);
1024			list_move(&owner->list, &prune_list);
1025			need_prune = 1;
1026		} else {
1027			list_move(&owner->list, postponed);
1028		}
1029		spin_lock(&hash_lock);
1030	}
1031	list_del_rcu(&chunk->hash);
1032	for (n = 0; n < chunk->count; n++)
1033		list_del_init(&chunk->owners[n].list);
1034	spin_unlock(&hash_lock);
1035	mutex_unlock(&audit_filter_mutex);
1036	if (need_prune)
1037		audit_schedule_prune();
1038}
1039
1040static int audit_tree_handle_event(struct fsnotify_group *group,
1041				   struct inode *to_tell,
1042				   u32 mask, const void *data, int data_type,
1043				   const struct qstr *file_name, u32 cookie,
1044				   struct fsnotify_iter_info *iter_info)
1045{
1046	return 0;
1047}
1048
1049static void audit_tree_freeing_mark(struct fsnotify_mark *mark,
1050				    struct fsnotify_group *group)
1051{
1052	struct audit_chunk *chunk;
1053
1054	mutex_lock(&mark->group->mark_mutex);
1055	spin_lock(&hash_lock);
1056	chunk = mark_chunk(mark);
1057	replace_mark_chunk(mark, NULL);
1058	spin_unlock(&hash_lock);
1059	mutex_unlock(&mark->group->mark_mutex);
1060	if (chunk) {
1061		evict_chunk(chunk);
1062		audit_mark_put_chunk(chunk);
1063	}
1064
1065	/*
1066	 * We are guaranteed to have at least one reference to the mark from
1067	 * either the inode or the caller of fsnotify_destroy_mark().
1068	 */
1069	BUG_ON(refcount_read(&mark->refcnt) < 1);
1070}
1071
1072static const struct fsnotify_ops audit_tree_ops = {
1073	.handle_event = audit_tree_handle_event,
1074	.freeing_mark = audit_tree_freeing_mark,
1075	.free_mark = audit_tree_destroy_watch,
1076};
1077
1078static int __init audit_tree_init(void)
1079{
1080	int i;
1081
1082	audit_tree_mark_cachep = KMEM_CACHE(audit_tree_mark, SLAB_PANIC);
1083
1084	audit_tree_group = fsnotify_alloc_group(&audit_tree_ops);
1085	if (IS_ERR(audit_tree_group))
1086		audit_panic("cannot initialize fsnotify group for rectree watches");
1087
1088	for (i = 0; i < HASH_SIZE; i++)
1089		INIT_LIST_HEAD(&chunk_hash_heads[i]);
1090
1091	return 0;
1092}
1093__initcall(audit_tree_init);
v4.6
  1#include "audit.h"
  2#include <linux/fsnotify_backend.h>
  3#include <linux/namei.h>
  4#include <linux/mount.h>
  5#include <linux/kthread.h>
  6#include <linux/slab.h>
  7
  8struct audit_tree;
  9struct audit_chunk;
 10
 11struct audit_tree {
 12	atomic_t count;
 13	int goner;
 14	struct audit_chunk *root;
 15	struct list_head chunks;
 16	struct list_head rules;
 17	struct list_head list;
 18	struct list_head same_root;
 19	struct rcu_head head;
 20	char pathname[];
 21};
 22
 23struct audit_chunk {
 24	struct list_head hash;
 25	struct fsnotify_mark mark;
 26	struct list_head trees;		/* with root here */
 27	int dead;
 28	int count;
 29	atomic_long_t refs;
 30	struct rcu_head head;
 31	struct node {
 32		struct list_head list;
 33		struct audit_tree *owner;
 34		unsigned index;		/* index; upper bit indicates 'will prune' */
 35	} owners[];
 36};
 37
 38static LIST_HEAD(tree_list);
 39static LIST_HEAD(prune_list);
 40static struct task_struct *prune_thread;
 41
 42/*
 43 * One struct chunk is attached to each inode of interest.
 44 * We replace struct chunk on tagging/untagging.
 45 * Rules have pointer to struct audit_tree.
 46 * Rules have struct list_head rlist forming a list of rules over
 47 * the same tree.
 48 * References to struct chunk are collected at audit_inode{,_child}()
 49 * time and used in AUDIT_TREE rule matching.
 50 * These references are dropped at the same time we are calling
 51 * audit_free_names(), etc.
 52 *
 53 * Cyclic lists galore:
 54 * tree.chunks anchors chunk.owners[].list			hash_lock
 55 * tree.rules anchors rule.rlist				audit_filter_mutex
 56 * chunk.trees anchors tree.same_root				hash_lock
 57 * chunk.hash is a hash with middle bits of watch.inode as
 58 * a hash function.						RCU, hash_lock
 59 *
 60 * tree is refcounted; one reference for "some rules on rules_list refer to
 61 * it", one for each chunk with pointer to it.
 62 *
 63 * chunk is refcounted by embedded fsnotify_mark + .refs (non-zero refcount
 64 * of watch contributes 1 to .refs).
 65 *
 66 * node.index allows us to get from node.list to the containing chunk.
 67 * MSB of that sucker is stolen to mark taggings that we might have to
 68 * revert - several operations have very unpleasant cleanup logics and
 69 * that makes a difference.  Some.
 70 */
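
/*
 * Layout note for this older version: here the fsnotify mark is embedded in
 * struct audit_chunk itself, so the code below recovers a chunk from its
 * mark with container_of(entry, struct audit_chunk, mark); the v5.4 code
 * above keeps the mark in a separate struct audit_tree_mark instead.
 */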
 71
 72static struct fsnotify_group *audit_tree_group;
 73
 74static struct audit_tree *alloc_tree(const char *s)
 75{
 76	struct audit_tree *tree;
 77
 78	tree = kmalloc(sizeof(struct audit_tree) + strlen(s) + 1, GFP_KERNEL);
 79	if (tree) {
 80		atomic_set(&tree->count, 1);
 81		tree->goner = 0;
 82		INIT_LIST_HEAD(&tree->chunks);
 83		INIT_LIST_HEAD(&tree->rules);
 84		INIT_LIST_HEAD(&tree->list);
 85		INIT_LIST_HEAD(&tree->same_root);
 86		tree->root = NULL;
 87		strcpy(tree->pathname, s);
 88	}
 89	return tree;
 90}
 91
 92static inline void get_tree(struct audit_tree *tree)
 93{
 94	atomic_inc(&tree->count);
 95}
 96
 97static inline void put_tree(struct audit_tree *tree)
 98{
 99	if (atomic_dec_and_test(&tree->count))
100		kfree_rcu(tree, head);
101}
102
103/* to avoid bringing the entire thing into audit.h */
104const char *audit_tree_path(struct audit_tree *tree)
105{
106	return tree->pathname;
107}
108
109static void free_chunk(struct audit_chunk *chunk)
110{
111	int i;
112
113	for (i = 0; i < chunk->count; i++) {
114		if (chunk->owners[i].owner)
115			put_tree(chunk->owners[i].owner);
116	}
117	kfree(chunk);
118}
119
120void audit_put_chunk(struct audit_chunk *chunk)
121{
122	if (atomic_long_dec_and_test(&chunk->refs))
123		free_chunk(chunk);
124}
125
126static void __put_chunk(struct rcu_head *rcu)
127{
128	struct audit_chunk *chunk = container_of(rcu, struct audit_chunk, head);
129	audit_put_chunk(chunk);
130}
131
132static void audit_tree_destroy_watch(struct fsnotify_mark *entry)
133{
134	struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark);
135	call_rcu(&chunk->head, __put_chunk);
136}
137
138static struct audit_chunk *alloc_chunk(int count)
139{
140	struct audit_chunk *chunk;
141	size_t size;
142	int i;
143
144	size = offsetof(struct audit_chunk, owners) + count * sizeof(struct node);
145	chunk = kzalloc(size, GFP_KERNEL);
146	if (!chunk)
147		return NULL;
148
149	INIT_LIST_HEAD(&chunk->hash);
150	INIT_LIST_HEAD(&chunk->trees);
151	chunk->count = count;
152	atomic_long_set(&chunk->refs, 1);
153	for (i = 0; i < count; i++) {
154		INIT_LIST_HEAD(&chunk->owners[i].list);
155		chunk->owners[i].index = i;
156	}
157	fsnotify_init_mark(&chunk->mark, audit_tree_destroy_watch);
158	chunk->mark.mask = FS_IN_IGNORED;
159	return chunk;
160}
161
162enum {HASH_SIZE = 128};
163static struct list_head chunk_hash_heads[HASH_SIZE];
164static __cacheline_aligned_in_smp DEFINE_SPINLOCK(hash_lock);
165
166static inline struct list_head *chunk_hash(const struct inode *inode)
167{
168	unsigned long n = (unsigned long)inode / L1_CACHE_BYTES;
169	return chunk_hash_heads + n % HASH_SIZE;
170}
171
172/* hash_lock & entry->lock are held by caller */
173static void insert_hash(struct audit_chunk *chunk)
174{
175	struct fsnotify_mark *entry = &chunk->mark;
176	struct list_head *list;
177
178	if (!entry->inode)
179		return;
180	list = chunk_hash(entry->inode);
181	list_add_rcu(&chunk->hash, list);
182}
183
184/* called under rcu_read_lock */
185struct audit_chunk *audit_tree_lookup(const struct inode *inode)
186{
187	struct list_head *list = chunk_hash(inode);
188	struct audit_chunk *p;
189
190	list_for_each_entry_rcu(p, list, hash) {
191		/* mark.inode may have gone NULL, but who cares? */
192		if (p->mark.inode == inode) {
193			atomic_long_inc(&p->refs);
194			return p;
195		}
196	}
197	return NULL;
198}
199
200bool audit_tree_match(struct audit_chunk *chunk, struct audit_tree *tree)
201{
202	int n;
203	for (n = 0; n < chunk->count; n++)
204		if (chunk->owners[n].owner == tree)
205			return true;
206	return false;
207}
208
209/* tagging and untagging inodes with trees */
210
211static struct audit_chunk *find_chunk(struct node *p)
212{
213	int index = p->index & ~(1U<<31);
214	p -= index;
215	return container_of(p, struct audit_chunk, owners[0]);
216}
217
218static void untag_chunk(struct node *p)
219{
220	struct audit_chunk *chunk = find_chunk(p);
221	struct fsnotify_mark *entry = &chunk->mark;
222	struct audit_chunk *new = NULL;
223	struct audit_tree *owner;
224	int size = chunk->count - 1;
225	int i, j;
226
227	fsnotify_get_mark(entry);
228
229	spin_unlock(&hash_lock);
230
231	if (size)
232		new = alloc_chunk(size);
233
234	spin_lock(&entry->lock);
235	if (chunk->dead || !entry->inode) {
236		spin_unlock(&entry->lock);
237		if (new)
238			free_chunk(new);
239		goto out;
240	}
241
242	owner = p->owner;
243
244	if (!size) {
245		chunk->dead = 1;
246		spin_lock(&hash_lock);
247		list_del_init(&chunk->trees);
248		if (owner->root == chunk)
249			owner->root = NULL;
250		list_del_init(&p->list);
251		list_del_rcu(&chunk->hash);
252		spin_unlock(&hash_lock);
253		spin_unlock(&entry->lock);
254		fsnotify_destroy_mark(entry, audit_tree_group);
255		goto out;
256	}
257
258	if (!new)
259		goto Fallback;
260
261	fsnotify_duplicate_mark(&new->mark, entry);
262	if (fsnotify_add_mark(&new->mark, new->mark.group, new->mark.inode, NULL, 1)) {
263		fsnotify_put_mark(&new->mark);
264		goto Fallback;
265	}
266
267	chunk->dead = 1;
268	spin_lock(&hash_lock);
269	list_replace_init(&chunk->trees, &new->trees);
270	if (owner->root == chunk) {
271		list_del_init(&owner->same_root);
272		owner->root = NULL;
273	}
274
275	for (i = j = 0; j <= size; i++, j++) {
276		struct audit_tree *s;
277		if (&chunk->owners[j] == p) {
278			list_del_init(&p->list);
279			i--;
280			continue;
281		}
282		s = chunk->owners[j].owner;
283		new->owners[i].owner = s;
284		new->owners[i].index = chunk->owners[j].index - j + i;
285		if (!s) /* result of earlier fallback */
286			continue;
287		get_tree(s);
288		list_replace_init(&chunk->owners[j].list, &new->owners[i].list);
289	}
290
291	list_replace_rcu(&chunk->hash, &new->hash);
292	list_for_each_entry(owner, &new->trees, same_root)
293		owner->root = new;
294	spin_unlock(&hash_lock);
295	spin_unlock(&entry->lock);
296	fsnotify_destroy_mark(entry, audit_tree_group);
297	fsnotify_put_mark(&new->mark);	/* drop initial reference */
298	goto out;
299
300Fallback:
301	// do the best we can
302	spin_lock(&hash_lock);
303	if (owner->root == chunk) {
304		list_del_init(&owner->same_root);
305		owner->root = NULL;
306	}
307	list_del_init(&p->list);
308	p->owner = NULL;
309	put_tree(owner);
310	spin_unlock(&hash_lock);
311	spin_unlock(&entry->lock);
312out:
313	fsnotify_put_mark(entry);
314	spin_lock(&hash_lock);
315}
316
317static int create_chunk(struct inode *inode, struct audit_tree *tree)
318{
319	struct fsnotify_mark *entry;
320	struct audit_chunk *chunk = alloc_chunk(1);
321	if (!chunk)
322		return -ENOMEM;
323
324	entry = &chunk->mark;
325	if (fsnotify_add_mark(entry, audit_tree_group, inode, NULL, 0)) {
326		fsnotify_put_mark(entry);
327		return -ENOSPC;
328	}
329
330	spin_lock(&entry->lock);
331	spin_lock(&hash_lock);
332	if (tree->goner) {
333		spin_unlock(&hash_lock);
334		chunk->dead = 1;
335		spin_unlock(&entry->lock);
336		fsnotify_destroy_mark(entry, audit_tree_group);
337		fsnotify_put_mark(entry);
338		return 0;
339	}
340	chunk->owners[0].index = (1U << 31);
341	chunk->owners[0].owner = tree;
342	get_tree(tree);
343	list_add(&chunk->owners[0].list, &tree->chunks);
344	if (!tree->root) {
345		tree->root = chunk;
346		list_add(&tree->same_root, &chunk->trees);
347	}
348	insert_hash(chunk);
349	spin_unlock(&hash_lock);
350	spin_unlock(&entry->lock);
351	fsnotify_put_mark(entry);	/* drop initial reference */
352	return 0;
353}
354
355/* the first tagged inode becomes root of tree */
356static int tag_chunk(struct inode *inode, struct audit_tree *tree)
357{
358	struct fsnotify_mark *old_entry, *chunk_entry;
359	struct audit_tree *owner;
360	struct audit_chunk *chunk, *old;
361	struct node *p;
362	int n;
363
364	old_entry = fsnotify_find_inode_mark(audit_tree_group, inode);
365	if (!old_entry)
366		return create_chunk(inode, tree);
367
368	old = container_of(old_entry, struct audit_chunk, mark);
369
370	/* are we already there? */
371	spin_lock(&hash_lock);
372	for (n = 0; n < old->count; n++) {
373		if (old->owners[n].owner == tree) {
374			spin_unlock(&hash_lock);
375			fsnotify_put_mark(old_entry);
376			return 0;
377		}
378	}
379	spin_unlock(&hash_lock);
380
381	chunk = alloc_chunk(old->count + 1);
382	if (!chunk) {
383		fsnotify_put_mark(old_entry);
384		return -ENOMEM;
385	}
386
387	chunk_entry = &chunk->mark;
388
389	spin_lock(&old_entry->lock);
390	if (!old_entry->inode) {
391		/* old_entry is being shot, let's just lie */
392		spin_unlock(&old_entry->lock);
393		fsnotify_put_mark(old_entry);
394		free_chunk(chunk);
395		return -ENOENT;
396	}
397
398	fsnotify_duplicate_mark(chunk_entry, old_entry);
399	if (fsnotify_add_mark(chunk_entry, chunk_entry->group, chunk_entry->inode, NULL, 1)) {
400		spin_unlock(&old_entry->lock);
401		fsnotify_put_mark(chunk_entry);
402		fsnotify_put_mark(old_entry);
403		return -ENOSPC;
404	}
405
406	/* even though we hold old_entry->lock, this is safe since chunk_entry->lock could NEVER have been grabbed before */
407	spin_lock(&chunk_entry->lock);
408	spin_lock(&hash_lock);
409
410	/* we now hold old_entry->lock, chunk_entry->lock, and hash_lock */
411	if (tree->goner) {
412		spin_unlock(&hash_lock);
413		chunk->dead = 1;
414		spin_unlock(&chunk_entry->lock);
415		spin_unlock(&old_entry->lock);
416
417		fsnotify_destroy_mark(chunk_entry, audit_tree_group);
418
419		fsnotify_put_mark(chunk_entry);
420		fsnotify_put_mark(old_entry);
421		return 0;
422	}
423	list_replace_init(&old->trees, &chunk->trees);
424	for (n = 0, p = chunk->owners; n < old->count; n++, p++) {
425		struct audit_tree *s = old->owners[n].owner;
426		p->owner = s;
427		p->index = old->owners[n].index;
428		if (!s) /* result of fallback in untag */
429			continue;
430		get_tree(s);
431		list_replace_init(&old->owners[n].list, &p->list);
432	}
433	p->index = (chunk->count - 1) | (1U<<31);
434	p->owner = tree;
435	get_tree(tree);
436	list_add(&p->list, &tree->chunks);
437	list_replace_rcu(&old->hash, &chunk->hash);
438	list_for_each_entry(owner, &chunk->trees, same_root)
439		owner->root = chunk;
440	old->dead = 1;
441	if (!tree->root) {
442		tree->root = chunk;
443		list_add(&tree->same_root, &chunk->trees);
444	}
445	spin_unlock(&hash_lock);
446	spin_unlock(&chunk_entry->lock);
447	spin_unlock(&old_entry->lock);
448	fsnotify_destroy_mark(old_entry, audit_tree_group);
449	fsnotify_put_mark(chunk_entry);	/* drop initial reference */
450	fsnotify_put_mark(old_entry); /* pair to fsnotify_find_inode_mark */
451	return 0;
452}
453
454static void audit_tree_log_remove_rule(struct audit_krule *rule)
455{
456	struct audit_buffer *ab;
457
458	ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
459	if (unlikely(!ab))
460		return;
461	audit_log_format(ab, "op=");
462	audit_log_string(ab, "remove_rule");
463	audit_log_format(ab, " dir=");
464	audit_log_untrustedstring(ab, rule->tree->pathname);
465	audit_log_key(ab, rule->filterkey);
466	audit_log_format(ab, " list=%d res=1", rule->listnr);
467	audit_log_end(ab);
468}
469
470static void kill_rules(struct audit_tree *tree)
471{
472	struct audit_krule *rule, *next;
473	struct audit_entry *entry;
474
475	list_for_each_entry_safe(rule, next, &tree->rules, rlist) {
476		entry = container_of(rule, struct audit_entry, rule);
477
478		list_del_init(&rule->rlist);
479		if (rule->tree) {
480			/* not a half-baked one */
481			audit_tree_log_remove_rule(rule);
482			if (entry->rule.exe)
483				audit_remove_mark(entry->rule.exe);
484			rule->tree = NULL;
485			list_del_rcu(&entry->list);
486			list_del(&entry->rule.list);
487			call_rcu(&entry->rcu, audit_free_rule_rcu);
488		}
489	}
490}
491
492/*
493 * finish killing struct audit_tree
494 */
495static void prune_one(struct audit_tree *victim)
496{
497	spin_lock(&hash_lock);
498	while (!list_empty(&victim->chunks)) {
499		struct node *p;
500
501		p = list_entry(victim->chunks.next, struct node, list);
502
503		untag_chunk(p);
504	}
505	spin_unlock(&hash_lock);
506	put_tree(victim);
507}
508
509/* trim the uncommitted chunks from tree */
510
511static void trim_marked(struct audit_tree *tree)
512{
513	struct list_head *p, *q;
514	spin_lock(&hash_lock);
515	if (tree->goner) {
516		spin_unlock(&hash_lock);
517		return;
518	}
519	/* reorder */
520	for (p = tree->chunks.next; p != &tree->chunks; p = q) {
521		struct node *node = list_entry(p, struct node, list);
522		q = p->next;
523		if (node->index & (1U<<31)) {
524			list_del_init(p);
525			list_add(p, &tree->chunks);
526		}
527	}
528
529	while (!list_empty(&tree->chunks)) {
530		struct node *node;
531
532		node = list_entry(tree->chunks.next, struct node, list);
533
534		/* have we run out of marked? */
535		if (!(node->index & (1U<<31)))
536			break;
537
538		untag_chunk(node);
539	}
540	if (!tree->root && !tree->goner) {
541		tree->goner = 1;
542		spin_unlock(&hash_lock);
543		mutex_lock(&audit_filter_mutex);
544		kill_rules(tree);
545		list_del_init(&tree->list);
546		mutex_unlock(&audit_filter_mutex);
547		prune_one(tree);
548	} else {
549		spin_unlock(&hash_lock);
550	}
551}
552
553static void audit_schedule_prune(void);
554
555/* called with audit_filter_mutex */
556int audit_remove_tree_rule(struct audit_krule *rule)
557{
558	struct audit_tree *tree;
559	tree = rule->tree;
560	if (tree) {
561		spin_lock(&hash_lock);
562		list_del_init(&rule->rlist);
563		if (list_empty(&tree->rules) && !tree->goner) {
564			tree->root = NULL;
565			list_del_init(&tree->same_root);
566			tree->goner = 1;
567			list_move(&tree->list, &prune_list);
568			rule->tree = NULL;
569			spin_unlock(&hash_lock);
570			audit_schedule_prune();
571			return 1;
572		}
573		rule->tree = NULL;
574		spin_unlock(&hash_lock);
575		return 1;
576	}
577	return 0;
578}
579
580static int compare_root(struct vfsmount *mnt, void *arg)
581{
582	return d_backing_inode(mnt->mnt_root) == arg;
583}
584
585void audit_trim_trees(void)
586{
587	struct list_head cursor;
588
589	mutex_lock(&audit_filter_mutex);
590	list_add(&cursor, &tree_list);
591	while (cursor.next != &tree_list) {
592		struct audit_tree *tree;
593		struct path path;
594		struct vfsmount *root_mnt;
595		struct node *node;
596		int err;
597
598		tree = container_of(cursor.next, struct audit_tree, list);
599		get_tree(tree);
600		list_del(&cursor);
601		list_add(&cursor, &tree->list);
602		mutex_unlock(&audit_filter_mutex);
603
604		err = kern_path(tree->pathname, 0, &path);
605		if (err)
606			goto skip_it;
607
608		root_mnt = collect_mounts(&path);
609		path_put(&path);
610		if (IS_ERR(root_mnt))
611			goto skip_it;
612
613		spin_lock(&hash_lock);
614		list_for_each_entry(node, &tree->chunks, list) {
615			struct audit_chunk *chunk = find_chunk(node);
616			/* this could be NULL if the watch is dying elsewhere... */
617			struct inode *inode = chunk->mark.inode;
618			node->index |= 1U<<31;
619			if (iterate_mounts(compare_root, inode, root_mnt))
620				node->index &= ~(1U<<31);
621		}
622		spin_unlock(&hash_lock);
623		trim_marked(tree);
624		drop_collected_mounts(root_mnt);
625skip_it:
626		put_tree(tree);
627		mutex_lock(&audit_filter_mutex);
628	}
629	list_del(&cursor);
630	mutex_unlock(&audit_filter_mutex);
631}
632
633int audit_make_tree(struct audit_krule *rule, char *pathname, u32 op)
634{
635
636	if (pathname[0] != '/' ||
637	    rule->listnr != AUDIT_FILTER_EXIT ||
638	    op != Audit_equal ||
639	    rule->inode_f || rule->watch || rule->tree)
640		return -EINVAL;
641	rule->tree = alloc_tree(pathname);
642	if (!rule->tree)
643		return -ENOMEM;
644	return 0;
645}
646
647void audit_put_tree(struct audit_tree *tree)
648{
649	put_tree(tree);
650}
651
652static int tag_mount(struct vfsmount *mnt, void *arg)
653{
654	return tag_chunk(d_backing_inode(mnt->mnt_root), arg);
655}
656
657/*
658 * That gets run when evict_chunk() ends up needing to kill audit_tree.
659 * Runs from a separate thread.
660 */
661static int prune_tree_thread(void *unused)
662{
663	for (;;) {
664		set_current_state(TASK_INTERRUPTIBLE);
665		if (list_empty(&prune_list))
666			schedule();
667		__set_current_state(TASK_RUNNING);
668
669		mutex_lock(&audit_cmd_mutex);
670		mutex_lock(&audit_filter_mutex);
671
672		while (!list_empty(&prune_list)) {
673			struct audit_tree *victim;
674
675			victim = list_entry(prune_list.next,
676					struct audit_tree, list);
677			list_del_init(&victim->list);
678
679			mutex_unlock(&audit_filter_mutex);
680
681			prune_one(victim);
682
683			mutex_lock(&audit_filter_mutex);
684		}
685
686		mutex_unlock(&audit_filter_mutex);
687		mutex_unlock(&audit_cmd_mutex);
688	}
689	return 0;
690}
691
692static int audit_launch_prune(void)
693{
694	if (prune_thread)
695		return 0;
696	prune_thread = kthread_create(prune_tree_thread, NULL,
697				"audit_prune_tree");
698	if (IS_ERR(prune_thread)) {
699		pr_err("cannot start thread audit_prune_tree\n");
700		prune_thread = NULL;
701		return -ENOMEM;
702	} else {
703		wake_up_process(prune_thread);
704		return 0;
705	}
706}
707
708/* called with audit_filter_mutex */
709int audit_add_tree_rule(struct audit_krule *rule)
710{
711	struct audit_tree *seed = rule->tree, *tree;
712	struct path path;
713	struct vfsmount *mnt;
714	int err;
715
716	rule->tree = NULL;
717	list_for_each_entry(tree, &tree_list, list) {
718		if (!strcmp(seed->pathname, tree->pathname)) {
719			put_tree(seed);
720			rule->tree = tree;
721			list_add(&rule->rlist, &tree->rules);
722			return 0;
723		}
724	}
725	tree = seed;
726	list_add(&tree->list, &tree_list);
727	list_add(&rule->rlist, &tree->rules);
728	/* do not set rule->tree yet */
729	mutex_unlock(&audit_filter_mutex);
730
731	if (unlikely(!prune_thread)) {
732		err = audit_launch_prune();
733		if (err)
734			goto Err;
735	}
736
737	err = kern_path(tree->pathname, 0, &path);
738	if (err)
739		goto Err;
740	mnt = collect_mounts(&path);
741	path_put(&path);
742	if (IS_ERR(mnt)) {
743		err = PTR_ERR(mnt);
744		goto Err;
745	}
746
747	get_tree(tree);
748	err = iterate_mounts(tag_mount, tree, mnt);
749	drop_collected_mounts(mnt);
750
751	if (!err) {
752		struct node *node;
753		spin_lock(&hash_lock);
754		list_for_each_entry(node, &tree->chunks, list)
755			node->index &= ~(1U<<31);
756		spin_unlock(&hash_lock);
757	} else {
758		trim_marked(tree);
759		goto Err;
760	}
761
762	mutex_lock(&audit_filter_mutex);
763	if (list_empty(&rule->rlist)) {
764		put_tree(tree);
765		return -ENOENT;
766	}
767	rule->tree = tree;
768	put_tree(tree);
769
770	return 0;
771Err:
772	mutex_lock(&audit_filter_mutex);
773	list_del_init(&tree->list);
774	list_del_init(&tree->rules);
775	put_tree(tree);
776	return err;
777}
778
779int audit_tag_tree(char *old, char *new)
780{
781	struct list_head cursor, barrier;
782	int failed = 0;
783	struct path path1, path2;
784	struct vfsmount *tagged;
785	int err;
786
787	err = kern_path(new, 0, &path2);
788	if (err)
789		return err;
790	tagged = collect_mounts(&path2);
791	path_put(&path2);
792	if (IS_ERR(tagged))
793		return PTR_ERR(tagged);
794
795	err = kern_path(old, 0, &path1);
796	if (err) {
797		drop_collected_mounts(tagged);
798		return err;
799	}
800
801	mutex_lock(&audit_filter_mutex);
802	list_add(&barrier, &tree_list);
803	list_add(&cursor, &barrier);
804
805	while (cursor.next != &tree_list) {
806		struct audit_tree *tree;
807		int good_one = 0;
808
809		tree = container_of(cursor.next, struct audit_tree, list);
810		get_tree(tree);
811		list_del(&cursor);
812		list_add(&cursor, &tree->list);
813		mutex_unlock(&audit_filter_mutex);
814
815		err = kern_path(tree->pathname, 0, &path2);
816		if (!err) {
817			good_one = path_is_under(&path1, &path2);
818			path_put(&path2);
819		}
820
821		if (!good_one) {
822			put_tree(tree);
823			mutex_lock(&audit_filter_mutex);
824			continue;
825		}
826
827		failed = iterate_mounts(tag_mount, tree, tagged);
828		if (failed) {
829			put_tree(tree);
830			mutex_lock(&audit_filter_mutex);
831			break;
832		}
833
834		mutex_lock(&audit_filter_mutex);
835		spin_lock(&hash_lock);
836		if (!tree->goner) {
837			list_del(&tree->list);
838			list_add(&tree->list, &tree_list);
839		}
840		spin_unlock(&hash_lock);
841		put_tree(tree);
842	}
843
844	while (barrier.prev != &tree_list) {
845		struct audit_tree *tree;
846
847		tree = container_of(barrier.prev, struct audit_tree, list);
848		get_tree(tree);
849		list_del(&tree->list);
850		list_add(&tree->list, &barrier);
851		mutex_unlock(&audit_filter_mutex);
852
853		if (!failed) {
854			struct node *node;
855			spin_lock(&hash_lock);
856			list_for_each_entry(node, &tree->chunks, list)
857				node->index &= ~(1U<<31);
858			spin_unlock(&hash_lock);
859		} else {
860			trim_marked(tree);
861		}
862
863		put_tree(tree);
864		mutex_lock(&audit_filter_mutex);
865	}
866	list_del(&barrier);
867	list_del(&cursor);
868	mutex_unlock(&audit_filter_mutex);
869	path_put(&path1);
870	drop_collected_mounts(tagged);
871	return failed;
872}
873
874
875static void audit_schedule_prune(void)
876{
877	wake_up_process(prune_thread);
878}
879
880/*
881 * ... and that one is done if evict_chunk() decides to delay until the end
882 * of syscall.  Runs synchronously.
883 */
884void audit_kill_trees(struct list_head *list)
885{
886	mutex_lock(&audit_cmd_mutex);
887	mutex_lock(&audit_filter_mutex);
888
889	while (!list_empty(list)) {
890		struct audit_tree *victim;
891
892		victim = list_entry(list->next, struct audit_tree, list);
893		kill_rules(victim);
894		list_del_init(&victim->list);
895
896		mutex_unlock(&audit_filter_mutex);
897
898		prune_one(victim);
899
900		mutex_lock(&audit_filter_mutex);
901	}
902
903	mutex_unlock(&audit_filter_mutex);
904	mutex_unlock(&audit_cmd_mutex);
905}
906
907/*
908 *  Here comes the stuff asynchronous to auditctl operations
909 */
910
911static void evict_chunk(struct audit_chunk *chunk)
912{
913	struct audit_tree *owner;
914	struct list_head *postponed = audit_killed_trees();
915	int need_prune = 0;
916	int n;
917
918	if (chunk->dead)
919		return;
920
921	chunk->dead = 1;
922	mutex_lock(&audit_filter_mutex);
923	spin_lock(&hash_lock);
924	while (!list_empty(&chunk->trees)) {
925		owner = list_entry(chunk->trees.next,
926				   struct audit_tree, same_root);
927		owner->goner = 1;
928		owner->root = NULL;
929		list_del_init(&owner->same_root);
930		spin_unlock(&hash_lock);
931		if (!postponed) {
932			kill_rules(owner);
933			list_move(&owner->list, &prune_list);
934			need_prune = 1;
935		} else {
936			list_move(&owner->list, postponed);
937		}
938		spin_lock(&hash_lock);
939	}
940	list_del_rcu(&chunk->hash);
941	for (n = 0; n < chunk->count; n++)
942		list_del_init(&chunk->owners[n].list);
943	spin_unlock(&hash_lock);
944	mutex_unlock(&audit_filter_mutex);
945	if (need_prune)
946		audit_schedule_prune();
947}
948
949static int audit_tree_handle_event(struct fsnotify_group *group,
950				   struct inode *to_tell,
951				   struct fsnotify_mark *inode_mark,
952				   struct fsnotify_mark *vfsmount_mark,
953				   u32 mask, void *data, int data_type,
954				   const unsigned char *file_name, u32 cookie)
955{
956	return 0;
957}
958
959static void audit_tree_freeing_mark(struct fsnotify_mark *entry, struct fsnotify_group *group)
960{
961	struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark);
962
963	evict_chunk(chunk);
964
965	/*
966	 * We are guaranteed to have at least one reference to the mark from
967	 * either the inode or the caller of fsnotify_destroy_mark().
968	 */
969	BUG_ON(atomic_read(&entry->refcnt) < 1);
970}
971
972static const struct fsnotify_ops audit_tree_ops = {
973	.handle_event = audit_tree_handle_event,
974	.freeing_mark = audit_tree_freeing_mark,
975};
976
977static int __init audit_tree_init(void)
978{
979	int i;
980
981	audit_tree_group = fsnotify_alloc_group(&audit_tree_ops);
982	if (IS_ERR(audit_tree_group))
983		audit_panic("cannot initialize fsnotify group for rectree watches");
984
985	for (i = 0; i < HASH_SIZE; i++)
986		INIT_LIST_HEAD(&chunk_hash_heads[i]);
987
988	return 0;
989}
990__initcall(audit_tree_init);