xref: /openbmc/linux/kernel/audit_tree.c (revision f35e839a)
#include "audit.h"
#include <linux/fsnotify_backend.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/kthread.h>
#include <linux/slab.h>

struct audit_tree;
struct audit_chunk;

struct audit_tree {
	atomic_t count;
	int goner;
	struct audit_chunk *root;
	struct list_head chunks;
	struct list_head rules;
	struct list_head list;
	struct list_head same_root;
	struct rcu_head head;
	char pathname[];
};

struct audit_chunk {
	struct list_head hash;
	struct fsnotify_mark mark;
	struct list_head trees;		/* with root here */
	int dead;
	int count;
	atomic_long_t refs;
	struct rcu_head head;
	struct node {
		struct list_head list;
		struct audit_tree *owner;
		unsigned index;		/* index; upper bit indicates 'will prune' */
	} owners[];
};

static LIST_HEAD(tree_list);
static LIST_HEAD(prune_list);

/*
 * One struct chunk is attached to each inode of interest.
 * We replace struct chunk on tagging/untagging.
 * Rules have a pointer to struct audit_tree.
 * Rules have struct list_head rlist forming a list of rules over
 * the same tree.
 * References to struct chunk are collected at audit_inode{,_child}()
 * time and used in AUDIT_TREE rule matching.
 * These references are dropped at the same time we are calling
 * audit_free_names(), etc.
 *
 * Cyclic lists galore:
 * tree.chunks anchors chunk.owners[].list			hash_lock
 * tree.rules anchors rule.rlist				audit_filter_mutex
 * chunk.trees anchors tree.same_root				hash_lock
 * chunk.hash is a hash with middle bits of watch.inode as
 * a hash function.						RCU, hash_lock
 *
 * tree is refcounted; one reference for "some rules on rules_list refer to
 * it", one for each chunk with a pointer to it.
 *
 * chunk is refcounted by embedded fsnotify_mark + .refs (non-zero refcount
 * of watch contributes 1 to .refs).
 *
 * node.index allows us to get from node.list to the containing chunk.
 * The MSB of node.index is stolen to mark taggings that we might have to
 * revert - several operations have very unpleasant cleanup logic, and
 * that makes a difference.
 */

static struct fsnotify_group *audit_tree_group;

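/*
 * Allocate a tree carrying an embedded copy of the watched path.
 * The caller holds the initial reference.
 */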
static struct audit_tree *alloc_tree(const char *s)
{
	struct audit_tree *tree;

	tree = kmalloc(sizeof(struct audit_tree) + strlen(s) + 1, GFP_KERNEL);
	if (tree) {
		atomic_set(&tree->count, 1);
		tree->goner = 0;
		INIT_LIST_HEAD(&tree->chunks);
		INIT_LIST_HEAD(&tree->rules);
		INIT_LIST_HEAD(&tree->list);
		INIT_LIST_HEAD(&tree->same_root);
		tree->root = NULL;
		strcpy(tree->pathname, s);
	}
	return tree;
}

static inline void get_tree(struct audit_tree *tree)
{
	atomic_inc(&tree->count);
}

static inline void put_tree(struct audit_tree *tree)
{
	if (atomic_dec_and_test(&tree->count))
		kfree_rcu(tree, head);
}

/* to avoid bringing the entire thing into audit.h */
const char *audit_tree_path(struct audit_tree *tree)
{
	return tree->pathname;
}

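/*
 * Drop the tree reference held by each owner slot, then free the chunk
 * itself.
 */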
static void free_chunk(struct audit_chunk *chunk)
{
	int i;

	for (i = 0; i < chunk->count; i++) {
		if (chunk->owners[i].owner)
			put_tree(chunk->owners[i].owner);
	}
	kfree(chunk);
}

void audit_put_chunk(struct audit_chunk *chunk)
{
	if (atomic_long_dec_and_test(&chunk->refs))
		free_chunk(chunk);
}

static void __put_chunk(struct rcu_head *rcu)
{
	struct audit_chunk *chunk = container_of(rcu, struct audit_chunk, head);
	audit_put_chunk(chunk);
}

static void audit_tree_destroy_watch(struct fsnotify_mark *entry)
{
	struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark);
	call_rcu(&chunk->head, __put_chunk);
}

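/*
 * Allocate a chunk with room for @count owner slots (owners[] is a
 * flexible array member) and take the initial .refs reference.
 */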
static struct audit_chunk *alloc_chunk(int count)
{
	struct audit_chunk *chunk;
	size_t size;
	int i;

	size = offsetof(struct audit_chunk, owners) + count * sizeof(struct node);
	chunk = kzalloc(size, GFP_KERNEL);
	if (!chunk)
		return NULL;

	INIT_LIST_HEAD(&chunk->hash);
	INIT_LIST_HEAD(&chunk->trees);
	chunk->count = count;
	atomic_long_set(&chunk->refs, 1);
	for (i = 0; i < count; i++) {
		INIT_LIST_HEAD(&chunk->owners[i].list);
		chunk->owners[i].index = i;
	}
	fsnotify_init_mark(&chunk->mark, audit_tree_destroy_watch);
	return chunk;
}

enum {HASH_SIZE = 128};
static struct list_head chunk_hash_heads[HASH_SIZE];
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(hash_lock);

static inline struct list_head *chunk_hash(const struct inode *inode)
{
	unsigned long n = (unsigned long)inode / L1_CACHE_BYTES;
	return chunk_hash_heads + n % HASH_SIZE;
}

/* hash_lock & entry->lock are held by caller */
static void insert_hash(struct audit_chunk *chunk)
{
	struct fsnotify_mark *entry = &chunk->mark;
	struct list_head *list;

	if (!entry->i.inode)
		return;
	list = chunk_hash(entry->i.inode);
	list_add_rcu(&chunk->hash, list);
}

/* called under rcu_read_lock */
struct audit_chunk *audit_tree_lookup(const struct inode *inode)
{
	struct list_head *list = chunk_hash(inode);
	struct audit_chunk *p;

	list_for_each_entry_rcu(p, list, hash) {
		/* mark.inode may have gone NULL, but who cares? */
		if (p->mark.i.inode == inode) {
			atomic_long_inc(&p->refs);
			return p;
		}
	}
	return NULL;
}

int audit_tree_match(struct audit_chunk *chunk, struct audit_tree *tree)
{
	int n;
	for (n = 0; n < chunk->count; n++)
		if (chunk->owners[n].owner == tree)
			return 1;
	return 0;
}

/* tagging and untagging inodes with trees */

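/*
 * Recover the chunk containing a given node: the low bits of node.index
 * (everything below the "will prune" MSB) are the node's position in
 * chunk->owners[], so stepping back that many slots lands on owners[0],
 * from which container_of() gives the chunk.
 */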
static struct audit_chunk *find_chunk(struct node *p)
{
	int index = p->index & ~(1U<<31);
	p -= index;
	return container_of(p, struct audit_chunk, owners[0]);
}

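/*
 * Remove node @p's tree from its chunk.  If other owners remain, the
 * chunk is replaced by a copy with one fewer owner slot; otherwise the
 * mark is destroyed outright.  Called with hash_lock held; drops and
 * retakes it around the blocking work.
 */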
static void untag_chunk(struct node *p)
{
	struct audit_chunk *chunk = find_chunk(p);
	struct fsnotify_mark *entry = &chunk->mark;
	struct audit_chunk *new = NULL;
	struct audit_tree *owner;
	int size = chunk->count - 1;
	int i, j;

	fsnotify_get_mark(entry);

	spin_unlock(&hash_lock);

	if (size)
		new = alloc_chunk(size);

	spin_lock(&entry->lock);
	if (chunk->dead || !entry->i.inode) {
		spin_unlock(&entry->lock);
		if (new)
			free_chunk(new);
		goto out;
	}

	owner = p->owner;

	if (!size) {
		chunk->dead = 1;
		spin_lock(&hash_lock);
		list_del_init(&chunk->trees);
		if (owner->root == chunk)
			owner->root = NULL;
		list_del_init(&p->list);
		list_del_rcu(&chunk->hash);
		spin_unlock(&hash_lock);
		spin_unlock(&entry->lock);
		fsnotify_destroy_mark(entry, audit_tree_group);
		goto out;
	}

	if (!new)
		goto Fallback;

	fsnotify_duplicate_mark(&new->mark, entry);
	if (fsnotify_add_mark(&new->mark, new->mark.group, new->mark.i.inode, NULL, 1)) {
		fsnotify_put_mark(&new->mark);
		goto Fallback;
	}

	chunk->dead = 1;
	spin_lock(&hash_lock);
	list_replace_init(&chunk->trees, &new->trees);
	if (owner->root == chunk) {
		list_del_init(&owner->same_root);
		owner->root = NULL;
	}

	for (i = j = 0; j <= size; i++, j++) {
		struct audit_tree *s;
		if (&chunk->owners[j] == p) {
			list_del_init(&p->list);
			i--;
			continue;
		}
		s = chunk->owners[j].owner;
		new->owners[i].owner = s;
		new->owners[i].index = chunk->owners[j].index - j + i;
		if (!s) /* result of earlier fallback */
			continue;
		get_tree(s);
		list_replace_init(&chunk->owners[j].list, &new->owners[i].list);
	}

	list_replace_rcu(&chunk->hash, &new->hash);
	list_for_each_entry(owner, &new->trees, same_root)
		owner->root = new;
	spin_unlock(&hash_lock);
	spin_unlock(&entry->lock);
	fsnotify_destroy_mark(entry, audit_tree_group);
	fsnotify_put_mark(&new->mark);	/* drop initial reference */
	goto out;

Fallback:
	/* do the best we can */
	spin_lock(&hash_lock);
	if (owner->root == chunk) {
		list_del_init(&owner->same_root);
		owner->root = NULL;
	}
	list_del_init(&p->list);
	p->owner = NULL;
	put_tree(owner);
	spin_unlock(&hash_lock);
	spin_unlock(&entry->lock);
out:
	fsnotify_put_mark(entry);
	spin_lock(&hash_lock);
}

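/*
 * Attach a fresh single-owner chunk to an inode that has no chunk yet.
 */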
static int create_chunk(struct inode *inode, struct audit_tree *tree)
{
	struct fsnotify_mark *entry;
	struct audit_chunk *chunk = alloc_chunk(1);
	if (!chunk)
		return -ENOMEM;

	entry = &chunk->mark;
	if (fsnotify_add_mark(entry, audit_tree_group, inode, NULL, 0)) {
		fsnotify_put_mark(entry);
		return -ENOSPC;
	}

	spin_lock(&entry->lock);
	spin_lock(&hash_lock);
	if (tree->goner) {
		spin_unlock(&hash_lock);
		chunk->dead = 1;
		spin_unlock(&entry->lock);
		fsnotify_destroy_mark(entry, audit_tree_group);
		fsnotify_put_mark(entry);
		return 0;
	}
	chunk->owners[0].index = (1U << 31);
	chunk->owners[0].owner = tree;
	get_tree(tree);
	list_add(&chunk->owners[0].list, &tree->chunks);
	if (!tree->root) {
		tree->root = chunk;
		list_add(&tree->same_root, &chunk->trees);
	}
	insert_hash(chunk);
	spin_unlock(&hash_lock);
	spin_unlock(&entry->lock);
	fsnotify_put_mark(entry);	/* drop initial reference */
	return 0;
}

/*
 * The first tagged inode becomes root of tree; adding a tree to an
 * already-tagged inode replaces the chunk with a copy grown by one
 * owner slot.
 */
static int tag_chunk(struct inode *inode, struct audit_tree *tree)
{
	struct fsnotify_mark *old_entry, *chunk_entry;
	struct audit_tree *owner;
	struct audit_chunk *chunk, *old;
	struct node *p;
	int n;

	old_entry = fsnotify_find_inode_mark(audit_tree_group, inode);
	if (!old_entry)
		return create_chunk(inode, tree);

	old = container_of(old_entry, struct audit_chunk, mark);

	/* are we already there? */
	spin_lock(&hash_lock);
	for (n = 0; n < old->count; n++) {
		if (old->owners[n].owner == tree) {
			spin_unlock(&hash_lock);
			fsnotify_put_mark(old_entry);
			return 0;
		}
	}
	spin_unlock(&hash_lock);

	chunk = alloc_chunk(old->count + 1);
	if (!chunk) {
		fsnotify_put_mark(old_entry);
		return -ENOMEM;
	}

	chunk_entry = &chunk->mark;

	spin_lock(&old_entry->lock);
	if (!old_entry->i.inode) {
		/* old_entry is being shot, let's just lie */
		spin_unlock(&old_entry->lock);
		fsnotify_put_mark(old_entry);
		free_chunk(chunk);
		return -ENOENT;
	}

	fsnotify_duplicate_mark(chunk_entry, old_entry);
	if (fsnotify_add_mark(chunk_entry, chunk_entry->group, chunk_entry->i.inode, NULL, 1)) {
		spin_unlock(&old_entry->lock);
		fsnotify_put_mark(chunk_entry);
		fsnotify_put_mark(old_entry);
		return -ENOSPC;
	}

	/*
	 * Even though we hold old_entry->lock, this is safe since
	 * chunk_entry->lock could NEVER have been grabbed before.
	 */
	spin_lock(&chunk_entry->lock);
	spin_lock(&hash_lock);

	/* we now hold old_entry->lock, chunk_entry->lock, and hash_lock */
	if (tree->goner) {
		spin_unlock(&hash_lock);
		chunk->dead = 1;
		spin_unlock(&chunk_entry->lock);
		spin_unlock(&old_entry->lock);

		fsnotify_destroy_mark(chunk_entry, audit_tree_group);

		fsnotify_put_mark(chunk_entry);
		fsnotify_put_mark(old_entry);
		return 0;
	}
	list_replace_init(&old->trees, &chunk->trees);
	for (n = 0, p = chunk->owners; n < old->count; n++, p++) {
		struct audit_tree *s = old->owners[n].owner;
		p->owner = s;
		p->index = old->owners[n].index;
		if (!s) /* result of fallback in untag */
			continue;
		get_tree(s);
		list_replace_init(&old->owners[n].list, &p->list);
	}
	p->index = (chunk->count - 1) | (1U<<31);
	p->owner = tree;
	get_tree(tree);
	list_add(&p->list, &tree->chunks);
	list_replace_rcu(&old->hash, &chunk->hash);
	list_for_each_entry(owner, &chunk->trees, same_root)
		owner->root = chunk;
	old->dead = 1;
	if (!tree->root) {
		tree->root = chunk;
		list_add(&tree->same_root, &chunk->trees);
	}
	spin_unlock(&hash_lock);
	spin_unlock(&chunk_entry->lock);
	spin_unlock(&old_entry->lock);
	fsnotify_destroy_mark(old_entry, audit_tree_group);
	fsnotify_put_mark(chunk_entry);	/* drop initial reference */
	fsnotify_put_mark(old_entry); /* pair to fsnotify_find_inode_mark */
	return 0;
}

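/*
 * Emit an AUDIT_CONFIG_CHANGE record for a rule that is being removed
 * because its tree has gone away.
 */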
static void audit_log_remove_rule(struct audit_krule *rule)
{
	struct audit_buffer *ab;

	ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
	if (unlikely(!ab))
		return;
	audit_log_format(ab, "op=");
	audit_log_string(ab, "remove rule");
	audit_log_format(ab, " dir=");
	audit_log_untrustedstring(ab, rule->tree->pathname);
	audit_log_key(ab, rule->filterkey);
	audit_log_format(ab, " list=%d res=1", rule->listnr);
	audit_log_end(ab);
}

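/*
 * Detach and free every rule hanging off the tree; the caller holds
 * audit_filter_mutex.
 */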
static void kill_rules(struct audit_tree *tree)
{
	struct audit_krule *rule, *next;
	struct audit_entry *entry;

	list_for_each_entry_safe(rule, next, &tree->rules, rlist) {
		entry = container_of(rule, struct audit_entry, rule);

		list_del_init(&rule->rlist);
		if (rule->tree) {
			/* not a half-baked one */
			audit_log_remove_rule(rule);
			rule->tree = NULL;
			list_del_rcu(&entry->list);
			list_del(&entry->rule.list);
			call_rcu(&entry->rcu, audit_free_rule_rcu);
		}
	}
}

/*
 * finish killing struct audit_tree
 */
static void prune_one(struct audit_tree *victim)
{
	spin_lock(&hash_lock);
	while (!list_empty(&victim->chunks)) {
		struct node *p;

		p = list_entry(victim->chunks.next, struct node, list);

		untag_chunk(p);
	}
	spin_unlock(&hash_lock);
	put_tree(victim);
}

/* trim the uncommitted chunks from tree */

static void trim_marked(struct audit_tree *tree)
{
	struct list_head *p, *q;
	spin_lock(&hash_lock);
	if (tree->goner) {
		spin_unlock(&hash_lock);
		return;
	}
	/* reorder */
	for (p = tree->chunks.next; p != &tree->chunks; p = q) {
		struct node *node = list_entry(p, struct node, list);
		q = p->next;
		if (node->index & (1U<<31)) {
			list_del_init(p);
			list_add(p, &tree->chunks);
		}
	}

	while (!list_empty(&tree->chunks)) {
		struct node *node;

		node = list_entry(tree->chunks.next, struct node, list);

		/* have we run out of marked? */
		if (!(node->index & (1U<<31)))
			break;

		untag_chunk(node);
	}
	if (!tree->root && !tree->goner) {
		tree->goner = 1;
		spin_unlock(&hash_lock);
		mutex_lock(&audit_filter_mutex);
		kill_rules(tree);
		list_del_init(&tree->list);
		mutex_unlock(&audit_filter_mutex);
		prune_one(tree);
	} else {
		spin_unlock(&hash_lock);
	}
}

static void audit_schedule_prune(void);

/* called with audit_filter_mutex */
int audit_remove_tree_rule(struct audit_krule *rule)
{
	struct audit_tree *tree;
	tree = rule->tree;
	if (tree) {
		spin_lock(&hash_lock);
		list_del_init(&rule->rlist);
		if (list_empty(&tree->rules) && !tree->goner) {
			tree->root = NULL;
			list_del_init(&tree->same_root);
			tree->goner = 1;
			list_move(&tree->list, &prune_list);
			rule->tree = NULL;
			spin_unlock(&hash_lock);
			audit_schedule_prune();
			return 1;
		}
		rule->tree = NULL;
		spin_unlock(&hash_lock);
		return 1;
	}
	return 0;
}

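/* iterate_mounts() callback: does this mount have @arg as its root inode? */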
static int compare_root(struct vfsmount *mnt, void *arg)
{
	return mnt->mnt_root->d_inode == arg;
}

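/*
 * Walk tree_list and prune, from each tree, the chunks whose inodes are
 * no longer found under any of the tree's collected mounts.
 */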
void audit_trim_trees(void)
{
	struct list_head cursor;

	mutex_lock(&audit_filter_mutex);
	list_add(&cursor, &tree_list);
	while (cursor.next != &tree_list) {
		struct audit_tree *tree;
		struct path path;
		struct vfsmount *root_mnt;
		struct node *node;
		int err;

		tree = container_of(cursor.next, struct audit_tree, list);
		get_tree(tree);
		list_del(&cursor);
		list_add(&cursor, &tree->list);
		mutex_unlock(&audit_filter_mutex);

		err = kern_path(tree->pathname, 0, &path);
		if (err)
			goto skip_it;

		root_mnt = collect_mounts(&path);
		path_put(&path);
		if (IS_ERR(root_mnt))
			goto skip_it;

		spin_lock(&hash_lock);
		list_for_each_entry(node, &tree->chunks, list) {
			struct audit_chunk *chunk = find_chunk(node);
			/* this could be NULL if the watch is dying elsewhere... */
			struct inode *inode = chunk->mark.i.inode;
			node->index |= 1U<<31;
			if (iterate_mounts(compare_root, inode, root_mnt))
				node->index &= ~(1U<<31);
		}
		spin_unlock(&hash_lock);
		trim_marked(tree);
		drop_collected_mounts(root_mnt);
skip_it:
		put_tree(tree);
		mutex_lock(&audit_filter_mutex);
	}
	list_del(&cursor);
	mutex_unlock(&audit_filter_mutex);
}

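/*
 * Validate that the rule is an exit-filter equality match on an
 * absolute path with no other watch attached, then allocate the tree.
 */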
int audit_make_tree(struct audit_krule *rule, char *pathname, u32 op)
{
	if (pathname[0] != '/' ||
	    rule->listnr != AUDIT_FILTER_EXIT ||
	    op != Audit_equal ||
	    rule->inode_f || rule->watch || rule->tree)
		return -EINVAL;
	rule->tree = alloc_tree(pathname);
	if (!rule->tree)
		return -ENOMEM;
	return 0;
}

void audit_put_tree(struct audit_tree *tree)
{
	put_tree(tree);
}

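/* iterate_mounts() callback: tag each mount's root inode with the tree */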
static int tag_mount(struct vfsmount *mnt, void *arg)
{
	return tag_chunk(mnt->mnt_root->d_inode, arg);
}

/* called with audit_filter_mutex */
int audit_add_tree_rule(struct audit_krule *rule)
{
	struct audit_tree *seed = rule->tree, *tree;
	struct path path;
	struct vfsmount *mnt;
	int err;

	list_for_each_entry(tree, &tree_list, list) {
		if (!strcmp(seed->pathname, tree->pathname)) {
			put_tree(seed);
			rule->tree = tree;
			list_add(&rule->rlist, &tree->rules);
			return 0;
		}
	}
	tree = seed;
	list_add(&tree->list, &tree_list);
	list_add(&rule->rlist, &tree->rules);
	/* do not set rule->tree yet */
	mutex_unlock(&audit_filter_mutex);

	err = kern_path(tree->pathname, 0, &path);
	if (err)
		goto Err;
	mnt = collect_mounts(&path);
	path_put(&path);
	if (IS_ERR(mnt)) {
		err = PTR_ERR(mnt);
		goto Err;
	}

	get_tree(tree);
	err = iterate_mounts(tag_mount, tree, mnt);
	drop_collected_mounts(mnt);

	if (!err) {
		struct node *node;
		spin_lock(&hash_lock);
		list_for_each_entry(node, &tree->chunks, list)
			node->index &= ~(1U<<31);
		spin_unlock(&hash_lock);
	} else {
		trim_marked(tree);
		goto Err;
	}

	mutex_lock(&audit_filter_mutex);
	if (list_empty(&rule->rlist)) {
		put_tree(tree);
		return -ENOENT;
	}
	rule->tree = tree;
	put_tree(tree);

	return 0;
Err:
	mutex_lock(&audit_filter_mutex);
	list_del_init(&tree->list);
	list_del_init(&tree->rules);
	put_tree(tree);
	return err;
}

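/*
 * Tag the mounts collected at @new for every tree whose watched
 * directory contains @old.  Returns the first tagging failure, if any.
 */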
int audit_tag_tree(char *old, char *new)
{
	struct list_head cursor, barrier;
	int failed = 0;
	struct path path1, path2;
	struct vfsmount *tagged;
	int err;

	err = kern_path(new, 0, &path2);
	if (err)
		return err;
	tagged = collect_mounts(&path2);
	path_put(&path2);
	if (IS_ERR(tagged))
		return PTR_ERR(tagged);

	err = kern_path(old, 0, &path1);
	if (err) {
		drop_collected_mounts(tagged);
		return err;
	}

	mutex_lock(&audit_filter_mutex);
	list_add(&barrier, &tree_list);
	list_add(&cursor, &barrier);

	while (cursor.next != &tree_list) {
		struct audit_tree *tree;
		int good_one = 0;

		tree = container_of(cursor.next, struct audit_tree, list);
		get_tree(tree);
		list_del(&cursor);
		list_add(&cursor, &tree->list);
		mutex_unlock(&audit_filter_mutex);

		err = kern_path(tree->pathname, 0, &path2);
		if (!err) {
			good_one = path_is_under(&path1, &path2);
			path_put(&path2);
		}

		if (!good_one) {
			put_tree(tree);
			mutex_lock(&audit_filter_mutex);
			continue;
		}

		failed = iterate_mounts(tag_mount, tree, tagged);
		if (failed) {
			put_tree(tree);
			mutex_lock(&audit_filter_mutex);
			break;
		}

		mutex_lock(&audit_filter_mutex);
		spin_lock(&hash_lock);
		if (!tree->goner) {
			list_del(&tree->list);
			list_add(&tree->list, &tree_list);
		}
		spin_unlock(&hash_lock);
		put_tree(tree);
	}

	while (barrier.prev != &tree_list) {
		struct audit_tree *tree;

		tree = container_of(barrier.prev, struct audit_tree, list);
		get_tree(tree);
		list_del(&tree->list);
		list_add(&tree->list, &barrier);
		mutex_unlock(&audit_filter_mutex);

		if (!failed) {
			struct node *node;
			spin_lock(&hash_lock);
			list_for_each_entry(node, &tree->chunks, list)
				node->index &= ~(1U<<31);
			spin_unlock(&hash_lock);
		} else {
			trim_marked(tree);
		}

		put_tree(tree);
		mutex_lock(&audit_filter_mutex);
	}
	list_del(&barrier);
	list_del(&cursor);
	mutex_unlock(&audit_filter_mutex);
	path_put(&path1);
	drop_collected_mounts(tagged);
	return failed;
}

/*
 * That gets run when evict_chunk() ends up needing to kill audit_tree.
 * Runs from a separate thread.
 */
static int prune_tree_thread(void *unused)
{
	mutex_lock(&audit_cmd_mutex);
	mutex_lock(&audit_filter_mutex);

	while (!list_empty(&prune_list)) {
		struct audit_tree *victim;

		victim = list_entry(prune_list.next, struct audit_tree, list);
		list_del_init(&victim->list);

		mutex_unlock(&audit_filter_mutex);

		prune_one(victim);

		mutex_lock(&audit_filter_mutex);
	}

	mutex_unlock(&audit_filter_mutex);
	mutex_unlock(&audit_cmd_mutex);
	return 0;
}

static void audit_schedule_prune(void)
{
	kthread_run(prune_tree_thread, NULL, "audit_prune_tree");
}

/*
 * ... and that one is done if evict_chunk() decides to delay until the end
 * of syscall.  Runs synchronously.
 */
void audit_kill_trees(struct list_head *list)
{
	mutex_lock(&audit_cmd_mutex);
	mutex_lock(&audit_filter_mutex);

	while (!list_empty(list)) {
		struct audit_tree *victim;

		victim = list_entry(list->next, struct audit_tree, list);
		kill_rules(victim);
		list_del_init(&victim->list);

		mutex_unlock(&audit_filter_mutex);

		prune_one(victim);

		mutex_lock(&audit_filter_mutex);
	}

	mutex_unlock(&audit_filter_mutex);
	mutex_unlock(&audit_cmd_mutex);
}

/*
 * Here comes the stuff asynchronous to auditctl operations
 */

static void evict_chunk(struct audit_chunk *chunk)
{
	struct audit_tree *owner;
	struct list_head *postponed = audit_killed_trees();
	int need_prune = 0;
	int n;

	if (chunk->dead)
		return;

	chunk->dead = 1;
	mutex_lock(&audit_filter_mutex);
	spin_lock(&hash_lock);
	while (!list_empty(&chunk->trees)) {
		owner = list_entry(chunk->trees.next,
				   struct audit_tree, same_root);
		owner->goner = 1;
		owner->root = NULL;
		list_del_init(&owner->same_root);
		spin_unlock(&hash_lock);
		if (!postponed) {
			kill_rules(owner);
			list_move(&owner->list, &prune_list);
			need_prune = 1;
		} else {
			list_move(&owner->list, postponed);
		}
		spin_lock(&hash_lock);
	}
	list_del_rcu(&chunk->hash);
	for (n = 0; n < chunk->count; n++)
		list_del_init(&chunk->owners[n].list);
	spin_unlock(&hash_lock);
	if (need_prune)
		audit_schedule_prune();
	mutex_unlock(&audit_filter_mutex);
}

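/*
 * Tree watches don't queue events; should_send_event() below always
 * says no, so this handler must never be reached.
 */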
static int audit_tree_handle_event(struct fsnotify_group *group,
				   struct fsnotify_mark *inode_mark,
				   struct fsnotify_mark *vfsmount_mark,
				   struct fsnotify_event *event)
{
	BUG();
	return -EOPNOTSUPP;
}

static void audit_tree_freeing_mark(struct fsnotify_mark *entry, struct fsnotify_group *group)
{
	struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark);

	evict_chunk(chunk);

	/*
	 * We are guaranteed to have at least one reference to the mark from
	 * either the inode or the caller of fsnotify_destroy_mark().
	 */
	BUG_ON(atomic_read(&entry->refcnt) < 1);
}

static bool audit_tree_send_event(struct fsnotify_group *group, struct inode *inode,
				  struct fsnotify_mark *inode_mark,
				  struct fsnotify_mark *vfsmount_mark,
				  __u32 mask, void *data, int data_type)
{
	return false;
}

static const struct fsnotify_ops audit_tree_ops = {
	.handle_event = audit_tree_handle_event,
	.should_send_event = audit_tree_send_event,
	.free_group_priv = NULL,
	.free_event_priv = NULL,
	.freeing_mark = audit_tree_freeing_mark,
};

static int __init audit_tree_init(void)
{
	int i;

	audit_tree_group = fsnotify_alloc_group(&audit_tree_ops);
	if (IS_ERR(audit_tree_group))
		audit_panic("cannot initialize fsnotify group for rectree watches");

	for (i = 0; i < HASH_SIZE; i++)
		INIT_LIST_HEAD(&chunk_hash_heads[i]);

	return 0;
}
__initcall(audit_tree_init);