#include "audit.h"
#include <linux/inotify.h>
#include <linux/namei.h>
#include <linux/mount.h>

struct audit_tree;
struct audit_chunk;

struct audit_tree {
	atomic_t count;
	int goner;
	struct audit_chunk *root;
	struct list_head chunks;
	struct list_head rules;
	struct list_head list;
	struct list_head same_root;
	struct rcu_head head;
	char pathname[];
};

struct audit_chunk {
	struct list_head hash;
	struct inotify_watch watch;
	struct list_head trees;		/* with root here */
	int dead;
	int count;
	atomic_long_t refs;
	struct rcu_head head;
	struct node {
		struct list_head list;
		struct audit_tree *owner;
		unsigned index;		/* index; upper bit indicates 'will prune' */
	} owners[];
};

static LIST_HEAD(tree_list);
static LIST_HEAD(prune_list);

/*
 * One struct chunk is attached to each inode of interest.
 * We replace struct chunk on tagging/untagging.
 * Rules have a pointer to struct audit_tree.
 * Rules have a struct list_head rlist forming a list of rules over
 * the same tree.
 * References to struct chunk are collected at audit_inode{,_child}()
 * time and used in AUDIT_TREE rule matching.
 * These references are dropped at the same time we are calling
 * audit_free_names(), etc.
 *
 * Cyclic lists galore:
 * tree.chunks anchors chunk.owners[].list			hash_lock
 * tree.rules anchors rule.rlist				audit_filter_mutex
 * chunk.trees anchors tree.same_root				hash_lock
 * chunk.hash is a hash with middle bits of watch.inode as
 * a hash function.						RCU, hash_lock
 *
 * tree is refcounted; one reference for "some rules on rules_list refer to
 * it", one for each chunk with a pointer to it.
 *
 * chunk is refcounted by the embedded inotify_watch + .refs (a non-zero
 * refcount of the watch contributes 1 to .refs).
 *
 * node.index allows us to get from node.list to the containing chunk.
 * The MSB of that field is stolen to mark taggings that we might have to
 * revert - several operations have very unpleasant cleanup logic and
 * that makes a difference.  Some.
 */

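/* the single inotify handle that all chunk watches hang off */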
static struct inotify_handle *rtree_ih;

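/*
 * Allocate a tree with refcount 1 and a copy of the pathname;
 * returns NULL on allocation failure.
 */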
static struct audit_tree *alloc_tree(const char *s)
{
	struct audit_tree *tree;

	tree = kmalloc(sizeof(struct audit_tree) + strlen(s) + 1, GFP_KERNEL);
	if (tree) {
		atomic_set(&tree->count, 1);
		tree->goner = 0;
		INIT_LIST_HEAD(&tree->chunks);
		INIT_LIST_HEAD(&tree->rules);
		INIT_LIST_HEAD(&tree->list);
		INIT_LIST_HEAD(&tree->same_root);
		tree->root = NULL;
		strcpy(tree->pathname, s);
	}
	return tree;
}

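/* tree refcounting; the final put frees the tree after an RCU grace period */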
static inline void get_tree(struct audit_tree *tree)
{
	atomic_inc(&tree->count);
}

static void __put_tree(struct rcu_head *rcu)
{
	struct audit_tree *tree = container_of(rcu, struct audit_tree, head);
	kfree(tree);
}

static inline void put_tree(struct audit_tree *tree)
{
	if (atomic_dec_and_test(&tree->count))
		call_rcu(&tree->head, __put_tree);
}

/* to avoid bringing the entire thing in audit.h */
const char *audit_tree_path(struct audit_tree *tree)
{
	return tree->pathname;
}

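/*
 * Allocate a chunk with room for count owners.  The chunk starts with
 * one reference and an initialized (but not yet attached) watch.
 */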
static struct audit_chunk *alloc_chunk(int count)
{
	struct audit_chunk *chunk;
	size_t size;
	int i;

	size = offsetof(struct audit_chunk, owners) + count * sizeof(struct node);
	chunk = kzalloc(size, GFP_KERNEL);
	if (!chunk)
		return NULL;

	INIT_LIST_HEAD(&chunk->hash);
	INIT_LIST_HEAD(&chunk->trees);
	chunk->count = count;
	atomic_long_set(&chunk->refs, 1);
	for (i = 0; i < count; i++) {
		INIT_LIST_HEAD(&chunk->owners[i].list);
		chunk->owners[i].index = i;
	}
	inotify_init_watch(&chunk->watch);
	return chunk;
}

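/* drop the references owners[] hold on their trees, then free the chunk */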
static void free_chunk(struct audit_chunk *chunk)
{
	int i;

	for (i = 0; i < chunk->count; i++) {
		if (chunk->owners[i].owner)
			put_tree(chunk->owners[i].owner);
	}
	kfree(chunk);
}

void audit_put_chunk(struct audit_chunk *chunk)
{
	if (atomic_long_dec_and_test(&chunk->refs))
		free_chunk(chunk);
}

static void __put_chunk(struct rcu_head *rcu)
{
	struct audit_chunk *chunk = container_of(rcu, struct audit_chunk, head);
	audit_put_chunk(chunk);
}

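/* hash table of chunks, keyed by the address of the watched inode */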
enum {HASH_SIZE = 128};
static struct list_head chunk_hash_heads[HASH_SIZE];
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(hash_lock);

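/*
 * Use the middle bits of the inode address as the hash; the low bits
 * carry little entropy because of allocator alignment.
 */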
static inline struct list_head *chunk_hash(const struct inode *inode)
{
	unsigned long n = (unsigned long)inode / L1_CACHE_BYTES;
	return chunk_hash_heads + n % HASH_SIZE;
}

/* hash_lock is held by caller */
static void insert_hash(struct audit_chunk *chunk)
{
	struct list_head *list = chunk_hash(chunk->watch.inode);
	list_add_rcu(&chunk->hash, list);
}

/* called under rcu_read_lock */
struct audit_chunk *audit_tree_lookup(const struct inode *inode)
{
	struct list_head *list = chunk_hash(inode);
	struct audit_chunk *p;

	list_for_each_entry_rcu(p, list, hash) {
		if (p->watch.inode == inode) {
			atomic_long_inc(&p->refs);
			return p;
		}
	}
	return NULL;
}

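/* does this chunk carry a tag for the given tree? */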
int audit_tree_match(struct audit_chunk *chunk, struct audit_tree *tree)
{
	int n;
	for (n = 0; n < chunk->count; n++)
		if (chunk->owners[n].owner == tree)
			return 1;
	return 0;
}

/* tagging and untagging inodes with trees */

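/*
 * node.index (with the MSB masked off) is the node's slot in owners[];
 * e.g. for owners[2] it is 2, so stepping the pointer back by that much
 * lands on owners[0], from which container_of() recovers the chunk.
 */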
static struct audit_chunk *find_chunk(struct node *p)
{
	int index = p->index & ~(1U<<31);
	p -= index;
	return container_of(p, struct audit_chunk, owners[0]);
}

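/*
 * Remove the tag p from its chunk: replace the chunk with a copy that is
 * one owner shorter, or take it out entirely if p was the last owner.
 * Called and returns with hash_lock held; drops it in between.
 */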
static void untag_chunk(struct node *p)
{
	struct audit_chunk *chunk = find_chunk(p);
	struct audit_chunk *new;
	struct audit_tree *owner;
	int size = chunk->count - 1;
	int i, j;

	if (!pin_inotify_watch(&chunk->watch)) {
		/*
		 * Filesystem is shutting down; all watches are getting
		 * evicted, just take it off the node list for this
		 * tree and let the eviction logic take care of the
		 * rest.
		 */
		owner = p->owner;
		if (owner->root == chunk) {
			list_del_init(&owner->same_root);
			owner->root = NULL;
		}
		list_del_init(&p->list);
		p->owner = NULL;
		put_tree(owner);
		return;
	}

	spin_unlock(&hash_lock);

	/*
	 * pin_inotify_watch() succeeded, so the watch won't go away
	 * from under us.
	 */
	mutex_lock(&chunk->watch.inode->inotify_mutex);
	if (chunk->dead) {
		mutex_unlock(&chunk->watch.inode->inotify_mutex);
		goto out;
	}

	owner = p->owner;

	if (!size) {
		chunk->dead = 1;
		spin_lock(&hash_lock);
		list_del_init(&chunk->trees);
		if (owner->root == chunk)
			owner->root = NULL;
		list_del_init(&p->list);
		list_del_rcu(&chunk->hash);
		spin_unlock(&hash_lock);
		inotify_evict_watch(&chunk->watch);
		mutex_unlock(&chunk->watch.inode->inotify_mutex);
		put_inotify_watch(&chunk->watch);
		goto out;
	}

	new = alloc_chunk(size);
	if (!new)
		goto Fallback;
	if (inotify_clone_watch(&chunk->watch, &new->watch) < 0) {
		free_chunk(new);
		goto Fallback;
	}

	chunk->dead = 1;
	spin_lock(&hash_lock);
	list_replace_init(&chunk->trees, &new->trees);
	if (owner->root == chunk) {
		list_del_init(&owner->same_root);
		owner->root = NULL;
	}

	/*
	 * The old chunk has size + 1 owners; copy all of them except p
	 * into the new one.  j indexes the old chunk and i the new one,
	 * so j has to run all the way to size for p to be found and
	 * unlinked even when it sits in the last slot.
	 */
	for (i = j = 0; j <= size; i++, j++) {
		struct audit_tree *s;
		if (&chunk->owners[j] == p) {
			list_del_init(&p->list);
			i--;
			continue;
		}
		s = chunk->owners[j].owner;
		new->owners[i].owner = s;
		new->owners[i].index = chunk->owners[j].index - j + i;
		if (!s) /* result of earlier fallback */
			continue;
		get_tree(s);
		list_replace_init(&chunk->owners[j].list, &new->owners[i].list);
	}

	list_replace_rcu(&chunk->hash, &new->hash);
	list_for_each_entry(owner, &new->trees, same_root)
		owner->root = new;
	spin_unlock(&hash_lock);
	inotify_evict_watch(&chunk->watch);
	mutex_unlock(&chunk->watch.inode->inotify_mutex);
	put_inotify_watch(&chunk->watch);
	goto out;

Fallback:
	/* do the best we can */
	spin_lock(&hash_lock);
	if (owner->root == chunk) {
		list_del_init(&owner->same_root);
		owner->root = NULL;
	}
	list_del_init(&p->list);
	p->owner = NULL;
	put_tree(owner);
	spin_unlock(&hash_lock);
	mutex_unlock(&chunk->watch.inode->inotify_mutex);
out:
	unpin_inotify_watch(&chunk->watch);
	spin_lock(&hash_lock);
}

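/* attach a fresh one-owner chunk to an inode that has no chunk yet */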
static int create_chunk(struct inode *inode, struct audit_tree *tree)
{
	struct audit_chunk *chunk = alloc_chunk(1);
	if (!chunk)
		return -ENOMEM;

	if (inotify_add_watch(rtree_ih, &chunk->watch, inode, IN_IGNORED | IN_DELETE_SELF) < 0) {
		free_chunk(chunk);
		return -ENOSPC;
	}

	mutex_lock(&inode->inotify_mutex);
	spin_lock(&hash_lock);
	if (tree->goner) {
		spin_unlock(&hash_lock);
		chunk->dead = 1;
		inotify_evict_watch(&chunk->watch);
		mutex_unlock(&inode->inotify_mutex);
		put_inotify_watch(&chunk->watch);
		return 0;
	}
	chunk->owners[0].index = (1U << 31);
	chunk->owners[0].owner = tree;
	get_tree(tree);
	list_add(&chunk->owners[0].list, &tree->chunks);
	if (!tree->root) {
		tree->root = chunk;
		list_add(&tree->same_root, &chunk->trees);
	}
	insert_hash(chunk);
	spin_unlock(&hash_lock);
	mutex_unlock(&inode->inotify_mutex);
	return 0;
}

/* the first tagged inode becomes root of tree */
static int tag_chunk(struct inode *inode, struct audit_tree *tree)
{
	struct inotify_watch *watch;
	struct audit_tree *owner;
	struct audit_chunk *chunk, *old;
	struct node *p;
	int n;

	if (inotify_find_watch(rtree_ih, inode, &watch) < 0)
		return create_chunk(inode, tree);

	old = container_of(watch, struct audit_chunk, watch);

	/* are we already there? */
	spin_lock(&hash_lock);
	for (n = 0; n < old->count; n++) {
		if (old->owners[n].owner == tree) {
			spin_unlock(&hash_lock);
			put_inotify_watch(watch);
			return 0;
		}
	}
	spin_unlock(&hash_lock);

	chunk = alloc_chunk(old->count + 1);
	if (!chunk)
		return -ENOMEM;

	mutex_lock(&inode->inotify_mutex);
	if (inotify_clone_watch(&old->watch, &chunk->watch) < 0) {
		mutex_unlock(&inode->inotify_mutex);
		free_chunk(chunk);
		return -ENOSPC;
	}
	spin_lock(&hash_lock);
	if (tree->goner) {
		spin_unlock(&hash_lock);
		chunk->dead = 1;
		inotify_evict_watch(&chunk->watch);
		mutex_unlock(&inode->inotify_mutex);
		put_inotify_watch(&chunk->watch);
		return 0;
	}
	list_replace_init(&old->trees, &chunk->trees);
	for (n = 0, p = chunk->owners; n < old->count; n++, p++) {
		struct audit_tree *s = old->owners[n].owner;
		p->owner = s;
		p->index = old->owners[n].index;
		if (!s) /* result of fallback in untag */
			continue;
		get_tree(s);
		list_replace_init(&old->owners[n].list, &p->list);
	}
	p->index = (chunk->count - 1) | (1U<<31);
	p->owner = tree;
	get_tree(tree);
	list_add(&p->list, &tree->chunks);
	list_replace_rcu(&old->hash, &chunk->hash);
	list_for_each_entry(owner, &chunk->trees, same_root)
		owner->root = chunk;
	old->dead = 1;
	if (!tree->root) {
		tree->root = chunk;
		list_add(&tree->same_root, &chunk->trees);
	}
	spin_unlock(&hash_lock);
	inotify_evict_watch(&old->watch);
	mutex_unlock(&inode->inotify_mutex);
	put_inotify_watch(&old->watch);
	return 0;
}

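/*
 * Detach every rule using this tree, emitting an AUDIT_CONFIG_CHANGE
 * record for each.  Called with audit_filter_mutex held.
 */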
static void kill_rules(struct audit_tree *tree)
{
	struct audit_krule *rule, *next;
	struct audit_entry *entry;
	struct audit_buffer *ab;

	list_for_each_entry_safe(rule, next, &tree->rules, rlist) {
		entry = container_of(rule, struct audit_entry, rule);

		list_del_init(&rule->rlist);
		if (rule->tree) {
			/* not a half-baked one */
			ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
			audit_log_format(ab, "op=remove rule dir=");
			audit_log_untrustedstring(ab, rule->tree->pathname);
			if (rule->filterkey) {
				audit_log_format(ab, " key=");
				audit_log_untrustedstring(ab, rule->filterkey);
			} else
				audit_log_format(ab, " key=(null)");
			audit_log_format(ab, " list=%d res=1", rule->listnr);
			audit_log_end(ab);
			rule->tree = NULL;
			list_del_rcu(&entry->list);
			list_del(&entry->rule.list);
			call_rcu(&entry->rcu, audit_free_rule_rcu);
		}
	}
}

/*
 * finish killing struct audit_tree
 */
static void prune_one(struct audit_tree *victim)
{
	spin_lock(&hash_lock);
	while (!list_empty(&victim->chunks)) {
		struct node *p;

		p = list_entry(victim->chunks.next, struct node, list);

		untag_chunk(p);
	}
	spin_unlock(&hash_lock);
	put_tree(victim);
}

/* trim the uncommitted chunks from tree */

static void trim_marked(struct audit_tree *tree)
{
	struct list_head *p, *q;
	spin_lock(&hash_lock);
	if (tree->goner) {
		spin_unlock(&hash_lock);
		return;
	}
	/* reorder: move the marked chunks to the front of the list */
	for (p = tree->chunks.next; p != &tree->chunks; p = q) {
		struct node *node = list_entry(p, struct node, list);
		q = p->next;
		if (node->index & (1U<<31)) {
			list_del_init(p);
			list_add(p, &tree->chunks);
		}
	}

	while (!list_empty(&tree->chunks)) {
		struct node *node;

		node = list_entry(tree->chunks.next, struct node, list);

		/* have we run out of marked? */
		if (!(node->index & (1U<<31)))
			break;

		untag_chunk(node);
	}
	if (!tree->root && !tree->goner) {
		tree->goner = 1;
		spin_unlock(&hash_lock);
		mutex_lock(&audit_filter_mutex);
		kill_rules(tree);
		list_del_init(&tree->list);
		mutex_unlock(&audit_filter_mutex);
		prune_one(tree);
	} else {
		spin_unlock(&hash_lock);
	}
}

/* called with audit_filter_mutex */
int audit_remove_tree_rule(struct audit_krule *rule)
{
	struct audit_tree *tree;
	tree = rule->tree;
	if (tree) {
		spin_lock(&hash_lock);
		list_del_init(&rule->rlist);
		if (list_empty(&tree->rules) && !tree->goner) {
			tree->root = NULL;
			list_del_init(&tree->same_root);
			tree->goner = 1;
			list_move(&tree->list, &prune_list);
			rule->tree = NULL;
			spin_unlock(&hash_lock);
			audit_schedule_prune();
			return 1;
		}
		rule->tree = NULL;
		spin_unlock(&hash_lock);
		return 1;
	}
	return 0;
}

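/*
 * Walk all trees: mark every chunk, clear the mark on chunks whose
 * inode is still the root of one of the tree's mounts, then untag
 * whatever remains marked.
 */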
void audit_trim_trees(void)
{
	struct list_head cursor;

	mutex_lock(&audit_filter_mutex);
	list_add(&cursor, &tree_list);
	while (cursor.next != &tree_list) {
		struct audit_tree *tree;
		struct path path;
		struct vfsmount *root_mnt;
		struct node *node;
		struct list_head list;
		int err;

		tree = container_of(cursor.next, struct audit_tree, list);
		get_tree(tree);
		list_del(&cursor);
		list_add(&cursor, &tree->list);
		mutex_unlock(&audit_filter_mutex);

		err = kern_path(tree->pathname, 0, &path);
		if (err)
			goto skip_it;

		root_mnt = collect_mounts(path.mnt, path.dentry);
		path_put(&path);
		if (!root_mnt)
			goto skip_it;

		list_add_tail(&list, &root_mnt->mnt_list);
		spin_lock(&hash_lock);
		list_for_each_entry(node, &tree->chunks, list) {
			struct audit_chunk *chunk = find_chunk(node);
			struct inode *inode = chunk->watch.inode;
			struct vfsmount *mnt;
			node->index |= 1U<<31;
			list_for_each_entry(mnt, &list, mnt_list) {
				if (mnt->mnt_root->d_inode == inode) {
					node->index &= ~(1U<<31);
					break;
				}
			}
		}
		spin_unlock(&hash_lock);
		trim_marked(tree);
		list_del_init(&list);
		drop_collected_mounts(root_mnt);
skip_it:
		put_tree(tree);	/* matches the get_tree() above, error paths included */
		mutex_lock(&audit_filter_mutex);
	}
	list_del(&cursor);
	mutex_unlock(&audit_filter_mutex);
}

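/* is the location <mnt, dentry> within the subtree rooted at *path? */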
static int is_under(struct vfsmount *mnt, struct dentry *dentry,
		    struct path *path)
{
	if (mnt != path->mnt) {
		for (;;) {
			if (mnt->mnt_parent == mnt)
				return 0;
			if (mnt->mnt_parent == path->mnt)
				break;
			mnt = mnt->mnt_parent;
		}
		dentry = mnt->mnt_mountpoint;
	}
	return is_subdir(dentry, path->dentry);
}

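/*
 * Attach a fresh tree to a rule being set up.  Tree rules accept only
 * absolute paths, equality comparison and the exit filter list.
 */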
int audit_make_tree(struct audit_krule *rule, char *pathname, u32 op)
{
	if (pathname[0] != '/' ||
	    rule->listnr != AUDIT_FILTER_EXIT ||
	    op != Audit_equal ||
	    rule->inode_f || rule->watch || rule->tree)
		return -EINVAL;
	rule->tree = alloc_tree(pathname);
	if (!rule->tree)
		return -ENOMEM;
	return 0;
}

void audit_put_tree(struct audit_tree *tree)
{
	put_tree(tree);
}

/* called with audit_filter_mutex */
int audit_add_tree_rule(struct audit_krule *rule)
{
	struct audit_tree *seed = rule->tree, *tree;
	struct path path;
	struct vfsmount *mnt, *p;
	struct list_head list;
	int err;

	list_for_each_entry(tree, &tree_list, list) {
		if (!strcmp(seed->pathname, tree->pathname)) {
			put_tree(seed);
			rule->tree = tree;
			list_add(&rule->rlist, &tree->rules);
			return 0;
		}
	}
	tree = seed;
	list_add(&tree->list, &tree_list);
	list_add(&rule->rlist, &tree->rules);
	/* do not set rule->tree yet */
	mutex_unlock(&audit_filter_mutex);

	err = kern_path(tree->pathname, 0, &path);
	if (err)
		goto Err;
	mnt = collect_mounts(path.mnt, path.dentry);
	path_put(&path);
	if (!mnt) {
		err = -ENOMEM;
		goto Err;
	}
	list_add_tail(&list, &mnt->mnt_list);

	get_tree(tree);
	list_for_each_entry(p, &list, mnt_list) {
		err = tag_chunk(p->mnt_root->d_inode, tree);
		if (err)
			break;
	}

	list_del(&list);
	drop_collected_mounts(mnt);

	if (!err) {
		struct node *node;
		spin_lock(&hash_lock);
		list_for_each_entry(node, &tree->chunks, list)
			node->index &= ~(1U<<31);
		spin_unlock(&hash_lock);
	} else {
		trim_marked(tree);
		goto Err;
	}

	mutex_lock(&audit_filter_mutex);
	if (list_empty(&rule->rlist)) {
		put_tree(tree);
		return -ENOENT;
	}
	rule->tree = tree;
	put_tree(tree);

	return 0;
Err:
	mutex_lock(&audit_filter_mutex);
	list_del_init(&tree->list);
	list_del_init(&tree->rules);
	put_tree(tree);
	return err;
}

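/*
 * For every tree that covers the location old, tag all the mounts
 * collected at new as well.
 */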
int audit_tag_tree(char *old, char *new)
{
	struct list_head cursor, barrier;
	int failed = 0;
	struct path path;
	struct vfsmount *tagged;
	struct list_head list;
	struct vfsmount *mnt;
	struct dentry *dentry;
	int err;

	err = kern_path(new, 0, &path);
	if (err)
		return err;
	tagged = collect_mounts(path.mnt, path.dentry);
	path_put(&path);
	if (!tagged)
		return -ENOMEM;

	err = kern_path(old, 0, &path);
	if (err) {
		drop_collected_mounts(tagged);
		return err;
	}
	mnt = mntget(path.mnt);
	dentry = dget(path.dentry);
	path_put(&path);

	if (dentry == tagged->mnt_root && dentry == mnt->mnt_root)
		follow_up(&mnt, &dentry);

	list_add_tail(&list, &tagged->mnt_list);

	mutex_lock(&audit_filter_mutex);
	list_add(&barrier, &tree_list);
	list_add(&cursor, &barrier);

	while (cursor.next != &tree_list) {
		struct audit_tree *tree;
		struct vfsmount *p;

		tree = container_of(cursor.next, struct audit_tree, list);
		get_tree(tree);
		list_del(&cursor);
		list_add(&cursor, &tree->list);
		mutex_unlock(&audit_filter_mutex);

		err = kern_path(tree->pathname, 0, &path);
		if (err) {
			put_tree(tree);
			mutex_lock(&audit_filter_mutex);
			continue;
		}

		spin_lock(&vfsmount_lock);
		if (!is_under(mnt, dentry, &path)) {
			spin_unlock(&vfsmount_lock);
			path_put(&path);
			put_tree(tree);
			mutex_lock(&audit_filter_mutex);
			continue;
		}
		spin_unlock(&vfsmount_lock);
		path_put(&path);

		list_for_each_entry(p, &list, mnt_list) {
			failed = tag_chunk(p->mnt_root->d_inode, tree);
			if (failed)
				break;
		}

		if (failed) {
			put_tree(tree);
			mutex_lock(&audit_filter_mutex);
			break;
		}

		mutex_lock(&audit_filter_mutex);
		spin_lock(&hash_lock);
		if (!tree->goner) {
			list_del(&tree->list);
			list_add(&tree->list, &tree_list);
		}
		spin_unlock(&hash_lock);
		put_tree(tree);
	}

	while (barrier.prev != &tree_list) {
		struct audit_tree *tree;

		tree = container_of(barrier.prev, struct audit_tree, list);
		get_tree(tree);
		list_del(&tree->list);
		list_add(&tree->list, &barrier);
		mutex_unlock(&audit_filter_mutex);

		if (!failed) {
			struct node *node;
			spin_lock(&hash_lock);
			list_for_each_entry(node, &tree->chunks, list)
				node->index &= ~(1U<<31);
			spin_unlock(&hash_lock);
		} else {
			trim_marked(tree);
		}

		put_tree(tree);
		mutex_lock(&audit_filter_mutex);
	}
	list_del(&barrier);
	list_del(&cursor);
	list_del(&list);
	mutex_unlock(&audit_filter_mutex);
	dput(dentry);
	mntput(mnt);
	drop_collected_mounts(tagged);
	return failed;
}

/*
 * That gets run when evict_chunk() ends up needing to kill audit_tree.
 * Runs from a separate thread, with audit_cmd_mutex held.
 */
void audit_prune_trees(void)
{
	mutex_lock(&audit_filter_mutex);

	while (!list_empty(&prune_list)) {
		struct audit_tree *victim;

		victim = list_entry(prune_list.next, struct audit_tree, list);
		list_del_init(&victim->list);

		mutex_unlock(&audit_filter_mutex);

		prune_one(victim);

		mutex_lock(&audit_filter_mutex);
	}

	mutex_unlock(&audit_filter_mutex);
}

/*
 * Here comes the stuff asynchronous to auditctl operations
 */

/* inode->inotify_mutex is locked */
static void evict_chunk(struct audit_chunk *chunk)
{
	struct audit_tree *owner;
	int n;

	if (chunk->dead)
		return;

	chunk->dead = 1;
	mutex_lock(&audit_filter_mutex);
	spin_lock(&hash_lock);
	while (!list_empty(&chunk->trees)) {
		owner = list_entry(chunk->trees.next,
				   struct audit_tree, same_root);
		owner->goner = 1;
		owner->root = NULL;
		list_del_init(&owner->same_root);
		spin_unlock(&hash_lock);
		kill_rules(owner);
		list_move(&owner->list, &prune_list);
		audit_schedule_prune();
		spin_lock(&hash_lock);
	}
	list_del_rcu(&chunk->hash);
	for (n = 0; n < chunk->count; n++)
		list_del_init(&chunk->owners[n].list);
	spin_unlock(&hash_lock);
	mutex_unlock(&audit_filter_mutex);
}

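/*
 * inotify callback: IN_IGNORED means the watch has been removed, so
 * the chunk has to go.
 */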
static void handle_event(struct inotify_watch *watch, u32 wd, u32 mask,
			 u32 cookie, const char *dname, struct inode *inode)
{
	struct audit_chunk *chunk = container_of(watch, struct audit_chunk, watch);

	if (mask & IN_IGNORED) {
		evict_chunk(chunk);
		put_inotify_watch(watch);
	}
}

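/*
 * The last reference to the watch is gone; drop the reference it
 * contributed to chunk->refs once an RCU grace period has passed.
 */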
static void destroy_watch(struct inotify_watch *watch)
{
	struct audit_chunk *chunk = container_of(watch, struct audit_chunk, watch);
	call_rcu(&chunk->head, __put_chunk);
}

static const struct inotify_operations rtree_inotify_ops = {
	.handle_event	= handle_event,
	.destroy_watch	= destroy_watch,
};

static int __init audit_tree_init(void)
{
	int i;

	rtree_ih = inotify_init(&rtree_inotify_ops);
	if (IS_ERR(rtree_ih))
		audit_panic("cannot initialize inotify handle for rectree watches");

	for (i = 0; i < HASH_SIZE; i++)
		INIT_LIST_HEAD(&chunk_hash_heads[i]);

	return 0;
}
__initcall(audit_tree_init);