/* kernel/audit_tree.c (openbmc/linux, revision b627b4ed) */
#include "audit.h"
#include <linux/inotify.h>
#include <linux/namei.h>
#include <linux/mount.h>

struct audit_tree;
struct audit_chunk;

struct audit_tree {
	atomic_t count;			/* reference count */
	int goner;			/* tree is going away */
	struct audit_chunk *root;	/* chunk covering the root inode */
	struct list_head chunks;	/* anchors chunk.owners[].list */
	struct list_head rules;		/* anchors rule.rlist */
	struct list_head list;		/* on tree_list or prune_list */
	struct list_head same_root;	/* on root chunk's ->trees */
	struct rcu_head head;
	char pathname[];
};

struct audit_chunk {
	struct list_head hash;		/* chunk_hash_heads linkage */
	struct inotify_watch watch;	/* watch on the inode of interest */
	struct list_head trees;		/* with root here */
	int dead;			/* replaced or being evicted */
	int count;			/* number of owners[] slots */
	atomic_long_t refs;
	struct rcu_head head;
	struct node {
		struct list_head list;	/* on owner->chunks */
		struct audit_tree *owner;
		unsigned index;		/* index; upper bit indicates 'will prune' */
	} owners[];
};

static LIST_HEAD(tree_list);
static LIST_HEAD(prune_list);

/*
 * One struct chunk is attached to each inode of interest.
 * We replace struct chunk on tagging/untagging.
 * Rules have a pointer to struct audit_tree.
 * Rules have struct list_head rlist forming a list of rules over
 * the same tree.
 * References to struct chunk are collected at audit_inode{,_child}()
 * time and used in AUDIT_TREE rule matching.
 * These references are dropped at the same time we are calling
 * audit_free_names(), etc.
 *
 * Cyclic lists galore:
 * tree.chunks anchors chunk.owners[].list			hash_lock
 * tree.rules anchors rule.rlist				audit_filter_mutex
 * chunk.trees anchors tree.same_root				hash_lock
 * chunk.hash is a hash table using the middle bits of watch.inode
 * as the hash function.					RCU, hash_lock
 *
 * tree is refcounted; one reference for "some rules on rules_list refer to
 * it", one for each chunk with a pointer to it.
 *
 * chunk is refcounted by embedded inotify_watch + .refs (non-zero refcount
 * of watch contributes 1 to .refs).
 *
 * node.index allows us to get from node.list to the containing chunk.
 * The MSB of that sucker is stolen to mark taggings that we might have to
 * revert - several operations have very unpleasant cleanup logic and
 * that makes a difference.  Some.
 */

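/*
 * Rough picture, two trees tagging the same inode (one chunk, two
 * owner slots):
 *
 *	tree A                            tree B
 *	  .chunks --> owners[0].list        .chunks --> owners[1].list
 *	                     \                  /
 *	                      `-- audit_chunk -'
 *	                           .watch --> inode
 */
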
static struct inotify_handle *rtree_ih;

static struct audit_tree *alloc_tree(const char *s)
{
	struct audit_tree *tree;

	tree = kmalloc(sizeof(struct audit_tree) + strlen(s) + 1, GFP_KERNEL);
	if (tree) {
		atomic_set(&tree->count, 1);
		tree->goner = 0;
		INIT_LIST_HEAD(&tree->chunks);
		INIT_LIST_HEAD(&tree->rules);
		INIT_LIST_HEAD(&tree->list);
		INIT_LIST_HEAD(&tree->same_root);
		tree->root = NULL;
		strcpy(tree->pathname, s);
	}
	return tree;
}

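/*
 * Tree refcounting: get_tree() grabs a reference, put_tree() drops one
 * and frees the tree via RCU once the last reference is gone.
 */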
static inline void get_tree(struct audit_tree *tree)
{
	atomic_inc(&tree->count);
}

static void __put_tree(struct rcu_head *rcu)
{
	struct audit_tree *tree = container_of(rcu, struct audit_tree, head);
	kfree(tree);
}

static inline void put_tree(struct audit_tree *tree)
{
	if (atomic_dec_and_test(&tree->count))
		call_rcu(&tree->head, __put_tree);
}

/* to avoid bringing the entire thing into audit.h */
const char *audit_tree_path(struct audit_tree *tree)
{
	return tree->pathname;
}

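/*
 * Allocate a chunk with 'count' owner slots.  Each slot's index is
 * seeded with its array position; taggings that may need to be
 * reverted get the high bit (1U<<31) ORed in later.
 */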
static struct audit_chunk *alloc_chunk(int count)
{
	struct audit_chunk *chunk;
	size_t size;
	int i;

	size = offsetof(struct audit_chunk, owners) + count * sizeof(struct node);
	chunk = kzalloc(size, GFP_KERNEL);
	if (!chunk)
		return NULL;

	INIT_LIST_HEAD(&chunk->hash);
	INIT_LIST_HEAD(&chunk->trees);
	chunk->count = count;
	atomic_long_set(&chunk->refs, 1);
	for (i = 0; i < count; i++) {
		INIT_LIST_HEAD(&chunk->owners[i].list);
		chunk->owners[i].index = i;
	}
	inotify_init_watch(&chunk->watch);
	return chunk;
}

static void free_chunk(struct audit_chunk *chunk)
{
	int i;

	for (i = 0; i < chunk->count; i++) {
		if (chunk->owners[i].owner)
			put_tree(chunk->owners[i].owner);
	}
	kfree(chunk);
}

void audit_put_chunk(struct audit_chunk *chunk)
{
	if (atomic_long_dec_and_test(&chunk->refs))
		free_chunk(chunk);
}

static void __put_chunk(struct rcu_head *rcu)
{
	struct audit_chunk *chunk = container_of(rcu, struct audit_chunk, head);
	audit_put_chunk(chunk);
}

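/*
 * inode -> chunk hash.  Lookups run under rcu_read_lock(), updates
 * under hash_lock; the key is derived from the inode's address.
 */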
enum {HASH_SIZE = 128};
static struct list_head chunk_hash_heads[HASH_SIZE];
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(hash_lock);

static inline struct list_head *chunk_hash(const struct inode *inode)
{
	unsigned long n = (unsigned long)inode / L1_CACHE_BYTES;
	return chunk_hash_heads + n % HASH_SIZE;
}

/* hash_lock is held by caller */
static void insert_hash(struct audit_chunk *chunk)
{
	struct list_head *list = chunk_hash(chunk->watch.inode);
	list_add_rcu(&chunk->hash, list);
}

/* called under rcu_read_lock */
struct audit_chunk *audit_tree_lookup(const struct inode *inode)
{
	struct list_head *list = chunk_hash(inode);
	struct audit_chunk *p;

	list_for_each_entry_rcu(p, list, hash) {
		if (p->watch.inode == inode) {
			atomic_long_inc(&p->refs);
			return p;
		}
	}
	return NULL;
}

int audit_tree_match(struct audit_chunk *chunk, struct audit_tree *tree)
{
	int n;
	for (n = 0; n < chunk->count; n++)
		if (chunk->owners[n].owner == tree)
			return 1;
	return 0;
}

/* tagging and untagging inodes with trees */

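/*
 * The low bits of node.index give the slot number within owners[], so
 * stepping the node pointer back by that many entries lands on
 * owners[0], from which container_of() recovers the chunk.
 */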
static struct audit_chunk *find_chunk(struct node *p)
{
	int index = p->index & ~(1U<<31);
	p -= index;
	return container_of(p, struct audit_chunk, owners[0]);
}

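/*
 * Drop one tree's tag from a chunk.  Called with hash_lock held; drops
 * and retakes it.  The chunk is normally replaced by a copy with one
 * owner slot fewer; if allocation or cloning fails we fall back to
 * merely orphaning the slot in place.
 */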
static void untag_chunk(struct node *p)
{
	struct audit_chunk *chunk = find_chunk(p);
	struct audit_chunk *new;
	struct audit_tree *owner;
	int size = chunk->count - 1;
	int i, j;

	if (!pin_inotify_watch(&chunk->watch)) {
		/*
		 * Filesystem is shutting down; all watches are getting
		 * evicted, just take it off the node list for this
		 * tree and let the eviction logic take care of the
		 * rest.
		 */
		owner = p->owner;
		if (owner->root == chunk) {
			list_del_init(&owner->same_root);
			owner->root = NULL;
		}
		list_del_init(&p->list);
		p->owner = NULL;
		put_tree(owner);
		return;
	}

	spin_unlock(&hash_lock);

	/*
	 * pin_inotify_watch() succeeded, so the watch won't go away
	 * from under us.
	 */
	mutex_lock(&chunk->watch.inode->inotify_mutex);
	if (chunk->dead) {
		mutex_unlock(&chunk->watch.inode->inotify_mutex);
		goto out;
	}

	owner = p->owner;

	if (!size) {
		chunk->dead = 1;
		spin_lock(&hash_lock);
		list_del_init(&chunk->trees);
		if (owner->root == chunk)
			owner->root = NULL;
		list_del_init(&p->list);
		list_del_rcu(&chunk->hash);
		spin_unlock(&hash_lock);
		inotify_evict_watch(&chunk->watch);
		mutex_unlock(&chunk->watch.inode->inotify_mutex);
		put_inotify_watch(&chunk->watch);
		goto out;
	}

	new = alloc_chunk(size);
	if (!new)
		goto Fallback;
	if (inotify_clone_watch(&chunk->watch, &new->watch) < 0) {
		free_chunk(new);
		goto Fallback;
	}

	chunk->dead = 1;
	spin_lock(&hash_lock);
	list_replace_init(&chunk->trees, &new->trees);
	if (owner->root == chunk) {
		list_del_init(&owner->same_root);
		owner->root = NULL;
	}

	for (i = j = 0; i < size; i++, j++) {
		struct audit_tree *s;
		if (&chunk->owners[j] == p) {
			list_del_init(&p->list);
			i--;
			continue;
		}
		s = chunk->owners[j].owner;
		new->owners[i].owner = s;
		new->owners[i].index = chunk->owners[j].index - j + i;
		if (!s) /* result of earlier fallback */
			continue;
		get_tree(s);
		/* j indexes the old chunk, i the new one */
		list_replace_init(&chunk->owners[j].list, &new->owners[i].list);
	}

	list_replace_rcu(&chunk->hash, &new->hash);
	list_for_each_entry(owner, &new->trees, same_root)
		owner->root = new;
	spin_unlock(&hash_lock);
	inotify_evict_watch(&chunk->watch);
	mutex_unlock(&chunk->watch.inode->inotify_mutex);
	put_inotify_watch(&chunk->watch);
	goto out;

Fallback:
	/* do the best we can */
	spin_lock(&hash_lock);
	if (owner->root == chunk) {
		list_del_init(&owner->same_root);
		owner->root = NULL;
	}
	list_del_init(&p->list);
	p->owner = NULL;
	put_tree(owner);
	spin_unlock(&hash_lock);
	mutex_unlock(&chunk->watch.inode->inotify_mutex);
out:
	unpin_inotify_watch(&chunk->watch);
	spin_lock(&hash_lock);
}

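/*
 * Attach a fresh single-owner chunk to an inode that has no watch yet;
 * tag_chunk() below handles inodes that are already watched.
 */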
static int create_chunk(struct inode *inode, struct audit_tree *tree)
{
	struct audit_chunk *chunk = alloc_chunk(1);
	if (!chunk)
		return -ENOMEM;

	if (inotify_add_watch(rtree_ih, &chunk->watch, inode, IN_IGNORED | IN_DELETE_SELF) < 0) {
		free_chunk(chunk);
		return -ENOSPC;
	}

	mutex_lock(&inode->inotify_mutex);
	spin_lock(&hash_lock);
	if (tree->goner) {
		spin_unlock(&hash_lock);
		chunk->dead = 1;
		inotify_evict_watch(&chunk->watch);
		mutex_unlock(&inode->inotify_mutex);
		put_inotify_watch(&chunk->watch);
		return 0;
	}
	chunk->owners[0].index = (1U << 31);
	chunk->owners[0].owner = tree;
	get_tree(tree);
	list_add(&chunk->owners[0].list, &tree->chunks);
	if (!tree->root) {
		tree->root = chunk;
		list_add(&tree->same_root, &chunk->trees);
	}
	insert_hash(chunk);
	spin_unlock(&hash_lock);
	mutex_unlock(&inode->inotify_mutex);
	return 0;
}

/*
 * Tag an already-watched inode with another tree: replace the chunk
 * with a copy that has one more owner slot.  The first tagged inode
 * becomes root of the tree.
 */
static int tag_chunk(struct inode *inode, struct audit_tree *tree)
{
	struct inotify_watch *watch;
	struct audit_tree *owner;
	struct audit_chunk *chunk, *old;
	struct node *p;
	int n;

	if (inotify_find_watch(rtree_ih, inode, &watch) < 0)
		return create_chunk(inode, tree);

	old = container_of(watch, struct audit_chunk, watch);

	/* are we already there? */
	spin_lock(&hash_lock);
	for (n = 0; n < old->count; n++) {
		if (old->owners[n].owner == tree) {
			spin_unlock(&hash_lock);
			put_inotify_watch(watch);
			return 0;
		}
	}
	spin_unlock(&hash_lock);

	chunk = alloc_chunk(old->count + 1);
	if (!chunk) {
		/* drop the reference inotify_find_watch() took */
		put_inotify_watch(&old->watch);
		return -ENOMEM;
	}

	mutex_lock(&inode->inotify_mutex);
	if (inotify_clone_watch(&old->watch, &chunk->watch) < 0) {
		mutex_unlock(&inode->inotify_mutex);
		put_inotify_watch(&old->watch);
		free_chunk(chunk);
		return -ENOSPC;
	}
	spin_lock(&hash_lock);
	if (tree->goner) {
		spin_unlock(&hash_lock);
		chunk->dead = 1;
		inotify_evict_watch(&chunk->watch);
		mutex_unlock(&inode->inotify_mutex);
		put_inotify_watch(&old->watch);
		put_inotify_watch(&chunk->watch);
		return 0;
	}
	list_replace_init(&old->trees, &chunk->trees);
	for (n = 0, p = chunk->owners; n < old->count; n++, p++) {
		struct audit_tree *s = old->owners[n].owner;
		p->owner = s;
		p->index = old->owners[n].index;
		if (!s) /* result of fallback in untag */
			continue;
		get_tree(s);
		list_replace_init(&old->owners[n].list, &p->list);
	}
	p->index = (chunk->count - 1) | (1U<<31);
	p->owner = tree;
	get_tree(tree);
	list_add(&p->list, &tree->chunks);
	list_replace_rcu(&old->hash, &chunk->hash);
	list_for_each_entry(owner, &chunk->trees, same_root)
		owner->root = chunk;
	old->dead = 1;
	if (!tree->root) {
		tree->root = chunk;
		list_add(&tree->same_root, &chunk->trees);
	}
	spin_unlock(&hash_lock);
	inotify_evict_watch(&old->watch);
	mutex_unlock(&inode->inotify_mutex);
	put_inotify_watch(&old->watch);
	return 0;
}

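/*
 * Detach and free every rule referring to the tree, logging an
 * AUDIT_CONFIG_CHANGE record for each.  Caller holds audit_filter_mutex.
 */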
static void kill_rules(struct audit_tree *tree)
{
	struct audit_krule *rule, *next;
	struct audit_entry *entry;
	struct audit_buffer *ab;

	list_for_each_entry_safe(rule, next, &tree->rules, rlist) {
		entry = container_of(rule, struct audit_entry, rule);

		list_del_init(&rule->rlist);
		if (rule->tree) {
			/* not a half-baked one */
			ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
			audit_log_format(ab, "op=remove rule dir=");
			audit_log_untrustedstring(ab, rule->tree->pathname);
			if (rule->filterkey) {
				audit_log_format(ab, " key=");
				audit_log_untrustedstring(ab, rule->filterkey);
			} else
				audit_log_format(ab, " key=(null)");
			audit_log_format(ab, " list=%d res=1", rule->listnr);
			audit_log_end(ab);
			rule->tree = NULL;
			list_del_rcu(&entry->list);
			list_del(&entry->rule.list);
			call_rcu(&entry->rcu, audit_free_rule_rcu);
		}
	}
}

/*
 * finish killing struct audit_tree: untag every chunk and drop the
 * caller's reference
 */
static void prune_one(struct audit_tree *victim)
{
	spin_lock(&hash_lock);
	while (!list_empty(&victim->chunks)) {
		struct node *p;

		p = list_entry(victim->chunks.next, struct node, list);

		untag_chunk(p);
	}
	spin_unlock(&hash_lock);
	put_tree(victim);
}

/* trim the uncommitted chunks from tree */

static void trim_marked(struct audit_tree *tree)
{
	struct list_head *p, *q;
	spin_lock(&hash_lock);
	if (tree->goner) {
		spin_unlock(&hash_lock);
		return;
	}
	/* reorder: move marked ('will prune') chunks to the front */
	for (p = tree->chunks.next; p != &tree->chunks; p = q) {
		struct node *node = list_entry(p, struct node, list);
		q = p->next;
		if (node->index & (1U<<31)) {
			list_del_init(p);
			list_add(p, &tree->chunks);
		}
	}

	while (!list_empty(&tree->chunks)) {
		struct node *node;

		node = list_entry(tree->chunks.next, struct node, list);

		/* have we run out of marked? */
		if (!(node->index & (1U<<31)))
			break;

		untag_chunk(node);
	}
	if (!tree->root && !tree->goner) {
		tree->goner = 1;
		spin_unlock(&hash_lock);
		mutex_lock(&audit_filter_mutex);
		kill_rules(tree);
		list_del_init(&tree->list);
		mutex_unlock(&audit_filter_mutex);
		prune_one(tree);
	} else {
		spin_unlock(&hash_lock);
	}
}

/* called with audit_filter_mutex */
int audit_remove_tree_rule(struct audit_krule *rule)
{
	struct audit_tree *tree;
	tree = rule->tree;
	if (tree) {
		spin_lock(&hash_lock);
		list_del_init(&rule->rlist);
		if (list_empty(&tree->rules) && !tree->goner) {
			tree->root = NULL;
			list_del_init(&tree->same_root);
			tree->goner = 1;
			list_move(&tree->list, &prune_list);
			rule->tree = NULL;
			spin_unlock(&hash_lock);
			audit_schedule_prune();
			return 1;
		}
		rule->tree = NULL;
		spin_unlock(&hash_lock);
		return 1;
	}
	return 0;
}

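/*
 * Walk tree_list with a cursor node so audit_filter_mutex can be
 * dropped per tree; every chunk gets marked and then unmarked if its
 * inode is still reachable from the tree's collected mounts, so
 * trim_marked() prunes only the stale ones.
 */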
void audit_trim_trees(void)
{
	struct list_head cursor;

	mutex_lock(&audit_filter_mutex);
	list_add(&cursor, &tree_list);
	while (cursor.next != &tree_list) {
		struct audit_tree *tree;
		struct path path;
		struct vfsmount *root_mnt;
		struct node *node;
		struct list_head list;
		int err;

		tree = container_of(cursor.next, struct audit_tree, list);
		get_tree(tree);
		list_del(&cursor);
		list_add(&cursor, &tree->list);
		mutex_unlock(&audit_filter_mutex);

		err = kern_path(tree->pathname, 0, &path);
		if (err)
			goto skip_it;

		root_mnt = collect_mounts(path.mnt, path.dentry);
		path_put(&path);
		if (!root_mnt)
			goto skip_it;

		list_add_tail(&list, &root_mnt->mnt_list);
		spin_lock(&hash_lock);
		list_for_each_entry(node, &tree->chunks, list) {
			struct audit_chunk *chunk = find_chunk(node);
			struct inode *inode = chunk->watch.inode;
			struct vfsmount *mnt;
			node->index |= 1U<<31;
			list_for_each_entry(mnt, &list, mnt_list) {
				if (mnt->mnt_root->d_inode == inode) {
					node->index &= ~(1U<<31);
					break;
				}
			}
		}
		spin_unlock(&hash_lock);
		trim_marked(tree);
		put_tree(tree);
		list_del_init(&list);
		drop_collected_mounts(root_mnt);
skip_it:
		mutex_lock(&audit_filter_mutex);
	}
	list_del(&cursor);
	mutex_unlock(&audit_filter_mutex);
}

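/* is the mnt/dentry pair located under path?  Walks up the mount tree. */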
static int is_under(struct vfsmount *mnt, struct dentry *dentry,
		    struct path *path)
{
	if (mnt != path->mnt) {
		for (;;) {
			if (mnt->mnt_parent == mnt)
				return 0;
			if (mnt->mnt_parent == path->mnt)
				break;
			mnt = mnt->mnt_parent;
		}
		dentry = mnt->mnt_mountpoint;
	}
	return is_subdir(dentry, path->dentry);
}

int audit_make_tree(struct audit_krule *rule, char *pathname, u32 op)
{
	if (pathname[0] != '/' ||
	    rule->listnr != AUDIT_FILTER_EXIT ||
	    op != Audit_equal ||
	    rule->inode_f || rule->watch || rule->tree)
		return -EINVAL;
	rule->tree = alloc_tree(pathname);
	if (!rule->tree)
		return -ENOMEM;
	return 0;
}

void audit_put_tree(struct audit_tree *tree)
{
	put_tree(tree);
}

/* called with audit_filter_mutex */
int audit_add_tree_rule(struct audit_krule *rule)
{
	struct audit_tree *seed = rule->tree, *tree;
	struct path path;
	struct vfsmount *mnt, *p;
	struct list_head list;
	int err;

	list_for_each_entry(tree, &tree_list, list) {
		if (!strcmp(seed->pathname, tree->pathname)) {
			put_tree(seed);
			rule->tree = tree;
			list_add(&rule->rlist, &tree->rules);
			return 0;
		}
	}
	tree = seed;
	list_add(&tree->list, &tree_list);
	list_add(&rule->rlist, &tree->rules);
	/* do not set rule->tree yet */
	mutex_unlock(&audit_filter_mutex);

	err = kern_path(tree->pathname, 0, &path);
	if (err)
		goto Err;
	mnt = collect_mounts(path.mnt, path.dentry);
	path_put(&path);
	if (!mnt) {
		err = -ENOMEM;
		goto Err;
	}
	list_add_tail(&list, &mnt->mnt_list);

	get_tree(tree);
	list_for_each_entry(p, &list, mnt_list) {
		err = tag_chunk(p->mnt_root->d_inode, tree);
		if (err)
			break;
	}

	list_del(&list);
	drop_collected_mounts(mnt);

	if (!err) {
		struct node *node;
		spin_lock(&hash_lock);
		list_for_each_entry(node, &tree->chunks, list)
			node->index &= ~(1U<<31);
		spin_unlock(&hash_lock);
	} else {
		trim_marked(tree);
		goto Err;
	}

	mutex_lock(&audit_filter_mutex);
	if (list_empty(&rule->rlist)) {
		put_tree(tree);
		return -ENOENT;
	}
	rule->tree = tree;
	put_tree(tree);

	return 0;
Err:
	mutex_lock(&audit_filter_mutex);
	list_del_init(&tree->list);
	list_del_init(&tree->rules);
	put_tree(tree);
	return err;
}

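/*
 * Re-tag trees after a subtree has been relocated: the mounts
 * collected at 'new' get tagged by every tree whose root lives under
 * the 'old' location.
 */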
int audit_tag_tree(char *old, char *new)
{
	struct list_head cursor, barrier;
	int failed = 0;
	struct path path;
	struct vfsmount *tagged;
	struct list_head list;
	struct vfsmount *mnt;
	struct dentry *dentry;
	int err;

	err = kern_path(new, 0, &path);
	if (err)
		return err;
	tagged = collect_mounts(path.mnt, path.dentry);
	path_put(&path);
	if (!tagged)
		return -ENOMEM;

	err = kern_path(old, 0, &path);
	if (err) {
		drop_collected_mounts(tagged);
		return err;
	}
	mnt = mntget(path.mnt);
	dentry = dget(path.dentry);
	path_put(&path);

	if (dentry == tagged->mnt_root && dentry == mnt->mnt_root)
		follow_up(&mnt, &dentry);

	list_add_tail(&list, &tagged->mnt_list);

	mutex_lock(&audit_filter_mutex);
	list_add(&barrier, &tree_list);
	list_add(&cursor, &barrier);

	while (cursor.next != &tree_list) {
		struct audit_tree *tree;
		struct vfsmount *p;

		tree = container_of(cursor.next, struct audit_tree, list);
		get_tree(tree);
		list_del(&cursor);
		list_add(&cursor, &tree->list);
		mutex_unlock(&audit_filter_mutex);

		err = kern_path(tree->pathname, 0, &path);
		if (err) {
			put_tree(tree);
			mutex_lock(&audit_filter_mutex);
			continue;
		}

		spin_lock(&vfsmount_lock);
		if (!is_under(mnt, dentry, &path)) {
			spin_unlock(&vfsmount_lock);
			path_put(&path);
			put_tree(tree);
			mutex_lock(&audit_filter_mutex);
			continue;
		}
		spin_unlock(&vfsmount_lock);
		path_put(&path);

		list_for_each_entry(p, &list, mnt_list) {
			failed = tag_chunk(p->mnt_root->d_inode, tree);
			if (failed)
				break;
		}

		if (failed) {
			put_tree(tree);
			mutex_lock(&audit_filter_mutex);
			break;
		}

		mutex_lock(&audit_filter_mutex);
		spin_lock(&hash_lock);
		if (!tree->goner) {
			list_del(&tree->list);
			list_add(&tree->list, &tree_list);
		}
		spin_unlock(&hash_lock);
		put_tree(tree);
	}

	while (barrier.prev != &tree_list) {
		struct audit_tree *tree;

		tree = container_of(barrier.prev, struct audit_tree, list);
		get_tree(tree);
		list_del(&tree->list);
		list_add(&tree->list, &barrier);
		mutex_unlock(&audit_filter_mutex);

		if (!failed) {
			struct node *node;
			spin_lock(&hash_lock);
			list_for_each_entry(node, &tree->chunks, list)
				node->index &= ~(1U<<31);
			spin_unlock(&hash_lock);
		} else {
			trim_marked(tree);
		}

		put_tree(tree);
		mutex_lock(&audit_filter_mutex);
	}
	list_del(&barrier);
	list_del(&cursor);
	list_del(&list);
	mutex_unlock(&audit_filter_mutex);
	dput(dentry);
	mntput(mnt);
	drop_collected_mounts(tagged);
	return failed;
}

/*
 * That gets run when evict_chunk() ends up needing to kill audit_tree.
 * Runs from a separate thread, with audit_cmd_mutex held.
 */
void audit_prune_trees(void)
{
	mutex_lock(&audit_filter_mutex);

	while (!list_empty(&prune_list)) {
		struct audit_tree *victim;

		victim = list_entry(prune_list.next, struct audit_tree, list);
		list_del_init(&victim->list);

		mutex_unlock(&audit_filter_mutex);

		prune_one(victim);

		mutex_lock(&audit_filter_mutex);
	}

	mutex_unlock(&audit_filter_mutex);
}

/*
 *  Here comes the stuff asynchronous to auditctl operations
 */

/* inode->inotify_mutex is locked */
static void evict_chunk(struct audit_chunk *chunk)
{
	struct audit_tree *owner;
	int n;

	if (chunk->dead)
		return;

	chunk->dead = 1;
	mutex_lock(&audit_filter_mutex);
	spin_lock(&hash_lock);
	while (!list_empty(&chunk->trees)) {
		owner = list_entry(chunk->trees.next,
				   struct audit_tree, same_root);
		owner->goner = 1;
		owner->root = NULL;
		list_del_init(&owner->same_root);
		spin_unlock(&hash_lock);
		kill_rules(owner);
		list_move(&owner->list, &prune_list);
		audit_schedule_prune();
		spin_lock(&hash_lock);
	}
	list_del_rcu(&chunk->hash);
	for (n = 0; n < chunk->count; n++)
		list_del_init(&chunk->owners[n].list);
	spin_unlock(&hash_lock);
	mutex_unlock(&audit_filter_mutex);
}

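/*
 * inotify delivers IN_IGNORED once a watch is being removed; treat it
 * as the cue to evict the chunk and drop the reference pinned on the
 * watch.
 */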
static void handle_event(struct inotify_watch *watch, u32 wd, u32 mask,
			 u32 cookie, const char *dname, struct inode *inode)
{
	struct audit_chunk *chunk = container_of(watch, struct audit_chunk, watch);

	if (mask & IN_IGNORED) {
		evict_chunk(chunk);
		put_inotify_watch(watch);
	}
}

static void destroy_watch(struct inotify_watch *watch)
{
	struct audit_chunk *chunk = container_of(watch, struct audit_chunk, watch);
	call_rcu(&chunk->head, __put_chunk);
}

static const struct inotify_operations rtree_inotify_ops = {
	.handle_event	= handle_event,
	.destroy_watch	= destroy_watch,
};

static int __init audit_tree_init(void)
{
	int i;

	rtree_ih = inotify_init(&rtree_inotify_ops);
	if (IS_ERR(rtree_ih))
		audit_panic("cannot initialize inotify handle for rectree watches");

	for (i = 0; i < HASH_SIZE; i++)
		INIT_LIST_HEAD(&chunk_hash_heads[i]);

	return 0;
}
__initcall(audit_tree_init);