xref: /openbmc/linux/kernel/audit_tree.c (revision 367b8112)
#include "audit.h"
#include <linux/inotify.h>
#include <linux/namei.h>
#include <linux/mount.h>

struct audit_tree;
struct audit_chunk;

struct audit_tree {
	atomic_t count;
	int goner;
	struct audit_chunk *root;
	struct list_head chunks;
	struct list_head rules;
	struct list_head list;
	struct list_head same_root;
	struct rcu_head head;
	char pathname[];
};

struct audit_chunk {
	struct list_head hash;
	struct inotify_watch watch;
	struct list_head trees;		/* with root here */
	int dead;
	int count;
	struct rcu_head head;
	struct node {
		struct list_head list;
		struct audit_tree *owner;
		unsigned index;		/* index; upper bit indicates 'will prune' */
	} owners[];
};

static LIST_HEAD(tree_list);
static LIST_HEAD(prune_list);

/*
 * One struct chunk is attached to each inode of interest.
 * We replace struct chunk on tagging/untagging.
 * Rules have a pointer to struct audit_tree.
 * Rules have struct list_head rlist forming a list of rules over
 * the same tree.
 * References to struct chunk are collected at audit_inode{,_child}()
 * time and used in AUDIT_TREE rule matching.
 * These references are dropped at the same time we are calling
 * audit_free_names(), etc.
 *
 * Cyclic lists galore:
 * tree.chunks anchors chunk.owners[].list			hash_lock
 * tree.rules anchors rule.rlist				audit_filter_mutex
 * chunk.trees anchors tree.same_root				hash_lock
 * chunk.hash is a hash with middle bits of watch.inode as
 * a hash function.						RCU, hash_lock
 *
 * tree is refcounted; one reference for "some rules on rules_list refer to
 * it", one for each chunk with a pointer to it.
 *
 * chunk is refcounted by embedded inotify_watch.
 *
 * node.index allows us to get from node.list to the containing chunk.
 * The MSB of that sucker is stolen to mark taggings that we might have to
 * revert - several operations have very unpleasant cleanup logic and
 * that makes a difference.  Some.
 */
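
/*
 * Illustrative example: a chunk shared by three trees has owners[0..2]
 * with index values 0, 1 and 2; setting bit 31 in owners[1].index marks
 * that tagging as provisional ("will prune").  find_chunk() below masks
 * the bit off and uses the remaining value as the offset back to
 * owners[0], from which container_of() recovers the chunk itself.
 */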

static struct inotify_handle *rtree_ih;

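/*
 * Allocate a tree with the watched pathname copied into the flexible
 * array member; the refcount starts out at 1, held by the rule (or
 * caller) that asked for it.
 */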
static struct audit_tree *alloc_tree(const char *s)
{
	struct audit_tree *tree;

	tree = kmalloc(sizeof(struct audit_tree) + strlen(s) + 1, GFP_KERNEL);
	if (tree) {
		atomic_set(&tree->count, 1);
		tree->goner = 0;
		INIT_LIST_HEAD(&tree->chunks);
		INIT_LIST_HEAD(&tree->rules);
		INIT_LIST_HEAD(&tree->list);
		INIT_LIST_HEAD(&tree->same_root);
		tree->root = NULL;
		strcpy(tree->pathname, s);
	}
	return tree;
}

static inline void get_tree(struct audit_tree *tree)
{
	atomic_inc(&tree->count);
}

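/*
 * Trees are freed RCU-delayed, so that match code walking from a chunk
 * to its owner trees under rcu_read_lock() never steps on freed memory.
 */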
static void __put_tree(struct rcu_head *rcu)
{
	struct audit_tree *tree = container_of(rcu, struct audit_tree, head);
	kfree(tree);
}

static inline void put_tree(struct audit_tree *tree)
{
	if (atomic_dec_and_test(&tree->count))
		call_rcu(&tree->head, __put_tree);
}

/* to avoid bringing the entire thing into audit.h */
const char *audit_tree_path(struct audit_tree *tree)
{
	return tree->pathname;
}

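/*
 * Allocate a chunk with 'count' slots in the flexible owners[] array.
 * Each slot's index is preset to its array position, which is what
 * find_chunk() later relies on; e.g. alloc_chunk(2) yields one
 * allocation holding the header plus owners[0] (index 0) and
 * owners[1] (index 1).
 */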
static struct audit_chunk *alloc_chunk(int count)
{
	struct audit_chunk *chunk;
	size_t size;
	int i;

	size = offsetof(struct audit_chunk, owners) + count * sizeof(struct node);
	chunk = kzalloc(size, GFP_KERNEL);
	if (!chunk)
		return NULL;

	INIT_LIST_HEAD(&chunk->hash);
	INIT_LIST_HEAD(&chunk->trees);
	chunk->count = count;
	for (i = 0; i < count; i++) {
		INIT_LIST_HEAD(&chunk->owners[i].list);
		chunk->owners[i].index = i;
	}
	inotify_init_watch(&chunk->watch);
	return chunk;
}

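/*
 * Chunk freeing is likewise RCU-deferred: audit_tree_lookup() walks the
 * hash under nothing stronger than rcu_read_lock(), so the memory has
 * to survive a grace period after the chunk is unhashed.
 */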
static void __free_chunk(struct rcu_head *rcu)
{
	struct audit_chunk *chunk = container_of(rcu, struct audit_chunk, head);
	int i;

	for (i = 0; i < chunk->count; i++) {
		if (chunk->owners[i].owner)
			put_tree(chunk->owners[i].owner);
	}
	kfree(chunk);
}

static inline void free_chunk(struct audit_chunk *chunk)
{
	call_rcu(&chunk->head, __free_chunk);
}

void audit_put_chunk(struct audit_chunk *chunk)
{
	put_inotify_watch(&chunk->watch);
}

enum {HASH_SIZE = 128};
static struct list_head chunk_hash_heads[HASH_SIZE];
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(hash_lock);

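/*
 * Hash on the inode address.  Dividing by L1_CACHE_BYTES strips the
 * low-order bits, which are near-constant for slab-allocated inodes,
 * before the modulo picks a bucket - the "middle bits of watch.inode"
 * hashing described above.
 */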
static inline struct list_head *chunk_hash(const struct inode *inode)
{
	unsigned long n = (unsigned long)inode / L1_CACHE_BYTES;
	return chunk_hash_heads + n % HASH_SIZE;
}

/* hash_lock is held by caller */
static void insert_hash(struct audit_chunk *chunk)
{
	struct list_head *list = chunk_hash(chunk->watch.inode);
	list_add_rcu(&chunk->hash, list);
}

/* called under rcu_read_lock */
struct audit_chunk *audit_tree_lookup(const struct inode *inode)
{
	struct list_head *list = chunk_hash(inode);
	struct audit_chunk *p;

	list_for_each_entry_rcu(p, list, hash) {
		if (p->watch.inode == inode) {
			get_inotify_watch(&p->watch);
			return p;
		}
	}
	return NULL;
}

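/*
 * Does any slot of this chunk belong to the given tree?  The caller is
 * expected to hold a reference on the chunk, e.g. the one taken by
 * audit_tree_lookup() above.
 */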
int audit_tree_match(struct audit_chunk *chunk, struct audit_tree *tree)
{
	int n;
	for (n = 0; n < chunk->count; n++)
		if (chunk->owners[n].owner == tree)
			return 1;
	return 0;
}

/* tagging and untagging inodes with trees */

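/*
 * Chunks are replace-on-write.  To drop one tree's tag we build a
 * replacement chunk with one slot fewer, clone the inotify watch onto
 * it and switch the hash and list linkage over under hash_lock, so RCU
 * readers see either the old or the new chunk, never a half-edited one.
 * If allocation or watch cloning fails, fall back to merely emptying
 * the slot in place.
 */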
static void untag_chunk(struct audit_chunk *chunk, struct node *p)
{
	struct audit_chunk *new;
	struct audit_tree *owner;
	int size = chunk->count - 1;
	int i, j;

	mutex_lock(&chunk->watch.inode->inotify_mutex);
	if (chunk->dead) {
		mutex_unlock(&chunk->watch.inode->inotify_mutex);
		return;
	}

	owner = p->owner;

	if (!size) {
		chunk->dead = 1;
		spin_lock(&hash_lock);
		list_del_init(&chunk->trees);
		if (owner->root == chunk)
			owner->root = NULL;
		list_del_init(&p->list);
		list_del_rcu(&chunk->hash);
		spin_unlock(&hash_lock);
		inotify_evict_watch(&chunk->watch);
		mutex_unlock(&chunk->watch.inode->inotify_mutex);
		put_inotify_watch(&chunk->watch);
		return;
	}

	new = alloc_chunk(size);
	if (!new)
		goto Fallback;
	if (inotify_clone_watch(&chunk->watch, &new->watch) < 0) {
		free_chunk(new);
		goto Fallback;
	}

	chunk->dead = 1;
	spin_lock(&hash_lock);
	list_replace_init(&chunk->trees, &new->trees);
	if (owner->root == chunk) {
		list_del_init(&owner->same_root);
		owner->root = NULL;
	}

	for (i = j = 0; i < size; i++, j++) {
		struct audit_tree *s;
		if (&chunk->owners[j] == p) {
			list_del_init(&p->list);
			i--;
			continue;
		}
		s = chunk->owners[j].owner;
		new->owners[i].owner = s;
		new->owners[i].index = chunk->owners[j].index - j + i;
		if (!s) /* result of earlier fallback */
			continue;
		get_tree(s);
		list_replace_init(&chunk->owners[j].list, &new->owners[i].list);
	}

	list_replace_rcu(&chunk->hash, &new->hash);
	list_for_each_entry(owner, &new->trees, same_root)
		owner->root = new;
	spin_unlock(&hash_lock);
	inotify_evict_watch(&chunk->watch);
	mutex_unlock(&chunk->watch.inode->inotify_mutex);
	put_inotify_watch(&chunk->watch);
	return;

Fallback:
	// do the best we can
	spin_lock(&hash_lock);
	if (owner->root == chunk) {
		list_del_init(&owner->same_root);
		owner->root = NULL;
	}
	list_del_init(&p->list);
	p->owner = NULL;
	put_tree(owner);
	spin_unlock(&hash_lock);
	mutex_unlock(&chunk->watch.inode->inotify_mutex);
}

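/*
 * First tagging of an inode: a fresh single-slot chunk gets a watch
 * with IN_IGNORED | IN_DELETE_SELF, so handle_event() hears about the
 * watch (and thus the inode) going away.  The slot is born with bit 31
 * set - the tagging stays provisional until the caller commits it by
 * clearing the mark.
 */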
static int create_chunk(struct inode *inode, struct audit_tree *tree)
{
	struct audit_chunk *chunk = alloc_chunk(1);
	if (!chunk)
		return -ENOMEM;

	if (inotify_add_watch(rtree_ih, &chunk->watch, inode, IN_IGNORED | IN_DELETE_SELF) < 0) {
		free_chunk(chunk);
		return -ENOSPC;
	}

	mutex_lock(&inode->inotify_mutex);
	spin_lock(&hash_lock);
	if (tree->goner) {
		spin_unlock(&hash_lock);
		chunk->dead = 1;
		inotify_evict_watch(&chunk->watch);
		mutex_unlock(&inode->inotify_mutex);
		put_inotify_watch(&chunk->watch);
		return 0;
	}
	chunk->owners[0].index = (1U << 31);
	chunk->owners[0].owner = tree;
	get_tree(tree);
	list_add(&chunk->owners[0].list, &tree->chunks);
	if (!tree->root) {
		tree->root = chunk;
		list_add(&tree->same_root, &chunk->trees);
	}
	insert_hash(chunk);
	spin_unlock(&hash_lock);
	mutex_unlock(&inode->inotify_mutex);
	return 0;
}

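/*
 * Adding a tree to an already-tagged inode grows the chunk by one slot,
 * using the same replace-on-write protocol as untag_chunk(); the new
 * slot is marked provisional (bit 31) until the caller commits it.
 */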
/* the first tagged inode becomes root of tree */
static int tag_chunk(struct inode *inode, struct audit_tree *tree)
{
	struct inotify_watch *watch;
	struct audit_tree *owner;
	struct audit_chunk *chunk, *old;
	struct node *p;
	int n;

	if (inotify_find_watch(rtree_ih, inode, &watch) < 0)
		return create_chunk(inode, tree);

	old = container_of(watch, struct audit_chunk, watch);

	/* are we already there? */
	spin_lock(&hash_lock);
	for (n = 0; n < old->count; n++) {
		if (old->owners[n].owner == tree) {
			spin_unlock(&hash_lock);
			put_inotify_watch(watch);
			return 0;
		}
	}
	spin_unlock(&hash_lock);

	chunk = alloc_chunk(old->count + 1);
	if (!chunk)
		return -ENOMEM;

	mutex_lock(&inode->inotify_mutex);
	if (inotify_clone_watch(&old->watch, &chunk->watch) < 0) {
		mutex_unlock(&inode->inotify_mutex);
		free_chunk(chunk);
		return -ENOSPC;
	}
	spin_lock(&hash_lock);
	if (tree->goner) {
		spin_unlock(&hash_lock);
		chunk->dead = 1;
		inotify_evict_watch(&chunk->watch);
		mutex_unlock(&inode->inotify_mutex);
		put_inotify_watch(&chunk->watch);
		return 0;
	}
	list_replace_init(&old->trees, &chunk->trees);
	for (n = 0, p = chunk->owners; n < old->count; n++, p++) {
		struct audit_tree *s = old->owners[n].owner;
		p->owner = s;
		p->index = old->owners[n].index;
		if (!s) /* result of fallback in untag */
			continue;
		get_tree(s);
		list_replace_init(&old->owners[n].list, &p->list);
	}
	p->index = (chunk->count - 1) | (1U<<31);
	p->owner = tree;
	get_tree(tree);
	list_add(&p->list, &tree->chunks);
	list_replace_rcu(&old->hash, &chunk->hash);
	list_for_each_entry(owner, &chunk->trees, same_root)
		owner->root = chunk;
	old->dead = 1;
	if (!tree->root) {
		tree->root = chunk;
		list_add(&tree->same_root, &chunk->trees);
	}
	spin_unlock(&hash_lock);
	inotify_evict_watch(&old->watch);
	mutex_unlock(&inode->inotify_mutex);
	put_inotify_watch(&old->watch);
	return 0;
}

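/*
 * Get from a node to the chunk containing it.  The low bits of p->index
 * hold the slot number, so (illustratively) for p == &chunk->owners[2]
 * the index is 2, p - 2 == &chunk->owners[0], and container_of() does
 * the rest.
 */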
static struct audit_chunk *find_chunk(struct node *p)
{
	int index = p->index & ~(1U<<31);
	p -= index;
	return container_of(p, struct audit_chunk, owners[0]);
}

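/*
 * Detach every rule hanging off a dying tree and emit an
 * AUDIT_CONFIG_CHANGE record for each fully initialized one.
 * Called with audit_filter_mutex held.
 */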
static void kill_rules(struct audit_tree *tree)
{
	struct audit_krule *rule, *next;
	struct audit_entry *entry;
	struct audit_buffer *ab;

	list_for_each_entry_safe(rule, next, &tree->rules, rlist) {
		entry = container_of(rule, struct audit_entry, rule);

		list_del_init(&rule->rlist);
		if (rule->tree) {
			/* not a half-baked one */
			ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
			audit_log_format(ab, "op=remove rule dir=");
			audit_log_untrustedstring(ab, rule->tree->pathname);
			if (rule->filterkey) {
				audit_log_format(ab, " key=");
				audit_log_untrustedstring(ab, rule->filterkey);
			} else
				audit_log_format(ab, " key=(null)");
			audit_log_format(ab, " list=%d res=1", rule->listnr);
			audit_log_end(ab);
			rule->tree = NULL;
			list_del_rcu(&entry->list);
			call_rcu(&entry->rcu, audit_free_rule_rcu);
		}
	}
}

/*
 * finish killing struct audit_tree
 */
static void prune_one(struct audit_tree *victim)
{
	spin_lock(&hash_lock);
	while (!list_empty(&victim->chunks)) {
		struct node *p;
		struct audit_chunk *chunk;

		p = list_entry(victim->chunks.next, struct node, list);
		chunk = find_chunk(p);
		get_inotify_watch(&chunk->watch);
		spin_unlock(&hash_lock);

		untag_chunk(chunk, p);

		put_inotify_watch(&chunk->watch);
		spin_lock(&hash_lock);
	}
	spin_unlock(&hash_lock);
	put_tree(victim);
}


/* trim the uncommitted chunks from tree */

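/*
 * Untag every chunk whose tagging is still marked provisional (bit 31
 * set).  Marked nodes are first moved to the front of tree->chunks so
 * a single pass from the head catches exactly the marked ones.
 */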
453 {
454 	struct list_head *p, *q;
455 	spin_lock(&hash_lock);
456 	if (tree->goner) {
457 		spin_unlock(&hash_lock);
458 		return;
459 	}
460 	/* reorder */
461 	for (p = tree->chunks.next; p != &tree->chunks; p = q) {
462 		struct node *node = list_entry(p, struct node, list);
463 		q = p->next;
464 		if (node->index & (1U<<31)) {
465 			list_del_init(p);
466 			list_add(p, &tree->chunks);
467 		}
468 	}
469 
470 	while (!list_empty(&tree->chunks)) {
471 		struct node *node;
472 		struct audit_chunk *chunk;
473 
474 		node = list_entry(tree->chunks.next, struct node, list);
475 
476 		/* have we run out of marked? */
477 		if (!(node->index & (1U<<31)))
478 			break;
479 
480 		chunk = find_chunk(node);
481 		get_inotify_watch(&chunk->watch);
482 		spin_unlock(&hash_lock);
483 
484 		untag_chunk(chunk, node);
485 
486 		put_inotify_watch(&chunk->watch);
487 		spin_lock(&hash_lock);
488 	}
489 	if (!tree->root && !tree->goner) {
490 		tree->goner = 1;
491 		spin_unlock(&hash_lock);
492 		mutex_lock(&audit_filter_mutex);
493 		kill_rules(tree);
494 		list_del_init(&tree->list);
495 		mutex_unlock(&audit_filter_mutex);
496 		prune_one(tree);
497 	} else {
498 		spin_unlock(&hash_lock);
499 	}
500 }
501 
/* called with audit_filter_mutex */
int audit_remove_tree_rule(struct audit_krule *rule)
{
	struct audit_tree *tree;
	tree = rule->tree;
	if (tree) {
		spin_lock(&hash_lock);
		list_del_init(&rule->rlist);
		if (list_empty(&tree->rules) && !tree->goner) {
			tree->root = NULL;
			list_del_init(&tree->same_root);
			tree->goner = 1;
			list_move(&tree->list, &prune_list);
			rule->tree = NULL;
			spin_unlock(&hash_lock);
			audit_schedule_prune();
			return 1;
		}
		rule->tree = NULL;
		spin_unlock(&hash_lock);
		return 1;
	}
	return 0;
}

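/*
 * The cursor below is a dummy list element threaded through tree_list
 * so that audit_filter_mutex can be dropped while each tree is checked:
 * chunks whose inodes no longer show up as mount roots among the tree's
 * collected mounts are marked and then trimmed via trim_marked().
 */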
void audit_trim_trees(void)
{
	struct list_head cursor;

	mutex_lock(&audit_filter_mutex);
	list_add(&cursor, &tree_list);
	while (cursor.next != &tree_list) {
		struct audit_tree *tree;
		struct path path;
		struct vfsmount *root_mnt;
		struct node *node;
		struct list_head list;
		int err;

		tree = container_of(cursor.next, struct audit_tree, list);
		get_tree(tree);
		list_del(&cursor);
		list_add(&cursor, &tree->list);
		mutex_unlock(&audit_filter_mutex);

		err = kern_path(tree->pathname, 0, &path);
		if (err)
			goto skip_it;

		root_mnt = collect_mounts(path.mnt, path.dentry);
		path_put(&path);
		if (!root_mnt)
			goto skip_it;

		list_add_tail(&list, &root_mnt->mnt_list);
		spin_lock(&hash_lock);
		list_for_each_entry(node, &tree->chunks, list) {
			struct audit_chunk *chunk = find_chunk(node);
			struct inode *inode = chunk->watch.inode;
			struct vfsmount *mnt;
			node->index |= 1U<<31;
			list_for_each_entry(mnt, &list, mnt_list) {
				if (mnt->mnt_root->d_inode == inode) {
					node->index &= ~(1U<<31);
					break;
				}
			}
		}
		spin_unlock(&hash_lock);
		trim_marked(tree);
		put_tree(tree);
		list_del_init(&list);
		drop_collected_mounts(root_mnt);
skip_it:
		mutex_lock(&audit_filter_mutex);
	}
	list_del(&cursor);
	mutex_unlock(&audit_filter_mutex);
}

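/*
 * Is <mnt, dentry> inside the subtree identified by path?  Climb the
 * mount tree until we reach path->mnt (giving up at the root), then it
 * reduces to an is_subdir() check on dentries.
 */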
static int is_under(struct vfsmount *mnt, struct dentry *dentry,
		    struct path *path)
{
	if (mnt != path->mnt) {
		for (;;) {
			if (mnt->mnt_parent == mnt)
				return 0;
			if (mnt->mnt_parent == path->mnt)
				break;
			mnt = mnt->mnt_parent;
		}
		dentry = mnt->mnt_mountpoint;
	}
	return is_subdir(dentry, path->dentry);
}

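/*
 * Validate and allocate a tree rule.  Only absolute paths on the exit
 * filter list with a pure-equality comparison qualify, and the rule
 * must not already carry an inode, watch or tree.  Such rules normally
 * originate from userspace as something like
 * "auditctl -a exit,always -F dir=/some/path" (illustrative command).
 */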
int audit_make_tree(struct audit_krule *rule, char *pathname, u32 op)
{
	if (pathname[0] != '/' ||
	    rule->listnr != AUDIT_FILTER_EXIT ||
	    op & ~AUDIT_EQUAL ||
	    rule->inode_f || rule->watch || rule->tree)
		return -EINVAL;
	rule->tree = alloc_tree(pathname);
	if (!rule->tree)
		return -ENOMEM;
	return 0;
}

void audit_put_tree(struct audit_tree *tree)
{
	put_tree(tree);
}

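/*
 * If a tree for the same path already exists, share it.  Otherwise the
 * mutex is dropped while the path is resolved and its mounts tagged;
 * should the rule get yanked off tree->rules in the meantime (rlist
 * emptied), we give up with -ENOENT.
 */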
/* called with audit_filter_mutex */
int audit_add_tree_rule(struct audit_krule *rule)
{
	struct audit_tree *seed = rule->tree, *tree;
	struct path path;
	struct vfsmount *mnt, *p;
	struct list_head list;
	int err;

	list_for_each_entry(tree, &tree_list, list) {
		if (!strcmp(seed->pathname, tree->pathname)) {
			put_tree(seed);
			rule->tree = tree;
			list_add(&rule->rlist, &tree->rules);
			return 0;
		}
	}
	tree = seed;
	list_add(&tree->list, &tree_list);
	list_add(&rule->rlist, &tree->rules);
	/* do not set rule->tree yet */
	mutex_unlock(&audit_filter_mutex);

	err = kern_path(tree->pathname, 0, &path);
	if (err)
		goto Err;
	mnt = collect_mounts(path.mnt, path.dentry);
	path_put(&path);
	if (!mnt) {
		err = -ENOMEM;
		goto Err;
	}
	list_add_tail(&list, &mnt->mnt_list);

	get_tree(tree);
	list_for_each_entry(p, &list, mnt_list) {
		err = tag_chunk(p->mnt_root->d_inode, tree);
		if (err)
			break;
	}

	list_del(&list);
	drop_collected_mounts(mnt);

	if (!err) {
		struct node *node;
		spin_lock(&hash_lock);
		list_for_each_entry(node, &tree->chunks, list)
			node->index &= ~(1U<<31);
		spin_unlock(&hash_lock);
	} else {
		trim_marked(tree);
		goto Err;
	}

	mutex_lock(&audit_filter_mutex);
	if (list_empty(&rule->rlist)) {
		put_tree(tree);
		return -ENOENT;
	}
	rule->tree = tree;
	put_tree(tree);

	return 0;
Err:
	mutex_lock(&audit_filter_mutex);
	list_del_init(&tree->list);
	list_del_init(&tree->rules);
	put_tree(tree);
	return err;
}

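/*
 * Retag: make trees that cover 'old' also cover the mounts collected
 * at 'new'.  At this revision the only caller appears to be the
 * AUDIT_MAKE_EQUIV command in kernel/audit.c.  The barrier element
 * separates trees already processed from ones still to visit while
 * audit_filter_mutex is dropped and retaken.
 */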
int audit_tag_tree(char *old, char *new)
{
	struct list_head cursor, barrier;
	int failed = 0;
	struct path path;
	struct vfsmount *tagged;
	struct list_head list;
	struct vfsmount *mnt;
	struct dentry *dentry;
	int err;

	err = kern_path(new, 0, &path);
	if (err)
		return err;
	tagged = collect_mounts(path.mnt, path.dentry);
	path_put(&path);
	if (!tagged)
		return -ENOMEM;

	err = kern_path(old, 0, &path);
	if (err) {
		drop_collected_mounts(tagged);
		return err;
	}
	mnt = mntget(path.mnt);
	dentry = dget(path.dentry);
	path_put(&path);

	if (dentry == tagged->mnt_root && dentry == mnt->mnt_root)
		follow_up(&mnt, &dentry);

	list_add_tail(&list, &tagged->mnt_list);

	mutex_lock(&audit_filter_mutex);
	list_add(&barrier, &tree_list);
	list_add(&cursor, &barrier);

	while (cursor.next != &tree_list) {
		struct audit_tree *tree;
		struct vfsmount *p;

		tree = container_of(cursor.next, struct audit_tree, list);
		get_tree(tree);
		list_del(&cursor);
		list_add(&cursor, &tree->list);
		mutex_unlock(&audit_filter_mutex);

		err = kern_path(tree->pathname, 0, &path);
		if (err) {
			put_tree(tree);
			mutex_lock(&audit_filter_mutex);
			continue;
		}

		spin_lock(&vfsmount_lock);
		if (!is_under(mnt, dentry, &path)) {
			spin_unlock(&vfsmount_lock);
			path_put(&path);
			put_tree(tree);
			mutex_lock(&audit_filter_mutex);
			continue;
		}
		spin_unlock(&vfsmount_lock);
		path_put(&path);

		list_for_each_entry(p, &list, mnt_list) {
			failed = tag_chunk(p->mnt_root->d_inode, tree);
			if (failed)
				break;
		}

		if (failed) {
			put_tree(tree);
			mutex_lock(&audit_filter_mutex);
			break;
		}

		mutex_lock(&audit_filter_mutex);
		spin_lock(&hash_lock);
		if (!tree->goner) {
			list_del(&tree->list);
			list_add(&tree->list, &tree_list);
		}
		spin_unlock(&hash_lock);
		put_tree(tree);
	}

	while (barrier.prev != &tree_list) {
		struct audit_tree *tree;

		tree = container_of(barrier.prev, struct audit_tree, list);
		get_tree(tree);
		list_del(&tree->list);
		list_add(&tree->list, &barrier);
		mutex_unlock(&audit_filter_mutex);

		if (!failed) {
			struct node *node;
			spin_lock(&hash_lock);
			list_for_each_entry(node, &tree->chunks, list)
				node->index &= ~(1U<<31);
			spin_unlock(&hash_lock);
		} else {
			trim_marked(tree);
		}

		put_tree(tree);
		mutex_lock(&audit_filter_mutex);
	}
	list_del(&barrier);
	list_del(&cursor);
	list_del(&list);
	mutex_unlock(&audit_filter_mutex);
	dput(dentry);
	mntput(mnt);
	drop_collected_mounts(tagged);
	return failed;
}

/*
 * That gets run when evict_chunk() ends up needing to kill audit_tree.
 * Runs from a separate thread, with audit_cmd_mutex held.
 */
void audit_prune_trees(void)
{
	mutex_lock(&audit_filter_mutex);

	while (!list_empty(&prune_list)) {
		struct audit_tree *victim;

		victim = list_entry(prune_list.next, struct audit_tree, list);
		list_del_init(&victim->list);

		mutex_unlock(&audit_filter_mutex);

		prune_one(victim);

		mutex_lock(&audit_filter_mutex);
	}

	mutex_unlock(&audit_filter_mutex);
}

/*
 *  Here comes the stuff asynchronous to auditctl operations
 */

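/*
 * Called via handle_event() when the watched inode is going away; kills
 * every tree rooted at this chunk, hands them to the prune thread via
 * prune_list and cuts the chunk loose from the hash and its owners.
 */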
/* inode->inotify_mutex is locked */
static void evict_chunk(struct audit_chunk *chunk)
{
	struct audit_tree *owner;
	int n;

	if (chunk->dead)
		return;

	chunk->dead = 1;
	mutex_lock(&audit_filter_mutex);
	spin_lock(&hash_lock);
	while (!list_empty(&chunk->trees)) {
		owner = list_entry(chunk->trees.next,
				   struct audit_tree, same_root);
		owner->goner = 1;
		owner->root = NULL;
		list_del_init(&owner->same_root);
		spin_unlock(&hash_lock);
		kill_rules(owner);
		list_move(&owner->list, &prune_list);
		audit_schedule_prune();
		spin_lock(&hash_lock);
	}
	list_del_rcu(&chunk->hash);
	for (n = 0; n < chunk->count; n++)
		list_del_init(&chunk->owners[n].list);
	spin_unlock(&hash_lock);
	mutex_unlock(&audit_filter_mutex);
}

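/*
 * IN_IGNORED arrives once the watch has been removed (including removal
 * triggered by IN_DELETE_SELF); evict the chunk and drop what appears
 * to be the reference pinning it for the lifetime of the watch, after
 * which destroy_watch() below frees the chunk.
 */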
static void handle_event(struct inotify_watch *watch, u32 wd, u32 mask,
                         u32 cookie, const char *dname, struct inode *inode)
{
	struct audit_chunk *chunk = container_of(watch, struct audit_chunk, watch);

	if (mask & IN_IGNORED) {
		evict_chunk(chunk);
		put_inotify_watch(watch);
	}
}

static void destroy_watch(struct inotify_watch *watch)
{
	struct audit_chunk *chunk = container_of(watch, struct audit_chunk, watch);
	free_chunk(chunk);
}

static const struct inotify_operations rtree_inotify_ops = {
	.handle_event	= handle_event,
	.destroy_watch	= destroy_watch,
};

static int __init audit_tree_init(void)
{
	int i;

	rtree_ih = inotify_init(&rtree_inotify_ops);
	if (IS_ERR(rtree_ih))
		audit_panic("cannot initialize inotify handle for rectree watches");

	for (i = 0; i < HASH_SIZE; i++)
		INIT_LIST_HEAD(&chunk_hash_heads[i]);

	return 0;
}
__initcall(audit_tree_init);