xref: /openbmc/linux/kernel/audit_tree.c (revision 36f10f55ff1d2867bfc48ed898a9cc0dc6b49dd2)
// SPDX-License-Identifier: GPL-2.0
#include "audit.h"
#include <linux/fsnotify_backend.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/kthread.h>
#include <linux/refcount.h>
#include <linux/slab.h>

struct audit_tree;
struct audit_chunk;

struct audit_tree {
	refcount_t count;
	int goner;
	struct audit_chunk *root;
	struct list_head chunks;
	struct list_head rules;
	struct list_head list;
	struct list_head same_root;
	struct rcu_head head;
	char pathname[];
};

struct audit_chunk {
	struct list_head hash;
	struct fsnotify_mark mark;
	struct list_head trees;		/* with root here */
	int dead;
	int count;
	atomic_long_t refs;
	struct rcu_head head;
	struct node {
		struct list_head list;
		struct audit_tree *owner;
		unsigned index;		/* index; upper bit indicates 'will prune' */
	} owners[];
};

static LIST_HEAD(tree_list);
static LIST_HEAD(prune_list);
static struct task_struct *prune_thread;

/*
 * One struct chunk is attached to each inode of interest.
 * We replace struct chunk on tagging/untagging.
 * Rules have a pointer to struct audit_tree.
 * Rules have struct list_head rlist forming a list of rules over
 * the same tree.
 * References to struct chunk are collected at audit_inode{,_child}()
 * time and used in AUDIT_TREE rule matching.
 * These references are dropped at the same time we are calling
 * audit_free_names(), etc.
 *
 * Cyclic lists galore:
 * tree.chunks anchors chunk.owners[].list			hash_lock
 * tree.rules anchors rule.rlist				audit_filter_mutex
 * chunk.trees anchors tree.same_root				hash_lock
 * chunk.hash is a hash with middle bits of watch.inode as
 * a hash function.						RCU, hash_lock
 *
 * tree is refcounted; one reference for "some rules on rules_list refer to
 * it", one for each chunk with a pointer to it.
 *
 * chunk is refcounted by embedded fsnotify_mark + .refs (non-zero refcount
 * of watch contributes 1 to .refs).
 *
 * node.index allows us to get from node.list to the containing chunk.
 * The MSB of that field is stolen to mark taggings that we might have to
 * revert - several operations have very unpleasant cleanup logic and
 * that makes a difference.  Some.
 */

static struct fsnotify_group *audit_tree_group;

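/*
 * Allocate a tree with a single reference held by the caller and a copy
 * of the watched path appended to the structure.
 */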
static struct audit_tree *alloc_tree(const char *s)
{
	struct audit_tree *tree;

	tree = kmalloc(sizeof(struct audit_tree) + strlen(s) + 1, GFP_KERNEL);
	if (tree) {
		refcount_set(&tree->count, 1);
		tree->goner = 0;
		INIT_LIST_HEAD(&tree->chunks);
		INIT_LIST_HEAD(&tree->rules);
		INIT_LIST_HEAD(&tree->list);
		INIT_LIST_HEAD(&tree->same_root);
		tree->root = NULL;
		strcpy(tree->pathname, s);
	}
	return tree;
}

static inline void get_tree(struct audit_tree *tree)
{
	refcount_inc(&tree->count);
}

static inline void put_tree(struct audit_tree *tree)
{
	if (refcount_dec_and_test(&tree->count))
		kfree_rcu(tree, head);
}

/* to avoid bringing the entire thing into audit.h */
const char *audit_tree_path(struct audit_tree *tree)
{
	return tree->pathname;
}

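/*
 * Drop the tree reference held by each live owner slot, then free the
 * chunk itself.
 */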
static void free_chunk(struct audit_chunk *chunk)
{
	int i;

	for (i = 0; i < chunk->count; i++) {
		if (chunk->owners[i].owner)
			put_tree(chunk->owners[i].owner);
	}
	kfree(chunk);
}

void audit_put_chunk(struct audit_chunk *chunk)
{
	if (atomic_long_dec_and_test(&chunk->refs))
		free_chunk(chunk);
}

static void __put_chunk(struct rcu_head *rcu)
{
	struct audit_chunk *chunk = container_of(rcu, struct audit_chunk, head);
	audit_put_chunk(chunk);
}

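/*
 * ->free_mark() callback: defer the final put past an RCU grace period,
 * since audit_tree_lookup() walks the hash under rcu_read_lock().
 */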
static void audit_tree_destroy_watch(struct fsnotify_mark *entry)
{
	struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark);
	call_rcu(&chunk->head, __put_chunk);
}

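/*
 * Allocate a chunk with @count owner slots, one reference for the caller
 * and an embedded fsnotify mark that only cares about FS_IN_IGNORED.
 */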
static struct audit_chunk *alloc_chunk(int count)
{
	struct audit_chunk *chunk;
	size_t size;
	int i;

	size = offsetof(struct audit_chunk, owners) + count * sizeof(struct node);
	chunk = kzalloc(size, GFP_KERNEL);
	if (!chunk)
		return NULL;

	INIT_LIST_HEAD(&chunk->hash);
	INIT_LIST_HEAD(&chunk->trees);
	chunk->count = count;
	atomic_long_set(&chunk->refs, 1);
	for (i = 0; i < count; i++) {
		INIT_LIST_HEAD(&chunk->owners[i].list);
		chunk->owners[i].index = i;
	}
	fsnotify_init_mark(&chunk->mark, audit_tree_group);
	chunk->mark.mask = FS_IN_IGNORED;
	return chunk;
}

enum {HASH_SIZE = 128};
static struct list_head chunk_hash_heads[HASH_SIZE];
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(hash_lock);

/* Function to return search key in our hash from inode. */
static unsigned long inode_to_key(const struct inode *inode)
{
	/* Use address pointed to by connector->obj as the key */
	return (unsigned long)&inode->i_fsnotify_marks;
}

/*
 * Function to return search key in our hash from chunk. Key 0 is special and
 * should never be present in the hash.
 */
static unsigned long chunk_to_key(struct audit_chunk *chunk)
{
	/*
	 * We have a reference to the mark so it should be attached to a
	 * connector.
	 */
	if (WARN_ON_ONCE(!chunk->mark.connector))
		return 0;
	return (unsigned long)chunk->mark.connector->obj;
}

static inline struct list_head *chunk_hash(unsigned long key)
{
	unsigned long n = key / L1_CACHE_BYTES;
	return chunk_hash_heads + n % HASH_SIZE;
}

/* hash_lock & entry->lock are held by caller */
static void insert_hash(struct audit_chunk *chunk)
{
	unsigned long key = chunk_to_key(chunk);
	struct list_head *list;

	if (!(chunk->mark.flags & FSNOTIFY_MARK_FLAG_ATTACHED))
		return;
	list = chunk_hash(key);
	list_add_rcu(&chunk->hash, list);
}

/* called under rcu_read_lock */
struct audit_chunk *audit_tree_lookup(const struct inode *inode)
{
	unsigned long key = inode_to_key(inode);
	struct list_head *list = chunk_hash(key);
	struct audit_chunk *p;

	list_for_each_entry_rcu(p, list, hash) {
		if (chunk_to_key(p) == key) {
			atomic_long_inc(&p->refs);
			return p;
		}
	}
	return NULL;
}

bool audit_tree_match(struct audit_chunk *chunk, struct audit_tree *tree)
{
	int n;
	for (n = 0; n < chunk->count; n++)
		if (chunk->owners[n].owner == tree)
			return true;
	return false;
}

/* tagging and untagging inodes with trees */

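/*
 * Map an owner node back to its containing chunk: the low bits of
 * node.index give the slot number within chunk->owners[].
 */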
static struct audit_chunk *find_chunk(struct node *p)
{
	int index = p->index & ~(1U<<31);
	p -= index;
	return container_of(p, struct audit_chunk, owners[0]);
}

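/*
 * Remove one owner from a chunk.  Called (and returns) with hash_lock
 * held; the lock is dropped while we allocate and swap in a replacement
 * chunk with one slot fewer.  If allocation fails, we fall back to just
 * clearing the slot in place.
 */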
static void untag_chunk(struct node *p)
{
	struct audit_chunk *chunk = find_chunk(p);
	struct fsnotify_mark *entry = &chunk->mark;
	struct audit_chunk *new = NULL;
	struct audit_tree *owner;
	int size = chunk->count - 1;
	int i, j;

	fsnotify_get_mark(entry);

	spin_unlock(&hash_lock);

	if (size)
		new = alloc_chunk(size);

	mutex_lock(&entry->group->mark_mutex);
	spin_lock(&entry->lock);
	/*
	 * mark_mutex protects mark from getting detached and thus also from
	 * mark->connector->obj getting NULL.
	 */
	if (chunk->dead || !(entry->flags & FSNOTIFY_MARK_FLAG_ATTACHED)) {
		spin_unlock(&entry->lock);
		mutex_unlock(&entry->group->mark_mutex);
		if (new)
			fsnotify_put_mark(&new->mark);
		goto out;
	}

	owner = p->owner;

	if (!size) {
		chunk->dead = 1;
		spin_lock(&hash_lock);
		list_del_init(&chunk->trees);
		if (owner->root == chunk)
			owner->root = NULL;
		list_del_init(&p->list);
		list_del_rcu(&chunk->hash);
		spin_unlock(&hash_lock);
		spin_unlock(&entry->lock);
		mutex_unlock(&entry->group->mark_mutex);
		fsnotify_destroy_mark(entry, audit_tree_group);
		goto out;
	}

	if (!new)
		goto Fallback;

	if (fsnotify_add_mark_locked(&new->mark, entry->connector->obj,
				     FSNOTIFY_OBJ_TYPE_INODE, 1)) {
		fsnotify_put_mark(&new->mark);
		goto Fallback;
	}

	chunk->dead = 1;
	spin_lock(&hash_lock);
	list_replace_init(&chunk->trees, &new->trees);
	if (owner->root == chunk) {
		list_del_init(&owner->same_root);
		owner->root = NULL;
	}

	for (i = j = 0; j <= size; i++, j++) {
		struct audit_tree *s;
		if (&chunk->owners[j] == p) {
			list_del_init(&p->list);
			i--;
			continue;
		}
		s = chunk->owners[j].owner;
		new->owners[i].owner = s;
		new->owners[i].index = chunk->owners[j].index - j + i;
		if (!s) /* result of earlier fallback */
			continue;
		get_tree(s);
		list_replace_init(&chunk->owners[j].list, &new->owners[i].list);
	}

	list_replace_rcu(&chunk->hash, &new->hash);
	list_for_each_entry(owner, &new->trees, same_root)
		owner->root = new;
	spin_unlock(&hash_lock);
	spin_unlock(&entry->lock);
	mutex_unlock(&entry->group->mark_mutex);
	fsnotify_destroy_mark(entry, audit_tree_group);
	fsnotify_put_mark(&new->mark);	/* drop initial reference */
	goto out;

Fallback:
	/* do the best we can */
	spin_lock(&hash_lock);
	if (owner->root == chunk) {
		list_del_init(&owner->same_root);
		owner->root = NULL;
	}
	list_del_init(&p->list);
	p->owner = NULL;
	put_tree(owner);
	spin_unlock(&hash_lock);
	spin_unlock(&entry->lock);
	mutex_unlock(&entry->group->mark_mutex);
out:
	fsnotify_put_mark(entry);
	spin_lock(&hash_lock);
}

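/* Attach a fresh single-owner chunk to an inode that has none yet. */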
static int create_chunk(struct inode *inode, struct audit_tree *tree)
{
	struct fsnotify_mark *entry;
	struct audit_chunk *chunk = alloc_chunk(1);
	if (!chunk)
		return -ENOMEM;

	entry = &chunk->mark;
	if (fsnotify_add_inode_mark(entry, inode, 0)) {
		fsnotify_put_mark(entry);
		return -ENOSPC;
	}

	spin_lock(&entry->lock);
	spin_lock(&hash_lock);
	if (tree->goner) {
		spin_unlock(&hash_lock);
		chunk->dead = 1;
		spin_unlock(&entry->lock);
		fsnotify_destroy_mark(entry, audit_tree_group);
		fsnotify_put_mark(entry);
		return 0;
	}
	chunk->owners[0].index = (1U << 31);
	chunk->owners[0].owner = tree;
	get_tree(tree);
	list_add(&chunk->owners[0].list, &tree->chunks);
	if (!tree->root) {
		tree->root = chunk;
		list_add(&tree->same_root, &chunk->trees);
	}
	insert_hash(chunk);
	spin_unlock(&hash_lock);
	spin_unlock(&entry->lock);
	fsnotify_put_mark(entry);	/* drop initial reference */
	return 0;
}

/* the first tagged inode becomes root of tree */
static int tag_chunk(struct inode *inode, struct audit_tree *tree)
{
	struct fsnotify_mark *old_entry, *chunk_entry;
	struct audit_tree *owner;
	struct audit_chunk *chunk, *old;
	struct node *p;
	int n;

	old_entry = fsnotify_find_mark(&inode->i_fsnotify_marks,
				       audit_tree_group);
	if (!old_entry)
		return create_chunk(inode, tree);

	old = container_of(old_entry, struct audit_chunk, mark);

	/* are we already there? */
	spin_lock(&hash_lock);
	for (n = 0; n < old->count; n++) {
		if (old->owners[n].owner == tree) {
			spin_unlock(&hash_lock);
			fsnotify_put_mark(old_entry);
			return 0;
		}
	}
	spin_unlock(&hash_lock);

	chunk = alloc_chunk(old->count + 1);
	if (!chunk) {
		fsnotify_put_mark(old_entry);
		return -ENOMEM;
	}

	chunk_entry = &chunk->mark;

	mutex_lock(&old_entry->group->mark_mutex);
	spin_lock(&old_entry->lock);
	/*
	 * mark_mutex protects mark from getting detached and thus also from
	 * mark->connector->obj getting NULL.
	 */
	if (!(old_entry->flags & FSNOTIFY_MARK_FLAG_ATTACHED)) {
		/* old_entry is being shot, let's just lie */
		spin_unlock(&old_entry->lock);
		mutex_unlock(&old_entry->group->mark_mutex);
		fsnotify_put_mark(old_entry);
		fsnotify_put_mark(&chunk->mark);
		return -ENOENT;
	}

	if (fsnotify_add_mark_locked(chunk_entry, old_entry->connector->obj,
				     FSNOTIFY_OBJ_TYPE_INODE, 1)) {
		spin_unlock(&old_entry->lock);
		mutex_unlock(&old_entry->group->mark_mutex);
		fsnotify_put_mark(chunk_entry);
		fsnotify_put_mark(old_entry);
		return -ENOSPC;
	}

	/*
	 * Even though we hold old_entry->lock, this is safe since
	 * chunk_entry->lock could never have been grabbed before.
	 */
	spin_lock(&chunk_entry->lock);
	spin_lock(&hash_lock);

	/* we now hold old_entry->lock, chunk_entry->lock, and hash_lock */
	if (tree->goner) {
		spin_unlock(&hash_lock);
		chunk->dead = 1;
		spin_unlock(&chunk_entry->lock);
		spin_unlock(&old_entry->lock);
		mutex_unlock(&old_entry->group->mark_mutex);

		fsnotify_destroy_mark(chunk_entry, audit_tree_group);

		fsnotify_put_mark(chunk_entry);
		fsnotify_put_mark(old_entry);
		return 0;
	}
	list_replace_init(&old->trees, &chunk->trees);
	for (n = 0, p = chunk->owners; n < old->count; n++, p++) {
		struct audit_tree *s = old->owners[n].owner;
		p->owner = s;
		p->index = old->owners[n].index;
		if (!s) /* result of fallback in untag */
			continue;
		get_tree(s);
		list_replace_init(&old->owners[n].list, &p->list);
	}
	p->index = (chunk->count - 1) | (1U<<31);
	p->owner = tree;
	get_tree(tree);
	list_add(&p->list, &tree->chunks);
	list_replace_rcu(&old->hash, &chunk->hash);
	list_for_each_entry(owner, &chunk->trees, same_root)
		owner->root = chunk;
	old->dead = 1;
	if (!tree->root) {
		tree->root = chunk;
		list_add(&tree->same_root, &chunk->trees);
	}
	spin_unlock(&hash_lock);
	spin_unlock(&chunk_entry->lock);
	spin_unlock(&old_entry->lock);
	mutex_unlock(&old_entry->group->mark_mutex);
	fsnotify_destroy_mark(old_entry, audit_tree_group);
	fsnotify_put_mark(chunk_entry);	/* drop initial reference */
	fsnotify_put_mark(old_entry);	/* pair to fsnotify_find_mark */
	return 0;
}

static void audit_tree_log_remove_rule(struct audit_krule *rule)
{
	struct audit_buffer *ab;

	ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
	if (unlikely(!ab))
		return;
	audit_log_format(ab, "op=remove_rule");
	audit_log_format(ab, " dir=");
	audit_log_untrustedstring(ab, rule->tree->pathname);
	audit_log_key(ab, rule->filterkey);
	audit_log_format(ab, " list=%d res=1", rule->listnr);
	audit_log_end(ab);
}

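/*
 * Detach every rule hanging off the tree; fully initialized entries are
 * logged and freed via RCU.  All callers hold audit_filter_mutex, which
 * guards tree->rules (see the locking summary near the top of the file).
 */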
static void kill_rules(struct audit_tree *tree)
{
	struct audit_krule *rule, *next;
	struct audit_entry *entry;

	list_for_each_entry_safe(rule, next, &tree->rules, rlist) {
		entry = container_of(rule, struct audit_entry, rule);

		list_del_init(&rule->rlist);
		if (rule->tree) {
			/* not a half-baked one */
			audit_tree_log_remove_rule(rule);
			if (entry->rule.exe)
				audit_remove_mark(entry->rule.exe);
			rule->tree = NULL;
			list_del_rcu(&entry->list);
			list_del(&entry->rule.list);
			call_rcu(&entry->rcu, audit_free_rule_rcu);
		}
	}
}

/*
 * finish killing struct audit_tree
 */
static void prune_one(struct audit_tree *victim)
{
	spin_lock(&hash_lock);
	while (!list_empty(&victim->chunks)) {
		struct node *p;

		p = list_entry(victim->chunks.next, struct node, list);

		untag_chunk(p);
	}
	spin_unlock(&hash_lock);
	put_tree(victim);
}

/* trim the uncommitted chunks from tree */

static void trim_marked(struct audit_tree *tree)
{
	struct list_head *p, *q;
	spin_lock(&hash_lock);
	if (tree->goner) {
		spin_unlock(&hash_lock);
		return;
	}
	/* reorder */
	for (p = tree->chunks.next; p != &tree->chunks; p = q) {
		struct node *node = list_entry(p, struct node, list);
		q = p->next;
		if (node->index & (1U<<31)) {
			list_del_init(p);
			list_add(p, &tree->chunks);
		}
	}

	while (!list_empty(&tree->chunks)) {
		struct node *node;

		node = list_entry(tree->chunks.next, struct node, list);

		/* have we run out of marked? */
		if (!(node->index & (1U<<31)))
			break;

		untag_chunk(node);
	}
	if (!tree->root && !tree->goner) {
		tree->goner = 1;
		spin_unlock(&hash_lock);
		mutex_lock(&audit_filter_mutex);
		kill_rules(tree);
		list_del_init(&tree->list);
		mutex_unlock(&audit_filter_mutex);
		prune_one(tree);
	} else {
		spin_unlock(&hash_lock);
	}
}

static void audit_schedule_prune(void);

/* called with audit_filter_mutex */
int audit_remove_tree_rule(struct audit_krule *rule)
{
	struct audit_tree *tree;
	tree = rule->tree;
	if (tree) {
		spin_lock(&hash_lock);
		list_del_init(&rule->rlist);
		if (list_empty(&tree->rules) && !tree->goner) {
			tree->root = NULL;
			list_del_init(&tree->same_root);
			tree->goner = 1;
			list_move(&tree->list, &prune_list);
			rule->tree = NULL;
			spin_unlock(&hash_lock);
			audit_schedule_prune();
			return 1;
		}
		rule->tree = NULL;
		spin_unlock(&hash_lock);
		return 1;
	}
	return 0;
}

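/* iterate_mounts() callback: does this mount's root match the hash key? */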
static int compare_root(struct vfsmount *mnt, void *arg)
{
	return inode_to_key(d_backing_inode(mnt->mnt_root)) ==
	       (unsigned long)arg;
}

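/*
 * Walk all trees and prune the chunks whose inodes are no longer
 * reachable from any mount under the tree's path.
 */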
void audit_trim_trees(void)
{
	struct list_head cursor;

	mutex_lock(&audit_filter_mutex);
	list_add(&cursor, &tree_list);
	while (cursor.next != &tree_list) {
		struct audit_tree *tree;
		struct path path;
		struct vfsmount *root_mnt;
		struct node *node;
		int err;

		tree = container_of(cursor.next, struct audit_tree, list);
		get_tree(tree);
		list_del(&cursor);
		list_add(&cursor, &tree->list);
		mutex_unlock(&audit_filter_mutex);

		err = kern_path(tree->pathname, 0, &path);
		if (err)
			goto skip_it;

		root_mnt = collect_mounts(&path);
		path_put(&path);
		if (IS_ERR(root_mnt))
			goto skip_it;

		spin_lock(&hash_lock);
		list_for_each_entry(node, &tree->chunks, list) {
			struct audit_chunk *chunk = find_chunk(node);
			/* this could be NULL if the watch is dying elsewhere... */
			node->index |= 1U<<31;
			if (iterate_mounts(compare_root,
					   (void *)chunk_to_key(chunk),
					   root_mnt))
				node->index &= ~(1U<<31);
		}
		spin_unlock(&hash_lock);
		trim_marked(tree);
		drop_collected_mounts(root_mnt);
skip_it:
		put_tree(tree);
		mutex_lock(&audit_filter_mutex);
	}
	list_del(&cursor);
	mutex_unlock(&audit_filter_mutex);
}

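/*
 * Called from rule parsing: only absolute paths on the exit list with
 * Audit_equal comparison may carry a tree, and a rule gets at most one
 * of inode filter, watch and tree.
 */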
int audit_make_tree(struct audit_krule *rule, char *pathname, u32 op)
{
	if (pathname[0] != '/' ||
	    rule->listnr != AUDIT_FILTER_EXIT ||
	    op != Audit_equal ||
	    rule->inode_f || rule->watch || rule->tree)
		return -EINVAL;
	rule->tree = alloc_tree(pathname);
	if (!rule->tree)
		return -ENOMEM;
	return 0;
}

void audit_put_tree(struct audit_tree *tree)
{
	put_tree(tree);
}

static int tag_mount(struct vfsmount *mnt, void *arg)
{
	return tag_chunk(d_backing_inode(mnt->mnt_root), arg);
}

/*
 * That gets run when evict_chunk() ends up needing to kill audit_tree.
 * Runs from a separate thread.
 */
static int prune_tree_thread(void *unused)
{
	for (;;) {
		if (list_empty(&prune_list)) {
			set_current_state(TASK_INTERRUPTIBLE);
			schedule();
		}

		audit_ctl_lock();
		mutex_lock(&audit_filter_mutex);

		while (!list_empty(&prune_list)) {
			struct audit_tree *victim;

			victim = list_entry(prune_list.next,
					struct audit_tree, list);
			list_del_init(&victim->list);

			mutex_unlock(&audit_filter_mutex);

			prune_one(victim);

			mutex_lock(&audit_filter_mutex);
		}

		mutex_unlock(&audit_filter_mutex);
		audit_ctl_unlock();
	}
	return 0;
}

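/* Start the prune thread on first use; later calls are no-ops. */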
static int audit_launch_prune(void)
{
	if (prune_thread)
		return 0;
	prune_thread = kthread_run(prune_tree_thread, NULL,
				"audit_prune_tree");
	if (IS_ERR(prune_thread)) {
		pr_err("cannot start thread audit_prune_tree\n");
		prune_thread = NULL;
		return -ENOMEM;
	}
	return 0;
}

/* called with audit_filter_mutex */
int audit_add_tree_rule(struct audit_krule *rule)
{
	struct audit_tree *seed = rule->tree, *tree;
	struct path path;
	struct vfsmount *mnt;
	int err;

	rule->tree = NULL;
	list_for_each_entry(tree, &tree_list, list) {
		if (!strcmp(seed->pathname, tree->pathname)) {
			put_tree(seed);
			rule->tree = tree;
			list_add(&rule->rlist, &tree->rules);
			return 0;
		}
	}
	tree = seed;
	list_add(&tree->list, &tree_list);
	list_add(&rule->rlist, &tree->rules);
	/* do not set rule->tree yet */
	mutex_unlock(&audit_filter_mutex);

	if (unlikely(!prune_thread)) {
		err = audit_launch_prune();
		if (err)
			goto Err;
	}

	err = kern_path(tree->pathname, 0, &path);
	if (err)
		goto Err;
	mnt = collect_mounts(&path);
	path_put(&path);
	if (IS_ERR(mnt)) {
		err = PTR_ERR(mnt);
		goto Err;
	}

	get_tree(tree);
	err = iterate_mounts(tag_mount, tree, mnt);
	drop_collected_mounts(mnt);

	if (!err) {
		struct node *node;
		spin_lock(&hash_lock);
		list_for_each_entry(node, &tree->chunks, list)
			node->index &= ~(1U<<31);
		spin_unlock(&hash_lock);
	} else {
		trim_marked(tree);
		goto Err;
	}

	mutex_lock(&audit_filter_mutex);
	if (list_empty(&rule->rlist)) {
		put_tree(tree);
		return -ENOENT;
	}
	rule->tree = tree;
	put_tree(tree);

	return 0;
Err:
	mutex_lock(&audit_filter_mutex);
	list_del_init(&tree->list);
	list_del_init(&tree->rules);
	put_tree(tree);
	return err;
}

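/*
 * For every tree whose watched path contains @old, also tag the mounts
 * collected at @new (this backs the AUDIT_MAKE_EQUIV command handled in
 * kernel/audit.c).
 */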
int audit_tag_tree(char *old, char *new)
{
	struct list_head cursor, barrier;
	int failed = 0;
	struct path path1, path2;
	struct vfsmount *tagged;
	int err;

	err = kern_path(new, 0, &path2);
	if (err)
		return err;
	tagged = collect_mounts(&path2);
	path_put(&path2);
	if (IS_ERR(tagged))
		return PTR_ERR(tagged);

	err = kern_path(old, 0, &path1);
	if (err) {
		drop_collected_mounts(tagged);
		return err;
	}

	mutex_lock(&audit_filter_mutex);
	list_add(&barrier, &tree_list);
	list_add(&cursor, &barrier);

	while (cursor.next != &tree_list) {
		struct audit_tree *tree;
		int good_one = 0;

		tree = container_of(cursor.next, struct audit_tree, list);
		get_tree(tree);
		list_del(&cursor);
		list_add(&cursor, &tree->list);
		mutex_unlock(&audit_filter_mutex);

		err = kern_path(tree->pathname, 0, &path2);
		if (!err) {
			good_one = path_is_under(&path1, &path2);
			path_put(&path2);
		}

		if (!good_one) {
			put_tree(tree);
			mutex_lock(&audit_filter_mutex);
			continue;
		}

		failed = iterate_mounts(tag_mount, tree, tagged);
		if (failed) {
			put_tree(tree);
			mutex_lock(&audit_filter_mutex);
			break;
		}

		mutex_lock(&audit_filter_mutex);
		spin_lock(&hash_lock);
		if (!tree->goner) {
			list_del(&tree->list);
			list_add(&tree->list, &tree_list);
		}
		spin_unlock(&hash_lock);
		put_tree(tree);
	}

	while (barrier.prev != &tree_list) {
		struct audit_tree *tree;

		tree = container_of(barrier.prev, struct audit_tree, list);
		get_tree(tree);
		list_del(&tree->list);
		list_add(&tree->list, &barrier);
		mutex_unlock(&audit_filter_mutex);

		if (!failed) {
			struct node *node;
			spin_lock(&hash_lock);
			list_for_each_entry(node, &tree->chunks, list)
				node->index &= ~(1U<<31);
			spin_unlock(&hash_lock);
		} else {
			trim_marked(tree);
		}

		put_tree(tree);
		mutex_lock(&audit_filter_mutex);
	}
	list_del(&barrier);
	list_del(&cursor);
	mutex_unlock(&audit_filter_mutex);
	path_put(&path1);
	drop_collected_mounts(tagged);
	return failed;
}

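/* Callers have already moved the victims to prune_list; just wake the thread. */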
static void audit_schedule_prune(void)
{
	wake_up_process(prune_thread);
}

/*
 * ... and that one is done if evict_chunk() decides to delay until the end
 * of syscall.  Runs synchronously.
 */
void audit_kill_trees(struct list_head *list)
{
	audit_ctl_lock();
	mutex_lock(&audit_filter_mutex);

	while (!list_empty(list)) {
		struct audit_tree *victim;

		victim = list_entry(list->next, struct audit_tree, list);
		kill_rules(victim);
		list_del_init(&victim->list);

		mutex_unlock(&audit_filter_mutex);

		prune_one(victim);

		mutex_lock(&audit_filter_mutex);
	}

	mutex_unlock(&audit_filter_mutex);
	audit_ctl_unlock();
}

/*
 *  Here comes the stuff asynchronous to auditctl operations
 */

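/*
 * The inode is going away: kill every tree rooted at this chunk, either
 * synchronously via the list returned by audit_killed_trees() or by
 * handing the victims to the prune thread.
 */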
static void evict_chunk(struct audit_chunk *chunk)
{
	struct audit_tree *owner;
	struct list_head *postponed = audit_killed_trees();
	int need_prune = 0;
	int n;

	if (chunk->dead)
		return;

	chunk->dead = 1;
	mutex_lock(&audit_filter_mutex);
	spin_lock(&hash_lock);
	while (!list_empty(&chunk->trees)) {
		owner = list_entry(chunk->trees.next,
				   struct audit_tree, same_root);
		owner->goner = 1;
		owner->root = NULL;
		list_del_init(&owner->same_root);
		spin_unlock(&hash_lock);
		if (!postponed) {
			kill_rules(owner);
			list_move(&owner->list, &prune_list);
			need_prune = 1;
		} else {
			list_move(&owner->list, postponed);
		}
		spin_lock(&hash_lock);
	}
	list_del_rcu(&chunk->hash);
	for (n = 0; n < chunk->count; n++)
		list_del_init(&chunk->owners[n].list);
	spin_unlock(&hash_lock);
	mutex_unlock(&audit_filter_mutex);
	if (need_prune)
		audit_schedule_prune();
}

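/*
 * The group exists only for its marks; the fsnotify events themselves
 * are ignored.
 */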
static int audit_tree_handle_event(struct fsnotify_group *group,
				   struct inode *to_tell,
				   u32 mask, const void *data, int data_type,
				   const unsigned char *file_name, u32 cookie,
				   struct fsnotify_iter_info *iter_info)
{
	return 0;
}

static void audit_tree_freeing_mark(struct fsnotify_mark *entry, struct fsnotify_group *group)
{
	struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark);

	evict_chunk(chunk);

	/*
	 * We are guaranteed to have at least one reference to the mark from
	 * either the inode or the caller of fsnotify_destroy_mark().
	 */
	BUG_ON(refcount_read(&entry->refcnt) < 1);
}

static const struct fsnotify_ops audit_tree_ops = {
	.handle_event = audit_tree_handle_event,
	.freeing_mark = audit_tree_freeing_mark,
	.free_mark = audit_tree_destroy_watch,
};

static int __init audit_tree_init(void)
{
	int i;

	audit_tree_group = fsnotify_alloc_group(&audit_tree_ops);
	if (IS_ERR(audit_tree_group))
		audit_panic("cannot initialize fsnotify group for rectree watches");

	for (i = 0; i < HASH_SIZE; i++)
		INIT_LIST_HEAD(&chunk_hash_heads[i]);

	return 0;
}
__initcall(audit_tree_init);
1033