xref: /openbmc/linux/kernel/audit_tree.c (revision b9a1b977)
1b2441318SGreg Kroah-Hartman // SPDX-License-Identifier: GPL-2.0
274c3cbe3SAl Viro #include "audit.h"
328a3a7ebSEric Paris #include <linux/fsnotify_backend.h>
474c3cbe3SAl Viro #include <linux/namei.h>
574c3cbe3SAl Viro #include <linux/mount.h>
6916d7576SAl Viro #include <linux/kthread.h>
79d2378f8SElena Reshetova #include <linux/refcount.h>
85a0e3ad6STejun Heo #include <linux/slab.h>
974c3cbe3SAl Viro 
1074c3cbe3SAl Viro struct audit_tree;
1174c3cbe3SAl Viro struct audit_chunk;
1274c3cbe3SAl Viro 
1374c3cbe3SAl Viro struct audit_tree {
149d2378f8SElena Reshetova 	refcount_t count;
1574c3cbe3SAl Viro 	int goner;
1674c3cbe3SAl Viro 	struct audit_chunk *root;
1774c3cbe3SAl Viro 	struct list_head chunks;
1874c3cbe3SAl Viro 	struct list_head rules;
1974c3cbe3SAl Viro 	struct list_head list;
2074c3cbe3SAl Viro 	struct list_head same_root;
2174c3cbe3SAl Viro 	struct rcu_head head;
2274c3cbe3SAl Viro 	char pathname[];
2374c3cbe3SAl Viro };
2474c3cbe3SAl Viro 
2574c3cbe3SAl Viro struct audit_chunk {
2674c3cbe3SAl Viro 	struct list_head hash;
278d20d6e9SJan Kara 	unsigned long key;
285f516130SJan Kara 	struct fsnotify_mark *mark;
2974c3cbe3SAl Viro 	struct list_head trees;		/* with root here */
3074c3cbe3SAl Viro 	int count;
318f7b0ba1SAl Viro 	atomic_long_t refs;
3274c3cbe3SAl Viro 	struct rcu_head head;
3374c3cbe3SAl Viro 	struct node {
3474c3cbe3SAl Viro 		struct list_head list;
3574c3cbe3SAl Viro 		struct audit_tree *owner;
3674c3cbe3SAl Viro 		unsigned index;		/* index; upper bit indicates 'will prune' */
3774c3cbe3SAl Viro 	} owners[];
3874c3cbe3SAl Viro };
3974c3cbe3SAl Viro 
405f516130SJan Kara struct audit_tree_mark {
415f516130SJan Kara 	struct fsnotify_mark mark;
425f516130SJan Kara 	struct audit_chunk *chunk;
435f516130SJan Kara };
445f516130SJan Kara 
4574c3cbe3SAl Viro static LIST_HEAD(tree_list);
4674c3cbe3SAl Viro static LIST_HEAD(prune_list);
47f1aaf262SImre Palik static struct task_struct *prune_thread;
4874c3cbe3SAl Viro 
4974c3cbe3SAl Viro /*
5083d23bc8SJan Kara  * One struct chunk is attached to each inode of interest through
5183d23bc8SJan Kara  * One struct chunk is attached to each inode of interest through an
5283d23bc8SJan Kara  * audit_tree_mark (fsnotify mark). The struct chunk is replaced on tagging /
5383d23bc8SJan Kara  * untagging; the mark is stable as long as a chunk is attached to it. The
5483d23bc8SJan Kara  * association between mark and chunk is protected by hash_lock and
5583d23bc8SJan Kara  * audit_tree_group->mark_mutex. Thus, as long as we hold
5683d23bc8SJan Kara  * audit_tree_group->mark_mutex and verify that the mark is alive via the
5783d23bc8SJan Kara  * FSNOTIFY_MARK_FLAG_ATTACHED flag, we can be sure the mark points to
5883d23bc8SJan Kara  * the current chunk.
5974c3cbe3SAl Viro  * Rules have pointer to struct audit_tree.
6074c3cbe3SAl Viro  * Rules have struct list_head rlist forming a list of rules over
6174c3cbe3SAl Viro  * the same tree.
6274c3cbe3SAl Viro  * References to struct chunk are collected at audit_inode{,_child}()
6374c3cbe3SAl Viro  * time and used in AUDIT_TREE rule matching.
6474c3cbe3SAl Viro  * These references are dropped at the same time we are calling
6574c3cbe3SAl Viro  * audit_free_names(), etc.
6674c3cbe3SAl Viro  *
6774c3cbe3SAl Viro  * Cyclic lists galore:
6874c3cbe3SAl Viro  * tree.chunks anchors chunk.owners[].list			hash_lock
6974c3cbe3SAl Viro  * tree.rules anchors rule.rlist				audit_filter_mutex
7074c3cbe3SAl Viro  * chunk.trees anchors tree.same_root				hash_lock
7174c3cbe3SAl Viro  * chunk.hash is a hash with middle bits of watch.inode as
7274c3cbe3SAl Viro  * a hash function.						RCU, hash_lock
7374c3cbe3SAl Viro  *
7474c3cbe3SAl Viro  * tree is refcounted; one reference for "some rules on rules_list refer to
7574c3cbe3SAl Viro  * it", one for each chunk with pointer to it.
7674c3cbe3SAl Viro  *
7783d23bc8SJan Kara  * chunk is refcounted by embedded .refs. Mark associated with the chunk holds
7883d23bc8SJan Kara  * one chunk reference. This reference is dropped either when a mark is going
7983d23bc8SJan Kara  * to be freed (corresponding inode goes away) or when chunk attached to the
8083d23bc8SJan Kara  * mark gets replaced. This reference must be dropped using
8183d23bc8SJan Kara  * audit_mark_put_chunk() to make sure the reference is dropped only after RCU
8283d23bc8SJan Kara  * grace period as it protects RCU readers of the hash table.
8374c3cbe3SAl Viro  *
8474c3cbe3SAl Viro  * node.index allows us to get from node.list to the containing chunk.
8574c3cbe3SAl Viro  * MSB of that sucker is stolen to mark taggings that we might have to
8674c3cbe3SAl Viro  * revert - several operations have very unpleasant cleanup logic and
8774c3cbe3SAl Viro  * that makes a difference.  Some.
8874c3cbe3SAl Viro  */
8974c3cbe3SAl Viro 
9028a3a7ebSEric Paris static struct fsnotify_group *audit_tree_group;
915f516130SJan Kara static struct kmem_cache *audit_tree_mark_cachep __read_mostly;
9274c3cbe3SAl Viro 
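/*
 * Allocate a tree for pathname @s with a single reference held by the
 * caller; the pathname is copied into the flexible array member at the
 * end of the structure.
 */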
9374c3cbe3SAl Viro static struct audit_tree *alloc_tree(const char *s)
9474c3cbe3SAl Viro {
9574c3cbe3SAl Viro 	struct audit_tree *tree;
9674c3cbe3SAl Viro 
9774c3cbe3SAl Viro 	tree = kmalloc(sizeof(struct audit_tree) + strlen(s) + 1, GFP_KERNEL);
9874c3cbe3SAl Viro 	if (tree) {
999d2378f8SElena Reshetova 		refcount_set(&tree->count, 1);
10074c3cbe3SAl Viro 		tree->goner = 0;
10174c3cbe3SAl Viro 		INIT_LIST_HEAD(&tree->chunks);
10274c3cbe3SAl Viro 		INIT_LIST_HEAD(&tree->rules);
10374c3cbe3SAl Viro 		INIT_LIST_HEAD(&tree->list);
10474c3cbe3SAl Viro 		INIT_LIST_HEAD(&tree->same_root);
10574c3cbe3SAl Viro 		tree->root = NULL;
10674c3cbe3SAl Viro 		strcpy(tree->pathname, s);
10774c3cbe3SAl Viro 	}
10874c3cbe3SAl Viro 	return tree;
10974c3cbe3SAl Viro }
11074c3cbe3SAl Viro 
11174c3cbe3SAl Viro static inline void get_tree(struct audit_tree *tree)
11274c3cbe3SAl Viro {
1139d2378f8SElena Reshetova 	refcount_inc(&tree->count);
11474c3cbe3SAl Viro }
11574c3cbe3SAl Viro 
11674c3cbe3SAl Viro static inline void put_tree(struct audit_tree *tree)
11774c3cbe3SAl Viro {
1189d2378f8SElena Reshetova 	if (refcount_dec_and_test(&tree->count))
1193b097c46SLai Jiangshan 		kfree_rcu(tree, head);
12074c3cbe3SAl Viro }
12174c3cbe3SAl Viro 
12274c3cbe3SAl Viro /* to avoid bringing the entire thing in audit.h */
12374c3cbe3SAl Viro const char *audit_tree_path(struct audit_tree *tree)
12474c3cbe3SAl Viro {
12574c3cbe3SAl Viro 	return tree->pathname;
12674c3cbe3SAl Viro }
12774c3cbe3SAl Viro 
1288f7b0ba1SAl Viro static void free_chunk(struct audit_chunk *chunk)
12974c3cbe3SAl Viro {
13074c3cbe3SAl Viro 	int i;
13174c3cbe3SAl Viro 
13274c3cbe3SAl Viro 	for (i = 0; i < chunk->count; i++) {
13374c3cbe3SAl Viro 		if (chunk->owners[i].owner)
13474c3cbe3SAl Viro 			put_tree(chunk->owners[i].owner);
13574c3cbe3SAl Viro 	}
13674c3cbe3SAl Viro 	kfree(chunk);
13774c3cbe3SAl Viro }
13874c3cbe3SAl Viro 
13974c3cbe3SAl Viro void audit_put_chunk(struct audit_chunk *chunk)
14074c3cbe3SAl Viro {
1418f7b0ba1SAl Viro 	if (atomic_long_dec_and_test(&chunk->refs))
1428f7b0ba1SAl Viro 		free_chunk(chunk);
1438f7b0ba1SAl Viro }
1448f7b0ba1SAl Viro 
1458f7b0ba1SAl Viro static void __put_chunk(struct rcu_head *rcu)
1468f7b0ba1SAl Viro {
1478f7b0ba1SAl Viro 	struct audit_chunk *chunk = container_of(rcu, struct audit_chunk, head);
1488f7b0ba1SAl Viro 	audit_put_chunk(chunk);
14974c3cbe3SAl Viro }
15074c3cbe3SAl Viro 
151a8375713SJan Kara /*
152a8375713SJan Kara  * Drop reference to the chunk that was held by the mark. This is the reference
153a8375713SJan Kara  * that gets dropped after we've removed the chunk from the hash table and we
154a8375713SJan Kara  * use it to make sure chunk cannot be freed before RCU grace period expires.
155a8375713SJan Kara  */
156a8375713SJan Kara static void audit_mark_put_chunk(struct audit_chunk *chunk)
157a8375713SJan Kara {
158a8375713SJan Kara 	call_rcu(&chunk->head, __put_chunk);
159a8375713SJan Kara }
160a8375713SJan Kara 
161f905c2fcSJan Kara static inline struct audit_tree_mark *audit_mark(struct fsnotify_mark *mark)
1625f516130SJan Kara {
163f905c2fcSJan Kara 	return container_of(mark, struct audit_tree_mark, mark);
1645f516130SJan Kara }
1655f516130SJan Kara 
1665f516130SJan Kara static struct audit_chunk *mark_chunk(struct fsnotify_mark *mark)
1675f516130SJan Kara {
1685f516130SJan Kara 	return audit_mark(mark)->chunk;
1695f516130SJan Kara }
1705f516130SJan Kara 
171f905c2fcSJan Kara static void audit_tree_destroy_watch(struct fsnotify_mark *mark)
17228a3a7ebSEric Paris {
173f905c2fcSJan Kara 	kmem_cache_free(audit_tree_mark_cachep, audit_mark(mark));
1745f516130SJan Kara }
1755f516130SJan Kara 
1765f516130SJan Kara static struct fsnotify_mark *alloc_mark(void)
1775f516130SJan Kara {
1785f516130SJan Kara 	struct audit_tree_mark *amark;
1795f516130SJan Kara 
1805f516130SJan Kara 	amark = kmem_cache_zalloc(audit_tree_mark_cachep, GFP_KERNEL);
1815f516130SJan Kara 	if (!amark)
1825f516130SJan Kara 		return NULL;
1835f516130SJan Kara 	fsnotify_init_mark(&amark->mark, audit_tree_group);
1845f516130SJan Kara 	amark->mark.mask = FS_IN_IGNORED;
1855f516130SJan Kara 	return &amark->mark;
18628a3a7ebSEric Paris }
18728a3a7ebSEric Paris 
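/*
 * Allocate a chunk with room for @count owner slots.  Each owners[i].index
 * is preset to i and the chunk starts out with a single reference.
 */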
18828a3a7ebSEric Paris static struct audit_chunk *alloc_chunk(int count)
18928a3a7ebSEric Paris {
19028a3a7ebSEric Paris 	struct audit_chunk *chunk;
19128a3a7ebSEric Paris 	size_t size;
19228a3a7ebSEric Paris 	int i;
19328a3a7ebSEric Paris 
19428a3a7ebSEric Paris 	size = offsetof(struct audit_chunk, owners) + count * sizeof(struct node);
19528a3a7ebSEric Paris 	chunk = kzalloc(size, GFP_KERNEL);
19628a3a7ebSEric Paris 	if (!chunk)
19728a3a7ebSEric Paris 		return NULL;
19828a3a7ebSEric Paris 
19928a3a7ebSEric Paris 	INIT_LIST_HEAD(&chunk->hash);
20028a3a7ebSEric Paris 	INIT_LIST_HEAD(&chunk->trees);
20128a3a7ebSEric Paris 	chunk->count = count;
20228a3a7ebSEric Paris 	atomic_long_set(&chunk->refs, 1);
20328a3a7ebSEric Paris 	for (i = 0; i < count; i++) {
20428a3a7ebSEric Paris 		INIT_LIST_HEAD(&chunk->owners[i].list);
20528a3a7ebSEric Paris 		chunk->owners[i].index = i;
20628a3a7ebSEric Paris 	}
20728a3a7ebSEric Paris 	return chunk;
20828a3a7ebSEric Paris }
20928a3a7ebSEric Paris 
21074c3cbe3SAl Viro enum {HASH_SIZE = 128};
21174c3cbe3SAl Viro static struct list_head chunk_hash_heads[HASH_SIZE];
21274c3cbe3SAl Viro static __cacheline_aligned_in_smp DEFINE_SPINLOCK(hash_lock);
21374c3cbe3SAl Viro 
214f410ff65SJan Kara /* Return the search key in our hash table for the given inode. */
215f410ff65SJan Kara static unsigned long inode_to_key(const struct inode *inode)
21674c3cbe3SAl Viro {
21736f10f55SAmir Goldstein 	/* Use address pointed to by connector->obj as the key */
21836f10f55SAmir Goldstein 	return (unsigned long)&inode->i_fsnotify_marks;
219f410ff65SJan Kara }
220f410ff65SJan Kara 
221f410ff65SJan Kara static inline struct list_head *chunk_hash(unsigned long key)
222f410ff65SJan Kara {
223f410ff65SJan Kara 	unsigned long n = key / L1_CACHE_BYTES;
22474c3cbe3SAl Viro 	return chunk_hash_heads + n % HASH_SIZE;
22574c3cbe3SAl Viro }
22674c3cbe3SAl Viro 
227f905c2fcSJan Kara /* hash_lock & mark->group->mark_mutex are held by the caller */
22874c3cbe3SAl Viro static void insert_hash(struct audit_chunk *chunk)
22974c3cbe3SAl Viro {
23028a3a7ebSEric Paris 	struct list_head *list;
23128a3a7ebSEric Paris 
2321635e572SJan Kara 	/*
2331635e572SJan Kara 	 * Make sure chunk is fully initialized before making it visible in the
2341635e572SJan Kara 	 * hash. Pairs with a data dependency barrier in READ_ONCE() in
2351635e572SJan Kara 	 * audit_tree_lookup().
2361635e572SJan Kara 	 */
2371635e572SJan Kara 	smp_wmb();
2388d20d6e9SJan Kara 	WARN_ON_ONCE(!chunk->key);
2398d20d6e9SJan Kara 	list = chunk_hash(chunk->key);
24074c3cbe3SAl Viro 	list_add_rcu(&chunk->hash, list);
24174c3cbe3SAl Viro }
24274c3cbe3SAl Viro 
24374c3cbe3SAl Viro /* called under rcu_read_lock */
24474c3cbe3SAl Viro struct audit_chunk *audit_tree_lookup(const struct inode *inode)
24574c3cbe3SAl Viro {
246f410ff65SJan Kara 	unsigned long key = inode_to_key(inode);
247f410ff65SJan Kara 	struct list_head *list = chunk_hash(key);
2486793a051SPaul E. McKenney 	struct audit_chunk *p;
24974c3cbe3SAl Viro 
2506793a051SPaul E. McKenney 	list_for_each_entry_rcu(p, list, hash) {
2511635e572SJan Kara 		/*
2521635e572SJan Kara 		 * We use a data dependency barrier in READ_ONCE() to make sure
2531635e572SJan Kara 		 * the chunk we see is fully initialized.
2541635e572SJan Kara 		 */
2551635e572SJan Kara 		if (READ_ONCE(p->key) == key) {
2568f7b0ba1SAl Viro 			atomic_long_inc(&p->refs);
25774c3cbe3SAl Viro 			return p;
25874c3cbe3SAl Viro 		}
25974c3cbe3SAl Viro 	}
26074c3cbe3SAl Viro 	return NULL;
26174c3cbe3SAl Viro }
26274c3cbe3SAl Viro 
2636f1b5d7aSYaowei Bai bool audit_tree_match(struct audit_chunk *chunk, struct audit_tree *tree)
26474c3cbe3SAl Viro {
26574c3cbe3SAl Viro 	int n;
26674c3cbe3SAl Viro 	for (n = 0; n < chunk->count; n++)
26774c3cbe3SAl Viro 		if (chunk->owners[n].owner == tree)
2686f1b5d7aSYaowei Bai 			return true;
2696f1b5d7aSYaowei Bai 	return false;
27074c3cbe3SAl Viro }
27174c3cbe3SAl Viro 
27274c3cbe3SAl Viro /* tagging and untagging inodes with trees */
27374c3cbe3SAl Viro 
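/*
 * Recover the containing chunk from one of its owners[] entries.  The low
 * bits of node->index hold the entry's position in owners[] (bit 31 is the
 * "will prune" flag), so stripping bit 31 and stepping back that many
 * entries lands us on owners[0], from which container_of() yields the chunk.
 * For example, for the entry at owners[2]: p->index & ~(1U<<31) == 2, and
 * p - 2 points at owners[0].
 */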
2748f7b0ba1SAl Viro static struct audit_chunk *find_chunk(struct node *p)
27574c3cbe3SAl Viro {
2768f7b0ba1SAl Viro 	int index = p->index & ~(1U<<31);
2778f7b0ba1SAl Viro 	p -= index;
2788f7b0ba1SAl Viro 	return container_of(p, struct audit_chunk, owners[0]);
2798f7b0ba1SAl Viro }
2808f7b0ba1SAl Viro 
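/*
 * Update the mark <-> chunk association in both directions: point the mark
 * at @chunk and clear the back-pointer of the chunk previously attached.
 * The caller must hold hash_lock.
 */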
281f905c2fcSJan Kara static void replace_mark_chunk(struct fsnotify_mark *mark,
28283d23bc8SJan Kara 			       struct audit_chunk *chunk)
28383d23bc8SJan Kara {
28483d23bc8SJan Kara 	struct audit_chunk *old;
28583d23bc8SJan Kara 
28683d23bc8SJan Kara 	assert_spin_locked(&hash_lock);
287f905c2fcSJan Kara 	old = mark_chunk(mark);
288f905c2fcSJan Kara 	audit_mark(mark)->chunk = chunk;
28983d23bc8SJan Kara 	if (chunk)
290f905c2fcSJan Kara 		chunk->mark = mark;
29183d23bc8SJan Kara 	if (old)
29283d23bc8SJan Kara 		old->mark = NULL;
29383d23bc8SJan Kara }
29483d23bc8SJan Kara 
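/*
 * Move everything from @old to @new: the hash key, the trees rooted here,
 * and all live owner entries (empty slots are compacted away).  Finally
 * attach @new to the mark and make it visible in the hash in place of @old.
 * Called with hash_lock and audit_tree_group->mark_mutex held.
 */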
295c22fcde7SJan Kara static void replace_chunk(struct audit_chunk *new, struct audit_chunk *old)
296d31b326dSJan Kara {
297d31b326dSJan Kara 	struct audit_tree *owner;
298d31b326dSJan Kara 	int i, j;
299d31b326dSJan Kara 
300d31b326dSJan Kara 	new->key = old->key;
301d31b326dSJan Kara 	list_splice_init(&old->trees, &new->trees);
302d31b326dSJan Kara 	list_for_each_entry(owner, &new->trees, same_root)
303d31b326dSJan Kara 		owner->root = new;
304d31b326dSJan Kara 	for (i = j = 0; j < old->count; i++, j++) {
305c22fcde7SJan Kara 		if (!old->owners[j].owner) {
306d31b326dSJan Kara 			i--;
307d31b326dSJan Kara 			continue;
308d31b326dSJan Kara 		}
309d31b326dSJan Kara 		owner = old->owners[j].owner;
310d31b326dSJan Kara 		new->owners[i].owner = owner;
311d31b326dSJan Kara 		new->owners[i].index = old->owners[j].index - j + i;
312d31b326dSJan Kara 		if (!owner) /* result of earlier fallback */
313d31b326dSJan Kara 			continue;
314d31b326dSJan Kara 		get_tree(owner);
315d31b326dSJan Kara 		list_replace_init(&old->owners[j].list, &new->owners[i].list);
316d31b326dSJan Kara 	}
31783d23bc8SJan Kara 	replace_mark_chunk(old->mark, new);
318d31b326dSJan Kara 	/*
319d31b326dSJan Kara 	 * Make sure chunk is fully initialized before making it visible in the
320d31b326dSJan Kara 	 * hash. Pairs with a data dependency barrier in READ_ONCE() in
321d31b326dSJan Kara 	 * audit_tree_lookup().
322d31b326dSJan Kara 	 */
323d31b326dSJan Kara 	smp_wmb();
324d31b326dSJan Kara 	list_replace_rcu(&old->hash, &new->hash);
325d31b326dSJan Kara }
326d31b326dSJan Kara 
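/*
 * Detach node @p from its owning tree: forget the tree's root if it was
 * this chunk, unlink the node from tree->chunks and drop the tree
 * reference the node held.  Called with hash_lock held.
 */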
32749a4ee7dSJan Kara static void remove_chunk_node(struct audit_chunk *chunk, struct node *p)
32849a4ee7dSJan Kara {
32949a4ee7dSJan Kara 	struct audit_tree *owner = p->owner;
33049a4ee7dSJan Kara 
33149a4ee7dSJan Kara 	if (owner->root == chunk) {
33249a4ee7dSJan Kara 		list_del_init(&owner->same_root);
33349a4ee7dSJan Kara 		owner->root = NULL;
33449a4ee7dSJan Kara 	}
33549a4ee7dSJan Kara 	list_del_init(&p->list);
33649a4ee7dSJan Kara 	p->owner = NULL;
33749a4ee7dSJan Kara 	put_tree(owner);
33849a4ee7dSJan Kara }
33949a4ee7dSJan Kara 
340c22fcde7SJan Kara static int chunk_count_trees(struct audit_chunk *chunk)
341c22fcde7SJan Kara {
342c22fcde7SJan Kara 	int i;
343c22fcde7SJan Kara 	int ret = 0;
344c22fcde7SJan Kara 
345c22fcde7SJan Kara 	for (i = 0; i < chunk->count; i++)
346c22fcde7SJan Kara 		if (chunk->owners[i].owner)
347c22fcde7SJan Kara 			ret++;
348c22fcde7SJan Kara 	return ret;
349c22fcde7SJan Kara }
350c22fcde7SJan Kara 
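/*
 * An owner has been removed from the chunk attached to @mark; shrink the
 * chunk accordingly.  If no owners remain, detach the mark and drop the
 * chunk from the hash; otherwise replace the chunk with a smaller copy
 * holding the surviving owners.  Bails out if the mark got detached or
 * repointed while hash_lock was dropped.
 */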
351f905c2fcSJan Kara static void untag_chunk(struct audit_chunk *chunk, struct fsnotify_mark *mark)
3528f7b0ba1SAl Viro {
3538432c700SJan Kara 	struct audit_chunk *new;
354c22fcde7SJan Kara 	int size;
35574c3cbe3SAl Viro 
3568432c700SJan Kara 	mutex_lock(&audit_tree_group->mark_mutex);
3576b3f05d2SJan Kara 	/*
35883d23bc8SJan Kara 	 * mark_mutex stabilizes the chunk attached to the mark, so we can
35983d23bc8SJan Kara 	 * check whether it changed while hash_lock was dropped.
3606b3f05d2SJan Kara 	 */
361f905c2fcSJan Kara 	if (!(mark->flags & FSNOTIFY_MARK_FLAG_ATTACHED) ||
362f905c2fcSJan Kara 	    mark_chunk(mark) != chunk)
3638432c700SJan Kara 		goto out_mutex;
36474c3cbe3SAl Viro 
365c22fcde7SJan Kara 	size = chunk_count_trees(chunk);
36674c3cbe3SAl Viro 	if (!size) {
36774c3cbe3SAl Viro 		spin_lock(&hash_lock);
36874c3cbe3SAl Viro 		list_del_init(&chunk->trees);
36974c3cbe3SAl Viro 		list_del_rcu(&chunk->hash);
370f905c2fcSJan Kara 		replace_mark_chunk(mark, NULL);
37174c3cbe3SAl Viro 		spin_unlock(&hash_lock);
372f905c2fcSJan Kara 		fsnotify_detach_mark(mark);
3738432c700SJan Kara 		mutex_unlock(&audit_tree_group->mark_mutex);
37483d23bc8SJan Kara 		audit_mark_put_chunk(chunk);
375f905c2fcSJan Kara 		fsnotify_free_mark(mark);
3768432c700SJan Kara 		return;
37774c3cbe3SAl Viro 	}
37874c3cbe3SAl Viro 
379c22fcde7SJan Kara 	new = alloc_chunk(size);
38074c3cbe3SAl Viro 	if (!new)
38149a4ee7dSJan Kara 		goto out_mutex;
382f7a998a9SAl Viro 
38374c3cbe3SAl Viro 	spin_lock(&hash_lock);
3841635e572SJan Kara 	/*
385d31b326dSJan Kara 	 * This has to go last when updating chunk as once replace_chunk() is
386d31b326dSJan Kara 	 * called, new RCU readers can see the new chunk.
3871635e572SJan Kara 	 */
388c22fcde7SJan Kara 	replace_chunk(new, chunk);
38974c3cbe3SAl Viro 	spin_unlock(&hash_lock);
3908432c700SJan Kara 	mutex_unlock(&audit_tree_group->mark_mutex);
39183d23bc8SJan Kara 	audit_mark_put_chunk(chunk);
3928432c700SJan Kara 	return;
39374c3cbe3SAl Viro 
39449a4ee7dSJan Kara out_mutex:
3958432c700SJan Kara 	mutex_unlock(&audit_tree_group->mark_mutex);
39674c3cbe3SAl Viro }
39774c3cbe3SAl Viro 
398a5789b07SJan Kara /* Call with group->mark_mutex held, releases it */
39974c3cbe3SAl Viro static int create_chunk(struct inode *inode, struct audit_tree *tree)
40074c3cbe3SAl Viro {
401f905c2fcSJan Kara 	struct fsnotify_mark *mark;
40274c3cbe3SAl Viro 	struct audit_chunk *chunk = alloc_chunk(1);
403a5789b07SJan Kara 
404a5789b07SJan Kara 	if (!chunk) {
405a5789b07SJan Kara 		mutex_unlock(&audit_tree_group->mark_mutex);
40674c3cbe3SAl Viro 		return -ENOMEM;
407a5789b07SJan Kara 	}
40874c3cbe3SAl Viro 
409f905c2fcSJan Kara 	mark = alloc_mark();
410f905c2fcSJan Kara 	if (!mark) {
41183d23bc8SJan Kara 		mutex_unlock(&audit_tree_group->mark_mutex);
41283d23bc8SJan Kara 		kfree(chunk);
41383d23bc8SJan Kara 		return -ENOMEM;
41483d23bc8SJan Kara 	}
41583d23bc8SJan Kara 
416f905c2fcSJan Kara 	if (fsnotify_add_inode_mark_locked(mark, inode, 0)) {
417a5789b07SJan Kara 		mutex_unlock(&audit_tree_group->mark_mutex);
418f905c2fcSJan Kara 		fsnotify_put_mark(mark);
41983d23bc8SJan Kara 		kfree(chunk);
42074c3cbe3SAl Viro 		return -ENOSPC;
42174c3cbe3SAl Viro 	}
42274c3cbe3SAl Viro 
42374c3cbe3SAl Viro 	spin_lock(&hash_lock);
42474c3cbe3SAl Viro 	if (tree->goner) {
42574c3cbe3SAl Viro 		spin_unlock(&hash_lock);
426f905c2fcSJan Kara 		fsnotify_detach_mark(mark);
427a5789b07SJan Kara 		mutex_unlock(&audit_tree_group->mark_mutex);
428f905c2fcSJan Kara 		fsnotify_free_mark(mark);
429f905c2fcSJan Kara 		fsnotify_put_mark(mark);
43083d23bc8SJan Kara 		kfree(chunk);
43174c3cbe3SAl Viro 		return 0;
43274c3cbe3SAl Viro 	}
433f905c2fcSJan Kara 	replace_mark_chunk(mark, chunk);
43474c3cbe3SAl Viro 	chunk->owners[0].index = (1U << 31);
43574c3cbe3SAl Viro 	chunk->owners[0].owner = tree;
43674c3cbe3SAl Viro 	get_tree(tree);
43774c3cbe3SAl Viro 	list_add(&chunk->owners[0].list, &tree->chunks);
43874c3cbe3SAl Viro 	if (!tree->root) {
43974c3cbe3SAl Viro 		tree->root = chunk;
44074c3cbe3SAl Viro 		list_add(&tree->same_root, &chunk->trees);
44174c3cbe3SAl Viro 	}
4428d20d6e9SJan Kara 	chunk->key = inode_to_key(inode);
4431635e572SJan Kara 	/*
4441635e572SJan Kara 	 * Inserting into the hash table has to go last as once we do that RCU
4451635e572SJan Kara 	 * readers can see the chunk.
4461635e572SJan Kara 	 */
44774c3cbe3SAl Viro 	insert_hash(chunk);
44874c3cbe3SAl Viro 	spin_unlock(&hash_lock);
449a5789b07SJan Kara 	mutex_unlock(&audit_tree_group->mark_mutex);
45083d23bc8SJan Kara 	/*
45183d23bc8SJan Kara 	 * Drop our initial reference. When the mark we point to is getting
45283d23bc8SJan Kara 	 * freed, we get notified through the ->freeing_mark callback and
45383d23bc8SJan Kara 	 * clean up the chunk pointing to this mark.
45483d23bc8SJan Kara 	 */
455f905c2fcSJan Kara 	fsnotify_put_mark(mark);
45674c3cbe3SAl Viro 	return 0;
45774c3cbe3SAl Viro }
45874c3cbe3SAl Viro 
45974c3cbe3SAl Viro /* the first tagged inode becomes root of tree */
46074c3cbe3SAl Viro static int tag_chunk(struct inode *inode, struct audit_tree *tree)
46174c3cbe3SAl Viro {
462f905c2fcSJan Kara 	struct fsnotify_mark *mark;
46374c3cbe3SAl Viro 	struct audit_chunk *chunk, *old;
46474c3cbe3SAl Viro 	struct node *p;
46574c3cbe3SAl Viro 	int n;
46674c3cbe3SAl Viro 
467a5789b07SJan Kara 	mutex_lock(&audit_tree_group->mark_mutex);
468f905c2fcSJan Kara 	mark = fsnotify_find_mark(&inode->i_fsnotify_marks, audit_tree_group);
469f905c2fcSJan Kara 	if (!mark)
47074c3cbe3SAl Viro 		return create_chunk(inode, tree);
47174c3cbe3SAl Viro 
47283d23bc8SJan Kara 	/*
47383d23bc8SJan Kara 	 * The mark we found is guaranteed to be attached, and mark_mutex
47483d23bc8SJan Kara 	 * protects it from getting detached; thus there is guaranteed to be
47583d23bc8SJan Kara 	 * a chunk attached to the mark.
47683d23bc8SJan Kara 	 */
47774c3cbe3SAl Viro 	/* are we already there? */
47874c3cbe3SAl Viro 	spin_lock(&hash_lock);
479f905c2fcSJan Kara 	old = mark_chunk(mark);
48074c3cbe3SAl Viro 	for (n = 0; n < old->count; n++) {
48174c3cbe3SAl Viro 		if (old->owners[n].owner == tree) {
48274c3cbe3SAl Viro 			spin_unlock(&hash_lock);
483a5789b07SJan Kara 			mutex_unlock(&audit_tree_group->mark_mutex);
484f905c2fcSJan Kara 			fsnotify_put_mark(mark);
48574c3cbe3SAl Viro 			return 0;
48674c3cbe3SAl Viro 		}
48774c3cbe3SAl Viro 	}
48874c3cbe3SAl Viro 	spin_unlock(&hash_lock);
48974c3cbe3SAl Viro 
49074c3cbe3SAl Viro 	chunk = alloc_chunk(old->count + 1);
491b4c30aadSAl Viro 	if (!chunk) {
492a5789b07SJan Kara 		mutex_unlock(&audit_tree_group->mark_mutex);
493f905c2fcSJan Kara 		fsnotify_put_mark(mark);
49474c3cbe3SAl Viro 		return -ENOMEM;
495b4c30aadSAl Viro 	}
49674c3cbe3SAl Viro 
49774c3cbe3SAl Viro 	spin_lock(&hash_lock);
49874c3cbe3SAl Viro 	if (tree->goner) {
49974c3cbe3SAl Viro 		spin_unlock(&hash_lock);
500a5789b07SJan Kara 		mutex_unlock(&audit_tree_group->mark_mutex);
501f905c2fcSJan Kara 		fsnotify_put_mark(mark);
50283d23bc8SJan Kara 		kfree(chunk);
50374c3cbe3SAl Viro 		return 0;
50474c3cbe3SAl Viro 	}
505d31b326dSJan Kara 	p = &chunk->owners[chunk->count - 1];
50674c3cbe3SAl Viro 	p->index = (chunk->count - 1) | (1U<<31);
50774c3cbe3SAl Viro 	p->owner = tree;
50874c3cbe3SAl Viro 	get_tree(tree);
50974c3cbe3SAl Viro 	list_add(&p->list, &tree->chunks);
51074c3cbe3SAl Viro 	if (!tree->root) {
51174c3cbe3SAl Viro 		tree->root = chunk;
51274c3cbe3SAl Viro 		list_add(&tree->same_root, &chunk->trees);
51374c3cbe3SAl Viro 	}
5141635e572SJan Kara 	/*
515d31b326dSJan Kara 	 * This has to go last when updating chunk as once replace_chunk() is
516d31b326dSJan Kara 	 * called, new RCU readers can see the new chunk.
5171635e572SJan Kara 	 */
518c22fcde7SJan Kara 	replace_chunk(chunk, old);
51974c3cbe3SAl Viro 	spin_unlock(&hash_lock);
520a5789b07SJan Kara 	mutex_unlock(&audit_tree_group->mark_mutex);
521f905c2fcSJan Kara 	fsnotify_put_mark(mark); /* pair to fsnotify_find_mark */
52283d23bc8SJan Kara 	audit_mark_put_chunk(old);
52383d23bc8SJan Kara 
52474c3cbe3SAl Viro 	return 0;
52574c3cbe3SAl Viro }
52674c3cbe3SAl Viro 
5279e36a5d4SRichard Guy Briggs static void audit_tree_log_remove_rule(struct audit_context *context,
5289e36a5d4SRichard Guy Briggs 				       struct audit_krule *rule)
52974c3cbe3SAl Viro {
53074c3cbe3SAl Viro 	struct audit_buffer *ab;
53174c3cbe3SAl Viro 
53265a8766fSRichard Guy Briggs 	if (!audit_enabled)
53365a8766fSRichard Guy Briggs 		return;
5349e36a5d4SRichard Guy Briggs 	ab = audit_log_start(context, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
5350644ec0cSKees Cook 	if (unlikely(!ab))
5360644ec0cSKees Cook 		return;
537d0a3f18aSPaul Moore 	audit_log_format(ab, "op=remove_rule dir=");
53874c3cbe3SAl Viro 	audit_log_untrustedstring(ab, rule->tree->pathname);
5399d960985SEric Paris 	audit_log_key(ab, rule->filterkey);
54074c3cbe3SAl Viro 	audit_log_format(ab, " list=%d res=1", rule->listnr);
54174c3cbe3SAl Viro 	audit_log_end(ab);
5420644ec0cSKees Cook }
5430644ec0cSKees Cook 
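/*
 * Unhook all rules referring to @tree and free the fully initialized ones,
 * logging a configuration change record for each.  Called with
 * audit_filter_mutex held.
 */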
5449e36a5d4SRichard Guy Briggs static void kill_rules(struct audit_context *context, struct audit_tree *tree)
5450644ec0cSKees Cook {
5460644ec0cSKees Cook 	struct audit_krule *rule, *next;
5470644ec0cSKees Cook 	struct audit_entry *entry;
5480644ec0cSKees Cook 
5490644ec0cSKees Cook 	list_for_each_entry_safe(rule, next, &tree->rules, rlist) {
5500644ec0cSKees Cook 		entry = container_of(rule, struct audit_entry, rule);
5510644ec0cSKees Cook 
5520644ec0cSKees Cook 		list_del_init(&rule->rlist);
5530644ec0cSKees Cook 		if (rule->tree) {
5540644ec0cSKees Cook 			/* not a half-baked one */
5559e36a5d4SRichard Guy Briggs 			audit_tree_log_remove_rule(context, rule);
55634d99af5SRichard Guy Briggs 			if (entry->rule.exe)
55734d99af5SRichard Guy Briggs 				audit_remove_mark(entry->rule.exe);
55874c3cbe3SAl Viro 			rule->tree = NULL;
55974c3cbe3SAl Viro 			list_del_rcu(&entry->list);
560e45aa212SAl Viro 			list_del(&entry->rule.list);
56174c3cbe3SAl Viro 			call_rcu(&entry->rcu, audit_free_rule_rcu);
56274c3cbe3SAl Viro 		}
56374c3cbe3SAl Viro 	}
56474c3cbe3SAl Viro }
56574c3cbe3SAl Viro 
56674c3cbe3SAl Viro /*
5678432c700SJan Kara  * Remove the tree from its chunks. If 'tagged' is set, remove the tree only
5688432c700SJan Kara  * from tagged chunks. The function expects that tagged chunks are all at the
5698432c700SJan Kara  * beginning of the chunks list.
57074c3cbe3SAl Viro  */
5718432c700SJan Kara static void prune_tree_chunks(struct audit_tree *victim, bool tagged)
57274c3cbe3SAl Viro {
57374c3cbe3SAl Viro 	spin_lock(&hash_lock);
57474c3cbe3SAl Viro 	while (!list_empty(&victim->chunks)) {
57574c3cbe3SAl Viro 		struct node *p;
5768432c700SJan Kara 		struct audit_chunk *chunk;
5778432c700SJan Kara 		struct fsnotify_mark *mark;
57874c3cbe3SAl Viro 
5798432c700SJan Kara 		p = list_first_entry(&victim->chunks, struct node, list);
5808432c700SJan Kara 		/* have we run out of marked? */
5818432c700SJan Kara 		if (tagged && !(p->index & (1U<<31)))
5828432c700SJan Kara 			break;
5838432c700SJan Kara 		chunk = find_chunk(p);
5848432c700SJan Kara 		mark = chunk->mark;
5858432c700SJan Kara 		remove_chunk_node(chunk, p);
58683d23bc8SJan Kara 		/* Racing with audit_tree_freeing_mark()? */
58783d23bc8SJan Kara 		if (!mark)
58883d23bc8SJan Kara 			continue;
5898432c700SJan Kara 		fsnotify_get_mark(mark);
5908432c700SJan Kara 		spin_unlock(&hash_lock);
59174c3cbe3SAl Viro 
5928432c700SJan Kara 		untag_chunk(chunk, mark);
5938432c700SJan Kara 		fsnotify_put_mark(mark);
5948432c700SJan Kara 
5958432c700SJan Kara 		spin_lock(&hash_lock);
59674c3cbe3SAl Viro 	}
59774c3cbe3SAl Viro 	spin_unlock(&hash_lock);
59874c3cbe3SAl Viro 	put_tree(victim);
59974c3cbe3SAl Viro }
60074c3cbe3SAl Viro 
6018432c700SJan Kara /*
6028432c700SJan Kara  * finish killing struct audit_tree
6038432c700SJan Kara  */
6048432c700SJan Kara static void prune_one(struct audit_tree *victim)
6058432c700SJan Kara {
6068432c700SJan Kara 	prune_tree_chunks(victim, false);
6078432c700SJan Kara }
6088432c700SJan Kara 
60974c3cbe3SAl Viro /* trim the uncommitted chunks from tree */
61074c3cbe3SAl Viro 
61174c3cbe3SAl Viro static void trim_marked(struct audit_tree *tree)
61274c3cbe3SAl Viro {
61374c3cbe3SAl Viro 	struct list_head *p, *q;
61474c3cbe3SAl Viro 	spin_lock(&hash_lock);
61574c3cbe3SAl Viro 	if (tree->goner) {
61674c3cbe3SAl Viro 		spin_unlock(&hash_lock);
61774c3cbe3SAl Viro 		return;
61874c3cbe3SAl Viro 	}
61974c3cbe3SAl Viro 	/* reorder */
62074c3cbe3SAl Viro 	for (p = tree->chunks.next; p != &tree->chunks; p = q) {
62174c3cbe3SAl Viro 		struct node *node = list_entry(p, struct node, list);
62274c3cbe3SAl Viro 		q = p->next;
62374c3cbe3SAl Viro 		if (node->index & (1U<<31)) {
62474c3cbe3SAl Viro 			list_del_init(p);
62574c3cbe3SAl Viro 			list_add(p, &tree->chunks);
62674c3cbe3SAl Viro 		}
62774c3cbe3SAl Viro 	}
6288432c700SJan Kara 	spin_unlock(&hash_lock);
62974c3cbe3SAl Viro 
6308432c700SJan Kara 	prune_tree_chunks(tree, true);
63174c3cbe3SAl Viro 
6328432c700SJan Kara 	spin_lock(&hash_lock);
63374c3cbe3SAl Viro 	if (!tree->root && !tree->goner) {
63474c3cbe3SAl Viro 		tree->goner = 1;
63574c3cbe3SAl Viro 		spin_unlock(&hash_lock);
63674c3cbe3SAl Viro 		mutex_lock(&audit_filter_mutex);
6379e36a5d4SRichard Guy Briggs 		kill_rules(audit_context(), tree);
63874c3cbe3SAl Viro 		list_del_init(&tree->list);
63974c3cbe3SAl Viro 		mutex_unlock(&audit_filter_mutex);
64074c3cbe3SAl Viro 		prune_one(tree);
64174c3cbe3SAl Viro 	} else {
64274c3cbe3SAl Viro 		spin_unlock(&hash_lock);
64374c3cbe3SAl Viro 	}
64474c3cbe3SAl Viro }
64574c3cbe3SAl Viro 
646916d7576SAl Viro static void audit_schedule_prune(void);
647916d7576SAl Viro 
64874c3cbe3SAl Viro /* called with audit_filter_mutex */
64974c3cbe3SAl Viro int audit_remove_tree_rule(struct audit_krule *rule)
65074c3cbe3SAl Viro {
65174c3cbe3SAl Viro 	struct audit_tree *tree;
65274c3cbe3SAl Viro 	tree = rule->tree;
65374c3cbe3SAl Viro 	if (tree) {
65474c3cbe3SAl Viro 		spin_lock(&hash_lock);
65574c3cbe3SAl Viro 		list_del_init(&rule->rlist);
65674c3cbe3SAl Viro 		if (list_empty(&tree->rules) && !tree->goner) {
65774c3cbe3SAl Viro 			tree->root = NULL;
65874c3cbe3SAl Viro 			list_del_init(&tree->same_root);
65974c3cbe3SAl Viro 			tree->goner = 1;
66074c3cbe3SAl Viro 			list_move(&tree->list, &prune_list);
66174c3cbe3SAl Viro 			rule->tree = NULL;
66274c3cbe3SAl Viro 			spin_unlock(&hash_lock);
66374c3cbe3SAl Viro 			audit_schedule_prune();
66474c3cbe3SAl Viro 			return 1;
66574c3cbe3SAl Viro 		}
66674c3cbe3SAl Viro 		rule->tree = NULL;
66774c3cbe3SAl Viro 		spin_unlock(&hash_lock);
66874c3cbe3SAl Viro 		return 1;
66974c3cbe3SAl Viro 	}
67074c3cbe3SAl Viro 	return 0;
67174c3cbe3SAl Viro }
67274c3cbe3SAl Viro 
6731f707137SAl Viro static int compare_root(struct vfsmount *mnt, void *arg)
6741f707137SAl Viro {
675f410ff65SJan Kara 	return inode_to_key(d_backing_inode(mnt->mnt_root)) ==
676f410ff65SJan Kara 	       (unsigned long)arg;
6771f707137SAl Viro }
6781f707137SAl Viro 
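/*
 * Walk all trees on tree_list: for each one, collect the mounts under its
 * pathname, mark every chunk of the tree for pruning, clear the mark for
 * chunks whose inode is the root of one of those mounts, and trim the rest.
 */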
67974c3cbe3SAl Viro void audit_trim_trees(void)
68074c3cbe3SAl Viro {
68174c3cbe3SAl Viro 	struct list_head cursor;
68274c3cbe3SAl Viro 
68374c3cbe3SAl Viro 	mutex_lock(&audit_filter_mutex);
68474c3cbe3SAl Viro 	list_add(&cursor, &tree_list);
68574c3cbe3SAl Viro 	while (cursor.next != &tree_list) {
68674c3cbe3SAl Viro 		struct audit_tree *tree;
68798bc993fSAl Viro 		struct path path;
68874c3cbe3SAl Viro 		struct vfsmount *root_mnt;
68974c3cbe3SAl Viro 		struct node *node;
69074c3cbe3SAl Viro 		int err;
69174c3cbe3SAl Viro 
69274c3cbe3SAl Viro 		tree = container_of(cursor.next, struct audit_tree, list);
69374c3cbe3SAl Viro 		get_tree(tree);
69474c3cbe3SAl Viro 		list_del(&cursor);
69574c3cbe3SAl Viro 		list_add(&cursor, &tree->list);
69674c3cbe3SAl Viro 		mutex_unlock(&audit_filter_mutex);
69774c3cbe3SAl Viro 
69898bc993fSAl Viro 		err = kern_path(tree->pathname, 0, &path);
69974c3cbe3SAl Viro 		if (err)
70074c3cbe3SAl Viro 			goto skip_it;
70174c3cbe3SAl Viro 
702589ff870SAl Viro 		root_mnt = collect_mounts(&path);
70398bc993fSAl Viro 		path_put(&path);
704be34d1a3SDavid Howells 		if (IS_ERR(root_mnt))
70574c3cbe3SAl Viro 			goto skip_it;
70674c3cbe3SAl Viro 
70774c3cbe3SAl Viro 		spin_lock(&hash_lock);
70874c3cbe3SAl Viro 		list_for_each_entry(node, &tree->chunks, list) {
70928a3a7ebSEric Paris 			struct audit_chunk *chunk = find_chunk(node);
71025985edcSLucas De Marchi 			/* this could be NULL if the watch is dying else where... */
71174c3cbe3SAl Viro 			node->index |= 1U<<31;
712f410ff65SJan Kara 			if (iterate_mounts(compare_root,
7138d20d6e9SJan Kara 					   (void *)(chunk->key),
714f410ff65SJan Kara 					   root_mnt))
71574c3cbe3SAl Viro 				node->index &= ~(1U<<31);
71674c3cbe3SAl Viro 		}
71774c3cbe3SAl Viro 		spin_unlock(&hash_lock);
71874c3cbe3SAl Viro 		trim_marked(tree);
71974c3cbe3SAl Viro 		drop_collected_mounts(root_mnt);
72074c3cbe3SAl Viro skip_it:
72112b2f117SChen Gang 		put_tree(tree);
72274c3cbe3SAl Viro 		mutex_lock(&audit_filter_mutex);
72374c3cbe3SAl Viro 	}
72474c3cbe3SAl Viro 	list_del(&cursor);
72574c3cbe3SAl Viro 	mutex_unlock(&audit_filter_mutex);
72674c3cbe3SAl Viro }
72774c3cbe3SAl Viro 
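/*
 * Validate and set up a tree rule: it must be an exit-filter rule using an
 * equality comparison on an absolute path, and must not already carry an
 * inode filter, a watch or a tree.
 */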
72874c3cbe3SAl Viro int audit_make_tree(struct audit_krule *rule, char *pathname, u32 op)
72974c3cbe3SAl Viro {
73074c3cbe3SAl Viro 
73174c3cbe3SAl Viro 	if (pathname[0] != '/' ||
73274c3cbe3SAl Viro 	    rule->listnr != AUDIT_FILTER_EXIT ||
7335af75d8dSAl Viro 	    op != Audit_equal ||
73474c3cbe3SAl Viro 	    rule->inode_f || rule->watch || rule->tree)
73574c3cbe3SAl Viro 		return -EINVAL;
73674c3cbe3SAl Viro 	rule->tree = alloc_tree(pathname);
73774c3cbe3SAl Viro 	if (!rule->tree)
73874c3cbe3SAl Viro 		return -ENOMEM;
73974c3cbe3SAl Viro 	return 0;
74074c3cbe3SAl Viro }
74174c3cbe3SAl Viro 
74274c3cbe3SAl Viro void audit_put_tree(struct audit_tree *tree)
74374c3cbe3SAl Viro {
74474c3cbe3SAl Viro 	put_tree(tree);
74574c3cbe3SAl Viro }
74674c3cbe3SAl Viro 
7471f707137SAl Viro static int tag_mount(struct vfsmount *mnt, void *arg)
7481f707137SAl Viro {
7493b362157SDavid Howells 	return tag_chunk(d_backing_inode(mnt->mnt_root), arg);
7501f707137SAl Viro }
7511f707137SAl Viro 
752f1aaf262SImre Palik /*
753f1aaf262SImre Palik  * That gets run when evict_chunk() ends up needing to kill audit_tree.
754f1aaf262SImre Palik  * Runs from a separate thread.
755f1aaf262SImre Palik  */
756f1aaf262SImre Palik static int prune_tree_thread(void *unused)
757f1aaf262SImre Palik {
758f1aaf262SImre Palik 	for (;;) {
7590bf676d1SJiri Slaby 		if (list_empty(&prune_list)) {
760f1aaf262SImre Palik 			set_current_state(TASK_INTERRUPTIBLE);
761f1aaf262SImre Palik 			schedule();
7620bf676d1SJiri Slaby 		}
763f1aaf262SImre Palik 
764ce423631SPaul Moore 		audit_ctl_lock();
765f1aaf262SImre Palik 		mutex_lock(&audit_filter_mutex);
766f1aaf262SImre Palik 
767f1aaf262SImre Palik 		while (!list_empty(&prune_list)) {
768f1aaf262SImre Palik 			struct audit_tree *victim;
769f1aaf262SImre Palik 
770f1aaf262SImre Palik 			victim = list_entry(prune_list.next,
771f1aaf262SImre Palik 					struct audit_tree, list);
772f1aaf262SImre Palik 			list_del_init(&victim->list);
773f1aaf262SImre Palik 
774f1aaf262SImre Palik 			mutex_unlock(&audit_filter_mutex);
775f1aaf262SImre Palik 
776f1aaf262SImre Palik 			prune_one(victim);
777f1aaf262SImre Palik 
778f1aaf262SImre Palik 			mutex_lock(&audit_filter_mutex);
779f1aaf262SImre Palik 		}
780f1aaf262SImre Palik 
781f1aaf262SImre Palik 		mutex_unlock(&audit_filter_mutex);
782ce423631SPaul Moore 		audit_ctl_unlock();
783f1aaf262SImre Palik 	}
784f1aaf262SImre Palik 	return 0;
785f1aaf262SImre Palik }
786f1aaf262SImre Palik 
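/* Start the pruning thread on first use; subsequent calls are no-ops. */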
787f1aaf262SImre Palik static int audit_launch_prune(void)
788f1aaf262SImre Palik {
789f1aaf262SImre Palik 	if (prune_thread)
790f1aaf262SImre Palik 		return 0;
7910bf676d1SJiri Slaby 	prune_thread = kthread_run(prune_tree_thread, NULL,
792f1aaf262SImre Palik 				"audit_prune_tree");
793f1aaf262SImre Palik 	if (IS_ERR(prune_thread)) {
794f1aaf262SImre Palik 		pr_err("cannot start thread audit_prune_tree\n");
795f1aaf262SImre Palik 		prune_thread = NULL;
796f1aaf262SImre Palik 		return -ENOMEM;
797f1aaf262SImre Palik 	}
7980bf676d1SJiri Slaby 	return 0;
799f1aaf262SImre Palik }
800f1aaf262SImre Palik 
80174c3cbe3SAl Viro /* called with audit_filter_mutex */
80274c3cbe3SAl Viro int audit_add_tree_rule(struct audit_krule *rule)
80374c3cbe3SAl Viro {
80474c3cbe3SAl Viro 	struct audit_tree *seed = rule->tree, *tree;
80598bc993fSAl Viro 	struct path path;
8061f707137SAl Viro 	struct vfsmount *mnt;
80774c3cbe3SAl Viro 	int err;
80874c3cbe3SAl Viro 
809736f3203SChen Gang 	rule->tree = NULL;
81074c3cbe3SAl Viro 	list_for_each_entry(tree, &tree_list, list) {
81174c3cbe3SAl Viro 		if (!strcmp(seed->pathname, tree->pathname)) {
81274c3cbe3SAl Viro 			put_tree(seed);
81374c3cbe3SAl Viro 			rule->tree = tree;
81474c3cbe3SAl Viro 			list_add(&rule->rlist, &tree->rules);
81574c3cbe3SAl Viro 			return 0;
81674c3cbe3SAl Viro 		}
81774c3cbe3SAl Viro 	}
81874c3cbe3SAl Viro 	tree = seed;
81974c3cbe3SAl Viro 	list_add(&tree->list, &tree_list);
82074c3cbe3SAl Viro 	list_add(&rule->rlist, &tree->rules);
82174c3cbe3SAl Viro 	/* do not set rule->tree yet */
82274c3cbe3SAl Viro 	mutex_unlock(&audit_filter_mutex);
82374c3cbe3SAl Viro 
824f1aaf262SImre Palik 	if (unlikely(!prune_thread)) {
825f1aaf262SImre Palik 		err = audit_launch_prune();
826f1aaf262SImre Palik 		if (err)
827f1aaf262SImre Palik 			goto Err;
828f1aaf262SImre Palik 	}
829f1aaf262SImre Palik 
83098bc993fSAl Viro 	err = kern_path(tree->pathname, 0, &path);
83174c3cbe3SAl Viro 	if (err)
83274c3cbe3SAl Viro 		goto Err;
833589ff870SAl Viro 	mnt = collect_mounts(&path);
83498bc993fSAl Viro 	path_put(&path);
835be34d1a3SDavid Howells 	if (IS_ERR(mnt)) {
836be34d1a3SDavid Howells 		err = PTR_ERR(mnt);
83774c3cbe3SAl Viro 		goto Err;
83874c3cbe3SAl Viro 	}
83974c3cbe3SAl Viro 
84074c3cbe3SAl Viro 	get_tree(tree);
8411f707137SAl Viro 	err = iterate_mounts(tag_mount, tree, mnt);
84274c3cbe3SAl Viro 	drop_collected_mounts(mnt);
84374c3cbe3SAl Viro 
84474c3cbe3SAl Viro 	if (!err) {
84574c3cbe3SAl Viro 		struct node *node;
84674c3cbe3SAl Viro 		spin_lock(&hash_lock);
84774c3cbe3SAl Viro 		list_for_each_entry(node, &tree->chunks, list)
84874c3cbe3SAl Viro 			node->index &= ~(1U<<31);
84974c3cbe3SAl Viro 		spin_unlock(&hash_lock);
85074c3cbe3SAl Viro 	} else {
85174c3cbe3SAl Viro 		trim_marked(tree);
85274c3cbe3SAl Viro 		goto Err;
85374c3cbe3SAl Viro 	}
85474c3cbe3SAl Viro 
85574c3cbe3SAl Viro 	mutex_lock(&audit_filter_mutex);
85674c3cbe3SAl Viro 	if (list_empty(&rule->rlist)) {
85774c3cbe3SAl Viro 		put_tree(tree);
85874c3cbe3SAl Viro 		return -ENOENT;
85974c3cbe3SAl Viro 	}
86074c3cbe3SAl Viro 	rule->tree = tree;
86174c3cbe3SAl Viro 	put_tree(tree);
86274c3cbe3SAl Viro 
86374c3cbe3SAl Viro 	return 0;
86474c3cbe3SAl Viro Err:
86574c3cbe3SAl Viro 	mutex_lock(&audit_filter_mutex);
86674c3cbe3SAl Viro 	list_del_init(&tree->list);
86774c3cbe3SAl Viro 	list_del_init(&tree->rules);
86874c3cbe3SAl Viro 	put_tree(tree);
86974c3cbe3SAl Viro 	return err;
87074c3cbe3SAl Viro }
87174c3cbe3SAl Viro 
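/*
 * For every tree whose pathname lies under @old, additionally tag the
 * mounts collected at @new, so the tree's rules also match objects under
 * that location.  Returns the first tagging failure, if any.
 */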
87274c3cbe3SAl Viro int audit_tag_tree(char *old, char *new)
87374c3cbe3SAl Viro {
87474c3cbe3SAl Viro 	struct list_head cursor, barrier;
87574c3cbe3SAl Viro 	int failed = 0;
8762096f759SAl Viro 	struct path path1, path2;
87774c3cbe3SAl Viro 	struct vfsmount *tagged;
87874c3cbe3SAl Viro 	int err;
87974c3cbe3SAl Viro 
8802096f759SAl Viro 	err = kern_path(new, 0, &path2);
88174c3cbe3SAl Viro 	if (err)
88274c3cbe3SAl Viro 		return err;
8832096f759SAl Viro 	tagged = collect_mounts(&path2);
8842096f759SAl Viro 	path_put(&path2);
885be34d1a3SDavid Howells 	if (IS_ERR(tagged))
886be34d1a3SDavid Howells 		return PTR_ERR(tagged);
88774c3cbe3SAl Viro 
8882096f759SAl Viro 	err = kern_path(old, 0, &path1);
88974c3cbe3SAl Viro 	if (err) {
89074c3cbe3SAl Viro 		drop_collected_mounts(tagged);
89174c3cbe3SAl Viro 		return err;
89274c3cbe3SAl Viro 	}
89374c3cbe3SAl Viro 
89474c3cbe3SAl Viro 	mutex_lock(&audit_filter_mutex);
89574c3cbe3SAl Viro 	list_add(&barrier, &tree_list);
89674c3cbe3SAl Viro 	list_add(&cursor, &barrier);
89774c3cbe3SAl Viro 
89874c3cbe3SAl Viro 	while (cursor.next != &tree_list) {
89974c3cbe3SAl Viro 		struct audit_tree *tree;
9002096f759SAl Viro 		int good_one = 0;
90174c3cbe3SAl Viro 
90274c3cbe3SAl Viro 		tree = container_of(cursor.next, struct audit_tree, list);
90374c3cbe3SAl Viro 		get_tree(tree);
90474c3cbe3SAl Viro 		list_del(&cursor);
90574c3cbe3SAl Viro 		list_add(&cursor, &tree->list);
90674c3cbe3SAl Viro 		mutex_unlock(&audit_filter_mutex);
90774c3cbe3SAl Viro 
9082096f759SAl Viro 		err = kern_path(tree->pathname, 0, &path2);
9092096f759SAl Viro 		if (!err) {
9102096f759SAl Viro 			good_one = path_is_under(&path1, &path2);
9112096f759SAl Viro 			path_put(&path2);
91274c3cbe3SAl Viro 		}
91374c3cbe3SAl Viro 
9142096f759SAl Viro 		if (!good_one) {
91574c3cbe3SAl Viro 			put_tree(tree);
91674c3cbe3SAl Viro 			mutex_lock(&audit_filter_mutex);
91774c3cbe3SAl Viro 			continue;
91874c3cbe3SAl Viro 		}
91974c3cbe3SAl Viro 
9201f707137SAl Viro 		failed = iterate_mounts(tag_mount, tree, tagged);
92174c3cbe3SAl Viro 		if (failed) {
92274c3cbe3SAl Viro 			put_tree(tree);
92374c3cbe3SAl Viro 			mutex_lock(&audit_filter_mutex);
92474c3cbe3SAl Viro 			break;
92574c3cbe3SAl Viro 		}
92674c3cbe3SAl Viro 
92774c3cbe3SAl Viro 		mutex_lock(&audit_filter_mutex);
92874c3cbe3SAl Viro 		spin_lock(&hash_lock);
92974c3cbe3SAl Viro 		if (!tree->goner) {
93074c3cbe3SAl Viro 			list_del(&tree->list);
93174c3cbe3SAl Viro 			list_add(&tree->list, &tree_list);
93274c3cbe3SAl Viro 		}
93374c3cbe3SAl Viro 		spin_unlock(&hash_lock);
93474c3cbe3SAl Viro 		put_tree(tree);
93574c3cbe3SAl Viro 	}
93674c3cbe3SAl Viro 
93774c3cbe3SAl Viro 	while (barrier.prev != &tree_list) {
93874c3cbe3SAl Viro 		struct audit_tree *tree;
93974c3cbe3SAl Viro 
94074c3cbe3SAl Viro 		tree = container_of(barrier.prev, struct audit_tree, list);
94174c3cbe3SAl Viro 		get_tree(tree);
94274c3cbe3SAl Viro 		list_del(&tree->list);
94374c3cbe3SAl Viro 		list_add(&tree->list, &barrier);
94474c3cbe3SAl Viro 		mutex_unlock(&audit_filter_mutex);
94574c3cbe3SAl Viro 
94674c3cbe3SAl Viro 		if (!failed) {
94774c3cbe3SAl Viro 			struct node *node;
94874c3cbe3SAl Viro 			spin_lock(&hash_lock);
94974c3cbe3SAl Viro 			list_for_each_entry(node, &tree->chunks, list)
95074c3cbe3SAl Viro 				node->index &= ~(1U<<31);
95174c3cbe3SAl Viro 			spin_unlock(&hash_lock);
95274c3cbe3SAl Viro 		} else {
95374c3cbe3SAl Viro 			trim_marked(tree);
95474c3cbe3SAl Viro 		}
95574c3cbe3SAl Viro 
95674c3cbe3SAl Viro 		put_tree(tree);
95774c3cbe3SAl Viro 		mutex_lock(&audit_filter_mutex);
95874c3cbe3SAl Viro 	}
95974c3cbe3SAl Viro 	list_del(&barrier);
96074c3cbe3SAl Viro 	list_del(&cursor);
96174c3cbe3SAl Viro 	mutex_unlock(&audit_filter_mutex);
9622096f759SAl Viro 	path_put(&path1);
96374c3cbe3SAl Viro 	drop_collected_mounts(tagged);
96474c3cbe3SAl Viro 	return failed;
96574c3cbe3SAl Viro }
96674c3cbe3SAl Viro 
967916d7576SAl Viro 
968916d7576SAl Viro static void audit_schedule_prune(void)
969916d7576SAl Viro {
970f1aaf262SImre Palik 	wake_up_process(prune_thread);
971916d7576SAl Viro }
972916d7576SAl Viro 
973916d7576SAl Viro /*
974916d7576SAl Viro  * ... and that one is done if evict_chunk() decides to delay until the end
975916d7576SAl Viro  * of syscall.  Runs synchronously.
976916d7576SAl Viro  */
9779e36a5d4SRichard Guy Briggs void audit_kill_trees(struct audit_context *context)
978916d7576SAl Viro {
9799e36a5d4SRichard Guy Briggs 	struct list_head *list = &context->killed_trees;
9809e36a5d4SRichard Guy Briggs 
981ce423631SPaul Moore 	audit_ctl_lock();
982916d7576SAl Viro 	mutex_lock(&audit_filter_mutex);
983916d7576SAl Viro 
984916d7576SAl Viro 	while (!list_empty(list)) {
985916d7576SAl Viro 		struct audit_tree *victim;
986916d7576SAl Viro 
987916d7576SAl Viro 		victim = list_entry(list->next, struct audit_tree, list);
9889e36a5d4SRichard Guy Briggs 		kill_rules(context, victim);
989916d7576SAl Viro 		list_del_init(&victim->list);
990916d7576SAl Viro 
991916d7576SAl Viro 		mutex_unlock(&audit_filter_mutex);
992916d7576SAl Viro 
993916d7576SAl Viro 		prune_one(victim);
994916d7576SAl Viro 
995916d7576SAl Viro 		mutex_lock(&audit_filter_mutex);
996916d7576SAl Viro 	}
997916d7576SAl Viro 
998916d7576SAl Viro 	mutex_unlock(&audit_filter_mutex);
999ce423631SPaul Moore 	audit_ctl_unlock();
100074c3cbe3SAl Viro }
100174c3cbe3SAl Viro 
100274c3cbe3SAl Viro /*
100374c3cbe3SAl Viro  *  Here comes the stuff asynchronous to auditctl operations
100474c3cbe3SAl Viro  */
100574c3cbe3SAl Viro 
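/*
 * The inode carrying this chunk is going away.  Kill every tree rooted
 * here (queueing the actual pruning to the prune thread, or to the end of
 * the current syscall if there is a postponed list), and drop the chunk
 * from the hash table.
 */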
100674c3cbe3SAl Viro static void evict_chunk(struct audit_chunk *chunk)
100774c3cbe3SAl Viro {
100874c3cbe3SAl Viro 	struct audit_tree *owner;
1009916d7576SAl Viro 	struct list_head *postponed = audit_killed_trees();
1010916d7576SAl Viro 	int need_prune = 0;
101174c3cbe3SAl Viro 	int n;
101274c3cbe3SAl Viro 
101374c3cbe3SAl Viro 	mutex_lock(&audit_filter_mutex);
101474c3cbe3SAl Viro 	spin_lock(&hash_lock);
101574c3cbe3SAl Viro 	while (!list_empty(&chunk->trees)) {
101674c3cbe3SAl Viro 		owner = list_entry(chunk->trees.next,
101774c3cbe3SAl Viro 				   struct audit_tree, same_root);
101874c3cbe3SAl Viro 		owner->goner = 1;
101974c3cbe3SAl Viro 		owner->root = NULL;
102074c3cbe3SAl Viro 		list_del_init(&owner->same_root);
102174c3cbe3SAl Viro 		spin_unlock(&hash_lock);
1022916d7576SAl Viro 		if (!postponed) {
10239e36a5d4SRichard Guy Briggs 			kill_rules(audit_context(), owner);
102474c3cbe3SAl Viro 			list_move(&owner->list, &prune_list);
1025916d7576SAl Viro 			need_prune = 1;
1026916d7576SAl Viro 		} else {
1027916d7576SAl Viro 			list_move(&owner->list, postponed);
1028916d7576SAl Viro 		}
102974c3cbe3SAl Viro 		spin_lock(&hash_lock);
103074c3cbe3SAl Viro 	}
103174c3cbe3SAl Viro 	list_del_rcu(&chunk->hash);
103274c3cbe3SAl Viro 	for (n = 0; n < chunk->count; n++)
103374c3cbe3SAl Viro 		list_del_init(&chunk->owners[n].list);
103474c3cbe3SAl Viro 	spin_unlock(&hash_lock);
1035f1aaf262SImre Palik 	mutex_unlock(&audit_filter_mutex);
1036916d7576SAl Viro 	if (need_prune)
1037916d7576SAl Viro 		audit_schedule_prune();
103874c3cbe3SAl Viro }
103974c3cbe3SAl Viro 
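/*
 * We do not act on fsnotify events themselves; the only notification this
 * code cares about is the mark being freed (->freeing_mark below).
 */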
1040b9a1b977SAmir Goldstein static int audit_tree_handle_event(struct fsnotify_mark *mark, u32 mask,
1041b9a1b977SAmir Goldstein 				   struct inode *inode, struct inode *dir,
1042b9a1b977SAmir Goldstein 				   const struct qstr *file_name)
104374c3cbe3SAl Viro {
104483c4c4b0SJan Kara 	return 0;
104528a3a7ebSEric Paris }
104674c3cbe3SAl Viro 
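/*
 * The mark is being torn down (e.g. the watched inode is being evicted).
 * Disconnect it from its chunk under hash_lock, then evict the chunk and
 * drop the chunk reference the mark was holding.
 */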
1047f905c2fcSJan Kara static void audit_tree_freeing_mark(struct fsnotify_mark *mark,
1048f905c2fcSJan Kara 				    struct fsnotify_group *group)
104928a3a7ebSEric Paris {
105083d23bc8SJan Kara 	struct audit_chunk *chunk;
105128a3a7ebSEric Paris 
1052f905c2fcSJan Kara 	mutex_lock(&mark->group->mark_mutex);
105383d23bc8SJan Kara 	spin_lock(&hash_lock);
1054f905c2fcSJan Kara 	chunk = mark_chunk(mark);
1055f905c2fcSJan Kara 	replace_mark_chunk(mark, NULL);
105683d23bc8SJan Kara 	spin_unlock(&hash_lock);
1057f905c2fcSJan Kara 	mutex_unlock(&mark->group->mark_mutex);
105883d23bc8SJan Kara 	if (chunk) {
105974c3cbe3SAl Viro 		evict_chunk(chunk);
106083d23bc8SJan Kara 		audit_mark_put_chunk(chunk);
106183d23bc8SJan Kara 	}
1062b3e8692bSMiklos Szeredi 
1063b3e8692bSMiklos Szeredi 	/*
1064b3e8692bSMiklos Szeredi 	 * We are guaranteed to have at least one reference to the mark from
1065b3e8692bSMiklos Szeredi 	 * either the inode or the caller of fsnotify_destroy_mark().
1066b3e8692bSMiklos Szeredi 	 */
1067f905c2fcSJan Kara 	BUG_ON(refcount_read(&mark->refcnt) < 1);
106874c3cbe3SAl Viro }
106974c3cbe3SAl Viro 
107028a3a7ebSEric Paris static const struct fsnotify_ops audit_tree_ops = {
1071b9a1b977SAmir Goldstein 	.handle_inode_event = audit_tree_handle_event,
107228a3a7ebSEric Paris 	.freeing_mark = audit_tree_freeing_mark,
1073054c636eSJan Kara 	.free_mark = audit_tree_destroy_watch,
107474c3cbe3SAl Viro };
107574c3cbe3SAl Viro 
107674c3cbe3SAl Viro static int __init audit_tree_init(void)
107774c3cbe3SAl Viro {
107874c3cbe3SAl Viro 	int i;
107974c3cbe3SAl Viro 
10805f516130SJan Kara 	audit_tree_mark_cachep = KMEM_CACHE(audit_tree_mark, SLAB_PANIC);
10815f516130SJan Kara 
10820d2e2a1dSEric Paris 	audit_tree_group = fsnotify_alloc_group(&audit_tree_ops);
108328a3a7ebSEric Paris 	if (IS_ERR(audit_tree_group))
108428a3a7ebSEric Paris 		audit_panic("cannot initialize fsnotify group for rectree watches");
108574c3cbe3SAl Viro 
108674c3cbe3SAl Viro 	for (i = 0; i < HASH_SIZE; i++)
108774c3cbe3SAl Viro 		INIT_LIST_HEAD(&chunk_hash_heads[i]);
108874c3cbe3SAl Viro 
108974c3cbe3SAl Viro 	return 0;
109074c3cbe3SAl Viro }
109174c3cbe3SAl Viro __initcall(audit_tree_init);
1092