// SPDX-License-Identifier: GPL-2.0
/*
 * Implementation of the SID table type.
 *
 * Original author: Stephen Smalley, <sds@tycho.nsa.gov>
 * Author: Ondrej Mosnacek, <omosnacek@gmail.com>
 *
 * Copyright (C) 2018 Red Hat, Inc.
 */
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <asm/barrier.h>
#include "flask.h"
#include "security.h"
#include "sidtab.h"

struct sidtab_str_cache {
	struct rcu_head rcu_member;
	struct list_head lru_member;
	struct sidtab_entry *parent;
	u32 len;
	char str[];
};

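/*
 * SIDs of dynamically allocated entries start right above the initial
 * SIDs; these macros convert between a SID and its index in the
 * entry tree.
 */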
#define index_to_sid(index) ((index) + SECINITSID_NUM + 1)
#define sid_to_index(sid) ((sid) - (SECINITSID_NUM + 1))

int sidtab_init(struct sidtab *s)
{
	u32 i;

	memset(s->roots, 0, sizeof(s->roots));

	for (i = 0; i < SECINITSID_NUM; i++)
		s->isids[i].set = 0;

	s->count = 0;
	s->convert = NULL;
	hash_init(s->context_to_sid);

	spin_lock_init(&s->lock);

#if CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0
	s->cache_free_slots = CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE;
	INIT_LIST_HEAD(&s->cache_lru_list);
	spin_lock_init(&s->cache_lock);
#endif

	return 0;
}

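/*
 * Reverse lookup: find the SID mapped to @context, or 0 if none (0 is
 * never a valid SID). Readers walk the hash chains locklessly under
 * RCU; entries are only ever added under s->lock and are never removed
 * while the table is live.
 */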
static u32 context_to_sid(struct sidtab *s, struct context *context, u32 hash)
{
	struct sidtab_entry *entry;
	u32 sid = 0;

	rcu_read_lock();
	hash_for_each_possible_rcu(s->context_to_sid, entry, list, hash) {
		if (entry->hash != hash)
			continue;
		if (context_cmp(&entry->context, context)) {
			sid = entry->sid;
			break;
		}
	}
	rcu_read_unlock();
	return sid;
}

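/*
 * Install the context of one of the well-known initial SIDs
 * (1..SECINITSID_NUM), which live in the fixed isids array rather than
 * in the dynamically grown tree.
 */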
int sidtab_set_initial(struct sidtab *s, u32 sid, struct context *context)
{
	struct sidtab_isid_entry *isid;
	u32 hash;
	int rc;

	if (sid == 0 || sid > SECINITSID_NUM)
		return -EINVAL;

	isid = &s->isids[sid - 1];

	rc = context_cpy(&isid->entry.context, context);
	if (rc)
		return rc;

#if CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0
	isid->entry.cache = NULL;
#endif
	isid->set = 1;

	hash = context_compute_hash(context);

	/*
	 * Multiple initial sids may map to the same context. Check that this
	 * context is not already represented in the context_to_sid hashtable
	 * to avoid duplicate entries and long linked lists upon hash
	 * collision.
	 */
	if (!context_to_sid(s, context, hash)) {
		isid->entry.sid = sid;
		isid->entry.hash = hash;
		hash_add(s->context_to_sid, &isid->entry.list, hash);
	}

	return 0;
}

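/*
 * Walk the whole reverse-lookup hashtable under RCU and report its
 * occupancy: total entries, buckets used, and the longest chain.
 */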
int sidtab_hash_stats(struct sidtab *sidtab, char *page)
{
	int i;
	int chain_len = 0;
	int slots_used = 0;
	int entries = 0;
	int max_chain_len = 0;
	int cur_bucket = 0;
	struct sidtab_entry *entry;

	rcu_read_lock();
	hash_for_each_rcu(sidtab->context_to_sid, i, entry, list) {
		entries++;
		if (i == cur_bucket) {
			chain_len++;
			if (chain_len == 1)
				slots_used++;
		} else {
			/* this entry opens a new bucket's chain; count it */
			cur_bucket = i;
			if (chain_len > max_chain_len)
				max_chain_len = chain_len;
			chain_len = 1;
			slots_used++;
		}
	}
	rcu_read_unlock();

	if (chain_len > max_chain_len)
		max_chain_len = chain_len;

	return scnprintf(page, PAGE_SIZE, "entries: %d\nbuckets used: %d/%d\n"
			 "longest chain: %d\n", entries,
			 slots_used, SIDTAB_HASH_BUCKETS, max_chain_len);
}

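/*
 * Compute the depth of tree needed to hold @count entries: level 0 is
 * a single leaf with SIDTAB_LEAF_ENTRIES slots, and each additional
 * level multiplies the capacity by 2^SIDTAB_INNER_SHIFT.
 */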
static u32 sidtab_level_from_count(u32 count)
{
	u32 capacity = SIDTAB_LEAF_ENTRIES;
	u32 level = 0;

	while (count > capacity) {
		capacity <<= SIDTAB_INNER_SHIFT;
		++level;
	}
	return level;
}

static int sidtab_alloc_roots(struct sidtab *s, u32 level)
{
	u32 l;

	if (!s->roots[0].ptr_leaf) {
		s->roots[0].ptr_leaf = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
					       GFP_ATOMIC);
		if (!s->roots[0].ptr_leaf)
			return -ENOMEM;
	}
	for (l = 1; l <= level; ++l)
		if (!s->roots[l].ptr_inner) {
			s->roots[l].ptr_inner = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
							GFP_ATOMIC);
			if (!s->roots[l].ptr_inner)
				return -ENOMEM;
			s->roots[l].ptr_inner->entries[0] = s->roots[l - 1];
		}
	return 0;
}

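/*
 * Walk the tree down to the entry at @index, extending the tree with
 * GFP_ATOMIC allocations on the way if @alloc is set. Returns NULL if
 * the entry does not exist and could not (or must not) be allocated.
 */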
static struct sidtab_entry *sidtab_do_lookup(struct sidtab *s, u32 index,
					     int alloc)
{
	union sidtab_entry_inner *entry;
	u32 level, capacity_shift, leaf_index = index / SIDTAB_LEAF_ENTRIES;

	/* find the level of the subtree we need */
	level = sidtab_level_from_count(index + 1);
	capacity_shift = level * SIDTAB_INNER_SHIFT;

	/* allocate roots if needed */
	if (alloc && sidtab_alloc_roots(s, level) != 0)
		return NULL;

	/* lookup inside the subtree */
	entry = &s->roots[level];
	while (level != 0) {
		capacity_shift -= SIDTAB_INNER_SHIFT;
		--level;

		entry = &entry->ptr_inner->entries[leaf_index >> capacity_shift];
		leaf_index &= ((u32)1 << capacity_shift) - 1;

		if (!entry->ptr_inner) {
			if (alloc)
				entry->ptr_inner = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
							   GFP_ATOMIC);
			if (!entry->ptr_inner)
				return NULL;
		}
	}
	if (!entry->ptr_leaf) {
		if (alloc)
			entry->ptr_leaf = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
						  GFP_ATOMIC);
		if (!entry->ptr_leaf)
			return NULL;
	}
	return &entry->ptr_leaf->entries[index % SIDTAB_LEAF_ENTRIES];
}

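/*
 * The smp_load_acquire() here pairs with the smp_store_release() in
 * sidtab_context_to_sid(): any index below the observed count refers
 * to a fully initialized entry, so lookups need no lock.
 */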
static struct sidtab_entry *sidtab_lookup(struct sidtab *s, u32 index)
{
	/* read entries only after reading count */
	u32 count = smp_load_acquire(&s->count);

	if (index >= count)
		return NULL;

	return sidtab_do_lookup(s, index, 0);
}

static struct sidtab_entry *sidtab_lookup_initial(struct sidtab *s, u32 sid)
{
	return s->isids[sid - 1].set ? &s->isids[sid - 1].entry : NULL;
}

static struct sidtab_entry *sidtab_search_core(struct sidtab *s, u32 sid,
					       int force)
{
	if (sid != 0) {
		struct sidtab_entry *entry;

		if (sid > SECINITSID_NUM)
			entry = sidtab_lookup(s, sid_to_index(sid));
		else
			entry = sidtab_lookup_initial(s, sid);
		if (entry && (!entry->context.len || force))
			return entry;
	}

	return sidtab_lookup_initial(s, SECINITSID_UNLABELED);
}

struct sidtab_entry *sidtab_search_entry(struct sidtab *s, u32 sid)
{
	return sidtab_search_core(s, sid, 0);
}

struct sidtab_entry *sidtab_search_entry_force(struct sidtab *s, u32 sid)
{
	return sidtab_search_core(s, sid, 1);
}

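/*
 * Map @context to a SID, allocating a new entry at index s->count if
 * the context is not yet known. The fast path is a lockless hash
 * lookup; on a miss we take s->lock, re-check, and append. While a
 * policy reload is in progress (s->convert != NULL), the new entry is
 * also converted and inserted into the reload's target sidtab so the
 * two tables stay in sync ("live convert").
 */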
int sidtab_context_to_sid(struct sidtab *s, struct context *context,
			  u32 *sid)
{
	unsigned long flags;
	u32 count, hash = context_compute_hash(context);
	struct sidtab_convert_params *convert;
	struct sidtab_entry *dst, *dst_convert;
	int rc;

	*sid = context_to_sid(s, context, hash);
	if (*sid)
		return 0;

	/* lock-free search failed: lock, re-search, and insert if not found */
	spin_lock_irqsave(&s->lock, flags);

	rc = 0;
	*sid = context_to_sid(s, context, hash);
	if (*sid)
		goto out_unlock;

	count = s->count;
	convert = s->convert;

	/* bail out if we already reached max entries */
	rc = -EOVERFLOW;
	if (count >= SIDTAB_MAX)
		goto out_unlock;

	/* insert context into new entry */
	rc = -ENOMEM;
	dst = sidtab_do_lookup(s, count, 1);
	if (!dst)
		goto out_unlock;

	dst->sid = index_to_sid(count);
	dst->hash = hash;

	rc = context_cpy(&dst->context, context);
	if (rc)
		goto out_unlock;

	/*
	 * if we are building a new sidtab, we need to convert the context
	 * and insert it there as well
	 */
	if (convert) {
		rc = -ENOMEM;
		dst_convert = sidtab_do_lookup(convert->target, count, 1);
		if (!dst_convert) {
			context_destroy(&dst->context);
			goto out_unlock;
		}

		rc = convert->func(context, &dst_convert->context,
				   convert->args);
		if (rc) {
			context_destroy(&dst->context);
			goto out_unlock;
		}
		dst_convert->sid = index_to_sid(count);
		dst_convert->hash = context_compute_hash(&dst_convert->context);
		convert->target->count = count + 1;

		hash_add_rcu(convert->target->context_to_sid,
			     &dst_convert->list, dst_convert->hash);
	}

	if (context->len)
		pr_info("SELinux:  Context %s is not valid (left unmapped).\n",
			context->str);

	*sid = index_to_sid(count);

	/* write entries before updating count */
	smp_store_release(&s->count, count + 1);
	hash_add_rcu(s->context_to_sid, &dst->list, dst->hash);

	rc = 0;
out_unlock:
	spin_unlock_irqrestore(&s->lock, flags);
	return rc;
}

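/*
 * Build the reverse-lookup hashtable of a converted sidtab from its
 * first @count entries; called with the source sidtab's lock held.
 */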
static void sidtab_convert_hashtable(struct sidtab *s, u32 count)
{
	struct sidtab_entry *entry;
	u32 i;

	for (i = 0; i < count; i++) {
		entry = sidtab_do_lookup(s, i, 0);
		entry->sid = index_to_sid(i);
		entry->hash = context_compute_hash(&entry->context);

		hash_add_rcu(s->context_to_sid, &entry->list, entry->hash);
	}
}

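/*
 * Recursively walk the source and destination trees in parallel,
 * converting the first @count contexts. @pos counts the entries
 * converted so far across the whole walk.
 */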
static int sidtab_convert_tree(union sidtab_entry_inner *edst,
			       union sidtab_entry_inner *esrc,
			       u32 *pos, u32 count, u32 level,
			       struct sidtab_convert_params *convert)
{
	int rc;
	u32 i;

	if (level != 0) {
		if (!edst->ptr_inner) {
			edst->ptr_inner = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
						  GFP_KERNEL);
			if (!edst->ptr_inner)
				return -ENOMEM;
		}
		i = 0;
		while (i < SIDTAB_INNER_ENTRIES && *pos < count) {
			rc = sidtab_convert_tree(&edst->ptr_inner->entries[i],
						 &esrc->ptr_inner->entries[i],
						 pos, count, level - 1,
						 convert);
			if (rc)
				return rc;
			i++;
		}
	} else {
		if (!edst->ptr_leaf) {
			edst->ptr_leaf = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
						 GFP_KERNEL);
			if (!edst->ptr_leaf)
				return -ENOMEM;
		}
		i = 0;
		while (i < SIDTAB_LEAF_ENTRIES && *pos < count) {
			rc = convert->func(&esrc->ptr_leaf->entries[i].context,
					   &edst->ptr_leaf->entries[i].context,
					   convert->args);
			if (rc)
				return rc;
			(*pos)++;
			i++;
		}
		cond_resched();
	}
	return 0;
}

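/*
 * Convert this sidtab's entries into params->target using the new
 * policy's representation. The bulk of the work runs outside the lock;
 * entries added concurrently are covered by the live convert enabled
 * via s->convert.
 */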
int sidtab_convert(struct sidtab *s, struct sidtab_convert_params *params)
{
	unsigned long flags;
	u32 count, level, pos;
	int rc;

	spin_lock_irqsave(&s->lock, flags);

	/* concurrent policy loads are not allowed */
	if (s->convert) {
		spin_unlock_irqrestore(&s->lock, flags);
		return -EBUSY;
	}

	count = s->count;
	level = sidtab_level_from_count(count);

	/* allocate last leaf in the new sidtab (to avoid race with
	 * live convert)
	 */
	rc = sidtab_do_lookup(params->target, count - 1, 1) ? 0 : -ENOMEM;
	if (rc) {
		spin_unlock_irqrestore(&s->lock, flags);
		return rc;
	}

	/* set count in case no new entries are added during conversion */
	params->target->count = count;

	/* enable live convert of new entries */
	s->convert = params;

	/* we can safely convert the tree outside the lock */
	spin_unlock_irqrestore(&s->lock, flags);

	pr_info("SELinux:  Converting %u SID table entries...\n", count);

	/* convert all entries not covered by live convert */
	pos = 0;
	rc = sidtab_convert_tree(&params->target->roots[level],
				 &s->roots[level], &pos, count, level, params);
	if (rc) {
		/* we need to keep the old table - disable live convert */
		spin_lock_irqsave(&s->lock, flags);
		s->convert = NULL;
		spin_unlock_irqrestore(&s->lock, flags);
		return rc;
	}
	/*
	 * The hashtable can also be modified in sidtab_context_to_sid()
	 * so we must re-acquire the lock here.
	 */
	spin_lock_irqsave(&s->lock, flags);
	sidtab_convert_hashtable(params->target, count);
	spin_unlock_irqrestore(&s->lock, flags);

	return 0;
}

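/*
 * Free one entry's context and, if the sid2str cache is enabled, its
 * cached string representation.
 */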
static void sidtab_destroy_entry(struct sidtab_entry *entry)
{
	context_destroy(&entry->context);
#if CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0
	kfree(rcu_dereference_raw(entry->cache));
#endif
}

static void sidtab_destroy_tree(union sidtab_entry_inner entry, u32 level)
{
	u32 i;

	if (level != 0) {
		struct sidtab_node_inner *node = entry.ptr_inner;

		if (!node)
			return;

		for (i = 0; i < SIDTAB_INNER_ENTRIES; i++)
			sidtab_destroy_tree(node->entries[i], level - 1);
		kfree(node);
	} else {
		struct sidtab_node_leaf *node = entry.ptr_leaf;

		if (!node)
			return;

		for (i = 0; i < SIDTAB_LEAF_ENTRIES; i++)
			sidtab_destroy_entry(&node->entries[i]);
		kfree(node);
	}
}

void sidtab_destroy(struct sidtab *s)
{
	u32 i, level;

	for (i = 0; i < SECINITSID_NUM; i++)
		if (s->isids[i].set)
			sidtab_destroy_entry(&s->isids[i].entry);

	level = SIDTAB_MAX_LEVEL;
	while (level && !s->roots[level].ptr_inner)
		--level;

	sidtab_destroy_tree(s->roots[level], level);
	/*
	 * The context_to_sid hashtable's objects are all shared
	 * with the isids array and context tree, and so don't need
	 * to be cleaned up here.
	 */
}

#if CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0

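/*
 * Cache the string form of @entry's context in a fixed-size LRU cache.
 * A string already in the cache is just bumped to the front; otherwise
 * the least recently used entry is evicted once all slots are taken.
 * Concurrent readers may still hold the victim, so it is freed with
 * kfree_rcu().
 */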
void sidtab_sid2str_put(struct sidtab *s, struct sidtab_entry *entry,
			const char *str, u32 str_len)
{
	struct sidtab_str_cache *cache, *victim = NULL;
	unsigned long flags;

	/* do not cache invalid contexts */
	if (entry->context.len)
		return;

	spin_lock_irqsave(&s->cache_lock, flags);

	cache = rcu_dereference_protected(entry->cache,
					  lockdep_is_held(&s->cache_lock));
	if (cache) {
		/* entry in cache - just bump to the head of LRU list */
		list_move(&cache->lru_member, &s->cache_lru_list);
		goto out_unlock;
	}

	cache = kmalloc(sizeof(struct sidtab_str_cache) + str_len, GFP_ATOMIC);
	if (!cache)
		goto out_unlock;

	if (s->cache_free_slots == 0) {
		/* pop a cache entry from the tail and free it */
		victim = container_of(s->cache_lru_list.prev,
				      struct sidtab_str_cache, lru_member);
		list_del(&victim->lru_member);
		rcu_assign_pointer(victim->parent->cache, NULL);
	} else {
		s->cache_free_slots--;
	}
	cache->parent = entry;
	cache->len = str_len;
	memcpy(cache->str, str, str_len);
	list_add(&cache->lru_member, &s->cache_lru_list);

	rcu_assign_pointer(entry->cache, cache);

out_unlock:
	spin_unlock_irqrestore(&s->cache_lock, flags);
	kfree_rcu(victim, rcu_member);
}

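/*
 * Fetch a copy of the cached context string for @entry, or -ENOENT on
 * a cache miss. A hit re-inserts the string to refresh its position in
 * the LRU list.
 */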
int sidtab_sid2str_get(struct sidtab *s, struct sidtab_entry *entry,
		       char **out, u32 *out_len)
{
	struct sidtab_str_cache *cache;
	int rc = 0;

	if (entry->context.len)
		return -ENOENT; /* do not cache invalid contexts */

	rcu_read_lock();

	cache = rcu_dereference(entry->cache);
	if (!cache) {
		rc = -ENOENT;
	} else {
		*out_len = cache->len;
		if (out) {
			*out = kmemdup(cache->str, cache->len, GFP_ATOMIC);
			if (!*out)
				rc = -ENOMEM;
		}
	}

	rcu_read_unlock();

	if (!rc && out)
		sidtab_sid2str_put(s, entry, *out, *out_len);
	return rc;
}

#endif /* CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0 */