// SPDX-License-Identifier: GPL-2.0
/*
 * Implementation of the SID table type.
 *
 * Original author: Stephen Smalley, <sds@tycho.nsa.gov>
 * Author: Ondrej Mosnacek, <omosnacek@gmail.com>
 *
 * Copyright (C) 2018 Red Hat, Inc.
 */
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <asm/barrier.h>
#include "flask.h"
#include "security.h"
#include "sidtab.h"

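/*
 * A cached string form of a context, kept on the sidtab's LRU list and
 * freed via RCU. @parent points back to the owning entry so the entry's
 * cache pointer can be cleared when this object is evicted.
 */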
struct sidtab_str_cache {
	struct rcu_head rcu_member;
	struct list_head lru_member;
	struct sidtab_entry *parent;
	u32 len;
	char str[];
};

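/*
 * SIDs 1..SECINITSID_NUM are reserved for initial SIDs; dynamically
 * allocated entries are numbered from 0. These macros translate between
 * a dynamic entry's index and its SID.
 */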
#define index_to_sid(index) (index + SECINITSID_NUM + 1)
#define sid_to_index(sid) (sid - (SECINITSID_NUM + 1))

int sidtab_init(struct sidtab *s)
{
	u32 i;

	memset(s->roots, 0, sizeof(s->roots));

	for (i = 0; i < SECINITSID_NUM; i++)
		s->isids[i].set = 0;

	s->count = 0;
	s->convert = NULL;
	hash_init(s->context_to_sid);

	spin_lock_init(&s->lock);

#if CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0
	s->cache_free_slots = CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE;
	INIT_LIST_HEAD(&s->cache_lru_list);
	spin_lock_init(&s->cache_lock);
#endif

	return 0;
}

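/*
 * Look up the SID mapped to @context in the reverse hash table, taking
 * only the RCU read lock. Returns 0 (SECSID_NULL) if no entry matches.
 */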
static u32 context_to_sid(struct sidtab *s, struct context *context)
{
	struct sidtab_entry *entry;
	u32 sid = 0;

	rcu_read_lock();
	hash_for_each_possible_rcu(s->context_to_sid, entry, list,
				   context->hash) {
		if (context_cmp(&entry->context, context)) {
			sid = entry->sid;
			break;
		}
	}
	rcu_read_unlock();
	return sid;
}

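/*
 * Install @context as the context of initial SID @sid (1..SECINITSID_NUM)
 * and, unless an equal context is already present, add the entry to the
 * context_to_sid hash table as well.
 */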
int sidtab_set_initial(struct sidtab *s, u32 sid, struct context *context)
{
	struct sidtab_isid_entry *isid;
	int rc;

	if (sid == 0 || sid > SECINITSID_NUM)
		return -EINVAL;

	isid = &s->isids[sid - 1];

	rc = context_cpy(&isid->entry.context, context);
	if (rc)
		return rc;

#if CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0
	isid->entry.cache = NULL;
#endif
	isid->set = 1;

	/*
	 * Multiple initial sids may map to the same context. Check that this
	 * context is not already represented in the context_to_sid hashtable
	 * to avoid duplicate entries and long linked lists upon hash
	 * collision.
	 */
	if (!context_to_sid(s, context)) {
		isid->entry.sid = sid;
		hash_add(s->context_to_sid, &isid->entry.list, context->hash);
	}

	return 0;
}

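/*
 * Format occupancy statistics of the context_to_sid hash table into
 * @page: total entries, buckets used, and the longest bucket chain.
 */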
int sidtab_hash_stats(struct sidtab *sidtab, char *page)
{
	int i;
	int chain_len = 0;
	int slots_used = 0;
	int entries = 0;
	int max_chain_len = 0;
	int cur_bucket = 0;
	struct sidtab_entry *entry;

	rcu_read_lock();
	hash_for_each_rcu(sidtab->context_to_sid, i, entry, list) {
		entries++;
		if (i == cur_bucket) {
			chain_len++;
			if (chain_len == 1)
				slots_used++;
		} else {
			/* start a new chain; count this first entry, too */
			cur_bucket = i;
			if (chain_len > max_chain_len)
				max_chain_len = chain_len;
			chain_len = 1;
			slots_used++;
		}
	}
	rcu_read_unlock();

	if (chain_len > max_chain_len)
		max_chain_len = chain_len;

	return scnprintf(page, PAGE_SIZE, "entries: %d\nbuckets used: %d/%d\n"
			 "longest chain: %d\n", entries,
			 slots_used, SIDTAB_HASH_BUCKETS, max_chain_len);
}

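/*
 * Compute the tree depth needed for @count entries: level 0 is a single
 * leaf node, and each extra level multiplies the capacity by
 * 2^SIDTAB_INNER_SHIFT.
 */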
static u32 sidtab_level_from_count(u32 count)
{
	u32 capacity = SIDTAB_LEAF_ENTRIES;
	u32 level = 0;

	while (count > capacity) {
		capacity <<= SIDTAB_INNER_SHIFT;
		++level;
	}
	return level;
}

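/*
 * Ensure that root nodes exist up to @level, growing the tree upward by
 * making the old root entry 0 of each newly allocated inner node.
 */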
static int sidtab_alloc_roots(struct sidtab *s, u32 level)
{
	u32 l;

	if (!s->roots[0].ptr_leaf) {
		s->roots[0].ptr_leaf = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
					       GFP_ATOMIC);
		if (!s->roots[0].ptr_leaf)
			return -ENOMEM;
	}
	for (l = 1; l <= level; ++l)
		if (!s->roots[l].ptr_inner) {
			s->roots[l].ptr_inner = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
							GFP_ATOMIC);
			if (!s->roots[l].ptr_inner)
				return -ENOMEM;
			s->roots[l].ptr_inner->entries[0] = s->roots[l - 1];
		}
	return 0;
}

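/*
 * Descend to the leaf entry at @index. When @alloc is nonzero, missing
 * nodes are allocated (GFP_ATOMIC) on the way down; otherwise a missing
 * node makes the lookup fail with NULL.
 */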
static struct sidtab_entry *sidtab_do_lookup(struct sidtab *s, u32 index,
					     int alloc)
{
	union sidtab_entry_inner *entry;
	u32 level, capacity_shift, leaf_index = index / SIDTAB_LEAF_ENTRIES;

	/* find the level of the subtree we need */
	level = sidtab_level_from_count(index + 1);
	capacity_shift = level * SIDTAB_INNER_SHIFT;

	/* allocate roots if needed */
	if (alloc && sidtab_alloc_roots(s, level) != 0)
		return NULL;

	/* lookup inside the subtree */
	entry = &s->roots[level];
	while (level != 0) {
		capacity_shift -= SIDTAB_INNER_SHIFT;
		--level;

		entry = &entry->ptr_inner->entries[leaf_index >> capacity_shift];
		leaf_index &= ((u32)1 << capacity_shift) - 1;

		if (!entry->ptr_inner) {
			if (alloc)
				entry->ptr_inner = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
							   GFP_ATOMIC);
			if (!entry->ptr_inner)
				return NULL;
		}
	}
	if (!entry->ptr_leaf) {
		if (alloc)
			entry->ptr_leaf = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
						  GFP_ATOMIC);
		if (!entry->ptr_leaf)
			return NULL;
	}
	return &entry->ptr_leaf->entries[index % SIDTAB_LEAF_ENTRIES];
}

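/*
 * Lock-free lookup of a dynamic entry by index. The acquire on s->count
 * pairs with the release in sidtab_context_to_sid(), so a reader that
 * sees the new count also sees the fully initialized entry.
 */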
static struct sidtab_entry *sidtab_lookup(struct sidtab *s, u32 index)
{
	/* read entries only after reading count */
	u32 count = smp_load_acquire(&s->count);

	if (index >= count)
		return NULL;

	return sidtab_do_lookup(s, index, 0);
}

static struct sidtab_entry *sidtab_lookup_initial(struct sidtab *s, u32 sid)
{
	return s->isids[sid - 1].set ? &s->isids[sid - 1].entry : NULL;
}

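/*
 * Resolve @sid to its entry, falling back to the unlabeled context when
 * the SID is unknown. Unless @force is set, an entry that only carries
 * an unmapped context string (context.len != 0) is treated as invalid.
 */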
static struct sidtab_entry *sidtab_search_core(struct sidtab *s, u32 sid,
					       int force)
{
	if (sid != 0) {
		struct sidtab_entry *entry;

		if (sid > SECINITSID_NUM)
			entry = sidtab_lookup(s, sid_to_index(sid));
		else
			entry = sidtab_lookup_initial(s, sid);
		if (entry && (!entry->context.len || force))
			return entry;
	}

	return sidtab_lookup_initial(s, SECINITSID_UNLABELED);
}

struct sidtab_entry *sidtab_search_entry(struct sidtab *s, u32 sid)
{
	return sidtab_search_core(s, sid, 0);
}

struct sidtab_entry *sidtab_search_entry_force(struct sidtab *s, u32 sid)
{
	return sidtab_search_core(s, sid, 1);
}

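/*
 * Map @context to a SID, appending a new entry at index s->count when no
 * match exists. The fast path is a lock-free hash lookup; the slow path
 * takes s->lock, re-checks, and inserts. While a policy reload is in
 * progress (s->convert != NULL), the new entry is also converted and
 * inserted into the new sidtab so both tables stay in sync.
 *
 * A minimal caller sketch (assuming @ctx was built elsewhere, e.g. by
 * string_to_context_struct()):
 *
 *	u32 sid;
 *	int rc = sidtab_context_to_sid(sidtab, &ctx, &sid);
 *	if (rc)
 *		return rc;
 */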
int sidtab_context_to_sid(struct sidtab *s, struct context *context,
			  u32 *sid)
{
	unsigned long flags;
	u32 count;
	struct sidtab_convert_params *convert;
	struct sidtab_entry *dst, *dst_convert;
	int rc;

	*sid = context_to_sid(s, context);
	if (*sid)
		return 0;

	/* lock-free search failed: lock, re-search, and insert if not found */
	spin_lock_irqsave(&s->lock, flags);

	rc = 0;
	*sid = context_to_sid(s, context);
	if (*sid)
		goto out_unlock;

	/* read entries only after reading count */
	count = smp_load_acquire(&s->count);
	convert = s->convert;

	/* bail out if we already reached max entries */
	rc = -EOVERFLOW;
	if (count >= SIDTAB_MAX)
		goto out_unlock;

	/* insert context into new entry */
	rc = -ENOMEM;
	dst = sidtab_do_lookup(s, count, 1);
	if (!dst)
		goto out_unlock;

	dst->sid = index_to_sid(count);

	rc = context_cpy(&dst->context, context);
	if (rc)
		goto out_unlock;

	/*
	 * if we are building a new sidtab, we need to convert the context
	 * and insert it there as well
	 */
	if (convert) {
		rc = -ENOMEM;
		dst_convert = sidtab_do_lookup(convert->target, count, 1);
		if (!dst_convert) {
			context_destroy(&dst->context);
			goto out_unlock;
		}

		rc = convert->func(context, &dst_convert->context,
				   convert->args);
		if (rc) {
			context_destroy(&dst->context);
			goto out_unlock;
		}
		dst_convert->sid = index_to_sid(count);
		convert->target->count = count + 1;

		hash_add_rcu(convert->target->context_to_sid,
			     &dst_convert->list, dst_convert->context.hash);
	}

	if (context->len)
		pr_info("SELinux:  Context %s is not valid (left unmapped).\n",
			context->str);

	*sid = index_to_sid(count);

	/* write entries before updating count */
	smp_store_release(&s->count, count + 1);
	hash_add_rcu(s->context_to_sid, &dst->list, dst->context.hash);

	rc = 0;
out_unlock:
	spin_unlock_irqrestore(&s->lock, flags);
	return rc;
}

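/*
 * Populate the (new) table's context_to_sid hash table from its first
 * @count entries, assigning each entry its SID. Called under s->lock
 * once the bulk conversion pass has finished.
 */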
static void sidtab_convert_hashtable(struct sidtab *s, u32 count)
{
	struct sidtab_entry *entry;
	u32 i;

	for (i = 0; i < count; i++) {
		entry = sidtab_do_lookup(s, i, 0);
		entry->sid = index_to_sid(i);

		hash_add_rcu(s->context_to_sid, &entry->list,
			     entry->context.hash);
	}
}

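/*
 * Recursively convert the first @count contexts from the source tree to
 * the destination tree, allocating destination nodes as needed; @pos
 * counts the leaf entries converted so far.
 */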
static int sidtab_convert_tree(union sidtab_entry_inner *edst,
			       union sidtab_entry_inner *esrc,
			       u32 *pos, u32 count, u32 level,
			       struct sidtab_convert_params *convert)
{
	int rc;
	u32 i;

	if (level != 0) {
		if (!edst->ptr_inner) {
			edst->ptr_inner = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
						  GFP_KERNEL);
			if (!edst->ptr_inner)
				return -ENOMEM;
		}
		i = 0;
		while (i < SIDTAB_INNER_ENTRIES && *pos < count) {
			rc = sidtab_convert_tree(&edst->ptr_inner->entries[i],
						 &esrc->ptr_inner->entries[i],
						 pos, count, level - 1,
						 convert);
			if (rc)
				return rc;
			i++;
		}
	} else {
		if (!edst->ptr_leaf) {
			edst->ptr_leaf = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
						 GFP_KERNEL);
			if (!edst->ptr_leaf)
				return -ENOMEM;
		}
		i = 0;
		while (i < SIDTAB_LEAF_ENTRIES && *pos < count) {
			rc = convert->func(&esrc->ptr_leaf->entries[i].context,
					   &edst->ptr_leaf->entries[i].context,
					   convert->args);
			if (rc)
				return rc;
			(*pos)++;
			i++;
		}
		cond_resched();
	}
	return 0;
}

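/*
 * Convert all entries of @s into params->target for a new policy. Only
 * the setup and the final hash table rebuild run under s->lock; the bulk
 * tree conversion runs unlocked, with entries added concurrently by
 * sidtab_context_to_sid() handled through the live-convert hook. On
 * failure the old table is kept and live convert is switched off.
 */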
int sidtab_convert(struct sidtab *s, struct sidtab_convert_params *params)
{
	unsigned long flags;
	u32 count, level, pos;
	int rc;

	spin_lock_irqsave(&s->lock, flags);

	/* concurrent policy loads are not allowed */
	if (s->convert) {
		spin_unlock_irqrestore(&s->lock, flags);
		return -EBUSY;
	}

	count = s->count;
	level = sidtab_level_from_count(count);

	/* allocate last leaf in the new sidtab (to avoid race with
	 * live convert)
	 */
	rc = sidtab_do_lookup(params->target, count - 1, 1) ? 0 : -ENOMEM;
	if (rc) {
		spin_unlock_irqrestore(&s->lock, flags);
		return rc;
	}

	/* set count in case no new entries are added during conversion */
	params->target->count = count;

	/* enable live convert of new entries */
	s->convert = params;

	/* we can safely convert the tree outside the lock */
	spin_unlock_irqrestore(&s->lock, flags);

	pr_info("SELinux:  Converting %u SID table entries...\n", count);

	/* convert all entries not covered by live convert */
	pos = 0;
	rc = sidtab_convert_tree(&params->target->roots[level],
				 &s->roots[level], &pos, count, level, params);
	if (rc) {
		/* we need to keep the old table - disable live convert */
		spin_lock_irqsave(&s->lock, flags);
		s->convert = NULL;
		spin_unlock_irqrestore(&s->lock, flags);
		return rc;
	}
	/*
	 * The hashtable can also be modified in sidtab_context_to_sid()
	 * so we must re-acquire the lock here.
	 */
	spin_lock_irqsave(&s->lock, flags);
	sidtab_convert_hashtable(params->target, count);
	spin_unlock_irqrestore(&s->lock, flags);

	return 0;
}

static void sidtab_destroy_entry(struct sidtab_entry *entry)
{
	context_destroy(&entry->context);
#if CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0
	kfree(rcu_dereference_raw(entry->cache));
#endif
}

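/*
 * Recursively free a (sub)tree of nodes, destroying the context stored
 * in every leaf entry.
 */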
static void sidtab_destroy_tree(union sidtab_entry_inner entry, u32 level)
{
	u32 i;

	if (level != 0) {
		struct sidtab_node_inner *node = entry.ptr_inner;

		if (!node)
			return;

		for (i = 0; i < SIDTAB_INNER_ENTRIES; i++)
			sidtab_destroy_tree(node->entries[i], level - 1);
		kfree(node);
	} else {
		struct sidtab_node_leaf *node = entry.ptr_leaf;

		if (!node)
			return;

		for (i = 0; i < SIDTAB_LEAF_ENTRIES; i++)
			sidtab_destroy_entry(&node->entries[i]);
		kfree(node);
	}
}

void sidtab_destroy(struct sidtab *s)
{
	u32 i, level;

	for (i = 0; i < SECINITSID_NUM; i++)
		if (s->isids[i].set)
			sidtab_destroy_entry(&s->isids[i].entry);

	level = SIDTAB_MAX_LEVEL;
	while (level && !s->roots[level].ptr_inner)
		--level;

	sidtab_destroy_tree(s->roots[level], level);
	/*
	 * The context_to_sid hashtable's objects are all shared
	 * with the isids array and context tree, and so don't need
	 * to be cleaned up here.
	 */
}

#if CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0

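/*
 * Cache @str as the string form of @entry's context. A repeated put just
 * refreshes the entry's position in the LRU list; otherwise a new cache
 * object is allocated, evicting the least recently used one when all
 * CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE slots are in use. The
 * victim, if any, is freed only after an RCU grace period, so concurrent
 * lock-free readers stay safe.
 */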
void sidtab_sid2str_put(struct sidtab *s, struct sidtab_entry *entry,
			const char *str, u32 str_len)
{
	struct sidtab_str_cache *cache, *victim = NULL;
	unsigned long flags;

	/* do not cache invalid contexts */
	if (entry->context.len)
		return;

	spin_lock_irqsave(&s->cache_lock, flags);

	cache = rcu_dereference_protected(entry->cache,
					  lockdep_is_held(&s->cache_lock));
	if (cache) {
		/* entry in cache - just bump to the head of LRU list */
		list_move(&cache->lru_member, &s->cache_lru_list);
		goto out_unlock;
	}

	cache = kmalloc(sizeof(struct sidtab_str_cache) + str_len, GFP_ATOMIC);
	if (!cache)
		goto out_unlock;

	if (s->cache_free_slots == 0) {
		/* pop a cache entry from the tail and free it */
		victim = container_of(s->cache_lru_list.prev,
				      struct sidtab_str_cache, lru_member);
		list_del(&victim->lru_member);
		rcu_assign_pointer(victim->parent->cache, NULL);
	} else {
		s->cache_free_slots--;
	}
	cache->parent = entry;
	cache->len = str_len;
	memcpy(cache->str, str, str_len);
	list_add(&cache->lru_member, &s->cache_lru_list);

	rcu_assign_pointer(entry->cache, cache);

out_unlock:
	spin_unlock_irqrestore(&s->cache_lock, flags);
	kfree_rcu(victim, rcu_member);
}

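/*
 * Retrieve the cached string for @entry. On a hit, *out_len is set and,
 * when @out is non-NULL, the string is duplicated into *out and the
 * entry's LRU position is refreshed via sidtab_sid2str_put(). Returns
 * -ENOENT on a miss or for invalid contexts.
 */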
int sidtab_sid2str_get(struct sidtab *s, struct sidtab_entry *entry,
		       char **out, u32 *out_len)
{
	struct sidtab_str_cache *cache;
	int rc = 0;

	if (entry->context.len)
		return -ENOENT; /* do not cache invalid contexts */

	rcu_read_lock();

	cache = rcu_dereference(entry->cache);
	if (!cache) {
		rc = -ENOENT;
	} else {
		*out_len = cache->len;
		if (out) {
			*out = kmemdup(cache->str, cache->len, GFP_ATOMIC);
			if (!*out)
				rc = -ENOMEM;
		}
	}

	rcu_read_unlock();

	if (!rc && out)
		sidtab_sid2str_put(s, entry, *out, *out_len);
	return rc;
}

#endif /* CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0 */