// SPDX-License-Identifier: GPL-2.0
/*
 * Implementation of the SID table type.
 *
 * Original author: Stephen Smalley, <sds@tycho.nsa.gov>
 * Author: Ondrej Mosnacek, <omosnacek@gmail.com>
 *
 * Copyright (C) 2018 Red Hat, Inc.
 */
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <asm/barrier.h>
#include "flask.h"
#include "security.h"
#include "sidtab.h"

struct sidtab_str_cache {
	struct rcu_head rcu_member;
	struct list_head lru_member;
	struct sidtab_entry *parent;
	u32 len;
	char str[];
};

/* parenthesize the arguments so the macros expand safely in any expression */
#define index_to_sid(index) ((index) + SECINITSID_NUM + 1)
#define sid_to_index(sid) ((sid) - (SECINITSID_NUM + 1))

int sidtab_init(struct sidtab *s)
{
	u32 i;

	memset(s->roots, 0, sizeof(s->roots));

	for (i = 0; i < SECINITSID_NUM; i++)
		s->isids[i].set = 0;

	s->count = 0;
	s->convert = NULL;
	hash_init(s->context_to_sid);

	spin_lock_init(&s->lock);

#if CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0
	s->cache_free_slots = CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE;
	INIT_LIST_HEAD(&s->cache_lru_list);
	spin_lock_init(&s->cache_lock);
#endif

	return 0;
}

static u32 context_to_sid(struct sidtab *s, struct context *context, u32 hash)
{
	struct sidtab_entry *entry;
	u32 sid = 0;

	rcu_read_lock();
	hash_for_each_possible_rcu(s->context_to_sid, entry, list, hash) {
		if (entry->hash != hash)
			continue;
		if (context_cmp(&entry->context, context)) {
			sid = entry->sid;
			break;
		}
	}
	rcu_read_unlock();
	return sid;
}
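/*
 * Install the context for one of the fixed initial SIDs. Valid initial
 * SIDs are 1..SECINITSID_NUM; they live in the statically sized isids
 * array rather than in the dynamic entry tree. A minimal caller sketch
 * (the SID constant here is just an example):
 *
 *	rc = sidtab_set_initial(s, SECINITSID_UNLABELED, &ctx);
 *	if (rc)
 *		goto bad;
 */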
int sidtab_set_initial(struct sidtab *s, u32 sid, struct context *context)
{
	struct sidtab_isid_entry *isid;
	u32 hash;
	int rc;

	if (sid == 0 || sid > SECINITSID_NUM)
		return -EINVAL;

	isid = &s->isids[sid - 1];

	rc = context_cpy(&isid->entry.context, context);
	if (rc)
		return rc;

#if CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0
	isid->entry.cache = NULL;
#endif
	isid->set = 1;

	hash = context_compute_hash(context);

	/*
	 * Multiple initial sids may map to the same context. Check that this
	 * context is not already represented in the context_to_sid hashtable
	 * to avoid duplicate entries and long linked lists upon hash
	 * collision.
	 */
	if (!context_to_sid(s, context, hash)) {
		isid->entry.sid = sid;
		isid->entry.hash = hash;
		hash_add(s->context_to_sid, &isid->entry.list, hash);
	}

	return 0;
}

int sidtab_hash_stats(struct sidtab *sidtab, char *page)
{
	int i;
	int chain_len = 0;
	int slots_used = 0;
	int entries = 0;
	int max_chain_len = 0;
	int cur_bucket = 0;
	struct sidtab_entry *entry;

	rcu_read_lock();
	hash_for_each_rcu(sidtab->context_to_sid, i, entry, list) {
		entries++;
		if (i == cur_bucket) {
			chain_len++;
			if (chain_len == 1)
				slots_used++;
		} else {
			cur_bucket = i;
			if (chain_len > max_chain_len)
				max_chain_len = chain_len;
			chain_len = 0;
		}
	}
	rcu_read_unlock();

	if (chain_len > max_chain_len)
		max_chain_len = chain_len;

	return scnprintf(page, PAGE_SIZE, "entries: %d\nbuckets used: %d/%d\n"
			 "longest chain: %d\n", entries,
			 slots_used, SIDTAB_HASH_BUCKETS, max_chain_len);
}

static u32 sidtab_level_from_count(u32 count)
{
	u32 capacity = SIDTAB_LEAF_ENTRIES;
	u32 level = 0;

	while (count > capacity) {
		capacity <<= SIDTAB_INNER_SHIFT;
		++level;
	}
	return level;
}

static int sidtab_alloc_roots(struct sidtab *s, u32 level)
{
	u32 l;

	if (!s->roots[0].ptr_leaf) {
		s->roots[0].ptr_leaf = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
					       GFP_ATOMIC);
		if (!s->roots[0].ptr_leaf)
			return -ENOMEM;
	}
	for (l = 1; l <= level; ++l)
		if (!s->roots[l].ptr_inner) {
			s->roots[l].ptr_inner = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
							GFP_ATOMIC);
			if (!s->roots[l].ptr_inner)
				return -ENOMEM;
			s->roots[l].ptr_inner->entries[0] = s->roots[l - 1];
		}
	return 0;
}

static struct sidtab_entry *sidtab_do_lookup(struct sidtab *s, u32 index,
					     int alloc)
{
	union sidtab_entry_inner *entry;
	u32 level, capacity_shift, leaf_index = index / SIDTAB_LEAF_ENTRIES;

	/* find the level of the subtree we need */
	level = sidtab_level_from_count(index + 1);
	capacity_shift = level * SIDTAB_INNER_SHIFT;

	/* allocate roots if needed */
	if (alloc && sidtab_alloc_roots(s, level) != 0)
		return NULL;

	/* lookup inside the subtree */
	entry = &s->roots[level];
	while (level != 0) {
		capacity_shift -= SIDTAB_INNER_SHIFT;
		--level;

		entry = &entry->ptr_inner->entries[leaf_index >> capacity_shift];
		leaf_index &= ((u32)1 << capacity_shift) - 1;

		if (!entry->ptr_inner) {
			if (alloc)
				entry->ptr_inner = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
							   GFP_ATOMIC);
			if (!entry->ptr_inner)
				return NULL;
		}
	}
	if (!entry->ptr_leaf) {
		if (alloc)
			entry->ptr_leaf = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
						  GFP_ATOMIC);
		if (!entry->ptr_leaf)
			return NULL;
	}
	return &entry->ptr_leaf->entries[index % SIDTAB_LEAF_ENTRIES];
}

static struct sidtab_entry *sidtab_lookup(struct sidtab *s, u32 index)
{
	/* read entries only after reading count */
	u32 count = smp_load_acquire(&s->count);

	if (index >= count)
		return NULL;

	return sidtab_do_lookup(s, index, 0);
}

static struct sidtab_entry *sidtab_lookup_initial(struct sidtab *s, u32 sid)
{
	return s->isids[sid - 1].set ? &s->isids[sid - 1].entry : NULL;
}
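/*
 * Core SID lookup, shared by sidtab_search_entry() and
 * sidtab_search_entry_force() below. Unknown SIDs fall back to the
 * SECINITSID_UNLABELED entry. With force == 0, entries whose context
 * could not be mapped under the current policy (context.len != 0) are
 * also treated as unlabeled; with force != 0 they are returned as-is,
 * e.g. so the raw context string can still be exported.
 */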
static struct sidtab_entry *sidtab_search_core(struct sidtab *s, u32 sid,
					       int force)
{
	if (sid != 0) {
		struct sidtab_entry *entry;

		if (sid > SECINITSID_NUM)
			entry = sidtab_lookup(s, sid_to_index(sid));
		else
			entry = sidtab_lookup_initial(s, sid);
		if (entry && (!entry->context.len || force))
			return entry;
	}

	return sidtab_lookup_initial(s, SECINITSID_UNLABELED);
}

struct sidtab_entry *sidtab_search_entry(struct sidtab *s, u32 sid)
{
	return sidtab_search_core(s, sid, 0);
}

struct sidtab_entry *sidtab_search_entry_force(struct sidtab *s, u32 sid)
{
	return sidtab_search_core(s, sid, 1);
}

int sidtab_context_to_sid(struct sidtab *s, struct context *context,
			  u32 *sid)
{
	unsigned long flags;
	u32 count, hash = context_compute_hash(context);
	struct sidtab_convert_params *convert;
	struct sidtab_entry *dst, *dst_convert;
	int rc;

	*sid = context_to_sid(s, context, hash);
	if (*sid)
		return 0;

	/* lock-free search failed: lock, re-search, and insert if not found */
	spin_lock_irqsave(&s->lock, flags);

	rc = 0;
	*sid = context_to_sid(s, context, hash);
	if (*sid)
		goto out_unlock;

	count = s->count;
	convert = s->convert;

	/* bail out if we already reached max entries */
	rc = -EOVERFLOW;
	if (count >= SIDTAB_MAX)
		goto out_unlock;

	/* insert context into new entry */
	rc = -ENOMEM;
	dst = sidtab_do_lookup(s, count, 1);
	if (!dst)
		goto out_unlock;

	dst->sid = index_to_sid(count);
	dst->hash = hash;

	rc = context_cpy(&dst->context, context);
	if (rc)
		goto out_unlock;

	/*
	 * if we are building a new sidtab, we need to convert the context
	 * and insert it there as well
	 */
	if (convert) {
		rc = -ENOMEM;
		dst_convert = sidtab_do_lookup(convert->target, count, 1);
		if (!dst_convert) {
			context_destroy(&dst->context);
			goto out_unlock;
		}

		rc = convert->func(context, &dst_convert->context,
				   convert->args);
		if (rc) {
			context_destroy(&dst->context);
			goto out_unlock;
		}
		dst_convert->sid = index_to_sid(count);
		dst_convert->hash = context_compute_hash(&dst_convert->context);
		convert->target->count = count + 1;

		hash_add_rcu(convert->target->context_to_sid,
			     &dst_convert->list, dst_convert->hash);
	}

	if (context->len)
		pr_info("SELinux: Context %s is not valid (left unmapped).\n",
			context->str);

	*sid = index_to_sid(count);

	/* write entries before updating count */
	smp_store_release(&s->count, count + 1);
	hash_add_rcu(s->context_to_sid, &dst->list, dst->hash);

	rc = 0;
out_unlock:
	spin_unlock_irqrestore(&s->lock, flags);
	return rc;
}

static void sidtab_convert_hashtable(struct sidtab *s, u32 count)
{
	struct sidtab_entry *entry;
	u32 i;

	for (i = 0; i < count; i++) {
		entry = sidtab_do_lookup(s, i, 0);
		entry->sid = index_to_sid(i);
		entry->hash = context_compute_hash(&entry->context);

		hash_add_rcu(s->context_to_sid, &entry->list, entry->hash);
	}
}
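/*
 * Recursively walk the source tree and convert each context into the
 * target tree, allocating target nodes on the way down. *pos counts the
 * leaf entries converted so far; the walk stops once it reaches count,
 * so entries added concurrently are left to the live-convert path in
 * sidtab_context_to_sid().
 */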
static int sidtab_convert_tree(union sidtab_entry_inner *edst,
			       union sidtab_entry_inner *esrc,
			       u32 *pos, u32 count, u32 level,
			       struct sidtab_convert_params *convert)
{
	int rc;
	u32 i;

	if (level != 0) {
		if (!edst->ptr_inner) {
			edst->ptr_inner = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
						  GFP_KERNEL);
			if (!edst->ptr_inner)
				return -ENOMEM;
		}
		i = 0;
		while (i < SIDTAB_INNER_ENTRIES && *pos < count) {
			rc = sidtab_convert_tree(&edst->ptr_inner->entries[i],
						 &esrc->ptr_inner->entries[i],
						 pos, count, level - 1,
						 convert);
			if (rc)
				return rc;
			i++;
		}
	} else {
		if (!edst->ptr_leaf) {
			edst->ptr_leaf = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
						 GFP_KERNEL);
			if (!edst->ptr_leaf)
				return -ENOMEM;
		}
		i = 0;
		while (i < SIDTAB_LEAF_ENTRIES && *pos < count) {
			rc = convert->func(&esrc->ptr_leaf->entries[i].context,
					   &edst->ptr_leaf->entries[i].context,
					   convert->args);
			if (rc)
				return rc;
			(*pos)++;
			i++;
		}
		cond_resched();
	}
	return 0;
}

int sidtab_convert(struct sidtab *s, struct sidtab_convert_params *params)
{
	unsigned long flags;
	u32 count, level, pos;
	int rc;

	spin_lock_irqsave(&s->lock, flags);

	/* concurrent policy loads are not allowed */
	if (s->convert) {
		spin_unlock_irqrestore(&s->lock, flags);
		return -EBUSY;
	}

	count = s->count;
	level = sidtab_level_from_count(count);

	/* allocate last leaf in the new sidtab (to avoid race with
	 * live convert)
	 */
	rc = sidtab_do_lookup(params->target, count - 1, 1) ? 0 : -ENOMEM;
	if (rc) {
		spin_unlock_irqrestore(&s->lock, flags);
		return rc;
	}

	/* set count in case no new entries are added during conversion */
	params->target->count = count;

	/* enable live convert of new entries */
	s->convert = params;

	/* we can safely convert the tree outside the lock */
	spin_unlock_irqrestore(&s->lock, flags);

	pr_info("SELinux: Converting %u SID table entries...\n", count);

	/* convert all entries not covered by live convert */
	pos = 0;
	rc = sidtab_convert_tree(&params->target->roots[level],
				 &s->roots[level], &pos, count, level, params);
	if (rc) {
		/* we need to keep the old table - disable live convert */
		spin_lock_irqsave(&s->lock, flags);
		s->convert = NULL;
		spin_unlock_irqrestore(&s->lock, flags);
		return rc;
	}
	/*
	 * The hashtable can also be modified in sidtab_context_to_sid()
	 * so we must re-acquire the lock here.
	 */
	spin_lock_irqsave(&s->lock, flags);
	sidtab_convert_hashtable(params->target, count);
	spin_unlock_irqrestore(&s->lock, flags);

	return 0;
}

void sidtab_cancel_convert(struct sidtab *s)
{
	unsigned long flags;

	/* cancelling policy load - disable live convert of sidtab */
	spin_lock_irqsave(&s->lock, flags);
	s->convert = NULL;
	spin_unlock_irqrestore(&s->lock, flags);
}
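/*
 * Teardown helpers. sidtab_destroy_entry() frees everything owned by a
 * single entry: its context and, when the sid2str cache is enabled, any
 * cached string still attached to it. These run only when no other
 * references to the table remain, which is why the raw (unprotected)
 * RCU dereference is safe here.
 */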
static void sidtab_destroy_entry(struct sidtab_entry *entry)
{
	context_destroy(&entry->context);
#if CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0
	kfree(rcu_dereference_raw(entry->cache));
#endif
}

static void sidtab_destroy_tree(union sidtab_entry_inner entry, u32 level)
{
	u32 i;

	if (level != 0) {
		struct sidtab_node_inner *node = entry.ptr_inner;

		if (!node)
			return;

		for (i = 0; i < SIDTAB_INNER_ENTRIES; i++)
			sidtab_destroy_tree(node->entries[i], level - 1);
		kfree(node);
	} else {
		struct sidtab_node_leaf *node = entry.ptr_leaf;

		if (!node)
			return;

		for (i = 0; i < SIDTAB_LEAF_ENTRIES; i++)
			sidtab_destroy_entry(&node->entries[i]);
		kfree(node);
	}
}

void sidtab_destroy(struct sidtab *s)
{
	u32 i, level;

	for (i = 0; i < SECINITSID_NUM; i++)
		if (s->isids[i].set)
			sidtab_destroy_entry(&s->isids[i].entry);

	level = SIDTAB_MAX_LEVEL;
	while (level && !s->roots[level].ptr_inner)
		--level;

	sidtab_destroy_tree(s->roots[level], level);
	/*
	 * The context_to_sid hashtable's objects are all shared
	 * with the isids array and context tree, and so don't need
	 * to be cleaned up here.
	 */
}

#if CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0

void sidtab_sid2str_put(struct sidtab *s, struct sidtab_entry *entry,
			const char *str, u32 str_len)
{
	struct sidtab_str_cache *cache, *victim = NULL;
	unsigned long flags;

	/* do not cache invalid contexts */
	if (entry->context.len)
		return;

	spin_lock_irqsave(&s->cache_lock, flags);

	cache = rcu_dereference_protected(entry->cache,
					  lockdep_is_held(&s->cache_lock));
	if (cache) {
		/* entry in cache - just bump to the head of LRU list */
		list_move(&cache->lru_member, &s->cache_lru_list);
		goto out_unlock;
	}

	cache = kmalloc(sizeof(struct sidtab_str_cache) + str_len, GFP_ATOMIC);
	if (!cache)
		goto out_unlock;

	if (s->cache_free_slots == 0) {
		/* pop a cache entry from the tail and free it */
		victim = container_of(s->cache_lru_list.prev,
				      struct sidtab_str_cache, lru_member);
		list_del(&victim->lru_member);
		rcu_assign_pointer(victim->parent->cache, NULL);
	} else {
		s->cache_free_slots--;
	}
	cache->parent = entry;
	cache->len = str_len;
	memcpy(cache->str, str, str_len);
	list_add(&cache->lru_member, &s->cache_lru_list);

	rcu_assign_pointer(entry->cache, cache);

out_unlock:
	spin_unlock_irqrestore(&s->cache_lock, flags);
	kfree_rcu(victim, rcu_member);
}
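/*
 * Fetch the cached string form of an entry's context under RCU. On a
 * hit, *out_len is set and, when out is non-NULL, the string is
 * duplicated for the caller and then re-inserted via sidtab_sid2str_put()
 * to refresh its LRU position. Returns -ENOENT on a miss (or for an
 * invalid context) and -ENOMEM if the duplication fails.
 */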
int sidtab_sid2str_get(struct sidtab *s, struct sidtab_entry *entry,
		       char **out, u32 *out_len)
{
	struct sidtab_str_cache *cache;
	int rc = 0;

	if (entry->context.len)
		return -ENOENT; /* do not cache invalid contexts */

	rcu_read_lock();

	cache = rcu_dereference(entry->cache);
	if (!cache) {
		rc = -ENOENT;
	} else {
		*out_len = cache->len;
		if (out) {
			*out = kmemdup(cache->str, cache->len, GFP_ATOMIC);
			if (!*out)
				rc = -ENOMEM;
		}
	}

	rcu_read_unlock();

	if (!rc && out)
		sidtab_sid2str_put(s, entry, *out, *out_len);
	return rc;
}

#endif /* CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0 */