// SPDX-License-Identifier: GPL-2.0
/*
 * Implementation of the SID table type.
 *
 * Original author: Stephen Smalley, <sds@tycho.nsa.gov>
 * Author: Ondrej Mosnacek, <omosnacek@gmail.com>
 *
 * Copyright (C) 2018 Red Hat, Inc.
 */
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <asm/barrier.h>
#include "flask.h"
#include "security.h"
#include "sidtab.h"
#include "services.h"

struct sidtab_str_cache {
	struct rcu_head rcu_member;
	struct list_head lru_member;
	struct sidtab_entry *parent;
	u32 len;
	char str[];
};

#define index_to_sid(index) ((index) + SECINITSID_NUM + 1)
#define sid_to_index(sid) ((sid) - (SECINITSID_NUM + 1))

int sidtab_init(struct sidtab *s)
{
	u32 i;

	memset(s->roots, 0, sizeof(s->roots));

	for (i = 0; i < SECINITSID_NUM; i++)
		s->isids[i].set = 0;

	s->frozen = false;
	s->count = 0;
	s->convert = NULL;
	hash_init(s->context_to_sid);

	spin_lock_init(&s->lock);

#if CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0
	s->cache_free_slots = CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE;
	INIT_LIST_HEAD(&s->cache_lru_list);
	spin_lock_init(&s->cache_lock);
#endif

	return 0;
}

static u32 context_to_sid(struct sidtab *s, struct context *context, u32 hash)
{
	struct sidtab_entry *entry;
	u32 sid = 0;

	rcu_read_lock();
	hash_for_each_possible_rcu(s->context_to_sid, entry, list, hash) {
		if (entry->hash != hash)
			continue;
		if (context_cmp(&entry->context, context)) {
			sid = entry->sid;
			break;
		}
	}
	rcu_read_unlock();
	return sid;
}

int sidtab_set_initial(struct sidtab *s, u32 sid, struct context *context)
{
	struct sidtab_isid_entry *isid;
	u32 hash;
	int rc;

	if (sid == 0 || sid > SECINITSID_NUM)
		return -EINVAL;

	isid = &s->isids[sid - 1];

	rc = context_cpy(&isid->entry.context, context);
	if (rc)
		return rc;

#if CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0
	isid->entry.cache = NULL;
#endif
	isid->set = 1;

	hash = context_compute_hash(context);

	/*
	 * Multiple initial sids may map to the same context. Check that this
	 * context is not already represented in the context_to_sid hashtable
	 * to avoid duplicate entries and long linked lists upon hash
	 * collision.
	 */
	if (!context_to_sid(s, context, hash)) {
		isid->entry.sid = sid;
		isid->entry.hash = hash;
		hash_add(s->context_to_sid, &isid->entry.list, hash);
	}

	return 0;
}
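/*
 * Worked example of the SID <-> index mapping above (illustrative only;
 * the concrete numbers depend on the value of SECINITSID_NUM): SID 0 is
 * reserved as invalid and SIDs 1..SECINITSID_NUM address the isids[]
 * array (as isids[sid - 1]), so dynamically allocated entries start at
 * SID SECINITSID_NUM + 1.  If SECINITSID_NUM were 27, the entry at tree
 * index 0 would be assigned SID 28 by index_to_sid(0), and
 * sid_to_index(28) would map back to index 0.
 */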
int sidtab_hash_stats(struct sidtab *sidtab, char *page)
{
	int i;
	int chain_len = 0;
	int slots_used = 0;
	int entries = 0;
	int max_chain_len = 0;
	int cur_bucket = 0;
	struct sidtab_entry *entry;

	rcu_read_lock();
	hash_for_each_rcu(sidtab->context_to_sid, i, entry, list) {
		entries++;
		if (i == cur_bucket) {
			chain_len++;
			if (chain_len == 1)
				slots_used++;
		} else {
			cur_bucket = i;
			if (chain_len > max_chain_len)
				max_chain_len = chain_len;
			chain_len = 0;
		}
	}
	rcu_read_unlock();

	if (chain_len > max_chain_len)
		max_chain_len = chain_len;

	return scnprintf(page, PAGE_SIZE, "entries: %d\nbuckets used: %d/%d\n"
			 "longest chain: %d\n", entries,
			 slots_used, SIDTAB_HASH_BUCKETS, max_chain_len);
}

static u32 sidtab_level_from_count(u32 count)
{
	u32 capacity = SIDTAB_LEAF_ENTRIES;
	u32 level = 0;

	while (count > capacity) {
		capacity <<= SIDTAB_INNER_SHIFT;
		++level;
	}
	return level;
}

static int sidtab_alloc_roots(struct sidtab *s, u32 level)
{
	u32 l;

	if (!s->roots[0].ptr_leaf) {
		s->roots[0].ptr_leaf = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
					       GFP_ATOMIC);
		if (!s->roots[0].ptr_leaf)
			return -ENOMEM;
	}
	for (l = 1; l <= level; ++l)
		if (!s->roots[l].ptr_inner) {
			s->roots[l].ptr_inner = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
							GFP_ATOMIC);
			if (!s->roots[l].ptr_inner)
				return -ENOMEM;
			s->roots[l].ptr_inner->entries[0] = s->roots[l - 1];
		}
	return 0;
}

static struct sidtab_entry *sidtab_do_lookup(struct sidtab *s, u32 index,
					     int alloc)
{
	union sidtab_entry_inner *entry;
	u32 level, capacity_shift, leaf_index = index / SIDTAB_LEAF_ENTRIES;

	/* find the level of the subtree we need */
	level = sidtab_level_from_count(index + 1);
	capacity_shift = level * SIDTAB_INNER_SHIFT;

	/* allocate roots if needed */
	if (alloc && sidtab_alloc_roots(s, level) != 0)
		return NULL;

	/* lookup inside the subtree */
	entry = &s->roots[level];
	while (level != 0) {
		capacity_shift -= SIDTAB_INNER_SHIFT;
		--level;

		entry = &entry->ptr_inner->entries[leaf_index >> capacity_shift];
		leaf_index &= ((u32)1 << capacity_shift) - 1;

		if (!entry->ptr_inner) {
			if (alloc)
				entry->ptr_inner = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
							   GFP_ATOMIC);
			if (!entry->ptr_inner)
				return NULL;
		}
	}
	if (!entry->ptr_leaf) {
		if (alloc)
			entry->ptr_leaf = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
						  GFP_ATOMIC);
		if (!entry->ptr_leaf)
			return NULL;
	}
	return &entry->ptr_leaf->entries[index % SIDTAB_LEAF_ENTRIES];
}

static struct sidtab_entry *sidtab_lookup(struct sidtab *s, u32 index)
{
	/* read entries only after reading count */
	u32 count = smp_load_acquire(&s->count);

	if (index >= count)
		return NULL;

	return sidtab_do_lookup(s, index, 0);
}
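/*
 * Worked example of the tree walk in sidtab_do_lookup() (illustrative
 * only: the real SIDTAB_LEAF_ENTRIES and SIDTAB_INNER_SHIFT are derived
 * from the node allocation size, so the numbers below are assumptions).
 * Suppose SIDTAB_LEAF_ENTRIES == 32 and SIDTAB_INNER_SHIFT == 9.  For
 * index == 1000: leaf_index = 1000 / 32 = 31, and because a single leaf
 * holds only 32 entries while a level-1 subtree holds 32 << 9 = 16384,
 * sidtab_level_from_count(1001) returns 1.  The walk starts at roots[1],
 * descends into the inner node's entry 31 (leaf_index >> 0 once
 * capacity_shift has dropped to 0), and returns slot 1000 % 32 == 8 of
 * that leaf: 31 * 32 + 8 == 1000, as expected.
 */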
static struct sidtab_entry *sidtab_lookup_initial(struct sidtab *s, u32 sid)
{
	return s->isids[sid - 1].set ? &s->isids[sid - 1].entry : NULL;
}

static struct sidtab_entry *sidtab_search_core(struct sidtab *s, u32 sid,
					       int force)
{
	if (sid != 0) {
		struct sidtab_entry *entry;

		if (sid > SECINITSID_NUM)
			entry = sidtab_lookup(s, sid_to_index(sid));
		else
			entry = sidtab_lookup_initial(s, sid);
		if (entry && (!entry->context.len || force))
			return entry;
	}

	return sidtab_lookup_initial(s, SECINITSID_UNLABELED);
}

struct sidtab_entry *sidtab_search_entry(struct sidtab *s, u32 sid)
{
	return sidtab_search_core(s, sid, 0);
}

struct sidtab_entry *sidtab_search_entry_force(struct sidtab *s, u32 sid)
{
	return sidtab_search_core(s, sid, 1);
}

int sidtab_context_to_sid(struct sidtab *s, struct context *context,
			  u32 *sid)
{
	unsigned long flags;
	u32 count, hash = context_compute_hash(context);
	struct sidtab_convert_params *convert;
	struct sidtab_entry *dst, *dst_convert;
	int rc;

	*sid = context_to_sid(s, context, hash);
	if (*sid)
		return 0;

	/* lock-free search failed: lock, re-search, and insert if not found */
	spin_lock_irqsave(&s->lock, flags);

	rc = 0;
	*sid = context_to_sid(s, context, hash);
	if (*sid)
		goto out_unlock;

	if (unlikely(s->frozen)) {
		/*
		 * This sidtab is now frozen - tell the caller to abort and
		 * get the new one.
		 */
		rc = -ESTALE;
		goto out_unlock;
	}

	count = s->count;

	/* bail out if we already reached max entries */
	rc = -EOVERFLOW;
	if (count >= SIDTAB_MAX)
		goto out_unlock;

	/* insert context into new entry */
	rc = -ENOMEM;
	dst = sidtab_do_lookup(s, count, 1);
	if (!dst)
		goto out_unlock;

	dst->sid = index_to_sid(count);
	dst->hash = hash;

	rc = context_cpy(&dst->context, context);
	if (rc)
		goto out_unlock;

	/*
	 * if we are building a new sidtab, we need to convert the context
	 * and insert it there as well
	 */
	convert = s->convert;
	if (convert) {
		struct sidtab *target = convert->target;

		rc = -ENOMEM;
		dst_convert = sidtab_do_lookup(target, count, 1);
		if (!dst_convert) {
			context_destroy(&dst->context);
			goto out_unlock;
		}

		rc = services_convert_context(convert->args,
					      context, &dst_convert->context);
		if (rc) {
			context_destroy(&dst->context);
			goto out_unlock;
		}
		dst_convert->sid = index_to_sid(count);
		dst_convert->hash = context_compute_hash(&dst_convert->context);
		target->count = count + 1;

		hash_add_rcu(target->context_to_sid,
			     &dst_convert->list, dst_convert->hash);
	}

	if (context->len)
		pr_info("SELinux: Context %s is not valid (left unmapped).\n",
			context->str);

	*sid = index_to_sid(count);

	/* write entries before updating count */
	smp_store_release(&s->count, count + 1);
	hash_add_rcu(s->context_to_sid, &dst->list, dst->hash);

	rc = 0;
out_unlock:
	spin_unlock_irqrestore(&s->lock, flags);
	return rc;
}
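/*
 * Note on the memory ordering above (an explanatory recap of the two
 * barrier comments already in the code): the smp_store_release() of
 * s->count in sidtab_context_to_sid() pairs with the smp_load_acquire()
 * of s->count in sidtab_lookup().  A lock-free reader that observes the
 * incremented count is thereby guaranteed to also observe the fully
 * initialized entry at the new index, which is why the entry fields
 * must be written before the count is bumped.
 */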
static void sidtab_convert_hashtable(struct sidtab *s, u32 count)
{
	struct sidtab_entry *entry;
	u32 i;

	for (i = 0; i < count; i++) {
		entry = sidtab_do_lookup(s, i, 0);
		entry->sid = index_to_sid(i);
		entry->hash = context_compute_hash(&entry->context);

		hash_add_rcu(s->context_to_sid, &entry->list, entry->hash);
	}
}

static int sidtab_convert_tree(union sidtab_entry_inner *edst,
			       union sidtab_entry_inner *esrc,
			       u32 *pos, u32 count, u32 level,
			       struct sidtab_convert_params *convert)
{
	int rc;
	u32 i;

	if (level != 0) {
		if (!edst->ptr_inner) {
			edst->ptr_inner = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
						  GFP_KERNEL);
			if (!edst->ptr_inner)
				return -ENOMEM;
		}
		i = 0;
		while (i < SIDTAB_INNER_ENTRIES && *pos < count) {
			rc = sidtab_convert_tree(&edst->ptr_inner->entries[i],
						 &esrc->ptr_inner->entries[i],
						 pos, count, level - 1,
						 convert);
			if (rc)
				return rc;
			i++;
		}
	} else {
		if (!edst->ptr_leaf) {
			edst->ptr_leaf = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
						 GFP_KERNEL);
			if (!edst->ptr_leaf)
				return -ENOMEM;
		}
		i = 0;
		while (i < SIDTAB_LEAF_ENTRIES && *pos < count) {
			rc = services_convert_context(convert->args,
					&esrc->ptr_leaf->entries[i].context,
					&edst->ptr_leaf->entries[i].context);
			if (rc)
				return rc;
			(*pos)++;
			i++;
		}
		cond_resched();
	}
	return 0;
}

int sidtab_convert(struct sidtab *s, struct sidtab_convert_params *params)
{
	unsigned long flags;
	u32 count, level, pos;
	int rc;

	spin_lock_irqsave(&s->lock, flags);

	/* concurrent policy loads are not allowed */
	if (s->convert) {
		spin_unlock_irqrestore(&s->lock, flags);
		return -EBUSY;
	}

	count = s->count;
	level = sidtab_level_from_count(count);

	/* allocate last leaf in the new sidtab (to avoid race with
	 * live convert)
	 */
	rc = sidtab_do_lookup(params->target, count - 1, 1) ? 0 : -ENOMEM;
	if (rc) {
		spin_unlock_irqrestore(&s->lock, flags);
		return rc;
	}

	/* set count in case no new entries are added during conversion */
	params->target->count = count;

	/* enable live convert of new entries */
	s->convert = params;

	/* we can safely convert the tree outside the lock */
	spin_unlock_irqrestore(&s->lock, flags);

	pr_info("SELinux: Converting %u SID table entries...\n", count);

	/* convert all entries not covered by live convert */
	pos = 0;
	rc = sidtab_convert_tree(&params->target->roots[level],
				 &s->roots[level], &pos, count, level, params);
	if (rc) {
		/* we need to keep the old table - disable live convert */
		spin_lock_irqsave(&s->lock, flags);
		s->convert = NULL;
		spin_unlock_irqrestore(&s->lock, flags);
		return rc;
	}
	/*
	 * The hashtable can also be modified in sidtab_context_to_sid()
	 * so we must re-acquire the lock here.
	 */
	spin_lock_irqsave(&s->lock, flags);
	sidtab_convert_hashtable(params->target, count);
	spin_unlock_irqrestore(&s->lock, flags);

	return 0;
}
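/*
 * Summary of the live-convert protocol above (an explanatory recap of
 * the comments in sidtab_convert() and sidtab_context_to_sid()):
 *
 *   1. Under the lock, the leaf covering the last existing index is
 *      preallocated in the target tree and s->convert is set, so the
 *      bulk converter and the live converter never race on allocating
 *      a shared node.
 *   2. The bulk of the tree is converted outside the lock with
 *      GFP_KERNEL; meanwhile, any entry added concurrently through
 *      sidtab_context_to_sid() is converted inline into the target
 *      table under the lock.
 *   3. Finally, the target's context_to_sid hashtable is populated
 *      under the lock, since sidtab_context_to_sid() may be adding to
 *      it at the same time.
 */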
void sidtab_cancel_convert(struct sidtab *s)
{
	unsigned long flags;

	/* cancelling policy load - disable live convert of sidtab */
	spin_lock_irqsave(&s->lock, flags);
	s->convert = NULL;
	spin_unlock_irqrestore(&s->lock, flags);
}

void sidtab_freeze_begin(struct sidtab *s, unsigned long *flags) __acquires(&s->lock)
{
	spin_lock_irqsave(&s->lock, *flags);
	s->frozen = true;
	s->convert = NULL;
}

void sidtab_freeze_end(struct sidtab *s, unsigned long *flags) __releases(&s->lock)
{
	spin_unlock_irqrestore(&s->lock, *flags);
}

static void sidtab_destroy_entry(struct sidtab_entry *entry)
{
	context_destroy(&entry->context);
#if CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0
	kfree(rcu_dereference_raw(entry->cache));
#endif
}

static void sidtab_destroy_tree(union sidtab_entry_inner entry, u32 level)
{
	u32 i;

	if (level != 0) {
		struct sidtab_node_inner *node = entry.ptr_inner;

		if (!node)
			return;

		for (i = 0; i < SIDTAB_INNER_ENTRIES; i++)
			sidtab_destroy_tree(node->entries[i], level - 1);
		kfree(node);
	} else {
		struct sidtab_node_leaf *node = entry.ptr_leaf;

		if (!node)
			return;

		for (i = 0; i < SIDTAB_LEAF_ENTRIES; i++)
			sidtab_destroy_entry(&node->entries[i]);
		kfree(node);
	}
}

void sidtab_destroy(struct sidtab *s)
{
	u32 i, level;

	for (i = 0; i < SECINITSID_NUM; i++)
		if (s->isids[i].set)
			sidtab_destroy_entry(&s->isids[i].entry);

	level = SIDTAB_MAX_LEVEL;
	while (level && !s->roots[level].ptr_inner)
		--level;

	sidtab_destroy_tree(s->roots[level], level);
	/*
	 * The context_to_sid hashtable's objects are all shared
	 * with the isids array and context tree, and so don't need
	 * to be cleaned up here.
	 */
}
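/*
 * SID -> string cache (explanatory note for the section below): each
 * sidtab_entry may carry an RCU-protected pointer to a cached string
 * representation of its context.  The cache is bounded by
 * CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE entries and managed as an
 * LRU list under cache_lock; once no free slot remains, the least
 * recently used entry is unlinked and freed via kfree_rcu(), so that
 * concurrent lock-free readers in sidtab_sid2str_get() remain safe.
 */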
#if CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0

void sidtab_sid2str_put(struct sidtab *s, struct sidtab_entry *entry,
			const char *str, u32 str_len)
{
	struct sidtab_str_cache *cache, *victim = NULL;
	unsigned long flags;

	/* do not cache invalid contexts */
	if (entry->context.len)
		return;

	spin_lock_irqsave(&s->cache_lock, flags);

	cache = rcu_dereference_protected(entry->cache,
					  lockdep_is_held(&s->cache_lock));
	if (cache) {
		/* entry in cache - just bump to the head of LRU list */
		list_move(&cache->lru_member, &s->cache_lru_list);
		goto out_unlock;
	}

	cache = kmalloc(struct_size(cache, str, str_len), GFP_ATOMIC);
	if (!cache)
		goto out_unlock;

	if (s->cache_free_slots == 0) {
		/* pop a cache entry from the tail and free it */
		victim = container_of(s->cache_lru_list.prev,
				      struct sidtab_str_cache, lru_member);
		list_del(&victim->lru_member);
		rcu_assign_pointer(victim->parent->cache, NULL);
	} else {
		s->cache_free_slots--;
	}
	cache->parent = entry;
	cache->len = str_len;
	memcpy(cache->str, str, str_len);
	list_add(&cache->lru_member, &s->cache_lru_list);

	rcu_assign_pointer(entry->cache, cache);

out_unlock:
	spin_unlock_irqrestore(&s->cache_lock, flags);
	kfree_rcu(victim, rcu_member);
}

int sidtab_sid2str_get(struct sidtab *s, struct sidtab_entry *entry,
		       char **out, u32 *out_len)
{
	struct sidtab_str_cache *cache;
	int rc = 0;

	if (entry->context.len)
		return -ENOENT; /* do not cache invalid contexts */

	rcu_read_lock();

	cache = rcu_dereference(entry->cache);
	if (!cache) {
		rc = -ENOENT;
	} else {
		*out_len = cache->len;
		if (out) {
			*out = kmemdup(cache->str, cache->len, GFP_ATOMIC);
			if (!*out)
				rc = -ENOMEM;
		}
	}

	rcu_read_unlock();

	if (!rc && out)
		sidtab_sid2str_put(s, entry, *out, *out_len);
	return rc;
}

#endif /* CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0 */
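/*
 * Usage sketch for the cache API (illustrative, not part of this file;
 * the exact caller shape is an assumption): a sid-to-string path such
 * as the one in services.c would first try sidtab_sid2str_get(), fall
 * back to formatting the context itself on -ENOENT, and then publish
 * the result so later lookups hit the cache:
 *
 *	rc = sidtab_sid2str_get(sidtab, entry, &str, &str_len);
 *	if (rc == -ENOENT) {
 *		rc = context_struct_to_string(policydb, &entry->context,
 *					      &str, &str_len);
 *		if (!rc)
 *			sidtab_sid2str_put(sidtab, entry, str, str_len);
 *	}
 */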