// SPDX-License-Identifier: GPL-2.0
/*
 * Implementation of the SID table type.
 *
 * Original author: Stephen Smalley, <sds@tycho.nsa.gov>
 * Author: Ondrej Mosnacek, <omosnacek@gmail.com>
 *
 * Copyright (C) 2018 Red Hat, Inc.
 */
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>
#include "flask.h"
#include "security.h"
#include "sidtab.h"

int sidtab_init(struct sidtab *s)
{
	u32 i;

	memset(s->roots, 0, sizeof(s->roots));

	for (i = 0; i < SIDTAB_RCACHE_SIZE; i++)
		atomic_set(&s->rcache[i], -1);

	for (i = 0; i < SECINITSID_NUM; i++)
		s->isids[i].set = 0;

	atomic_set(&s->count, 0);

	s->convert = NULL;

	spin_lock_init(&s->lock);
	return 0;
}

int sidtab_set_initial(struct sidtab *s, u32 sid, struct context *context)
{
	struct sidtab_isid_entry *entry;
	int rc;

	if (sid == 0 || sid > SECINITSID_NUM)
		return -EINVAL;

	entry = &s->isids[sid - 1];

	rc = context_cpy(&entry->context, context);
	if (rc)
		return rc;

	entry->set = 1;
	return 0;
}

static u32 sidtab_level_from_count(u32 count)
{
	u32 capacity = SIDTAB_LEAF_ENTRIES;
	u32 level = 0;

	while (count > capacity) {
		capacity <<= SIDTAB_INNER_SHIFT;
		++level;
	}
	return level;
}

static int sidtab_alloc_roots(struct sidtab *s, u32 level)
{
	u32 l;

	if (!s->roots[0].ptr_leaf) {
		s->roots[0].ptr_leaf = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
					       GFP_ATOMIC);
		if (!s->roots[0].ptr_leaf)
			return -ENOMEM;
	}
	for (l = 1; l <= level; ++l)
		if (!s->roots[l].ptr_inner) {
			s->roots[l].ptr_inner = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
							GFP_ATOMIC);
			if (!s->roots[l].ptr_inner)
				return -ENOMEM;
			s->roots[l].ptr_inner->entries[0] = s->roots[l - 1];
		}
	return 0;
}

static struct context *sidtab_do_lookup(struct sidtab *s, u32 index, int alloc)
{
	union sidtab_entry_inner *entry;
	u32 level, capacity_shift, leaf_index = index / SIDTAB_LEAF_ENTRIES;

	/* find the level of the subtree we need */
	level = sidtab_level_from_count(index + 1);
	capacity_shift = level * SIDTAB_INNER_SHIFT;

	/* allocate roots if needed */
	if (alloc && sidtab_alloc_roots(s, level) != 0)
		return NULL;

	/* lookup inside the subtree */
	entry = &s->roots[level];
	while (level != 0) {
		capacity_shift -= SIDTAB_INNER_SHIFT;
		--level;

		entry = &entry->ptr_inner->entries[leaf_index >> capacity_shift];
		leaf_index &= ((u32)1 << capacity_shift) - 1;

		if (!entry->ptr_inner) {
			if (alloc)
				entry->ptr_inner = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
							   GFP_ATOMIC);
			if (!entry->ptr_inner)
				return NULL;
		}
	}
	if (!entry->ptr_leaf) {
		if (alloc)
			entry->ptr_leaf = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
						  GFP_ATOMIC);
		if (!entry->ptr_leaf)
			return NULL;
	}
	return &entry->ptr_leaf->entries[index % SIDTAB_LEAF_ENTRIES].context;
}

static struct context *sidtab_lookup(struct sidtab *s, u32 index)
{
	u32 count = (u32)atomic_read(&s->count);

	if (index >= count)
		return NULL;

	/* read entries after reading count */
	smp_rmb();

	return sidtab_do_lookup(s, index, 0);
}
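/*
 * Worked example of the index arithmetic in sidtab_do_lookup(), assuming
 * for illustration that SIDTAB_LEAF_ENTRIES == 128 and
 * SIDTAB_INNER_SHIFT == 9 (the real values are derived from the node
 * allocation size in sidtab.h and may differ):
 *
 *	index = 70000
 *	leaf_index = 70000 / 128 = 546	(zero-based leaf number)
 *	level = 2		(128 < 70001 and 128 << 9 = 65536 < 70001)
 *	capacity_shift = 2 * 9 = 18
 *
 *	iteration 1: capacity_shift = 9, slot = 546 >> 9 = 1,
 *		     leaf_index &= 511  ->  34
 *	iteration 2: capacity_shift = 0, slot = 34 >> 0 = 34,
 *		     leaf_index &= 0    ->  0
 *
 *	final leaf slot = 70000 % 128 = 112
 *
 * i.e. under these assumed constants the entry would live at
 * roots[2] -> inner[1] -> inner[34] -> leaf[112].
 */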
static struct context *sidtab_lookup_initial(struct sidtab *s, u32 sid)
{
	return s->isids[sid - 1].set ? &s->isids[sid - 1].context : NULL;
}

static struct context *sidtab_search_core(struct sidtab *s, u32 sid, int force)
{
	struct context *context;

	if (sid != 0) {
		if (sid > SECINITSID_NUM)
			context = sidtab_lookup(s, sid - (SECINITSID_NUM + 1));
		else
			context = sidtab_lookup_initial(s, sid);
		if (context && (!context->len || force))
			return context;
	}

	return sidtab_lookup_initial(s, SECINITSID_UNLABELED);
}

struct context *sidtab_search(struct sidtab *s, u32 sid)
{
	return sidtab_search_core(s, sid, 0);
}

struct context *sidtab_search_force(struct sidtab *s, u32 sid)
{
	return sidtab_search_core(s, sid, 1);
}

static int sidtab_find_context(union sidtab_entry_inner entry,
			       u32 *pos, u32 count, u32 level,
			       struct context *context, u32 *index)
{
	int rc;
	u32 i;

	if (level != 0) {
		struct sidtab_node_inner *node = entry.ptr_inner;

		i = 0;
		while (i < SIDTAB_INNER_ENTRIES && *pos < count) {
			rc = sidtab_find_context(node->entries[i],
						 pos, count, level - 1,
						 context, index);
			if (rc == 0)
				return 0;
			i++;
		}
	} else {
		struct sidtab_node_leaf *node = entry.ptr_leaf;

		i = 0;
		while (i < SIDTAB_LEAF_ENTRIES && *pos < count) {
			if (context_cmp(&node->entries[i].context, context)) {
				*index = *pos;
				return 0;
			}
			(*pos)++;
			i++;
		}
	}
	return -ENOENT;
}

static void sidtab_rcache_update(struct sidtab *s, u32 index, u32 pos)
{
	while (pos > 0) {
		atomic_set(&s->rcache[pos], atomic_read(&s->rcache[pos - 1]));
		--pos;
	}
	atomic_set(&s->rcache[0], (int)index);
}

static void sidtab_rcache_push(struct sidtab *s, u32 index)
{
	sidtab_rcache_update(s, index, SIDTAB_RCACHE_SIZE - 1);
}

static int sidtab_rcache_search(struct sidtab *s, struct context *context,
				u32 *index)
{
	u32 i;

	for (i = 0; i < SIDTAB_RCACHE_SIZE; i++) {
		int v = atomic_read(&s->rcache[i]);

		if (v < 0)
			continue;

		if (context_cmp(sidtab_do_lookup(s, (u32)v, 0), context)) {
			sidtab_rcache_update(s, (u32)v, i);
			*index = (u32)v;
			return 0;
		}
	}
	return -ENOENT;
}
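/*
 * Illustration of the rcache behaviour above, assuming for the example
 * that SIDTAB_RCACHE_SIZE is 3 and the cache holds indices [A, B, C]
 * (most recently used first):
 *
 *   - a hit on C (slot 2) calls sidtab_rcache_update(s, C, 2), which
 *     shifts slots 0..1 down one and stores C in slot 0: [C, A, B];
 *   - a push of a new index D behaves like a hit on the last slot and
 *     evicts the oldest entry: [D, C, A].
 *
 * The update is racy by design: concurrent updates may duplicate or
 * momentarily drop slots, but that is harmless because every slot is
 * re-validated against the live table via sidtab_do_lookup() and
 * context_cmp() before it is trusted.
 */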
static int sidtab_reverse_lookup(struct sidtab *s, struct context *context,
				 u32 *index)
{
	unsigned long flags;
	u32 count = (u32)atomic_read(&s->count);
	u32 count_locked, level, pos;
	struct sidtab_convert_params *convert;
	struct context *dst, *dst_convert;
	int rc;

	rc = sidtab_rcache_search(s, context, index);
	if (rc == 0)
		return 0;

	level = sidtab_level_from_count(count);

	/* read entries after reading count */
	smp_rmb();

	pos = 0;
	rc = sidtab_find_context(s->roots[level], &pos, count, level,
				 context, index);
	if (rc == 0) {
		sidtab_rcache_push(s, *index);
		return 0;
	}

	/* lock-free search failed: lock, re-search, and insert if not found */
	spin_lock_irqsave(&s->lock, flags);

	convert = s->convert;
	count_locked = (u32)atomic_read(&s->count);
	level = sidtab_level_from_count(count_locked);

	/* if count has changed before we acquired the lock, then catch up */
	while (count < count_locked) {
		if (context_cmp(sidtab_do_lookup(s, count, 0), context)) {
			sidtab_rcache_push(s, count);
			*index = count;
			rc = 0;
			goto out_unlock;
		}
		++count;
	}

	/* bail out if we already reached max entries */
	rc = -EOVERFLOW;
	if (count >= SIDTAB_MAX)
		goto out_unlock;

	/* insert context into new entry */
	rc = -ENOMEM;
	dst = sidtab_do_lookup(s, count, 1);
	if (!dst)
		goto out_unlock;

	rc = context_cpy(dst, context);
	if (rc)
		goto out_unlock;

	/*
	 * if we are building a new sidtab, we need to convert the context
	 * and insert it there as well
	 */
	if (convert) {
		rc = -ENOMEM;
		dst_convert = sidtab_do_lookup(convert->target, count, 1);
		if (!dst_convert) {
			context_destroy(dst);
			goto out_unlock;
		}

		rc = convert->func(context, dst_convert, convert->args);
		if (rc) {
			context_destroy(dst);
			goto out_unlock;
		}

		/* at this point we know the insert won't fail */
		atomic_set(&convert->target->count, count + 1);
	}

	if (context->len)
		pr_info("SELinux: Context %s is not valid (left unmapped).\n",
			context->str);

	sidtab_rcache_push(s, count);
	*index = count;

	/* write entries before writing new count */
	smp_wmb();

	atomic_set(&s->count, count + 1);

	rc = 0;
out_unlock:
	spin_unlock_irqrestore(&s->lock, flags);
	return rc;
}

int sidtab_context_to_sid(struct sidtab *s, struct context *context, u32 *sid)
{
	int rc;
	u32 i;

	for (i = 0; i < SECINITSID_NUM; i++) {
		struct sidtab_isid_entry *entry = &s->isids[i];

		if (entry->set && context_cmp(context, &entry->context)) {
			*sid = i + 1;
			return 0;
		}
	}

	rc = sidtab_reverse_lookup(s, context, sid);
	if (rc)
		return rc;
	*sid += SECINITSID_NUM + 1;
	return 0;
}
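/*
 * A minimal usage sketch of the mapping above (hypothetical caller code,
 * error handling elided; "ctx" stands for an already-initialized
 * struct context):
 *
 *	u32 sid;
 *
 *	if (!sidtab_context_to_sid(s, &ctx, &sid)) {
 *		struct context *stored = sidtab_search(s, sid);
 *		...
 *	}
 *
 * Here sid is either an initial SID (1..SECINITSID_NUM) or
 * SECINITSID_NUM + 1 plus the zero-based table index: 0 is reserved for
 * "invalid", so sidtab_reverse_lookup() works with indices while callers
 * only ever see the offset SID values.
 */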
static int sidtab_convert_tree(union sidtab_entry_inner *edst,
			       union sidtab_entry_inner *esrc,
			       u32 *pos, u32 count, u32 level,
			       struct sidtab_convert_params *convert)
{
	int rc;
	u32 i;

	if (level != 0) {
		if (!edst->ptr_inner) {
			edst->ptr_inner = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
						  GFP_KERNEL);
			if (!edst->ptr_inner)
				return -ENOMEM;
		}
		i = 0;
		while (i < SIDTAB_INNER_ENTRIES && *pos < count) {
			rc = sidtab_convert_tree(&edst->ptr_inner->entries[i],
						 &esrc->ptr_inner->entries[i],
						 pos, count, level - 1,
						 convert);
			if (rc)
				return rc;
			i++;
		}
	} else {
		if (!edst->ptr_leaf) {
			edst->ptr_leaf = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
						 GFP_KERNEL);
			if (!edst->ptr_leaf)
				return -ENOMEM;
		}
		i = 0;
		while (i < SIDTAB_LEAF_ENTRIES && *pos < count) {
			rc = convert->func(&esrc->ptr_leaf->entries[i].context,
					   &edst->ptr_leaf->entries[i].context,
					   convert->args);
			if (rc)
				return rc;
			(*pos)++;
			i++;
		}
		cond_resched();
	}
	return 0;
}

int sidtab_convert(struct sidtab *s, struct sidtab_convert_params *params)
{
	unsigned long flags;
	u32 count, level, pos;
	int rc;

	spin_lock_irqsave(&s->lock, flags);

	/* concurrent policy loads are not allowed */
	if (s->convert) {
		spin_unlock_irqrestore(&s->lock, flags);
		return -EBUSY;
	}

	count = (u32)atomic_read(&s->count);
	level = sidtab_level_from_count(count);

	/* allocate last leaf in the new sidtab (to avoid race with
	 * live convert)
	 */
	rc = sidtab_do_lookup(params->target, count - 1, 1) ? 0 : -ENOMEM;
	if (rc) {
		spin_unlock_irqrestore(&s->lock, flags);
		return rc;
	}

	/* set count in case no new entries are added during conversion */
	atomic_set(&params->target->count, count);

	/* enable live convert of new entries */
	s->convert = params;

	/* we can safely do the rest of the conversion outside the lock */
	spin_unlock_irqrestore(&s->lock, flags);

	pr_info("SELinux: Converting %u SID table entries...\n", count);

	/* convert all entries not covered by live convert */
	pos = 0;
	rc = sidtab_convert_tree(&params->target->roots[level],
				 &s->roots[level], &pos, count, level, params);
	if (rc) {
		/* we need to keep the old table - disable live convert */
		spin_lock_irqsave(&s->lock, flags);
		s->convert = NULL;
		spin_unlock_irqrestore(&s->lock, flags);
	}
	return rc;
}

static void sidtab_destroy_tree(union sidtab_entry_inner entry, u32 level)
{
	u32 i;

	if (level != 0) {
		struct sidtab_node_inner *node = entry.ptr_inner;

		if (!node)
			return;

		for (i = 0; i < SIDTAB_INNER_ENTRIES; i++)
			sidtab_destroy_tree(node->entries[i], level - 1);
		kfree(node);
	} else {
		struct sidtab_node_leaf *node = entry.ptr_leaf;

		if (!node)
			return;

		for (i = 0; i < SIDTAB_LEAF_ENTRIES; i++)
			context_destroy(&node->entries[i].context);
		kfree(node);
	}
}

void sidtab_destroy(struct sidtab *s)
{
	u32 i, level;

	for (i = 0; i < SECINITSID_NUM; i++)
		if (s->isids[i].set)
			context_destroy(&s->isids[i].context);

	level = SIDTAB_MAX_LEVEL;
	while (level && !s->roots[level].ptr_inner)
		--level;

	sidtab_destroy_tree(s->roots[level], level);
}
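/*
 * A note on the concurrency scheme used throughout this file (a summary
 * of the barrier comments above, not additional machinery): readers are
 * lock-free and rely on two invariants maintained by the writer side in
 * sidtab_reverse_lookup():
 *
 *   1. entries in [0, count) are never modified once published, so a
 *      stale count only makes a reader miss the newest entries;
 *   2. the smp_wmb() before the atomic_set() of s->count pairs with the
 *      smp_rmb() after reading s->count in sidtab_lookup() and
 *      sidtab_reverse_lookup(), so a reader that observes the new count
 *      also observes the fully written entries.
 *
 * During a policy reload, sidtab_convert() walks the old tree while
 * s->convert makes sidtab_reverse_lookup() mirror each newly inserted
 * entry into the new table, keeping the two tables in sync without
 * blocking SID allocation for the duration of the conversion.
 */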