/*
 * AppArmor security module
 *
 * This file contains AppArmor label definitions
 *
 * Copyright 2017 Canonical Ltd.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, version 2 of the
 * License.
 */

#include <linux/audit.h>
#include <linux/seq_file.h>
#include <linux/sort.h>

#include "include/apparmor.h"
#include "include/context.h"
#include "include/label.h"
#include "include/policy.h"
#include "include/secid.h"


/*
 * the aa_label represents the set of profiles confining an object
 *
 * Labels maintain a reference count to the set of pointers they reference
 * Labels are ref counted by
 *   tasks and objects via the security field/security context
 *   code - will take a ref count on a label if it needs the label
 *          beyond what is possible with an rcu_read_lock.
 *   profiles - each profile is a label
 *   secids - a pinned secid will keep a refcount of the label it is
 *          referencing
 *   objects - inodes, files, sockets, ...
 *
 * Labels are not ref counted by the label set, so they may be removed and
 * freed when no longer in use.
 *
 */

#define PROXY_POISON 97
#define LABEL_POISON 100

static void free_proxy(struct aa_proxy *proxy)
{
	if (proxy) {
		/* p->label will not be updated any more as p is dead */
		aa_put_label(rcu_dereference_protected(proxy->label, true));
		memset(proxy, 0, sizeof(*proxy));
		RCU_INIT_POINTER(proxy->label, (struct aa_label *)PROXY_POISON);
		kfree(proxy);
	}
}

void aa_proxy_kref(struct kref *kref)
{
	struct aa_proxy *proxy = container_of(kref, struct aa_proxy, count);

	free_proxy(proxy);
}

struct aa_proxy *aa_alloc_proxy(struct aa_label *label, gfp_t gfp)
{
	struct aa_proxy *new;

	new = kzalloc(sizeof(struct aa_proxy), gfp);
	if (new) {
		kref_init(&new->count);
		rcu_assign_pointer(new->label, aa_get_label(label));
	}
	return new;
}

/* requires profile list write lock held */
void __aa_proxy_redirect(struct aa_label *orig, struct aa_label *new)
{
	struct aa_label *tmp;

	AA_BUG(!orig);
	AA_BUG(!new);
	lockdep_assert_held_exclusive(&labels_set(orig)->lock);

	tmp = rcu_dereference_protected(orig->proxy->label,
					&labels_ns(orig)->lock);
	rcu_assign_pointer(orig->proxy->label, aa_get_label(new));
	orig->flags |= FLAG_STALE;
	aa_put_label(tmp);
}

static void __proxy_share(struct aa_label *old, struct aa_label *new)
{
	struct aa_proxy *proxy = new->proxy;

	new->proxy = aa_get_proxy(old->proxy);
	__aa_proxy_redirect(old, new);
	aa_put_proxy(proxy);
}
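
/*
 * NOTE (overview for readers of this file): a label's proxy is what lets
 * holders of a stale label find its replacement.  __aa_proxy_redirect()
 * points old->proxy->label at the new label and marks the old label
 * FLAG_STALE, so code still holding a ref to the old label can chase
 * proxy->label (see aa_get_newest_label()) to reach the current version.
 * __proxy_share() is used when the new label should take over the old
 * label's proxy object instead of keeping its own.
 */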

/**
 * ns_cmp - compare ns for label set ordering
 * @a: ns to compare (NOT NULL)
 * @b: ns to compare (NOT NULL)
 *
 * Returns: <0 if a < b
 *          ==0 if a == b
 *          >0 if a > b
 */
static int ns_cmp(struct aa_ns *a, struct aa_ns *b)
{
	int res;

	AA_BUG(!a);
	AA_BUG(!b);
	AA_BUG(!a->base.hname);
	AA_BUG(!b->base.hname);

	if (a == b)
		return 0;

	res = a->level - b->level;
	if (res)
		return res;

	return strcmp(a->base.hname, b->base.hname);
}

/**
 * profile_cmp - profile comparison for set ordering
 * @a: profile to compare (NOT NULL)
 * @b: profile to compare (NOT NULL)
 *
 * Returns: <0 if a < b
 *          ==0 if a == b
 *          >0 if a > b
 */
static int profile_cmp(struct aa_profile *a, struct aa_profile *b)
{
	int res;

	AA_BUG(!a);
	AA_BUG(!b);
	AA_BUG(!a->ns);
	AA_BUG(!b->ns);
	AA_BUG(!a->base.hname);
	AA_BUG(!b->base.hname);

	if (a == b || a->base.hname == b->base.hname)
		return 0;
	res = ns_cmp(a->ns, b->ns);
	if (res)
		return res;

	return strcmp(a->base.hname, b->base.hname);
}

/**
 * vec_cmp - compare profile vectors for set ordering
 * @a: profile vector to compare (NOT NULL)
 * @an: length of @a
 * @b: profile vector to compare (NOT NULL)
 * @bn: length of @b
 *
 * Returns: <0 if a < b
 *          ==0 if a == b
 *          >0 if a > b
 */
static int vec_cmp(struct aa_profile **a, int an, struct aa_profile **b, int bn)
{
	int i;

	AA_BUG(!a);
	AA_BUG(!*a);
	AA_BUG(!b);
	AA_BUG(!*b);
	AA_BUG(an <= 0);
	AA_BUG(bn <= 0);

	for (i = 0; i < an && i < bn; i++) {
		int res = profile_cmp(a[i], b[i]);

		if (res != 0)
			return res;
	}

	return an - bn;
}

static bool vec_is_stale(struct aa_profile **vec, int n)
{
	int i;

	AA_BUG(!vec);

	for (i = 0; i < n; i++) {
		if (profile_is_stale(vec[i]))
			return true;
	}

	return false;
}

static bool vec_unconfined(struct aa_profile **vec, int n)
{
	int i;

	AA_BUG(!vec);

	for (i = 0; i < n; i++) {
		if (!profile_unconfined(vec[i]))
			return false;
	}

	return true;
}

static int sort_cmp(const void *a, const void *b)
{
	return profile_cmp(*(struct aa_profile **)a, *(struct aa_profile **)b);
}

/*
 * assumes vec is sorted
 * Assumes @vec has null terminator at vec[n], and will null terminate
 * vec[n - dups]
 */
static inline int unique(struct aa_profile **vec, int n)
{
	int i, pos, dups = 0;

	AA_BUG(n < 1);
	AA_BUG(!vec);

	pos = 0;
	for (i = 1; i < n; i++) {
		int res = profile_cmp(vec[pos], vec[i]);

		AA_BUG(res > 0, "vec not sorted");
		if (res == 0) {
			/* drop duplicate */
			aa_put_profile(vec[i]);
			dups++;
			continue;
		}
		pos++;
		if (dups)
			vec[pos] = vec[i];
	}

	AA_BUG(dups < 0);

	return dups;
}
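
/*
 * Example (illustrative): given the sorted, refcounted vector
 * { A, B, B, C }, unique()/aa_vec_unique() put the reference on the
 * duplicate B, compact the vector to { A, B, C } and return 1 (the number
 * of duplicates dropped) so the caller can shrink its recorded length.
 */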

/**
 * aa_vec_unique - canonical sort and unique a list of profiles
 * @vec: list of profiles to sort and merge
 * @n: number of refcounted profiles in the list (@n > 0)
 * @flags: flags controlling vec handling (eg. VEC_FLAG_TERMINATE)
 *
 * Returns: the number of duplicates eliminated == references put
 *
 * If @flags & VEC_FLAG_TERMINATE @vec has null terminator at vec[n], and will
 * null terminate vec[n - dups]
 */
int aa_vec_unique(struct aa_profile **vec, int n, int flags)
{
	int i, dups = 0;

	AA_BUG(n < 1);
	AA_BUG(!vec);

	/* vecs are usually small and inorder, have a fallback for larger */
	if (n > 8) {
		sort(vec, n, sizeof(struct aa_profile *), sort_cmp, NULL);
		dups = unique(vec, n);
		goto out;
	}

	/* insertion sort + unique in one */
	for (i = 1; i < n; i++) {
		struct aa_profile *tmp = vec[i];
		int pos, j;

		for (pos = i - 1 - dups; pos >= 0; pos--) {
			int res = profile_cmp(vec[pos], tmp);

			if (res == 0) {
				/* drop duplicate entry */
				aa_put_profile(tmp);
				dups++;
				goto continue_outer;
			} else if (res < 0)
				break;
		}
		/* pos is at entry < tmp, or index -1. Set to insert pos */
		pos++;

		for (j = i - dups; j > pos; j--)
			vec[j] = vec[j - 1];
		vec[pos] = tmp;
continue_outer:
		;
	}

	AA_BUG(dups < 0);

out:
	if (flags & VEC_FLAG_TERMINATE)
		vec[n - dups] = NULL;

	return dups;
}


static void label_destroy(struct aa_label *label)
{
	AA_BUG(!label);

	if (!label_isprofile(label)) {
		struct aa_profile *profile;
		struct label_it i;

		aa_put_str(label->hname);

		label_for_each(i, label, profile) {
			aa_put_profile(profile);
			label->vec[i.i] = (struct aa_profile *)
					   (LABEL_POISON + (long) i.i);
		}
	}

	if (rcu_dereference_protected(label->proxy->label, true) == label)
		rcu_assign_pointer(label->proxy->label, NULL);

	aa_free_secid(label->secid);

	aa_put_proxy(label->proxy);
	label->proxy = (struct aa_proxy *) PROXY_POISON + 1;
}

void aa_label_free(struct aa_label *label)
{
	if (!label)
		return;

	label_destroy(label);
	kfree(label);
}

static void label_free_switch(struct aa_label *label)
{
	if (label->flags & FLAG_NS_COUNT)
		aa_free_ns(labels_ns(label));
	else if (label_isprofile(label))
		aa_free_profile(labels_profile(label));
	else
		aa_label_free(label);
}

static void label_free_rcu(struct rcu_head *head)
{
	struct aa_label *label = container_of(head, struct aa_label, rcu);

	if (label->flags & FLAG_IN_TREE)
		(void) aa_label_remove(label);
	label_free_switch(label);
}

void aa_label_kref(struct kref *kref)
{
	struct aa_label *label = container_of(kref, struct aa_label, count);
	struct aa_ns *ns = labels_ns(label);

	if (!ns) {
		/* never live, no rcu callback needed, just using the fn */
		label_free_switch(label);
		return;
	}
	/* TODO: update labels_profile macro so it works here */
	AA_BUG(label_isprofile(label) &&
	       on_list_rcu(&label->vec[0]->base.profiles));
	AA_BUG(label_isprofile(label) &&
	       on_list_rcu(&label->vec[0]->base.list));

	/* TODO: if compound label and not stale add to reclaim cache */
	call_rcu(&label->rcu, label_free_rcu);
}

static void label_free_or_put_new(struct aa_label *label, struct aa_label *new)
{
	if (label != new)
		/* need to free directly to break circular ref with proxy */
		aa_label_free(new);
	else
		aa_put_label(new);
}

bool aa_label_init(struct aa_label *label, int size)
{
	AA_BUG(!label);
	AA_BUG(size < 1);

	label->secid = aa_alloc_secid();
	if (label->secid == AA_SECID_INVALID)
		return false;

	label->size = size;			/* doesn't include null */
	label->vec[size] = NULL;		/* null terminate */
	kref_init(&label->count);
	RB_CLEAR_NODE(&label->node);

	return true;
}

/**
 * aa_label_alloc - allocate a label with a profile vector of @size length
 * @size: size of profile vector in the label
 * @proxy: proxy to use OR null if to allocate a new one
 * @gfp: memory allocation type
 *
 * Returns: new label
 *     else NULL if failed
 */
struct aa_label *aa_label_alloc(int size, struct aa_proxy *proxy, gfp_t gfp)
{
	struct aa_label *new;

	AA_BUG(size < 1);

	/*  + 1 for null terminator entry on vec */
	new = kzalloc(sizeof(*new) + sizeof(struct aa_profile *) * (size + 1),
		      gfp);
	AA_DEBUG("%s (%p)\n", __func__, new);
	if (!new)
		goto fail;

	if (!aa_label_init(new, size))
		goto fail;

	if (!proxy) {
		proxy = aa_alloc_proxy(new, gfp);
		if (!proxy)
			goto fail;
	} else
		aa_get_proxy(proxy);
	/* just set new's proxy, don't redirect proxy here if it was passed in */
	new->proxy = proxy;

	return new;

fail:
	kfree(new);

	return NULL;
}


/**
 * label_cmp - label comparison for set ordering
 * @a: label to compare (NOT NULL)
 * @b: label to compare (NOT NULL)
 *
 * Returns: <0 if a < b
 *          ==0 if a == b
 *          >0 if a > b
 */
static int label_cmp(struct aa_label *a, struct aa_label *b)
{
	AA_BUG(!b);

	if (a == b)
		return 0;

	return vec_cmp(a->vec, a->size, b->vec, b->size);
}

/* helper fn for label_for_each_confined */
int aa_label_next_confined(struct aa_label *label, int i)
{
	AA_BUG(!label);
	AA_BUG(i < 0);

	for (; i < label->size; i++) {
		if (!profile_unconfined(label->vec[i]))
			return i;
	}

	return i;
}

/**
 * __aa_label_next_not_in_set - return the next profile of @sub not in @set
 * @I: label iterator
 * @set: label to test against
 * @sub: label to test if it is a subset of @set
 *
 * Returns: profile in @sub that is not in @set, with iterator set pos after
 *     else NULL if @sub is a subset of @set
 */
struct aa_profile *__aa_label_next_not_in_set(struct label_it *I,
					      struct aa_label *set,
					      struct aa_label *sub)
{
	AA_BUG(!set);
	AA_BUG(!I);
	AA_BUG(I->i < 0);
	AA_BUG(I->i > set->size);
	AA_BUG(!sub);
	AA_BUG(I->j < 0);
	AA_BUG(I->j > sub->size);

	while (I->j < sub->size && I->i < set->size) {
		int res = profile_cmp(sub->vec[I->j], set->vec[I->i]);

		if (res == 0) {
			(I->j)++;
			(I->i)++;
		} else if (res > 0)
			(I->i)++;
		else
			return sub->vec[(I->j)++];
	}

	if (I->j < sub->size)
		return sub->vec[(I->j)++];

	return NULL;
}

/**
 * aa_label_is_subset - test if @sub is a subset of @set
 * @set: label to test against
 * @sub: label to test if it is a subset of @set
 *
 * Returns: true if @sub is subset of @set
 *     else false
 */
bool aa_label_is_subset(struct aa_label *set, struct aa_label *sub)
{
	struct label_it i = { };

	AA_BUG(!set);
	AA_BUG(!sub);

	if (sub == set)
		return true;

	return __aa_label_next_not_in_set(&i, set, sub) == NULL;
}


/**
 * __label_remove - remove @label from the label set
 * @label: label to remove
 * @new: label to redirect to
 *
 * Requires: labels_set(@label)->lock write_lock
 * Returns:  true if the label was in the tree and removed
 */
static bool __label_remove(struct aa_label *label, struct aa_label *new)
{
	struct aa_labelset *ls = labels_set(label);

	AA_BUG(!ls);
	AA_BUG(!label);
	lockdep_assert_held_exclusive(&ls->lock);

	if (new)
		__aa_proxy_redirect(label, new);

	if (!label_is_stale(label))
		__label_make_stale(label);

	if (label->flags & FLAG_IN_TREE) {
		rb_erase(&label->node, &ls->root);
		label->flags &= ~FLAG_IN_TREE;
		return true;
	}

	return false;
}

/**
 * __label_replace - replace @old with @new in label set
 * @old: label to remove from label set
 * @new: label to replace @old with
 *
 * Requires: labels_set(@old)->lock write_lock
 *           valid ref count be held on @new
 * Returns: true if @old was in set and replaced by @new
 *
 * Note: current implementation requires label set be ordered in such a way
 *       that @new directly replaces @old position in the set (ie.
 *       using pointer comparison of the label address would not work)
 */
static bool __label_replace(struct aa_label *old, struct aa_label *new)
{
	struct aa_labelset *ls = labels_set(old);

	AA_BUG(!ls);
	AA_BUG(!old);
	AA_BUG(!new);
	lockdep_assert_held_exclusive(&ls->lock);
	AA_BUG(new->flags & FLAG_IN_TREE);

	if (!label_is_stale(old))
		__label_make_stale(old);

	if (old->flags & FLAG_IN_TREE) {
		rb_replace_node(&old->node, &new->node, &ls->root);
		old->flags &= ~FLAG_IN_TREE;
		new->flags |= FLAG_IN_TREE;
		return true;
	}

	return false;
}

/**
 * __label_insert - attempt to insert @label into a label set
 * @ls: set of labels to insert @label into (NOT NULL)
 * @label: new label to insert (NOT NULL)
 * @replace: whether insertion should replace existing entry that is not stale
 *
 * Requires: @ls->lock
 *           caller to hold a valid ref on @label
 *           if @replace is true @label has a preallocated proxy associated
 * Returns: @label if successful in inserting @label - with additional refcount
 *          else ref counted equivalent label that is already in the set,
 *          the else condition only happens if @replace is false
 */
static struct aa_label *__label_insert(struct aa_labelset *ls,
				       struct aa_label *label, bool replace)
{
	struct rb_node **new, *parent = NULL;

	AA_BUG(!ls);
	AA_BUG(!label);
	AA_BUG(labels_set(label) != ls);
	lockdep_assert_held_exclusive(&ls->lock);
	AA_BUG(label->flags & FLAG_IN_TREE);

	/* Figure out where to put new node */
	new = &ls->root.rb_node;
	while (*new) {
		struct aa_label *this = rb_entry(*new, struct aa_label, node);
		int result = label_cmp(label, this);

		parent = *new;
		if (result == 0) {
			/* !__aa_get_label means queued for destruction,
			 * so replace in place, however the label has
			 * died before the replacement so do not share
			 * the proxy
			 */
			if (!replace && !label_is_stale(this)) {
				if (__aa_get_label(this))
					return this;
			} else
				__proxy_share(this, label);
			AA_BUG(!__label_replace(this, label));
			return aa_get_label(label);
		} else if (result < 0)
			new = &((*new)->rb_left);
		else /* (result > 0) */
			new = &((*new)->rb_right);
	}

	/* Add new node and rebalance tree. */
	rb_link_node(&label->node, parent, new);
	rb_insert_color(&label->node, &ls->root);
	label->flags |= FLAG_IN_TREE;

	return aa_get_label(label);
}
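
/*
 * Typical insertion pattern used below (sketch): allocate a candidate
 * label, try to insert it, and let the set decide which label wins, eg.
 *
 *	new = aa_label_alloc(len, NULL, gfp);
 *	... fill new->vec ...
 *	write_lock_irqsave(&ls->lock, flags);
 *	label = __label_insert(ls, new, false);
 *	write_unlock_irqrestore(&ls->lock, flags);
 *	label_free_or_put_new(label, new);
 *
 * If an equivalent label already existed, @label is that existing label
 * and the unused @new is freed; otherwise @label == @new.
 */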

/**
 * __vec_find - find label that matches @vec in label set
 * @vec: vec of profiles to find matching label for (NOT NULL)
 * @n: length of @vec
 *
 * Requires: vec_labelset(@vec) lock held
 *           caller to hold valid refs on the profiles in @vec
 *
 * Returns: ref counted label if a label matching @vec is in the tree
 *     else NULL if @vec equiv is not in tree
 */
static struct aa_label *__vec_find(struct aa_profile **vec, int n)
{
	struct rb_node *node;

	AA_BUG(!vec);
	AA_BUG(!*vec);
	AA_BUG(n <= 0);

	node = vec_labelset(vec, n)->root.rb_node;
	while (node) {
		struct aa_label *this = rb_entry(node, struct aa_label, node);
		int result = vec_cmp(this->vec, this->size, vec, n);

		if (result > 0)
			node = node->rb_left;
		else if (result < 0)
			node = node->rb_right;
		else
			return __aa_get_label(this);
	}

	return NULL;
}

/**
 * __label_find - find label @label in label set
 * @label: label to find (NOT NULL)
 *
 * Requires: labels_set(@label)->lock held
 *           caller to hold a valid ref on @label
 *
 * Returns: ref counted @label if @label is in tree OR
 *          ref counted label that is equiv to @label in tree
 *     else NULL if @label or equiv is not in tree
 */
static struct aa_label *__label_find(struct aa_label *label)
{
	AA_BUG(!label);

	return __vec_find(label->vec, label->size);
}


/**
 * aa_label_remove - remove a label from the labelset
 * @label: label to remove
 *
 * Returns: true if @label was removed from the tree
 *     else @label was not in tree so it could not be removed
 */
bool aa_label_remove(struct aa_label *label)
{
	struct aa_labelset *ls = labels_set(label);
	unsigned long flags;
	bool res;

	AA_BUG(!ls);

	write_lock_irqsave(&ls->lock, flags);
	res = __label_remove(label, ns_unconfined(labels_ns(label)));
	write_unlock_irqrestore(&ls->lock, flags);

	return res;
}

/**
 * aa_label_replace - replace a label @old with a new version @new
 * @old: label to replace
 * @new: label replacing @old
 *
 * Returns: true if @old was in tree and replaced
 *     else @old was not in tree, and @new was not inserted
 */
bool aa_label_replace(struct aa_label *old, struct aa_label *new)
{
	unsigned long flags;
	bool res;

	if (name_is_shared(old, new) && labels_ns(old) == labels_ns(new)) {
		write_lock_irqsave(&labels_set(old)->lock, flags);
		if (old->proxy != new->proxy)
			__proxy_share(old, new);
		else
			__aa_proxy_redirect(old, new);
		res = __label_replace(old, new);
		write_unlock_irqrestore(&labels_set(old)->lock, flags);
	} else {
		struct aa_label *l;
		struct aa_labelset *ls = labels_set(old);

		write_lock_irqsave(&ls->lock, flags);
		res = __label_remove(old, new);
		if (labels_ns(old) != labels_ns(new)) {
			write_unlock_irqrestore(&ls->lock, flags);
			ls = labels_set(new);
			write_lock_irqsave(&ls->lock, flags);
		}
		l = __label_insert(ls, new, true);
		res = (l == new);
		write_unlock_irqrestore(&ls->lock, flags);
		aa_put_label(l);
	}

	return res;
}

/**
 * vec_find - find label that matches @vec in label set
 * @vec: array of profiles to find equiv label for (NOT NULL)
 * @n: length of @vec
 *
 * Returns: refcounted label if @vec equiv is in tree
 *     else NULL if @vec equiv is not in tree
 */
static struct aa_label *vec_find(struct aa_profile **vec, int n)
{
	struct aa_labelset *ls;
	struct aa_label *label;
	unsigned long flags;

	AA_BUG(!vec);
	AA_BUG(!*vec);
	AA_BUG(n <= 0);

	ls = vec_labelset(vec, n);
	read_lock_irqsave(&ls->lock, flags);
	label = __vec_find(vec, n);
	read_unlock_irqrestore(&ls->lock, flags);

	return label;
}

/* requires sort and merge done first */
static struct aa_label *vec_create_and_insert_label(struct aa_profile **vec,
						    int len, gfp_t gfp)
{
	struct aa_label *label = NULL;
	struct aa_labelset *ls;
	unsigned long flags;
	struct aa_label *new;
	int i;

	AA_BUG(!vec);

	if (len == 1)
		return aa_get_label(&vec[0]->label);

	ls = labels_set(&vec[len - 1]->label);

	/* TODO: enable when read side is lockless
	 * check if label exists before taking locks
	 */
	new = aa_label_alloc(len, NULL, gfp);
	if (!new)
		return NULL;

	for (i = 0; i < len; i++)
		new->vec[i] = aa_get_profile(vec[i]);

	write_lock_irqsave(&ls->lock, flags);
	label = __label_insert(ls, new, false);
	write_unlock_irqrestore(&ls->lock, flags);
	label_free_or_put_new(label, new);

	return label;
}

struct aa_label *aa_vec_find_or_create_label(struct aa_profile **vec, int len,
					     gfp_t gfp)
{
	struct aa_label *label = vec_find(vec, len);

	if (label)
		return label;

	return vec_create_and_insert_label(vec, len, gfp);
}

/**
 * aa_label_find - find label @label in label set
 * @label: label to find (NOT NULL)
 *
 * Requires: caller to hold a valid ref on @label
 *
 * Returns: refcounted @label if @label is in tree
 *          refcounted label that is equiv to @label in tree
 *     else NULL if @label or equiv is not in tree
 */
struct aa_label *aa_label_find(struct aa_label *label)
{
	AA_BUG(!label);

	return vec_find(label->vec, label->size);
}


/**
 * aa_label_insert - insert label @label into @ls or return existing label
 * @ls: labelset to insert @label into
 * @label: label to insert
 *
 * Requires: caller to hold a valid ref on @label
 *
 * Returns: ref counted @label if successful in inserting @label
 *     else ref counted equivalent label that is already in the set
 */
struct aa_label *aa_label_insert(struct aa_labelset *ls, struct aa_label *label)
{
	struct aa_label *l;
	unsigned long flags;

	AA_BUG(!ls);
	AA_BUG(!label);

	/* check if label exists before taking lock */
	if (!label_is_stale(label)) {
		read_lock_irqsave(&ls->lock, flags);
		l = __label_find(label);
		read_unlock_irqrestore(&ls->lock, flags);
		if (l)
			return l;
	}

	write_lock_irqsave(&ls->lock, flags);
	l = __label_insert(ls, label, false);
	write_unlock_irqrestore(&ls->lock, flags);

	return l;
}
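
/*
 * Illustrative example of the merge walk implemented below: merging the
 * labels A//&C and B//&C visits the profiles of both sorted vectors in
 * order, emitting each profile once, so the resulting vector is
 * { A, B, C }.  Stale components are replaced by their newest version
 * before the result is inserted (see label_merge_insert()).
 */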

/**
 * aa_label_next_in_merge - find the next profile when merging @a and @b
 * @I: label iterator
 * @a: label to merge
 * @b: label to merge
 *
 * Returns: next profile
 *     else null if no more profiles
 */
struct aa_profile *aa_label_next_in_merge(struct label_it *I,
					  struct aa_label *a,
					  struct aa_label *b)
{
	AA_BUG(!a);
	AA_BUG(!b);
	AA_BUG(!I);
	AA_BUG(I->i < 0);
	AA_BUG(I->i > a->size);
	AA_BUG(I->j < 0);
	AA_BUG(I->j > b->size);

	if (I->i < a->size) {
		if (I->j < b->size) {
			int res = profile_cmp(a->vec[I->i], b->vec[I->j]);

			if (res > 0)
				return b->vec[(I->j)++];
			if (res == 0)
				(I->j)++;
		}

		return a->vec[(I->i)++];
	}

	if (I->j < b->size)
		return b->vec[(I->j)++];

	return NULL;
}

/**
 * label_merge_cmp - cmp of @a merging with @b against @z for set ordering
 * @a: label to merge then compare (NOT NULL)
 * @b: label to merge then compare (NOT NULL)
 * @z: label to compare merge against (NOT NULL)
 *
 * Assumes: using the most recent versions of @a, @b, and @z
 *
 * Returns: <0 if merge of @a and @b < @z
 *          ==0 if merge of @a and @b == @z
 *          >0 if merge of @a and @b > @z
 */
static int label_merge_cmp(struct aa_label *a, struct aa_label *b,
			   struct aa_label *z)
{
	struct aa_profile *p = NULL;
	struct label_it i = { };
	int k;

	AA_BUG(!a);
	AA_BUG(!b);
	AA_BUG(!z);

	for (k = 0;
	     k < z->size && (p = aa_label_next_in_merge(&i, a, b));
	     k++) {
		int res = profile_cmp(p, z->vec[k]);

		if (res != 0)
			return res;
	}

	if (p)
		return 1;
	else if (k < z->size)
		return -1;
	return 0;
}

/**
 * label_merge_insert - create a new label by merging @a and @b
 * @new: preallocated label to merge into (NOT NULL)
 * @a: label to merge with @b (NOT NULL)
 * @b: label to merge with @a (NOT NULL)
 *
 * Requires: preallocated proxy
 *
 * Returns: ref counted label either @new if merge is unique
 *          @a if @b is a subset of @a
 *          @b if @a is a subset of @b
 *
 * NOTE: will not use @new if the merge results in @new == @a or @b
 *
 *       Must be used within labelset write lock to avoid racing with
 *       setting labels stale.
 */
static struct aa_label *label_merge_insert(struct aa_label *new,
					   struct aa_label *a,
					   struct aa_label *b)
{
	struct aa_label *label;
	struct aa_labelset *ls;
	struct aa_profile *next;
	struct label_it i;
	unsigned long flags;
	int k = 0, invcount = 0;
	bool stale = false;

	AA_BUG(!a);
	AA_BUG(a->size < 0);
	AA_BUG(!b);
	AA_BUG(b->size < 0);
	AA_BUG(!new);
	AA_BUG(new->size < a->size + b->size);

	label_for_each_in_merge(i, a, b, next) {
		AA_BUG(!next);
		if (profile_is_stale(next)) {
			new->vec[k] = aa_get_newest_profile(next);
			AA_BUG(!new->vec[k]->label.proxy);
			AA_BUG(!new->vec[k]->label.proxy->label);
			if (next->label.proxy != new->vec[k]->label.proxy)
				invcount++;
			k++;
			stale = true;
		} else
			new->vec[k++] = aa_get_profile(next);
	}
	/* set to actual size which is <= allocated len */
	new->size = k;
	new->vec[k] = NULL;

	if (invcount) {
		new->size -= aa_vec_unique(&new->vec[0], new->size,
					   VEC_FLAG_TERMINATE);
		/* TODO: deal with reference labels */
		if (new->size == 1) {
			label = aa_get_label(&new->vec[0]->label);
			return label;
		}
	} else if (!stale) {
		/*
		 * merge could be same as a || b, note: it is not possible
		 * for new->size == a->size == b->size unless a == b
		 */
		if (k == a->size)
			return aa_get_label(a);
		else if (k == b->size)
			return aa_get_label(b);
	}
	if (vec_unconfined(new->vec, new->size))
		new->flags |= FLAG_UNCONFINED;
	ls = labels_set(new);
	write_lock_irqsave(&ls->lock, flags);
	label = __label_insert(labels_set(new), new, false);
	write_unlock_irqrestore(&ls->lock, flags);

	return label;
}

/**
 * labelset_of_merge - find which labelset a merged label should be inserted
 * @a: label to merge and insert
 * @b: label to merge and insert
 *
 * Returns: labelset that the merged label should be inserted into
 */
static struct aa_labelset *labelset_of_merge(struct aa_label *a,
					     struct aa_label *b)
{
	struct aa_ns *nsa = labels_ns(a);
	struct aa_ns *nsb = labels_ns(b);

	if (ns_cmp(nsa, nsb) <= 0)
		return &nsa->labels;
	return &nsb->labels;
}

/**
 * __label_find_merge - find label that is equiv to merge of @a and @b
 * @ls: set of labels to search (NOT NULL)
 * @a: label to merge with @b (NOT NULL)
 * @b: label to merge with @a (NOT NULL)
 *
 * Requires: ls->lock read_lock held
 *
 * Returns: ref counted label that is equiv to merge of @a and @b
 *     else NULL if merge of @a and @b is not in set
 */
static struct aa_label *__label_find_merge(struct aa_labelset *ls,
					   struct aa_label *a,
					   struct aa_label *b)
{
	struct rb_node *node;

	AA_BUG(!ls);
	AA_BUG(!a);
	AA_BUG(!b);

	if (a == b)
		return __label_find(a);

	node = ls->root.rb_node;
	while (node) {
		struct aa_label *this = container_of(node, struct aa_label,
						     node);
		int result = label_merge_cmp(a, b, this);

		if (result < 0)
			node = node->rb_left;
		else if (result > 0)
			node = node->rb_right;
		else
			return __aa_get_label(this);
	}

	return NULL;
}


/**
 * aa_label_find_merge - find label that is equiv to merge of @a and @b
 * @a: label to merge with @b (NOT NULL)
 * @b: label to merge with @a (NOT NULL)
 *
 * Requires: labels be fully constructed with a valid ns
 *
 * Returns: ref counted label that is equiv to merge of @a and @b
 *     else NULL if merge of @a and @b is not in set
 */
struct aa_label *aa_label_find_merge(struct aa_label *a, struct aa_label *b)
{
	struct aa_labelset *ls;
	struct aa_label *label, *ar = NULL, *br = NULL;
	unsigned long flags;

	AA_BUG(!a);
	AA_BUG(!b);

	if (label_is_stale(a))
		a = ar = aa_get_newest_label(a);
	if (label_is_stale(b))
		b = br = aa_get_newest_label(b);
	ls = labelset_of_merge(a, b);
	read_lock_irqsave(&ls->lock, flags);
	label = __label_find_merge(ls, a, b);
	read_unlock_irqrestore(&ls->lock, flags);
	aa_put_label(ar);
	aa_put_label(br);

	return label;
}

/**
 * aa_label_merge - attempt to insert new merged label of @a and @b
 * @a: label to merge with @b (NOT NULL)
 * @b: label to merge with @a (NOT NULL)
 * @gfp: memory allocation type
 *
 * Requires: caller to hold valid refs on @a and @b
 *           labels be fully constructed with a valid ns
 *
 * Returns: ref counted new label if successful in inserting merge of a & b
 *     else ref counted equivalent label that is already in the set.
 *     else NULL if could not create label (-ENOMEM)
 */
struct aa_label *aa_label_merge(struct aa_label *a, struct aa_label *b,
				gfp_t gfp)
{
	struct aa_label *label = NULL;

	AA_BUG(!a);
	AA_BUG(!b);

	if (a == b)
		return aa_get_newest_label(a);

	/* TODO: enable when read side is lockless
	 * check if label exists before taking locks
	if (!label_is_stale(a) && !label_is_stale(b))
		label = aa_label_find_merge(a, b);
	*/

	if (!label) {
		struct aa_label *new;

		a = aa_get_newest_label(a);
		b = aa_get_newest_label(b);

		/* could use label_merge_len(a, b), but requires double
		 * comparison for small savings
		 */
		new = aa_label_alloc(a->size + b->size, NULL, gfp);
		if (!new)
			goto out;

		label = label_merge_insert(new, a, b);
		label_free_or_put_new(label, new);
out:
		aa_put_label(a);
		aa_put_label(b);
	}

	return label;
}

static inline bool label_is_visible(struct aa_profile *profile,
				    struct aa_label *label)
{
	return aa_ns_visible(profile->ns, labels_ns(label), true);
}

/* match a profile and its associated ns component if needed
 * Assumes visibility test has already been done.
 * If a subns profile is not to be matched it should be prescreened with
 * the visibility test.
 */
static inline unsigned int match_component(struct aa_profile *profile,
					   struct aa_profile *tp,
					   unsigned int state)
{
	const char *ns_name;

	if (profile->ns == tp->ns)
		return aa_dfa_match(profile->policy.dfa, state, tp->base.hname);

	/* try matching with namespace name and then profile */
	ns_name = aa_ns_name(profile->ns, tp->ns, true);
	state = aa_dfa_match_len(profile->policy.dfa, state, ":", 1);
	state = aa_dfa_match(profile->policy.dfa, state, ns_name);
	state = aa_dfa_match_len(profile->policy.dfa, state, ":", 1);
	return aa_dfa_match(profile->policy.dfa, state, tp->base.hname);
}
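
/*
 * Illustrative: for a component in the profile's own ns the DFA is simply
 * walked over the component's hname; for a component in a different
 * (visible) ns the string fed to the DFA looks roughly like
 *
 *	:ns_name:profile_hname
 *
 * which is what the ":" / ns_name / ":" / hname sequence above produces.
 */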

/**
 * label_compound_match - find perms for full compound label
 * @profile: profile to find perms for
 * @label: label to check access permissions for
 * @state: state to start match in
 * @subns: whether to do permission checks on components in a subns
 * @request: permissions to request
 * @perms: perms struct to set
 *
 * Returns: 0 on success else ERROR
 *
 * For the label A//&B//&C this does the perm match for A//&B//&C
 * @perms should be preinitialized with allperms OR a previous permission
 *        check to be stacked.
 */
static int label_compound_match(struct aa_profile *profile,
				struct aa_label *label,
				unsigned int state, bool subns, u32 request,
				struct aa_perms *perms)
{
	struct aa_profile *tp;
	struct label_it i;

	/* find first subcomponent that is visible */
	label_for_each(i, label, tp) {
		if (!aa_ns_visible(profile->ns, tp->ns, subns))
			continue;
		state = match_component(profile, tp, state);
		if (!state)
			goto fail;
		goto next;
	}

	/* no component visible */
	*perms = allperms;
	return 0;

next:
	label_for_each_cont(i, label, tp) {
		if (!aa_ns_visible(profile->ns, tp->ns, subns))
			continue;
		state = aa_dfa_match(profile->policy.dfa, state, "//&");
		state = match_component(profile, tp, state);
		if (!state)
			goto fail;
	}
	aa_compute_perms(profile->policy.dfa, state, perms);
	aa_apply_modes_to_perms(profile, perms);
	if ((perms->allow & request) != request)
		return -EACCES;

	return 0;

fail:
	*perms = nullperms;
	return state;
}
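
/*
 * Sketch of the two matching strategies used for a stacked label A//&B//&C:
 * label_compound_match() (above) walks the DFA over the single string
 * "A//&B//&C" and computes one permission set, while
 * label_components_match() (below) matches each of A, B and C from the
 * start state separately and accumulates the resulting permissions.
 * aa_label_match() tries the compound form first and falls back to the
 * per-component form.
 */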

/**
 * label_components_match - find perms for all subcomponents of a label
 * @profile: profile to find perms for
 * @label: label to check access permissions for
 * @start: state to start match in
 * @subns: whether to do permission checks on components in a subns
 * @request: permissions to request
 * @perms: an initialized perms struct to add accumulation to
 *
 * Returns: 0 on success else ERROR
 *
 * For the label A//&B//&C this does the perm match for each of A and B and C
 * @perms should be preinitialized with allperms OR a previous permission
 *        check to be stacked.
 */
static int label_components_match(struct aa_profile *profile,
				  struct aa_label *label, unsigned int start,
				  bool subns, u32 request,
				  struct aa_perms *perms)
{
	struct aa_profile *tp;
	struct label_it i;
	struct aa_perms tmp;
	unsigned int state = 0;

	/* find first subcomponent to test */
	label_for_each(i, label, tp) {
		if (!aa_ns_visible(profile->ns, tp->ns, subns))
			continue;
		state = match_component(profile, tp, start);
		if (!state)
			goto fail;
		goto next;
	}

	/* no subcomponents visible - no change in perms */
	return 0;

next:
	aa_compute_perms(profile->policy.dfa, state, &tmp);
	aa_apply_modes_to_perms(profile, &tmp);
	aa_perms_accum(perms, &tmp);
	label_for_each_cont(i, label, tp) {
		if (!aa_ns_visible(profile->ns, tp->ns, subns))
			continue;
		state = match_component(profile, tp, start);
		if (!state)
			goto fail;
		aa_compute_perms(profile->policy.dfa, state, &tmp);
		aa_apply_modes_to_perms(profile, &tmp);
		aa_perms_accum(perms, &tmp);
	}

	if ((perms->allow & request) != request)
		return -EACCES;

	return 0;

fail:
	*perms = nullperms;
	return -EACCES;
}

/**
 * aa_label_match - do a multi-component label match
 * @profile: profile to match against (NOT NULL)
 * @label: label to match (NOT NULL)
 * @state: state to start in
 * @subns: whether to match subns components
 * @request: permission request
 * @perms: Returns computed perms (NOT NULL)
 *
 * Returns: 0 on success else ERROR
 */
int aa_label_match(struct aa_profile *profile, struct aa_label *label,
		   unsigned int state, bool subns, u32 request,
		   struct aa_perms *perms)
{
	int error = label_compound_match(profile, label, state, subns, request,
					 perms);
	if (!error)
		return error;

	*perms = allperms;
	return label_components_match(profile, label, state, subns, request,
				      perms);
}


/**
 * aa_update_label_name - update a label to have a stored name
 * @ns: ns being viewed from (NOT NULL)
 * @label: label to update (NOT NULL)
 * @gfp: type of memory allocation
 *
 * Requires: labels_set(label) not locked in caller
 *
 * note: only updates the label name if it does not have a name already
 *       and if it is in the labelset
 */
bool aa_update_label_name(struct aa_ns *ns, struct aa_label *label, gfp_t gfp)
{
	struct aa_labelset *ls;
	unsigned long flags;
	char __counted *name;
	bool res = false;

	AA_BUG(!ns);
	AA_BUG(!label);

	if (label->hname || labels_ns(label) != ns)
		return res;

	if (aa_label_acntsxprint(&name, ns, label, FLAGS_NONE, gfp) == -1)
		return res;

	ls = labels_set(label);
	write_lock_irqsave(&ls->lock, flags);
	if (!label->hname && label->flags & FLAG_IN_TREE) {
		label->hname = name;
		res = true;
	} else
		aa_put_str(name);
	write_unlock_irqrestore(&ls->lock, flags);

	return res;
}

/*
 * cached label name is present and visible
 * @label->hname only exists if label is namespace hierarchical
 */
static inline bool use_label_hname(struct aa_ns *ns, struct aa_label *label,
				   int flags)
{
	if (label->hname && (!ns || labels_ns(label) == ns) &&
	    !(flags & ~FLAG_SHOW_MODE))
		return true;

	return false;
}

/* helper macro for snprint routines */
#define update_for_len(total, len, size, str)	\
do {						\
	AA_BUG(len < 0);			\
	total += len;				\
	len = min(len, size);			\
	size -= len;				\
	str += len;				\
} while (0)
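
/*
 * Note: the snprint routines below follow the usual snprintf() convention,
 * returning the length that would have been written even when it is larger
 * than the supplied buffer.  Callers can therefore size a buffer with a
 * NULL/0 first pass (as aa_label_asxprint() does) and then print for real.
 * update_for_len() accumulates the running total while clamping how much
 * of the buffer is actually consumed.
 */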

/**
 * aa_profile_snxprint - print a profile name to a buffer
 * @str: buffer to write to. (MAY BE NULL if @size == 0)
 * @size: size of buffer
 * @view: namespace profile is being viewed from
 * @profile: profile to view (NOT NULL)
 * @flags: whether to include the mode string
 * @prev_ns: last ns printed when used in compound print
 *
 * Returns: size of name written or would be written if larger than
 *          available buffer
 *
 * Note: will not print anything if the profile is not visible
 */
static int aa_profile_snxprint(char *str, size_t size, struct aa_ns *view,
			       struct aa_profile *profile, int flags,
			       struct aa_ns **prev_ns)
{
	const char *ns_name = NULL;

	AA_BUG(!str && size != 0);
	AA_BUG(!profile);

	if (!view)
		view = profiles_ns(profile);

	if (view != profile->ns &&
	    (!prev_ns || (*prev_ns != profile->ns))) {
		if (prev_ns)
			*prev_ns = profile->ns;
		ns_name = aa_ns_name(view, profile->ns,
				     flags & FLAG_VIEW_SUBNS);
		if (ns_name == aa_hidden_ns_name) {
			if (flags & FLAG_HIDDEN_UNCONFINED)
				return snprintf(str, size, "%s", "unconfined");
			return snprintf(str, size, "%s", ns_name);
		}
	}

	if ((flags & FLAG_SHOW_MODE) && profile != profile->ns->unconfined) {
		const char *modestr = aa_profile_mode_names[profile->mode];

		if (ns_name)
			return snprintf(str, size, ":%s:%s (%s)", ns_name,
					profile->base.hname, modestr);
		return snprintf(str, size, "%s (%s)", profile->base.hname,
				modestr);
	}

	if (ns_name)
		return snprintf(str, size, ":%s:%s", ns_name,
				profile->base.hname);
	return snprintf(str, size, "%s", profile->base.hname);
}
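
/*
 * Example output forms produced above (illustrative, "firefox" is a
 * hypothetical profile name):
 *	firefox (enforce)	- profile in the viewing ns, with mode
 *	:ns1:firefox (complain)	- profile in a visible child ns
 *	firefox			- FLAG_SHOW_MODE not set
 * A profile whose ns is not visible prints as aa_hidden_ns_name, or as
 * "unconfined" when FLAG_HIDDEN_UNCONFINED is requested.
 */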

static const char *label_modename(struct aa_ns *ns, struct aa_label *label,
				  int flags)
{
	struct aa_profile *profile;
	struct label_it i;
	int mode = -1, count = 0;

	label_for_each(i, label, profile) {
		if (aa_ns_visible(ns, profile->ns, flags & FLAG_VIEW_SUBNS)) {
			if (profile->mode == APPARMOR_UNCONFINED)
				/* special case unconfined so stacks with
				 * unconfined don't report as mixed. ie.
				 * profile_foo//&:ns1:unconfined (mixed)
				 */
				continue;
			count++;
			if (mode == -1)
				mode = profile->mode;
			else if (mode != profile->mode)
				return "mixed";
		}
	}

	if (count == 0)
		return "-";
	if (mode == -1)
		/* everything was unconfined */
		mode = APPARMOR_UNCONFINED;

	return aa_profile_mode_names[mode];
}

/* if any visible profile in the label is not unconfined, display_mode returns true */
static inline bool display_mode(struct aa_ns *ns, struct aa_label *label,
				int flags)
{
	if ((flags & FLAG_SHOW_MODE)) {
		struct aa_profile *profile;
		struct label_it i;

		label_for_each(i, label, profile) {
			if (aa_ns_visible(ns, profile->ns,
					  flags & FLAG_VIEW_SUBNS) &&
			    profile != profile->ns->unconfined)
				return true;
		}
		/* only ns->unconfined in set of profiles in ns */
		return false;
	}

	return false;
}

/**
 * aa_label_snxprint - print a label name to a string buffer
 * @str: buffer to write to. (MAY BE NULL if @size == 0)
 * @size: size of buffer
 * @ns: namespace profile is being viewed from
 * @label: label to view (NOT NULL)
 * @flags: flags controlling what label info is printed
 *
 * Returns: size of name written or would be written if larger than
 *          available buffer
 *
 * Note: labels do not have to be strictly hierarchical to the ns as
 *       objects may be shared across different namespaces and thus
 *       pickup labeling from each ns.  If a particular part of the
 *       label is not visible it will just be excluded.  And if none
 *       of the label is visible "---" will be used.
 */
int aa_label_snxprint(char *str, size_t size, struct aa_ns *ns,
		      struct aa_label *label, int flags)
{
	struct aa_profile *profile;
	struct aa_ns *prev_ns = NULL;
	struct label_it i;
	int count = 0, total = 0;
	size_t len;

	AA_BUG(!str && size != 0);
	AA_BUG(!label);

	if (flags & FLAG_ABS_ROOT) {
		ns = root_ns;
		len = snprintf(str, size, "=");
		update_for_len(total, len, size, str);
	} else if (!ns) {
		ns = labels_ns(label);
	}

	label_for_each(i, label, profile) {
		if (aa_ns_visible(ns, profile->ns, flags & FLAG_VIEW_SUBNS)) {
			if (count > 0) {
				len = snprintf(str, size, "//&");
				update_for_len(total, len, size, str);
			}
			len = aa_profile_snxprint(str, size, ns, profile,
						  flags & FLAG_VIEW_SUBNS,
						  &prev_ns);
			update_for_len(total, len, size, str);
			count++;
		}
	}

	if (count == 0) {
		if (flags & FLAG_HIDDEN_UNCONFINED)
			return snprintf(str, size, "%s", "unconfined");
		return snprintf(str, size, "%s", aa_hidden_ns_name);
	}

	/* count == 1 && ... is for backwards compat where the mode
	 * is not displayed for 'unconfined' in the current ns
	 */
	if (display_mode(ns, label, flags)) {
		len = snprintf(str, size, " (%s)",
			       label_modename(ns, label, flags));
		update_for_len(total, len, size, str);
	}

	return total;
}
#undef update_for_len

/**
 * aa_label_asxprint - allocate a string buffer and print label into it
 * @strp: Returns - the allocated buffer with the label name. (NOT NULL)
 * @ns: namespace profile is being viewed from
 * @label: label to view (NOT NULL)
 * @flags: flags controlling what label info is printed
 * @gfp: kernel memory allocation type
 *
 * Returns: size of name written or would be written if larger than
 *          available buffer
 */
int aa_label_asxprint(char **strp, struct aa_ns *ns, struct aa_label *label,
		      int flags, gfp_t gfp)
{
	int size;

	AA_BUG(!strp);
	AA_BUG(!label);

	size = aa_label_snxprint(NULL, 0, ns, label, flags);
	if (size < 0)
		return size;

	*strp = kmalloc(size + 1, gfp);
	if (!*strp)
		return -ENOMEM;
	return aa_label_snxprint(*strp, size + 1, ns, label, flags);
}

/**
 * aa_label_acntsxprint - allocate a __counted string buffer and print label
 * @strp: Returns - the allocated __counted buffer with the label name.
 *        (NOT NULL)
 * @ns: namespace profile is being viewed from
 * @label: label to view (NOT NULL)
 * @flags: flags controlling what label info is printed
 * @gfp: kernel memory allocation type
 *
 * Returns: size of name written or would be written if larger than
 *          available buffer
 */
int aa_label_acntsxprint(char __counted **strp, struct aa_ns *ns,
			 struct aa_label *label, int flags, gfp_t gfp)
{
	int size;

	AA_BUG(!strp);
	AA_BUG(!label);

	size = aa_label_snxprint(NULL, 0, ns, label, flags);
	if (size < 0)
		return size;

	*strp = aa_str_alloc(size + 1, gfp);
	if (!*strp)
		return -ENOMEM;
	return aa_label_snxprint(*strp, size + 1, ns, label, flags);
}


void aa_label_xaudit(struct audit_buffer *ab, struct aa_ns *ns,
		     struct aa_label *label, int flags, gfp_t gfp)
{
	const char *str;
	char *name = NULL;
	int len;

	AA_BUG(!ab);
	AA_BUG(!label);

	if (!use_label_hname(ns, label, flags) ||
	    display_mode(ns, label, flags)) {
		len = aa_label_asxprint(&name, ns, label, flags, gfp);
		if (len == -1) {
			AA_DEBUG("label print error");
			return;
		}
		str = name;
	} else {
		str = (char *) label->hname;
		len = strlen(str);
	}
	if (audit_string_contains_control(str, len))
		audit_log_n_hex(ab, str, len);
	else
		audit_log_n_string(ab, str, len);

	kfree(name);
}

void aa_label_seq_xprint(struct seq_file *f, struct aa_ns *ns,
			 struct aa_label *label, int flags, gfp_t gfp)
{
	AA_BUG(!f);
	AA_BUG(!label);

	if (!use_label_hname(ns, label, flags)) {
		char *str;
		int len;

		len = aa_label_asxprint(&str, ns, label, flags, gfp);
		if (len == -1) {
			AA_DEBUG("label print error");
			return;
		}
		seq_printf(f, "%s", str);
		kfree(str);
	} else if (display_mode(ns, label, flags))
		seq_printf(f, "%s (%s)", label->hname,
			   label_modename(ns, label, flags));
	else
		seq_printf(f, "%s", label->hname);
}

void aa_label_xprintk(struct aa_ns *ns, struct aa_label *label, int flags,
		      gfp_t gfp)
{
	AA_BUG(!label);

	if (!use_label_hname(ns, label, flags)) {
		char *str;
		int len;

		len = aa_label_asxprint(&str, ns, label, flags, gfp);
		if (len == -1) {
			AA_DEBUG("label print error");
			return;
		}
		pr_info("%s", str);
		kfree(str);
	} else if (display_mode(ns, label, flags))
		pr_info("%s (%s)", label->hname,
			label_modename(ns, label, flags));
	else
		pr_info("%s", label->hname);
}
pr_info("%s", label->hname); 1785 } 1786 1787 void aa_label_audit(struct audit_buffer *ab, struct aa_label *label, gfp_t gfp) 1788 { 1789 struct aa_ns *ns = aa_get_current_ns(); 1790 1791 aa_label_xaudit(ab, ns, label, FLAG_VIEW_SUBNS, gfp); 1792 aa_put_ns(ns); 1793 } 1794 1795 void aa_label_seq_print(struct seq_file *f, struct aa_label *label, gfp_t gfp) 1796 { 1797 struct aa_ns *ns = aa_get_current_ns(); 1798 1799 aa_label_seq_xprint(f, ns, label, FLAG_VIEW_SUBNS, gfp); 1800 aa_put_ns(ns); 1801 } 1802 1803 void aa_label_printk(struct aa_label *label, gfp_t gfp) 1804 { 1805 struct aa_ns *ns = aa_get_current_ns(); 1806 1807 aa_label_xprintk(ns, label, FLAG_VIEW_SUBNS, gfp); 1808 aa_put_ns(ns); 1809 } 1810 1811 static int label_count_str_entries(const char *str) 1812 { 1813 const char *split; 1814 int count = 1; 1815 1816 AA_BUG(!str); 1817 1818 for (split = strstr(str, "//&"); split; split = strstr(str, "//&")) { 1819 count++; 1820 str = split + 3; 1821 } 1822 1823 return count; 1824 } 1825 1826 /* 1827 * ensure stacks with components like 1828 * :ns:A//&B 1829 * have :ns: applied to both 'A' and 'B' by making the lookup relative 1830 * to the base if the lookup specifies an ns, else making the stacked lookup 1831 * relative to the last embedded ns in the string. 1832 */ 1833 static struct aa_profile *fqlookupn_profile(struct aa_label *base, 1834 struct aa_label *currentbase, 1835 const char *str, size_t n) 1836 { 1837 const char *first = skipn_spaces(str, n); 1838 1839 if (first && *first == ':') 1840 return aa_fqlookupn_profile(base, str, n); 1841 1842 return aa_fqlookupn_profile(currentbase, str, n); 1843 } 1844 1845 /** 1846 * aa_label_parse - parse, validate and convert a text string to a label 1847 * @base: base label to use for lookups (NOT NULL) 1848 * @str: null terminated text string (NOT NULL) 1849 * @gfp: allocation type 1850 * @create: true if should create compound labels if they don't exist 1851 * @force_stack: true if should stack even if no leading & 1852 * 1853 * Returns: the matching refcounted label if present 1854 * else ERRPTR 1855 */ 1856 struct aa_label *aa_label_parse(struct aa_label *base, const char *str, 1857 gfp_t gfp, bool create, bool force_stack) 1858 { 1859 DEFINE_VEC(profile, vec); 1860 struct aa_label *label, *currbase = base; 1861 int i, len, stack = 0, error; 1862 char *split; 1863 1864 AA_BUG(!base); 1865 AA_BUG(!str); 1866 1867 str = skip_spaces(str); 1868 len = label_count_str_entries(str); 1869 if (*str == '&' || force_stack) { 1870 /* stack on top of base */ 1871 stack = base->size; 1872 len += stack; 1873 if (*str == '&') 1874 str++; 1875 } 1876 if (*str == '=') 1877 base = &root_ns->unconfined->label; 1878 1879 error = vec_setup(profile, vec, len, gfp); 1880 if (error) 1881 return ERR_PTR(error); 1882 1883 for (i = 0; i < stack; i++) 1884 vec[i] = aa_get_profile(base->vec[i]); 1885 1886 for (split = strstr(str, "//&"), i = stack; split && i < len; i++) { 1887 vec[i] = fqlookupn_profile(base, currbase, str, split - str); 1888 if (!vec[i]) 1889 goto fail; 1890 /* 1891 * if component specified a new ns it becomes the new base 1892 * so that subsequent lookups are relative to it 1893 */ 1894 if (vec[i]->ns != labels_ns(currbase)) 1895 currbase = &vec[i]->label; 1896 str = split + 3; 1897 split = strstr(str, "//&"); 1898 } 1899 /* last element doesn't have a split */ 1900 if (i < len) { 1901 vec[i] = fqlookupn_profile(base, currbase, str, strlen(str)); 1902 if (!vec[i]) 1903 goto fail; 1904 } 1905 if (len == 1) 1906 /* no need to free vec as len < 

/**
 * aa_label_parse - parse, validate and convert a text string to a label
 * @base: base label to use for lookups (NOT NULL)
 * @str: null terminated text string (NOT NULL)
 * @gfp: allocation type
 * @create: true if should create compound labels if they don't exist
 * @force_stack: true if should stack even if no leading &
 *
 * Returns: the matching refcounted label if present
 *     else ERR_PTR
 */
struct aa_label *aa_label_parse(struct aa_label *base, const char *str,
				gfp_t gfp, bool create, bool force_stack)
{
	DEFINE_VEC(profile, vec);
	struct aa_label *label, *currbase = base;
	int i, len, stack = 0, error;
	char *split;

	AA_BUG(!base);
	AA_BUG(!str);

	str = skip_spaces(str);
	len = label_count_str_entries(str);
	if (*str == '&' || force_stack) {
		/* stack on top of base */
		stack = base->size;
		len += stack;
		if (*str == '&')
			str++;
	}
	if (*str == '=')
		base = &root_ns->unconfined->label;

	error = vec_setup(profile, vec, len, gfp);
	if (error)
		return ERR_PTR(error);

	for (i = 0; i < stack; i++)
		vec[i] = aa_get_profile(base->vec[i]);

	for (split = strstr(str, "//&"), i = stack; split && i < len; i++) {
		vec[i] = fqlookupn_profile(base, currbase, str, split - str);
		if (!vec[i])
			goto fail;
		/*
		 * if component specified a new ns it becomes the new base
		 * so that subsequent lookups are relative to it
		 */
		if (vec[i]->ns != labels_ns(currbase))
			currbase = &vec[i]->label;
		str = split + 3;
		split = strstr(str, "//&");
	}
	/* last element doesn't have a split */
	if (i < len) {
		vec[i] = fqlookupn_profile(base, currbase, str, strlen(str));
		if (!vec[i])
			goto fail;
	}
	if (len == 1)
		/* no need to free vec as len < LOCAL_VEC_ENTRIES */
		return &vec[0]->label;

	len -= aa_vec_unique(vec, len, VEC_FLAG_TERMINATE);
	/* TODO: deal with reference labels */
	if (len == 1) {
		label = aa_get_label(&vec[0]->label);
		goto out;
	}

	if (create)
		label = aa_vec_find_or_create_label(vec, len, gfp);
	else
		label = vec_find(vec, len);
	if (!label)
		goto fail;

out:
	/* use adjusted len from after vec_unique, not original */
	vec_cleanup(profile, vec, len);
	return label;

fail:
	label = ERR_PTR(-ENOENT);
	goto out;
}


/**
 * aa_labelset_destroy - remove all labels from the label set
 * @ls: label set to cleanup (NOT NULL)
 *
 * Labels that are removed from the set may still exist beyond the set
 * being destroyed depending on their reference counting
 */
void aa_labelset_destroy(struct aa_labelset *ls)
{
	struct rb_node *node;
	unsigned long flags;

	AA_BUG(!ls);

	write_lock_irqsave(&ls->lock, flags);
	for (node = rb_first(&ls->root); node; node = rb_first(&ls->root)) {
		struct aa_label *this = rb_entry(node, struct aa_label, node);

		if (labels_ns(this) != root_ns)
			__label_remove(this,
				       ns_unconfined(labels_ns(this)->parent));
		else
			__label_remove(this, NULL);
	}
	write_unlock_irqrestore(&ls->lock, flags);
}

/*
 * @ls: labelset to init (NOT NULL)
 */
void aa_labelset_init(struct aa_labelset *ls)
{
	AA_BUG(!ls);

	rwlock_init(&ls->lock);
	ls->root = RB_ROOT;
}

static struct aa_label *labelset_next_stale(struct aa_labelset *ls)
{
	struct aa_label *label;
	struct rb_node *node;
	unsigned long flags;

	AA_BUG(!ls);

	read_lock_irqsave(&ls->lock, flags);

	__labelset_for_each(ls, node) {
		label = rb_entry(node, struct aa_label, node);
		if ((label_is_stale(label) ||
		     vec_is_stale(label->vec, label->size)) &&
		    __aa_get_label(label))
			goto out;

	}
	label = NULL;

out:
	read_unlock_irqrestore(&ls->lock, flags);

	return label;
}

/**
 * __label_update - insert updated version of @label into labelset
 * @label: the label to update/replace
 *
 * Returns: new label that is up to date
 *     else NULL on failure
 *
 * Requires: @ns lock be held
 *
 * Note: worst case is the stale @label does not get updated and has
 *       to be updated at a later time.
 */
static struct aa_label *__label_update(struct aa_label *label)
{
	struct aa_label *new, *tmp;
	struct aa_labelset *ls;
	unsigned long flags;
	int i, invcount = 0;

	AA_BUG(!label);
	AA_BUG(!mutex_is_locked(&labels_ns(label)->lock));

	new = aa_label_alloc(label->size, label->proxy, GFP_KERNEL);
	if (!new)
		return NULL;

	/*
	 * while holding the ns_lock stops profile replacement, removal,
	 * and label updates, label merging and removal can still be occurring
	 */
	ls = labels_set(label);
	write_lock_irqsave(&ls->lock, flags);
	for (i = 0; i < label->size; i++) {
		AA_BUG(!label->vec[i]);
		new->vec[i] = aa_get_newest_profile(label->vec[i]);
		AA_BUG(!new->vec[i]);
		AA_BUG(!new->vec[i]->label.proxy);
		AA_BUG(!new->vec[i]->label.proxy->label);
		if (new->vec[i]->label.proxy != label->vec[i]->label.proxy)
			invcount++;
	}

	/* updated stale label by being removed/renamed from labelset */
	if (invcount) {
		new->size -= aa_vec_unique(&new->vec[0], new->size,
					   VEC_FLAG_TERMINATE);
		/* TODO: deal with reference labels */
		if (new->size == 1) {
			tmp = aa_get_label(&new->vec[0]->label);
			AA_BUG(tmp == label);
			goto remove;
		}
		if (labels_set(label) != labels_set(new)) {
			write_unlock_irqrestore(&ls->lock, flags);
			tmp = aa_label_insert(labels_set(new), new);
			write_lock_irqsave(&ls->lock, flags);
			goto remove;
		}
	} else
		AA_BUG(labels_ns(label) != labels_ns(new));

	tmp = __label_insert(labels_set(label), new, true);
remove:
	/* ensure label is removed, and redirected correctly */
	__label_remove(label, tmp);
	write_unlock_irqrestore(&ls->lock, flags);
	label_free_or_put_new(tmp, new);

	return tmp;
}

/**
 * __labelset_update - update labels in @ns
 * @ns: namespace to update labels in (NOT NULL)
 *
 * Requires: @ns lock be held
 *
 * Walk the labelset ensuring that all labels are up to date and valid.
 * Any label that has a stale component is marked stale and replaced
 * by an updated version.
 *
 * If failures happen due to memory pressures then stale labels will
 * be left in place until the next pass.
 */
static void __labelset_update(struct aa_ns *ns)
{
	struct aa_label *label;

	AA_BUG(!ns);
	AA_BUG(!mutex_is_locked(&ns->lock));

	do {
		label = labelset_next_stale(&ns->labels);
		if (label) {
			struct aa_label *l = __label_update(label);

			aa_put_label(l);
			aa_put_label(label);
		}
	} while (label);
}

/**
 * __aa_labelset_update_subtree - update all labels with a stale component
 * @ns: ns to start update at (NOT NULL)
 *
 * Requires: @ns lock be held
 *
 * Updates labels with a stale component in @ns and any children namespaces.
 */
void __aa_labelset_update_subtree(struct aa_ns *ns)
{
	struct aa_ns *child;

	AA_BUG(!ns);
	AA_BUG(!mutex_is_locked(&ns->lock));

	__labelset_update(ns);

	list_for_each_entry(child, &ns->sub_ns, base.list) {
		mutex_lock_nested(&child->lock, child->level);
		__aa_labelset_update_subtree(child);
		mutex_unlock(&child->lock);
	}
}