// SPDX-License-Identifier: GPL-2.0-only
/*
 * AppArmor security module
 *
 * This file contains AppArmor functions for unpacking policy loaded from
 * userspace.
 *
 * Copyright (C) 1998-2008 Novell/SUSE
 * Copyright 2009-2010 Canonical Ltd.
 *
 * AppArmor uses a serialized binary format for loading policy. To find
 * policy format documentation see Documentation/admin-guide/LSM/apparmor.rst
 * All policy is validated before it is used.
 */

#include <asm/unaligned.h>
#include <kunit/visibility.h>
#include <linux/ctype.h>
#include <linux/errno.h>
#include <linux/zstd.h>

#include "include/apparmor.h"
#include "include/audit.h"
#include "include/cred.h"
#include "include/crypto.h"
#include "include/file.h"
#include "include/match.h"
#include "include/path.h"
#include "include/policy.h"
#include "include/policy_unpack.h"
#include "include/policy_compat.h"

/* audit callback for unpack fields */
static void audit_cb(struct audit_buffer *ab, void *va)
{
	struct common_audit_data *sa = va;

	if (aad(sa)->iface.ns) {
		audit_log_format(ab, " ns=");
		audit_log_untrustedstring(ab, aad(sa)->iface.ns);
	}
	if (aad(sa)->name) {
		audit_log_format(ab, " name=");
		audit_log_untrustedstring(ab, aad(sa)->name);
	}
	if (aad(sa)->iface.pos)
		audit_log_format(ab, " offset=%ld", aad(sa)->iface.pos);
}

/**
 * audit_iface - do audit message for policy unpacking/load/replace/remove
 * @new: profile if it has been allocated (MAYBE NULL)
 * @ns_name: name of the ns the profile is to be loaded to (MAYBE NULL)
 * @name: name of the profile being manipulated (MAYBE NULL)
 * @info: any extra info about the failure (MAYBE NULL)
 * @e: buffer position info
 * @error: error code
 *
 * Returns: %0 or error
 */
static int audit_iface(struct aa_profile *new, const char *ns_name,
		       const char *name, const char *info, struct aa_ext *e,
		       int error)
{
	struct aa_profile *profile = labels_profile(aa_current_raw_label());
	DEFINE_AUDIT_DATA(sa, LSM_AUDIT_DATA_NONE, AA_CLASS_NONE, NULL);
	if (e)
		aad(&sa)->iface.pos = e->pos - e->start;
	aad(&sa)->iface.ns = ns_name;
	if (new)
		aad(&sa)->name = new->base.hname;
	else
		aad(&sa)->name = name;
	aad(&sa)->info = info;
	aad(&sa)->error = error;

	return aa_audit(AUDIT_APPARMOR_STATUS, profile, &sa, audit_cb);
}

void __aa_loaddata_update(struct aa_loaddata *data, long revision)
{
	AA_BUG(!data);
	AA_BUG(!data->ns);
	AA_BUG(!mutex_is_locked(&data->ns->lock));
	AA_BUG(data->revision > revision);

	data->revision = revision;
	if ((data->dents[AAFS_LOADDATA_REVISION])) {
		d_inode(data->dents[AAFS_LOADDATA_DIR])->i_mtime =
			current_time(d_inode(data->dents[AAFS_LOADDATA_DIR]));
		d_inode(data->dents[AAFS_LOADDATA_REVISION])->i_mtime =
			current_time(d_inode(data->dents[AAFS_LOADDATA_REVISION]));
	}
}

bool aa_rawdata_eq(struct aa_loaddata *l, struct aa_loaddata *r)
{
	if (l->size != r->size)
		return false;
	if (l->compressed_size != r->compressed_size)
		return false;
	if (aa_g_hash_policy && memcmp(l->hash, r->hash, aa_hash_size()) != 0)
		return false;
	return memcmp(l->data, r->data, r->compressed_size ?: r->size) == 0;
}

/*
 * need to take the ns mutex lock which is NOT safe in most places where
 * put_loaddata is called, so we have to delay freeing it
 */
static void do_loaddata_free(struct work_struct *work)
{
	struct aa_loaddata *d = container_of(work, struct aa_loaddata, work);
	struct aa_ns *ns = aa_get_ns(d->ns);

	if (ns) {
		mutex_lock_nested(&ns->lock, ns->level);
		__aa_fs_remove_rawdata(d);
		mutex_unlock(&ns->lock);
		aa_put_ns(ns);
	}

	kfree_sensitive(d->hash);
	kfree_sensitive(d->name);
	kvfree(d->data);
	kfree_sensitive(d);
}

void aa_loaddata_kref(struct kref *kref)
{
	struct aa_loaddata *d = container_of(kref, struct aa_loaddata, count);

	if (d) {
		INIT_WORK(&d->work, do_loaddata_free);
		schedule_work(&d->work);
	}
}

struct aa_loaddata *aa_loaddata_alloc(size_t size)
{
	struct aa_loaddata *d;

	d = kzalloc(sizeof(*d), GFP_KERNEL);
	if (d == NULL)
		return ERR_PTR(-ENOMEM);
	d->data = kvzalloc(size, GFP_KERNEL);
	if (!d->data) {
		kfree(d);
		return ERR_PTR(-ENOMEM);
	}
	kref_init(&d->count);
	INIT_LIST_HEAD(&d->list);

	return d;
}
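
/*
 * Informal sketch of the encoding handled by the unpack primitives below
 * (a summary of the helpers in this file, not a normative description of
 * the format; see the documentation referenced in the header above):
 *
 *   - an element may be preceded by an optional AA_NAME tag whose value is
 *     a u16-sized, NUL-terminated string (see aa_unpack_nameX()),
 *   - a single control byte (enum aa_code) identifies the element type
 *     (see aa_unpack_X()),
 *   - sizes and integers are stored unaligned and little-endian: u16 chunk
 *     sizes (aa_unpack_u16_chunk()), u32/u64 values (aa_unpack_u32()/u64()),
 *     and u32-sized blobs (aa_unpack_blob()),
 *   - strings are NUL terminated and carried as u16 chunks
 *     (aa_unpack_str()).
 *
 * So, purely for illustration, a named 32-bit value could appear on the
 * wire roughly as:
 *
 *	AA_NAME, le16 len=8, "version\0", AA_U32, le32 value
 *
 * Every read is bounds checked against the loaded blob (aa_inbounds()) and
 * the read head only advances on success.
 */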

/* test if read will be in packed data bounds */
VISIBLE_IF_KUNIT bool aa_inbounds(struct aa_ext *e, size_t size)
{
	return (size <= e->end - e->pos);
}
EXPORT_SYMBOL_IF_KUNIT(aa_inbounds);

/**
 * aa_unpack_u16_chunk - test and do bounds checking for a u16 size based chunk
 * @e: serialized data read head (NOT NULL)
 * @chunk: start address for chunk of data (NOT NULL)
 *
 * Returns: the size of chunk found with the read head at the end of the chunk.
 */
VISIBLE_IF_KUNIT size_t aa_unpack_u16_chunk(struct aa_ext *e, char **chunk)
{
	size_t size = 0;
	void *pos = e->pos;

	if (!aa_inbounds(e, sizeof(u16)))
		goto fail;
	size = le16_to_cpu(get_unaligned((__le16 *) e->pos));
	e->pos += sizeof(__le16);
	if (!aa_inbounds(e, size))
		goto fail;
	*chunk = e->pos;
	e->pos += size;
	return size;

fail:
	e->pos = pos;
	return 0;
}
EXPORT_SYMBOL_IF_KUNIT(aa_unpack_u16_chunk);

/* unpack control byte */
VISIBLE_IF_KUNIT bool aa_unpack_X(struct aa_ext *e, enum aa_code code)
{
	if (!aa_inbounds(e, 1))
		return false;
	if (*(u8 *) e->pos != code)
		return false;
	e->pos++;
	return true;
}
EXPORT_SYMBOL_IF_KUNIT(aa_unpack_X);

/**
 * aa_unpack_nameX - check if the next element is of type X with a name of @name
 * @e: serialized data extent information (NOT NULL)
 * @code: type code
 * @name: name to match to the serialized element. (MAYBE NULL)
 *
 * check that the next serialized data element is of type X and has a tag
 * name @name. If @name is specified then there must be a matching
 * name element in the stream. If @name is NULL any name element will be
 * skipped and only the typecode will be tested.
 *
 * Returns: true on success (both type code and name tests match) and the
 * read head is advanced past the headers; false if either match fails and
 * the read head does not move
 */
VISIBLE_IF_KUNIT bool aa_unpack_nameX(struct aa_ext *e, enum aa_code code, const char *name)
{
	/*
	 * May need to reset pos if name or type doesn't match
	 */
	void *pos = e->pos;
	/*
	 * Check for presence of a tagname, and if present name size
	 * AA_NAME tag value is a u16.
	 */
	if (aa_unpack_X(e, AA_NAME)) {
		char *tag = NULL;
		size_t size = aa_unpack_u16_chunk(e, &tag);
		/* if a name is specified it must match. otherwise skip tag */
		if (name && (!size || tag[size-1] != '\0' || strcmp(name, tag)))
			goto fail;
	} else if (name) {
		/* if a name is specified and there is no name tag fail */
		goto fail;
	}

	/* now check if type code matches */
	if (aa_unpack_X(e, code))
		return true;

fail:
	e->pos = pos;
	return false;
}
EXPORT_SYMBOL_IF_KUNIT(aa_unpack_nameX);
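
/*
 * Typical caller pattern for an optional, named container (an informal
 * sketch that mirrors unpack_xattrs()/unpack_trans_table() below; "example"
 * is a hypothetical element name).  Absent optional elements are not an
 * error, and on failure the read head is rewound so callers can try
 * alternatives:
 *
 *	void *pos = e->pos;
 *
 *	if (aa_unpack_nameX(e, AA_STRUCT, "example")) {
 *		... unpack the members, e.g. aa_unpack_u32(e, &v, NULL) ...
 *		if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL))
 *			goto fail;
 *	}
 *	return true;
 * fail:
 *	e->pos = pos;
 *	return false;
 */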

static bool unpack_u8(struct aa_ext *e, u8 *data, const char *name)
{
	void *pos = e->pos;

	if (aa_unpack_nameX(e, AA_U8, name)) {
		if (!aa_inbounds(e, sizeof(u8)))
			goto fail;
		if (data)
			*data = *((u8 *)e->pos);
		e->pos += sizeof(u8);
		return true;
	}

fail:
	e->pos = pos;
	return false;
}

VISIBLE_IF_KUNIT bool aa_unpack_u32(struct aa_ext *e, u32 *data, const char *name)
{
	void *pos = e->pos;

	if (aa_unpack_nameX(e, AA_U32, name)) {
		if (!aa_inbounds(e, sizeof(u32)))
			goto fail;
		if (data)
			*data = le32_to_cpu(get_unaligned((__le32 *) e->pos));
		e->pos += sizeof(u32);
		return true;
	}

fail:
	e->pos = pos;
	return false;
}
EXPORT_SYMBOL_IF_KUNIT(aa_unpack_u32);

VISIBLE_IF_KUNIT bool aa_unpack_u64(struct aa_ext *e, u64 *data, const char *name)
{
	void *pos = e->pos;

	if (aa_unpack_nameX(e, AA_U64, name)) {
		if (!aa_inbounds(e, sizeof(u64)))
			goto fail;
		if (data)
			*data = le64_to_cpu(get_unaligned((__le64 *) e->pos));
		e->pos += sizeof(u64);
		return true;
	}

fail:
	e->pos = pos;
	return false;
}
EXPORT_SYMBOL_IF_KUNIT(aa_unpack_u64);

VISIBLE_IF_KUNIT bool aa_unpack_array(struct aa_ext *e, const char *name, u16 *size)
{
	void *pos = e->pos;

	if (aa_unpack_nameX(e, AA_ARRAY, name)) {
		if (!aa_inbounds(e, sizeof(u16)))
			goto fail;
		*size = le16_to_cpu(get_unaligned((__le16 *) e->pos));
		e->pos += sizeof(u16);
		return true;
	}

fail:
	e->pos = pos;
	return false;
}
EXPORT_SYMBOL_IF_KUNIT(aa_unpack_array);

VISIBLE_IF_KUNIT size_t aa_unpack_blob(struct aa_ext *e, char **blob, const char *name)
{
	void *pos = e->pos;

	if (aa_unpack_nameX(e, AA_BLOB, name)) {
		u32 size;

		if (!aa_inbounds(e, sizeof(u32)))
			goto fail;
		size = le32_to_cpu(get_unaligned((__le32 *) e->pos));
		e->pos += sizeof(u32);
		if (aa_inbounds(e, (size_t) size)) {
			*blob = e->pos;
			e->pos += size;
			return size;
		}
	}

fail:
	e->pos = pos;
	return 0;
}
EXPORT_SYMBOL_IF_KUNIT(aa_unpack_blob);

VISIBLE_IF_KUNIT int aa_unpack_str(struct aa_ext *e, const char **string, const char *name)
{
	char *src_str;
	size_t size = 0;
	void *pos = e->pos;

	*string = NULL;
	if (aa_unpack_nameX(e, AA_STRING, name)) {
		size = aa_unpack_u16_chunk(e, &src_str);
		if (size) {
			/* strings are null terminated, length is size - 1 */
			if (src_str[size - 1] != 0)
				goto fail;
			*string = src_str;

			return size;
		}
	}

fail:
	e->pos = pos;
	return 0;
}
EXPORT_SYMBOL_IF_KUNIT(aa_unpack_str);

VISIBLE_IF_KUNIT int aa_unpack_strdup(struct aa_ext *e, char **string, const char *name)
{
	const char *tmp;
	void *pos = e->pos;
	int res = aa_unpack_str(e, &tmp, name);

	*string = NULL;

	if (!res)
		return 0;

	*string = kmemdup(tmp, res, GFP_KERNEL);
	if (!*string) {
		e->pos = pos;
		return 0;
	}

	return res;
}
EXPORT_SYMBOL_IF_KUNIT(aa_unpack_strdup);


/**
 * unpack_dfa - unpack a file rule dfa
 * @e: serialized data extent information (NOT NULL)
 * @flags: dfa flags to check
 *
 * Returns: dfa, or ERR_PTR on failure, or NULL if no dfa is present
 */
static struct aa_dfa *unpack_dfa(struct aa_ext *e, int flags)
{
	char *blob = NULL;
	size_t size;
	struct aa_dfa *dfa = NULL;

	size = aa_unpack_blob(e, &blob, "aadfa");
	if (size) {
		/*
		 * The dfa is aligned within the blob to 8 bytes
		 * from the beginning of the stream.
		 * alignment adjust needed by dfa unpack
		 */
		size_t sz = blob - (char *) e->start -
			((e->pos - e->start) & 7);
		size_t pad = ALIGN(sz, 8) - sz;

		if (aa_g_paranoid_load)
			flags |= DFA_FLAG_VERIFY_STATES;
		dfa = aa_dfa_unpack(blob + pad, size - pad, flags);

		if (IS_ERR(dfa))
			return dfa;

	}

	return dfa;
}
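
/*
 * Worked example of the pad computation above (the numbers are purely
 * illustrative): if the blob payload starts 20 bytes into the stream and is
 * 100 bytes long, then after aa_unpack_blob() e->pos sits at offset 120, so
 *
 *	sz  = 20 - (120 & 7)    = 20
 *	pad = ALIGN(20, 8) - 20 = 4
 *
 * and the dfa is unpacked from blob + 4 with size 100 - 4 = 96; the first
 * 'pad' bytes of the blob are treated as alignment padding and skipped.
 */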

/**
 * unpack_trans_table - unpack a profile transition table
 * @e: serialized data extent information (NOT NULL)
 * @strs: str table to unpack to (NOT NULL)
 *
 * Returns: true if table successfully unpacked or not present
 */
static bool unpack_trans_table(struct aa_ext *e, struct aa_str_table *strs)
{
	void *saved_pos = e->pos;
	char **table = NULL;

	/* exec table is optional */
	if (aa_unpack_nameX(e, AA_STRUCT, "xtable")) {
		u16 size;
		int i;

		if (!aa_unpack_array(e, NULL, &size))
			/*
			 * Note: index into trans table array is a max
			 * of 2^24, but unpack array can only unpack
			 * an array of 2^16 in size atm so no need
			 * for size check here
			 */
			goto fail;
		table = kcalloc(size, sizeof(char *), GFP_KERNEL);
		if (!table)
			goto fail;

		for (i = 0; i < size; i++) {
			char *str;
			int c, j, pos, size2 = aa_unpack_strdup(e, &str, NULL);

			/* aa_unpack_strdup verifies that the last character
			 * is the null termination byte.
			 */
			if (!size2)
				goto fail;
			table[i] = str;
			/* verify that name doesn't start with space */
			if (isspace(*str))
				goto fail;

			/* count the number of internal \0 */
			for (c = j = 0; j < size2 - 1; j++) {
				if (!str[j]) {
					pos = j;
					c++;
				}
			}
			if (*str == ':') {
				/* first character after : must be valid */
				if (!str[1])
					goto fail;
				/* beginning with : requires an embedded \0,
				 * verify that exactly 1 internal \0 exists
				 * trailing \0 already verified by aa_unpack_strdup
				 *
				 * convert \0 back to : for label_parse
				 */
				if (c == 1)
					str[pos] = ':';
				else if (c > 1)
					goto fail;
			} else if (c)
				/* fail - all other cases with embedded \0 */
				goto fail;
		}
		if (!aa_unpack_nameX(e, AA_ARRAYEND, NULL))
			goto fail;
		if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL))
			goto fail;

		strs->table = table;
		strs->size = size;
	}
	return true;

fail:
	kfree_sensitive(table);
	e->pos = saved_pos;
	return false;
}
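
/*
 * Example of the ':' handling above (the entry contents are hypothetical):
 * a transition target beginning with ':' names a namespace and is shipped
 * in the xtable with one embedded '\0'; unpack_trans_table() verifies there
 * is exactly one such embedded '\0' and converts it back to ':', so an
 * entry shipped as ":ns\0profile" becomes ":ns:profile" before it is later
 * handed to the label parser.  Entries that do not start with ':' must not
 * contain any embedded '\0' at all.
 */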

static bool unpack_xattrs(struct aa_ext *e, struct aa_profile *profile)
{
	void *pos = e->pos;

	if (aa_unpack_nameX(e, AA_STRUCT, "xattrs")) {
		u16 size;
		int i;

		if (!aa_unpack_array(e, NULL, &size))
			goto fail;
		profile->attach.xattr_count = size;
		profile->attach.xattrs = kcalloc(size, sizeof(char *), GFP_KERNEL);
		if (!profile->attach.xattrs)
			goto fail;
		for (i = 0; i < size; i++) {
			if (!aa_unpack_strdup(e, &profile->attach.xattrs[i], NULL))
				goto fail;
		}
		if (!aa_unpack_nameX(e, AA_ARRAYEND, NULL))
			goto fail;
		if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL))
			goto fail;
	}

	return true;

fail:
	e->pos = pos;
	return false;
}

static bool unpack_secmark(struct aa_ext *e, struct aa_ruleset *rules)
{
	void *pos = e->pos;
	u16 size;
	int i;

	if (aa_unpack_nameX(e, AA_STRUCT, "secmark")) {
		if (!aa_unpack_array(e, NULL, &size))
			goto fail;

		rules->secmark = kcalloc(size, sizeof(struct aa_secmark),
					 GFP_KERNEL);
		if (!rules->secmark)
			goto fail;

		rules->secmark_count = size;

		for (i = 0; i < size; i++) {
			if (!unpack_u8(e, &rules->secmark[i].audit, NULL))
				goto fail;
			if (!unpack_u8(e, &rules->secmark[i].deny, NULL))
				goto fail;
			if (!aa_unpack_strdup(e, &rules->secmark[i].label, NULL))
				goto fail;
		}
		if (!aa_unpack_nameX(e, AA_ARRAYEND, NULL))
			goto fail;
		if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL))
			goto fail;
	}

	return true;

fail:
	if (rules->secmark) {
		for (i = 0; i < size; i++)
			kfree(rules->secmark[i].label);
		kfree(rules->secmark);
		rules->secmark_count = 0;
		rules->secmark = NULL;
	}

	e->pos = pos;
	return false;
}

static bool unpack_rlimits(struct aa_ext *e, struct aa_ruleset *rules)
{
	void *pos = e->pos;

	/* rlimits are optional */
	if (aa_unpack_nameX(e, AA_STRUCT, "rlimits")) {
		u16 size;
		int i;
		u32 tmp = 0;

		if (!aa_unpack_u32(e, &tmp, NULL))
			goto fail;
		rules->rlimits.mask = tmp;

		if (!aa_unpack_array(e, NULL, &size) ||
		    size > RLIM_NLIMITS)
			goto fail;
		for (i = 0; i < size; i++) {
			u64 tmp2 = 0;
			int a = aa_map_resource(i);

			if (!aa_unpack_u64(e, &tmp2, NULL))
				goto fail;
			rules->rlimits.limits[a].rlim_max = tmp2;
		}
		if (!aa_unpack_nameX(e, AA_ARRAYEND, NULL))
			goto fail;
		if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL))
			goto fail;
	}
	return true;

fail:
	e->pos = pos;
	return false;
}

static bool unpack_perm(struct aa_ext *e, u32 version, struct aa_perms *perm)
{
	if (version != 1)
		return false;

	/*
	 * Note: two consecutive u32s are read into ->allow below; the
	 * second read overwrites the first.
	 */
	return	aa_unpack_u32(e, &perm->allow, NULL) &&
		aa_unpack_u32(e, &perm->allow, NULL) &&
		aa_unpack_u32(e, &perm->deny, NULL) &&
		aa_unpack_u32(e, &perm->subtree, NULL) &&
		aa_unpack_u32(e, &perm->cond, NULL) &&
		aa_unpack_u32(e, &perm->kill, NULL) &&
		aa_unpack_u32(e, &perm->complain, NULL) &&
		aa_unpack_u32(e, &perm->prompt, NULL) &&
		aa_unpack_u32(e, &perm->audit, NULL) &&
		aa_unpack_u32(e, &perm->quiet, NULL) &&
		aa_unpack_u32(e, &perm->hide, NULL) &&
		aa_unpack_u32(e, &perm->xindex, NULL) &&
		aa_unpack_u32(e, &perm->tag, NULL) &&
		aa_unpack_u32(e, &perm->label, NULL);
}

static ssize_t unpack_perms_table(struct aa_ext *e, struct aa_perms **perms)
{
	void *pos = e->pos;
	u16 size = 0;

	AA_BUG(!perms);
	/*
	 * policy perms are optional, in which case perms are embedded
	 * in the dfa accept table
	 */
	if (aa_unpack_nameX(e, AA_STRUCT, "perms")) {
		int i;
		u32 version;

		if (!aa_unpack_u32(e, &version, "version"))
			goto fail_reset;
		if (!aa_unpack_array(e, NULL, &size))
			goto fail_reset;
		*perms = kcalloc(size, sizeof(struct aa_perms), GFP_KERNEL);
		if (!*perms)
			goto fail_reset;
		for (i = 0; i < size; i++) {
			if (!unpack_perm(e, version, &(*perms)[i]))
				goto fail;
		}
		if (!aa_unpack_nameX(e, AA_ARRAYEND, NULL))
			goto fail;
		if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL))
			goto fail;
	} else
		*perms = NULL;

	return size;

fail:
	kfree(*perms);
fail_reset:
	e->pos = pos;
	return -EPROTO;
}
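
/*
 * Rough order of the elements consumed by unpack_pdb() below (an informal
 * summary of the code, not a format specification):
 *
 *	[ AA_STRUCT "perms" ]			optional explicit perm table
 *	[ AA_BLOB   "aadfa" ]			the dfa itself (may be absent)
 *	[ AA_U32    "start" / "dfa_start" ]	optional start state(s)
 *	[ AA_STRUCT "xtable" ]			optional transition str table
 *
 * When the explicit perms table is present the dfa accept table holds
 * indexes into it; otherwise permissions are packed directly into the
 * accept1/accept2 tables.
 */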

static int unpack_pdb(struct aa_ext *e, struct aa_policydb *policy,
		      bool required_dfa, bool required_trans,
		      const char **info)
{
	void *pos = e->pos;
	int i, flags, error = -EPROTO;
	ssize_t size;

	size = unpack_perms_table(e, &policy->perms);
	if (size < 0) {
		error = size;
		policy->perms = NULL;
		*info = "failed to unpack - perms";
		goto fail;
	}
	policy->size = size;

	if (policy->perms) {
		/* perms table present accept is index */
		flags = TO_ACCEPT1_FLAG(YYTD_DATA32);
	} else {
		/* packed perms in accept1 and accept2 */
		flags = TO_ACCEPT1_FLAG(YYTD_DATA32) |
			TO_ACCEPT2_FLAG(YYTD_DATA32);
	}

	policy->dfa = unpack_dfa(e, flags);
	if (IS_ERR(policy->dfa)) {
		error = PTR_ERR(policy->dfa);
		policy->dfa = NULL;
		*info = "failed to unpack - dfa";
		goto fail;
	} else if (!policy->dfa) {
		if (required_dfa) {
			*info = "missing required dfa";
			goto fail;
		}
		goto out;
	}

	/*
	 * only unpack the following if a dfa is present
	 *
	 * sadly start was given different names for file and policydb
	 * but since it is optional we can try both
	 */
	if (!aa_unpack_u32(e, &policy->start[0], "start"))
		/* default start state */
		policy->start[0] = DFA_START;
	if (!aa_unpack_u32(e, &policy->start[AA_CLASS_FILE], "dfa_start")) {
		/* default start state for xmatch and file dfa */
		policy->start[AA_CLASS_FILE] = DFA_START;
	} /* setup class index */
	for (i = AA_CLASS_FILE + 1; i <= AA_CLASS_LAST; i++) {
		policy->start[i] = aa_dfa_next(policy->dfa, policy->start[0],
					       i);
	}
	if (!unpack_trans_table(e, &policy->trans) && required_trans) {
		*info = "failed to unpack profile transition table";
		goto fail;
	}

	/* TODO: move compat mapping here, requires dfa merging first */
	/* TODO: move verify here, it has to be done after compat mappings */
out:
	return 0;

fail:
	e->pos = pos;
	return error;
}

static u32 strhash(const void *data, u32 len, u32 seed)
{
	const char * const *key = data;

	return jhash(*key, strlen(*key), seed);
}

static int datacmp(struct rhashtable_compare_arg *arg, const void *obj)
{
	const struct aa_data *data = obj;
	const char * const *key = arg->key;

	return strcmp(data->key, *key);
}
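
/*
 * Informal summary of the element order consumed by unpack_profile() (this
 * mirrors the code below; it is not a format specification): the profile
 * AA_STRUCT starts with the profile name, followed by the optional rename
 * and attachment strings, the xmatch policydb, the optional disconnected
 * path, the "flags" struct (packed flags, mode, audit), optional
 * path_flags, the capability sets (plus optional "caps64"/"capsx"
 * extensions), optional xattrs, rlimits and secmark structs, the optional
 * "policydb", the file rules, and finally an optional "data" key/value
 * table.
 */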

/**
 * unpack_profile - unpack a serialized profile
 * @e: serialized data extent information (NOT NULL)
 * @ns_name: Returns - pointer to a newly allocated copy of the namespace
 *           name, or %NULL in case of error
 *
 * NOTE: unpack profile sets audit struct if there is a failure
 */
static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
{
	struct aa_ruleset *rules;
	struct aa_profile *profile = NULL;
	const char *tmpname, *tmpns = NULL, *name = NULL;
	const char *info = "failed to unpack profile";
	size_t ns_len;
	struct rhashtable_params params = { 0 };
	char *key = NULL;
	struct aa_data *data;
	int error = -EPROTO;
	kernel_cap_t tmpcap;
	u32 tmp;

	*ns_name = NULL;

	/* check that we have the right struct being passed */
	if (!aa_unpack_nameX(e, AA_STRUCT, "profile"))
		goto fail;
	if (!aa_unpack_str(e, &name, NULL))
		goto fail;
	if (*name == '\0')
		goto fail;

	tmpname = aa_splitn_fqname(name, strlen(name), &tmpns, &ns_len);
	if (tmpns) {
		*ns_name = kstrndup(tmpns, ns_len, GFP_KERNEL);
		if (!*ns_name) {
			info = "out of memory";
			error = -ENOMEM;
			goto fail;
		}
		name = tmpname;
	}

	profile = aa_alloc_profile(name, NULL, GFP_KERNEL);
	if (!profile) {
		info = "out of memory";
		error = -ENOMEM;
		goto fail;
	}
	rules = list_first_entry(&profile->rules, typeof(*rules), list);

	/* profile renaming is optional */
	(void) aa_unpack_str(e, &profile->rename, "rename");

	/* attachment string is optional */
	(void) aa_unpack_str(e, &profile->attach.xmatch_str, "attach");

	/* xmatch is optional and may be NULL */
	error = unpack_pdb(e, &profile->attach.xmatch, false, false, &info);
	if (error) {
		info = "bad xmatch";
		goto fail;
	}

	/* neither xmatch_len nor xmatch_perms is optional if xmatch is set */
	if (profile->attach.xmatch.dfa) {
		if (!aa_unpack_u32(e, &tmp, NULL)) {
			info = "missing xmatch len";
			goto fail;
		}
		profile->attach.xmatch_len = tmp;
		profile->attach.xmatch.start[AA_CLASS_XMATCH] = DFA_START;
		error = aa_compat_map_xmatch(&profile->attach.xmatch);
		if (error) {
			info = "failed to convert xmatch permission table";
			goto fail;
		}
	}

	/* disconnected attachment string is optional */
	(void) aa_unpack_str(e, &profile->disconnected, "disconnected");

	/* per profile debug flags (complain, audit) */
	if (!aa_unpack_nameX(e, AA_STRUCT, "flags")) {
		info = "profile missing flags";
		goto fail;
	}
	info = "failed to unpack profile flags";
	if (!aa_unpack_u32(e, &tmp, NULL))
		goto fail;
	if (tmp & PACKED_FLAG_HAT)
		profile->label.flags |= FLAG_HAT;
	if (tmp & PACKED_FLAG_DEBUG1)
		profile->label.flags |= FLAG_DEBUG1;
	if (tmp & PACKED_FLAG_DEBUG2)
		profile->label.flags |= FLAG_DEBUG2;
	if (!aa_unpack_u32(e, &tmp, NULL))
		goto fail;
	if (tmp == PACKED_MODE_COMPLAIN || (e->version & FORCE_COMPLAIN_FLAG)) {
		profile->mode = APPARMOR_COMPLAIN;
	} else if (tmp == PACKED_MODE_ENFORCE) {
		profile->mode = APPARMOR_ENFORCE;
	} else if (tmp == PACKED_MODE_KILL) {
		profile->mode = APPARMOR_KILL;
	} else if (tmp == PACKED_MODE_UNCONFINED) {
		profile->mode = APPARMOR_UNCONFINED;
		profile->label.flags |= FLAG_UNCONFINED;
	} else if (tmp == PACKED_MODE_USER) {
		profile->mode = APPARMOR_USER;
	} else {
		goto fail;
	}
	if (!aa_unpack_u32(e, &tmp, NULL))
		goto fail;
	if (tmp)
		profile->audit = AUDIT_ALL;

	if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL))
		goto fail;

	/* path_flags is optional */
	if (aa_unpack_u32(e, &profile->path_flags, "path_flags"))
		profile->path_flags |= profile->label.flags &
			PATH_MEDIATE_DELETED;
	else
		/* set a default value if path_flags field is not present */
		profile->path_flags = PATH_MEDIATE_DELETED;

	info = "failed to unpack profile capabilities";
	if (!aa_unpack_u32(e, &(rules->caps.allow.cap[0]), NULL))
		goto fail;
	if (!aa_unpack_u32(e, &(rules->caps.audit.cap[0]), NULL))
		goto fail;
	if (!aa_unpack_u32(e, &(rules->caps.quiet.cap[0]), NULL))
		goto fail;
	if (!aa_unpack_u32(e, &tmpcap.cap[0], NULL))
		goto fail;

	info = "failed to unpack upper profile capabilities";
	if (aa_unpack_nameX(e, AA_STRUCT, "caps64")) {
		/* optional upper half of 64 bit caps */
		if (!aa_unpack_u32(e, &(rules->caps.allow.cap[1]), NULL))
			goto fail;
		if (!aa_unpack_u32(e, &(rules->caps.audit.cap[1]), NULL))
			goto fail;
		if (!aa_unpack_u32(e, &(rules->caps.quiet.cap[1]), NULL))
			goto fail;
		if (!aa_unpack_u32(e, &(tmpcap.cap[1]), NULL))
			goto fail;
		if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL))
			goto fail;
	}

	info = "failed to unpack extended profile capabilities";
	if (aa_unpack_nameX(e, AA_STRUCT, "capsx")) {
		/* optional extended caps mediation mask */
		if (!aa_unpack_u32(e, &(rules->caps.extended.cap[0]), NULL))
			goto fail;
		if (!aa_unpack_u32(e, &(rules->caps.extended.cap[1]), NULL))
			goto fail;
		if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL))
			goto fail;
	}

	if (!unpack_xattrs(e, profile)) {
		info = "failed to unpack profile xattrs";
		goto fail;
	}

	if (!unpack_rlimits(e, rules)) {
		info = "failed to unpack profile rlimits";
		goto fail;
	}

	if (!unpack_secmark(e, rules)) {
		info = "failed to unpack profile secmark rules";
		goto fail;
	}

	if (aa_unpack_nameX(e, AA_STRUCT, "policydb")) {
		/* generic policy dfa - optional and may be NULL */
		info = "failed to unpack policydb";
		error = unpack_pdb(e, &rules->policy, true, false,
				   &info);
		if (error)
			goto fail;
		/* Fixup: drop when we get rid of start array */
		if (aa_dfa_next(rules->policy.dfa, rules->policy.start[0],
				AA_CLASS_FILE))
			rules->policy.start[AA_CLASS_FILE] =
				aa_dfa_next(rules->policy.dfa,
					    rules->policy.start[0],
					    AA_CLASS_FILE);
		if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL))
			goto fail;
		error = aa_compat_map_policy(&rules->policy, e->version);
		if (error) {
			info = "failed to remap policydb permission table";
			goto fail;
		}
	} else
		rules->policy.dfa = aa_get_dfa(nulldfa);

	/* get file rules */
	error = unpack_pdb(e, &rules->file, false, true, &info);
	if (error) {
		goto fail;
	} else if (rules->file.dfa) {
		error = aa_compat_map_file(&rules->file);
		if (error) {
			info = "failed to remap file permission table";
			goto fail;
		}
	} else if (rules->policy.dfa &&
		   rules->policy.start[AA_CLASS_FILE]) {
		rules->file.dfa = aa_get_dfa(rules->policy.dfa);
		rules->file.start[AA_CLASS_FILE] = rules->policy.start[AA_CLASS_FILE];
	} else
		rules->file.dfa = aa_get_dfa(nulldfa);
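
	/*
	 * What follows is the optional per-profile key/value "data" table.
	 * Informally (mirroring the loop below, not a format spec), it is
	 * an AA_STRUCT named "data" containing repeated pairs of a NUL
	 * terminated key string and a blob value, terminated by the struct
	 * end tag; the pairs are inserted into an rhashtable keyed by the
	 * string.
	 */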
	error = -EPROTO;
	if (aa_unpack_nameX(e, AA_STRUCT, "data")) {
		info = "out of memory";
		profile->data = kzalloc(sizeof(*profile->data), GFP_KERNEL);
		if (!profile->data) {
			error = -ENOMEM;
			goto fail;
		}
		params.nelem_hint = 3;
		params.key_len = sizeof(void *);
		params.key_offset = offsetof(struct aa_data, key);
		params.head_offset = offsetof(struct aa_data, head);
		params.hashfn = strhash;
		params.obj_cmpfn = datacmp;

		if (rhashtable_init(profile->data, &params)) {
			info = "failed to init key, value hash table";
			goto fail;
		}

		while (aa_unpack_strdup(e, &key, NULL)) {
			data = kzalloc(sizeof(*data), GFP_KERNEL);
			if (!data) {
				kfree_sensitive(key);
				error = -ENOMEM;
				goto fail;
			}

			data->key = key;
			data->size = aa_unpack_blob(e, &data->data, NULL);
			data->data = kvmemdup(data->data, data->size, GFP_KERNEL);
			if (data->size && !data->data) {
				kfree_sensitive(data->key);
				kfree_sensitive(data);
				error = -ENOMEM;
				goto fail;
			}

			rhashtable_insert_fast(profile->data, &data->head,
					       profile->data->p);
		}

		if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL)) {
			info = "failed to unpack end of key, value data table";
			goto fail;
		}
	}

	if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL)) {
		info = "failed to unpack end of profile";
		goto fail;
	}

	return profile;

fail:
	if (error == 0)
		/* default error covers most cases */
		error = -EPROTO;
	if (*ns_name) {
		kfree(*ns_name);
		*ns_name = NULL;
	}
	if (profile)
		name = NULL;
	else if (!name)
		name = "unknown";
	audit_iface(profile, NULL, name, info, e, error);
	aa_free_profile(profile);

	return ERR_PTR(error);
}

/**
 * verify_header - unpack serialized stream header
 * @e: serialized data read head (NOT NULL)
 * @required: whether the header is required or optional
 * @ns: Returns - namespace if one is specified else NULL (NOT NULL)
 *
 * Returns: error or 0 if header is good
 */
static int verify_header(struct aa_ext *e, int required, const char **ns)
{
	int error = -EPROTONOSUPPORT;
	const char *name = NULL;
	*ns = NULL;

	/* get the interface version */
	if (!aa_unpack_u32(e, &e->version, "version")) {
		if (required) {
			audit_iface(NULL, NULL, NULL, "invalid profile format",
				    e, error);
			return error;
		}
	}

	/* Check that the interface version is currently supported.
	 * if not specified use previous version
	 * Mask off everything that is not kernel abi version
	 */
	if (VERSION_LT(e->version, v5) || VERSION_GT(e->version, v9)) {
		audit_iface(NULL, NULL, NULL, "unsupported interface version",
			    e, error);
		return error;
	}

	/* read the namespace if present */
	if (aa_unpack_str(e, &name, "namespace")) {
		if (*name == '\0') {
			audit_iface(NULL, NULL, NULL, "invalid namespace name",
				    e, error);
			return error;
		}
		if (*ns && strcmp(*ns, name)) {
			audit_iface(NULL, NULL, NULL, "invalid ns change", e,
				    error);
		} else if (!*ns) {
			*ns = kstrdup(name, GFP_KERNEL);
			if (!*ns)
				return -ENOMEM;
		}
	}

	return 0;
}

static bool verify_xindex(int xindex, int table_size)
{
	int index, xtype;

	xtype = xindex & AA_X_TYPE_MASK;
	index = xindex & AA_X_INDEX_MASK;
	if (xtype == AA_X_TABLE && index >= table_size)
		return false;
	return true;
}

/* verify dfa xindexes are in range of transition tables */
static bool verify_dfa_xindex(struct aa_dfa *dfa, int table_size)
{
	int i;

	for (i = 0; i < dfa->tables[YYTD_ID_ACCEPT]->td_lolen; i++) {
		if (!verify_xindex(ACCEPT_TABLE(dfa)[i], table_size))
			return false;
	}
	return true;
}

static bool verify_perm(struct aa_perms *perm)
{
	/* TODO: allow option to just force the perms into a valid state */
	if (perm->allow & perm->deny)
		return false;
	if (perm->subtree & ~perm->allow)
		return false;
	if (perm->cond & (perm->allow | perm->deny))
		return false;
	if (perm->kill & perm->allow)
		return false;
	if (perm->complain & (perm->allow | perm->deny))
		return false;
	if (perm->prompt & (perm->allow | perm->deny))
		return false;
	if (perm->complain & perm->prompt)
		return false;
	if (perm->hide & perm->allow)
		return false;

	return true;
}

static bool verify_perms(struct aa_policydb *pdb)
{
	int i;

	for (i = 0; i < pdb->size; i++) {
		if (!verify_perm(&pdb->perms[i]))
			return false;
		/* verify indexes into str table */
		if (pdb->perms[i].xindex >= pdb->trans.size)
			return false;
		if (pdb->perms[i].tag >= pdb->trans.size)
			return false;
		if (pdb->perms[i].label >= pdb->trans.size)
			return false;
	}

	return true;
}

/**
 * verify_profile - Do post unpack analysis to verify profile consistency
 * @profile: profile to verify (NOT NULL)
 *
 * Returns: 0 if passes verification else error
 *
 * This verification is post any unpack mapping or changes
 */
static int verify_profile(struct aa_profile *profile)
{
	struct aa_ruleset *rules = list_first_entry(&profile->rules,
						    typeof(*rules), list);
	if (!rules)
		return 0;

	if ((rules->file.dfa && !verify_dfa_xindex(rules->file.dfa,
						   rules->file.trans.size)) ||
	    (rules->policy.dfa &&
	     !verify_dfa_xindex(rules->policy.dfa, rules->policy.trans.size))) {
		audit_iface(profile, NULL, NULL,
			    "Unpack: Invalid named transition", NULL, -EPROTO);
		return -EPROTO;
	}

	if (!verify_perms(&rules->file)) {
		audit_iface(profile, NULL, NULL,
			    "Unpack: Invalid perm index", NULL, -EPROTO);
		return -EPROTO;
	}
	if (!verify_perms(&rules->policy)) {
		audit_iface(profile, NULL, NULL,
			    "Unpack: Invalid perm index", NULL, -EPROTO);
		return -EPROTO;
	}
	if (!verify_perms(&profile->attach.xmatch)) {
		audit_iface(profile, NULL, NULL,
			    "Unpack: Invalid perm index", NULL, -EPROTO);
		return -EPROTO;
	}

	return 0;
}

void aa_load_ent_free(struct aa_load_ent *ent)
{
	if (ent) {
		aa_put_profile(ent->rename);
		aa_put_profile(ent->old);
		aa_put_profile(ent->new);
		kfree(ent->ns_name);
		kfree_sensitive(ent);
	}
}

struct aa_load_ent *aa_load_ent_alloc(void)
{
	struct aa_load_ent *ent = kzalloc(sizeof(*ent), GFP_KERNEL);

	if (ent)
		INIT_LIST_HEAD(&ent->list);
	return ent;
}

static int compress_zstd(const char *src, size_t slen, char **dst, size_t *dlen)
{
#ifdef CONFIG_SECURITY_APPARMOR_EXPORT_BINARY
	const zstd_parameters params =
		zstd_get_params(aa_g_rawdata_compression_level, slen);
	const size_t wksp_len = zstd_cctx_workspace_bound(&params.cParams);
	void *wksp = NULL;
	zstd_cctx *ctx = NULL;
	size_t out_len = zstd_compress_bound(slen);
	void *out = NULL;
	int ret = 0;

	out = kvzalloc(out_len, GFP_KERNEL);
	if (!out) {
		ret = -ENOMEM;
		goto cleanup;
	}

	wksp = kvzalloc(wksp_len, GFP_KERNEL);
	if (!wksp) {
		ret = -ENOMEM;
		goto cleanup;
	}

	ctx = zstd_init_cctx(wksp, wksp_len);
	if (!ctx) {
		ret = -EINVAL;
		goto cleanup;
	}

	out_len = zstd_compress_cctx(ctx, out, out_len, src, slen, &params);
	if (zstd_is_error(out_len) || out_len >= slen) {
		ret = -EINVAL;
		goto cleanup;
	}

	if (is_vmalloc_addr(out)) {
		*dst = kvzalloc(out_len, GFP_KERNEL);
		if (*dst) {
			memcpy(*dst, out, out_len);
			kvfree(out);
			out = NULL;
		}
	} else {
		/*
		 * If the staging buffer was kmalloc'd, then using krealloc is
		 * probably going to be faster. The destination buffer will
		 * always be smaller, so it's just shrunk, avoiding a memcpy
		 */
		*dst = krealloc(out, out_len, GFP_KERNEL);
	}

	if (!*dst) {
		ret = -ENOMEM;
		goto cleanup;
	}

	*dlen = out_len;

cleanup:
	if (ret) {
		kvfree(out);
		*dst = NULL;
	}

	kvfree(wksp);
	return ret;
#else
	*dlen = slen;
	return 0;
#endif
}

static int compress_loaddata(struct aa_loaddata *data)
{
	AA_BUG(data->compressed_size > 0);

	/*
	 * Shortcut the no compression case, else we increase the amount of
	 * storage required by a small amount
	 */
	if (aa_g_rawdata_compression_level != 0) {
		void *udata = data->data;
		int error = compress_zstd(udata, data->size, &data->data,
					  &data->compressed_size);
		if (error) {
			data->compressed_size = data->size;
			return error;
		}
		if (udata != data->data)
			kvfree(udata);
	} else
		data->compressed_size = data->size;

	return 0;
}

/**
 * aa_unpack - unpack packed binary profile(s) data loaded from user space
 * @udata: user data copied to kmem (NOT NULL)
 * @lh: list to place unpacked profiles in an aa_repl_ws
 * @ns: Returns namespace profile is in if specified else NULL (NOT NULL)
 *
 * Unpack user data and place the refcounted, allocated profile(s) on @lh
 * in order of discovery, with the list chain stored in base.list.
 *
 * Returns: 0 if profile(s) are placed on @lh, else error if it fails to
 *          unpack
 */
int aa_unpack(struct aa_loaddata *udata, struct list_head *lh,
	      const char **ns)
{
	struct aa_load_ent *tmp, *ent;
	struct aa_profile *profile = NULL;
	char *ns_name = NULL;
	int error;
	struct aa_ext e = {
		.start = udata->data,
		.end = udata->data + udata->size,
		.pos = udata->data,
	};

	*ns = NULL;
	while (e.pos < e.end) {
		void *start;

		error = verify_header(&e, e.pos == e.start, ns);
		if (error)
			goto fail;

		start = e.pos;
		profile = unpack_profile(&e, &ns_name);
		if (IS_ERR(profile)) {
			error = PTR_ERR(profile);
			goto fail;
		}

		error = verify_profile(profile);
		if (error)
			goto fail_profile;

		if (aa_g_hash_policy)
			error = aa_calc_profile_hash(profile, e.version, start,
						     e.pos - start);
		if (error)
			goto fail_profile;

		ent = aa_load_ent_alloc();
		if (!ent) {
			error = -ENOMEM;
			goto fail_profile;
		}

		ent->new = profile;
		ent->ns_name = ns_name;
		ns_name = NULL;
		list_add_tail(&ent->list, lh);
	}
	udata->abi = e.version & K_ABI_MASK;
	if (aa_g_hash_policy) {
		udata->hash = aa_calc_hash(udata->data, udata->size);
		if (IS_ERR(udata->hash)) {
			error = PTR_ERR(udata->hash);
			udata->hash = NULL;
			goto fail;
		}
	}

	if (aa_g_export_binary) {
		error = compress_loaddata(udata);
		if (error)
			goto fail;
	}
	return 0;

fail_profile:
	kfree(ns_name);
	aa_put_profile(profile);

fail:
	list_for_each_entry_safe(ent, tmp, lh, list) {
		list_del_init(&ent->list);
		aa_load_ent_free(ent);
	}

	return error;
}