// SPDX-License-Identifier: GPL-2.0-only
/*
 * AppArmor security module
 *
 * This file contains AppArmor functions for unpacking policy loaded from
 * userspace.
 *
 * Copyright (C) 1998-2008 Novell/SUSE
 * Copyright 2009-2010 Canonical Ltd.
 *
 * AppArmor uses a serialized binary format for loading policy. To find
 * policy format documentation see Documentation/admin-guide/LSM/apparmor.rst
 * All policy is validated before it is used.
 */

#include <asm/unaligned.h>
#include <linux/ctype.h>
#include <linux/errno.h>
#include <linux/zlib.h>

#include "include/apparmor.h"
#include "include/audit.h"
#include "include/cred.h"
#include "include/crypto.h"
#include "include/match.h"
#include "include/path.h"
#include "include/policy.h"
#include "include/policy_unpack.h"

#define K_ABI_MASK 0x3ff
#define FORCE_COMPLAIN_FLAG 0x800
#define VERSION_LT(X, Y) (((X) & K_ABI_MASK) < ((Y) & K_ABI_MASK))
#define VERSION_GT(X, Y) (((X) & K_ABI_MASK) > ((Y) & K_ABI_MASK))

#define v5	5	/* base version */
#define v6	6	/* per entry policydb mediation check */
#define v7	7
#define v8	8	/* full network masking */

/*
 * The AppArmor interface treats data as a type byte followed by the
 * actual data.  The interface has the notion of a named entry
 * which has a name (AA_NAME typecode followed by name string) followed by
 * the entry's typecode and data.  Named types allow for optional
 * elements and extensions to be added and tested for without breaking
 * backwards compatibility.
 */

enum aa_code {
	AA_U8,
	AA_U16,
	AA_U32,
	AA_U64,
	AA_NAME,	/* same as string except it is the item's name */
	AA_STRING,
	AA_BLOB,
	AA_STRUCT,
	AA_STRUCTEND,
	AA_LIST,
	AA_LISTEND,
	AA_ARRAY,
	AA_ARRAYEND,
};
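
/*
 * Illustrative layout (derived from the unpack helpers below, not from the
 * format documentation): a named u32 entry such as "version" is encoded as
 *
 *	0x04			AA_NAME typecode
 *	0x08 0x00		little-endian u16 length of the name chunk
 *	"version\0"		name string, including the trailing NUL
 *	0x02			AA_U32 typecode
 *	le32 value		the actual data
 *
 * unpack_u32() consumes all of the above: unpack_nameX() handles the
 * AA_NAME header and the typecode, then the value itself is read.
 */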

/*
 * aa_ext is the read head into the buffer containing the serialized profile.
 * The data is copied into a kernel buffer in apparmorfs and then handed off
 * to the unpack routines.
 */
struct aa_ext {
	void *start;
	void *end;
	void *pos;	/* pointer to current position in the buffer */
	u32 version;
};

/* audit callback for unpack fields */
static void audit_cb(struct audit_buffer *ab, void *va)
{
	struct common_audit_data *sa = va;

	if (aad(sa)->iface.ns) {
		audit_log_format(ab, " ns=");
		audit_log_untrustedstring(ab, aad(sa)->iface.ns);
	}
	if (aad(sa)->name) {
		audit_log_format(ab, " name=");
		audit_log_untrustedstring(ab, aad(sa)->name);
	}
	if (aad(sa)->iface.pos)
		audit_log_format(ab, " offset=%ld", aad(sa)->iface.pos);
}

/**
 * audit_iface - do audit message for policy unpacking/load/replace/remove
 * @new: profile if it has been allocated (MAY BE NULL)
 * @ns_name: name of the ns the profile is to be loaded to (MAY BE NULL)
 * @name: name of the profile being manipulated (MAY BE NULL)
 * @info: any extra info about the failure (MAY BE NULL)
 * @e: buffer position info
 * @error: error code
 *
 * Returns: %0 or error
 */
static int audit_iface(struct aa_profile *new, const char *ns_name,
		       const char *name, const char *info, struct aa_ext *e,
		       int error)
{
	struct aa_profile *profile = labels_profile(aa_current_raw_label());
	DEFINE_AUDIT_DATA(sa, LSM_AUDIT_DATA_NONE, NULL);
	if (e)
		aad(&sa)->iface.pos = e->pos - e->start;
	aad(&sa)->iface.ns = ns_name;
	if (new)
		aad(&sa)->name = new->base.hname;
	else
		aad(&sa)->name = name;
	aad(&sa)->info = info;
	aad(&sa)->error = error;

	return aa_audit(AUDIT_APPARMOR_STATUS, profile, &sa, audit_cb);
}

void __aa_loaddata_update(struct aa_loaddata *data, long revision)
{
	AA_BUG(!data);
	AA_BUG(!data->ns);
	AA_BUG(!mutex_is_locked(&data->ns->lock));
	AA_BUG(data->revision > revision);

	data->revision = revision;
	if ((data->dents[AAFS_LOADDATA_REVISION])) {
		d_inode(data->dents[AAFS_LOADDATA_DIR])->i_mtime =
			current_time(d_inode(data->dents[AAFS_LOADDATA_DIR]));
		d_inode(data->dents[AAFS_LOADDATA_REVISION])->i_mtime =
			current_time(d_inode(data->dents[AAFS_LOADDATA_REVISION]));
	}
}

bool aa_rawdata_eq(struct aa_loaddata *l, struct aa_loaddata *r)
{
	if (l->size != r->size)
		return false;
	if (l->compressed_size != r->compressed_size)
		return false;
	if (aa_g_hash_policy && memcmp(l->hash, r->hash, aa_hash_size()) != 0)
		return false;
	return memcmp(l->data, r->data, r->compressed_size ?: r->size) == 0;
}

/*
 * Freeing requires the ns mutex, which is NOT safe to take in most places
 * that put_loaddata is called from, so we have to delay freeing it.
 */
static void do_loaddata_free(struct work_struct *work)
{
	struct aa_loaddata *d = container_of(work, struct aa_loaddata, work);
	struct aa_ns *ns = aa_get_ns(d->ns);

	if (ns) {
		mutex_lock_nested(&ns->lock, ns->level);
		__aa_fs_remove_rawdata(d);
		mutex_unlock(&ns->lock);
		aa_put_ns(ns);
	}

	kfree_sensitive(d->hash);
	kfree_sensitive(d->name);
	kvfree(d->data);
	kfree_sensitive(d);
}

void aa_loaddata_kref(struct kref *kref)
{
	struct aa_loaddata *d = container_of(kref, struct aa_loaddata, count);

	if (d) {
		INIT_WORK(&d->work, do_loaddata_free);
		schedule_work(&d->work);
	}
}

struct aa_loaddata *aa_loaddata_alloc(size_t size)
{
	struct aa_loaddata *d;

	d = kzalloc(sizeof(*d), GFP_KERNEL);
	if (d == NULL)
		return ERR_PTR(-ENOMEM);
	d->data = kvzalloc(size, GFP_KERNEL);
	if (!d->data) {
		kfree(d);
		return ERR_PTR(-ENOMEM);
	}
	kref_init(&d->count);
	INIT_LIST_HEAD(&d->list);

	return d;
}
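
/*
 * Lifetime sketch (derived from the helpers above): aa_loaddata_alloc()
 * returns a kref-counted blob; when the last reference is dropped the kref
 * release (aa_loaddata_kref()) schedules do_loaddata_free() on the system
 * workqueue so the ns->lock can be taken safely while the rawdata files are
 * removed from apparmorfs.
 */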

/* test if read will be in packed data bounds */
static bool inbounds(struct aa_ext *e, size_t size)
{
	return (size <= e->end - e->pos);
}

static void *kvmemdup(const void *src, size_t len)
{
	void *p = kvmalloc(len, GFP_KERNEL);

	if (p)
		memcpy(p, src, len);
	return p;
}

/**
 * unpack_u16_chunk - test and do bounds checking for a u16 size based chunk
 * @e: serialized data read head (NOT NULL)
 * @chunk: start address for chunk of data (NOT NULL)
 *
 * Returns: the size of chunk found with the read head at the end of the chunk.
 */
static size_t unpack_u16_chunk(struct aa_ext *e, char **chunk)
{
	size_t size = 0;
	void *pos = e->pos;

	if (!inbounds(e, sizeof(u16)))
		goto fail;
	size = le16_to_cpu(get_unaligned((__le16 *) e->pos));
	e->pos += sizeof(__le16);
	if (!inbounds(e, size))
		goto fail;
	*chunk = e->pos;
	e->pos += size;
	return size;

fail:
	e->pos = pos;
	return 0;
}

/* unpack control byte */
static bool unpack_X(struct aa_ext *e, enum aa_code code)
{
	if (!inbounds(e, 1))
		return false;
	if (*(u8 *) e->pos != code)
		return false;
	e->pos++;
	return true;
}

/**
 * unpack_nameX - check if the next element is of type X with a name of @name
 * @e: serialized data extent information (NOT NULL)
 * @code: type code
 * @name: name to match to the serialized element.  (MAY BE NULL)
 *
 * check that the next serialized data element is of type X and has a tag
 * name @name.  If @name is specified then there must be a matching
 * name element in the stream.  If @name is NULL any name element will be
 * skipped and only the typecode will be tested.
 *
 * Returns true on success (both type code and name tests match) and the read
 * head is advanced past the headers
 *
 * Returns: false if either match fails, the read head does not move
 */
static bool unpack_nameX(struct aa_ext *e, enum aa_code code, const char *name)
{
	/*
	 * May need to reset pos if name or type doesn't match
	 */
	void *pos = e->pos;
	/*
	 * Check for presence of a tagname, and if present name size
	 * AA_NAME tag value is a u16.
	 */
	if (unpack_X(e, AA_NAME)) {
		char *tag = NULL;
		size_t size = unpack_u16_chunk(e, &tag);
		/* if a name is specified it must match. otherwise skip tag */
		if (name && (!size || tag[size-1] != '\0' || strcmp(name, tag)))
			goto fail;
	} else if (name) {
		/* if a name is specified and there is no name tag fail */
		goto fail;
	}

	/* now check if type code matches */
	if (unpack_X(e, code))
		return true;

fail:
	e->pos = pos;
	return false;
}
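
/*
 * Because unpack_nameX() and the primitive unpackers below restore e->pos on
 * any mismatch, callers can probe for optional named elements and simply fall
 * back to a default when the element is absent, e.g. (simplified from the
 * "path_flags" handling in unpack_profile()):
 *
 *	if (!unpack_u32(e, &profile->path_flags, "path_flags"))
 *		profile->path_flags = PATH_MEDIATE_DELETED;
 */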

static bool unpack_u8(struct aa_ext *e, u8 *data, const char *name)
{
	void *pos = e->pos;

	if (unpack_nameX(e, AA_U8, name)) {
		if (!inbounds(e, sizeof(u8)))
			goto fail;
		if (data)
			*data = *((u8 *)e->pos);
		e->pos += sizeof(u8);
		return true;
	}

fail:
	e->pos = pos;
	return false;
}

static bool unpack_u32(struct aa_ext *e, u32 *data, const char *name)
{
	void *pos = e->pos;

	if (unpack_nameX(e, AA_U32, name)) {
		if (!inbounds(e, sizeof(u32)))
			goto fail;
		if (data)
			*data = le32_to_cpu(get_unaligned((__le32 *) e->pos));
		e->pos += sizeof(u32);
		return true;
	}

fail:
	e->pos = pos;
	return false;
}

static bool unpack_u64(struct aa_ext *e, u64 *data, const char *name)
{
	void *pos = e->pos;

	if (unpack_nameX(e, AA_U64, name)) {
		if (!inbounds(e, sizeof(u64)))
			goto fail;
		if (data)
			*data = le64_to_cpu(get_unaligned((__le64 *) e->pos));
		e->pos += sizeof(u64);
		return true;
	}

fail:
	e->pos = pos;
	return false;
}

static size_t unpack_array(struct aa_ext *e, const char *name)
{
	void *pos = e->pos;

	if (unpack_nameX(e, AA_ARRAY, name)) {
		int size;
		if (!inbounds(e, sizeof(u16)))
			goto fail;
		size = (int)le16_to_cpu(get_unaligned((__le16 *) e->pos));
		e->pos += sizeof(u16);
		return size;
	}

fail:
	e->pos = pos;
	return 0;
}

static size_t unpack_blob(struct aa_ext *e, char **blob, const char *name)
{
	void *pos = e->pos;

	if (unpack_nameX(e, AA_BLOB, name)) {
		u32 size;
		if (!inbounds(e, sizeof(u32)))
			goto fail;
		size = le32_to_cpu(get_unaligned((__le32 *) e->pos));
		e->pos += sizeof(u32);
		if (inbounds(e, (size_t) size)) {
			*blob = e->pos;
			e->pos += size;
			return size;
		}
	}

fail:
	e->pos = pos;
	return 0;
}

static int unpack_str(struct aa_ext *e, const char **string, const char *name)
{
	char *src_str;
	size_t size = 0;
	void *pos = e->pos;
	*string = NULL;
	if (unpack_nameX(e, AA_STRING, name)) {
		size = unpack_u16_chunk(e, &src_str);
		if (size) {
			/* strings are null terminated, length is size - 1 */
			if (src_str[size - 1] != 0)
				goto fail;
			*string = src_str;

			return size;
		}
	}

fail:
	e->pos = pos;
	return 0;
}

static int unpack_strdup(struct aa_ext *e, char **string, const char *name)
{
	const char *tmp;
	void *pos = e->pos;
	int res = unpack_str(e, &tmp, name);
	*string = NULL;

	if (!res)
		return 0;

	*string = kmemdup(tmp, res, GFP_KERNEL);
	if (!*string) {
		e->pos = pos;
		return 0;
	}

	return res;
}
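
/*
 * Note: unpack_str()/unpack_strdup() return the chunk size, which includes
 * the trailing NUL (the string length is size - 1).  unpack_strdup() relies
 * on this when it kmemdup()s the whole chunk, so the copy is already NUL
 * terminated.
 */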

/**
 * unpack_dfa - unpack a file rule dfa
 * @e: serialized data extent information (NOT NULL)
 *
 * returns dfa or ERR_PTR or NULL if no dfa
 */
static struct aa_dfa *unpack_dfa(struct aa_ext *e)
{
	char *blob = NULL;
	size_t size;
	struct aa_dfa *dfa = NULL;

	size = unpack_blob(e, &blob, "aadfa");
	if (size) {
		/*
		 * The dfa is aligned within the blob to 8 bytes
		 * from the beginning of the stream.
		 * alignment adjust needed by dfa unpack
		 */
		size_t sz = blob - (char *) e->start -
			((e->pos - e->start) & 7);
		size_t pad = ALIGN(sz, 8) - sz;
		int flags = TO_ACCEPT1_FLAG(YYTD_DATA32) |
			TO_ACCEPT2_FLAG(YYTD_DATA32);
		if (aa_g_paranoid_load)
			flags |= DFA_FLAG_VERIFY_STATES;
		dfa = aa_dfa_unpack(blob + pad, size - pad, flags);

		if (IS_ERR(dfa))
			return dfa;

	}

	return dfa;
}

/**
 * unpack_trans_table - unpack a profile transition table
 * @e: serialized data extent information (NOT NULL)
 * @profile: profile to add the accept table to (NOT NULL)
 *
 * Returns: true if table successfully unpacked
 */
static bool unpack_trans_table(struct aa_ext *e, struct aa_profile *profile)
{
	void *saved_pos = e->pos;

	/* exec table is optional */
	if (unpack_nameX(e, AA_STRUCT, "xtable")) {
		int i, size;

		size = unpack_array(e, NULL);
		/* currently 4 exec bits and entries 0-3 are reserved iupcx */
		if (size > 16 - 4)
			goto fail;
		profile->file.trans.table = kcalloc(size, sizeof(char *),
						    GFP_KERNEL);
		if (!profile->file.trans.table)
			goto fail;

		profile->file.trans.size = size;
		for (i = 0; i < size; i++) {
			char *str;
			int c, j, pos, size2 = unpack_strdup(e, &str, NULL);
			/* unpack_strdup verifies that the last character is
			 * the null termination byte.
			 */
			if (!size2)
				goto fail;
			profile->file.trans.table[i] = str;
			/* verify that name doesn't start with space */
			if (isspace(*str))
				goto fail;

			/* count the number of internal \0 bytes */
			for (c = j = 0; j < size2 - 1; j++) {
				if (!str[j]) {
					pos = j;
					c++;
				}
			}
			if (*str == ':') {
				/* first character after : must be valid */
				if (!str[1])
					goto fail;
				/* beginning with : requires an embedded \0,
				 * verify that exactly 1 internal \0 exists
				 * trailing \0 already verified by unpack_strdup
				 *
				 * convert \0 back to : for label_parse
				 */
				if (c == 1)
					str[pos] = ':';
				else if (c > 1)
					goto fail;
			} else if (c)
				/* fail - all other cases with embedded \0 */
				goto fail;
		}
		if (!unpack_nameX(e, AA_ARRAYEND, NULL))
			goto fail;
		if (!unpack_nameX(e, AA_STRUCTEND, NULL))
			goto fail;
	}
	return true;

fail:
	aa_free_domain_entries(&profile->file.trans);
	e->pos = saved_pos;
	return false;
}
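
/*
 * Illustrative xtable entry (derived from the checks above): a transition
 * target beginning with ':' is typically qualified with a namespace and is
 * packed with the separator as an embedded \0, e.g.
 *
 *	":ns\0profile\0"
 *
 * unpack_trans_table() converts the single embedded \0 back to ':' so the
 * entry can later be handed to the label parsing code as ":ns:profile".
 */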

static bool unpack_xattrs(struct aa_ext *e, struct aa_profile *profile)
{
	void *pos = e->pos;

	if (unpack_nameX(e, AA_STRUCT, "xattrs")) {
		int i, size;

		size = unpack_array(e, NULL);
		profile->xattr_count = size;
		profile->xattrs = kcalloc(size, sizeof(char *), GFP_KERNEL);
		if (!profile->xattrs)
			goto fail;
		for (i = 0; i < size; i++) {
			if (!unpack_strdup(e, &profile->xattrs[i], NULL))
				goto fail;
		}
		if (!unpack_nameX(e, AA_ARRAYEND, NULL))
			goto fail;
		if (!unpack_nameX(e, AA_STRUCTEND, NULL))
			goto fail;
	}

	return true;

fail:
	e->pos = pos;
	return false;
}

static bool unpack_secmark(struct aa_ext *e, struct aa_profile *profile)
{
	void *pos = e->pos;
	int i, size;

	if (unpack_nameX(e, AA_STRUCT, "secmark")) {
		size = unpack_array(e, NULL);

		profile->secmark = kcalloc(size, sizeof(struct aa_secmark),
					   GFP_KERNEL);
		if (!profile->secmark)
			goto fail;

		profile->secmark_count = size;

		for (i = 0; i < size; i++) {
			if (!unpack_u8(e, &profile->secmark[i].audit, NULL))
				goto fail;
			if (!unpack_u8(e, &profile->secmark[i].deny, NULL))
				goto fail;
			if (!unpack_strdup(e, &profile->secmark[i].label, NULL))
				goto fail;
		}
		if (!unpack_nameX(e, AA_ARRAYEND, NULL))
			goto fail;
		if (!unpack_nameX(e, AA_STRUCTEND, NULL))
			goto fail;
	}

	return true;

fail:
	if (profile->secmark) {
		for (i = 0; i < size; i++)
			kfree(profile->secmark[i].label);
		kfree(profile->secmark);
		profile->secmark_count = 0;
		profile->secmark = NULL;
	}

	e->pos = pos;
	return false;
}

static bool unpack_rlimits(struct aa_ext *e, struct aa_profile *profile)
{
	void *pos = e->pos;

	/* rlimits are optional */
	if (unpack_nameX(e, AA_STRUCT, "rlimits")) {
		int i, size;
		u32 tmp = 0;
		if (!unpack_u32(e, &tmp, NULL))
			goto fail;
		profile->rlimits.mask = tmp;

		size = unpack_array(e, NULL);
		if (size > RLIM_NLIMITS)
			goto fail;
		for (i = 0; i < size; i++) {
			u64 tmp2 = 0;
			int a = aa_map_resource(i);
			if (!unpack_u64(e, &tmp2, NULL))
				goto fail;
			profile->rlimits.limits[a].rlim_max = tmp2;
		}
		if (!unpack_nameX(e, AA_ARRAYEND, NULL))
			goto fail;
		if (!unpack_nameX(e, AA_STRUCTEND, NULL))
			goto fail;
	}
	return true;

fail:
	e->pos = pos;
	return false;
}

static u32 strhash(const void *data, u32 len, u32 seed)
{
	const char * const *key = data;

	return jhash(*key, strlen(*key), seed);
}

static int datacmp(struct rhashtable_compare_arg *arg, const void *obj)
{
	const struct aa_data *data = obj;
	const char * const *key = arg->key;

	return strcmp(data->key, *key);
}
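
/*
 * strhash() and datacmp() are the rhashtable hooks for the profile's
 * key/value "data" table unpacked below: keys are NUL-terminated strings,
 * hashed with jhash() and compared with strcmp().
 */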

/**
 * unpack_profile - unpack a serialized profile
 * @e: serialized data extent information (NOT NULL)
 * @ns_name: Returns - newly allocated copy of the ns name if the profile
 *           name specified one, else NULL
 *
 * NOTE: unpack profile sets audit struct if there is a failure
 */
static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
{
	struct aa_profile *profile = NULL;
	const char *tmpname, *tmpns = NULL, *name = NULL;
	const char *info = "failed to unpack profile";
	size_t ns_len;
	struct rhashtable_params params = { 0 };
	char *key = NULL;
	struct aa_data *data;
	int i, error = -EPROTO;
	kernel_cap_t tmpcap;
	u32 tmp;

	*ns_name = NULL;

	/* check that we have the right struct being passed */
	if (!unpack_nameX(e, AA_STRUCT, "profile"))
		goto fail;
	if (!unpack_str(e, &name, NULL))
		goto fail;
	if (*name == '\0')
		goto fail;

	tmpname = aa_splitn_fqname(name, strlen(name), &tmpns, &ns_len);
	if (tmpns) {
		*ns_name = kstrndup(tmpns, ns_len, GFP_KERNEL);
		if (!*ns_name) {
			info = "out of memory";
			goto fail;
		}
		name = tmpname;
	}

	profile = aa_alloc_profile(name, NULL, GFP_KERNEL);
	if (!profile)
		return ERR_PTR(-ENOMEM);

	/* profile renaming is optional */
	(void) unpack_str(e, &profile->rename, "rename");

	/* attachment string is optional */
	(void) unpack_str(e, &profile->attach, "attach");

	/* xmatch is optional and may be NULL */
	profile->xmatch = unpack_dfa(e);
	if (IS_ERR(profile->xmatch)) {
		error = PTR_ERR(profile->xmatch);
		profile->xmatch = NULL;
		info = "bad xmatch";
		goto fail;
	}
	/* xmatch_len is not optional if xmatch is set */
	if (profile->xmatch) {
		if (!unpack_u32(e, &tmp, NULL)) {
			info = "missing xmatch len";
			goto fail;
		}
		profile->xmatch_len = tmp;
	}

	/* disconnected attachment string is optional */
	(void) unpack_str(e, &profile->disconnected, "disconnected");

	/* per profile debug flags (complain, audit) */
	if (!unpack_nameX(e, AA_STRUCT, "flags")) {
		info = "profile missing flags";
		goto fail;
	}
	info = "failed to unpack profile flags";
	if (!unpack_u32(e, &tmp, NULL))
		goto fail;
	if (tmp & PACKED_FLAG_HAT)
		profile->label.flags |= FLAG_HAT;
	if (tmp & PACKED_FLAG_DEBUG1)
		profile->label.flags |= FLAG_DEBUG1;
	if (tmp & PACKED_FLAG_DEBUG2)
		profile->label.flags |= FLAG_DEBUG2;
	if (!unpack_u32(e, &tmp, NULL))
		goto fail;
	if (tmp == PACKED_MODE_COMPLAIN || (e->version & FORCE_COMPLAIN_FLAG)) {
		profile->mode = APPARMOR_COMPLAIN;
	} else if (tmp == PACKED_MODE_ENFORCE) {
		profile->mode = APPARMOR_ENFORCE;
	} else if (tmp == PACKED_MODE_KILL) {
		profile->mode = APPARMOR_KILL;
	} else if (tmp == PACKED_MODE_UNCONFINED) {
		profile->mode = APPARMOR_UNCONFINED;
		profile->label.flags |= FLAG_UNCONFINED;
	} else {
		goto fail;
	}
	if (!unpack_u32(e, &tmp, NULL))
		goto fail;
	if (tmp)
		profile->audit = AUDIT_ALL;

	if (!unpack_nameX(e, AA_STRUCTEND, NULL))
		goto fail;

	/* path_flags is optional */
	if (unpack_u32(e, &profile->path_flags, "path_flags"))
		profile->path_flags |= profile->label.flags &
			PATH_MEDIATE_DELETED;
	else
		/* set a default value if path_flags field is not present */
		profile->path_flags = PATH_MEDIATE_DELETED;

	info = "failed to unpack profile capabilities";
	if (!unpack_u32(e, &(profile->caps.allow.cap[0]), NULL))
		goto fail;
	if (!unpack_u32(e, &(profile->caps.audit.cap[0]), NULL))
		goto fail;
	if (!unpack_u32(e, &(profile->caps.quiet.cap[0]), NULL))
		goto fail;
	if (!unpack_u32(e, &tmpcap.cap[0], NULL))
		goto fail;

	info = "failed to unpack upper profile capabilities";
	if (unpack_nameX(e, AA_STRUCT, "caps64")) {
		/* optional upper half of 64 bit caps */
		if (!unpack_u32(e, &(profile->caps.allow.cap[1]), NULL))
			goto fail;
		if (!unpack_u32(e, &(profile->caps.audit.cap[1]), NULL))
			goto fail;
		if (!unpack_u32(e, &(profile->caps.quiet.cap[1]), NULL))
			goto fail;
		if (!unpack_u32(e, &(tmpcap.cap[1]), NULL))
			goto fail;
		if (!unpack_nameX(e, AA_STRUCTEND, NULL))
			goto fail;
	}

	info = "failed to unpack extended profile capabilities";
	if (unpack_nameX(e, AA_STRUCT, "capsx")) {
		/* optional extended caps mediation mask */
		if (!unpack_u32(e, &(profile->caps.extended.cap[0]), NULL))
			goto fail;
		if (!unpack_u32(e, &(profile->caps.extended.cap[1]), NULL))
			goto fail;
		if (!unpack_nameX(e, AA_STRUCTEND, NULL))
			goto fail;
	}

	if (!unpack_xattrs(e, profile)) {
		info = "failed to unpack profile xattrs";
		goto fail;
	}

	if (!unpack_rlimits(e, profile)) {
		info = "failed to unpack profile rlimits";
		goto fail;
	}

	if (!unpack_secmark(e, profile)) {
		info = "failed to unpack profile secmark rules";
		goto fail;
	}
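
	/*
	 * The policydb and file dfas below are optional.  When the policydb
	 * is absent the profile uses the shared nulldfa; when the file dfa is
	 * absent it reuses the policydb's AA_CLASS_FILE start state, falling
	 * back to nulldfa if that is not available either.
	 */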

	if (unpack_nameX(e, AA_STRUCT, "policydb")) {
		/* generic policy dfa - optional and may be NULL */
		info = "failed to unpack policydb";
		profile->policy.dfa = unpack_dfa(e);
		if (IS_ERR(profile->policy.dfa)) {
			error = PTR_ERR(profile->policy.dfa);
			profile->policy.dfa = NULL;
			goto fail;
		} else if (!profile->policy.dfa) {
			error = -EPROTO;
			goto fail;
		}
		if (!unpack_u32(e, &profile->policy.start[0], "start"))
			/* default start state */
			profile->policy.start[0] = DFA_START;
		/* setup class index */
		for (i = AA_CLASS_FILE; i <= AA_CLASS_LAST; i++) {
			profile->policy.start[i] =
				aa_dfa_next(profile->policy.dfa,
					    profile->policy.start[0],
					    i);
		}
		if (!unpack_nameX(e, AA_STRUCTEND, NULL))
			goto fail;
	} else
		profile->policy.dfa = aa_get_dfa(nulldfa);

	/* get file rules */
	profile->file.dfa = unpack_dfa(e);
	if (IS_ERR(profile->file.dfa)) {
		error = PTR_ERR(profile->file.dfa);
		profile->file.dfa = NULL;
		info = "failed to unpack profile file rules";
		goto fail;
	} else if (profile->file.dfa) {
		if (!unpack_u32(e, &profile->file.start, "dfa_start"))
			/* default start state */
			profile->file.start = DFA_START;
	} else if (profile->policy.dfa &&
		   profile->policy.start[AA_CLASS_FILE]) {
		profile->file.dfa = aa_get_dfa(profile->policy.dfa);
		profile->file.start = profile->policy.start[AA_CLASS_FILE];
	} else
		profile->file.dfa = aa_get_dfa(nulldfa);

	if (!unpack_trans_table(e, profile)) {
		info = "failed to unpack profile transition table";
		goto fail;
	}

	if (unpack_nameX(e, AA_STRUCT, "data")) {
		info = "out of memory";
		profile->data = kzalloc(sizeof(*profile->data), GFP_KERNEL);
		if (!profile->data)
			goto fail;

		params.nelem_hint = 3;
		params.key_len = sizeof(void *);
		params.key_offset = offsetof(struct aa_data, key);
		params.head_offset = offsetof(struct aa_data, head);
		params.hashfn = strhash;
		params.obj_cmpfn = datacmp;

		if (rhashtable_init(profile->data, &params)) {
			info = "failed to init key, value hash table";
			goto fail;
		}

		while (unpack_strdup(e, &key, NULL)) {
			data = kzalloc(sizeof(*data), GFP_KERNEL);
			if (!data) {
				kfree_sensitive(key);
				goto fail;
			}

			data->key = key;
			data->size = unpack_blob(e, &data->data, NULL);
			data->data = kvmemdup(data->data, data->size);
			if (data->size && !data->data) {
				kfree_sensitive(data->key);
				kfree_sensitive(data);
				goto fail;
			}

			rhashtable_insert_fast(profile->data, &data->head,
					       profile->data->p);
		}

		if (!unpack_nameX(e, AA_STRUCTEND, NULL)) {
			info = "failed to unpack end of key, value data table";
			goto fail;
		}
	}

	if (!unpack_nameX(e, AA_STRUCTEND, NULL)) {
		info = "failed to unpack end of profile";
		goto fail;
	}

	return profile;

fail:
	if (profile)
		name = NULL;
	else if (!name)
		name = "unknown";
	audit_iface(profile, NULL, name, info, e, error);
	aa_free_profile(profile);

	return ERR_PTR(error);
}
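
/*
 * A policy load blob is a sequence of [header][profile] records.  The header
 * is required for the first record (aa_unpack() passes e.pos == e.start as
 * @required) and optional for subsequent ones; see verify_header() below.
 */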

/**
 * verify_header - unpack serialized stream header
 * @e: serialized data read head (NOT NULL)
 * @required: whether the header is required or optional
 * @ns: Returns - namespace if one is specified else NULL (NOT NULL)
 *
 * Returns: error or 0 if header is good
 */
static int verify_header(struct aa_ext *e, int required, const char **ns)
{
	int error = -EPROTONOSUPPORT;
	const char *name = NULL;
	*ns = NULL;

	/* get the interface version */
	if (!unpack_u32(e, &e->version, "version")) {
		if (required) {
			audit_iface(NULL, NULL, NULL, "invalid profile format",
				    e, error);
			return error;
		}
	}

	/* Check that the interface version is currently supported.
	 * if not specified use previous version
	 * Mask off everything that is not kernel abi version
	 */
	if (VERSION_LT(e->version, v5) || VERSION_GT(e->version, v7)) {
		audit_iface(NULL, NULL, NULL, "unsupported interface version",
			    e, error);
		return error;
	}

	/* read the namespace if present */
	if (unpack_str(e, &name, "namespace")) {
		if (*name == '\0') {
			audit_iface(NULL, NULL, NULL, "invalid namespace name",
				    e, error);
			return error;
		}
		if (*ns && strcmp(*ns, name)) {
			audit_iface(NULL, NULL, NULL, "invalid ns change", e,
				    error);
		} else if (!*ns) {
			*ns = kstrdup(name, GFP_KERNEL);
			if (!*ns)
				return -ENOMEM;
		}
	}

	return 0;
}

static bool verify_xindex(int xindex, int table_size)
{
	int index, xtype;
	xtype = xindex & AA_X_TYPE_MASK;
	index = xindex & AA_X_INDEX_MASK;
	if (xtype == AA_X_TABLE && index >= table_size)
		return false;
	return true;
}

/* verify dfa xindexes are in range of transition tables */
static bool verify_dfa_xindex(struct aa_dfa *dfa, int table_size)
{
	int i;
	for (i = 0; i < dfa->tables[YYTD_ID_ACCEPT]->td_lolen; i++) {
		if (!verify_xindex(dfa_user_xindex(dfa, i), table_size))
			return false;
		if (!verify_xindex(dfa_other_xindex(dfa, i), table_size))
			return false;
	}
	return true;
}

/**
 * verify_profile - Do post unpack analysis to verify profile consistency
 * @profile: profile to verify (NOT NULL)
 *
 * Returns: 0 if passes verification else error
 */
static int verify_profile(struct aa_profile *profile)
{
	if (profile->file.dfa &&
	    !verify_dfa_xindex(profile->file.dfa,
			       profile->file.trans.size)) {
		audit_iface(profile, NULL, NULL, "Invalid named transition",
			    NULL, -EPROTO);
		return -EPROTO;
	}

	return 0;
}

void aa_load_ent_free(struct aa_load_ent *ent)
{
	if (ent) {
		aa_put_profile(ent->rename);
		aa_put_profile(ent->old);
		aa_put_profile(ent->new);
		kfree(ent->ns_name);
		kfree_sensitive(ent);
	}
}

struct aa_load_ent *aa_load_ent_alloc(void)
{
	struct aa_load_ent *ent = kzalloc(sizeof(*ent), GFP_KERNEL);
	if (ent)
		INIT_LIST_HEAD(&ent->list);
	return ent;
}
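
/*
 * Note: when CONFIG_SECURITY_APPARMOR_EXPORT_BINARY is not set,
 * deflate_compress() below does not compress at all; it leaves *dst untouched
 * and reports *dlen = slen, so compress_loaddata() ends up with
 * compressed_size == size and the raw data is kept as-is.
 */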

static int deflate_compress(const char *src, size_t slen, char **dst,
			    size_t *dlen)
{
#ifdef CONFIG_SECURITY_APPARMOR_EXPORT_BINARY
	int error;
	struct z_stream_s strm;
	void *stgbuf, *dstbuf;
	size_t stglen = deflateBound(slen);

	memset(&strm, 0, sizeof(strm));

	if (stglen < slen)
		return -EFBIG;

	strm.workspace = kvzalloc(zlib_deflate_workspacesize(MAX_WBITS,
							     MAX_MEM_LEVEL),
				  GFP_KERNEL);
	if (!strm.workspace)
		return -ENOMEM;

	error = zlib_deflateInit(&strm, aa_g_rawdata_compression_level);
	if (error != Z_OK) {
		error = -ENOMEM;
		goto fail_deflate_init;
	}

	stgbuf = kvzalloc(stglen, GFP_KERNEL);
	if (!stgbuf) {
		error = -ENOMEM;
		goto fail_stg_alloc;
	}

	strm.next_in = src;
	strm.avail_in = slen;
	strm.next_out = stgbuf;
	strm.avail_out = stglen;

	error = zlib_deflate(&strm, Z_FINISH);
	if (error != Z_STREAM_END) {
		error = -EINVAL;
		goto fail_deflate;
	}
	error = 0;

	if (is_vmalloc_addr(stgbuf)) {
		dstbuf = kvzalloc(strm.total_out, GFP_KERNEL);
		if (dstbuf) {
			memcpy(dstbuf, stgbuf, strm.total_out);
			kvfree(stgbuf);
		}
	} else
		/*
		 * If the staging buffer was kmalloc'd, then using krealloc is
		 * probably going to be faster. The destination buffer will
		 * always be smaller, so it's just shrunk, avoiding a memcpy
		 */
		dstbuf = krealloc(stgbuf, strm.total_out, GFP_KERNEL);

	if (!dstbuf) {
		error = -ENOMEM;
		goto fail_deflate;
	}

	*dst = dstbuf;
	*dlen = strm.total_out;

fail_stg_alloc:
	zlib_deflateEnd(&strm);
fail_deflate_init:
	kvfree(strm.workspace);
	return error;

fail_deflate:
	kvfree(stgbuf);
	goto fail_stg_alloc;
#else
	*dlen = slen;
	return 0;
#endif
}

static int compress_loaddata(struct aa_loaddata *data)
{
	AA_BUG(data->compressed_size > 0);

	/*
	 * Shortcut the no compression case, else we increase the amount of
	 * storage required by a small amount
	 */
	if (aa_g_rawdata_compression_level != 0) {
		void *udata = data->data;
		int error = deflate_compress(udata, data->size, &data->data,
					     &data->compressed_size);
		if (error)
			return error;

		if (udata != data->data)
			kvfree(udata);
	} else
		data->compressed_size = data->size;

	return 0;
}
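
/*
 * Sketch of the caller flow (outside this file; see aa_replace_profiles() in
 * security/apparmor/policy.c): raw policy written to apparmorfs is wrapped in
 * an aa_loaddata, aa_unpack() below turns it into a list of aa_load_ent
 * entries, and the caller then audits and replaces the corresponding profiles
 * in the target namespace.
 */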

/**
 * aa_unpack - unpack packed binary profile(s) data loaded from user space
 * @udata: user data copied to kmem (NOT NULL)
 * @lh: list to place unpacked profiles in an aa_repl_ws
 * @ns: Returns namespace profile is in if specified else NULL (NOT NULL)
 *
 * Unpack user data and return refcounted allocated profile(s) stored in
 * @lh in order of discovery, with the list chain stored in base.list.
 *
 * Returns: 0 with profile(s) on @lh, else error if the data fails to unpack
 */
int aa_unpack(struct aa_loaddata *udata, struct list_head *lh,
	      const char **ns)
{
	struct aa_load_ent *tmp, *ent;
	struct aa_profile *profile = NULL;
	int error;
	struct aa_ext e = {
		.start = udata->data,
		.end = udata->data + udata->size,
		.pos = udata->data,
	};

	*ns = NULL;
	while (e.pos < e.end) {
		char *ns_name = NULL;
		void *start;
		error = verify_header(&e, e.pos == e.start, ns);
		if (error)
			goto fail;

		start = e.pos;
		profile = unpack_profile(&e, &ns_name);
		if (IS_ERR(profile)) {
			error = PTR_ERR(profile);
			goto fail;
		}

		error = verify_profile(profile);
		if (error)
			goto fail_profile;

		if (aa_g_hash_policy)
			error = aa_calc_profile_hash(profile, e.version, start,
						     e.pos - start);
		if (error)
			goto fail_profile;

		ent = aa_load_ent_alloc();
		if (!ent) {
			error = -ENOMEM;
			goto fail_profile;
		}

		ent->new = profile;
		ent->ns_name = ns_name;
		list_add_tail(&ent->list, lh);
	}
	udata->abi = e.version & K_ABI_MASK;
	if (aa_g_hash_policy) {
		udata->hash = aa_calc_hash(udata->data, udata->size);
		if (IS_ERR(udata->hash)) {
			error = PTR_ERR(udata->hash);
			udata->hash = NULL;
			goto fail;
		}
	}

	if (aa_g_export_binary) {
		error = compress_loaddata(udata);
		if (error)
			goto fail;
	}
	return 0;

fail_profile:
	aa_put_profile(profile);

fail:
	list_for_each_entry_safe(ent, tmp, lh, list) {
		list_del_init(&ent->list);
		aa_load_ent_free(ent);
	}

	return error;
}

#ifdef CONFIG_SECURITY_APPARMOR_KUNIT_TEST
#include "policy_unpack_test.c"
#endif /* CONFIG_SECURITY_APPARMOR_KUNIT_TEST */