// SPDX-License-Identifier: GPL-2.0-only
/*
 * AppArmor security module
 *
 * This file contains AppArmor functions for unpacking policy loaded from
 * userspace.
 *
 * Copyright (C) 1998-2008 Novell/SUSE
 * Copyright 2009-2010 Canonical Ltd.
 *
 * AppArmor uses a serialized binary format for loading policy. To find
 * policy format documentation see Documentation/admin-guide/LSM/apparmor.rst
 * All policy is validated before it is used.
 */

#include <asm/unaligned.h>
#include <linux/ctype.h>
#include <linux/errno.h>
#include <linux/zlib.h>

#include "include/apparmor.h"
#include "include/audit.h"
#include "include/cred.h"
#include "include/crypto.h"
#include "include/match.h"
#include "include/path.h"
#include "include/policy.h"
#include "include/policy_unpack.h"

#define K_ABI_MASK 0x3ff
#define FORCE_COMPLAIN_FLAG 0x800
#define VERSION_LT(X, Y) (((X) & K_ABI_MASK) < ((Y) & K_ABI_MASK))
#define VERSION_GT(X, Y) (((X) & K_ABI_MASK) > ((Y) & K_ABI_MASK))

#define v5	5	/* base version */
#define v6	6	/* per entry policydb mediation check */
#define v7	7
#define v8	8	/* full network masking */

/*
 * The AppArmor interface treats data as a type byte followed by the
 * actual data.  The interface has the notion of a named entry
 * which has a name (AA_NAME typecode followed by name string) followed by
 * the entry's typecode and data.  Named types allow for optional
 * elements and extensions to be added and tested for without breaking
 * backwards compatibility.
 */

enum aa_code {
	AA_U8,
	AA_U16,
	AA_U32,
	AA_U64,
	AA_NAME,	/* same as string except it is items name */
	AA_STRING,
	AA_BLOB,
	AA_STRUCT,
	AA_STRUCTEND,
	AA_LIST,
	AA_LISTEND,
	AA_ARRAY,
	AA_ARRAYEND,
};
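
/*
 * For illustration (derived from the unpack helpers below, not from the
 * stream itself): a named u32 element "version" with value 5 would
 * typically be laid out as
 *
 *   AA_NAME, le16 size 8, "version\0", AA_U32, le32 5
 *
 * i.e. the AA_NAME typecode, a u16 sized chunk carrying the NUL terminated
 * tag, then the element's own typecode and data.  See unpack_nameX() and
 * unpack_u32().
 */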

/*
 * aa_ext is the read of the buffer containing the serialized profile.  The
 * data is copied into a kernel buffer in apparmorfs and then handed off to
 * the unpack routines.
 */
struct aa_ext {
	void *start;
	void *end;
	void *pos;		/* pointer to current position in the buffer */
	u32 version;
};

/* audit callback for unpack fields */
static void audit_cb(struct audit_buffer *ab, void *va)
{
	struct common_audit_data *sa = va;

	if (aad(sa)->iface.ns) {
		audit_log_format(ab, " ns=");
		audit_log_untrustedstring(ab, aad(sa)->iface.ns);
	}
	if (aad(sa)->name) {
		audit_log_format(ab, " name=");
		audit_log_untrustedstring(ab, aad(sa)->name);
	}
	if (aad(sa)->iface.pos)
		audit_log_format(ab, " offset=%ld", aad(sa)->iface.pos);
}

/**
 * audit_iface - do audit message for policy unpacking/load/replace/remove
 * @new: profile if it has been allocated (MAYBE NULL)
 * @ns_name: name of the ns the profile is to be loaded to (MAY BE NULL)
 * @name: name of the profile being manipulated (MAYBE NULL)
 * @info: any extra info about the failure (MAYBE NULL)
 * @e: buffer position info
 * @error: error code
 *
 * Returns: %0 or error
 */
static int audit_iface(struct aa_profile *new, const char *ns_name,
		       const char *name, const char *info, struct aa_ext *e,
		       int error)
{
	struct aa_profile *profile = labels_profile(aa_current_raw_label());
	DEFINE_AUDIT_DATA(sa, LSM_AUDIT_DATA_NONE, NULL);

	if (e)
		aad(&sa)->iface.pos = e->pos - e->start;
	aad(&sa)->iface.ns = ns_name;
	if (new)
		aad(&sa)->name = new->base.hname;
	else
		aad(&sa)->name = name;
	aad(&sa)->info = info;
	aad(&sa)->error = error;

	return aa_audit(AUDIT_APPARMOR_STATUS, profile, &sa, audit_cb);
}

void __aa_loaddata_update(struct aa_loaddata *data, long revision)
{
	AA_BUG(!data);
	AA_BUG(!data->ns);
	AA_BUG(!data->dents[AAFS_LOADDATA_REVISION]);
	AA_BUG(!mutex_is_locked(&data->ns->lock));
	AA_BUG(data->revision > revision);

	data->revision = revision;
	d_inode(data->dents[AAFS_LOADDATA_DIR])->i_mtime =
		current_time(d_inode(data->dents[AAFS_LOADDATA_DIR]));
	d_inode(data->dents[AAFS_LOADDATA_REVISION])->i_mtime =
		current_time(d_inode(data->dents[AAFS_LOADDATA_REVISION]));
}

bool aa_rawdata_eq(struct aa_loaddata *l, struct aa_loaddata *r)
{
	if (l->size != r->size)
		return false;
	if (l->compressed_size != r->compressed_size)
		return false;
	if (aa_g_hash_policy && memcmp(l->hash, r->hash, aa_hash_size()) != 0)
		return false;
	return memcmp(l->data, r->data, r->compressed_size ?: r->size) == 0;
}

/*
 * need to take the ns mutex lock which is NOT safe most places that
 * put_loaddata is called, so we have to delay freeing it
 */
static void do_loaddata_free(struct work_struct *work)
{
	struct aa_loaddata *d = container_of(work, struct aa_loaddata, work);
	struct aa_ns *ns = aa_get_ns(d->ns);

	if (ns) {
		mutex_lock_nested(&ns->lock, ns->level);
		__aa_fs_remove_rawdata(d);
		mutex_unlock(&ns->lock);
		aa_put_ns(ns);
	}

	kfree_sensitive(d->hash);
	kfree_sensitive(d->name);
	kvfree(d->data);
	kfree_sensitive(d);
}

void aa_loaddata_kref(struct kref *kref)
{
	struct aa_loaddata *d = container_of(kref, struct aa_loaddata, count);

	if (d) {
		INIT_WORK(&d->work, do_loaddata_free);
		schedule_work(&d->work);
	}
}
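
/*
 * aa_loaddata_alloc - allocate a loaddata container for @size bytes of
 * policy data.  The data buffer itself is kvzalloc'd so large policy loads
 * do not require physically contiguous memory.
 *
 * Returns: refcounted loaddata on success else ERR_PTR(-ENOMEM)
 */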
struct aa_loaddata *aa_loaddata_alloc(size_t size)
{
	struct aa_loaddata *d;

	d = kzalloc(sizeof(*d), GFP_KERNEL);
	if (d == NULL)
		return ERR_PTR(-ENOMEM);
	d->data = kvzalloc(size, GFP_KERNEL);
	if (!d->data) {
		kfree(d);
		return ERR_PTR(-ENOMEM);
	}
	kref_init(&d->count);
	INIT_LIST_HEAD(&d->list);

	return d;
}

/* test if read will be in packed data bounds */
static bool inbounds(struct aa_ext *e, size_t size)
{
	return (size <= e->end - e->pos);
}

static void *kvmemdup(const void *src, size_t len)
{
	void *p = kvmalloc(len, GFP_KERNEL);

	if (p)
		memcpy(p, src, len);
	return p;
}

/**
 * unpack_u16_chunk - test and do bounds checking for a u16 size based chunk
 * @e: serialized data read head (NOT NULL)
 * @chunk: start address for chunk of data (NOT NULL)
 *
 * Returns: the size of chunk found with the read head at the end of the chunk.
 */
static size_t unpack_u16_chunk(struct aa_ext *e, char **chunk)
{
	size_t size = 0;
	void *pos = e->pos;

	if (!inbounds(e, sizeof(u16)))
		goto fail;
	size = le16_to_cpu(get_unaligned((__le16 *) e->pos));
	e->pos += sizeof(__le16);
	if (!inbounds(e, size))
		goto fail;
	*chunk = e->pos;
	e->pos += size;
	return size;

fail:
	e->pos = pos;
	return 0;
}

/* unpack control byte */
static bool unpack_X(struct aa_ext *e, enum aa_code code)
{
	if (!inbounds(e, 1))
		return false;
	if (*(u8 *) e->pos != code)
		return false;
	e->pos++;
	return true;
}
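
/*
 * The unpack helpers that follow (unpack_nameX, unpack_u8/u32/u64,
 * unpack_array, unpack_blob, unpack_str, unpack_strdup) all share the same
 * pattern: remember the current read head, try to consume an (optionally
 * named) element of the expected type, and on any mismatch or bounds
 * failure restore the read head and report failure.  This lets callers
 * probe for optional elements without corrupting the unpack position.
 */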

/**
 * unpack_nameX - check if the next element is of type X with a name of @name
 * @e: serialized data extent information (NOT NULL)
 * @code: type code
 * @name: name to match to the serialized element. (MAYBE NULL)
 *
 * check that the next serialized data element is of type X and has a tag
 * name @name.  If @name is specified then there must be a matching
 * name element in the stream.  If @name is NULL any name element will be
 * skipped and only the typecode will be tested.
 *
 * Returns true on success (both type code and name tests match) and the read
 * head is advanced past the headers
 *
 * Returns: false if either match fails, the read head does not move
 */
static bool unpack_nameX(struct aa_ext *e, enum aa_code code, const char *name)
{
	/*
	 * May need to reset pos if name or type doesn't match
	 */
	void *pos = e->pos;
	/*
	 * Check for presence of a tagname, and if present name size
	 * AA_NAME tag value is a u16.
	 */
	if (unpack_X(e, AA_NAME)) {
		char *tag = NULL;
		size_t size = unpack_u16_chunk(e, &tag);

		/* if a name is specified it must match. otherwise skip tag */
		if (name && (!size || tag[size-1] != '\0' || strcmp(name, tag)))
			goto fail;
	} else if (name) {
		/* if a name is specified and there is no name tag fail */
		goto fail;
	}

	/* now check if type code matches */
	if (unpack_X(e, code))
		return true;

fail:
	e->pos = pos;
	return false;
}

static bool unpack_u8(struct aa_ext *e, u8 *data, const char *name)
{
	void *pos = e->pos;

	if (unpack_nameX(e, AA_U8, name)) {
		if (!inbounds(e, sizeof(u8)))
			goto fail;
		if (data)
			*data = *((u8 *)e->pos);
		e->pos += sizeof(u8);
		return true;
	}

fail:
	e->pos = pos;
	return false;
}

static bool unpack_u32(struct aa_ext *e, u32 *data, const char *name)
{
	void *pos = e->pos;

	if (unpack_nameX(e, AA_U32, name)) {
		if (!inbounds(e, sizeof(u32)))
			goto fail;
		if (data)
			*data = le32_to_cpu(get_unaligned((__le32 *) e->pos));
		e->pos += sizeof(u32);
		return true;
	}

fail:
	e->pos = pos;
	return false;
}

static bool unpack_u64(struct aa_ext *e, u64 *data, const char *name)
{
	void *pos = e->pos;

	if (unpack_nameX(e, AA_U64, name)) {
		if (!inbounds(e, sizeof(u64)))
			goto fail;
		if (data)
			*data = le64_to_cpu(get_unaligned((__le64 *) e->pos));
		e->pos += sizeof(u64);
		return true;
	}

fail:
	e->pos = pos;
	return false;
}

static size_t unpack_array(struct aa_ext *e, const char *name)
{
	void *pos = e->pos;

	if (unpack_nameX(e, AA_ARRAY, name)) {
		int size;

		if (!inbounds(e, sizeof(u16)))
			goto fail;
		size = (int)le16_to_cpu(get_unaligned((__le16 *) e->pos));
		e->pos += sizeof(u16);
		return size;
	}

fail:
	e->pos = pos;
	return 0;
}

static size_t unpack_blob(struct aa_ext *e, char **blob, const char *name)
{
	void *pos = e->pos;

	if (unpack_nameX(e, AA_BLOB, name)) {
		u32 size;

		if (!inbounds(e, sizeof(u32)))
			goto fail;
		size = le32_to_cpu(get_unaligned((__le32 *) e->pos));
		e->pos += sizeof(u32);
		if (inbounds(e, (size_t) size)) {
			*blob = e->pos;
			e->pos += size;
			return size;
		}
	}

fail:
	e->pos = pos;
	return 0;
}

static int unpack_str(struct aa_ext *e, const char **string, const char *name)
{
	char *src_str;
	size_t size = 0;
	void *pos = e->pos;

	*string = NULL;
	if (unpack_nameX(e, AA_STRING, name)) {
		size = unpack_u16_chunk(e, &src_str);
		if (size) {
			/* strings are null terminated, length is size - 1 */
			if (src_str[size - 1] != 0)
				goto fail;
			*string = src_str;

			return size;
		}
	}

fail:
	e->pos = pos;
	return 0;
}

static int unpack_strdup(struct aa_ext *e, char **string, const char *name)
{
	const char *tmp;
	void *pos = e->pos;
	int res = unpack_str(e, &tmp, name);

	*string = NULL;
	if (!res)
		return 0;

	*string = kmemdup(tmp, res, GFP_KERNEL);
	if (!*string) {
		e->pos = pos;
		return 0;
	}

	return res;
}

/**
 * unpack_dfa - unpack a file rule dfa
 * @e: serialized data extent information (NOT NULL)
 *
 * returns dfa or ERR_PTR or NULL if no dfa
 */
static struct aa_dfa *unpack_dfa(struct aa_ext *e)
{
	char *blob = NULL;
	size_t size;
	struct aa_dfa *dfa = NULL;

	size = unpack_blob(e, &blob, "aadfa");
	if (size) {
		/*
		 * The dfa is aligned within the blob to 8 bytes
		 * from the beginning of the stream.
		 * alignment adjust needed by dfa unpack
		 */
		size_t sz = blob - (char *) e->start -
			((e->pos - e->start) & 7);
		size_t pad = ALIGN(sz, 8) - sz;
		int flags = TO_ACCEPT1_FLAG(YYTD_DATA32) |
			TO_ACCEPT2_FLAG(YYTD_DATA32) | DFA_FLAG_VERIFY_STATES;

		dfa = aa_dfa_unpack(blob + pad, size - pad, flags);
		if (IS_ERR(dfa))
			return dfa;

	}

	return dfa;
}

/**
 * unpack_trans_table - unpack a profile transition table
 * @e: serialized data extent information (NOT NULL)
 * @profile: profile to add the accept table to (NOT NULL)
 *
 * Returns: true if table successfully unpacked
 */
static bool unpack_trans_table(struct aa_ext *e, struct aa_profile *profile)
{
	void *saved_pos = e->pos;

	/* exec table is optional */
	if (unpack_nameX(e, AA_STRUCT, "xtable")) {
		int i, size;

		size = unpack_array(e, NULL);
		/* currently 4 exec bits and entries 0-3 are reserved iupcx */
		if (size > 16 - 4)
			goto fail;
		profile->file.trans.table = kcalloc(size, sizeof(char *),
						    GFP_KERNEL);
		if (!profile->file.trans.table)
			goto fail;

		profile->file.trans.size = size;
		for (i = 0; i < size; i++) {
			char *str;
			int c, j, pos, size2 = unpack_strdup(e, &str, NULL);

			/* unpack_strdup verifies that the last character is
			 * a null termination byte.
			 */
			if (!size2)
				goto fail;
			profile->file.trans.table[i] = str;
			/* verify that name doesn't start with space */
			if (isspace(*str))
				goto fail;

			/* count the number of internal \0 */
			for (c = j = 0; j < size2 - 1; j++) {
				if (!str[j]) {
					pos = j;
					c++;
				}
			}
			if (*str == ':') {
				/* first character after : must be valid */
				if (!str[1])
					goto fail;
				/* beginning with : requires an embedded \0,
				 * verify that exactly 1 internal \0 exists
				 * trailing \0 already verified by unpack_strdup
				 *
				 * convert \0 back to : for label_parse
				 */
				if (c == 1)
					str[pos] = ':';
				else if (c > 1)
					goto fail;
			} else if (c)
				/* fail - all other cases with embedded \0 */
				goto fail;
		}
		if (!unpack_nameX(e, AA_ARRAYEND, NULL))
			goto fail;
		if (!unpack_nameX(e, AA_STRUCTEND, NULL))
			goto fail;
	}
	return true;

fail:
	aa_free_domain_entries(&profile->file.trans);
	e->pos = saved_pos;
	return false;
}

static bool unpack_xattrs(struct aa_ext *e, struct aa_profile *profile)
{
	void *pos = e->pos;

	if (unpack_nameX(e, AA_STRUCT, "xattrs")) {
		int i, size;

		size = unpack_array(e, NULL);
		profile->xattr_count = size;
		profile->xattrs = kcalloc(size, sizeof(char *), GFP_KERNEL);
		if (!profile->xattrs)
			goto fail;
		for (i = 0; i < size; i++) {
			if (!unpack_strdup(e, &profile->xattrs[i], NULL))
				goto fail;
		}
		if (!unpack_nameX(e, AA_ARRAYEND, NULL))
			goto fail;
		if (!unpack_nameX(e, AA_STRUCTEND, NULL))
			goto fail;
	}

	return true;

fail:
	e->pos = pos;
	return false;
}
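
/*
 * unpack_secmark - unpack the optional "secmark" struct: an array of
 * (audit flag, deny flag, label string) entries used for secmark based
 * network mediation.  On failure any partially unpacked entries are freed
 * before the read head is restored.
 */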
static bool unpack_secmark(struct aa_ext *e, struct aa_profile *profile)
{
	void *pos = e->pos;
	int i, size;

	if (unpack_nameX(e, AA_STRUCT, "secmark")) {
		size = unpack_array(e, NULL);

		profile->secmark = kcalloc(size, sizeof(struct aa_secmark),
					   GFP_KERNEL);
		if (!profile->secmark)
			goto fail;

		profile->secmark_count = size;

		for (i = 0; i < size; i++) {
			if (!unpack_u8(e, &profile->secmark[i].audit, NULL))
				goto fail;
			if (!unpack_u8(e, &profile->secmark[i].deny, NULL))
				goto fail;
			if (!unpack_strdup(e, &profile->secmark[i].label, NULL))
				goto fail;
		}
		if (!unpack_nameX(e, AA_ARRAYEND, NULL))
			goto fail;
		if (!unpack_nameX(e, AA_STRUCTEND, NULL))
			goto fail;
	}

	return true;

fail:
	if (profile->secmark) {
		for (i = 0; i < size; i++)
			kfree(profile->secmark[i].label);
		kfree(profile->secmark);
		profile->secmark_count = 0;
		profile->secmark = NULL;
	}

	e->pos = pos;
	return false;
}

static bool unpack_rlimits(struct aa_ext *e, struct aa_profile *profile)
{
	void *pos = e->pos;

	/* rlimits are optional */
	if (unpack_nameX(e, AA_STRUCT, "rlimits")) {
		int i, size;
		u32 tmp = 0;

		if (!unpack_u32(e, &tmp, NULL))
			goto fail;
		profile->rlimits.mask = tmp;

		size = unpack_array(e, NULL);
		if (size > RLIM_NLIMITS)
			goto fail;
		for (i = 0; i < size; i++) {
			u64 tmp2 = 0;
			int a = aa_map_resource(i);

			if (!unpack_u64(e, &tmp2, NULL))
				goto fail;
			profile->rlimits.limits[a].rlim_max = tmp2;
		}
		if (!unpack_nameX(e, AA_ARRAYEND, NULL))
			goto fail;
		if (!unpack_nameX(e, AA_STRUCTEND, NULL))
			goto fail;
	}
	return true;

fail:
	e->pos = pos;
	return false;
}

static u32 strhash(const void *data, u32 len, u32 seed)
{
	const char * const *key = data;

	return jhash(*key, strlen(*key), seed);
}

static int datacmp(struct rhashtable_compare_arg *arg, const void *obj)
{
	const struct aa_data *data = obj;
	const char * const *key = arg->key;

	return strcmp(data->key, *key);
}

/**
 * unpack_profile - unpack a serialized profile
 * @e: serialized data extent information (NOT NULL)
 * @ns_name: Returns - name of the ns the profile belongs to, if specified,
 *           else NULL (NOT NULL)
 *
 * NOTE: unpack profile sets audit struct if there is a failure
 */
static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
{
	struct aa_profile *profile = NULL;
	const char *tmpname, *tmpns = NULL, *name = NULL;
	const char *info = "failed to unpack profile";
	size_t ns_len;
	struct rhashtable_params params = { 0 };
	char *key = NULL;
	struct aa_data *data;
	int i, error = -EPROTO;
	kernel_cap_t tmpcap;
	u32 tmp;

	*ns_name = NULL;

	/* check that we have the right struct being passed */
	if (!unpack_nameX(e, AA_STRUCT, "profile"))
		goto fail;
	if (!unpack_str(e, &name, NULL))
		goto fail;
	if (*name == '\0')
		goto fail;

	tmpname = aa_splitn_fqname(name, strlen(name), &tmpns, &ns_len);
	if (tmpns) {
		*ns_name = kstrndup(tmpns, ns_len, GFP_KERNEL);
		if (!*ns_name) {
			info = "out of memory";
			goto fail;
		}
		name = tmpname;
	}

	profile = aa_alloc_profile(name, NULL, GFP_KERNEL);
	if (!profile)
		return ERR_PTR(-ENOMEM);

	/* profile renaming is optional */
	(void) unpack_str(e, &profile->rename, "rename");

	/* attachment string is optional */
	(void) unpack_str(e, &profile->attach, "attach");

	/* xmatch is optional and may be NULL */
	profile->xmatch = unpack_dfa(e);
	if (IS_ERR(profile->xmatch)) {
		error = PTR_ERR(profile->xmatch);
		profile->xmatch = NULL;
		info = "bad xmatch";
		goto fail;
	}
	/* xmatch_len is not optional if xmatch is set */
	if (profile->xmatch) {
		if (!unpack_u32(e, &tmp, NULL)) {
			info = "missing xmatch len";
			goto fail;
		}
		profile->xmatch_len = tmp;
	}

	/* disconnected attachment string is optional */
	(void) unpack_str(e, &profile->disconnected, "disconnected");

	/* per profile debug flags (complain, audit) */
	if (!unpack_nameX(e, AA_STRUCT, "flags")) {
		info = "profile missing flags";
		goto fail;
	}
	info = "failed to unpack profile flags";
	if (!unpack_u32(e, &tmp, NULL))
		goto fail;
	if (tmp & PACKED_FLAG_HAT)
		profile->label.flags |= FLAG_HAT;
	if (!unpack_u32(e, &tmp, NULL))
		goto fail;
	if (tmp == PACKED_MODE_COMPLAIN || (e->version & FORCE_COMPLAIN_FLAG))
		profile->mode = APPARMOR_COMPLAIN;
	else if (tmp == PACKED_MODE_ENFORCE)
		profile->mode = APPARMOR_ENFORCE;
	else if (tmp == PACKED_MODE_KILL)
		profile->mode = APPARMOR_KILL;
	else if (tmp == PACKED_MODE_UNCONFINED)
		profile->mode = APPARMOR_UNCONFINED;
	else
		goto fail;
	if (!unpack_u32(e, &tmp, NULL))
		goto fail;
	if (tmp)
		profile->audit = AUDIT_ALL;

	if (!unpack_nameX(e, AA_STRUCTEND, NULL))
		goto fail;

	/* path_flags is optional */
	if (unpack_u32(e, &profile->path_flags, "path_flags"))
		profile->path_flags |= profile->label.flags &
			PATH_MEDIATE_DELETED;
	else
		/* set a default value if path_flags field is not present */
		profile->path_flags = PATH_MEDIATE_DELETED;

	info = "failed to unpack profile capabilities";
	if (!unpack_u32(e, &(profile->caps.allow.cap[0]), NULL))
		goto fail;
	if (!unpack_u32(e, &(profile->caps.audit.cap[0]), NULL))
		goto fail;
	if (!unpack_u32(e, &(profile->caps.quiet.cap[0]), NULL))
		goto fail;
	if (!unpack_u32(e, &tmpcap.cap[0], NULL))
		goto fail;

	info = "failed to unpack upper profile capabilities";
	if (unpack_nameX(e, AA_STRUCT, "caps64")) {
		/* optional upper half of 64 bit caps */
		if (!unpack_u32(e, &(profile->caps.allow.cap[1]), NULL))
			goto fail;
		if (!unpack_u32(e, &(profile->caps.audit.cap[1]), NULL))
			goto fail;
		if (!unpack_u32(e, &(profile->caps.quiet.cap[1]), NULL))
			goto fail;
		if (!unpack_u32(e, &(tmpcap.cap[1]), NULL))
			goto fail;
		if (!unpack_nameX(e, AA_STRUCTEND, NULL))
			goto fail;
	}

	info = "failed to unpack extended profile capabilities";
	if (unpack_nameX(e, AA_STRUCT, "capsx")) {
		/* optional extended caps mediation mask */
		if (!unpack_u32(e, &(profile->caps.extended.cap[0]), NULL))
			goto fail;
		if (!unpack_u32(e, &(profile->caps.extended.cap[1]), NULL))
			goto fail;
		if (!unpack_nameX(e, AA_STRUCTEND, NULL))
			goto fail;
	}

	if (!unpack_xattrs(e, profile)) {
		info = "failed to unpack profile xattrs";
		goto fail;
	}

	if (!unpack_rlimits(e, profile)) {
		info = "failed to unpack profile rlimits";
		goto fail;
	}

	if (!unpack_secmark(e, profile)) {
		info = "failed to unpack profile secmark rules";
		goto fail;
	}

	if (unpack_nameX(e, AA_STRUCT, "policydb")) {
		/* generic policy dfa - optional and may be NULL */
		info = "failed to unpack policydb";
		profile->policy.dfa = unpack_dfa(e);
		if (IS_ERR(profile->policy.dfa)) {
			error = PTR_ERR(profile->policy.dfa);
			profile->policy.dfa = NULL;
			goto fail;
		} else if (!profile->policy.dfa) {
			error = -EPROTO;
			goto fail;
		}
		if (!unpack_u32(e, &profile->policy.start[0], "start"))
			/* default start state */
			profile->policy.start[0] = DFA_START;
		/* setup class index */
		for (i = AA_CLASS_FILE; i <= AA_CLASS_LAST; i++) {
			profile->policy.start[i] =
				aa_dfa_next(profile->policy.dfa,
					    profile->policy.start[0],
					    i);
		}
		if (!unpack_nameX(e, AA_STRUCTEND, NULL))
			goto fail;
	} else
		profile->policy.dfa = aa_get_dfa(nulldfa);

	/* get file rules */
	profile->file.dfa = unpack_dfa(e);
	if (IS_ERR(profile->file.dfa)) {
		error = PTR_ERR(profile->file.dfa);
		profile->file.dfa = NULL;
		info = "failed to unpack profile file rules";
		goto fail;
	} else if (profile->file.dfa) {
		if (!unpack_u32(e, &profile->file.start, "dfa_start"))
			/* default start state */
			profile->file.start = DFA_START;
	} else if (profile->policy.dfa &&
		   profile->policy.start[AA_CLASS_FILE]) {
		profile->file.dfa = aa_get_dfa(profile->policy.dfa);
		profile->file.start = profile->policy.start[AA_CLASS_FILE];
	} else
		profile->file.dfa = aa_get_dfa(nulldfa);

	if (!unpack_trans_table(e, profile)) {
		info = "failed to unpack profile transition table";
		goto fail;
	}

	if (unpack_nameX(e, AA_STRUCT, "data")) {
		info = "out of memory";
		profile->data = kzalloc(sizeof(*profile->data), GFP_KERNEL);
		if (!profile->data)
			goto fail;

		params.nelem_hint = 3;
		params.key_len = sizeof(void *);
		params.key_offset = offsetof(struct aa_data, key);
		params.head_offset = offsetof(struct aa_data, head);
		params.hashfn = strhash;
		params.obj_cmpfn = datacmp;

		if (rhashtable_init(profile->data, &params)) {
			info = "failed to init key, value hash table";
			goto fail;
		}

		while (unpack_strdup(e, &key, NULL)) {
			data = kzalloc(sizeof(*data), GFP_KERNEL);
			if (!data) {
				kfree_sensitive(key);
				goto fail;
			}

			data->key = key;
			data->size = unpack_blob(e, &data->data, NULL);
			data->data = kvmemdup(data->data, data->size);
			if (data->size && !data->data) {
				kfree_sensitive(data->key);
				kfree_sensitive(data);
				goto fail;
			}

			rhashtable_insert_fast(profile->data, &data->head,
					       profile->data->p);
		}

		if (!unpack_nameX(e, AA_STRUCTEND, NULL)) {
			info = "failed to unpack end of key, value data table";
			goto fail;
		}
	}

	if (!unpack_nameX(e, AA_STRUCTEND, NULL)) {
		info = "failed to unpack end of profile";
		goto fail;
	}

	return profile;

fail:
	if (profile)
		name = NULL;
	else if (!name)
		name = "unknown";
	audit_iface(profile, NULL, name, info, e, error);
	aa_free_profile(profile);

	return ERR_PTR(error);
}

/**
 * verify_header - unpack serialized stream header
 * @e: serialized data read head (NOT NULL)
 * @required: whether the header is required or optional
 * @ns: Returns - namespace if one is specified else NULL (NOT NULL)
 *
 * Returns: error or 0 if header is good
 */
static int verify_header(struct aa_ext *e, int required, const char **ns)
{
	int error = -EPROTONOSUPPORT;
	const char *name = NULL;
	*ns = NULL;

	/* get the interface version */
	if (!unpack_u32(e, &e->version, "version")) {
		if (required) {
			audit_iface(NULL, NULL, NULL, "invalid profile format",
				    e, error);
			return error;
		}
	}

	/* Check that the interface version is currently supported.
	 * if not specified use previous version
	 * Mask off everything that is not kernel abi version
	 */
	if (VERSION_LT(e->version, v5) || VERSION_GT(e->version, v7)) {
		audit_iface(NULL, NULL, NULL, "unsupported interface version",
			    e, error);
		return error;
	}

	/* read the namespace if present */
	if (unpack_str(e, &name, "namespace")) {
		if (*name == '\0') {
			audit_iface(NULL, NULL, NULL, "invalid namespace name",
				    e, error);
			return error;
		}
		if (*ns && strcmp(*ns, name)) {
			audit_iface(NULL, NULL, NULL, "invalid ns change", e,
				    error);
		} else if (!*ns) {
			*ns = kstrdup(name, GFP_KERNEL);
			if (!*ns)
				return -ENOMEM;
		}
	}

	return 0;
}

static bool verify_xindex(int xindex, int table_size)
{
	int index, xtype;

	xtype = xindex & AA_X_TYPE_MASK;
	index = xindex & AA_X_INDEX_MASK;
	if (xtype == AA_X_TABLE && index >= table_size)
		return false;
	return true;
}

/* verify dfa xindexes are in range of transition tables */
static bool verify_dfa_xindex(struct aa_dfa *dfa, int table_size)
{
	int i;

	for (i = 0; i < dfa->tables[YYTD_ID_ACCEPT]->td_lolen; i++) {
		if (!verify_xindex(dfa_user_xindex(dfa, i), table_size))
			return false;
		if (!verify_xindex(dfa_other_xindex(dfa, i), table_size))
			return false;
	}
	return true;
}

/**
 * verify_profile - Do post unpack analysis to verify profile consistency
 * @profile: profile to verify (NOT NULL)
 *
 * Returns: 0 if passes verification else error
 */
static int verify_profile(struct aa_profile *profile)
{
	if (profile->file.dfa &&
	    !verify_dfa_xindex(profile->file.dfa,
			       profile->file.trans.size)) {
		audit_iface(profile, NULL, NULL, "Invalid named transition",
			    NULL, -EPROTO);
		return -EPROTO;
	}

	return 0;
}

void aa_load_ent_free(struct aa_load_ent *ent)
{
	if (ent) {
		aa_put_profile(ent->rename);
		aa_put_profile(ent->old);
		aa_put_profile(ent->new);
		kfree(ent->ns_name);
		kfree_sensitive(ent);
	}
}

struct aa_load_ent *aa_load_ent_alloc(void)
{
	struct aa_load_ent *ent = kzalloc(sizeof(*ent), GFP_KERNEL);

	if (ent)
		INIT_LIST_HEAD(&ent->list);
	return ent;
}
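
/*
 * deflate_compress - zlib deflate @src (@slen bytes) into a newly allocated
 * buffer returned through @dst, with the compressed length in @dlen.  The
 * staging buffer is sized with deflateBound() and then shrunk (or copied)
 * down to the actual compressed size before being handed back.
 */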
static int deflate_compress(const char *src, size_t slen, char **dst,
			    size_t *dlen)
{
	int error;
	struct z_stream_s strm;
	void *stgbuf, *dstbuf;
	size_t stglen = deflateBound(slen);

	memset(&strm, 0, sizeof(strm));

	if (stglen < slen)
		return -EFBIG;

	strm.workspace = kvzalloc(zlib_deflate_workspacesize(MAX_WBITS,
							     MAX_MEM_LEVEL),
				  GFP_KERNEL);
	if (!strm.workspace)
		return -ENOMEM;

	error = zlib_deflateInit(&strm, aa_g_rawdata_compression_level);
	if (error != Z_OK) {
		error = -ENOMEM;
		goto fail_deflate_init;
	}

	stgbuf = kvzalloc(stglen, GFP_KERNEL);
	if (!stgbuf) {
		error = -ENOMEM;
		goto fail_stg_alloc;
	}

	strm.next_in = src;
	strm.avail_in = slen;
	strm.next_out = stgbuf;
	strm.avail_out = stglen;

	error = zlib_deflate(&strm, Z_FINISH);
	if (error != Z_STREAM_END) {
		error = -EINVAL;
		goto fail_deflate;
	}
	error = 0;

	if (is_vmalloc_addr(stgbuf)) {
		dstbuf = kvzalloc(strm.total_out, GFP_KERNEL);
		if (dstbuf) {
			memcpy(dstbuf, stgbuf, strm.total_out);
			kvfree(stgbuf);
		}
	} else
		/*
		 * If the staging buffer was kmalloc'd, then using krealloc is
		 * probably going to be faster. The destination buffer will
		 * always be smaller, so it's just shrunk, avoiding a memcpy
		 */
		dstbuf = krealloc(stgbuf, strm.total_out, GFP_KERNEL);

	if (!dstbuf) {
		error = -ENOMEM;
		goto fail_deflate;
	}

	*dst = dstbuf;
	*dlen = strm.total_out;

fail_stg_alloc:
	zlib_deflateEnd(&strm);
fail_deflate_init:
	kvfree(strm.workspace);
	return error;

fail_deflate:
	kvfree(stgbuf);
	goto fail_stg_alloc;
}

static int compress_loaddata(struct aa_loaddata *data)
{
	AA_BUG(data->compressed_size > 0);

	/*
	 * Shortcut the no compression case, else we increase the amount of
	 * storage required by a small amount
	 */
	if (aa_g_rawdata_compression_level != 0) {
		void *udata = data->data;
		int error = deflate_compress(udata, data->size, &data->data,
					     &data->compressed_size);
		if (error)
			return error;

		kvfree(udata);
	} else
		data->compressed_size = data->size;

	return 0;
}

/**
 * aa_unpack - unpack packed binary profile(s) data loaded from user space
 * @udata: user data copied to kmem (NOT NULL)
 * @lh: list to place unpacked profiles in an aa_repl_ws
 * @ns: Returns namespace profile is in if specified else NULL (NOT NULL)
 *
 * Unpack user data and return refcounted allocated profile(s) stored in
 * @lh in order of discovery, with the list chain stored in base.list
 * or error
 *
 * Returns: profile(s) on @lh else error pointer if fails to unpack
 */
int aa_unpack(struct aa_loaddata *udata, struct list_head *lh,
	      const char **ns)
{
	struct aa_load_ent *tmp, *ent;
	struct aa_profile *profile = NULL;
	int error;
	struct aa_ext e = {
		.start = udata->data,
		.end = udata->data + udata->size,
		.pos = udata->data,
	};

	*ns = NULL;
	while (e.pos < e.end) {
		char *ns_name = NULL;
		void *start;

		error = verify_header(&e, e.pos == e.start, ns);
		if (error)
			goto fail;

		start = e.pos;
		profile = unpack_profile(&e, &ns_name);
		if (IS_ERR(profile)) {
			error = PTR_ERR(profile);
			goto fail;
		}

		error = verify_profile(profile);
		if (error)
			goto fail_profile;

		if (aa_g_hash_policy)
			error = aa_calc_profile_hash(profile, e.version, start,
						     e.pos - start);
		if (error)
			goto fail_profile;

		ent = aa_load_ent_alloc();
		if (!ent) {
			error = -ENOMEM;
			goto fail_profile;
		}

		ent->new = profile;
		ent->ns_name = ns_name;
		list_add_tail(&ent->list, lh);
	}
	udata->abi = e.version & K_ABI_MASK;
	if (aa_g_hash_policy) {
		udata->hash = aa_calc_hash(udata->data, udata->size);
		if (IS_ERR(udata->hash)) {
			error = PTR_ERR(udata->hash);
			udata->hash = NULL;
			goto fail;
		}
	}
	error = compress_loaddata(udata);
	if (error)
		goto fail;
	return 0;

fail_profile:
	aa_put_profile(profile);

fail:
	list_for_each_entry_safe(ent, tmp, lh, list) {
		list_del_init(&ent->list);
		aa_load_ent_free(ent);
	}

	return error;
}

#ifdef CONFIG_SECURITY_APPARMOR_KUNIT_TEST
#include "policy_unpack_test.c"
#endif /* CONFIG_SECURITY_APPARMOR_KUNIT_TEST */