// SPDX-License-Identifier: GPL-2.0-only
/*
 * AppArmor security module
 *
 * This file contains AppArmor functions for unpacking policy loaded from
 * userspace.
 *
 * Copyright (C) 1998-2008 Novell/SUSE
 * Copyright 2009-2010 Canonical Ltd.
 *
 * AppArmor uses a serialized binary format for loading policy. To find
 * policy format documentation see Documentation/admin-guide/LSM/apparmor.rst
 * All policy is validated before it is used.
 */

#include <asm/unaligned.h>
#include <kunit/visibility.h>
#include <linux/ctype.h>
#include <linux/errno.h>
#include <linux/zstd.h>

#include "include/apparmor.h"
#include "include/audit.h"
#include "include/cred.h"
#include "include/crypto.h"
#include "include/file.h"
#include "include/match.h"
#include "include/path.h"
#include "include/policy.h"
#include "include/policy_unpack.h"
#include "include/policy_compat.h"

/* audit callback for unpack fields */
static void audit_cb(struct audit_buffer *ab, void *va)
{
	struct common_audit_data *sa = va;
	struct apparmor_audit_data *ad = aad(sa);

	if (ad->iface.ns) {
		audit_log_format(ab, " ns=");
		audit_log_untrustedstring(ab, ad->iface.ns);
	}
	if (ad->name) {
		audit_log_format(ab, " name=");
		audit_log_untrustedstring(ab, ad->name);
	}
	if (ad->iface.pos)
		audit_log_format(ab, " offset=%ld", ad->iface.pos);
}

/**
 * audit_iface - do audit message for policy unpacking/load/replace/remove
 * @new: profile if it has been allocated (MAY BE NULL)
 * @ns_name: name of the ns the profile is to be loaded to (MAY BE NULL)
 * @name: name of the profile being manipulated (MAY BE NULL)
 * @info: any extra info about the failure (MAY BE NULL)
 * @e: buffer position info
 * @error: error code
 *
 * Returns: %0 or error
 */
static int audit_iface(struct aa_profile *new, const char *ns_name,
		       const char *name, const char *info, struct aa_ext *e,
		       int error)
{
	struct aa_profile *profile = labels_profile(aa_current_raw_label());
	DEFINE_AUDIT_DATA(ad, LSM_AUDIT_DATA_NONE, AA_CLASS_NONE, NULL);
	if (e)
		ad.iface.pos = e->pos - e->start;
	ad.iface.ns = ns_name;
	if (new)
		ad.name = new->base.hname;
	else
		ad.name = name;
	ad.info = info;
	ad.error = error;

	return aa_audit(AUDIT_APPARMOR_STATUS, profile, &ad, audit_cb);
}

void __aa_loaddata_update(struct aa_loaddata *data, long revision)
{
	AA_BUG(!data);
	AA_BUG(!data->ns);
	AA_BUG(!mutex_is_locked(&data->ns->lock));
	AA_BUG(data->revision > revision);

	data->revision = revision;
	if ((data->dents[AAFS_LOADDATA_REVISION])) {
		struct inode *inode;

		inode = d_inode(data->dents[AAFS_LOADDATA_DIR]);
		inode->i_mtime = inode_set_ctime_current(inode);

		inode = d_inode(data->dents[AAFS_LOADDATA_REVISION]);
		inode->i_mtime = inode_set_ctime_current(inode);
	}
}

bool aa_rawdata_eq(struct aa_loaddata *l, struct aa_loaddata *r)
{
	if (l->size != r->size)
		return false;
	if (l->compressed_size != r->compressed_size)
		return false;
	if (aa_g_hash_policy && memcmp(l->hash, r->hash, aa_hash_size()) != 0)
		return false;
	return memcmp(l->data, r->data, r->compressed_size ?: r->size) == 0;
}

/*
 * need to take the ns mutex lock which is NOT safe most places that
 * put_loaddata is called, so we have to delay freeing it
 */
static void do_loaddata_free(struct work_struct *work)
{
	struct aa_loaddata *d = container_of(work, struct aa_loaddata, work);
	struct aa_ns *ns = aa_get_ns(d->ns);

	if (ns) {
		mutex_lock_nested(&ns->lock, ns->level);
		__aa_fs_remove_rawdata(d);
		mutex_unlock(&ns->lock);
		aa_put_ns(ns);
	}

	kfree_sensitive(d->hash);
	kfree_sensitive(d->name);
	kvfree(d->data);
	kfree_sensitive(d);
}

void aa_loaddata_kref(struct kref *kref)
{
	struct aa_loaddata *d = container_of(kref, struct aa_loaddata, count);

	if (d) {
		INIT_WORK(&d->work, do_loaddata_free);
		schedule_work(&d->work);
	}
}

struct aa_loaddata *aa_loaddata_alloc(size_t size)
{
	struct aa_loaddata *d;

	d = kzalloc(sizeof(*d), GFP_KERNEL);
	if (d == NULL)
		return ERR_PTR(-ENOMEM);
	d->data = kvzalloc(size, GFP_KERNEL);
	if (!d->data) {
		kfree(d);
		return ERR_PTR(-ENOMEM);
	}
	kref_init(&d->count);
	INIT_LIST_HEAD(&d->list);

	return d;
}

/* test if read will be in packed data bounds */
VISIBLE_IF_KUNIT bool aa_inbounds(struct aa_ext *e, size_t size)
{
	return (size <= e->end - e->pos);
}
EXPORT_SYMBOL_IF_KUNIT(aa_inbounds);

/**
 * aa_unpack_u16_chunk - test and do bounds checking for a u16 size based chunk
 * @e: serialized data read head (NOT NULL)
 * @chunk: start address for chunk of data (NOT NULL)
 *
 * Returns: the size of chunk found with the read head at the end of the chunk.
 */
VISIBLE_IF_KUNIT size_t aa_unpack_u16_chunk(struct aa_ext *e, char **chunk)
{
	size_t size = 0;
	void *pos = e->pos;

	if (!aa_inbounds(e, sizeof(u16)))
		goto fail;
	size = le16_to_cpu(get_unaligned((__le16 *) e->pos));
	e->pos += sizeof(__le16);
	if (!aa_inbounds(e, size))
		goto fail;
	*chunk = e->pos;
	e->pos += size;
	return size;

fail:
	e->pos = pos;
	return 0;
}
EXPORT_SYMBOL_IF_KUNIT(aa_unpack_u16_chunk);

/* unpack control byte */
VISIBLE_IF_KUNIT bool aa_unpack_X(struct aa_ext *e, enum aa_code code)
{
	if (!aa_inbounds(e, 1))
		return false;
	if (*(u8 *) e->pos != code)
		return false;
	e->pos++;
	return true;
}
EXPORT_SYMBOL_IF_KUNIT(aa_unpack_X);

/**
 * aa_unpack_nameX - check if the next element is of type X with a name of @name
 * @e: serialized data extent information (NOT NULL)
 * @code: type code
 * @name: name to match to the serialized element. (MAY BE NULL)
 *
 * check that the next serialized data element is of type X and has a tag
 * name @name. If @name is specified then there must be a matching
 * name element in the stream. If @name is NULL any name element will be
 * skipped and only the typecode will be tested.
 *
 * Returns true on success (both type code and name tests match) and the read
 * head is advanced past the headers
 *
 * Returns: false if either match fails, the read head does not move
 */
VISIBLE_IF_KUNIT bool aa_unpack_nameX(struct aa_ext *e, enum aa_code code, const char *name)
{
	/*
	 * May need to reset pos if name or type doesn't match
	 */
	void *pos = e->pos;
	/*
	 * Check for presence of a tagname, and if present name size
	 * AA_NAME tag value is a u16.
	 */
	if (aa_unpack_X(e, AA_NAME)) {
		char *tag = NULL;
		size_t size = aa_unpack_u16_chunk(e, &tag);
		/* if a name is specified it must match. otherwise skip tag */
		if (name && (!size || tag[size-1] != '\0' || strcmp(name, tag)))
			goto fail;
	} else if (name) {
		/* if a name is specified and there is no name tag fail */
		goto fail;
	}

	/* now check if type code matches */
	if (aa_unpack_X(e, code))
		return true;

fail:
	e->pos = pos;
	return false;
}
EXPORT_SYMBOL_IF_KUNIT(aa_unpack_nameX);

static bool unpack_u8(struct aa_ext *e, u8 *data, const char *name)
{
	void *pos = e->pos;

	if (aa_unpack_nameX(e, AA_U8, name)) {
		if (!aa_inbounds(e, sizeof(u8)))
			goto fail;
		if (data)
			*data = *((u8 *)e->pos);
		e->pos += sizeof(u8);
		return true;
	}

fail:
	e->pos = pos;
	return false;
}

VISIBLE_IF_KUNIT bool aa_unpack_u32(struct aa_ext *e, u32 *data, const char *name)
{
	void *pos = e->pos;

	if (aa_unpack_nameX(e, AA_U32, name)) {
		if (!aa_inbounds(e, sizeof(u32)))
			goto fail;
		if (data)
			*data = le32_to_cpu(get_unaligned((__le32 *) e->pos));
		e->pos += sizeof(u32);
		return true;
	}

fail:
	e->pos = pos;
	return false;
}
EXPORT_SYMBOL_IF_KUNIT(aa_unpack_u32);

VISIBLE_IF_KUNIT bool aa_unpack_u64(struct aa_ext *e, u64 *data, const char *name)
{
	void *pos = e->pos;

	if (aa_unpack_nameX(e, AA_U64, name)) {
		if (!aa_inbounds(e, sizeof(u64)))
			goto fail;
		if (data)
			*data = le64_to_cpu(get_unaligned((__le64 *) e->pos));
		e->pos += sizeof(u64);
		return true;
	}

fail:
	e->pos = pos;
	return false;
}
EXPORT_SYMBOL_IF_KUNIT(aa_unpack_u64);

static bool aa_unpack_cap_low(struct aa_ext *e, kernel_cap_t *data, const char *name)
{
	u32 val;

	if (!aa_unpack_u32(e, &val, name))
		return false;
	data->val = val;
	return true;
}

static bool aa_unpack_cap_high(struct aa_ext *e, kernel_cap_t *data, const char *name)
{
	u32 val;

	if (!aa_unpack_u32(e, &val, name))
		return false;
	data->val = (u32)data->val | ((u64)val << 32);
	return true;
}

VISIBLE_IF_KUNIT bool aa_unpack_array(struct aa_ext *e, const char *name, u16 *size)
{
	void *pos = e->pos;

	if (aa_unpack_nameX(e, AA_ARRAY, name)) {
		if (!aa_inbounds(e, sizeof(u16)))
			goto fail;
		*size = le16_to_cpu(get_unaligned((__le16 *) e->pos));
		e->pos += sizeof(u16);
		return true;
	}

fail:
	e->pos = pos;
	return false;
}
EXPORT_SYMBOL_IF_KUNIT(aa_unpack_array);

VISIBLE_IF_KUNIT size_t aa_unpack_blob(struct aa_ext *e, char **blob, const char *name)
{
	void *pos = e->pos;

	if (aa_unpack_nameX(e, AA_BLOB, name)) {
		u32 size;
		if (!aa_inbounds(e, sizeof(u32)))
			goto fail;
		size = le32_to_cpu(get_unaligned((__le32 *) e->pos));
		e->pos += sizeof(u32);
		if (aa_inbounds(e, (size_t) size)) {
			*blob = e->pos;
			e->pos += size;
			return size;
		}
	}

fail:
	e->pos = pos;
	return 0;
}
EXPORT_SYMBOL_IF_KUNIT(aa_unpack_blob);

VISIBLE_IF_KUNIT int aa_unpack_str(struct aa_ext *e, const char **string, const char *name)
{
	char *src_str;
	size_t size = 0;
	void *pos = e->pos;
	*string = NULL;
	if (aa_unpack_nameX(e, AA_STRING, name)) {
		size = aa_unpack_u16_chunk(e, &src_str);
		if (size) {
			/* strings are null terminated, length is size - 1 */
			if (src_str[size - 1] != 0)
				goto fail;
			*string = src_str;

			return size;
		}
	}

fail:
	e->pos = pos;
	return 0;
}
EXPORT_SYMBOL_IF_KUNIT(aa_unpack_str);

VISIBLE_IF_KUNIT int aa_unpack_strdup(struct aa_ext *e, char **string, const char *name)
{
	const char *tmp;
	void *pos = e->pos;
	int res = aa_unpack_str(e, &tmp, name);
	*string = NULL;

	if (!res)
		return 0;

	*string = kmemdup(tmp, res, GFP_KERNEL);
	if (!*string) {
		e->pos = pos;
		return 0;
	}

	return res;
}
EXPORT_SYMBOL_IF_KUNIT(aa_unpack_strdup);


/**
 * unpack_dfa - unpack a file rule dfa
 * @e: serialized data extent information (NOT NULL)
 * @flags: dfa flags to check
 *
 * Returns: dfa or ERR_PTR or NULL if no dfa
 */
static struct aa_dfa *unpack_dfa(struct aa_ext *e, int flags)
{
	char *blob = NULL;
	size_t size;
	struct aa_dfa *dfa = NULL;

	size = aa_unpack_blob(e, &blob, "aadfa");
	if (size) {
		/*
		 * The dfa is aligned within the blob to 8 bytes
		 * from the beginning of the stream.
		 * alignment adjust needed by dfa unpack
		 */
		size_t sz = blob - (char *) e->start -
			((e->pos - e->start) & 7);
		size_t pad = ALIGN(sz, 8) - sz;
		if (aa_g_paranoid_load)
			flags |= DFA_FLAG_VERIFY_STATES;
		dfa = aa_dfa_unpack(blob + pad, size - pad, flags);

		if (IS_ERR(dfa))
			return dfa;

	}

	return dfa;
}

/**
 * unpack_trans_table - unpack a profile transition table
 * @e: serialized data extent information (NOT NULL)
 * @strs: str table to unpack to (NOT NULL)
 *
 * Returns: true if table successfully unpacked or not present
 */
static bool unpack_trans_table(struct aa_ext *e, struct aa_str_table *strs)
{
	void *saved_pos = e->pos;
	char **table = NULL;

	/* exec table is optional */
	if (aa_unpack_nameX(e, AA_STRUCT, "xtable")) {
		u16 size;
		int i;

		if (!aa_unpack_array(e, NULL, &size))
			/*
			 * Note: index into trans table array is a max
			 * of 2^24, but unpack array can only unpack
			 * an array of 2^16 in size atm so no need
			 * for size check here
			 */
			goto fail;
		table = kcalloc(size, sizeof(char *), GFP_KERNEL);
		if (!table)
			goto fail;

		for (i = 0; i < size; i++) {
			char *str;
			int c, j, pos, size2 = aa_unpack_strdup(e, &str, NULL);
			/* aa_unpack_strdup verifies that the last character is
			 * a null termination byte.
			 */
			if (!size2)
				goto fail;
			table[i] = str;
			/* verify that name doesn't start with space */
			if (isspace(*str))
				goto fail;

			/* count # of internal \0 */
			for (c = j = 0; j < size2 - 1; j++) {
				if (!str[j]) {
					pos = j;
					c++;
				}
			}
			if (*str == ':') {
				/* first character after : must be valid */
				if (!str[1])
					goto fail;
				/* beginning with : requires an embedded \0,
				 * verify that exactly 1 internal \0 exists
				 * trailing \0 already verified by aa_unpack_strdup
				 *
				 * convert \0 back to : for label_parse
				 */
				if (c == 1)
					str[pos] = ':';
				else if (c > 1)
					goto fail;
			} else if (c)
				/* fail - all other cases with embedded \0 */
				goto fail;
		}
		if (!aa_unpack_nameX(e, AA_ARRAYEND, NULL))
			goto fail;
		if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL))
			goto fail;

		strs->table = table;
		strs->size = size;
	}
	return true;

fail:
	kfree_sensitive(table);
	e->pos = saved_pos;
	return false;
}

static bool unpack_xattrs(struct aa_ext *e, struct aa_profile *profile)
{
	void *pos = e->pos;

	if (aa_unpack_nameX(e, AA_STRUCT, "xattrs")) {
		u16 size;
		int i;

		if (!aa_unpack_array(e, NULL, &size))
			goto fail;
		profile->attach.xattr_count = size;
		profile->attach.xattrs = kcalloc(size, sizeof(char *), GFP_KERNEL);
		if (!profile->attach.xattrs)
			goto fail;
		for (i = 0; i < size; i++) {
			if (!aa_unpack_strdup(e, &profile->attach.xattrs[i], NULL))
				goto fail;
		}
		if (!aa_unpack_nameX(e, AA_ARRAYEND, NULL))
			goto fail;
		if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL))
			goto fail;
	}

	return true;

fail:
	e->pos = pos;
	return false;
}

static bool unpack_secmark(struct aa_ext *e, struct aa_ruleset *rules)
{
	void *pos = e->pos;
	u16 size;
	int i;

	if (aa_unpack_nameX(e, AA_STRUCT, "secmark")) {
		if (!aa_unpack_array(e, NULL, &size))
			goto fail;

		rules->secmark = kcalloc(size, sizeof(struct aa_secmark),
					 GFP_KERNEL);
		if (!rules->secmark)
			goto fail;

		rules->secmark_count = size;

		for (i = 0; i < size; i++) {
			if (!unpack_u8(e, &rules->secmark[i].audit, NULL))
				goto fail;
			if (!unpack_u8(e, &rules->secmark[i].deny, NULL))
				goto fail;
			if (!aa_unpack_strdup(e, &rules->secmark[i].label, NULL))
				goto fail;
		}
		if (!aa_unpack_nameX(e, AA_ARRAYEND, NULL))
			goto fail;
		if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL))
			goto fail;
	}

	return true;

fail:
	if (rules->secmark) {
		for (i = 0; i < size; i++)
			kfree(rules->secmark[i].label);
		kfree(rules->secmark);
		rules->secmark_count = 0;
		rules->secmark = NULL;
	}

	e->pos = pos;
	return false;
}

static bool unpack_rlimits(struct aa_ext *e, struct aa_ruleset *rules)
{
	void *pos = e->pos;

	/* rlimits are optional */
	if (aa_unpack_nameX(e, AA_STRUCT, "rlimits")) {
		u16 size;
		int i;
		u32 tmp = 0;
		if (!aa_unpack_u32(e, &tmp, NULL))
			goto fail;
		rules->rlimits.mask = tmp;

		if (!aa_unpack_array(e, NULL, &size) ||
		    size > RLIM_NLIMITS)
			goto fail;
		for (i = 0; i < size; i++) {
			u64 tmp2 = 0;
			int a = aa_map_resource(i);
			if (!aa_unpack_u64(e, &tmp2, NULL))
				goto fail;
			rules->rlimits.limits[a].rlim_max = tmp2;
		}
		if (!aa_unpack_nameX(e, AA_ARRAYEND, NULL))
			goto fail;
		if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL))
			goto fail;
	}
	return true;

fail:
	e->pos = pos;
	return false;
}

static bool unpack_perm(struct aa_ext *e, u32 version, struct aa_perms *perm)
{
	if (version != 1)
		return false;

	return	aa_unpack_u32(e, &perm->allow, NULL) &&
		aa_unpack_u32(e, &perm->allow, NULL) &&
		aa_unpack_u32(e, &perm->deny, NULL) &&
		aa_unpack_u32(e, &perm->subtree, NULL) &&
		aa_unpack_u32(e, &perm->cond, NULL) &&
		aa_unpack_u32(e, &perm->kill, NULL) &&
		aa_unpack_u32(e, &perm->complain, NULL) &&
		aa_unpack_u32(e, &perm->prompt, NULL) &&
		aa_unpack_u32(e, &perm->audit, NULL) &&
		aa_unpack_u32(e, &perm->quiet, NULL) &&
		aa_unpack_u32(e, &perm->hide, NULL) &&
		aa_unpack_u32(e, &perm->xindex, NULL) &&
		aa_unpack_u32(e, &perm->tag, NULL) &&
		aa_unpack_u32(e, &perm->label, NULL);
}

static ssize_t unpack_perms_table(struct aa_ext *e, struct aa_perms **perms)
{
	void *pos = e->pos;
	u16 size = 0;

	AA_BUG(!perms);
	/*
	 * policy perms are optional, in which case perms are embedded
	 * in the dfa accept table
	 */
	if (aa_unpack_nameX(e, AA_STRUCT, "perms")) {
		int i;
		u32 version;

		if (!aa_unpack_u32(e, &version, "version"))
			goto fail_reset;
		if (!aa_unpack_array(e, NULL, &size))
			goto fail_reset;
		*perms = kcalloc(size, sizeof(struct aa_perms), GFP_KERNEL);
		if (!*perms)
			goto fail_reset;
		for (i = 0; i < size; i++) {
			if (!unpack_perm(e, version, &(*perms)[i]))
				goto fail;
		}
		if (!aa_unpack_nameX(e, AA_ARRAYEND, NULL))
			goto fail;
		if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL))
			goto fail;
	} else
		*perms = NULL;

	return size;

fail:
	kfree(*perms);
fail_reset:
	e->pos = pos;
	return -EPROTO;
}

static int unpack_pdb(struct aa_ext *e, struct aa_policydb *policy,
		      bool required_dfa, bool required_trans,
		      const char **info)
{
	void *pos = e->pos;
	int i, flags, error = -EPROTO;
	ssize_t size;

	size = unpack_perms_table(e, &policy->perms);
	if (size < 0) {
		error = size;
		policy->perms = NULL;
		*info = "failed to unpack - perms";
		goto fail;
	}
	policy->size = size;

	if (policy->perms) {
		/* perms table present accept is index */
		flags = TO_ACCEPT1_FLAG(YYTD_DATA32);
	} else {
		/* packed perms in accept1 and accept2 */
		flags = TO_ACCEPT1_FLAG(YYTD_DATA32) |
			TO_ACCEPT2_FLAG(YYTD_DATA32);
	}

	policy->dfa = unpack_dfa(e, flags);
	if (IS_ERR(policy->dfa)) {
		error = PTR_ERR(policy->dfa);
		policy->dfa = NULL;
		*info = "failed to unpack - dfa";
		goto fail;
	} else if (!policy->dfa) {
		if (required_dfa) {
			*info = "missing required dfa";
			goto fail;
		}
		goto out;
	}

	/*
	 * only unpack the following if a dfa is present
	 *
	 * sadly start was given different names for file and policydb
	 * but since it is optional we can try both
	 */
	if (!aa_unpack_u32(e, &policy->start[0], "start"))
		/* default start state */
		policy->start[0] = DFA_START;
	if (!aa_unpack_u32(e, &policy->start[AA_CLASS_FILE], "dfa_start")) {
		/* default start state for xmatch and file dfa */
		policy->start[AA_CLASS_FILE] = DFA_START;
	} /* setup class index */
	for (i = AA_CLASS_FILE + 1; i <= AA_CLASS_LAST; i++) {
		policy->start[i] = aa_dfa_next(policy->dfa, policy->start[0],
					       i);
	}
	if (!unpack_trans_table(e, &policy->trans) && required_trans) {
		*info = "failed to unpack profile transition table";
		goto fail;
	}

	/* TODO: move compat mapping here, requires dfa merging first */
	/* TODO: move verify here, it has to be done after compat mappings */
out:
	return 0;

fail:
	e->pos = pos;
	return error;
}

static u32 strhash(const void *data, u32 len, u32 seed)
{
	const char * const *key = data;

	return jhash(*key, strlen(*key), seed);
}

static int datacmp(struct rhashtable_compare_arg *arg, const void *obj)
{
	const struct aa_data *data = obj;
	const char * const *key = arg->key;

	return strcmp(data->key, *key);
}

/**
 * unpack_profile - unpack a serialized profile
 * @e: serialized data extent information (NOT NULL)
 * @ns_name: Returns - newly allocated copy of the profile's ns name if one
 *           is specified, else NULL (set to NULL on error)
 *
 * NOTE: unpack profile sets audit struct if there is a failure
 */
static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
{
	struct aa_ruleset *rules;
	struct aa_profile *profile = NULL;
	const char *tmpname, *tmpns = NULL, *name = NULL;
	const char *info = "failed to unpack profile";
	size_t ns_len;
	struct rhashtable_params params = { 0 };
	char *key = NULL, *disconnected = NULL;
	struct aa_data *data;
	int error = -EPROTO;
	kernel_cap_t tmpcap;
	u32 tmp;

	*ns_name = NULL;

	/* check that we have the right struct being passed */
	if (!aa_unpack_nameX(e, AA_STRUCT, "profile"))
		goto fail;
	if (!aa_unpack_str(e, &name, NULL))
		goto fail;
	if (*name == '\0')
		goto fail;

	tmpname = aa_splitn_fqname(name, strlen(name), &tmpns, &ns_len);
	if (tmpns) {
		*ns_name = kstrndup(tmpns, ns_len, GFP_KERNEL);
		if (!*ns_name) {
			info = "out of memory";
			error = -ENOMEM;
			goto fail;
		}
		name = tmpname;
	}

	profile = aa_alloc_profile(name, NULL, GFP_KERNEL);
	if (!profile) {
		info = "out of memory";
		error = -ENOMEM;
		goto fail;
	}
	rules = list_first_entry(&profile->rules, typeof(*rules), list);

	/* profile renaming is optional */
	(void) aa_unpack_str(e, &profile->rename, "rename");

	/* attachment string is optional */
	(void) aa_unpack_str(e, &profile->attach.xmatch_str, "attach");

	/* xmatch is optional and may be NULL */
	error = unpack_pdb(e, &profile->attach.xmatch, false, false, &info);
	if (error) {
		info = "bad xmatch";
		goto fail;
	}

	/* neither xmatch_len nor xmatch_perms are optional if xmatch is set */
	if (profile->attach.xmatch.dfa) {
		if (!aa_unpack_u32(e, &tmp, NULL)) {
			info = "missing xmatch len";
			goto fail;
		}
		profile->attach.xmatch_len = tmp;
		profile->attach.xmatch.start[AA_CLASS_XMATCH] = DFA_START;
		if (!profile->attach.xmatch.perms) {
			error = aa_compat_map_xmatch(&profile->attach.xmatch);
			if (error) {
				info = "failed to convert xmatch permission table";
				goto fail;
			}
		}
	}

	/* disconnected attachment string is optional */
	(void) aa_unpack_strdup(e, &disconnected, "disconnected");
	profile->disconnected = disconnected;

	/* per profile debug flags (complain, audit) */
	if (!aa_unpack_nameX(e, AA_STRUCT, "flags")) {
		info = "profile missing flags";
		goto fail;
	}
	info = "failed to unpack profile flags";
	if (!aa_unpack_u32(e, &tmp, NULL))
		goto fail;
	if (tmp & PACKED_FLAG_HAT)
		profile->label.flags |= FLAG_HAT;
	if (tmp & PACKED_FLAG_DEBUG1)
		profile->label.flags |= FLAG_DEBUG1;
	if (tmp & PACKED_FLAG_DEBUG2)
		profile->label.flags |= FLAG_DEBUG2;
	if (!aa_unpack_u32(e, &tmp, NULL))
		goto fail;
	if (tmp == PACKED_MODE_COMPLAIN || (e->version & FORCE_COMPLAIN_FLAG)) {
		profile->mode = APPARMOR_COMPLAIN;
	} else if (tmp == PACKED_MODE_ENFORCE) {
		profile->mode = APPARMOR_ENFORCE;
	} else if (tmp == PACKED_MODE_KILL) {
		profile->mode = APPARMOR_KILL;
	} else if (tmp == PACKED_MODE_UNCONFINED) {
		profile->mode = APPARMOR_UNCONFINED;
		profile->label.flags |= FLAG_UNCONFINED;
	} else if (tmp == PACKED_MODE_USER) {
		profile->mode = APPARMOR_USER;
	} else {
		goto fail;
	}
	if (!aa_unpack_u32(e, &tmp, NULL))
		goto fail;
	if (tmp)
		profile->audit = AUDIT_ALL;

	if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL))
		goto fail;

	/* path_flags is optional */
	if (aa_unpack_u32(e, &profile->path_flags, "path_flags"))
		profile->path_flags |= profile->label.flags &
			PATH_MEDIATE_DELETED;
	else
		/* set a default value if path_flags field is not present */
		profile->path_flags = PATH_MEDIATE_DELETED;

	info = "failed to unpack profile capabilities";
	if (!aa_unpack_cap_low(e, &rules->caps.allow, NULL))
		goto fail;
	if (!aa_unpack_cap_low(e, &rules->caps.audit, NULL))
		goto fail;
	if (!aa_unpack_cap_low(e, &rules->caps.quiet, NULL))
		goto fail;
	if (!aa_unpack_cap_low(e, &tmpcap, NULL))
		goto fail;

	info = "failed to unpack upper profile capabilities";
	if (aa_unpack_nameX(e, AA_STRUCT, "caps64")) {
		/* optional upper half of 64 bit caps */
		if (!aa_unpack_cap_high(e, &rules->caps.allow, NULL))
			goto fail;
		if (!aa_unpack_cap_high(e, &rules->caps.audit, NULL))
			goto fail;
		if (!aa_unpack_cap_high(e, &rules->caps.quiet, NULL))
			goto fail;
		if (!aa_unpack_cap_high(e, &tmpcap, NULL))
			goto fail;
		if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL))
			goto fail;
	}

	info = "failed to unpack extended profile capabilities";
	if (aa_unpack_nameX(e, AA_STRUCT, "capsx")) {
		/* optional extended caps mediation mask */
		if (!aa_unpack_cap_low(e, &rules->caps.extended, NULL))
			goto fail;
		if (!aa_unpack_cap_high(e, &rules->caps.extended, NULL))
			goto fail;
		if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL))
			goto fail;
	}

	if (!unpack_xattrs(e, profile)) {
		info = "failed to unpack profile xattrs";
		goto fail;
	}

	if (!unpack_rlimits(e, rules)) {
		info = "failed to unpack profile rlimits";
		goto fail;
	}

	if (!unpack_secmark(e, rules)) {
		info = "failed to unpack profile secmark rules";
		goto fail;
	}

	if (aa_unpack_nameX(e, AA_STRUCT, "policydb")) {
		/* generic policy dfa - optional and may be NULL */
		info = "failed to unpack policydb";
		error = unpack_pdb(e, &rules->policy, true, false,
				   &info);
		if (error)
			goto fail;
		/* Fixup: drop when we get rid of start array */
		if (aa_dfa_next(rules->policy.dfa, rules->policy.start[0],
				AA_CLASS_FILE))
			rules->policy.start[AA_CLASS_FILE] =
				aa_dfa_next(rules->policy.dfa,
					    rules->policy.start[0],
					    AA_CLASS_FILE);
		if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL))
			goto fail;
		if (!rules->policy.perms) {
			error = aa_compat_map_policy(&rules->policy,
						     e->version);
			if (error) {
				info = "failed to remap policydb permission table";
				goto fail;
			}
		}
	} else {
		rules->policy.dfa = aa_get_dfa(nulldfa);
		rules->policy.perms = kcalloc(2, sizeof(struct aa_perms),
					      GFP_KERNEL);
		if (!rules->policy.perms)
			goto fail;
		rules->policy.size = 2;
	}
	/* get file rules */
	error = unpack_pdb(e, &rules->file, false, true, &info);
	if (error) {
		goto fail;
	} else if (rules->file.dfa) {
		if (!rules->file.perms) {
			error = aa_compat_map_file(&rules->file);
			if (error) {
				info = "failed to remap file permission table";
				goto fail;
			}
		}
	} else if (rules->policy.dfa &&
		   rules->policy.start[AA_CLASS_FILE]) {
		rules->file.dfa = aa_get_dfa(rules->policy.dfa);
		rules->file.start[AA_CLASS_FILE] = rules->policy.start[AA_CLASS_FILE];
		rules->file.perms = kcalloc(rules->policy.size,
					    sizeof(struct aa_perms),
					    GFP_KERNEL);
		if (!rules->file.perms)
			goto fail;
		memcpy(rules->file.perms, rules->policy.perms,
		       rules->policy.size * sizeof(struct aa_perms));
		rules->file.size = rules->policy.size;
	} else {
		rules->file.dfa = aa_get_dfa(nulldfa);
		rules->file.perms = kcalloc(2, sizeof(struct aa_perms),
					    GFP_KERNEL);
		if (!rules->file.perms)
			goto fail;
		rules->file.size = 2;
	}
	error = -EPROTO;
	if (aa_unpack_nameX(e, AA_STRUCT, "data")) {
		info = "out of memory";
		profile->data = kzalloc(sizeof(*profile->data), GFP_KERNEL);
		if (!profile->data) {
			error = -ENOMEM;
			goto fail;
		}
		params.nelem_hint = 3;
		params.key_len = sizeof(void *);
		params.key_offset = offsetof(struct aa_data, key);
		params.head_offset = offsetof(struct aa_data, head);
		params.hashfn = strhash;
		params.obj_cmpfn = datacmp;

		if (rhashtable_init(profile->data, &params)) {
			info = "failed to init key, value hash table";
			goto fail;
		}

		while (aa_unpack_strdup(e, &key, NULL)) {
			data = kzalloc(sizeof(*data), GFP_KERNEL);
			if (!data) {
				kfree_sensitive(key);
				error = -ENOMEM;
				goto fail;
			}

			data->key = key;
			data->size = aa_unpack_blob(e, &data->data, NULL);
			data->data = kvmemdup(data->data, data->size, GFP_KERNEL);
			if (data->size && !data->data) {
				kfree_sensitive(data->key);
				kfree_sensitive(data);
				error = -ENOMEM;
				goto fail;
			}

			if (rhashtable_insert_fast(profile->data, &data->head,
						   profile->data->p)) {
				kfree_sensitive(data->key);
				kfree_sensitive(data);
				info = "failed to insert data to table";
				goto fail;
			}
		}

		if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL)) {
			info = "failed to unpack end of key, value data table";
			goto fail;
		}
	}

	if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL)) {
		info = "failed to unpack end of profile";
		goto fail;
	}

	return profile;

fail:
	if (error == 0)
		/* default error covers most cases */
		error = -EPROTO;
	if (*ns_name) {
		kfree(*ns_name);
		*ns_name = NULL;
	}
	if (profile)
		name = NULL;
	else if (!name)
		name = "unknown";
	audit_iface(profile, NULL, name, info, e, error);
	aa_free_profile(profile);

	return ERR_PTR(error);
}

/**
 * verify_header - unpack serialized stream header
 * @e: serialized data read head (NOT NULL)
 * @required: whether the header is required or optional
 * @ns: Returns - namespace if one is specified else NULL (NOT NULL)
 *
 * Returns: error or 0 if header is good
 */
static int verify_header(struct aa_ext *e, int required, const char **ns)
{
	int error = -EPROTONOSUPPORT;
	const char *name = NULL;
	*ns = NULL;

	/* get the interface version */
	if (!aa_unpack_u32(e, &e->version, "version")) {
		if (required) {
			audit_iface(NULL, NULL, NULL, "invalid profile format",
				    e, error);
			return error;
		}
	}

	/* Check that the interface version is currently supported.
	 * if not specified use previous version
	 * Mask off everything that is not kernel abi version
	 */
	if (VERSION_LT(e->version, v5) || VERSION_GT(e->version, v9)) {
		audit_iface(NULL, NULL, NULL, "unsupported interface version",
			    e, error);
		return error;
	}

	/* read the namespace if present */
	if (aa_unpack_str(e, &name, "namespace")) {
		if (*name == '\0') {
			audit_iface(NULL, NULL, NULL, "invalid namespace name",
				    e, error);
			return error;
		}
		if (*ns && strcmp(*ns, name)) {
			audit_iface(NULL, NULL, NULL, "invalid ns change", e,
				    error);
		} else if (!*ns) {
			*ns = kstrdup(name, GFP_KERNEL);
			if (!*ns)
				return -ENOMEM;
		}
	}

	return 0;
}

/**
 * verify_dfa_accept_index - verify accept indexes are in range of perms table
 * @dfa: the dfa to check accept indexes are in range
 * @table_size: the permission table size the indexes should be within
 */
static bool verify_dfa_accept_index(struct aa_dfa *dfa, int table_size)
{
	int i;
	for (i = 0; i < dfa->tables[YYTD_ID_ACCEPT]->td_lolen; i++) {
		if (ACCEPT_TABLE(dfa)[i] >= table_size)
			return false;
	}
	return true;
}

static bool verify_perm(struct aa_perms *perm)
{
	/* TODO: allow option to just force the perms into a valid state */
	if (perm->allow & perm->deny)
		return false;
	if (perm->subtree & ~perm->allow)
		return false;
	if (perm->cond & (perm->allow | perm->deny))
		return false;
	if (perm->kill & perm->allow)
		return false;
	if (perm->complain & (perm->allow | perm->deny))
		return false;
	if (perm->prompt & (perm->allow | perm->deny))
		return false;
	if (perm->complain & perm->prompt)
		return false;
	if (perm->hide & perm->allow)
		return false;

	return true;
}

static bool verify_perms(struct aa_policydb *pdb)
{
	int i;

	for (i = 0; i < pdb->size; i++) {
		if (!verify_perm(&pdb->perms[i]))
			return false;
		/* verify indexes into str table */
		if ((pdb->perms[i].xindex & AA_X_TYPE_MASK) == AA_X_TABLE &&
		    (pdb->perms[i].xindex & AA_X_INDEX_MASK) >= pdb->trans.size)
			return false;
		if (pdb->perms[i].tag && pdb->perms[i].tag >= pdb->trans.size)
			return false;
		if (pdb->perms[i].label &&
		    pdb->perms[i].label >= pdb->trans.size)
			return false;
	}

	return true;
}

/**
 * verify_profile - Do post unpack analysis to verify profile consistency
 * @profile: profile to verify (NOT NULL)
 *
 * Returns: 0 if passes verification else error
 *
 * This verification is post any unpack mapping or changes
 */
static int verify_profile(struct aa_profile *profile)
{
	struct aa_ruleset *rules = list_first_entry(&profile->rules,
						    typeof(*rules), list);
	if (!rules)
		return 0;

	if ((rules->file.dfa && !verify_dfa_accept_index(rules->file.dfa,
							 rules->file.size)) ||
	    (rules->policy.dfa &&
	     !verify_dfa_accept_index(rules->policy.dfa, rules->policy.size))) {
		audit_iface(profile, NULL, NULL,
			    "Unpack: Invalid named transition", NULL, -EPROTO);
		return -EPROTO;
	}

	if (!verify_perms(&rules->file)) {
		audit_iface(profile, NULL, NULL,
			    "Unpack: Invalid perm index", NULL, -EPROTO);
		return -EPROTO;
	}
	if (!verify_perms(&rules->policy)) {
		audit_iface(profile, NULL, NULL,
			    "Unpack: Invalid perm index", NULL, -EPROTO);
		return -EPROTO;
	}
	if (!verify_perms(&profile->attach.xmatch)) {
		audit_iface(profile, NULL, NULL,
			    "Unpack: Invalid perm index", NULL, -EPROTO);
		return -EPROTO;
	}

	return 0;
}

void aa_load_ent_free(struct aa_load_ent *ent)
{
	if (ent) {
		aa_put_profile(ent->rename);
		aa_put_profile(ent->old);
		aa_put_profile(ent->new);
		kfree(ent->ns_name);
		kfree_sensitive(ent);
	}
}

struct aa_load_ent *aa_load_ent_alloc(void)
{
	struct aa_load_ent *ent = kzalloc(sizeof(*ent), GFP_KERNEL);
	if (ent)
		INIT_LIST_HEAD(&ent->list);
	return ent;
}

static int compress_zstd(const char *src, size_t slen, char **dst, size_t *dlen)
{
#ifdef CONFIG_SECURITY_APPARMOR_EXPORT_BINARY
	const zstd_parameters params =
		zstd_get_params(aa_g_rawdata_compression_level, slen);
	const size_t wksp_len = zstd_cctx_workspace_bound(&params.cParams);
	void *wksp = NULL;
	zstd_cctx *ctx = NULL;
	size_t out_len = zstd_compress_bound(slen);
	void *out = NULL;
	int ret = 0;

	out = kvzalloc(out_len, GFP_KERNEL);
	if (!out) {
		ret = -ENOMEM;
		goto cleanup;
	}

	wksp = kvzalloc(wksp_len, GFP_KERNEL);
	if (!wksp) {
		ret = -ENOMEM;
		goto cleanup;
	}

	ctx = zstd_init_cctx(wksp, wksp_len);
	if (!ctx) {
		ret = -EINVAL;
		goto cleanup;
	}

	out_len = zstd_compress_cctx(ctx, out, out_len, src, slen, &params);
	if (zstd_is_error(out_len) || out_len >= slen) {
		ret = -EINVAL;
		goto cleanup;
	}

	if (is_vmalloc_addr(out)) {
		*dst = kvzalloc(out_len, GFP_KERNEL);
		if (*dst) {
			memcpy(*dst, out, out_len);
			kvfree(out);
			out = NULL;
		}
	} else {
		/*
		 * If the staging buffer was kmalloc'd, then using krealloc is
		 * probably going to be faster. The destination buffer will
		 * always be smaller, so it's just shrunk, avoiding a memcpy
		 */
		*dst = krealloc(out, out_len, GFP_KERNEL);
	}

	if (!*dst) {
		ret = -ENOMEM;
		goto cleanup;
	}

	*dlen = out_len;

cleanup:
	if (ret) {
		kvfree(out);
		*dst = NULL;
	}

	kvfree(wksp);
	return ret;
#else
	*dlen = slen;
	return 0;
#endif
}

static int compress_loaddata(struct aa_loaddata *data)
{
	AA_BUG(data->compressed_size > 0);

	/*
	 * Shortcut the no compression case, else we increase the amount of
	 * storage required by a small amount
	 */
	if (aa_g_rawdata_compression_level != 0) {
		void *udata = data->data;
		int error = compress_zstd(udata, data->size, &data->data,
					  &data->compressed_size);
		if (error) {
			data->compressed_size = data->size;
			return error;
		}
		if (udata != data->data)
			kvfree(udata);
	} else
		data->compressed_size = data->size;

	return 0;
}

/**
 * aa_unpack - unpack packed binary profile(s) data loaded from user space
 * @udata: user data copied to kmem (NOT NULL)
 * @lh: list to place unpacked profiles in an aa_repl_ws
 * @ns: Returns namespace profile is in if specified else NULL (NOT NULL)
 *
 * Unpack user data and return refcounted allocated profile(s) stored in
 * @lh in order of discovery, with the list chain stored in base.list
 * or error
 *
 * Returns: profile(s) on @lh else error pointer if fails to unpack
 */
int aa_unpack(struct aa_loaddata *udata, struct list_head *lh,
	      const char **ns)
{
	struct aa_load_ent *tmp, *ent;
	struct aa_profile *profile = NULL;
	char *ns_name = NULL;
	int error;
	struct aa_ext e = {
		.start = udata->data,
		.end = udata->data + udata->size,
		.pos = udata->data,
	};

	*ns = NULL;
	while (e.pos < e.end) {
		void *start;
		error = verify_header(&e, e.pos == e.start, ns);
		if (error)
			goto fail;

		start = e.pos;
		profile = unpack_profile(&e, &ns_name);
		if (IS_ERR(profile)) {
			error = PTR_ERR(profile);
			goto fail;
		}

		error = verify_profile(profile);
		if (error)
			goto fail_profile;

		if (aa_g_hash_policy)
			error = aa_calc_profile_hash(profile, e.version, start,
						     e.pos - start);
		if (error)
			goto fail_profile;

		ent = aa_load_ent_alloc();
		if (!ent) {
			error = -ENOMEM;
			goto fail_profile;
		}

		ent->new = profile;
		ent->ns_name = ns_name;
		ns_name = NULL;
		list_add_tail(&ent->list, lh);
	}
	udata->abi = e.version & K_ABI_MASK;
	if (aa_g_hash_policy) {
		udata->hash = aa_calc_hash(udata->data, udata->size);
		if (IS_ERR(udata->hash)) {
			error = PTR_ERR(udata->hash);
			udata->hash = NULL;
			goto fail;
		}
	}

	if (aa_g_export_binary) {
		error = compress_loaddata(udata);
		if (error)
			goto fail;
	}
	return 0;

fail_profile:
	kfree(ns_name);
	aa_put_profile(profile);

fail:
	list_for_each_entry_safe(ent, tmp, lh, list) {
		list_del_init(&ent->list);
		aa_load_ent_free(ent);
	}

	return error;
}