// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2005,2006,2007,2008 IBM Corporation
 *
 * Authors:
 * Mimi Zohar <zohar@us.ibm.com>
 * Kylene Hall <kjhall@us.ibm.com>
 *
 * File: ima_crypto.c
 *	Calculates md5/sha1 file hash, template hash, boot-aggregate hash
 */

#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/ratelimit.h>
#include <linux/file.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <crypto/hash.h>

#include "ima.h"

/* minimum file size for ahash use */
static unsigned long ima_ahash_minsize;
module_param_named(ahash_minsize, ima_ahash_minsize, ulong, 0644);
MODULE_PARM_DESC(ahash_minsize, "Minimum file size for ahash use");

/* default is 0 - 1 page. */
static int ima_maxorder;
static unsigned int ima_bufsize = PAGE_SIZE;

static int param_set_bufsize(const char *val, const struct kernel_param *kp)
{
	unsigned long long size;
	int order;

	size = memparse(val, NULL);
	order = get_order(size);
	if (order >= MAX_ORDER)
		return -EINVAL;
	ima_maxorder = order;
	ima_bufsize = PAGE_SIZE << order;
	return 0;
}

static const struct kernel_param_ops param_ops_bufsize = {
	.set = param_set_bufsize,
	.get = param_get_uint,
};
#define param_check_bufsize(name, p) __param_check(name, p, unsigned int)

module_param_named(ahash_bufsize, ima_bufsize, bufsize, 0644);
MODULE_PARM_DESC(ahash_bufsize, "Maximum ahash buffer size");

static struct crypto_shash *ima_shash_tfm;
static struct crypto_ahash *ima_ahash_tfm;

struct ima_algo_desc {
	struct crypto_shash *tfm;
	enum hash_algo algo;
};

int ima_sha1_idx __ro_after_init;
int ima_hash_algo_idx __ro_after_init;
/*
 * Additional number of slots reserved, as needed, for SHA1
 * and the IMA default algorithm.
 */
int ima_extra_slots __ro_after_init;

static struct ima_algo_desc *ima_algo_array;
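
/*
 * Worked example (illustrative, not from the original source): if the TPM
 * exposes SHA-256 and SHA-384 banks and ima_hash_algo is HASH_ALGO_SHA256,
 * no bank provides SHA1, so ima_init_crypto() reserves one extra slot for
 * it and ima_extra_slots ends up as 1.
 */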

static int __init ima_init_ima_crypto(void)
{
	long rc;

	ima_shash_tfm = crypto_alloc_shash(hash_algo_name[ima_hash_algo], 0, 0);
	if (IS_ERR(ima_shash_tfm)) {
		rc = PTR_ERR(ima_shash_tfm);
		pr_err("Can not allocate %s (reason: %ld)\n",
		       hash_algo_name[ima_hash_algo], rc);
		return rc;
	}
	pr_info("Allocated hash algorithm: %s\n",
		hash_algo_name[ima_hash_algo]);
	return 0;
}

static struct crypto_shash *ima_alloc_tfm(enum hash_algo algo)
{
	struct crypto_shash *tfm = ima_shash_tfm;
	int rc, i;

	if (algo < 0 || algo >= HASH_ALGO__LAST)
		algo = ima_hash_algo;

	if (algo == ima_hash_algo)
		return tfm;

	for (i = 0; i < NR_BANKS(ima_tpm_chip) + ima_extra_slots; i++)
		if (ima_algo_array[i].tfm && ima_algo_array[i].algo == algo)
			return ima_algo_array[i].tfm;

	tfm = crypto_alloc_shash(hash_algo_name[algo], 0, 0);
	if (IS_ERR(tfm)) {
		rc = PTR_ERR(tfm);
		pr_err("Can not allocate %s (reason: %d)\n",
		       hash_algo_name[algo], rc);
	}
	return tfm;
}

int __init ima_init_crypto(void)
{
	enum hash_algo algo;
	long rc;
	int i;

	rc = ima_init_ima_crypto();
	if (rc)
		return rc;

	ima_sha1_idx = -1;
	ima_hash_algo_idx = -1;

	for (i = 0; i < NR_BANKS(ima_tpm_chip); i++) {
		algo = ima_tpm_chip->allocated_banks[i].crypto_id;
		if (algo == HASH_ALGO_SHA1)
			ima_sha1_idx = i;

		if (algo == ima_hash_algo)
			ima_hash_algo_idx = i;
	}

	if (ima_sha1_idx < 0) {
		ima_sha1_idx = NR_BANKS(ima_tpm_chip) + ima_extra_slots++;
		if (ima_hash_algo == HASH_ALGO_SHA1)
			ima_hash_algo_idx = ima_sha1_idx;
	}

	if (ima_hash_algo_idx < 0)
		ima_hash_algo_idx = NR_BANKS(ima_tpm_chip) + ima_extra_slots++;

	ima_algo_array = kcalloc(NR_BANKS(ima_tpm_chip) + ima_extra_slots,
				 sizeof(*ima_algo_array), GFP_KERNEL);
	if (!ima_algo_array) {
		rc = -ENOMEM;
		goto out;
	}

	for (i = 0; i < NR_BANKS(ima_tpm_chip); i++) {
		algo = ima_tpm_chip->allocated_banks[i].crypto_id;
		ima_algo_array[i].algo = algo;

		/* unknown TPM algorithm */
		if (algo == HASH_ALGO__LAST)
			continue;

		if (algo == ima_hash_algo) {
			ima_algo_array[i].tfm = ima_shash_tfm;
			continue;
		}

		ima_algo_array[i].tfm = ima_alloc_tfm(algo);
		if (IS_ERR(ima_algo_array[i].tfm)) {
			if (algo == HASH_ALGO_SHA1) {
				rc = PTR_ERR(ima_algo_array[i].tfm);
				ima_algo_array[i].tfm = NULL;
				goto out_array;
			}

			ima_algo_array[i].tfm = NULL;
		}
	}

	if (ima_sha1_idx >= NR_BANKS(ima_tpm_chip)) {
		if (ima_hash_algo == HASH_ALGO_SHA1) {
			ima_algo_array[ima_sha1_idx].tfm = ima_shash_tfm;
		} else {
			ima_algo_array[ima_sha1_idx].tfm =
						ima_alloc_tfm(HASH_ALGO_SHA1);
			if (IS_ERR(ima_algo_array[ima_sha1_idx].tfm)) {
				rc = PTR_ERR(ima_algo_array[ima_sha1_idx].tfm);
				goto out_array;
			}
		}

		ima_algo_array[ima_sha1_idx].algo = HASH_ALGO_SHA1;
	}

	if (ima_hash_algo_idx >= NR_BANKS(ima_tpm_chip) &&
	    ima_hash_algo_idx != ima_sha1_idx) {
		ima_algo_array[ima_hash_algo_idx].tfm = ima_shash_tfm;
		ima_algo_array[ima_hash_algo_idx].algo = ima_hash_algo;
	}

	return 0;
out_array:
	for (i = 0; i < NR_BANKS(ima_tpm_chip) + ima_extra_slots; i++) {
		if (!ima_algo_array[i].tfm ||
		    ima_algo_array[i].tfm == ima_shash_tfm)
			continue;

		crypto_free_shash(ima_algo_array[i].tfm);
	}
out:
	crypto_free_shash(ima_shash_tfm);
	return rc;
}
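
/*
 * Release a tfm obtained from ima_alloc_tfm(). The default tfm and the
 * tfms cached in ima_algo_array are shared and live for the lifetime of
 * the kernel, so only a tfm allocated ad hoc for some other algorithm is
 * actually freed here.
 */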
static void ima_free_tfm(struct crypto_shash *tfm)
{
	int i;

	if (tfm == ima_shash_tfm)
		return;

	for (i = 0; i < NR_BANKS(ima_tpm_chip) + ima_extra_slots; i++)
		if (ima_algo_array[i].tfm == tfm)
			return;

	crypto_free_shash(tfm);
}

/**
 * ima_alloc_pages() - Allocate contiguous pages.
 * @max_size: Maximum amount of memory to allocate.
 * @allocated_size: Returned size of actual allocation.
 * @last_warn: Whether the final, zero-order allocation may warn.
 *
 * Allocates memory opportunistically: first tries to allocate enough pages
 * to cover max_size, then retries with ever smaller orders until order zero
 * is reached. The attempts are made without generating allocation warnings
 * unless last_warn is set; last_warn only affects the final, zero-order
 * allocation.
 *
 * By default ima_maxorder is 0, which makes this equivalent to a plain
 * single-page GFP_KERNEL allocation.
 *
 * Return: pointer to allocated memory, or NULL on failure.
 */
static void *ima_alloc_pages(loff_t max_size, size_t *allocated_size,
			     int last_warn)
{
	void *ptr;
	int order = ima_maxorder;
	gfp_t gfp_mask = __GFP_RECLAIM | __GFP_NOWARN | __GFP_NORETRY;

	if (order)
		order = min(get_order(max_size), order);

	for (; order; order--) {
		ptr = (void *)__get_free_pages(gfp_mask, order);
		if (ptr) {
			*allocated_size = PAGE_SIZE << order;
			return ptr;
		}
	}

	/* order is zero - one page */

	gfp_mask = GFP_KERNEL;

	if (!last_warn)
		gfp_mask |= __GFP_NOWARN;

	ptr = (void *)__get_free_pages(gfp_mask, 0);
	if (ptr) {
		*allocated_size = PAGE_SIZE;
		return ptr;
	}

	*allocated_size = 0;
	return NULL;
}
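
/*
 * Example (illustrative): with ima_maxorder == 2 on a 4 KiB-page system,
 * a 20 KiB request makes ima_alloc_pages() try order 2 (16 KiB), then
 * order 1, and finally a plain zero-order page, reporting whatever
 * succeeded through *allocated_size.
 */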

/**
 * ima_free_pages() - Free pages allocated by ima_alloc_pages().
 * @ptr: Pointer to allocated pages.
 * @size: Size of allocated buffer.
 */
static void ima_free_pages(void *ptr, size_t size)
{
	if (!ptr)
		return;
	free_pages((unsigned long)ptr, get_order(size));
}

static struct crypto_ahash *ima_alloc_atfm(enum hash_algo algo)
{
	struct crypto_ahash *tfm = ima_ahash_tfm;
	int rc;

	if (algo < 0 || algo >= HASH_ALGO__LAST)
		algo = ima_hash_algo;

	if (algo != ima_hash_algo || !tfm) {
		tfm = crypto_alloc_ahash(hash_algo_name[algo], 0, 0);
		if (!IS_ERR(tfm)) {
			if (algo == ima_hash_algo)
				ima_ahash_tfm = tfm;
		} else {
			rc = PTR_ERR(tfm);
			pr_err("Can not allocate %s (reason: %d)\n",
			       hash_algo_name[algo], rc);
		}
	}
	return tfm;
}

static void ima_free_atfm(struct crypto_ahash *tfm)
{
	if (tfm != ima_ahash_tfm)
		crypto_free_ahash(tfm);
}

static inline int ahash_wait(int err, struct crypto_wait *wait)
{
	err = crypto_wait_req(err, wait);

	if (err)
		pr_crit_ratelimited("ahash calculation failed: err: %d\n", err);

	return err;
}

static int ima_calc_file_hash_atfm(struct file *file,
				   struct ima_digest_data *hash,
				   struct crypto_ahash *tfm)
{
	loff_t i_size, offset;
	char *rbuf[2] = { NULL, };
	int rc, rbuf_len, active = 0, ahash_rc = 0;
	struct ahash_request *req;
	struct scatterlist sg[1];
	struct crypto_wait wait;
	size_t rbuf_size[2];

	hash->length = crypto_ahash_digestsize(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	crypto_init_wait(&wait);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				   CRYPTO_TFM_REQ_MAY_SLEEP,
				   crypto_req_done, &wait);

	rc = ahash_wait(crypto_ahash_init(req), &wait);
	if (rc)
		goto out1;

	i_size = i_size_read(file_inode(file));

	if (i_size == 0)
		goto out2;

	/*
	 * Try to allocate the maximum amount of memory. Fail only if even a
	 * single page cannot be allocated.
	 */
	rbuf[0] = ima_alloc_pages(i_size, &rbuf_size[0], 1);
	if (!rbuf[0]) {
		rc = -ENOMEM;
		goto out1;
	}

	/* Only allocate one buffer if that is enough. */
	if (i_size > rbuf_size[0]) {
		/*
		 * Try to allocate a secondary buffer; if that fails, fall
		 * back to single buffering. Use the previous allocation size
		 * as the baseline for the possible allocation size.
		 */
		rbuf[1] = ima_alloc_pages(i_size - rbuf_size[0],
					  &rbuf_size[1], 0);
	}
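
	/*
	 * Pipeline the work: with two buffers the next chunk is read while
	 * the crypto driver is still hashing the previous one; with a single
	 * buffer each read must wait for the previous ahash_update() first.
	 */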
	for (offset = 0; offset < i_size; offset += rbuf_len) {
		if (!rbuf[1] && offset) {
			/*
			 * Not using two buffers and this is not the first
			 * chunk: wait for the previous ahash_update()
			 * request to complete.
			 */
			rc = ahash_wait(ahash_rc, &wait);
			if (rc)
				goto out3;
		}
		/* read buffer */
		rbuf_len = min_t(loff_t, i_size - offset, rbuf_size[active]);
		rc = integrity_kernel_read(file, offset, rbuf[active],
					   rbuf_len);
		if (rc != rbuf_len) {
			if (rc >= 0)
				rc = -EINVAL;
			/*
			 * Forward the current rc; do not overwrite it with
			 * the return value from ahash_wait().
			 */
			ahash_wait(ahash_rc, &wait);
			goto out3;
		}

		if (rbuf[1] && offset) {
			/*
			 * Using two buffers and this is not the first chunk:
			 * wait for the previous ahash_update() request to
			 * complete.
			 */
			rc = ahash_wait(ahash_rc, &wait);
			if (rc)
				goto out3;
		}

		sg_init_one(&sg[0], rbuf[active], rbuf_len);
		ahash_request_set_crypt(req, sg, NULL, rbuf_len);

		ahash_rc = crypto_ahash_update(req);

		if (rbuf[1])
			active = !active; /* swap buffers, if we use two */
	}
	/* wait for the last update request to complete */
	rc = ahash_wait(ahash_rc, &wait);
out3:
	ima_free_pages(rbuf[0], rbuf_size[0]);
	ima_free_pages(rbuf[1], rbuf_size[1]);
out2:
	if (!rc) {
		ahash_request_set_crypt(req, NULL, hash->digest, 0);
		rc = ahash_wait(crypto_ahash_final(req), &wait);
	}
out1:
	ahash_request_free(req);
	return rc;
}

static int ima_calc_file_ahash(struct file *file, struct ima_digest_data *hash)
{
	struct crypto_ahash *tfm;
	int rc;

	tfm = ima_alloc_atfm(hash->algo);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	rc = ima_calc_file_hash_atfm(file, hash, tfm);

	ima_free_atfm(tfm);

	return rc;
}

static int ima_calc_file_hash_tfm(struct file *file,
				  struct ima_digest_data *hash,
				  struct crypto_shash *tfm)
{
	loff_t i_size, offset = 0;
	char *rbuf;
	int rc;
	SHASH_DESC_ON_STACK(shash, tfm);

	shash->tfm = tfm;

	hash->length = crypto_shash_digestsize(tfm);

	rc = crypto_shash_init(shash);
	if (rc != 0)
		return rc;

	i_size = i_size_read(file_inode(file));

	if (i_size == 0)
		goto out;

	rbuf = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!rbuf)
		return -ENOMEM;

	while (offset < i_size) {
		int rbuf_len;

		rbuf_len = integrity_kernel_read(file, offset, rbuf, PAGE_SIZE);
		if (rbuf_len < 0) {
			rc = rbuf_len;
			break;
		}
		if (rbuf_len == 0) {	/* unexpected EOF */
			rc = -EINVAL;
			break;
		}
		offset += rbuf_len;

		rc = crypto_shash_update(shash, rbuf, rbuf_len);
		if (rc)
			break;
	}
	kfree(rbuf);
out:
	if (!rc)
		rc = crypto_shash_final(shash, hash->digest);
	return rc;
}

static int ima_calc_file_shash(struct file *file, struct ima_digest_data *hash)
{
	struct crypto_shash *tfm;
	int rc;

	tfm = ima_alloc_tfm(hash->algo);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	rc = ima_calc_file_hash_tfm(file, hash, tfm);

	ima_free_tfm(tfm);

	return rc;
}

/*
 * ima_calc_file_hash - calculate the file hash
 *
 * Asynchronous hash (ahash) allows using HW acceleration for calculating
 * a hash. ahash performance varies for different data sizes on different
 * crypto accelerators. shash performance might be better for smaller files.
 * The 'ima.ahash_minsize' module parameter allows specifying the best
 * minimum file size for using ahash on the system.
 *
 * If the ima.ahash_minsize parameter is not specified, this function uses
 * shash for the hash calculation. If ahash fails, it falls back to using
 * shash.
 */
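
/*
 * Example (illustrative): booting with "ima.ahash_minsize=1048576" sends
 * files of 1 MiB and larger down the ahash path; smaller files, and any
 * file for which ahash fails, are hashed with shash.
 */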
int ima_calc_file_hash(struct file *file, struct ima_digest_data *hash)
{
	loff_t i_size;
	int rc;
	struct file *f = file;
	bool new_file_instance = false, modified_mode = false;

	/*
	 * For consistency, fail files opened with the O_DIRECT flag on
	 * filesystems mounted with/without the DAX option.
	 */
	if (file->f_flags & O_DIRECT) {
		hash->length = hash_digest_size[ima_hash_algo];
		hash->algo = ima_hash_algo;
		return -EINVAL;
	}

	/* Open a new file instance in O_RDONLY if we cannot read */
	if (!(file->f_mode & FMODE_READ)) {
		int flags = file->f_flags & ~(O_WRONLY | O_APPEND |
				O_TRUNC | O_CREAT | O_NOCTTY | O_EXCL);
		flags |= O_RDONLY;
		f = dentry_open(&file->f_path, flags, file->f_cred);
		if (IS_ERR(f)) {
			/*
			 * Cannot open the file again; modify the f_mode of
			 * the original and continue.
			 */
			pr_info_ratelimited("Unable to reopen file for reading.\n");
			f = file;
			f->f_mode |= FMODE_READ;
			modified_mode = true;
		} else {
			new_file_instance = true;
		}
	}

	i_size = i_size_read(file_inode(f));

	if (ima_ahash_minsize && i_size >= ima_ahash_minsize) {
		rc = ima_calc_file_ahash(f, hash);
		if (!rc)
			goto out;
	}

	rc = ima_calc_file_shash(f, hash);
out:
	if (new_file_instance)
		fput(f);
	else if (modified_mode)
		f->f_mode &= ~FMODE_READ;
	return rc;
}

/*
 * Calculate the hash of template data
 */
static int ima_calc_field_array_hash_tfm(struct ima_field_data *field_data,
					 struct ima_template_entry *entry,
					 int tfm_idx)
{
	SHASH_DESC_ON_STACK(shash, ima_algo_array[tfm_idx].tfm);
	struct ima_template_desc *td = entry->template_desc;
	int num_fields = entry->template_desc->num_fields;
	int rc, i;

	shash->tfm = ima_algo_array[tfm_idx].tfm;

	rc = crypto_shash_init(shash);
	if (rc != 0)
		return rc;
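
	/*
	 * For templates other than the original "ima" template, each field's
	 * length is hashed along with its data; with ima_canonical_fmt the
	 * length is hashed in little-endian form so the template digest is
	 * the same on big- and little-endian machines.
	 */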
	for (i = 0; i < num_fields; i++) {
		u8 buffer[IMA_EVENT_NAME_LEN_MAX + 1] = { 0 };
		u8 *data_to_hash = field_data[i].data;
		u32 datalen = field_data[i].len;
		u32 datalen_to_hash =
		    !ima_canonical_fmt ? datalen : cpu_to_le32(datalen);

		if (strcmp(td->name, IMA_TEMPLATE_IMA_NAME) != 0) {
			rc = crypto_shash_update(shash,
						(const u8 *) &datalen_to_hash,
						sizeof(datalen_to_hash));
			if (rc)
				break;
		} else if (strcmp(td->fields[i]->field_id, "n") == 0) {
			memcpy(buffer, data_to_hash, datalen);
			data_to_hash = buffer;
			datalen = IMA_EVENT_NAME_LEN_MAX + 1;
		}
		rc = crypto_shash_update(shash, data_to_hash, datalen);
		if (rc)
			break;
	}

	if (!rc)
		rc = crypto_shash_final(shash, entry->digests[tfm_idx].digest);

	return rc;
}

int ima_calc_field_array_hash(struct ima_field_data *field_data,
			      struct ima_template_entry *entry)
{
	u16 alg_id;
	int rc, i;

	rc = ima_calc_field_array_hash_tfm(field_data, entry, ima_sha1_idx);
	if (rc)
		return rc;

	entry->digests[ima_sha1_idx].alg_id = TPM_ALG_SHA1;

	for (i = 0; i < NR_BANKS(ima_tpm_chip) + ima_extra_slots; i++) {
		if (i == ima_sha1_idx)
			continue;

		if (i < NR_BANKS(ima_tpm_chip)) {
			alg_id = ima_tpm_chip->allocated_banks[i].alg_id;
			entry->digests[i].alg_id = alg_id;
		}

		/* for unmapped TPM algorithms the digest is still a padded SHA1 */
		if (!ima_algo_array[i].tfm) {
			memcpy(entry->digests[i].digest,
			       entry->digests[ima_sha1_idx].digest,
			       TPM_DIGEST_SIZE);
			continue;
		}

		rc = ima_calc_field_array_hash_tfm(field_data, entry, i);
		if (rc)
			return rc;
	}
	return rc;
}

static int calc_buffer_ahash_atfm(const void *buf, loff_t len,
				  struct ima_digest_data *hash,
				  struct crypto_ahash *tfm)
{
	struct ahash_request *req;
	struct scatterlist sg;
	struct crypto_wait wait;
	int rc, ahash_rc = 0;

	hash->length = crypto_ahash_digestsize(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	crypto_init_wait(&wait);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				   CRYPTO_TFM_REQ_MAY_SLEEP,
				   crypto_req_done, &wait);

	rc = ahash_wait(crypto_ahash_init(req), &wait);
	if (rc)
		goto out;

	sg_init_one(&sg, buf, len);
	ahash_request_set_crypt(req, &sg, NULL, len);

	ahash_rc = crypto_ahash_update(req);

	/* wait for the update request to complete */
	rc = ahash_wait(ahash_rc, &wait);
	if (!rc) {
		ahash_request_set_crypt(req, NULL, hash->digest, 0);
		rc = ahash_wait(crypto_ahash_final(req), &wait);
	}
out:
	ahash_request_free(req);
	return rc;
}

static int calc_buffer_ahash(const void *buf, loff_t len,
			     struct ima_digest_data *hash)
{
	struct crypto_ahash *tfm;
	int rc;

	tfm = ima_alloc_atfm(hash->algo);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	rc = calc_buffer_ahash_atfm(buf, len, hash, tfm);

	ima_free_atfm(tfm);

	return rc;
}

static int calc_buffer_shash_tfm(const void *buf, loff_t size,
				 struct ima_digest_data *hash,
				 struct crypto_shash *tfm)
{
	SHASH_DESC_ON_STACK(shash, tfm);
	unsigned int len;
	int rc;

	shash->tfm = tfm;

	hash->length = crypto_shash_digestsize(tfm);

	rc = crypto_shash_init(shash);
	if (rc != 0)
		return rc;
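
	/* feed the buffer to the shash in at most PAGE_SIZE-sized chunks */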
	while (size) {
		len = size < PAGE_SIZE ? size : PAGE_SIZE;
		rc = crypto_shash_update(shash, buf, len);
		if (rc)
			break;
		buf += len;
		size -= len;
	}

	if (!rc)
		rc = crypto_shash_final(shash, hash->digest);
	return rc;
}

static int calc_buffer_shash(const void *buf, loff_t len,
			     struct ima_digest_data *hash)
{
	struct crypto_shash *tfm;
	int rc;

	tfm = ima_alloc_tfm(hash->algo);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	rc = calc_buffer_shash_tfm(buf, len, hash, tfm);

	ima_free_tfm(tfm);
	return rc;
}

int ima_calc_buffer_hash(const void *buf, loff_t len,
			 struct ima_digest_data *hash)
{
	int rc;

	if (ima_ahash_minsize && len >= ima_ahash_minsize) {
		rc = calc_buffer_ahash(buf, len, hash);
		if (!rc)
			return 0;
	}

	return calc_buffer_shash(buf, len, hash);
}

static void ima_pcrread(u32 idx, struct tpm_digest *d)
{
	if (!ima_tpm_chip)
		return;

	if (tpm_pcr_read(ima_tpm_chip, idx, d) != 0)
		pr_err("Error Communicating to TPM chip\n");
}

/*
 * The boot_aggregate is a cumulative hash over TPM registers 0 - 7. With
 * TPM 1.2 the boot_aggregate was based on reading the SHA1 PCRs, but with
 * TPM 2.0 hash agility, TPM chips could support multiple TPM PCR banks,
 * allowing firmware to configure and enable different banks.
 *
 * Which TPM bank is read to calculate the boot_aggregate digest needs to
 * be conveyed to a verifier. For this reason, use the same hash algorithm
 * for reading the TPM PCRs as for calculating the boot aggregate digest
 * as stored in the measurement list.
 */
static int ima_calc_boot_aggregate_tfm(char *digest, u16 alg_id,
				       struct crypto_shash *tfm)
{
	struct tpm_digest d = { .alg_id = alg_id, .digest = {0} };
	int rc;
	u32 i;
	SHASH_DESC_ON_STACK(shash, tfm);

	shash->tfm = tfm;

	pr_devel("calculating the boot-aggregate based on TPM bank: %04x\n",
		 d.alg_id);

	rc = crypto_shash_init(shash);
	if (rc != 0)
		return rc;

	/* cumulative digest over TPM registers 0-7 */
	for (i = TPM_PCR0; i < TPM_PCR8; i++) {
		ima_pcrread(i, &d);
		/* now accumulate with current aggregate */
		rc = crypto_shash_update(shash, d.digest,
					 crypto_shash_digestsize(tfm));
	}
	/*
	 * Extend the cumulative digest over TPM registers 8-9, which contain
	 * measurements of the kernel command line (reg. 8) and image (reg. 9)
	 * in a typical PCR allocation. Registers 8-9 are only included in
	 * non-SHA1 boot_aggregate digests to avoid ambiguity.
	 */
	if (alg_id != TPM_ALG_SHA1) {
		for (i = TPM_PCR8; i < TPM_PCR10; i++) {
			ima_pcrread(i, &d);
			rc = crypto_shash_update(shash, d.digest,
						 crypto_shash_digestsize(tfm));
		}
	}
	if (!rc)
		rc = crypto_shash_final(shash, digest);
	return rc;
}
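
/*
 * Pick the TPM bank for the boot aggregate: an exact match with the
 * requested algorithm wins, otherwise a SHA-256 bank is preferred, and
 * SHA1 is used only as a last resort.
 */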
int ima_calc_boot_aggregate(struct ima_digest_data *hash)
{
	struct crypto_shash *tfm;
	u16 crypto_id, alg_id;
	int rc, i, bank_idx = -1;

	for (i = 0; i < ima_tpm_chip->nr_allocated_banks; i++) {
		crypto_id = ima_tpm_chip->allocated_banks[i].crypto_id;
		if (crypto_id == hash->algo) {
			bank_idx = i;
			break;
		}

		if (crypto_id == HASH_ALGO_SHA256)
			bank_idx = i;

		if (bank_idx == -1 && crypto_id == HASH_ALGO_SHA1)
			bank_idx = i;
	}

	if (bank_idx == -1) {
		pr_err("No suitable TPM algorithm for boot aggregate\n");
		return 0;
	}

	hash->algo = ima_tpm_chip->allocated_banks[bank_idx].crypto_id;

	tfm = ima_alloc_tfm(hash->algo);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	hash->length = crypto_shash_digestsize(tfm);
	alg_id = ima_tpm_chip->allocated_banks[bank_idx].alg_id;
	rc = ima_calc_boot_aggregate_tfm(hash->digest, alg_id, tfm);

	ima_free_tfm(tfm);

	return rc;
}