/*
 * Copyright (C) 2005,2006,2007,2008 IBM Corporation
 *
 * Authors:
 * Mimi Zohar <zohar@us.ibm.com>
 * Kylene Hall <kjhall@us.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, version 2 of the License.
 *
 * File: ima_crypto.c
 *	Calculates md5/sha1 file hash, template hash, boot-aggregate hash
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/ratelimit.h>
#include <linux/file.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <crypto/hash.h>

#include "ima.h"

/* minimum file size for ahash use */
static unsigned long ima_ahash_minsize;
module_param_named(ahash_minsize, ima_ahash_minsize, ulong, 0644);
MODULE_PARM_DESC(ahash_minsize, "Minimum file size for ahash use");

/* default is 0, i.e. a single (order-0) page */
static int ima_maxorder;
static unsigned int ima_bufsize = PAGE_SIZE;

static int param_set_bufsize(const char *val, const struct kernel_param *kp)
{
	unsigned long long size;
	int order;

	size = memparse(val, NULL);
	order = get_order(size);
	if (order >= MAX_ORDER)
		return -EINVAL;
	ima_maxorder = order;
	ima_bufsize = PAGE_SIZE << order;
	return 0;
}
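
/*
 * Worked example (illustrative, assuming 4 KiB pages): setting
 * "ahash_bufsize=24K" yields get_order(24K) == 3, so ima_bufsize becomes
 * 32 KiB; buffer sizes are rounded up to a power-of-two number of pages.
 */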

static const struct kernel_param_ops param_ops_bufsize = {
	.set = param_set_bufsize,
	.get = param_get_uint,
};
#define param_check_bufsize(name, p) __param_check(name, p, unsigned int)

module_param_named(ahash_bufsize, ima_bufsize, bufsize, 0644);
MODULE_PARM_DESC(ahash_bufsize, "Maximum ahash buffer size");
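
/*
 * Illustrative usage, not part of this file: with IMA built in, these
 * parameters can be set on the kernel command line, e.g.
 * "ima.ahash_minsize=8192 ima.ahash_bufsize=64K" (memparse() accepts
 * K/M/G suffixes for the buffer size), or at runtime via
 * /sys/module/ima/parameters/.  The values shown are examples only.
 */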
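/*
 * ima_shash_tfm is preallocated for the default algorithm by
 * ima_init_crypto(); ima_ahash_tfm is allocated lazily on first use by
 * ima_alloc_atfm() and cached thereafter.
 */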
static struct crypto_shash *ima_shash_tfm;
static struct crypto_ahash *ima_ahash_tfm;

int __init ima_init_crypto(void)
{
	long rc;

	ima_shash_tfm = crypto_alloc_shash(hash_algo_name[ima_hash_algo], 0, 0);
	if (IS_ERR(ima_shash_tfm)) {
		rc = PTR_ERR(ima_shash_tfm);
		pr_err("Can not allocate %s (reason: %ld)\n",
		       hash_algo_name[ima_hash_algo], rc);
		return rc;
	}
	return 0;
}

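/*
 * ima_alloc_tfm - allocate a shash tfm for @algo
 *
 * Out-of-range values of @algo fall back to the default ima_hash_algo,
 * for which the preallocated ima_shash_tfm is returned.  Any other
 * algorithm gets a fresh tfm, which the caller must release with
 * ima_free_tfm().  May return an ERR_PTR() on allocation failure.
 */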
static struct crypto_shash *ima_alloc_tfm(enum hash_algo algo)
{
	struct crypto_shash *tfm = ima_shash_tfm;
	int rc;

	if (algo < 0 || algo >= HASH_ALGO__LAST)
		algo = ima_hash_algo;

	if (algo != ima_hash_algo) {
		tfm = crypto_alloc_shash(hash_algo_name[algo], 0, 0);
		if (IS_ERR(tfm)) {
			rc = PTR_ERR(tfm);
			pr_err("Can not allocate %s (reason: %d)\n",
			       hash_algo_name[algo], rc);
		}
	}
	return tfm;
}

static void ima_free_tfm(struct crypto_shash *tfm)
{
	if (tfm != ima_shash_tfm)
		crypto_free_shash(tfm);
}

/**
 * ima_alloc_pages() - Allocate contiguous pages.
 * @max_size:       Maximum amount of memory to allocate.
 * @allocated_size: Returned size of the actual allocation.
 * @last_warn:      Whether the final zero-order allocation may warn.
 *
 * Allocates memory opportunistically: first try to allocate enough
 * contiguous pages to hold max_size, then retry with successively
 * smaller orders until order zero is reached.  Allocation warnings are
 * suppressed for every attempt except, when last_warn is set, the final
 * zero-order one.
 *
 * By default ima_maxorder is 0, which makes this equivalent to a single
 * order-zero kmalloc(GFP_KERNEL) allocation.
 *
 * Return: pointer to the allocated memory, or NULL on failure.
 */
static void *ima_alloc_pages(loff_t max_size, size_t *allocated_size,
			     int last_warn)
{
	void *ptr;
	int order = ima_maxorder;
	gfp_t gfp_mask = __GFP_RECLAIM | __GFP_NOWARN | __GFP_NORETRY;

	if (order)
		order = min(get_order(max_size), order);

	for (; order; order--) {
		ptr = (void *)__get_free_pages(gfp_mask, order);
		if (ptr) {
			*allocated_size = PAGE_SIZE << order;
			return ptr;
		}
	}

	/* order is zero - one page */

	gfp_mask = GFP_KERNEL;

	if (!last_warn)
		gfp_mask |= __GFP_NOWARN;

	ptr = (void *)__get_free_pages(gfp_mask, 0);
	if (ptr) {
		*allocated_size = PAGE_SIZE;
		return ptr;
	}

	*allocated_size = 0;
	return NULL;
}

/**
 * ima_free_pages() - Free pages allocated by ima_alloc_pages().
 * @ptr:  Pointer to allocated pages.
 * @size: Size of allocated buffer.
 */
static void ima_free_pages(void *ptr, size_t size)
{
	if (!ptr)
		return;
	free_pages((unsigned long)ptr, get_order(size));
}

static struct crypto_ahash *ima_alloc_atfm(enum hash_algo algo)
{
	struct crypto_ahash *tfm = ima_ahash_tfm;
	int rc;

	if (algo < 0 || algo >= HASH_ALGO__LAST)
		algo = ima_hash_algo;

	if (algo != ima_hash_algo || !tfm) {
		tfm = crypto_alloc_ahash(hash_algo_name[algo], 0, 0);
		if (!IS_ERR(tfm)) {
			if (algo == ima_hash_algo)
				ima_ahash_tfm = tfm;
		} else {
			rc = PTR_ERR(tfm);
			pr_err("Can not allocate %s (reason: %d)\n",
			       hash_algo_name[algo], rc);
		}
	}
	return tfm;
}

static void ima_free_atfm(struct crypto_ahash *tfm)
{
	if (tfm != ima_ahash_tfm)
		crypto_free_ahash(tfm);
}

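/*
 * ahash_wait - wait for an async hash request and return its status
 *
 * crypto_wait_req() turns -EINPROGRESS and -EBUSY into a sleep on @wait
 * until the request's completion callback fires; any other return code
 * is passed through unchanged.
 */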
static inline int ahash_wait(int err, struct crypto_wait *wait)
{
	err = crypto_wait_req(err, wait);

	if (err)
		pr_crit_ratelimited("ahash calculation failed: err: %d\n", err);

	return err;
}

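/*
 * ima_calc_file_hash_atfm - calculate a file hash using an async tfm
 *
 * Reads the file in chunks sized by ima_alloc_pages().  When a second
 * buffer can be allocated, I/O and hashing are pipelined: the next chunk
 * is read into one buffer while the previous ahash_update() runs on the
 * other.  With a single buffer, each update is awaited before the next
 * read.
 */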
static int ima_calc_file_hash_atfm(struct file *file,
				   struct ima_digest_data *hash,
				   struct crypto_ahash *tfm)
{
	loff_t i_size, offset;
	char *rbuf[2] = { NULL, };
	int rc, read = 0, rbuf_len, active = 0, ahash_rc = 0;
	struct ahash_request *req;
	struct scatterlist sg[1];
	struct crypto_wait wait;
	size_t rbuf_size[2];

	hash->length = crypto_ahash_digestsize(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	crypto_init_wait(&wait);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				   CRYPTO_TFM_REQ_MAY_SLEEP,
				   crypto_req_done, &wait);

	rc = ahash_wait(crypto_ahash_init(req), &wait);
	if (rc)
		goto out1;

	i_size = i_size_read(file_inode(file));

	if (i_size == 0)
		goto out2;

	/*
	 * Try to allocate maximum size of memory.
	 * Fail if even a single page cannot be allocated.
	 */
	rbuf[0] = ima_alloc_pages(i_size, &rbuf_size[0], 1);
	if (!rbuf[0]) {
		rc = -ENOMEM;
		goto out1;
	}

	/* Only allocate one buffer if that is enough. */
	if (i_size > rbuf_size[0]) {
		/*
		 * Try to allocate secondary buffer. If that fails fallback to
		 * using single buffering. Use previous memory allocation size
		 * as baseline for possible allocation size.
		 */
		rbuf[1] = ima_alloc_pages(i_size - rbuf_size[0],
					  &rbuf_size[1], 0);
	}

	if (!(file->f_mode & FMODE_READ)) {
		file->f_mode |= FMODE_READ;
		read = 1;
	}

	for (offset = 0; offset < i_size; offset += rbuf_len) {
		if (!rbuf[1] && offset) {
			/* Not using two buffers, and it is not the first
			 * read/request, wait for the completion of the
			 * previous ahash_update() request.
			 */
			rc = ahash_wait(ahash_rc, &wait);
			if (rc)
				goto out3;
		}
		/* read buffer */
		rbuf_len = min_t(loff_t, i_size - offset, rbuf_size[active]);
		rc = integrity_kernel_read(file, offset, rbuf[active],
					   rbuf_len);
		if (rc != rbuf_len) {
			if (rc >= 0)
				rc = -EINVAL;
			/*
			 * Forward current rc, do not overwrite with the
			 * return value from ahash_wait().
			 */
			ahash_wait(ahash_rc, &wait);
			goto out3;
		}

		if (rbuf[1] && offset) {
			/* Using two buffers, and it is not the first
			 * read/request, wait for the completion of the
			 * previous ahash_update() request.
			 */
			rc = ahash_wait(ahash_rc, &wait);
			if (rc)
				goto out3;
		}

		sg_init_one(&sg[0], rbuf[active], rbuf_len);
		ahash_request_set_crypt(req, sg, NULL, rbuf_len);

		ahash_rc = crypto_ahash_update(req);

		if (rbuf[1])
			active = !active; /* swap buffers, if we use two */
	}
	/* wait for the last update request to complete */
	rc = ahash_wait(ahash_rc, &wait);
out3:
	if (read)
		file->f_mode &= ~FMODE_READ;
	ima_free_pages(rbuf[0], rbuf_size[0]);
	ima_free_pages(rbuf[1], rbuf_size[1]);
out2:
	if (!rc) {
		ahash_request_set_crypt(req, NULL, hash->digest, 0);
		rc = ahash_wait(crypto_ahash_final(req), &wait);
	}
out1:
	ahash_request_free(req);
	return rc;
}

static int ima_calc_file_ahash(struct file *file, struct ima_digest_data *hash)
{
	struct crypto_ahash *tfm;
	int rc;

	tfm = ima_alloc_atfm(hash->algo);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	rc = ima_calc_file_hash_atfm(file, hash, tfm);

	ima_free_atfm(tfm);

	return rc;
}

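/*
 * ima_calc_file_hash_tfm - calculate a file hash using a sync tfm,
 * reading and hashing the file one page at a time.
 */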
static int ima_calc_file_hash_tfm(struct file *file,
				  struct ima_digest_data *hash,
				  struct crypto_shash *tfm)
{
	loff_t i_size, offset = 0;
	char *rbuf;
	int rc, read = 0;
	SHASH_DESC_ON_STACK(shash, tfm);

	shash->tfm = tfm;
	shash->flags = 0;

	hash->length = crypto_shash_digestsize(tfm);

	rc = crypto_shash_init(shash);
	if (rc != 0)
		return rc;

	i_size = i_size_read(file_inode(file));

	if (i_size == 0)
		goto out;

	rbuf = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!rbuf)
		return -ENOMEM;

	if (!(file->f_mode & FMODE_READ)) {
		file->f_mode |= FMODE_READ;
		read = 1;
	}

	while (offset < i_size) {
		int rbuf_len;

		rbuf_len = integrity_kernel_read(file, offset, rbuf, PAGE_SIZE);
		if (rbuf_len < 0) {
			rc = rbuf_len;
			break;
		}
		if (rbuf_len == 0) {	/* unexpected EOF */
			rc = -EINVAL;
			break;
		}
		offset += rbuf_len;

		rc = crypto_shash_update(shash, rbuf, rbuf_len);
		if (rc)
			break;
	}
	if (read)
		file->f_mode &= ~FMODE_READ;
	kfree(rbuf);
out:
	if (!rc)
		rc = crypto_shash_final(shash, hash->digest);
	return rc;
}

static int ima_calc_file_shash(struct file *file, struct ima_digest_data *hash)
{
	struct crypto_shash *tfm;
	int rc;

	tfm = ima_alloc_tfm(hash->algo);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	rc = ima_calc_file_hash_tfm(file, hash, tfm);

	ima_free_tfm(tfm);

	return rc;
}

/*
 * ima_calc_file_hash - calculate file hash
 *
 * Asynchronous hash (ahash) allows using HW acceleration for calculating
 * a hash. ahash performance varies for different data sizes on different
 * crypto accelerators. shash performance might be better for smaller files.
 * The 'ima.ahash_minsize' module parameter allows specifying the best
 * minimum file size for using ahash on the system.
 *
 * If the ima.ahash_minsize parameter is not specified, this function uses
 * shash for the hash calculation.  If ahash fails, it falls back to using
 * shash.
 */
int ima_calc_file_hash(struct file *file, struct ima_digest_data *hash)
{
	loff_t i_size;
	int rc;

	/*
	 * For consistency, fail files opened with the O_DIRECT flag on
	 * filesystems mounted with and without the DAX option.
	 */
	if (file->f_flags & O_DIRECT) {
		hash->length = hash_digest_size[ima_hash_algo];
		hash->algo = ima_hash_algo;
		return -EINVAL;
	}

	i_size = i_size_read(file_inode(file));

	if (ima_ahash_minsize && i_size >= ima_ahash_minsize) {
		rc = ima_calc_file_ahash(file, hash);
		if (!rc)
			return 0;
	}

	return ima_calc_file_shash(file, hash);
}
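
/*
 * Caller sketch (hypothetical, for illustration only; actual callers
 * live elsewhere in IMA): digest space is reserved directly after
 * struct ima_digest_data, e.g.
 *
 *	struct {
 *		struct ima_digest_data hdr;
 *		char digest[IMA_MAX_DIGEST_SIZE];
 *	} h = { .hdr.algo = ima_hash_algo };
 *	int rc = ima_calc_file_hash(file, &h.hdr);
 *
 * so that hash->digest has room for the largest supported digest.
 */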

/*
 * Calculate the hash of template data.
 *
 * For templates other than the original "ima" template, each field's
 * length is hashed before its data; for the "ima" template, the event
 * name field is zero-padded to IMA_EVENT_NAME_LEN_MAX + 1 bytes before
 * being hashed.
 */
static int ima_calc_field_array_hash_tfm(struct ima_field_data *field_data,
					 struct ima_template_desc *td,
					 int num_fields,
					 struct ima_digest_data *hash,
					 struct crypto_shash *tfm)
{
	SHASH_DESC_ON_STACK(shash, tfm);
	int rc, i;

	shash->tfm = tfm;
	shash->flags = 0;

	hash->length = crypto_shash_digestsize(tfm);

	rc = crypto_shash_init(shash);
	if (rc != 0)
		return rc;

	for (i = 0; i < num_fields; i++) {
		u8 buffer[IMA_EVENT_NAME_LEN_MAX + 1] = { 0 };
		u8 *data_to_hash = field_data[i].data;
		u32 datalen = field_data[i].len;
		u32 datalen_to_hash =
		    !ima_canonical_fmt ? datalen : cpu_to_le32(datalen);

		if (strcmp(td->name, IMA_TEMPLATE_IMA_NAME) != 0) {
			rc = crypto_shash_update(shash,
						(const u8 *) &datalen_to_hash,
						sizeof(datalen_to_hash));
			if (rc)
				break;
		} else if (strcmp(td->fields[i]->field_id, "n") == 0) {
			memcpy(buffer, data_to_hash, datalen);
			data_to_hash = buffer;
			datalen = IMA_EVENT_NAME_LEN_MAX + 1;
		}
		rc = crypto_shash_update(shash, data_to_hash, datalen);
		if (rc)
			break;
	}

	if (!rc)
		rc = crypto_shash_final(shash, hash->digest);

	return rc;
}

int ima_calc_field_array_hash(struct ima_field_data *field_data,
			      struct ima_template_desc *desc, int num_fields,
			      struct ima_digest_data *hash)
{
	struct crypto_shash *tfm;
	int rc;

	tfm = ima_alloc_tfm(hash->algo);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	rc = ima_calc_field_array_hash_tfm(field_data, desc, num_fields,
					   hash, tfm);

	ima_free_tfm(tfm);

	return rc;
}

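/* Single-shot ahash of an in-memory buffer: init, one update, final. */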
static int calc_buffer_ahash_atfm(const void *buf, loff_t len,
				  struct ima_digest_data *hash,
				  struct crypto_ahash *tfm)
{
	struct ahash_request *req;
	struct scatterlist sg;
	struct crypto_wait wait;
	int rc, ahash_rc = 0;

	hash->length = crypto_ahash_digestsize(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	crypto_init_wait(&wait);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				   CRYPTO_TFM_REQ_MAY_SLEEP,
				   crypto_req_done, &wait);

	rc = ahash_wait(crypto_ahash_init(req), &wait);
	if (rc)
		goto out;

	sg_init_one(&sg, buf, len);
	ahash_request_set_crypt(req, &sg, NULL, len);

	ahash_rc = crypto_ahash_update(req);

	/* wait for the update request to complete */
	rc = ahash_wait(ahash_rc, &wait);
	if (!rc) {
		ahash_request_set_crypt(req, NULL, hash->digest, 0);
		rc = ahash_wait(crypto_ahash_final(req), &wait);
	}
out:
	ahash_request_free(req);
	return rc;
}

static int calc_buffer_ahash(const void *buf, loff_t len,
			     struct ima_digest_data *hash)
{
	struct crypto_ahash *tfm;
	int rc;

	tfm = ima_alloc_atfm(hash->algo);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	rc = calc_buffer_ahash_atfm(buf, len, hash, tfm);

	ima_free_atfm(tfm);

	return rc;
}

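/*
 * calc_buffer_shash_tfm - hash an in-memory buffer with a sync tfm,
 * feeding it to crypto_shash_update() in page-sized chunks.
 */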
static int calc_buffer_shash_tfm(const void *buf, loff_t size,
				struct ima_digest_data *hash,
				struct crypto_shash *tfm)
{
	SHASH_DESC_ON_STACK(shash, tfm);
	unsigned int len;
	int rc;

	shash->tfm = tfm;
	shash->flags = 0;

	hash->length = crypto_shash_digestsize(tfm);

	rc = crypto_shash_init(shash);
	if (rc != 0)
		return rc;

	while (size) {
		len = size < PAGE_SIZE ? size : PAGE_SIZE;
		rc = crypto_shash_update(shash, buf, len);
		if (rc)
			break;
		buf += len;
		size -= len;
	}

	if (!rc)
		rc = crypto_shash_final(shash, hash->digest);
	return rc;
}

static int calc_buffer_shash(const void *buf, loff_t len,
			     struct ima_digest_data *hash)
{
	struct crypto_shash *tfm;
	int rc;

	tfm = ima_alloc_tfm(hash->algo);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	rc = calc_buffer_shash_tfm(buf, len, hash, tfm);

	ima_free_tfm(tfm);
	return rc;
}

int ima_calc_buffer_hash(const void *buf, loff_t len,
			 struct ima_digest_data *hash)
{
	int rc;

	if (ima_ahash_minsize && len >= ima_ahash_minsize) {
		rc = calc_buffer_ahash(buf, len, hash);
		if (!rc)
			return 0;
	}

	return calc_buffer_shash(buf, len, hash);
}

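/* Read TPM PCR @idx into @pcr; a no-op when no TPM chip is in use. */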
static void __init ima_pcrread(int idx, u8 *pcr)
{
	if (!ima_used_chip)
		return;

	if (tpm_pcr_read(NULL, idx, pcr) != 0)
		pr_err("Error Communicating to TPM chip\n");
}

/*
 * Calculate the boot aggregate hash: a cumulative digest over TPM PCRs
 * 0-7, recorded as the first entry in the IMA measurement list.
 */
static int __init ima_calc_boot_aggregate_tfm(char *digest,
					      struct crypto_shash *tfm)
{
	u8 pcr_i[TPM_DIGEST_SIZE];
	int rc, i;
	SHASH_DESC_ON_STACK(shash, tfm);

	shash->tfm = tfm;
	shash->flags = 0;

	rc = crypto_shash_init(shash);
	if (rc != 0)
		return rc;

	/* cumulative sha1 over tpm registers 0-7 */
	for (i = TPM_PCR0; i < TPM_PCR8; i++) {
		ima_pcrread(i, pcr_i);
		/* now accumulate with current aggregate */
		rc = crypto_shash_update(shash, pcr_i, TPM_DIGEST_SIZE);
	}
	if (!rc)
		rc = crypto_shash_final(shash, digest);
	return rc;
}

int __init ima_calc_boot_aggregate(struct ima_digest_data *hash)
{
	struct crypto_shash *tfm;
	int rc;

	tfm = ima_alloc_tfm(hash->algo);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	hash->length = crypto_shash_digestsize(tfm);
	rc = ima_calc_boot_aggregate_tfm(hash->digest, tfm);

	ima_free_tfm(tfm);

	return rc;
}