/*
 * Copyright (C) 2005,2006,2007,2008 IBM Corporation
 *
 * Authors:
 * Serge Hallyn <serue@us.ibm.com>
 * Reiner Sailer <sailer@watson.ibm.com>
 * Mimi Zohar <zohar@us.ibm.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, version 2 of the
 * License.
 *
 * File: ima_queue.c
 *       Implements queues that store template measurements and
 *       maintain an aggregate over the stored measurements
 *       in the pre-configured TPM PCR (if available).
 *       The measurement list is append-only: no entry is
 *       ever removed or changed during the boot cycle.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/rculist.h>
#include <linux/slab.h>
#include "ima.h"

#define AUDIT_CAUSE_LEN_MAX 32

/* pre-allocated array of tpm_digest structures to extend a PCR */
static struct tpm_digest *digests;

LIST_HEAD(ima_measurements);	/* list of all measurements */
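
/*
 * Size of the serialized binary_runtime_measurements list. It is only
 * maintained when the list can be carried across a kexec; otherwise it
 * is pinned to ULONG_MAX so ima_add_digest_entry() skips the
 * accounting entirely.
 */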
#ifdef CONFIG_IMA_KEXEC
static unsigned long binary_runtime_size;
#else
static unsigned long binary_runtime_size = ULONG_MAX;
#endif

/* hash table of measurement entries, keyed by the template digest */
struct ima_h_table ima_htable = {
	.len = ATOMIC_LONG_INIT(0),
	.violations = ATOMIC_LONG_INIT(0),
	.queue[0 ... IMA_MEASURE_HTABLE_SIZE - 1] = HLIST_HEAD_INIT
};

/*
 * Mutex protects atomicity of extending the measurement list
 * and extending the TPM PCR aggregate. Since tpm_pcr_extend() can
 * take a long time (and the TPM driver itself uses a mutex), a
 * spinlock cannot be used here.
 */
static DEFINE_MUTEX(ima_extend_list_mutex);

/* look up the digest value in the hash table and return the entry, or NULL */
static struct ima_queue_entry *ima_lookup_digest_entry(u8 *digest_value,
						       int pcr)
{
	struct ima_queue_entry *qe, *ret = NULL;
	unsigned int key;
	int rc;

	key = ima_hash_key(digest_value);
	rcu_read_lock();
	hlist_for_each_entry_rcu(qe, &ima_htable.queue[key], hnext) {
		rc = memcmp(qe->entry->digest, digest_value, TPM_DIGEST_SIZE);
		if ((rc == 0) && (qe->entry->pcr == pcr)) {
			ret = qe;
			break;
		}
	}
	rcu_read_unlock();
	return ret;
}

/*
 * Calculate the memory required for serializing a single
 * binary_runtime_measurements list entry, which contains a
 * couple of variable-length fields (e.g., the template name and data).
 */
static int get_binary_runtime_size(struct ima_template_entry *entry)
{
	int size = 0;

	size += sizeof(u32);	/* pcr */
	size += sizeof(entry->digest);	/* entry digest */
	size += sizeof(int);	/* template name size field */
	size += strlen(entry->template_desc->name);	/* template name */
	size += sizeof(entry->template_data_len);	/* data size field */
	size += entry->template_data_len;	/* template data */
	return size;
}

/* ima_add_template_entry() helper function:
 * - Add the template entry to the measurement list and, for all
 *   entries except those carried across kexec, to the hash table.
 *
 * (Called with ima_extend_list_mutex held.)
 */
static int ima_add_digest_entry(struct ima_template_entry *entry,
				bool update_htable)
{
	struct ima_queue_entry *qe;
	unsigned int key;

	qe = kmalloc(sizeof(*qe), GFP_KERNEL);
	if (!qe) {
		pr_err("out of memory creating queue entry\n");
		return -ENOMEM;
	}
	qe->entry = entry;

	INIT_LIST_HEAD(&qe->later);
	list_add_tail_rcu(&qe->later, &ima_measurements);

	atomic_long_inc(&ima_htable.len);
	if (update_htable) {
		key = ima_hash_key(entry->digest);
		hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]);
	}

	if (binary_runtime_size != ULONG_MAX) {
		int size;

		/* accumulate the serialized size, saturating at ULONG_MAX */
		size = get_binary_runtime_size(entry);
		binary_runtime_size = (binary_runtime_size < ULONG_MAX - size) ?
		     binary_runtime_size + size : ULONG_MAX;
	}
	return 0;
}

/*
 * Return the amount of memory required for serializing the
 * entire binary_runtime_measurements list, including the ima_kexec_hdr
 * structure.
 */
unsigned long ima_get_binary_runtime_size(void)
{
	if (binary_runtime_size >= (ULONG_MAX - sizeof(struct ima_kexec_hdr)))
		return ULONG_MAX;
	else
		return binary_runtime_size + sizeof(struct ima_kexec_hdr);
}

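/*
 * Extend the configured PCR with the measurement digest. The same
 * digest value is copied into every allocated TPM bank; returns 0 on
 * success or when no TPM chip is available.
 */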
static int ima_pcr_extend(const u8 *hash, int pcr)
{
	int result = 0;
	int i;

	if (!ima_tpm_chip)
		return result;

	for (i = 0; i < ima_tpm_chip->nr_allocated_banks; i++)
		memcpy(digests[i].digest, hash, TPM_DIGEST_SIZE);

	result = tpm_pcr_extend(ima_tpm_chip, pcr, digests);
	if (result != 0)
		pr_err("Error communicating with TPM chip, result: %d\n",
		       result);
	return result;
}

/*
 * Add template entry to the measurement list and hash table, and
 * extend the pcr.
 *
 * On systems which support carrying the IMA measurement list across
 * kexec, maintain the total memory size required for serializing the
 * binary_runtime_measurements.
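 *
 * Duplicate measurements (same digest, same PCR) are not re-added and
 * return -EEXIST. For violations, the PCR is invalidated by extending
 * it with 0xff bytes in place of the entry digest.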
 */
int ima_add_template_entry(struct ima_template_entry *entry, int violation,
			   const char *op, struct inode *inode,
			   const unsigned char *filename)
{
	u8 digest[TPM_DIGEST_SIZE];
	const char *audit_cause = "hash_added";
	char tpm_audit_cause[AUDIT_CAUSE_LEN_MAX];
	int audit_info = 1;
	int result = 0, tpmresult = 0;

	mutex_lock(&ima_extend_list_mutex);
	if (!violation) {
		memcpy(digest, entry->digest, sizeof(digest));
		if (ima_lookup_digest_entry(digest, entry->pcr)) {
			audit_cause = "hash_exists";
			result = -EEXIST;
			goto out;
		}
	}

	result = ima_add_digest_entry(entry, true);
	if (result < 0) {
		audit_cause = "ENOMEM";
		audit_info = 0;
		goto out;
	}

	if (violation)		/* invalidate pcr */
		memset(digest, 0xff, sizeof(digest));

	tpmresult = ima_pcr_extend(digest, entry->pcr);
	if (tpmresult != 0) {
		snprintf(tpm_audit_cause, AUDIT_CAUSE_LEN_MAX, "TPM_error(%d)",
			 tpmresult);
		audit_cause = tpm_audit_cause;
		audit_info = 0;
	}
out:
	mutex_unlock(&ima_extend_list_mutex);
	integrity_audit_msg(AUDIT_INTEGRITY_PCR, inode, filename,
			    op, audit_cause, result, audit_info);
	return result;
}

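/*
 * Restore a measurement entry carried across kexec. The digest was
 * extended into the TPM before the kexec, so the PCR is not extended
 * again here, and the entry is not added to the hash table.
 */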
int ima_restore_measurement_entry(struct ima_template_entry *entry)
{
	int result = 0;

	mutex_lock(&ima_extend_list_mutex);
	result = ima_add_digest_entry(entry, false);
	mutex_unlock(&ima_extend_list_mutex);
	return result;
}

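/*
 * Pre-allocate one tpm_digest per allocated TPM bank and record each
 * bank's algorithm ID, so that ima_pcr_extend() can reuse the array
 * for every extend operation.
 */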
int __init ima_init_digests(void)
{
	int i;

	if (!ima_tpm_chip)
		return 0;

	digests = kcalloc(ima_tpm_chip->nr_allocated_banks, sizeof(*digests),
			  GFP_NOFS);
	if (!digests)
		return -ENOMEM;

	for (i = 0; i < ima_tpm_chip->nr_allocated_banks; i++)
		digests[i].alg_id = ima_tpm_chip->allocated_banks[i].alg_id;

	return 0;
}