xref: /openbmc/linux/fs/verity/hash_algs.c (revision 67f3c209)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * fs-verity hash algorithms
4  *
5  * Copyright 2019 Google LLC
6  */
7 
8 #include "fsverity_private.h"
9 
10 #include <crypto/hash.h>
11 #include <linux/scatterlist.h>
12 
/*
 * The hash algorithms supported by fs-verity, indexed by the
 * FS_VERITY_HASH_ALG_* number.  Entries not listed here are left
 * zero-initialized (NULL ->name), which fsverity_get_hash_alg() treats as
 * "unknown algorithm".  The ->tfm and ->req_pool members are not set here;
 * they are initialized on first use by fsverity_get_hash_alg().
 */
struct fsverity_hash_alg fsverity_hash_algs[] = {
	[FS_VERITY_HASH_ALG_SHA256] = {
		.name = "sha256",
		.digest_size = SHA256_DIGEST_SIZE,
		.block_size = SHA256_BLOCK_SIZE,
		.algo_id = HASH_ALGO_SHA256,
	},
	[FS_VERITY_HASH_ALG_SHA512] = {
		.name = "sha512",
		.digest_size = SHA512_DIGEST_SIZE,
		.block_size = SHA512_BLOCK_SIZE,
		.algo_id = HASH_ALGO_SHA512,
	},
};
28 
29 static DEFINE_MUTEX(fsverity_hash_alg_init_mutex);
30 
/**
 * fsverity_get_hash_alg() - validate and prepare a hash algorithm
 * @inode: optional inode for logging purposes
 * @num: the hash algorithm number
 *
 * Get the struct fsverity_hash_alg for the given hash algorithm number, and
 * ensure it has a hash transform ready to go.  The hash transforms are
 * allocated on-demand so that we don't waste resources unnecessarily, and
 * because the crypto modules may be initialized later than fs/verity/.
 *
 * Return: pointer to the hash alg on success, else an ERR_PTR().
 *	   -EINVAL if @num is unknown, -ENOPKG if the crypto API lacks
 *	   support for the algorithm, or another errno from the crypto API.
 */
struct fsverity_hash_alg *fsverity_get_hash_alg(const struct inode *inode,
						unsigned int num)
{
	struct fsverity_hash_alg *alg;
	struct crypto_ahash *tfm;
	int err;

	/* Reject out-of-range numbers as well as gaps in the table. */
	if (num >= ARRAY_SIZE(fsverity_hash_algs) ||
	    !fsverity_hash_algs[num].name) {
		fsverity_warn(inode, "Unknown hash algorithm number: %u", num);
		return ERR_PTR(-EINVAL);
	}
	alg = &fsverity_hash_algs[num];

	/*
	 * Fast path: already initialized.  The acquire load pairs with the
	 * smp_store_release() below, so a task that observes a non-NULL ->tfm
	 * also observes the req_pool initialization that preceded the store.
	 */
	if (likely(smp_load_acquire(&alg->tfm) != NULL))
		return alg;

	mutex_lock(&fsverity_hash_alg_init_mutex);

	/* Re-check under the mutex: another task may have won the race. */
	if (alg->tfm != NULL)
		goto out_unlock;

	/*
	 * Using the shash API would make things a bit simpler, but the ahash
	 * API is preferable as it allows the use of crypto accelerators.
	 */
	tfm = crypto_alloc_ahash(alg->name, 0, 0);
	if (IS_ERR(tfm)) {
		/* -ENOENT means no driver provides this algorithm. */
		if (PTR_ERR(tfm) == -ENOENT) {
			fsverity_warn(inode,
				      "Missing crypto API support for hash algorithm \"%s\"",
				      alg->name);
			alg = ERR_PTR(-ENOPKG);
			goto out_unlock;
		}
		fsverity_err(inode,
			     "Error allocating hash algorithm \"%s\": %ld",
			     alg->name, PTR_ERR(tfm));
		alg = ERR_CAST(tfm);
		goto out_unlock;
	}

	/* The table's sizes must match what the crypto driver reports. */
	err = -EINVAL;
	if (WARN_ON(alg->digest_size != crypto_ahash_digestsize(tfm)))
		goto err_free_tfm;
	if (WARN_ON(alg->block_size != crypto_ahash_blocksize(tfm)))
		goto err_free_tfm;

	/*
	 * Reserve one pre-allocated request so that
	 * fsverity_alloc_hash_request() can never fail when it may sleep.
	 */
	err = mempool_init_kmalloc_pool(&alg->req_pool, 1,
					sizeof(struct ahash_request) +
					crypto_ahash_reqsize(tfm));
	if (err)
		goto err_free_tfm;

	pr_info("%s using implementation \"%s\"\n",
		alg->name, crypto_ahash_driver_name(tfm));

	/* Publish the tfm; pairs with smp_load_acquire() above. */
	smp_store_release(&alg->tfm, tfm);
	goto out_unlock;

err_free_tfm:
	crypto_free_ahash(tfm);
	alg = ERR_PTR(err);
out_unlock:
	mutex_unlock(&fsverity_hash_alg_init_mutex);
	return alg;
}
112 
113 /**
114  * fsverity_alloc_hash_request() - allocate a hash request object
115  * @alg: the hash algorithm for which to allocate the request
116  * @gfp_flags: memory allocation flags
117  *
118  * This is mempool-backed, so this never fails if __GFP_DIRECT_RECLAIM is set in
119  * @gfp_flags.  However, in that case this might need to wait for all
120  * previously-allocated requests to be freed.  So to avoid deadlocks, callers
121  * must never need multiple requests at a time to make forward progress.
122  *
123  * Return: the request object on success; NULL on failure (but see above)
124  */
125 struct ahash_request *fsverity_alloc_hash_request(struct fsverity_hash_alg *alg,
126 						  gfp_t gfp_flags)
127 {
128 	struct ahash_request *req = mempool_alloc(&alg->req_pool, gfp_flags);
129 
130 	if (req)
131 		ahash_request_set_tfm(req, alg->tfm);
132 	return req;
133 }
134 
135 /**
136  * fsverity_free_hash_request() - free a hash request object
137  * @alg: the hash algorithm
138  * @req: the hash request object to free
139  */
140 void fsverity_free_hash_request(struct fsverity_hash_alg *alg,
141 				struct ahash_request *req)
142 {
143 	if (req) {
144 		ahash_request_zero(req);
145 		mempool_free(req, &alg->req_pool);
146 	}
147 }
148 
/**
 * fsverity_prepare_hash_state() - precompute the initial hash state
 * @alg: hash algorithm
 * @salt: a salt which is to be prepended to all data to be hashed
 * @salt_size: salt size in bytes, possibly 0
 *
 * Return: NULL if the salt is empty, otherwise the kmalloc()'ed precomputed
 *	   initial hash state on success or an ERR_PTR() on failure.
 */
const u8 *fsverity_prepare_hash_state(struct fsverity_hash_alg *alg,
				      const u8 *salt, size_t salt_size)
{
	u8 *hashstate = NULL;
	struct ahash_request *req = NULL;
	u8 *padded_salt = NULL;
	size_t padded_salt_size;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	/* No salt => no precomputed state needed. */
	if (salt_size == 0)
		return NULL;

	/* Buffer for the exported (serialized) partial hash state. */
	hashstate = kmalloc(crypto_ahash_statesize(alg->tfm), GFP_KERNEL);
	if (!hashstate)
		return ERR_PTR(-ENOMEM);

	/* This allocation never fails, since it's mempool-backed. */
	req = fsverity_alloc_hash_request(alg, GFP_KERNEL);

	/*
	 * Zero-pad the salt to the next multiple of the input size of the hash
	 * algorithm's compression function, e.g. 64 bytes for SHA-256 or 128
	 * bytes for SHA-512.  This ensures that the hash algorithm won't have
	 * any bytes buffered internally after processing the salt, thus making
	 * salted hashing just as fast as unsalted hashing.
	 */
	padded_salt_size = round_up(salt_size, alg->block_size);
	padded_salt = kzalloc(padded_salt_size, GFP_KERNEL);
	if (!padded_salt) {
		err = -ENOMEM;
		goto err_free;
	}
	memcpy(padded_salt, salt, salt_size);

	/* Feed the padded salt through init + update, then export the state. */
	sg_init_one(&sg, padded_salt, padded_salt_size);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
					CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);
	ahash_request_set_crypt(req, &sg, NULL, padded_salt_size);

	err = crypto_wait_req(crypto_ahash_init(req), &wait);
	if (err)
		goto err_free;

	err = crypto_wait_req(crypto_ahash_update(req), &wait);
	if (err)
		goto err_free;

	/* Serialize the partial state so it can be re-imported per hash. */
	err = crypto_ahash_export(req, hashstate);
	if (err)
		goto err_free;
out:
	/* Shared exit path: req and padded_salt are freed on success and
	 * failure alike; on failure hashstate has been replaced by ERR_PTR. */
	fsverity_free_hash_request(alg, req);
	kfree(padded_salt);
	return hashstate;

err_free:
	kfree(hashstate);
	hashstate = ERR_PTR(err);
	goto out;
}
221 
/**
 * fsverity_hash_page() - hash a single data or hash page
 * @params: the Merkle tree's parameters
 * @inode: inode for which the hashing is being done
 * @req: preallocated hash request
 * @page: the page to hash
 * @out: output digest, size 'params->digest_size' bytes
 *
 * Hash a single data or hash block, assuming block_size == PAGE_SIZE.
 * The hash is salted if a salt is specified in the Merkle tree parameters.
 *
 * Return: 0 on success, -errno on failure
 */
int fsverity_hash_page(const struct merkle_tree_params *params,
		       const struct inode *inode,
		       struct ahash_request *req, struct page *page, u8 *out)
{
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	/* Only block_size == PAGE_SIZE is supported by this function. */
	if (WARN_ON(params->block_size != PAGE_SIZE))
		return -EINVAL;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, page, PAGE_SIZE, 0);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
					CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);
	ahash_request_set_crypt(req, &sg, out, PAGE_SIZE);

	if (params->hashstate) {
		/*
		 * Salted case: restore the precomputed salted state, then
		 * finish the hash over the page with a single finup
		 * (update + final) call.
		 */
		err = crypto_ahash_import(req, params->hashstate);
		if (err) {
			fsverity_err(inode,
				     "Error %d importing hash state", err);
			return err;
		}
		err = crypto_ahash_finup(req);
	} else {
		/* Unsalted case: one-shot digest of the page. */
		err = crypto_ahash_digest(req);
	}

	/* The request may complete asynchronously; wait for the result. */
	err = crypto_wait_req(err, &wait);
	if (err)
		fsverity_err(inode, "Error %d computing page hash", err);
	return err;
}
270 
271 /**
272  * fsverity_hash_buffer() - hash some data
273  * @alg: the hash algorithm to use
274  * @data: the data to hash
275  * @size: size of data to hash, in bytes
276  * @out: output digest, size 'alg->digest_size' bytes
277  *
278  * Hash some data which is located in physically contiguous memory (i.e. memory
279  * allocated by kmalloc(), not by vmalloc()).  No salt is used.
280  *
281  * Return: 0 on success, -errno on failure
282  */
283 int fsverity_hash_buffer(struct fsverity_hash_alg *alg,
284 			 const void *data, size_t size, u8 *out)
285 {
286 	struct ahash_request *req;
287 	struct scatterlist sg;
288 	DECLARE_CRYPTO_WAIT(wait);
289 	int err;
290 
291 	/* This allocation never fails, since it's mempool-backed. */
292 	req = fsverity_alloc_hash_request(alg, GFP_KERNEL);
293 
294 	sg_init_one(&sg, data, size);
295 	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
296 					CRYPTO_TFM_REQ_MAY_BACKLOG,
297 				   crypto_req_done, &wait);
298 	ahash_request_set_crypt(req, &sg, out, size);
299 
300 	err = crypto_wait_req(crypto_ahash_digest(req), &wait);
301 
302 	fsverity_free_hash_request(alg, req);
303 	return err;
304 }
305 
306 void __init fsverity_check_hash_algs(void)
307 {
308 	size_t i;
309 
310 	/*
311 	 * Sanity check the hash algorithms (could be a build-time check, but
312 	 * they're in an array)
313 	 */
314 	for (i = 0; i < ARRAY_SIZE(fsverity_hash_algs); i++) {
315 		const struct fsverity_hash_alg *alg = &fsverity_hash_algs[i];
316 
317 		if (!alg->name)
318 			continue;
319 
320 		BUG_ON(alg->digest_size > FS_VERITY_MAX_DIGEST_SIZE);
321 
322 		/*
323 		 * For efficiency, the implementation currently assumes the
324 		 * digest and block sizes are powers of 2.  This limitation can
325 		 * be lifted if the code is updated to handle other values.
326 		 */
327 		BUG_ON(!is_power_of_2(alg->digest_size));
328 		BUG_ON(!is_power_of_2(alg->block_size));
329 
330 		/* Verify that there is a valid mapping to HASH_ALGO_*. */
331 		BUG_ON(alg->algo_id == 0);
332 		BUG_ON(alg->digest_size != hash_digest_size[alg->algo_id]);
333 	}
334 }
335