xref: /openbmc/linux/fs/verity/hash_algs.c (revision 6f2bde9b)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * fs-verity hash algorithms
4  *
5  * Copyright 2019 Google LLC
6  */
7 
8 #include "fsverity_private.h"
9 
10 #include <crypto/hash.h>
11 #include <linux/scatterlist.h>
12 
13 /* The hash algorithms supported by fs-verity */
/*
 * Indexed by the FS_VERITY_HASH_ALG_* number.  Entries with a NULL ->name are
 * unsupported.  The ->tfm and ->req_pool members are initialized lazily by
 * fsverity_get_hash_alg() the first time an algorithm is used.
 */
14 struct fsverity_hash_alg fsverity_hash_algs[] = {
15 	[FS_VERITY_HASH_ALG_SHA256] = {
16 		.name = "sha256",
17 		.digest_size = SHA256_DIGEST_SIZE,
18 		.block_size = SHA256_BLOCK_SIZE,
19 		.algo_id = HASH_ALGO_SHA256,
20 	},
21 	[FS_VERITY_HASH_ALG_SHA512] = {
22 		.name = "sha512",
23 		.digest_size = SHA512_DIGEST_SIZE,
24 		.block_size = SHA512_BLOCK_SIZE,
25 		.algo_id = HASH_ALGO_SHA512,
26 	},
27 };
28 
/* Serializes the lazy initialization of entries in fsverity_hash_algs[] */
29 static DEFINE_MUTEX(fsverity_hash_alg_init_mutex);
30 
31 /**
32  * fsverity_get_hash_alg() - validate and prepare a hash algorithm
33  * @inode: optional inode for logging purposes
34  * @num: the hash algorithm number
35  *
36  * Get the struct fsverity_hash_alg for the given hash algorithm number, and
37  * ensure it has a hash transform ready to go.  The hash transforms are
38  * allocated on-demand so that we don't waste resources unnecessarily, and
39  * because the crypto modules may be initialized later than fs/verity/.
40  *
41  * Return: pointer to the hash alg on success, else an ERR_PTR()
42  */
43 struct fsverity_hash_alg *fsverity_get_hash_alg(const struct inode *inode,
44 						unsigned int num)
45 {
46 	struct fsverity_hash_alg *alg;
47 	struct crypto_ahash *tfm;
48 	int err;
49 
50 	if (num >= ARRAY_SIZE(fsverity_hash_algs) ||
51 	    !fsverity_hash_algs[num].name) {
52 		fsverity_warn(inode, "Unknown hash algorithm number: %u", num);
53 		return ERR_PTR(-EINVAL);
54 	}
55 	alg = &fsverity_hash_algs[num];
56 
	/* Fast path: the transform was already set up by an earlier call. */
57 	/* pairs with smp_store_release() below */
58 	if (likely(smp_load_acquire(&alg->tfm) != NULL))
59 		return alg;
60 
61 	mutex_lock(&fsverity_hash_alg_init_mutex);
62 
	/* Re-check under the mutex; another task may have won the race. */
63 	if (alg->tfm != NULL)
64 		goto out_unlock;
65 
66 	/*
67 	 * Using the shash API would make things a bit simpler, but the ahash
68 	 * API is preferable as it allows the use of crypto accelerators.
69 	 */
70 	tfm = crypto_alloc_ahash(alg->name, 0, 0);
71 	if (IS_ERR(tfm)) {
		/*
		 * -ENOENT means the crypto API has no implementation of this
		 * algorithm; report that as a missing package (-ENOPKG).
		 */
72 		if (PTR_ERR(tfm) == -ENOENT) {
73 			fsverity_warn(inode,
74 				      "Missing crypto API support for hash algorithm \"%s\"",
75 				      alg->name);
76 			alg = ERR_PTR(-ENOPKG);
77 			goto out_unlock;
78 		}
79 		fsverity_err(inode,
80 			     "Error allocating hash algorithm \"%s\": %ld",
81 			     alg->name, PTR_ERR(tfm));
82 		alg = ERR_CAST(tfm);
83 		goto out_unlock;
84 	}
85 
	/* The sizes in fsverity_hash_algs[] must match the crypto API's. */
86 	err = -EINVAL;
87 	if (WARN_ON_ONCE(alg->digest_size != crypto_ahash_digestsize(tfm)))
88 		goto err_free_tfm;
89 	if (WARN_ON_ONCE(alg->block_size != crypto_ahash_blocksize(tfm)))
90 		goto err_free_tfm;
91 
	/*
	 * A mempool with min_nr=1 guarantees that at least one hash request
	 * can always be allocated; see fsverity_alloc_hash_request().
	 */
92 	err = mempool_init_kmalloc_pool(&alg->req_pool, 1,
93 					sizeof(struct ahash_request) +
94 					crypto_ahash_reqsize(tfm));
95 	if (err)
96 		goto err_free_tfm;
97 
98 	pr_info("%s using implementation \"%s\"\n",
99 		alg->name, crypto_ahash_driver_name(tfm));
100 
	/*
	 * Publish ->tfm only after everything else is initialized, so that
	 * lockless readers taking the fast path above see a fully-initialized
	 * algorithm.
	 */
101 	/* pairs with smp_load_acquire() above */
102 	smp_store_release(&alg->tfm, tfm);
103 	goto out_unlock;
104 
105 err_free_tfm:
106 	crypto_free_ahash(tfm);
107 	alg = ERR_PTR(err);
108 out_unlock:
109 	mutex_unlock(&fsverity_hash_alg_init_mutex);
110 	return alg;
111 }
112 
113 /**
114  * fsverity_alloc_hash_request() - allocate a hash request object
115  * @alg: the hash algorithm for which to allocate the request
116  * @gfp_flags: memory allocation flags
117  *
118  * This is mempool-backed, so this never fails if __GFP_DIRECT_RECLAIM is set in
119  * @gfp_flags.  However, in that case this might need to wait for all
120  * previously-allocated requests to be freed.  So to avoid deadlocks, callers
121  * must never need multiple requests at a time to make forward progress.
122  *
123  * Return: the request object on success; NULL on failure (but see above)
124  */
125 struct ahash_request *fsverity_alloc_hash_request(struct fsverity_hash_alg *alg,
126 						  gfp_t gfp_flags)
127 {
128 	struct ahash_request *req = mempool_alloc(&alg->req_pool, gfp_flags);
129 
130 	if (req)
131 		ahash_request_set_tfm(req, alg->tfm);
132 	return req;
133 }
134 
135 /**
136  * fsverity_free_hash_request() - free a hash request object
137  * @alg: the hash algorithm
138  * @req: the hash request object to free
139  */
140 void fsverity_free_hash_request(struct fsverity_hash_alg *alg,
141 				struct ahash_request *req)
142 {
143 	if (req) {
144 		ahash_request_zero(req);
145 		mempool_free(req, &alg->req_pool);
146 	}
147 }
148 
149 /**
150  * fsverity_prepare_hash_state() - precompute the initial hash state
151  * @alg: hash algorithm
152  * @salt: a salt which is to be prepended to all data to be hashed
153  * @salt_size: salt size in bytes, possibly 0
154  *
155  * Return: NULL if the salt is empty, otherwise the kmalloc()'ed precomputed
156  *	   initial hash state on success or an ERR_PTR() on failure.
157  */
158 const u8 *fsverity_prepare_hash_state(struct fsverity_hash_alg *alg,
159 				      const u8 *salt, size_t salt_size)
160 {
161 	u8 *hashstate = NULL;
162 	struct ahash_request *req = NULL;
163 	u8 *padded_salt = NULL;
164 	size_t padded_salt_size;
165 	struct scatterlist sg;
166 	DECLARE_CRYPTO_WAIT(wait);
167 	int err;
168 
	/* No salt => no precomputed state is needed. */
169 	if (salt_size == 0)
170 		return NULL;
171 
	/* Buffer for the exported hash state; its size depends on @alg. */
172 	hashstate = kmalloc(crypto_ahash_statesize(alg->tfm), GFP_KERNEL);
173 	if (!hashstate)
174 		return ERR_PTR(-ENOMEM);
175 
176 	/* This allocation never fails, since it's mempool-backed. */
177 	req = fsverity_alloc_hash_request(alg, GFP_KERNEL);
178 
179 	/*
180 	 * Zero-pad the salt to the next multiple of the input size of the hash
181 	 * algorithm's compression function, e.g. 64 bytes for SHA-256 or 128
182 	 * bytes for SHA-512.  This ensures that the hash algorithm won't have
183 	 * any bytes buffered internally after processing the salt, thus making
184 	 * salted hashing just as fast as unsalted hashing.
185 	 */
186 	padded_salt_size = round_up(salt_size, alg->block_size);
187 	padded_salt = kzalloc(padded_salt_size, GFP_KERNEL);
188 	if (!padded_salt) {
189 		err = -ENOMEM;
190 		goto err_free;
191 	}
192 	memcpy(padded_salt, salt, salt_size);
193 
	/* Hash the padded salt, then snapshot the transform's internal state. */
194 	sg_init_one(&sg, padded_salt, padded_salt_size);
195 	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
196 					CRYPTO_TFM_REQ_MAY_BACKLOG,
197 				   crypto_req_done, &wait);
198 	ahash_request_set_crypt(req, &sg, NULL, padded_salt_size);
199 
200 	err = crypto_wait_req(crypto_ahash_init(req), &wait);
201 	if (err)
202 		goto err_free;
203 
204 	err = crypto_wait_req(crypto_ahash_update(req), &wait);
205 	if (err)
206 		goto err_free;
207 
	/* Export the partial (salt-only) hash state into @hashstate. */
208 	err = crypto_ahash_export(req, hashstate);
209 	if (err)
210 		goto err_free;
	/* Common exit: frees the request and padded salt on all paths. */
211 out:
212 	fsverity_free_hash_request(alg, req);
213 	kfree(padded_salt);
214 	return hashstate;
215 
	/* On error, return an ERR_PTR() and reuse the common cleanup above. */
216 err_free:
217 	kfree(hashstate);
218 	hashstate = ERR_PTR(err);
219 	goto out;
220 }
221 
222 /**
223  * fsverity_hash_block() - hash a single data or hash block
224  * @params: the Merkle tree's parameters
225  * @inode: inode for which the hashing is being done
226  * @req: preallocated hash request
227  * @page: the page containing the block to hash
228  * @offset: the offset of the block within @page
229  * @out: output digest, size 'params->digest_size' bytes
230  *
231  * Hash a single data or hash block.  The hash is salted if a salt is specified
232  * in the Merkle tree parameters.
233  *
234  * Return: 0 on success, -errno on failure
235  */
236 int fsverity_hash_block(const struct merkle_tree_params *params,
237 			const struct inode *inode, struct ahash_request *req,
238 			struct page *page, unsigned int offset, u8 *out)
239 {
240 	struct scatterlist sg;
241 	DECLARE_CRYPTO_WAIT(wait);
242 	int err;
243 
244 	sg_init_table(&sg, 1);
245 	sg_set_page(&sg, page, params->block_size, offset);
246 	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
247 					CRYPTO_TFM_REQ_MAY_BACKLOG,
248 				   crypto_req_done, &wait);
249 	ahash_request_set_crypt(req, &sg, out, params->block_size);
250 
	/*
	 * If a salt is in use, import the precomputed salted state (from
	 * fsverity_prepare_hash_state()) and just finish the hash with the
	 * block's data; otherwise hash the block from scratch.
	 */
251 	if (params->hashstate) {
252 		err = crypto_ahash_import(req, params->hashstate);
253 		if (err) {
254 			fsverity_err(inode,
255 				     "Error %d importing hash state", err);
256 			return err;
257 		}
258 		err = crypto_ahash_finup(req);
259 	} else {
260 		err = crypto_ahash_digest(req);
261 	}
262 
	/* Wait for completion if the driver processed the request asynchronously. */
263 	err = crypto_wait_req(err, &wait);
264 	if (err)
265 		fsverity_err(inode, "Error %d computing block hash", err);
266 	return err;
267 }
268 
269 /**
270  * fsverity_hash_buffer() - hash some data
271  * @alg: the hash algorithm to use
272  * @data: the data to hash
273  * @size: size of data to hash, in bytes
274  * @out: output digest, size 'alg->digest_size' bytes
275  *
276  * Hash some data which is located in physically contiguous memory (i.e. memory
277  * allocated by kmalloc(), not by vmalloc()).  No salt is used.
278  *
279  * Return: 0 on success, -errno on failure
280  */
281 int fsverity_hash_buffer(struct fsverity_hash_alg *alg,
282 			 const void *data, size_t size, u8 *out)
283 {
284 	struct ahash_request *req;
285 	struct scatterlist sg;
286 	DECLARE_CRYPTO_WAIT(wait);
287 	int err;
288 
289 	/* This allocation never fails, since it's mempool-backed. */
290 	req = fsverity_alloc_hash_request(alg, GFP_KERNEL);
291 
292 	sg_init_one(&sg, data, size);
293 	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
294 					CRYPTO_TFM_REQ_MAY_BACKLOG,
295 				   crypto_req_done, &wait);
296 	ahash_request_set_crypt(req, &sg, out, size);
297 
298 	err = crypto_wait_req(crypto_ahash_digest(req), &wait);
299 
300 	fsverity_free_hash_request(alg, req);
301 	return err;
302 }
303 
304 void __init fsverity_check_hash_algs(void)
305 {
306 	size_t i;
307 
308 	/*
309 	 * Sanity check the hash algorithms (could be a build-time check, but
310 	 * they're in an array)
311 	 */
312 	for (i = 0; i < ARRAY_SIZE(fsverity_hash_algs); i++) {
313 		const struct fsverity_hash_alg *alg = &fsverity_hash_algs[i];
314 
315 		if (!alg->name)
316 			continue;
317 
318 		BUG_ON(alg->digest_size > FS_VERITY_MAX_DIGEST_SIZE);
319 
320 		/*
321 		 * For efficiency, the implementation currently assumes the
322 		 * digest and block sizes are powers of 2.  This limitation can
323 		 * be lifted if the code is updated to handle other values.
324 		 */
325 		BUG_ON(!is_power_of_2(alg->digest_size));
326 		BUG_ON(!is_power_of_2(alg->block_size));
327 
328 		/* Verify that there is a valid mapping to HASH_ALGO_*. */
329 		BUG_ON(alg->algo_id == 0);
330 		BUG_ON(alg->digest_size != hash_digest_size[alg->algo_id]);
331 	}
332 }
333