// SPDX-License-Identifier: GPL-2.0
/*
 * fs/verity/hash_algs.c: fs-verity hash algorithms
 *
 * Copyright 2019 Google LLC
 */

#include "fsverity_private.h"

#include <crypto/hash.h>
#include <linux/scatterlist.h>

/* The hash algorithms supported by fs-verity */
struct fsverity_hash_alg fsverity_hash_algs[] = {
	[FS_VERITY_HASH_ALG_SHA256] = {
		.name = "sha256",
		.digest_size = SHA256_DIGEST_SIZE,
		.block_size = SHA256_BLOCK_SIZE,
	},
	[FS_VERITY_HASH_ALG_SHA512] = {
		.name = "sha512",
		.digest_size = SHA512_DIGEST_SIZE,
		.block_size = SHA512_BLOCK_SIZE,
	},
};

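/*
 * Editor's note (not part of the original file): the array above is indexed
 * directly by the algorithm number stored in the fsverity descriptor, so
 * entries for unassigned numbers stay zero-initialized with a NULL ->name.
 * A minimal sketch of the lookup callers rely on (it mirrors the check in
 * fsverity_get_hash_alg() below), assuming "num" came from an untrusted
 * descriptor:
 *
 *	if (num >= ARRAY_SIZE(fsverity_hash_algs) ||
 *	    !fsverity_hash_algs[num].name)
 *		return ERR_PTR(-EINVAL);
 *	alg = &fsverity_hash_algs[num];
 *
 * For reference, SHA256_DIGEST_SIZE/SHA256_BLOCK_SIZE are 32/64 bytes and
 * SHA512_DIGEST_SIZE/SHA512_BLOCK_SIZE are 64/128 bytes.
 */
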
static DEFINE_MUTEX(fsverity_hash_alg_init_mutex);

/**
 * fsverity_get_hash_alg() - validate and prepare a hash algorithm
 * @inode: optional inode for logging purposes
 * @num: the hash algorithm number
 *
 * Get the struct fsverity_hash_alg for the given hash algorithm number, and
 * ensure it has a hash transform ready to go.  The hash transforms are
 * allocated on-demand so that we don't waste resources unnecessarily, and
 * because the crypto modules may be initialized later than fs/verity/.
 *
 * Return: pointer to the hash alg on success, else an ERR_PTR()
 */
struct fsverity_hash_alg *fsverity_get_hash_alg(const struct inode *inode,
						unsigned int num)
{
	struct fsverity_hash_alg *alg;
	struct crypto_ahash *tfm;
	int err;

	if (num >= ARRAY_SIZE(fsverity_hash_algs) ||
	    !fsverity_hash_algs[num].name) {
		fsverity_warn(inode, "Unknown hash algorithm number: %u", num);
		return ERR_PTR(-EINVAL);
	}
	alg = &fsverity_hash_algs[num];

	/* pairs with smp_store_release() below */
	if (likely(smp_load_acquire(&alg->tfm) != NULL))
		return alg;

	mutex_lock(&fsverity_hash_alg_init_mutex);

	if (alg->tfm != NULL)
		goto out_unlock;

	/*
	 * Using the shash API would make things a bit simpler, but the ahash
	 * API is preferable as it allows the use of crypto accelerators.
	 */
	tfm = crypto_alloc_ahash(alg->name, 0, 0);
	if (IS_ERR(tfm)) {
		if (PTR_ERR(tfm) == -ENOENT) {
			fsverity_warn(inode,
				      "Missing crypto API support for hash algorithm \"%s\"",
				      alg->name);
			alg = ERR_PTR(-ENOPKG);
			goto out_unlock;
		}
		fsverity_err(inode,
			     "Error allocating hash algorithm \"%s\": %ld",
			     alg->name, PTR_ERR(tfm));
		alg = ERR_CAST(tfm);
		goto out_unlock;
	}

	err = -EINVAL;
	if (WARN_ON(alg->digest_size != crypto_ahash_digestsize(tfm)))
		goto err_free_tfm;
	if (WARN_ON(alg->block_size != crypto_ahash_blocksize(tfm)))
		goto err_free_tfm;

	err = mempool_init_kmalloc_pool(&alg->req_pool, 1,
					sizeof(struct ahash_request) +
					crypto_ahash_reqsize(tfm));
	if (err)
		goto err_free_tfm;

	pr_info("%s using implementation \"%s\"\n",
		alg->name, crypto_ahash_driver_name(tfm));

	/* pairs with smp_load_acquire() above */
	smp_store_release(&alg->tfm, tfm);
	goto out_unlock;

err_free_tfm:
	crypto_free_ahash(tfm);
	alg = ERR_PTR(err);
out_unlock:
	mutex_unlock(&fsverity_hash_alg_init_mutex);
	return alg;
}

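/*
 * Usage sketch (editor's addition, not part of the original file): a typical
 * caller resolves the algorithm number taken from an untrusted source and
 * checks the result with IS_ERR().  "desc->hash_algorithm" is a hypothetical
 * field name used only for illustration:
 *
 *	struct fsverity_hash_alg *alg;
 *
 *	alg = fsverity_get_hash_alg(inode, desc->hash_algorithm);
 *	if (IS_ERR(alg))
 *		return PTR_ERR(alg);
 */
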
/**
 * fsverity_alloc_hash_request() - allocate a hash request object
 * @alg: the hash algorithm for which to allocate the request
 * @gfp_flags: memory allocation flags
 *
 * This is mempool-backed, so this never fails if __GFP_DIRECT_RECLAIM is set in
 * @gfp_flags.  However, in that case this might need to wait for all
 * previously-allocated requests to be freed.  So to avoid deadlocks, callers
 * must never need multiple requests at a time to make forward progress.
 *
 * Return: the request object on success; NULL on failure (but see above)
 */
struct ahash_request *fsverity_alloc_hash_request(struct fsverity_hash_alg *alg,
						  gfp_t gfp_flags)
{
	struct ahash_request *req = mempool_alloc(&alg->req_pool, gfp_flags);

	if (req)
		ahash_request_set_tfm(req, alg->tfm);
	return req;
}

/**
 * fsverity_free_hash_request() - free a hash request object
 * @alg: the hash algorithm
 * @req: the hash request object to free
 */
void fsverity_free_hash_request(struct fsverity_hash_alg *alg,
				struct ahash_request *req)
{
	if (req) {
		ahash_request_zero(req);
		mempool_free(req, &alg->req_pool);
	}
}

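/*
 * Usage sketch (editor's addition): as the comment above notes, a caller must
 * never need more than one request at a time to make forward progress.  A
 * minimal pattern, assuming "alg" was obtained from fsverity_get_hash_alg():
 *
 *	struct ahash_request *req;
 *
 *	req = fsverity_alloc_hash_request(alg, GFP_KERNEL);
 *	(never NULL here: GFP_KERNEL includes __GFP_DIRECT_RECLAIM)
 *	... set up and submit the request, wait for completion ...
 *	fsverity_free_hash_request(alg, req);
 */
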
/**
 * fsverity_prepare_hash_state() - precompute the initial hash state
 * @alg: hash algorithm
 * @salt: a salt which is to be prepended to all data to be hashed
 * @salt_size: salt size in bytes, possibly 0
 *
 * Return: NULL if the salt is empty, otherwise the kmalloc()'ed precomputed
 *	   initial hash state on success or an ERR_PTR() on failure.
 */
const u8 *fsverity_prepare_hash_state(struct fsverity_hash_alg *alg,
				      const u8 *salt, size_t salt_size)
{
	u8 *hashstate = NULL;
	struct ahash_request *req = NULL;
	u8 *padded_salt = NULL;
	size_t padded_salt_size;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	if (salt_size == 0)
		return NULL;

	hashstate = kmalloc(crypto_ahash_statesize(alg->tfm), GFP_KERNEL);
	if (!hashstate)
		return ERR_PTR(-ENOMEM);

	/* This allocation never fails, since it's mempool-backed. */
	req = fsverity_alloc_hash_request(alg, GFP_KERNEL);

	/*
	 * Zero-pad the salt to the next multiple of the input size of the hash
	 * algorithm's compression function, e.g. 64 bytes for SHA-256 or 128
	 * bytes for SHA-512.  This ensures that the hash algorithm won't have
	 * any bytes buffered internally after processing the salt, thus making
	 * salted hashing just as fast as unsalted hashing.
	 */
	padded_salt_size = round_up(salt_size, alg->block_size);
	padded_salt = kzalloc(padded_salt_size, GFP_KERNEL);
	if (!padded_salt) {
		err = -ENOMEM;
		goto err_free;
	}
	memcpy(padded_salt, salt, salt_size);

	sg_init_one(&sg, padded_salt, padded_salt_size);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
					CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);
	ahash_request_set_crypt(req, &sg, NULL, padded_salt_size);

	err = crypto_wait_req(crypto_ahash_init(req), &wait);
	if (err)
		goto err_free;

	err = crypto_wait_req(crypto_ahash_update(req), &wait);
	if (err)
		goto err_free;

	err = crypto_ahash_export(req, hashstate);
	if (err)
		goto err_free;
out:
	fsverity_free_hash_request(alg, req);
	kfree(padded_salt);
	return hashstate;

err_free:
	kfree(hashstate);
	hashstate = ERR_PTR(err);
	goto out;
}

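/*
 * Worked example (editor's addition): with SHA-256 (block_size == 64) and a
 * 4-byte salt, round_up(4, 64) == 64, so the salt is zero-padded to exactly
 * one 64-byte compression-function block before being hashed.  The exported
 * state therefore contains no partially-buffered input, and per-block hashing
 * can simply import it and finish with finup.  A sketch of the caller side:
 *
 *	hashstate = fsverity_prepare_hash_state(alg, salt, salt_size);
 *	if (IS_ERR(hashstate))
 *		return PTR_ERR(hashstate);
 *	(a NULL return simply means the salt was empty)
 */
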
/**
 * fsverity_hash_page() - hash a single data or hash page
 * @params: the Merkle tree's parameters
 * @inode: inode for which the hashing is being done
 * @req: preallocated hash request
 * @page: the page to hash
 * @out: output digest, size 'params->digest_size' bytes
 *
 * Hash a single data or hash block, assuming block_size == PAGE_SIZE.
 * The hash is salted if a salt is specified in the Merkle tree parameters.
 *
 * Return: 0 on success, -errno on failure
 */
int fsverity_hash_page(const struct merkle_tree_params *params,
		       const struct inode *inode,
		       struct ahash_request *req, struct page *page, u8 *out)
{
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	if (WARN_ON(params->block_size != PAGE_SIZE))
		return -EINVAL;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, page, PAGE_SIZE, 0);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
					CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);
	ahash_request_set_crypt(req, &sg, out, PAGE_SIZE);

	if (params->hashstate) {
		err = crypto_ahash_import(req, params->hashstate);
		if (err) {
			fsverity_err(inode,
				     "Error %d importing hash state", err);
			return err;
		}
		err = crypto_ahash_finup(req);
	} else {
		err = crypto_ahash_digest(req);
	}

	err = crypto_wait_req(err, &wait);
	if (err)
		fsverity_err(inode, "Error %d computing page hash", err);
	return err;
}

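/*
 * Usage sketch (editor's addition): verification code preallocates one
 * request and reuses it across many pages, roughly:
 *
 *	req = fsverity_alloc_hash_request(params->hash_alg, GFP_NOFS);
 *	err = fsverity_hash_page(params, inode, req, page, real_hash);
 *	fsverity_free_hash_request(params->hash_alg, req);
 *
 * "params->hash_alg" and "real_hash" follow the naming used elsewhere in
 * fs/verity/, but treat this call site as illustrative rather than exact.
 */
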
/**
 * fsverity_hash_buffer() - hash some data
 * @alg: the hash algorithm to use
 * @data: the data to hash
 * @size: size of data to hash, in bytes
 * @out: output digest, size 'alg->digest_size' bytes
 *
 * Hash some data which is located in physically contiguous memory (i.e. memory
 * allocated by kmalloc(), not by vmalloc()).  No salt is used.
 *
 * Return: 0 on success, -errno on failure
 */
int fsverity_hash_buffer(struct fsverity_hash_alg *alg,
			 const void *data, size_t size, u8 *out)
{
	struct ahash_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	/* This allocation never fails, since it's mempool-backed. */
	req = fsverity_alloc_hash_request(alg, GFP_KERNEL);

	sg_init_one(&sg, data, size);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
					CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);
	ahash_request_set_crypt(req, &sg, out, size);

	err = crypto_wait_req(crypto_ahash_digest(req), &wait);

	fsverity_free_hash_request(alg, req);
	return err;
}

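/*
 * Usage sketch (editor's addition): this helper suits one-off hashes of small
 * kmalloc'ed buffers, for example hashing a descriptor into a digest:
 *
 *	err = fsverity_hash_buffer(alg, desc, desc_size, digest);
 *	if (err)
 *		return err;
 *
 * "desc", "desc_size" and "digest" are illustrative names; the buffer must be
 * physically contiguous and "digest" must hold alg->digest_size bytes.
 */
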
void __init fsverity_check_hash_algs(void)
{
	size_t i;

	/*
	 * Sanity check the hash algorithms (could be a build-time check, but
	 * they're in an array)
	 */
	for (i = 0; i < ARRAY_SIZE(fsverity_hash_algs); i++) {
		const struct fsverity_hash_alg *alg = &fsverity_hash_algs[i];

		if (!alg->name)
			continue;

		BUG_ON(alg->digest_size > FS_VERITY_MAX_DIGEST_SIZE);

		/*
		 * For efficiency, the implementation currently assumes the
		 * digest and block sizes are powers of 2.  This limitation can
		 * be lifted if the code is updated to handle other values.
		 */
		BUG_ON(!is_power_of_2(alg->digest_size));
		BUG_ON(!is_power_of_2(alg->block_size));
	}
}
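
/*
 * Worked example (editor's addition): the power-of-2 requirement keeps the
 * Merkle tree arithmetic simple.  With SHA-256 digests (32 bytes) and
 * 4096-byte blocks, each hash block holds exactly 4096 / 32 = 128 child
 * hashes, so positions in the tree can be computed with shifts and masks
 * instead of divisions.
 */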