xref: /openbmc/linux/fs/ext4/crypto.c (revision 4f3db074)
/*
 * linux/fs/ext4/crypto.c
 *
 * Copyright (C) 2015, Google, Inc.
 *
 * This contains encryption functions for ext4
 *
 * Written by Michael Halcrow, 2014.
 *
 * Filename encryption additions
 *	Uday Savagaonkar, 2014
 * Encryption policy handling additions
 *	Ildar Muslukhov, 2014
 *
 * This has not yet undergone a rigorous security audit.
 *
 * The usage of AES-XTS should conform to recommendations in NIST
 * Special Publication 800-38E and IEEE P1619/D16.
 */

#include <crypto/hash.h>
#include <crypto/sha.h>
#include <keys/user-type.h>
#include <keys/encrypted-type.h>
#include <linux/crypto.h>
#include <linux/ecryptfs.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/key.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/random.h>
#include <linux/scatterlist.h>
#include <linux/spinlock_types.h>

#include "ext4_extents.h"
#include "xattr.h"

/* Encryption added and removed here! (L: */

static unsigned int num_prealloc_crypto_pages = 32;
static unsigned int num_prealloc_crypto_ctxs = 128;

module_param(num_prealloc_crypto_pages, uint, 0444);
MODULE_PARM_DESC(num_prealloc_crypto_pages,
		 "Number of crypto pages to preallocate");
module_param(num_prealloc_crypto_ctxs, uint, 0444);
MODULE_PARM_DESC(num_prealloc_crypto_ctxs,
		 "Number of crypto contexts to preallocate");

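/*
 * Bounce pages hold ciphertext while an encrypted page is being written
 * back.  They come from a small mempool so that writeback can always make
 * forward progress under memory pressure, and recently used crypto
 * contexts (with their already-allocated tfms) are cached on a free list
 * for reuse.
 */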
static mempool_t *ext4_bounce_page_pool;

static LIST_HEAD(ext4_free_crypto_ctxs);
static DEFINE_SPINLOCK(ext4_crypto_ctx_lock);

/**
 * ext4_release_crypto_ctx() - Releases an encryption context
 * @ctx: The encryption context to release.
 *
 * If the encryption context was allocated from the pre-allocated pool, returns
 * it to that pool. Else, frees it.
 *
 * If there's a bounce page in the context, this frees that.
 */
void ext4_release_crypto_ctx(struct ext4_crypto_ctx *ctx)
{
	unsigned long flags;

	if (ctx->bounce_page) {
		if (ctx->flags & EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL)
			__free_page(ctx->bounce_page);
		else
			mempool_free(ctx->bounce_page, ext4_bounce_page_pool);
		ctx->bounce_page = NULL;
	}
	ctx->control_page = NULL;
	if (ctx->flags & EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL) {
		if (ctx->tfm)
			crypto_free_tfm(ctx->tfm);
		kfree(ctx);
	} else {
		spin_lock_irqsave(&ext4_crypto_ctx_lock, flags);
		list_add(&ctx->free_list, &ext4_free_crypto_ctxs);
		spin_unlock_irqrestore(&ext4_crypto_ctx_lock, flags);
	}
}

/**
 * ext4_alloc_and_init_crypto_ctx() - Allocates and inits an encryption context
 * @mask: The allocation mask.
 *
 * Return: An allocated and initialized encryption context on success; an
 * ERR_PTR-encoded error code otherwise (this function never returns NULL).
 */
static struct ext4_crypto_ctx *ext4_alloc_and_init_crypto_ctx(gfp_t mask)
{
	struct ext4_crypto_ctx *ctx = kzalloc(sizeof(struct ext4_crypto_ctx),
					      mask);

	if (!ctx)
		return ERR_PTR(-ENOMEM);
	return ctx;
}

/**
 * ext4_get_crypto_ctx() - Gets an encryption context
 * @inode:       The inode for which we are doing the crypto
 *
 * Allocates and initializes an encryption context.
 *
 * Return: An allocated and initialized encryption context on success; an
 * ERR_PTR-encoded error code otherwise.
 */
struct ext4_crypto_ctx *ext4_get_crypto_ctx(struct inode *inode)
{
	struct ext4_crypto_ctx *ctx = NULL;
	int res = 0;
	unsigned long flags;
	struct ext4_encryption_key *key = &EXT4_I(inode)->i_encryption_key;

	if (!ext4_read_workqueue) {
		res = ext4_init_crypto();
		if (res)
			return ERR_PTR(res);
	}

	/*
	 * We first try getting the ctx from a free list because in
	 * the common case the ctx will have an allocated and
	 * initialized crypto tfm, so it's probably a worthwhile
	 * optimization. For the bounce page, we first try getting it
	 * from the kernel allocator because that's just about as fast
	 * as getting it from a list and because a cache of free pages
	 * should generally be a "last resort" option for a filesystem
	 * to be able to do its job.
	 */
	spin_lock_irqsave(&ext4_crypto_ctx_lock, flags);
	ctx = list_first_entry_or_null(&ext4_free_crypto_ctxs,
				       struct ext4_crypto_ctx, free_list);
	if (ctx)
		list_del(&ctx->free_list);
	spin_unlock_irqrestore(&ext4_crypto_ctx_lock, flags);
	if (!ctx) {
		ctx = ext4_alloc_and_init_crypto_ctx(GFP_NOFS);
		if (IS_ERR(ctx)) {
			res = PTR_ERR(ctx);
			goto out;
		}
		ctx->flags |= EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL;
	} else {
		ctx->flags &= ~EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL;
	}

	/* Allocate a new Crypto API context if we don't already have
	 * one or if it isn't the right mode. */
	BUG_ON(key->mode == EXT4_ENCRYPTION_MODE_INVALID);
	if (ctx->tfm && (ctx->mode != key->mode)) {
		crypto_free_tfm(ctx->tfm);
		ctx->tfm = NULL;
		ctx->mode = EXT4_ENCRYPTION_MODE_INVALID;
	}
	if (!ctx->tfm) {
		switch (key->mode) {
		case EXT4_ENCRYPTION_MODE_AES_256_XTS:
			ctx->tfm = crypto_ablkcipher_tfm(
				crypto_alloc_ablkcipher("xts(aes)", 0, 0));
			break;
		case EXT4_ENCRYPTION_MODE_AES_256_GCM:
			/* TODO(mhalcrow): AEAD w/ gcm(aes);
			 * crypto_aead_setauthsize() */
			ctx->tfm = ERR_PTR(-ENOTSUPP);
			break;
		default:
			BUG();
		}
		if (IS_ERR_OR_NULL(ctx->tfm)) {
			res = PTR_ERR(ctx->tfm);
			ctx->tfm = NULL;
			goto out;
		}
		ctx->mode = key->mode;
	}
	BUG_ON(key->size != ext4_encryption_key_size(key->mode));

	/* There shouldn't be a bounce page attached to the crypto
	 * context at this point. */
	BUG_ON(ctx->bounce_page);

out:
	if (res) {
		if (!IS_ERR_OR_NULL(ctx))
			ext4_release_crypto_ctx(ctx);
		ctx = ERR_PTR(res);
	}
	return ctx;
}

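/*
 * Decryption of freshly read pages is deferred to this workqueue by the
 * read completion path, since it cannot be done from bio completion
 * (interrupt) context.  The workqueue is created lazily by
 * ext4_init_crypto() the first time an encrypted file is accessed.
 */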
struct workqueue_struct *ext4_read_workqueue;
static DEFINE_MUTEX(crypto_init);

/**
 * ext4_exit_crypto() - Shutdown the ext4 encryption system
 */
void ext4_exit_crypto(void)
{
	struct ext4_crypto_ctx *pos, *n;

	list_for_each_entry_safe(pos, n, &ext4_free_crypto_ctxs, free_list) {
		if (pos->bounce_page) {
			if (pos->flags &
			    EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL) {
				__free_page(pos->bounce_page);
			} else {
				mempool_free(pos->bounce_page,
					     ext4_bounce_page_pool);
			}
		}
		if (pos->tfm)
			crypto_free_tfm(pos->tfm);
		kfree(pos);
	}
	INIT_LIST_HEAD(&ext4_free_crypto_ctxs);
	if (ext4_bounce_page_pool)
		mempool_destroy(ext4_bounce_page_pool);
	ext4_bounce_page_pool = NULL;
	if (ext4_read_workqueue)
		destroy_workqueue(ext4_read_workqueue);
	ext4_read_workqueue = NULL;
}

/**
 * ext4_init_crypto() - Set up for ext4 encryption.
 *
 * We only call this when we start accessing encrypted files, since it
 * results in memory getting allocated that wouldn't otherwise be used.
 *
 * Return: Zero on success, non-zero otherwise.
 */
int ext4_init_crypto(void)
{
	int i, res;

	mutex_lock(&crypto_init);
	if (ext4_read_workqueue)
		goto already_initialized;
	ext4_read_workqueue = alloc_workqueue("ext4_crypto", WQ_HIGHPRI, 0);
	if (!ext4_read_workqueue) {
		res = -ENOMEM;
		goto fail;
	}

	for (i = 0; i < num_prealloc_crypto_ctxs; i++) {
		struct ext4_crypto_ctx *ctx;

		ctx = ext4_alloc_and_init_crypto_ctx(GFP_KERNEL);
		if (IS_ERR(ctx)) {
			res = PTR_ERR(ctx);
			goto fail;
		}
		list_add(&ctx->free_list, &ext4_free_crypto_ctxs);
	}

	ext4_bounce_page_pool =
		mempool_create_page_pool(num_prealloc_crypto_pages, 0);
	if (!ext4_bounce_page_pool) {
		res = -ENOMEM;
		goto fail;
	}
already_initialized:
	mutex_unlock(&crypto_init);
	return 0;
fail:
	ext4_exit_crypto();
	mutex_unlock(&crypto_init);
	return res;
}

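/**
 * ext4_restore_control_page() - Unwind the bookkeeping done by ext4_encrypt()
 * @data_page: The ciphertext (bounce) page returned by ext4_encrypt().
 *
 * Retrieves the encryption context that ext4_encrypt() stashed in the bounce
 * page's page_private, unlocks the bounce page and releases the context
 * (which in turn frees or recycles the bounce page itself).
 */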
void ext4_restore_control_page(struct page *data_page)
{
	struct ext4_crypto_ctx *ctx =
		(struct ext4_crypto_ctx *)page_private(data_page);

	set_page_private(data_page, (unsigned long)NULL);
	ClearPagePrivate(data_page);
	unlock_page(data_page);
	ext4_release_crypto_ctx(ctx);
}

/**
 * ext4_crypt_complete() - The completion callback for page encryption
 * @req: The asynchronous encryption request context
 * @res: The result of the encryption operation
 */
static void ext4_crypt_complete(struct crypto_async_request *req, int res)
{
	struct ext4_completion_result *ecr = req->data;

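	/*
	 * A backlogged request signals -EINPROGRESS once it starts being
	 * processed; only the final completion carries the real result.
	 */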
	if (res == -EINPROGRESS)
		return;
	ecr->res = res;
	complete(&ecr->completion);
}

typedef enum {
	EXT4_DECRYPT = 0,
	EXT4_ENCRYPT,
} ext4_direction_t;

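/**
 * ext4_page_crypto() - Encrypt or decrypt a single page
 * @ctx:       The encryption context carrying the crypto tfm to use.
 * @inode:     The inode the page belongs to.
 * @rw:        EXT4_ENCRYPT or EXT4_DECRYPT.
 * @index:     Page index in the file; used as the XTS tweak.
 * @src_page:  Source page (may be the same page as @dest_page).
 * @dest_page: Destination page.
 *
 * Issues an asynchronous AES-256-XTS request and waits for it to complete.
 *
 * Return: Zero on success, non-zero otherwise.
 */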
static int ext4_page_crypto(struct ext4_crypto_ctx *ctx,
			    struct inode *inode,
			    ext4_direction_t rw,
			    pgoff_t index,
			    struct page *src_page,
			    struct page *dest_page)

{
	u8 xts_tweak[EXT4_XTS_TWEAK_SIZE];
	struct ablkcipher_request *req = NULL;
	DECLARE_EXT4_COMPLETION_RESULT(ecr);
	struct scatterlist dst, src;
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct crypto_ablkcipher *atfm = __crypto_ablkcipher_cast(ctx->tfm);
	int res = 0;

	BUG_ON(!ctx->tfm);
	BUG_ON(ctx->mode != ei->i_encryption_key.mode);

	if (ctx->mode != EXT4_ENCRYPTION_MODE_AES_256_XTS) {
		printk_ratelimited(KERN_ERR
				   "%s: unsupported crypto algorithm: %d\n",
				   __func__, ctx->mode);
		return -ENOTSUPP;
	}

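	/*
	 * Reset all tfm flags, then ask the cipher to reject known-weak
	 * keys (a no-op for AES, which has none).
	 */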
	crypto_ablkcipher_clear_flags(atfm, ~0);
	crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_REQ_WEAK_KEY);

	res = crypto_ablkcipher_setkey(atfm, ei->i_encryption_key.raw,
				       ei->i_encryption_key.size);
	if (res) {
		printk_ratelimited(KERN_ERR
				   "%s: crypto_ablkcipher_setkey() failed\n",
				   __func__);
		return res;
	}
	req = ablkcipher_request_alloc(atfm, GFP_NOFS);
	if (!req) {
		printk_ratelimited(KERN_ERR
				   "%s: crypto_request_alloc() failed\n",
				   __func__);
		return -ENOMEM;
	}
	ablkcipher_request_set_callback(
		req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
		ext4_crypt_complete, &ecr);

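	/*
	 * The page index, zero-padded to EXT4_XTS_TWEAK_SIZE, serves as the
	 * XTS tweak, so every page in the file encrypts differently even
	 * though they share the same key.
	 */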
	BUILD_BUG_ON(EXT4_XTS_TWEAK_SIZE < sizeof(index));
	memcpy(xts_tweak, &index, sizeof(index));
	memset(&xts_tweak[sizeof(index)], 0,
	       EXT4_XTS_TWEAK_SIZE - sizeof(index));

	sg_init_table(&dst, 1);
	sg_set_page(&dst, dest_page, PAGE_CACHE_SIZE, 0);
	sg_init_table(&src, 1);
	sg_set_page(&src, src_page, PAGE_CACHE_SIZE, 0);
	ablkcipher_request_set_crypt(req, &src, &dst, PAGE_CACHE_SIZE,
				     xts_tweak);
	if (rw == EXT4_DECRYPT)
		res = crypto_ablkcipher_decrypt(req);
	else
		res = crypto_ablkcipher_encrypt(req);
	if (res == -EINPROGRESS || res == -EBUSY) {
		BUG_ON(req->base.data != &ecr);
		wait_for_completion(&ecr.completion);
		res = ecr.res;
	}
	ablkcipher_request_free(req);
	if (res) {
		printk_ratelimited(
			KERN_ERR
			"%s: crypto_ablkcipher_encrypt() returned %d\n",
			__func__, res);
		return res;
	}
	return 0;
}

/**
 * ext4_encrypt() - Encrypts a page
 * @inode:          The inode for which the encryption should take place
 * @plaintext_page: The page to encrypt. Must be locked.
 *
 * Allocates a bounce (ciphertext) page and encrypts @plaintext_page into it
 * using an encryption context obtained for @inode.
 *
 * Called on the page write path.  The caller must call
 * ext4_restore_control_page() on the returned ciphertext page to
 * release the bounce buffer and the encryption context.
 *
 * Return: An allocated page with the encrypted content on success; an
 * ERR_PTR-encoded error code otherwise.
 */
struct page *ext4_encrypt(struct inode *inode,
			  struct page *plaintext_page)
{
	struct ext4_crypto_ctx *ctx;
	struct page *ciphertext_page = NULL;
	int err;

	BUG_ON(!PageLocked(plaintext_page));

	ctx = ext4_get_crypto_ctx(inode);
	if (IS_ERR(ctx))
		return (struct page *) ctx;

	/* The encryption operation will require a bounce page. */
	ciphertext_page = alloc_page(GFP_NOFS);
	if (!ciphertext_page) {
		/* This is a potential bottleneck, but at least we'll have
		 * forward progress. */
		ciphertext_page = mempool_alloc(ext4_bounce_page_pool,
						 GFP_NOFS);
		if (WARN_ON_ONCE(!ciphertext_page)) {
			ciphertext_page = mempool_alloc(ext4_bounce_page_pool,
							 GFP_NOFS | __GFP_WAIT);
		}
		ctx->flags &= ~EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL;
	} else {
		ctx->flags |= EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL;
	}
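	/*
	 * The flag above tells ext4_release_crypto_ctx() whether the bounce
	 * page must be returned to the mempool or freed with __free_page().
	 */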
	ctx->bounce_page = ciphertext_page;
	ctx->control_page = plaintext_page;
	err = ext4_page_crypto(ctx, inode, EXT4_ENCRYPT, plaintext_page->index,
			       plaintext_page, ciphertext_page);
	if (err) {
		ext4_release_crypto_ctx(ctx);
		return ERR_PTR(err);
	}
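	/*
	 * Stash the context in the bounce page's page_private and return
	 * the page locked; the caller undoes this via
	 * ext4_restore_control_page() once it is done with the ciphertext.
	 */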
	SetPagePrivate(ciphertext_page);
	set_page_private(ciphertext_page, (unsigned long)ctx);
	lock_page(ciphertext_page);
	return ciphertext_page;
}

/**
 * ext4_decrypt() - Decrypts a page in-place
 * @ctx:  The encryption context.
 * @page: The page to decrypt. Must be locked.
 *
 * Decrypts page in-place using the ctx encryption context.
 *
 * Called from the read completion callback.
 *
 * Return: Zero on success, non-zero otherwise.
 */
int ext4_decrypt(struct ext4_crypto_ctx *ctx, struct page *page)
{
	BUG_ON(!PageLocked(page));

	return ext4_page_crypto(ctx, page->mapping->host,
				EXT4_DECRYPT, page->index, page, page);
}

/*
 * Convenience function which takes care of allocating and
 * deallocating the encryption context
 */
int ext4_decrypt_one(struct inode *inode, struct page *page)
{
	int ret;

	struct ext4_crypto_ctx *ctx = ext4_get_crypto_ctx(inode);

	if (IS_ERR(ctx))
		return PTR_ERR(ctx);
	ret = ext4_decrypt(ctx, page);
	ext4_release_crypto_ctx(ctx);
	return ret;
}

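/**
 * ext4_encrypted_zeroout() - Write encrypted zeroes over an extent
 * @inode: The inode the extent belongs to.
 * @ex:    The extent whose blocks should read back as zeroes.
 *
 * Encrypts the zero page for each block covered by @ex and writes the
 * resulting ciphertext synchronously to disk, so that a later read and
 * decrypt of those blocks yields zeroes.
 *
 * Return: Zero on success, non-zero otherwise.
 */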
int ext4_encrypted_zeroout(struct inode *inode, struct ext4_extent *ex)
{
	struct ext4_crypto_ctx	*ctx;
	struct page		*ciphertext_page = NULL;
	struct bio		*bio;
	ext4_lblk_t		lblk = le32_to_cpu(ex->ee_block);
	ext4_fsblk_t		pblk = ext4_ext_pblock(ex);
	unsigned int		len = ext4_ext_get_actual_len(ex);
	int			err = 0;

	BUG_ON(inode->i_sb->s_blocksize != PAGE_CACHE_SIZE);

	ctx = ext4_get_crypto_ctx(inode);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ciphertext_page = alloc_page(GFP_NOFS);
	if (!ciphertext_page) {
		/* This is a potential bottleneck, but at least we'll have
		 * forward progress. */
		ciphertext_page = mempool_alloc(ext4_bounce_page_pool,
						 GFP_NOFS);
		if (WARN_ON_ONCE(!ciphertext_page)) {
			ciphertext_page = mempool_alloc(ext4_bounce_page_pool,
							 GFP_NOFS | __GFP_WAIT);
		}
		ctx->flags &= ~EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL;
	} else {
		ctx->flags |= EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL;
	}
	ctx->bounce_page = ciphertext_page;

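	/*
	 * Encrypt the zero page once per block in the extent, using each
	 * block's logical number as the tweak, and write the ciphertext
	 * out synchronously.
	 */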
	while (len--) {
		err = ext4_page_crypto(ctx, inode, EXT4_ENCRYPT, lblk,
				       ZERO_PAGE(0), ciphertext_page);
		if (err)
			goto errout;

		bio = bio_alloc(GFP_KERNEL, 1);
		if (!bio) {
			err = -ENOMEM;
			goto errout;
		}
		bio->bi_bdev = inode->i_sb->s_bdev;
		/* bi_sector is in 512-byte sectors, pblk is in fs blocks */
		bio->bi_iter.bi_sector =
			pblk << (inode->i_sb->s_blocksize_bits - 9);
		/* bio_add_page() returns the number of bytes added */
		if (bio_add_page(bio, ciphertext_page,
				 inode->i_sb->s_blocksize, 0) !=
		    inode->i_sb->s_blocksize) {
			err = -EIO;
			bio_put(bio);
			goto errout;
		}
		err = submit_bio_wait(WRITE, bio);
		bio_put(bio);
		if (err)
			goto errout;
		lblk++;
		pblk++;
	}
	err = 0;
errout:
	ext4_release_crypto_ctx(ctx);
	return err;
}

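/*
 * Only AES-256-XTS is currently supported as a contents encryption mode.
 */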
bool ext4_valid_contents_enc_mode(uint32_t mode)
{
	return (mode == EXT4_ENCRYPTION_MODE_AES_256_XTS);
}

/**
 * ext4_validate_encryption_key_size() - Validate the encryption key size
 * @mode: The key mode.
 * @size: The key size to validate.
 *
 * Return: The validated key size for @mode. Zero if invalid.
 */
uint32_t ext4_validate_encryption_key_size(uint32_t mode, uint32_t size)
{
	if (size == ext4_encryption_key_size(mode))
		return size;
	return 0;
}
559