// SPDX-License-Identifier: GPL-2.0-only
/*
 * This contains encryption functions for per-file encryption.
 *
 * Copyright (C) 2015, Google, Inc.
 * Copyright (C) 2015, Motorola Mobility
 *
 * Written by Michael Halcrow, 2014.
 *
 * Filename encryption additions
 *	Uday Savagaonkar, 2014
 * Encryption policy handling additions
 *	Ildar Muslukhov, 2014
 * Add fscrypt_pullback_bio_page()
 *	Jaegeuk Kim, 2015.
 *
 * This has not yet undergone a rigorous security audit.
 *
 * The usage of AES-XTS should conform to recommendations in NIST
 * Special Publication 800-38E and IEEE P1619/D16.
 */

#include <linux/pagemap.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/ratelimit.h>
#include <crypto/skcipher.h>
#include "fscrypt_private.h"

static unsigned int num_prealloc_crypto_pages = 32;

module_param(num_prealloc_crypto_pages, uint, 0444);
MODULE_PARM_DESC(num_prealloc_crypto_pages,
		"Number of crypto pages to preallocate");

static mempool_t *fscrypt_bounce_page_pool = NULL;

static struct workqueue_struct *fscrypt_read_workqueue;
static DEFINE_MUTEX(fscrypt_init_mutex);

struct kmem_cache *fscrypt_info_cachep;

void fscrypt_enqueue_decrypt_work(struct work_struct *work)
{
	queue_work(fscrypt_read_workqueue, work);
}
EXPORT_SYMBOL(fscrypt_enqueue_decrypt_work);
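
/*
 * Illustrative usage sketch (editorial addition, not part of the original
 * file): decryption must not run in the atomic bio completion context, so a
 * filesystem's read-completion handler typically packages the bio into a
 * work item and defers it to the workqueue above.  The names
 * 'bio_post_read_ctx' and 'decrypt_work_fn' are hypothetical; see
 * fs/ext4/readpage.c for a real implementation of this pattern.
 *
 *	static void bio_read_end_io(struct bio *bio)
 *	{
 *		struct bio_post_read_ctx *ctx = bio->bi_private;
 *
 *		INIT_WORK(&ctx->work, decrypt_work_fn);
 *		fscrypt_enqueue_decrypt_work(&ctx->work);
 *	}
 */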

struct page *fscrypt_alloc_bounce_page(gfp_t gfp_flags)
{
	return mempool_alloc(fscrypt_bounce_page_pool, gfp_flags);
}

/**
 * fscrypt_free_bounce_page() - free a ciphertext bounce page
 * @bounce_page: the bounce page to free, or NULL
 *
 * Free a bounce page that was allocated by fscrypt_encrypt_pagecache_blocks(),
 * or by fscrypt_alloc_bounce_page() directly.
 */
void fscrypt_free_bounce_page(struct page *bounce_page)
{
	if (!bounce_page)
		return;
	set_page_private(bounce_page, (unsigned long)NULL);
	ClearPagePrivate(bounce_page);
	mempool_free(bounce_page, fscrypt_bounce_page_pool);
}
EXPORT_SYMBOL(fscrypt_free_bounce_page);
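
/*
 * Illustrative usage sketch (editorial addition): once writeback of the
 * ciphertext completes, the filesystem recovers the original pagecache page
 * from the bounce page's page_private (the fscrypt_pagecache_page() helper
 * in <linux/fscrypt.h> does this) and frees the bounce page:
 *
 *	struct page *orig_page = fscrypt_pagecache_page(bounce_page);
 *
 *	fscrypt_free_bounce_page(bounce_page);
 *	end_page_writeback(orig_page);
 */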

void fscrypt_generate_iv(union fscrypt_iv *iv, u64 lblk_num,
			 const struct fscrypt_info *ci)
{
	u8 flags = fscrypt_policy_flags(&ci->ci_policy);

	memset(iv, 0, ci->ci_mode->ivsize);

	if (flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64) {
		WARN_ON_ONCE((u32)lblk_num != lblk_num);
		lblk_num |= (u64)ci->ci_inode->i_ino << 32;
	} else if (flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY) {
		memcpy(iv->nonce, ci->ci_nonce, FS_KEY_DERIVATION_NONCE_SIZE);
	}
	iv->lblk_num = cpu_to_le64(lblk_num);
}
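
/*
 * Worked example (editorial addition): with IV_INO_LBLK_64, inode number 5
 * and logical block 3 yield the 64-bit value (5ULL << 32) | 3, stored
 * little-endian in iv->lblk_num; all remaining IV bytes stay zero.  The
 * WARN_ON_ONCE() above fires if the block number doesn't fit in 32 bits,
 * since the high half is reserved for the inode number.
 */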

/* Encrypt or decrypt a single filesystem block of file contents */
int fscrypt_crypt_block(const struct inode *inode, fscrypt_direction_t rw,
			u64 lblk_num, struct page *src_page,
			struct page *dest_page, unsigned int len,
			unsigned int offs, gfp_t gfp_flags)
{
	union fscrypt_iv iv;
	struct skcipher_request *req = NULL;
	DECLARE_CRYPTO_WAIT(wait);
	struct scatterlist dst, src;
	struct fscrypt_info *ci = inode->i_crypt_info;
	struct crypto_skcipher *tfm = ci->ci_ctfm;
	int res = 0;

	if (WARN_ON_ONCE(len <= 0))
		return -EINVAL;
	if (WARN_ON_ONCE(len % FS_CRYPTO_BLOCK_SIZE != 0))
		return -EINVAL;

	fscrypt_generate_iv(&iv, lblk_num, ci);

	req = skcipher_request_alloc(tfm, gfp_flags);
	if (!req)
		return -ENOMEM;

	skcipher_request_set_callback(
		req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
		crypto_req_done, &wait);

	sg_init_table(&dst, 1);
	sg_set_page(&dst, dest_page, len, offs);
	sg_init_table(&src, 1);
	sg_set_page(&src, src_page, len, offs);
	skcipher_request_set_crypt(req, &src, &dst, len, &iv);
	if (rw == FS_DECRYPT)
		res = crypto_wait_req(crypto_skcipher_decrypt(req), &wait);
	else
		res = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
	skcipher_request_free(req);
	if (res) {
		fscrypt_err(inode, "%scryption failed for block %llu: %d",
			    (rw == FS_DECRYPT ? "De" : "En"), lblk_num, res);
		return res;
	}
	return 0;
}
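
/*
 * Illustrative usage sketch (editorial addition): decrypting the first
 * filesystem block of a page in place, as the in-place helpers below do.
 * Passing the same page as source and destination is valid because the
 * skcipher API supports in-place operation:
 *
 *	err = fscrypt_crypt_block(inode, FS_DECRYPT, 0, page, page,
 *				  blocksize, 0, GFP_NOFS);
 */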

/**
 * fscrypt_encrypt_pagecache_blocks() - Encrypt filesystem blocks from a pagecache page
 * @page:      The locked pagecache page containing the block(s) to encrypt
 * @len:       Total size of the block(s) to encrypt.  Must be a nonzero
 *		multiple of the filesystem's block size.
 * @offs:      Byte offset within @page of the first block to encrypt.  Must be
 *		a multiple of the filesystem's block size.
 * @gfp_flags: Memory allocation flags.  See details below.
 *
 * A new bounce page is allocated, and the specified block(s) are encrypted into
 * it.  In the bounce page, the ciphertext block(s) will be located at the same
 * offsets at which the plaintext block(s) were located in the source page; any
 * other parts of the bounce page will be left uninitialized.  However, normally
 * blocksize == PAGE_SIZE and the whole page is encrypted at once.
 *
 * This is for use by the filesystem's ->writepages() method.
 *
 * The bounce page allocation is mempool-backed, so it will always succeed when
 * @gfp_flags includes __GFP_DIRECT_RECLAIM, e.g. when it's GFP_NOFS.  However,
 * only the first page of each bio can be allocated this way.  To prevent
 * deadlocks, for any additional pages a mask like GFP_NOWAIT must be used.
 *
 * Return: the new encrypted bounce page on success; an ERR_PTR() on failure
 */
struct page *fscrypt_encrypt_pagecache_blocks(struct page *page,
					      unsigned int len,
					      unsigned int offs,
					      gfp_t gfp_flags)
{
	const struct inode *inode = page->mapping->host;
	const unsigned int blockbits = inode->i_blkbits;
	const unsigned int blocksize = 1 << blockbits;
	struct page *ciphertext_page;
	u64 lblk_num = ((u64)page->index << (PAGE_SHIFT - blockbits)) +
		       (offs >> blockbits);
	unsigned int i;
	int err;

	if (WARN_ON_ONCE(!PageLocked(page)))
		return ERR_PTR(-EINVAL);

	if (WARN_ON_ONCE(len <= 0 || !IS_ALIGNED(len | offs, blocksize)))
		return ERR_PTR(-EINVAL);

	ciphertext_page = fscrypt_alloc_bounce_page(gfp_flags);
	if (!ciphertext_page)
		return ERR_PTR(-ENOMEM);

	for (i = offs; i < offs + len; i += blocksize, lblk_num++) {
		err = fscrypt_crypt_block(inode, FS_ENCRYPT, lblk_num,
					  page, ciphertext_page,
					  blocksize, i, gfp_flags);
		if (err) {
			fscrypt_free_bounce_page(ciphertext_page);
			return ERR_PTR(err);
		}
	}
	SetPagePrivate(ciphertext_page);
	set_page_private(ciphertext_page, (unsigned long)page);
	return ciphertext_page;
}
EXPORT_SYMBOL(fscrypt_encrypt_pagecache_blocks);
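
/*
 * Illustrative write-path sketch (editorial addition; error handling
 * trimmed).  A ->writepages() implementation encrypts into the bounce page,
 * submits the bio against the bounce page rather than the pagecache page,
 * and frees the bounce page once the write has completed:
 *
 *	bounce_page = fscrypt_encrypt_pagecache_blocks(page, PAGE_SIZE, 0,
 *						       GFP_NOFS);
 *	if (IS_ERR(bounce_page))
 *		return PTR_ERR(bounce_page);
 *	... point the bio at bounce_page instead of page, submit it ...
 *	... then, in the write-completion handler: ...
 *	fscrypt_free_bounce_page(bounce_page);
 */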

/**
 * fscrypt_encrypt_block_inplace() - Encrypt a filesystem block in-place
 * @inode:     The inode to which this block belongs
 * @page:      The page containing the block to encrypt
 * @len:       Size of block to encrypt.  Doesn't need to be a multiple of the
 *		fs block size, but must be a multiple of FS_CRYPTO_BLOCK_SIZE.
 * @offs:      Byte offset within @page at which the block to encrypt begins
 * @lblk_num:  Filesystem logical block number of the block, i.e. the 0-based
 *		number of the block within the file
 * @gfp_flags: Memory allocation flags
 *
 * Encrypt a possibly-compressed filesystem block that is located in an
 * arbitrary page, not necessarily in the original pagecache page.  The @inode
 * and @lblk_num must be specified, as they can't be determined from @page.
 *
 * Return: 0 on success; -errno on failure
 */
int fscrypt_encrypt_block_inplace(const struct inode *inode, struct page *page,
				  unsigned int len, unsigned int offs,
				  u64 lblk_num, gfp_t gfp_flags)
{
	return fscrypt_crypt_block(inode, FS_ENCRYPT, lblk_num, page, page,
				   len, offs, gfp_flags);
}
EXPORT_SYMBOL(fscrypt_encrypt_block_inplace);
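
/*
 * Illustrative usage sketch (editorial addition): encrypting a 1024-byte
 * sub-page block at byte offset 2048 of a scratch page that is not in the
 * pagecache.  The inode and logical block number must be supplied explicitly
 * since they cannot be derived from the page (UBIFS uses this interface in a
 * similar way):
 *
 *	err = fscrypt_encrypt_block_inplace(inode, scratch_page, 1024, 2048,
 *					    lblk_num, GFP_NOFS);
 */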

/**
 * fscrypt_decrypt_pagecache_blocks() - Decrypt filesystem blocks in a pagecache page
 * @page:      The locked pagecache page containing the block(s) to decrypt
 * @len:       Total size of the block(s) to decrypt.  Must be a nonzero
 *		multiple of the filesystem's block size.
 * @offs:      Byte offset within @page of the first block to decrypt.  Must be
 *		a multiple of the filesystem's block size.
 *
 * The specified block(s) are decrypted in-place within the pagecache page,
 * which must still be locked and not uptodate.  Normally, blocksize ==
 * PAGE_SIZE and the whole page is decrypted at once.
 *
 * This is for use by the filesystem's ->readpages() method.
 *
 * Return: 0 on success; -errno on failure
 */
int fscrypt_decrypt_pagecache_blocks(struct page *page, unsigned int len,
				     unsigned int offs)
{
	const struct inode *inode = page->mapping->host;
	const unsigned int blockbits = inode->i_blkbits;
	const unsigned int blocksize = 1 << blockbits;
	u64 lblk_num = ((u64)page->index << (PAGE_SHIFT - blockbits)) +
		       (offs >> blockbits);
	unsigned int i;
	int err;

	if (WARN_ON_ONCE(!PageLocked(page)))
		return -EINVAL;

	if (WARN_ON_ONCE(len <= 0 || !IS_ALIGNED(len | offs, blocksize)))
		return -EINVAL;

	for (i = offs; i < offs + len; i += blocksize, lblk_num++) {
		err = fscrypt_crypt_block(inode, FS_DECRYPT, lblk_num, page,
					  page, blocksize, i, GFP_NOFS);
		if (err)
			return err;
	}
	return 0;
}
EXPORT_SYMBOL(fscrypt_decrypt_pagecache_blocks);
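
/*
 * Illustrative read-path sketch (editorial addition): after the bio that
 * filled @page completes, the filesystem decrypts the page (from the fscrypt
 * read workqueue, not from the bio completion itself) and only then marks it
 * uptodate:
 *
 *	err = fscrypt_decrypt_pagecache_blocks(page, PAGE_SIZE, 0);
 *	if (!err)
 *		SetPageUptodate(page);
 *	unlock_page(page);
 */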

/**
 * fscrypt_decrypt_block_inplace() - Decrypt a filesystem block in-place
 * @inode:     The inode to which this block belongs
 * @page:      The page containing the block to decrypt
 * @len:       Size of block to decrypt.  Doesn't need to be a multiple of the
 *		fs block size, but must be a multiple of FS_CRYPTO_BLOCK_SIZE.
 * @offs:      Byte offset within @page at which the block to decrypt begins
 * @lblk_num:  Filesystem logical block number of the block, i.e. the 0-based
 *		number of the block within the file
 *
 * Decrypt a possibly-compressed filesystem block that is located in an
 * arbitrary page, not necessarily in the original pagecache page.  The @inode
 * and @lblk_num must be specified, as they can't be determined from @page.
 *
 * Return: 0 on success; -errno on failure
 */
int fscrypt_decrypt_block_inplace(const struct inode *inode, struct page *page,
				  unsigned int len, unsigned int offs,
				  u64 lblk_num)
{
	return fscrypt_crypt_block(inode, FS_DECRYPT, lblk_num, page, page,
				   len, offs, GFP_NOFS);
}
EXPORT_SYMBOL(fscrypt_decrypt_block_inplace);

/**
 * fscrypt_initialize() - allocate major buffers for fs encryption.
 * @cop_flags:  fscrypt operations flags
 *
 * We only call this when we start accessing encrypted files, since it
 * results in memory getting allocated that wouldn't otherwise be used.
 *
 * Return: 0 on success; -errno on failure
 */
int fscrypt_initialize(unsigned int cop_flags)
{
	int err = 0;

	/* No need to allocate a bounce page pool if this FS won't use it. */
	if (cop_flags & FS_CFLG_OWN_PAGES)
		return 0;

	mutex_lock(&fscrypt_init_mutex);
	if (fscrypt_bounce_page_pool)
		goto out_unlock;

	err = -ENOMEM;
	fscrypt_bounce_page_pool =
		mempool_create_page_pool(num_prealloc_crypto_pages, 0);
	if (!fscrypt_bounce_page_pool)
		goto out_unlock;

	err = 0;
out_unlock:
	mutex_unlock(&fscrypt_init_mutex);
	return err;
}
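
/*
 * Illustrative call site (editorial addition, modeled on
 * fscrypt_get_encryption_info() in fs/crypto/keysetup.c): the first access
 * to an encrypted file triggers the lazy pool allocation above:
 *
 *	res = fscrypt_initialize(inode->i_sb->s_cop->flags);
 *	if (res)
 *		return res;
 */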

void fscrypt_msg(const struct inode *inode, const char *level,
		 const char *fmt, ...)
{
	static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);
	struct va_format vaf;
	va_list args;

	if (!__ratelimit(&rs))
		return;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	if (inode)
		printk("%sfscrypt (%s, inode %lu): %pV\n",
		       level, inode->i_sb->s_id, inode->i_ino, &vaf);
	else
		printk("%sfscrypt: %pV\n", level, &vaf);
	va_end(args);
}
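
/*
 * Illustrative usage sketch (editorial addition): fscrypt_msg() is normally
 * reached through the fscrypt_warn() and fscrypt_err() macros defined in
 * fscrypt_private.h, which supply the log level, e.g.:
 *
 *	fscrypt_warn(inode, "Error %d getting encryption context", res);
 */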

/**
 * fscrypt_init() - Set up for fs encryption.
 */
static int __init fscrypt_init(void)
{
	int err = -ENOMEM;

	/*
	 * Use an unbound workqueue to allow bios to be decrypted in parallel
	 * even when they happen to complete on the same CPU.  This sacrifices
	 * locality, but it's worthwhile since decryption is CPU-intensive.
	 *
	 * Also use a high-priority workqueue to prioritize decryption work,
	 * which blocks reads from completing, over regular application tasks.
	 */
	fscrypt_read_workqueue = alloc_workqueue("fscrypt_read_queue",
						 WQ_UNBOUND | WQ_HIGHPRI,
						 num_online_cpus());
	if (!fscrypt_read_workqueue)
		goto fail;

	fscrypt_info_cachep = KMEM_CACHE(fscrypt_info, SLAB_RECLAIM_ACCOUNT);
	if (!fscrypt_info_cachep)
		goto fail_free_queue;

	err = fscrypt_init_keyring();
	if (err)
		goto fail_free_info;

	return 0;

fail_free_info:
	kmem_cache_destroy(fscrypt_info_cachep);
fail_free_queue:
	destroy_workqueue(fscrypt_read_workqueue);
fail:
	return err;
}
late_initcall(fscrypt_init)