xref: /openbmc/linux/fs/crypto/crypto.c (revision a34a9f1a)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * This contains encryption functions for per-file encryption.
 *
 * Copyright (C) 2015, Google, Inc.
 * Copyright (C) 2015, Motorola Mobility
 *
 * Written by Michael Halcrow, 2014.
 *
 * Filename encryption additions
 *	Uday Savagaonkar, 2014
 * Encryption policy handling additions
 *	Ildar Muslukhov, 2014
 * Add fscrypt_pullback_bio_page()
 *	Jaegeuk Kim, 2015.
 *
 * This has not yet undergone a rigorous security audit.
 *
 * The usage of AES-XTS should conform to recommendations in NIST
 * Special Publication 800-38E and IEEE P1619/D16.
 */

#include <linux/pagemap.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/ratelimit.h>
#include <crypto/skcipher.h>
#include "fscrypt_private.h"

static unsigned int num_prealloc_crypto_pages = 32;

module_param(num_prealloc_crypto_pages, uint, 0444);
MODULE_PARM_DESC(num_prealloc_crypto_pages,
		"Number of crypto pages to preallocate");

static mempool_t *fscrypt_bounce_page_pool = NULL;

static struct workqueue_struct *fscrypt_read_workqueue;
static DEFINE_MUTEX(fscrypt_init_mutex);

struct kmem_cache *fscrypt_info_cachep;

void fscrypt_enqueue_decrypt_work(struct work_struct *work)
{
	queue_work(fscrypt_read_workqueue, work);
}
EXPORT_SYMBOL(fscrypt_enqueue_decrypt_work);
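
/*
 * Illustrative sketch (not part of fscrypt): a filesystem's bio completion
 * handler typically can't decrypt in place, since ->bi_end_io may run in
 * atomic context, so it defers the decryption to the fscrypt read workqueue.
 * struct example_read_ctx and example_read_endio() are hypothetical names
 * used only for this sketch; see example_decrypt_work() further below for
 * the deferred half.
 */
#if 0
struct example_read_ctx {
	struct work_struct work;	/* queued via fscrypt_enqueue_decrypt_work() */
	struct bio *bio;		/* the completed read bio */
};

static void example_read_endio(struct bio *bio)
{
	struct example_read_ctx *ctx = bio->bi_private;

	/* Defer decryption to process context on the fscrypt workqueue. */
	fscrypt_enqueue_decrypt_work(&ctx->work);
}
#endif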

struct page *fscrypt_alloc_bounce_page(gfp_t gfp_flags)
{
	return mempool_alloc(fscrypt_bounce_page_pool, gfp_flags);
}

/**
 * fscrypt_free_bounce_page() - free a ciphertext bounce page
 * @bounce_page: the bounce page to free, or NULL
 *
 * Free a bounce page that was allocated by fscrypt_encrypt_pagecache_blocks(),
 * or by fscrypt_alloc_bounce_page() directly.
 */
void fscrypt_free_bounce_page(struct page *bounce_page)
{
	if (!bounce_page)
		return;
	set_page_private(bounce_page, (unsigned long)NULL);
	ClearPagePrivate(bounce_page);
	mempool_free(bounce_page, fscrypt_bounce_page_pool);
}
EXPORT_SYMBOL(fscrypt_free_bounce_page);

/*
 * Generate the IV for the given logical block number within the given file.
 * For filename encryption, lblk_num == 0.
 *
 * Keep this in sync with fscrypt_limit_io_blocks().  fscrypt_limit_io_blocks()
 * needs to know about any IV generation methods where the low bits of IV don't
 * simply contain the lblk_num (e.g., IV_INO_LBLK_32).
 */
void fscrypt_generate_iv(union fscrypt_iv *iv, u64 lblk_num,
			 const struct fscrypt_info *ci)
{
	u8 flags = fscrypt_policy_flags(&ci->ci_policy);

	memset(iv, 0, ci->ci_mode->ivsize);

	if (flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64) {
		WARN_ON_ONCE(lblk_num > U32_MAX);
		WARN_ON_ONCE(ci->ci_inode->i_ino > U32_MAX);
		lblk_num |= (u64)ci->ci_inode->i_ino << 32;
	} else if (flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32) {
		WARN_ON_ONCE(lblk_num > U32_MAX);
		lblk_num = (u32)(ci->ci_hashed_ino + lblk_num);
	} else if (flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY) {
		memcpy(iv->nonce, ci->ci_nonce, FSCRYPT_FILE_NONCE_SIZE);
	}
	iv->lblk_num = cpu_to_le64(lblk_num);
}
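
/*
 * Illustrative summary of the resulting IV layouts (bytes listed low to high,
 * since ->lblk_num is little endian); see the fscrypt policy documentation
 * for the authoritative definitions:
 *
 *	default:	bytes 0-7 = lblk_num, remainder zero
 *	IV_INO_LBLK_64:	bytes 0-3 = lblk_num, bytes 4-7 = inode number
 *	IV_INO_LBLK_32:	bytes 0-3 = (hashed_ino + lblk_num) mod 2^32
 *	DIRECT_KEY:	bytes 0-7 = lblk_num, then the 16-byte file nonce
 */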

/* Encrypt or decrypt a single filesystem block of file contents */
int fscrypt_crypt_block(const struct inode *inode, fscrypt_direction_t rw,
			u64 lblk_num, struct page *src_page,
			struct page *dest_page, unsigned int len,
			unsigned int offs, gfp_t gfp_flags)
{
	union fscrypt_iv iv;
	struct skcipher_request *req = NULL;
	DECLARE_CRYPTO_WAIT(wait);
	struct scatterlist dst, src;
	struct fscrypt_info *ci = inode->i_crypt_info;
	struct crypto_skcipher *tfm = ci->ci_enc_key.tfm;
	int res = 0;

	if (WARN_ON_ONCE(len <= 0))
		return -EINVAL;
	if (WARN_ON_ONCE(len % FSCRYPT_CONTENTS_ALIGNMENT != 0))
		return -EINVAL;

	fscrypt_generate_iv(&iv, lblk_num, ci);

	req = skcipher_request_alloc(tfm, gfp_flags);
	if (!req)
		return -ENOMEM;

	skcipher_request_set_callback(
		req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
		crypto_req_done, &wait);

	sg_init_table(&dst, 1);
	sg_set_page(&dst, dest_page, len, offs);
	sg_init_table(&src, 1);
	sg_set_page(&src, src_page, len, offs);
	skcipher_request_set_crypt(req, &src, &dst, len, &iv);
	if (rw == FS_DECRYPT)
		res = crypto_wait_req(crypto_skcipher_decrypt(req), &wait);
	else
		res = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
	skcipher_request_free(req);
	if (res) {
		fscrypt_err(inode, "%scryption failed for block %llu: %d",
			    (rw == FS_DECRYPT ? "De" : "En"), lblk_num, res);
		return res;
	}
	return 0;
}

/**
 * fscrypt_encrypt_pagecache_blocks() - Encrypt filesystem blocks from a
 *					pagecache page
 * @page:      The locked pagecache page containing the block(s) to encrypt
 * @len:       Total size of the block(s) to encrypt.  Must be a nonzero
 *		multiple of the filesystem's block size.
 * @offs:      Byte offset within @page of the first block to encrypt.  Must be
 *		a multiple of the filesystem's block size.
 * @gfp_flags: Memory allocation flags.  See details below.
 *
 * A new bounce page is allocated, and the specified block(s) are encrypted into
 * it.  In the bounce page, the ciphertext block(s) will be located at the same
 * offsets at which the plaintext block(s) were located in the source page; any
 * other parts of the bounce page will be left uninitialized.  However, normally
 * blocksize == PAGE_SIZE and the whole page is encrypted at once.
 *
 * This is for use by the filesystem's ->writepages() method.
 *
 * The bounce page allocation is mempool-backed, so it will always succeed when
 * @gfp_flags includes __GFP_DIRECT_RECLAIM, e.g. when it's GFP_NOFS.  However,
 * only the first page of each bio can be allocated this way.  To prevent
 * deadlocks, for any additional pages a mask like GFP_NOWAIT must be used.
 *
 * Return: the new encrypted bounce page on success; an ERR_PTR() on failure
 */
struct page *fscrypt_encrypt_pagecache_blocks(struct page *page,
					      unsigned int len,
					      unsigned int offs,
					      gfp_t gfp_flags)
{
	const struct inode *inode = page->mapping->host;
	const unsigned int blockbits = inode->i_blkbits;
	const unsigned int blocksize = 1 << blockbits;
	struct page *ciphertext_page;
	u64 lblk_num = ((u64)page->index << (PAGE_SHIFT - blockbits)) +
		       (offs >> blockbits);
	unsigned int i;
	int err;

	if (WARN_ON_ONCE(!PageLocked(page)))
		return ERR_PTR(-EINVAL);

	if (WARN_ON_ONCE(len <= 0 || !IS_ALIGNED(len | offs, blocksize)))
		return ERR_PTR(-EINVAL);

	ciphertext_page = fscrypt_alloc_bounce_page(gfp_flags);
	if (!ciphertext_page)
		return ERR_PTR(-ENOMEM);

	for (i = offs; i < offs + len; i += blocksize, lblk_num++) {
		err = fscrypt_crypt_block(inode, FS_ENCRYPT, lblk_num,
					  page, ciphertext_page,
					  blocksize, i, gfp_flags);
		if (err) {
			fscrypt_free_bounce_page(ciphertext_page);
			return ERR_PTR(err);
		}
	}
	SetPagePrivate(ciphertext_page);
	set_page_private(ciphertext_page, (unsigned long)page);
	return ciphertext_page;
}
EXPORT_SYMBOL(fscrypt_encrypt_pagecache_blocks);
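
/*
 * Illustrative sketch (not fscrypt API): how a writeback path might use the
 * bounce page.  example_add_block_to_bio() is a hypothetical helper; bio
 * allocation and submission are elided.  Per the comment above, only the
 * first bounce page of a bio may block on the mempool (e.g. GFP_NOFS);
 * later ones must use a non-blocking mask such as GFP_NOWAIT.
 */
#if 0
static int example_add_block_to_bio(struct bio *bio, struct page *page,
				    bool first_page)
{
	struct page *bounce_page;

	bounce_page = fscrypt_encrypt_pagecache_blocks(page, PAGE_SIZE, 0,
						       first_page ? GFP_NOFS :
								    GFP_NOWAIT);
	if (IS_ERR(bounce_page))
		return PTR_ERR(bounce_page);

	/* The bio carries the ciphertext; the pagecache page stays plaintext. */
	if (bio_add_page(bio, bounce_page, PAGE_SIZE, 0) != PAGE_SIZE) {
		fscrypt_free_bounce_page(bounce_page);
		return -EAGAIN;
	}
	return 0;
}
#endif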

/**
 * fscrypt_encrypt_block_inplace() - Encrypt a filesystem block in-place
 * @inode:     The inode to which this block belongs
 * @page:      The page containing the block to encrypt
 * @len:       Size of block to encrypt.  This must be a multiple of
 *		FSCRYPT_CONTENTS_ALIGNMENT.
 * @offs:      Byte offset within @page at which the block to encrypt begins
 * @lblk_num:  Filesystem logical block number of the block, i.e. the 0-based
 *		number of the block within the file
 * @gfp_flags: Memory allocation flags
 *
 * Encrypt a possibly-compressed filesystem block that is located in an
 * arbitrary page, not necessarily in the original pagecache page.  The @inode
 * and @lblk_num must be specified, as they can't be determined from @page.
 *
 * Return: 0 on success; -errno on failure
 */
int fscrypt_encrypt_block_inplace(const struct inode *inode, struct page *page,
				  unsigned int len, unsigned int offs,
				  u64 lblk_num, gfp_t gfp_flags)
{
	return fscrypt_crypt_block(inode, FS_ENCRYPT, lblk_num, page, page,
				   len, offs, gfp_flags);
}
EXPORT_SYMBOL(fscrypt_encrypt_block_inplace);

/**
 * fscrypt_decrypt_pagecache_blocks() - Decrypt filesystem blocks in a
 *					pagecache folio
 * @folio:     The locked pagecache folio containing the block(s) to decrypt
 * @len:       Total size of the block(s) to decrypt.  Must be a nonzero
 *		multiple of the filesystem's block size.
 * @offs:      Byte offset within @folio of the first block to decrypt.  Must be
 *		a multiple of the filesystem's block size.
 *
 * The specified block(s) are decrypted in-place within the pagecache folio,
 * which must still be locked and not uptodate.
 *
 * This is for use by the filesystem's ->readahead() method.
 *
 * Return: 0 on success; -errno on failure
 */
int fscrypt_decrypt_pagecache_blocks(struct folio *folio, size_t len,
				     size_t offs)
{
	const struct inode *inode = folio->mapping->host;
	const unsigned int blockbits = inode->i_blkbits;
	const unsigned int blocksize = 1 << blockbits;
	u64 lblk_num = ((u64)folio->index << (PAGE_SHIFT - blockbits)) +
		       (offs >> blockbits);
	size_t i;
	int err;

	if (WARN_ON_ONCE(!folio_test_locked(folio)))
		return -EINVAL;

	if (WARN_ON_ONCE(len <= 0 || !IS_ALIGNED(len | offs, blocksize)))
		return -EINVAL;

	for (i = offs; i < offs + len; i += blocksize, lblk_num++) {
		struct page *page = folio_page(folio, i >> PAGE_SHIFT);

		err = fscrypt_crypt_block(inode, FS_DECRYPT, lblk_num, page,
					  page, blocksize, i & ~PAGE_MASK,
					  GFP_NOFS);
		if (err)
			return err;
	}
	return 0;
}
EXPORT_SYMBOL(fscrypt_decrypt_pagecache_blocks);
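
/*
 * Illustrative sketch (not fscrypt API), completing the hypothetical read
 * path from example_read_endio() above: the deferred work item decrypts each
 * folio of the completed bio in place, then marks it uptodate and unlocks it.
 */
#if 0
static void example_decrypt_work(struct work_struct *work)
{
	struct example_read_ctx *ctx =
		container_of(work, struct example_read_ctx, work);
	struct folio_iter fi;

	bio_for_each_folio_all(fi, ctx->bio) {
		if (fscrypt_decrypt_pagecache_blocks(fi.folio, fi.length,
						     fi.offset) == 0)
			folio_mark_uptodate(fi.folio);
		folio_unlock(fi.folio);
	}
	bio_put(ctx->bio);
	kfree(ctx);
}
#endif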

/**
 * fscrypt_decrypt_block_inplace() - Decrypt a filesystem block in-place
 * @inode:     The inode to which this block belongs
 * @page:      The page containing the block to decrypt
 * @len:       Size of block to decrypt.  This must be a multiple of
 *		FSCRYPT_CONTENTS_ALIGNMENT.
 * @offs:      Byte offset within @page at which the block to decrypt begins
 * @lblk_num:  Filesystem logical block number of the block, i.e. the 0-based
 *		number of the block within the file
 *
 * Decrypt a possibly-compressed filesystem block that is located in an
 * arbitrary page, not necessarily in the original pagecache page.  The @inode
 * and @lblk_num must be specified, as they can't be determined from @page.
 *
 * Return: 0 on success; -errno on failure
 */
int fscrypt_decrypt_block_inplace(const struct inode *inode, struct page *page,
				  unsigned int len, unsigned int offs,
				  u64 lblk_num)
{
	return fscrypt_crypt_block(inode, FS_DECRYPT, lblk_num, page, page,
				   len, offs, GFP_NOFS);
}
EXPORT_SYMBOL(fscrypt_decrypt_block_inplace);

/**
 * fscrypt_initialize() - allocate major buffers for fs encryption.
 * @sb: the filesystem superblock
 *
 * We only call this when we start accessing encrypted files, since it
 * results in memory getting allocated that wouldn't otherwise be used.
 *
 * Return: 0 on success; -errno on failure
 */
int fscrypt_initialize(struct super_block *sb)
{
	int err = 0;
	mempool_t *pool;

	/* pairs with smp_store_release() below */
	if (likely(smp_load_acquire(&fscrypt_bounce_page_pool)))
		return 0;

	/* No need to allocate a bounce page pool if this FS won't use it. */
	if (sb->s_cop->flags & FS_CFLG_OWN_PAGES)
		return 0;

	mutex_lock(&fscrypt_init_mutex);
	if (fscrypt_bounce_page_pool)
		goto out_unlock;

	err = -ENOMEM;
	pool = mempool_create_page_pool(num_prealloc_crypto_pages, 0);
	if (!pool)
		goto out_unlock;
	/* pairs with smp_load_acquire() above */
	smp_store_release(&fscrypt_bounce_page_pool, pool);
	err = 0;
out_unlock:
	mutex_unlock(&fscrypt_init_mutex);
	return err;
}

void fscrypt_msg(const struct inode *inode, const char *level,
		 const char *fmt, ...)
{
	static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);
	struct va_format vaf;
	va_list args;

	if (!__ratelimit(&rs))
		return;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	if (inode && inode->i_ino)
		printk("%sfscrypt (%s, inode %lu): %pV\n",
		       level, inode->i_sb->s_id, inode->i_ino, &vaf);
	else if (inode)
		printk("%sfscrypt (%s): %pV\n", level, inode->i_sb->s_id, &vaf);
	else
		printk("%sfscrypt: %pV\n", level, &vaf);
	va_end(args);
}
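
/*
 * Note: callers normally reach fscrypt_msg() through the fscrypt_warn() and
 * fscrypt_err() wrapper macros in fscrypt_private.h, e.g.:
 *
 *	fscrypt_warn(inode, "unsupported mode in policy: %d", mode);
 *
 * A NULL inode is allowed for messages that aren't tied to a specific file.
 */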

/**
 * fscrypt_init() - Set up for fs encryption.
 *
 * Return: 0 on success; -errno on failure
 */
static int __init fscrypt_init(void)
{
	int err = -ENOMEM;

	/*
	 * Use an unbound workqueue to allow bios to be decrypted in parallel
	 * even when they happen to complete on the same CPU.  This sacrifices
	 * locality, but it's worthwhile since decryption is CPU-intensive.
	 *
	 * Also use a high-priority workqueue to prioritize decryption work,
	 * which blocks reads from completing, over regular application tasks.
	 */
	fscrypt_read_workqueue = alloc_workqueue("fscrypt_read_queue",
						 WQ_UNBOUND | WQ_HIGHPRI,
						 num_online_cpus());
	if (!fscrypt_read_workqueue)
		goto fail;

	fscrypt_info_cachep = KMEM_CACHE(fscrypt_info, SLAB_RECLAIM_ACCOUNT);
	if (!fscrypt_info_cachep)
		goto fail_free_queue;

	err = fscrypt_init_keyring();
	if (err)
		goto fail_free_info;

	return 0;

fail_free_info:
	kmem_cache_destroy(fscrypt_info_cachep);
fail_free_queue:
	destroy_workqueue(fscrypt_read_workqueue);
fail:
	return err;
}
late_initcall(fscrypt_init)