// SPDX-License-Identifier: GPL-2.0-only
/*
 * This contains encryption functions for per-file encryption.
 *
 * Copyright (C) 2015, Google, Inc.
 * Copyright (C) 2015, Motorola Mobility
 *
 * Written by Michael Halcrow, 2014.
 *
 * Filename encryption additions
 *	Uday Savagaonkar, 2014
 * Encryption policy handling additions
 *	Ildar Muslukhov, 2014
 * Add fscrypt_pullback_bio_page()
 *	Jaegeuk Kim, 2015.
 *
 * This has not yet undergone a rigorous security audit.
 *
 * The usage of AES-XTS should conform to recommendations in NIST
 * Special Publication 800-38E and IEEE P1619/D16.
 */

#include <linux/pagemap.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/ratelimit.h>
#include <crypto/skcipher.h>
#include "fscrypt_private.h"

static unsigned int num_prealloc_crypto_pages = 32;

module_param(num_prealloc_crypto_pages, uint, 0444);
MODULE_PARM_DESC(num_prealloc_crypto_pages,
		"Number of crypto pages to preallocate");

static mempool_t *fscrypt_bounce_page_pool = NULL;

static struct workqueue_struct *fscrypt_read_workqueue;
static DEFINE_MUTEX(fscrypt_init_mutex);

struct kmem_cache *fscrypt_info_cachep;

void fscrypt_enqueue_decrypt_work(struct work_struct *work)
{
	queue_work(fscrypt_read_workqueue, work);
}
EXPORT_SYMBOL(fscrypt_enqueue_decrypt_work);
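
/*
 * Usage sketch (illustrative only; the myfs_* names and the ctx structure are
 * placeholders, not code in this file): a filesystem's read completion handler
 * runs in a context where it cannot decrypt, so it packages the work up and
 * defers it to the fscrypt read workqueue:
 *
 *	static void myfs_decrypt_work(struct work_struct *work)
 *	{
 *		struct myfs_post_read_ctx *ctx =
 *			container_of(work, struct myfs_post_read_ctx, work);
 *
 *		... decrypt and unlock the pages in ctx->bio ...
 *	}
 *
 *	static void myfs_read_end_io(struct bio *bio)
 *	{
 *		struct myfs_post_read_ctx *ctx = bio->bi_private;
 *
 *		INIT_WORK(&ctx->work, myfs_decrypt_work);
 *		fscrypt_enqueue_decrypt_work(&ctx->work);
 *	}
 */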

struct page *fscrypt_alloc_bounce_page(gfp_t gfp_flags)
{
	return mempool_alloc(fscrypt_bounce_page_pool, gfp_flags);
}

/**
 * fscrypt_free_bounce_page() - free a ciphertext bounce page
 * @bounce_page: the bounce page to free, or NULL
 *
 * Free a bounce page that was allocated by fscrypt_encrypt_pagecache_blocks(),
 * or by fscrypt_alloc_bounce_page() directly.
 */
void fscrypt_free_bounce_page(struct page *bounce_page)
{
	if (!bounce_page)
		return;
	set_page_private(bounce_page, (unsigned long)NULL);
	ClearPagePrivate(bounce_page);
	mempool_free(bounce_page, fscrypt_bounce_page_pool);
}
EXPORT_SYMBOL(fscrypt_free_bounce_page);
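
/*
 * Usage sketch (an assumption about the caller, not code in this file): once
 * writeback of a bounce page completes, the filesystem frees it and finishes
 * writeback on the pagecache page it shadowed.  fscrypt_pagecache_page() is
 * the <linux/fscrypt.h> helper that reads back the pointer stored via
 * set_page_private() in fscrypt_encrypt_pagecache_blocks():
 *
 *	struct page *pagecache_page = fscrypt_pagecache_page(bounce_page);
 *
 *	fscrypt_free_bounce_page(bounce_page);
 *	end_page_writeback(pagecache_page);
 */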

/*
 * Generate the IV for the given logical block number within the given file.
 * For filename encryption, lblk_num == 0.
 *
 * Keep this in sync with fscrypt_limit_io_blocks().  fscrypt_limit_io_blocks()
 * needs to know about any IV generation methods where the low bits of IV don't
 * simply contain the lblk_num (e.g., IV_INO_LBLK_32).
 */
void fscrypt_generate_iv(union fscrypt_iv *iv, u64 lblk_num,
			 const struct fscrypt_info *ci)
{
	u8 flags = fscrypt_policy_flags(&ci->ci_policy);

	memset(iv, 0, ci->ci_mode->ivsize);

	if (flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64) {
		WARN_ON_ONCE(lblk_num > U32_MAX);
		WARN_ON_ONCE(ci->ci_inode->i_ino > U32_MAX);
		lblk_num |= (u64)ci->ci_inode->i_ino << 32;
	} else if (flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32) {
		WARN_ON_ONCE(lblk_num > U32_MAX);
		lblk_num = (u32)(ci->ci_hashed_ino + lblk_num);
	} else if (flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY) {
		memcpy(iv->nonce, ci->ci_nonce, FSCRYPT_FILE_NONCE_SIZE);
	}
	iv->lblk_num = cpu_to_le64(lblk_num);
}
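
/*
 * Worked illustration of the cases handled above (derived from the code; the
 * union fscrypt_iv layout itself is defined in fscrypt_private.h):
 *
 *	default:	iv->lblk_num = cpu_to_le64(lblk_num), rest zeroed
 *	IV_INO_LBLK_64:	iv->lblk_num = cpu_to_le64(ino << 32 | lblk_num)
 *	IV_INO_LBLK_32:	iv->lblk_num = cpu_to_le64((u32)(hashed_ino + lblk_num))
 *	DIRECT_KEY:	iv->lblk_num = cpu_to_le64(lblk_num), and the per-file
 *			nonce is copied into iv->nonce
 */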

/* Encrypt or decrypt a single filesystem block of file contents */
int fscrypt_crypt_block(const struct inode *inode, fscrypt_direction_t rw,
			u64 lblk_num, struct page *src_page,
			struct page *dest_page, unsigned int len,
			unsigned int offs, gfp_t gfp_flags)
{
	union fscrypt_iv iv;
	struct skcipher_request *req = NULL;
	DECLARE_CRYPTO_WAIT(wait);
	struct scatterlist dst, src;
	struct fscrypt_info *ci = inode->i_crypt_info;
	struct crypto_skcipher *tfm = ci->ci_enc_key.tfm;
	int res = 0;

	if (WARN_ON_ONCE(len <= 0))
		return -EINVAL;
	if (WARN_ON_ONCE(len % FSCRYPT_CONTENTS_ALIGNMENT != 0))
		return -EINVAL;

	fscrypt_generate_iv(&iv, lblk_num, ci);

	req = skcipher_request_alloc(tfm, gfp_flags);
	if (!req)
		return -ENOMEM;

	skcipher_request_set_callback(
		req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
		crypto_req_done, &wait);

	sg_init_table(&dst, 1);
	sg_set_page(&dst, dest_page, len, offs);
	sg_init_table(&src, 1);
	sg_set_page(&src, src_page, len, offs);
	skcipher_request_set_crypt(req, &src, &dst, len, &iv);
	if (rw == FS_DECRYPT)
		res = crypto_wait_req(crypto_skcipher_decrypt(req), &wait);
	else
		res = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
	skcipher_request_free(req);
	if (res) {
		fscrypt_err(inode, "%scryption failed for block %llu: %d",
			    (rw == FS_DECRYPT ? "De" : "En"), lblk_num, res);
		return res;
	}
	return 0;
}

/**
 * fscrypt_encrypt_pagecache_blocks() - Encrypt filesystem blocks from a
 *					pagecache page
 * @page:      The locked pagecache page containing the block(s) to encrypt
 * @len:       Total size of the block(s) to encrypt.  Must be a nonzero
 *	       multiple of the filesystem's block size.
 * @offs:      Byte offset within @page of the first block to encrypt.  Must be
 *	       a multiple of the filesystem's block size.
 * @gfp_flags: Memory allocation flags.  See details below.
 *
 * A new bounce page is allocated, and the specified block(s) are encrypted into
 * it.  In the bounce page, the ciphertext block(s) will be located at the same
 * offsets at which the plaintext block(s) were located in the source page; any
 * other parts of the bounce page will be left uninitialized.  However, normally
 * blocksize == PAGE_SIZE and the whole page is encrypted at once.
 *
 * This is for use by the filesystem's ->writepages() method.
 *
 * The bounce page allocation is mempool-backed, so it will always succeed when
 * @gfp_flags includes __GFP_DIRECT_RECLAIM, e.g. when it's GFP_NOFS.  However,
 * only the first page of each bio can be allocated this way.  To prevent
 * deadlocks, for any additional pages a mask like GFP_NOWAIT must be used.
 *
 * Return: the new encrypted bounce page on success; an ERR_PTR() on failure
 */
struct page *fscrypt_encrypt_pagecache_blocks(struct page *page,
					      unsigned int len,
					      unsigned int offs,
					      gfp_t gfp_flags)
{
	const struct inode *inode = page->mapping->host;
	const unsigned int blockbits = inode->i_blkbits;
	const unsigned int blocksize = 1 << blockbits;
	struct page *ciphertext_page;
	u64 lblk_num = ((u64)page->index << (PAGE_SHIFT - blockbits)) +
		       (offs >> blockbits);
	unsigned int i;
	int err;

	if (WARN_ON_ONCE(!PageLocked(page)))
		return ERR_PTR(-EINVAL);

	if (WARN_ON_ONCE(len <= 0 || !IS_ALIGNED(len | offs, blocksize)))
		return ERR_PTR(-EINVAL);

	ciphertext_page = fscrypt_alloc_bounce_page(gfp_flags);
	if (!ciphertext_page)
		return ERR_PTR(-ENOMEM);

	for (i = offs; i < offs + len; i += blocksize, lblk_num++) {
		err = fscrypt_crypt_block(inode, FS_ENCRYPT, lblk_num,
					  page, ciphertext_page,
					  blocksize, i, gfp_flags);
		if (err) {
			fscrypt_free_bounce_page(ciphertext_page);
			return ERR_PTR(err);
		}
	}
	SetPagePrivate(ciphertext_page);
	set_page_private(ciphertext_page, (unsigned long)page);
	return ciphertext_page;
}
EXPORT_SYMBOL(fscrypt_encrypt_pagecache_blocks);
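
/*
 * Usage sketch (an assumption about the caller; myfs_submit_bounce_page() is a
 * hypothetical helper, and blocksize == PAGE_SIZE is assumed): a filesystem's
 * ->writepages() path encrypts each dirty pagecache page into a bounce page
 * and submits the bounce page for I/O instead of the original:
 *
 *	bounce_page = fscrypt_encrypt_pagecache_blocks(page, PAGE_SIZE, 0,
 *						       GFP_NOFS);
 *	if (IS_ERR(bounce_page))
 *		return PTR_ERR(bounce_page);
 *	err = myfs_submit_bounce_page(wbc, bounce_page);
 *	if (err)
 *		fscrypt_free_bounce_page(bounce_page);
 */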

/**
 * fscrypt_encrypt_block_inplace() - Encrypt a filesystem block in-place
 * @inode:     The inode to which this block belongs
 * @page:      The page containing the block to encrypt
 * @len:       Size of block to encrypt.  This must be a multiple of
 *	       FSCRYPT_CONTENTS_ALIGNMENT.
 * @offs:      Byte offset within @page at which the block to encrypt begins
 * @lblk_num:  Filesystem logical block number of the block, i.e. the 0-based
 *	       number of the block within the file
 * @gfp_flags: Memory allocation flags
 *
 * Encrypt a possibly-compressed filesystem block that is located in an
 * arbitrary page, not necessarily in the original pagecache page.  The @inode
 * and @lblk_num must be specified, as they can't be determined from @page.
 *
 * Return: 0 on success; -errno on failure
 */
int fscrypt_encrypt_block_inplace(const struct inode *inode, struct page *page,
				  unsigned int len, unsigned int offs,
				  u64 lblk_num, gfp_t gfp_flags)
{
	return fscrypt_crypt_block(inode, FS_ENCRYPT, lblk_num, page, page,
				   len, offs, gfp_flags);
}
EXPORT_SYMBOL(fscrypt_encrypt_block_inplace);
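
/*
 * Usage sketch (an assumption; buf_page, blocksize and lblk_num are
 * hypothetical caller-side variables): a filesystem that stages data, such as
 * compressed cluster contents, in its own non-pagecache pages can encrypt each
 * block in place before writing it out, supplying the logical block number
 * explicitly:
 *
 *	err = fscrypt_encrypt_block_inplace(inode, buf_page, blocksize, 0,
 *					    lblk_num, GFP_NOFS);
 *	if (err)
 *		return err;
 */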

/**
 * fscrypt_decrypt_pagecache_blocks() - Decrypt filesystem blocks in a
 *					pagecache folio
 * @folio: The locked pagecache folio containing the block(s) to decrypt
 * @len:   Total size of the block(s) to decrypt.  Must be a nonzero
 *	   multiple of the filesystem's block size.
 * @offs:  Byte offset within @folio of the first block to decrypt.  Must be
 *	   a multiple of the filesystem's block size.
 *
 * The specified block(s) are decrypted in-place within the pagecache folio,
 * which must still be locked and not uptodate.
 *
 * This is for use by the filesystem's ->readahead() method.
 *
 * Return: 0 on success; -errno on failure
 */
int fscrypt_decrypt_pagecache_blocks(struct folio *folio, size_t len,
				     size_t offs)
{
	const struct inode *inode = folio->mapping->host;
	const unsigned int blockbits = inode->i_blkbits;
	const unsigned int blocksize = 1 << blockbits;
	u64 lblk_num = ((u64)folio->index << (PAGE_SHIFT - blockbits)) +
		       (offs >> blockbits);
	size_t i;
	int err;

	if (WARN_ON_ONCE(!folio_test_locked(folio)))
		return -EINVAL;

	if (WARN_ON_ONCE(len <= 0 || !IS_ALIGNED(len | offs, blocksize)))
		return -EINVAL;

	for (i = offs; i < offs + len; i += blocksize, lblk_num++) {
		struct page *page = folio_page(folio, i >> PAGE_SHIFT);

		err = fscrypt_crypt_block(inode, FS_DECRYPT, lblk_num, page,
					  page, blocksize, i & ~PAGE_MASK,
					  GFP_NOFS);
		if (err)
			return err;
	}
	return 0;
}
EXPORT_SYMBOL(fscrypt_decrypt_pagecache_blocks);
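
/*
 * Usage sketch (an assumption about the caller): in the deferred read
 * completion work mentioned near fscrypt_enqueue_decrypt_work() above, the
 * filesystem decrypts each folio of the bio in place and then marks it
 * uptodate:
 *
 *	err = fscrypt_decrypt_pagecache_blocks(folio, folio_size(folio), 0);
 *	if (!err)
 *		folio_mark_uptodate(folio);
 *	folio_unlock(folio);
 */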

/**
 * fscrypt_decrypt_block_inplace() - Decrypt a filesystem block in-place
 * @inode:     The inode to which this block belongs
 * @page:      The page containing the block to decrypt
 * @len:       Size of block to decrypt.  This must be a multiple of
 *	       FSCRYPT_CONTENTS_ALIGNMENT.
 * @offs:      Byte offset within @page at which the block to decrypt begins
 * @lblk_num:  Filesystem logical block number of the block, i.e. the 0-based
 *	       number of the block within the file
 *
 * Decrypt a possibly-compressed filesystem block that is located in an
 * arbitrary page, not necessarily in the original pagecache page.  The @inode
 * and @lblk_num must be specified, as they can't be determined from @page.
 *
 * Return: 0 on success; -errno on failure
 */
int fscrypt_decrypt_block_inplace(const struct inode *inode, struct page *page,
				  unsigned int len, unsigned int offs,
				  u64 lblk_num)
{
	return fscrypt_crypt_block(inode, FS_DECRYPT, lblk_num, page, page,
				   len, offs, GFP_NOFS);
}
EXPORT_SYMBOL(fscrypt_decrypt_block_inplace);
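
/*
 * Usage sketch (an assumption, mirroring fscrypt_encrypt_block_inplace();
 * buf_page, blocksize and lblk_num are hypothetical caller-side variables):
 * a filesystem that reads a block into a private buffer page can decrypt it
 * there:
 *
 *	err = fscrypt_decrypt_block_inplace(inode, buf_page, blocksize, 0,
 *					    lblk_num);
 */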

/**
 * fscrypt_initialize() - allocate major buffers for fs encryption.
 * @sb: the filesystem superblock
 *
 * We only call this when we start accessing encrypted files, since it
 * results in memory getting allocated that wouldn't otherwise be used.
 *
 * Return: 0 on success; -errno on failure
 */
int fscrypt_initialize(struct super_block *sb)
{
	int err = 0;
	mempool_t *pool;

	/* pairs with smp_store_release() below */
	if (likely(smp_load_acquire(&fscrypt_bounce_page_pool)))
		return 0;

	/* No need to allocate a bounce page pool if this FS won't use it. */
	if (sb->s_cop->flags & FS_CFLG_OWN_PAGES)
		return 0;

	mutex_lock(&fscrypt_init_mutex);
	if (fscrypt_bounce_page_pool)
		goto out_unlock;

	err = -ENOMEM;
	pool = mempool_create_page_pool(num_prealloc_crypto_pages, 0);
	if (!pool)
		goto out_unlock;
	/* pairs with smp_load_acquire() above */
	smp_store_release(&fscrypt_bounce_page_pool, pool);
	err = 0;
out_unlock:
	mutex_unlock(&fscrypt_init_mutex);
	return err;
}

void fscrypt_msg(const struct inode *inode, const char *level,
		 const char *fmt, ...)
{
	static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);
	struct va_format vaf;
	va_list args;

	if (!__ratelimit(&rs))
		return;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	if (inode && inode->i_ino)
		printk("%sfscrypt (%s, inode %lu): %pV\n",
		       level, inode->i_sb->s_id, inode->i_ino, &vaf);
	else if (inode)
		printk("%sfscrypt (%s): %pV\n", level, inode->i_sb->s_id, &vaf);
	else
		printk("%sfscrypt: %pV\n", level, &vaf);
	va_end(args);
}

/**
 * fscrypt_init() - Set up for fs encryption.
 *
 * Return: 0 on success; -errno on failure
 */
static int __init fscrypt_init(void)
{
	int err = -ENOMEM;

	/*
	 * Use an unbound workqueue to allow bios to be decrypted in parallel
	 * even when they happen to complete on the same CPU.  This sacrifices
	 * locality, but it's worthwhile since decryption is CPU-intensive.
	 *
	 * Also use a high-priority workqueue to prioritize decryption work,
	 * which blocks reads from completing, over regular application tasks.
	 */
	fscrypt_read_workqueue = alloc_workqueue("fscrypt_read_queue",
						 WQ_UNBOUND | WQ_HIGHPRI,
						 num_online_cpus());
	if (!fscrypt_read_workqueue)
		goto fail;

	fscrypt_info_cachep = KMEM_CACHE(fscrypt_info, SLAB_RECLAIM_ACCOUNT);
	if (!fscrypt_info_cachep)
		goto fail_free_queue;

	err = fscrypt_init_keyring();
	if (err)
		goto fail_free_info;

	return 0;

fail_free_info:
	kmem_cache_destroy(fscrypt_info_cachep);
fail_free_queue:
	destroy_workqueue(fscrypt_read_workqueue);
fail:
	return err;
}
late_initcall(fscrypt_init)