--- crypto.c	(5b45fe6b39e1d01c45de7b8e6d3ff72585eee6cf)
+++ crypto.c	(b7e7cf7a66a27e62c5f873a0068cee34094bf5d7)
 /*
  * This contains encryption functions for per-file encryption.
  *
  * Copyright (C) 2015, Google, Inc.
  * Copyright (C) 2015, Motorola Mobility
  *
  * Written by Michael Halcrow, 2014.
  *

--- 12 unchanged lines hidden ---

 #include <linux/pagemap.h>
 #include <linux/mempool.h>
 #include <linux/module.h>
 #include <linux/scatterlist.h>
 #include <linux/ratelimit.h>
 #include <linux/dcache.h>
 #include <linux/namei.h>
+#include <crypto/aes.h>
 #include "fscrypt_private.h"

 static unsigned int num_prealloc_crypto_pages = 32;
 static unsigned int num_prealloc_crypto_ctxs = 128;

 module_param(num_prealloc_crypto_pages, uint, 0444);
 MODULE_PARM_DESC(num_prealloc_crypto_pages,
                  "Number of crypto pages to preallocate");

--- 105 unchanged lines hidden ---

 int fscrypt_do_page_crypto(const struct inode *inode, fscrypt_direction_t rw,
                            u64 lblk_num, struct page *src_page,
                            struct page *dest_page, unsigned int len,
                            unsigned int offs, gfp_t gfp_flags)
 {
         struct {
                 __le64 index;
-                u8 padding[FS_XTS_TWEAK_SIZE - sizeof(__le64)];
-        } xts_tweak;
+                u8 padding[FS_IV_SIZE - sizeof(__le64)];
+        } iv;
         struct skcipher_request *req = NULL;
         DECLARE_FS_COMPLETION_RESULT(ecr);
         struct scatterlist dst, src;
         struct fscrypt_info *ci = inode->i_crypt_info;
         struct crypto_skcipher *tfm = ci->ci_ctfm;
         int res = 0;

         BUG_ON(len == 0);

+        BUILD_BUG_ON(sizeof(iv) != FS_IV_SIZE);
+        BUILD_BUG_ON(AES_BLOCK_SIZE != FS_IV_SIZE);
+        iv.index = cpu_to_le64(lblk_num);
+        memset(iv.padding, 0, sizeof(iv.padding));
+
+        if (ci->ci_essiv_tfm != NULL) {
+                crypto_cipher_encrypt_one(ci->ci_essiv_tfm, (u8 *)&iv,
+                                          (u8 *)&iv);
+        }
+
         req = skcipher_request_alloc(tfm, gfp_flags);
         if (!req) {
                 printk_ratelimited(KERN_ERR
                                 "%s: crypto_request_alloc() failed\n",
                                 __func__);
                 return -ENOMEM;
         }

         skcipher_request_set_callback(
                 req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
                 page_crypt_complete, &ecr);

-        BUILD_BUG_ON(sizeof(xts_tweak) != FS_XTS_TWEAK_SIZE);
-        xts_tweak.index = cpu_to_le64(lblk_num);
-        memset(xts_tweak.padding, 0, sizeof(xts_tweak.padding));
-
         sg_init_table(&dst, 1);
         sg_set_page(&dst, dest_page, len, offs);
         sg_init_table(&src, 1);
         sg_set_page(&src, src_page, len, offs);
-        skcipher_request_set_crypt(req, &src, &dst, len, &xts_tweak);
+        skcipher_request_set_crypt(req, &src, &dst, len, &iv);
         if (rw == FS_DECRYPT)
                 res = crypto_skcipher_decrypt(req);
         else
                 res = crypto_skcipher_encrypt(req);
         if (res == -EINPROGRESS || res == -EBUSY) {
                 BUG_ON(req->base.data != &ecr);
                 wait_for_completion(&ecr.completion);
                 res = ecr.res;

--- 282 unchanged lines hidden ---

 static void __exit fscrypt_exit(void)
 {
         fscrypt_destroy();

         if (fscrypt_read_workqueue)
                 destroy_workqueue(fscrypt_read_workqueue);
         kmem_cache_destroy(fscrypt_ctx_cachep);
         kmem_cache_destroy(fscrypt_info_cachep);
+
+        fscrypt_essiv_cleanup();
 }
 module_exit(fscrypt_exit);

 MODULE_LICENSE("GPL");