/*
 * This contains encryption functions for per-file encryption.
 *
 * Copyright (C) 2015, Google, Inc.
 * Copyright (C) 2015, Motorola Mobility
 *
 * Written by Michael Halcrow, 2014.
 *
 * Filename encryption additions
 *	Uday Savagaonkar, 2014
 * Encryption policy handling additions
 *	Ildar Muslukhov, 2014
 * Add fscrypt_pullback_bio_page()
 *	Jaegeuk Kim, 2015.
 *
 * This has not yet undergone a rigorous security audit.
 *
 * The usage of AES-XTS should conform to recommendations in NIST
 * Special Publication 800-38E and IEEE P1619/D16.
 */

#include <linux/pagemap.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/ratelimit.h>
#include <linux/dcache.h>
#include <linux/namei.h>
#include "fscrypt_private.h"

static unsigned int num_prealloc_crypto_pages = 32;
static unsigned int num_prealloc_crypto_ctxs = 128;

module_param(num_prealloc_crypto_pages, uint, 0444);
MODULE_PARM_DESC(num_prealloc_crypto_pages,
		"Number of crypto pages to preallocate");
module_param(num_prealloc_crypto_ctxs, uint, 0444);
MODULE_PARM_DESC(num_prealloc_crypto_ctxs,
		"Number of crypto contexts to preallocate");
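
/*
 * Illustrative note (not part of the original file): both preallocation
 * counts above are 0444 module parameters, so they are visible under
 * /sys/module/ but can only be set at load time. Assuming this code is
 * built as the "fscrypto" module, a sketch of tuning them from the kernel
 * command line would be:
 *
 *	fscrypto.num_prealloc_crypto_pages=64
 *	fscrypto.num_prealloc_crypto_ctxs=256
 */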

static mempool_t *fscrypt_bounce_page_pool = NULL;

static LIST_HEAD(fscrypt_free_ctxs);
static DEFINE_SPINLOCK(fscrypt_ctx_lock);

struct workqueue_struct *fscrypt_read_workqueue;
static DEFINE_MUTEX(fscrypt_init_mutex);

static struct kmem_cache *fscrypt_ctx_cachep;
struct kmem_cache *fscrypt_info_cachep;

/**
 * fscrypt_release_ctx() - Releases an encryption context
 * @ctx: The encryption context to release.
 *
 * If the encryption context was allocated from the pre-allocated pool, returns
 * it to that pool. Else, frees it.
 *
 * If there's a bounce page in the context, this frees that.
 */
void fscrypt_release_ctx(struct fscrypt_ctx *ctx)
{
	unsigned long flags;

	if (ctx->flags & FS_CTX_HAS_BOUNCE_BUFFER_FL && ctx->w.bounce_page) {
		mempool_free(ctx->w.bounce_page, fscrypt_bounce_page_pool);
		ctx->w.bounce_page = NULL;
	}
	ctx->w.control_page = NULL;
	if (ctx->flags & FS_CTX_REQUIRES_FREE_ENCRYPT_FL) {
		kmem_cache_free(fscrypt_ctx_cachep, ctx);
	} else {
		spin_lock_irqsave(&fscrypt_ctx_lock, flags);
		list_add(&ctx->free_list, &fscrypt_free_ctxs);
		spin_unlock_irqrestore(&fscrypt_ctx_lock, flags);
	}
}
EXPORT_SYMBOL(fscrypt_release_ctx);

/**
 * fscrypt_get_ctx() - Gets an encryption context
 * @inode:     The inode for which we are doing the crypto
 * @gfp_flags: The gfp flag for memory allocation
 *
 * Allocates and initializes an encryption context.
 *
 * Return: An allocated and initialized encryption context on success; an
 * error pointer otherwise.
 */
struct fscrypt_ctx *fscrypt_get_ctx(const struct inode *inode, gfp_t gfp_flags)
{
	struct fscrypt_ctx *ctx = NULL;
	struct fscrypt_info *ci = inode->i_crypt_info;
	unsigned long flags;

	if (ci == NULL)
		return ERR_PTR(-ENOKEY);

	/*
	 * We first try getting the ctx from a free list because in
	 * the common case the ctx will have an allocated and
	 * initialized crypto tfm, so it's probably a worthwhile
	 * optimization. For the bounce page, we first try getting it
	 * from the kernel allocator because that's just about as fast
	 * as getting it from a list and because a cache of free pages
	 * should generally be a "last resort" option for a filesystem
	 * to be able to do its job.
	 */
	spin_lock_irqsave(&fscrypt_ctx_lock, flags);
	ctx = list_first_entry_or_null(&fscrypt_free_ctxs,
					struct fscrypt_ctx, free_list);
	if (ctx)
		list_del(&ctx->free_list);
	spin_unlock_irqrestore(&fscrypt_ctx_lock, flags);
	if (!ctx) {
		ctx = kmem_cache_zalloc(fscrypt_ctx_cachep, gfp_flags);
		if (!ctx)
			return ERR_PTR(-ENOMEM);
		ctx->flags |= FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
	} else {
		ctx->flags &= ~FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
	}
	ctx->flags &= ~FS_CTX_HAS_BOUNCE_BUFFER_FL;
	return ctx;
}
EXPORT_SYMBOL(fscrypt_get_ctx);

/**
 * page_crypt_complete() - completion callback for page crypto
 * @req: The asynchronous cipher request context
 * @res: The result of the cipher operation
 */
static void page_crypt_complete(struct crypto_async_request *req, int res)
{
	struct fscrypt_completion_result *ecr = req->data;

	if (res == -EINPROGRESS)
		return;
	ecr->res = res;
	complete(&ecr->completion);
}

int fscrypt_do_page_crypto(const struct inode *inode, fscrypt_direction_t rw,
			   u64 lblk_num, struct page *src_page,
			   struct page *dest_page, unsigned int len,
			   unsigned int offs, gfp_t gfp_flags)
{
	struct {
		__le64 index;
		u8 padding[FS_XTS_TWEAK_SIZE - sizeof(__le64)];
	} xts_tweak;
	struct skcipher_request *req = NULL;
	DECLARE_FS_COMPLETION_RESULT(ecr);
	struct scatterlist dst, src;
	struct fscrypt_info *ci = inode->i_crypt_info;
	struct crypto_skcipher *tfm = ci->ci_ctfm;
	int res = 0;

	BUG_ON(len == 0);

	req = skcipher_request_alloc(tfm, gfp_flags);
	if (!req) {
		printk_ratelimited(KERN_ERR
				"%s: skcipher_request_alloc() failed\n",
				__func__);
		return -ENOMEM;
	}

	skcipher_request_set_callback(
		req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
		page_crypt_complete, &ecr);

	BUILD_BUG_ON(sizeof(xts_tweak) != FS_XTS_TWEAK_SIZE);
	xts_tweak.index = cpu_to_le64(lblk_num);
	memset(xts_tweak.padding, 0, sizeof(xts_tweak.padding));

	sg_init_table(&dst, 1);
	sg_set_page(&dst, dest_page, len, offs);
	sg_init_table(&src, 1);
	sg_set_page(&src, src_page, len, offs);
	skcipher_request_set_crypt(req, &src, &dst, len, &xts_tweak);
	if (rw == FS_DECRYPT)
		res = crypto_skcipher_decrypt(req);
	else
		res = crypto_skcipher_encrypt(req);
	if (res == -EINPROGRESS || res == -EBUSY) {
		BUG_ON(req->base.data != &ecr);
		wait_for_completion(&ecr.completion);
		res = ecr.res;
	}
	skcipher_request_free(req);
	if (res) {
		printk_ratelimited(KERN_ERR
			"%s: crypto_skcipher_%scrypt() returned %d\n",
			__func__, (rw == FS_DECRYPT ? "de" : "en"), res);
		return res;
	}
	return 0;
}
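
/*
 * Illustrative note (not part of the original file): the 16-byte XTS tweak
 * built by fscrypt_do_page_crypto() above is just the logical block number
 * encoded little-endian and zero-padded to FS_XTS_TWEAK_SIZE. For example,
 * for lblk_num == 5 the tweak bytes are:
 *
 *	05 00 00 00 00 00 00 00  00 00 00 00 00 00 00 00
 *
 * so identical plaintext stored at different logical block numbers of the
 * same file encrypts to different ciphertext.
 */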

struct page *fscrypt_alloc_bounce_page(struct fscrypt_ctx *ctx,
				       gfp_t gfp_flags)
{
	ctx->w.bounce_page = mempool_alloc(fscrypt_bounce_page_pool, gfp_flags);
	if (ctx->w.bounce_page == NULL)
		return ERR_PTR(-ENOMEM);
	ctx->flags |= FS_CTX_HAS_BOUNCE_BUFFER_FL;
	return ctx->w.bounce_page;
}

/**
 * fscrypt_encrypt_page() - Encrypts a page
 * @inode:     The inode for which the encryption should take place
 * @page:      The page to encrypt. Must be locked for bounce-page
 *             encryption.
 * @len:       Length of data to encrypt in @page and encrypted
 *             data in returned page.
 * @offs:      Offset of data within @page and returned
 *             page holding encrypted data.
 * @lblk_num:  Logical block number. This must be unique for multiple
 *             calls with same inode, except when overwriting
 *             previously written data.
 * @gfp_flags: The gfp flag for memory allocation
 *
 * Encrypts @page using the encryption context of @inode. Performs encryption
 * either in-place or into a newly allocated bounce page.
 * Called on the page write path.
 *
 * Bounce page allocation is the default.
 * In this case, the contents of @page are encrypted and stored in an
 * allocated bounce page. @page has to be locked and the caller must call
 * fscrypt_restore_control_page() on the returned ciphertext page to
 * release the bounce buffer and the encryption context.
 *
 * In-place encryption is used by setting the FS_CFLG_OWN_PAGES flag in
 * fscrypt_operations. Here, the input page is returned with its content
 * encrypted.
 *
 * Return: A page with the encrypted content on success; an error pointer
 * otherwise.
 */
struct page *fscrypt_encrypt_page(const struct inode *inode,
				struct page *page,
				unsigned int len,
				unsigned int offs,
				u64 lblk_num, gfp_t gfp_flags)
{
	struct fscrypt_ctx *ctx;
	struct page *ciphertext_page = page;
	int err;

	BUG_ON(len % FS_CRYPTO_BLOCK_SIZE != 0);

	if (inode->i_sb->s_cop->flags & FS_CFLG_OWN_PAGES) {
		/* with inplace-encryption we just encrypt the page */
		err = fscrypt_do_page_crypto(inode, FS_ENCRYPT, lblk_num, page,
					     ciphertext_page, len, offs,
					     gfp_flags);
		if (err)
			return ERR_PTR(err);

		return ciphertext_page;
	}

	BUG_ON(!PageLocked(page));

	ctx = fscrypt_get_ctx(inode, gfp_flags);
	if (IS_ERR(ctx))
		return (struct page *)ctx;

	/* The encryption operation will require a bounce page. */
	ciphertext_page = fscrypt_alloc_bounce_page(ctx, gfp_flags);
	if (IS_ERR(ciphertext_page))
		goto errout;

	ctx->w.control_page = page;
	err = fscrypt_do_page_crypto(inode, FS_ENCRYPT, lblk_num,
				     page, ciphertext_page, len, offs,
				     gfp_flags);
	if (err) {
		ciphertext_page = ERR_PTR(err);
		goto errout;
	}
	SetPagePrivate(ciphertext_page);
	set_page_private(ciphertext_page, (unsigned long)ctx);
	lock_page(ciphertext_page);
	return ciphertext_page;

errout:
	fscrypt_release_ctx(ctx);
	return ciphertext_page;
}
EXPORT_SYMBOL(fscrypt_encrypt_page);
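
/*
 * Illustrative sketch (not part of the original file): in the default
 * bounce-page mode, a filesystem's writeback path might drive
 * fscrypt_encrypt_page() roughly as below, assuming the block size equals
 * the page size so that page->index can serve as the logical block number.
 * The I/O submission step is elided; in real code
 * fscrypt_restore_control_page() runs once the write has completed.
 *
 *	struct page *ciphertext_page;
 *
 *	ciphertext_page = fscrypt_encrypt_page(inode, page, PAGE_SIZE, 0,
 *					       page->index, GFP_NOFS);
 *	if (IS_ERR(ciphertext_page))
 *		return PTR_ERR(ciphertext_page);
 *	...submit ciphertext_page for I/O, then later...
 *	fscrypt_restore_control_page(ciphertext_page);
 */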

/**
 * fscrypt_decrypt_page() - Decrypts a page in-place
 * @inode:    The corresponding inode for the page to decrypt.
 * @page:     The page to decrypt. Must be locked in case
 *            it is a writeback page (FS_CFLG_OWN_PAGES unset).
 * @len:      Number of bytes in @page to be decrypted.
 * @offs:     Start of data in @page.
 * @lblk_num: Logical block number.
 *
 * Decrypts @page in-place using the encryption context of @inode.
 *
 * Called from the read completion callback.
 *
 * Return: Zero on success, non-zero otherwise.
 */
int fscrypt_decrypt_page(const struct inode *inode, struct page *page,
			 unsigned int len, unsigned int offs, u64 lblk_num)
{
	if (!(inode->i_sb->s_cop->flags & FS_CFLG_OWN_PAGES))
		BUG_ON(!PageLocked(page));

	return fscrypt_do_page_crypto(inode, FS_DECRYPT, lblk_num, page, page,
				      len, offs, GFP_NOFS);
}
EXPORT_SYMBOL(fscrypt_decrypt_page);

/*
 * Validate dentries for encrypted directories to make sure we aren't
 * potentially caching stale data after a key has been added or
 * removed.
 */
static int fscrypt_d_revalidate(struct dentry *dentry, unsigned int flags)
{
	struct dentry *dir;
	int dir_has_key, cached_with_key;

	if (flags & LOOKUP_RCU)
		return -ECHILD;

	dir = dget_parent(dentry);
	if (!d_inode(dir)->i_sb->s_cop->is_encrypted(d_inode(dir))) {
		dput(dir);
		return 0;
	}

	/* this should eventually be a flag in d_flags */
	spin_lock(&dentry->d_lock);
	cached_with_key = dentry->d_flags & DCACHE_ENCRYPTED_WITH_KEY;
	spin_unlock(&dentry->d_lock);
	dir_has_key = (d_inode(dir)->i_crypt_info != NULL);
	dput(dir);

	/*
	 * If the dentry was cached without the key, and it is a
	 * negative dentry, it might be a valid name. We can't check
	 * if the key has since been made available due to locking
	 * reasons, so we fail the validation so that the filesystem's
	 * ->lookup() can do this check.
	 *
	 * We also fail the validation if the dentry was created with
	 * the key present, but we no longer have the key, or vice versa.
	 */
	if ((!cached_with_key && d_is_negative(dentry)) ||
			(!cached_with_key && dir_has_key) ||
			(cached_with_key && !dir_has_key))
		return 0;
	return 1;
}

const struct dentry_operations fscrypt_d_ops = {
	.d_revalidate = fscrypt_d_revalidate,
};
EXPORT_SYMBOL(fscrypt_d_ops);

void fscrypt_restore_control_page(struct page *page)
{
	struct fscrypt_ctx *ctx;

	ctx = (struct fscrypt_ctx *)page_private(page);
	set_page_private(page, (unsigned long)NULL);
	ClearPagePrivate(page);
	unlock_page(page);
	fscrypt_release_ctx(ctx);
}
EXPORT_SYMBOL(fscrypt_restore_control_page);

static void fscrypt_destroy(void)
{
	struct fscrypt_ctx *pos, *n;

	list_for_each_entry_safe(pos, n, &fscrypt_free_ctxs, free_list)
		kmem_cache_free(fscrypt_ctx_cachep, pos);
	INIT_LIST_HEAD(&fscrypt_free_ctxs);
	mempool_destroy(fscrypt_bounce_page_pool);
	fscrypt_bounce_page_pool = NULL;
}
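
/*
 * Illustrative sketch (not part of the original file): the read side works
 * in-place. A filesystem's read-completion handler, typically deferred to
 * fscrypt_read_workqueue because decryption may sleep, might do roughly
 * the following for each page, again assuming the block size equals the
 * page size so that page->index is the logical block number:
 *
 *	int err = fscrypt_decrypt_page(inode, page, PAGE_SIZE, 0,
 *				       page->index);
 *	if (err)
 *		SetPageError(page);
 *	else
 *		SetPageUptodate(page);
 *	unlock_page(page);
 */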

/**
 * fscrypt_initialize() - allocate major buffers for fs encryption.
 * @cop_flags: fscrypt operations flags
 *
 * We only call this when we start accessing encrypted files, since it
 * results in memory getting allocated that wouldn't otherwise be used.
 *
 * Return: Zero on success, non-zero otherwise.
 */
int fscrypt_initialize(unsigned int cop_flags)
{
	int i, res = -ENOMEM;

	/*
	 * No need to allocate a bounce page pool if there already is one or
	 * this FS won't use it.
	 */
	if (cop_flags & FS_CFLG_OWN_PAGES || fscrypt_bounce_page_pool)
		return 0;

	mutex_lock(&fscrypt_init_mutex);
	if (fscrypt_bounce_page_pool)
		goto already_initialized;

	for (i = 0; i < num_prealloc_crypto_ctxs; i++) {
		struct fscrypt_ctx *ctx;

		ctx = kmem_cache_zalloc(fscrypt_ctx_cachep, GFP_NOFS);
		if (!ctx)
			goto fail;
		list_add(&ctx->free_list, &fscrypt_free_ctxs);
	}

	fscrypt_bounce_page_pool =
		mempool_create_page_pool(num_prealloc_crypto_pages, 0);
	if (!fscrypt_bounce_page_pool)
		goto fail;

already_initialized:
	mutex_unlock(&fscrypt_init_mutex);
	return 0;
fail:
	fscrypt_destroy();
	mutex_unlock(&fscrypt_init_mutex);
	return res;
}

/**
 * fscrypt_init() - Set up for fs encryption.
 */
static int __init fscrypt_init(void)
{
	fscrypt_read_workqueue = alloc_workqueue("fscrypt_read_queue",
							WQ_HIGHPRI, 0);
	if (!fscrypt_read_workqueue)
		goto fail;

	fscrypt_ctx_cachep = KMEM_CACHE(fscrypt_ctx, SLAB_RECLAIM_ACCOUNT);
	if (!fscrypt_ctx_cachep)
		goto fail_free_queue;

	fscrypt_info_cachep = KMEM_CACHE(fscrypt_info, SLAB_RECLAIM_ACCOUNT);
	if (!fscrypt_info_cachep)
		goto fail_free_ctx;

	return 0;

fail_free_ctx:
	kmem_cache_destroy(fscrypt_ctx_cachep);
fail_free_queue:
	destroy_workqueue(fscrypt_read_workqueue);
fail:
	return -ENOMEM;
}
module_init(fscrypt_init)

/**
 * fscrypt_exit() - Shutdown the fs encryption system
 */
static void __exit fscrypt_exit(void)
{
	fscrypt_destroy();

	if (fscrypt_read_workqueue)
		destroy_workqueue(fscrypt_read_workqueue);
	kmem_cache_destroy(fscrypt_ctx_cachep);
	kmem_cache_destroy(fscrypt_info_cachep);
}
module_exit(fscrypt_exit);

MODULE_LICENSE("GPL");