/*
 * This contains encryption functions for per-file encryption.
 *
 * Copyright (C) 2015, Google, Inc.
 * Copyright (C) 2015, Motorola Mobility
 *
 * Written by Michael Halcrow, 2014.
 *
 * Filename encryption additions
 *	Uday Savagaonkar, 2014
 * Encryption policy handling additions
 *	Ildar Muslukhov, 2014
 * Add fscrypt_pullback_bio_page()
 *	Jaegeuk Kim, 2015.
 *
 * This has not yet undergone a rigorous security audit.
 *
 * The usage of AES-XTS should conform to recommendations in NIST
 * Special Publication 800-38E and IEEE P1619/D16.
 */

#include <linux/pagemap.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/ratelimit.h>
#include <linux/bio.h>
#include <linux/dcache.h>
#include <linux/namei.h>
#include <linux/fscrypto.h>
#include <linux/ecryptfs.h>

static unsigned int num_prealloc_crypto_pages = 32;
static unsigned int num_prealloc_crypto_ctxs = 128;

module_param(num_prealloc_crypto_pages, uint, 0444);
MODULE_PARM_DESC(num_prealloc_crypto_pages,
		"Number of crypto pages to preallocate");
module_param(num_prealloc_crypto_ctxs, uint, 0444);
MODULE_PARM_DESC(num_prealloc_crypto_ctxs,
		"Number of crypto contexts to preallocate");

static mempool_t *fscrypt_bounce_page_pool = NULL;

static LIST_HEAD(fscrypt_free_ctxs);
static DEFINE_SPINLOCK(fscrypt_ctx_lock);

static struct workqueue_struct *fscrypt_read_workqueue;
static DEFINE_MUTEX(fscrypt_init_mutex);

static struct kmem_cache *fscrypt_ctx_cachep;
struct kmem_cache *fscrypt_info_cachep;

/**
 * fscrypt_release_ctx() - Releases an encryption context
 * @ctx: The encryption context to release.
 *
 * If the encryption context was allocated from the pre-allocated pool, returns
 * it to that pool. Else, frees it.
 *
 * If there's a bounce page in the context, this frees that.
 */
void fscrypt_release_ctx(struct fscrypt_ctx *ctx)
{
	unsigned long flags;

	if (ctx->flags & FS_WRITE_PATH_FL && ctx->w.bounce_page) {
		mempool_free(ctx->w.bounce_page, fscrypt_bounce_page_pool);
		ctx->w.bounce_page = NULL;
	}
	ctx->w.control_page = NULL;
	if (ctx->flags & FS_CTX_REQUIRES_FREE_ENCRYPT_FL) {
		kmem_cache_free(fscrypt_ctx_cachep, ctx);
	} else {
		spin_lock_irqsave(&fscrypt_ctx_lock, flags);
		list_add(&ctx->free_list, &fscrypt_free_ctxs);
		spin_unlock_irqrestore(&fscrypt_ctx_lock, flags);
	}
}
EXPORT_SYMBOL(fscrypt_release_ctx);

/**
 * fscrypt_get_ctx() - Gets an encryption context
 * @inode: The inode for which we are doing the crypto
 * @gfp_flags: The gfp flag for memory allocation
 *
 * Allocates and initializes an encryption context.
 *
 * Return: An allocated and initialized encryption context on success; error
 * value or NULL otherwise.
 */
struct fscrypt_ctx *fscrypt_get_ctx(struct inode *inode, gfp_t gfp_flags)
{
	struct fscrypt_ctx *ctx = NULL;
	struct fscrypt_info *ci = inode->i_crypt_info;
	unsigned long flags;

	if (ci == NULL)
		return ERR_PTR(-ENOKEY);

	/*
	 * We first try getting the ctx from a free list because in
	 * the common case the ctx will have an allocated and
	 * initialized crypto tfm, so it's probably a worthwhile
	 * optimization. For the bounce page, we first try getting it
	 * from the kernel allocator because that's just about as fast
	 * as getting it from a list and because a cache of free pages
	 * should generally be a "last resort" option for a filesystem
	 * to be able to do its job.
	 */
	spin_lock_irqsave(&fscrypt_ctx_lock, flags);
	ctx = list_first_entry_or_null(&fscrypt_free_ctxs,
					struct fscrypt_ctx, free_list);
	if (ctx)
		list_del(&ctx->free_list);
	spin_unlock_irqrestore(&fscrypt_ctx_lock, flags);
	if (!ctx) {
		ctx = kmem_cache_zalloc(fscrypt_ctx_cachep, gfp_flags);
		if (!ctx)
			return ERR_PTR(-ENOMEM);
		ctx->flags |= FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
	} else {
		ctx->flags &= ~FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
	}
	ctx->flags &= ~FS_WRITE_PATH_FL;
	return ctx;
}
EXPORT_SYMBOL(fscrypt_get_ctx);
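
/*
 * Minimal usage sketch (illustrative only, not compiled here): a filesystem
 * that needs a context for decryption work is assumed to pair
 * fscrypt_get_ctx() with fscrypt_release_ctx() (or hand the ctx off to
 * fscrypt_decrypt_bio_pages(), which releases it itself). The error handling
 * shown is an assumption about the caller, not part of this file:
 *
 *	struct fscrypt_ctx *ctx = fscrypt_get_ctx(inode, GFP_NOFS);
 *
 *	if (IS_ERR(ctx))
 *		return PTR_ERR(ctx);
 *	...
 *	fscrypt_release_ctx(ctx);
 */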

/**
 * fscrypt_complete() - The completion callback for page encryption
 * @req: The asynchronous encryption request context
 * @res: The result of the encryption operation
 */
static void fscrypt_complete(struct crypto_async_request *req, int res)
{
	struct fscrypt_completion_result *ecr = req->data;

	if (res == -EINPROGRESS)
		return;
	ecr->res = res;
	complete(&ecr->completion);
}

typedef enum {
	FS_DECRYPT = 0,
	FS_ENCRYPT,
} fscrypt_direction_t;

static int do_page_crypto(struct inode *inode,
			fscrypt_direction_t rw, pgoff_t index,
			struct page *src_page, struct page *dest_page,
			gfp_t gfp_flags)
{
	u8 xts_tweak[FS_XTS_TWEAK_SIZE];
	struct skcipher_request *req = NULL;
	DECLARE_FS_COMPLETION_RESULT(ecr);
	struct scatterlist dst, src;
	struct fscrypt_info *ci = inode->i_crypt_info;
	struct crypto_skcipher *tfm = ci->ci_ctfm;
	int res = 0;

	req = skcipher_request_alloc(tfm, gfp_flags);
	if (!req) {
		printk_ratelimited(KERN_ERR
				"%s: skcipher_request_alloc() failed\n",
				__func__);
		return -ENOMEM;
	}

	skcipher_request_set_callback(
		req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
		fscrypt_complete, &ecr);

	BUILD_BUG_ON(FS_XTS_TWEAK_SIZE < sizeof(index));
	memcpy(xts_tweak, &index, sizeof(index));
	memset(&xts_tweak[sizeof(index)], 0,
			FS_XTS_TWEAK_SIZE - sizeof(index));

	sg_init_table(&dst, 1);
	sg_set_page(&dst, dest_page, PAGE_SIZE, 0);
	sg_init_table(&src, 1);
	sg_set_page(&src, src_page, PAGE_SIZE, 0);
	skcipher_request_set_crypt(req, &src, &dst, PAGE_SIZE,
					xts_tweak);
	if (rw == FS_DECRYPT)
		res = crypto_skcipher_decrypt(req);
	else
		res = crypto_skcipher_encrypt(req);
	if (res == -EINPROGRESS || res == -EBUSY) {
		BUG_ON(req->base.data != &ecr);
		wait_for_completion(&ecr.completion);
		res = ecr.res;
	}
	skcipher_request_free(req);
	if (res) {
		printk_ratelimited(KERN_ERR
			"%s: crypto_skcipher_encrypt() returned %d\n",
			__func__, res);
		return res;
	}
	return 0;
}

static struct page *alloc_bounce_page(struct fscrypt_ctx *ctx, gfp_t gfp_flags)
{
	ctx->w.bounce_page = mempool_alloc(fscrypt_bounce_page_pool, gfp_flags);
	if (ctx->w.bounce_page == NULL)
		return ERR_PTR(-ENOMEM);
	ctx->flags |= FS_WRITE_PATH_FL;
	return ctx->w.bounce_page;
}

/**
 * fscrypt_encrypt_page() - Encrypts a page
 * @inode: The inode for which the encryption should take place
 * @plaintext_page: The page to encrypt. Must be locked.
 * @gfp_flags: The gfp flag for memory allocation
 *
 * Allocates a ciphertext page and encrypts plaintext_page into it using the
 * ctx encryption context.
 *
 * Called on the page write path. The caller must call
 * fscrypt_restore_control_page() on the returned ciphertext page to
 * release the bounce buffer and the encryption context.
 *
 * Return: An allocated page with the encrypted content on success. Else, an
 * error value or NULL.
 */
struct page *fscrypt_encrypt_page(struct inode *inode,
				struct page *plaintext_page, gfp_t gfp_flags)
{
	struct fscrypt_ctx *ctx;
	struct page *ciphertext_page = NULL;
	int err;

	BUG_ON(!PageLocked(plaintext_page));

	ctx = fscrypt_get_ctx(inode, gfp_flags);
	if (IS_ERR(ctx))
		return (struct page *)ctx;

	/* The encryption operation will require a bounce page. */
	ciphertext_page = alloc_bounce_page(ctx, gfp_flags);
	if (IS_ERR(ciphertext_page))
		goto errout;

	ctx->w.control_page = plaintext_page;
	err = do_page_crypto(inode, FS_ENCRYPT, plaintext_page->index,
					plaintext_page, ciphertext_page,
					gfp_flags);
	if (err) {
		ciphertext_page = ERR_PTR(err);
		goto errout;
	}
	SetPagePrivate(ciphertext_page);
	set_page_private(ciphertext_page, (unsigned long)ctx);
	lock_page(ciphertext_page);
	return ciphertext_page;

errout:
	fscrypt_release_ctx(ctx);
	return ciphertext_page;
}
EXPORT_SYMBOL(fscrypt_encrypt_page);
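
/*
 * Minimal write-path sketch (illustrative only, not compiled here): a
 * filesystem's writepage implementation is assumed to submit the returned
 * bounce page for I/O and, per the kernel-doc contract above, call
 * fscrypt_restore_control_page() once it is done with it. The name
 * "data_page" and the surrounding error handling are assumptions, not part
 * of this file:
 *
 *	ciphertext_page = fscrypt_encrypt_page(inode, data_page, GFP_NOFS);
 *	if (IS_ERR(ciphertext_page))
 *		return PTR_ERR(ciphertext_page);
 *	... submit ciphertext_page for writeback I/O ...
 *	fscrypt_restore_control_page(ciphertext_page);
 */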

/**
 * fscrypt_decrypt_page() - Decrypts a page in-place
 * @page: The page to decrypt. Must be locked.
 *
 * Decrypts page in-place using the ctx encryption context.
 *
 * Called from the read completion callback.
 *
 * Return: Zero on success, non-zero otherwise.
 */
int fscrypt_decrypt_page(struct page *page)
{
	BUG_ON(!PageLocked(page));

	return do_page_crypto(page->mapping->host,
			FS_DECRYPT, page->index, page, page, GFP_NOFS);
}
EXPORT_SYMBOL(fscrypt_decrypt_page);

int fscrypt_zeroout_range(struct inode *inode, pgoff_t lblk,
				sector_t pblk, unsigned int len)
{
	struct fscrypt_ctx *ctx;
	struct page *ciphertext_page = NULL;
	struct bio *bio;
	int ret, err = 0;

	BUG_ON(inode->i_sb->s_blocksize != PAGE_SIZE);

	ctx = fscrypt_get_ctx(inode, GFP_NOFS);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ciphertext_page = alloc_bounce_page(ctx, GFP_NOWAIT);
	if (IS_ERR(ciphertext_page)) {
		err = PTR_ERR(ciphertext_page);
		goto errout;
	}

	while (len--) {
		err = do_page_crypto(inode, FS_ENCRYPT, lblk,
					ZERO_PAGE(0), ciphertext_page,
					GFP_NOFS);
		if (err)
			goto errout;

		bio = bio_alloc(GFP_NOWAIT, 1);
		if (!bio) {
			err = -ENOMEM;
			goto errout;
		}
		bio->bi_bdev = inode->i_sb->s_bdev;
		bio->bi_iter.bi_sector =
			pblk << (inode->i_sb->s_blocksize_bits - 9);
		ret = bio_add_page(bio, ciphertext_page,
					inode->i_sb->s_blocksize, 0);
		if (ret != inode->i_sb->s_blocksize) {
			/* should never happen! */
			WARN_ON(1);
			bio_put(bio);
			err = -EIO;
			goto errout;
		}
		err = submit_bio_wait(WRITE, bio);
		if ((err == 0) && bio->bi_error)
			err = -EIO;
		bio_put(bio);
		if (err)
			goto errout;
		lblk++;
		pblk++;
	}
	err = 0;
errout:
	fscrypt_release_ctx(ctx);
	return err;
}
EXPORT_SYMBOL(fscrypt_zeroout_range);
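
/*
 * Illustrative sketch (not compiled here): when a filesystem needs to zero
 * already-mapped blocks of an encrypted file on disk, it is assumed to
 * resolve the logical and physical block numbers itself and then call
 * fscrypt_zeroout_range(); "nr_blocks" is a hypothetical name:
 *
 *	err = fscrypt_zeroout_range(inode, lblk, pblk, nr_blocks);
 *	if (err)
 *		return err;
 */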

/*
 * Validate dentries for encrypted directories to make sure we aren't
 * potentially caching stale data after a key has been added or
 * removed.
 */
static int fscrypt_d_revalidate(struct dentry *dentry, unsigned int flags)
{
	struct dentry *dir;
	struct fscrypt_info *ci;
	int dir_has_key, cached_with_key;

	if (flags & LOOKUP_RCU)
		return -ECHILD;

	dir = dget_parent(dentry);
	if (!d_inode(dir)->i_sb->s_cop->is_encrypted(d_inode(dir))) {
		dput(dir);
		return 0;
	}

	ci = d_inode(dir)->i_crypt_info;
	if (ci && ci->ci_keyring_key &&
	    (ci->ci_keyring_key->flags & ((1 << KEY_FLAG_INVALIDATED) |
					  (1 << KEY_FLAG_REVOKED) |
					  (1 << KEY_FLAG_DEAD))))
		ci = NULL;

	/* this should eventually be a flag in d_flags */
	spin_lock(&dentry->d_lock);
	cached_with_key = dentry->d_flags & DCACHE_ENCRYPTED_WITH_KEY;
	spin_unlock(&dentry->d_lock);
	dir_has_key = (ci != NULL);
	dput(dir);

	/*
	 * If the dentry was cached without the key, and it is a
	 * negative dentry, it might be a valid name. We can't check
	 * if the key has since been made available due to locking
	 * reasons, so we fail the validation so ext4_lookup() can do
	 * this check.
	 *
	 * We also fail the validation if the dentry was created with
	 * the key present, but we no longer have the key, or vice versa.
	 */
	if ((!cached_with_key && d_is_negative(dentry)) ||
	    (!cached_with_key && dir_has_key) ||
	    (cached_with_key && !dir_has_key))
		return 0;
	return 1;
}

const struct dentry_operations fscrypt_d_ops = {
	.d_revalidate = fscrypt_d_revalidate,
};
EXPORT_SYMBOL(fscrypt_d_ops);

/*
 * Call fscrypt_decrypt_page on every single page, reusing the encryption
 * context.
 */
static void completion_pages(struct work_struct *work)
{
	struct fscrypt_ctx *ctx =
		container_of(work, struct fscrypt_ctx, r.work);
	struct bio *bio = ctx->r.bio;
	struct bio_vec *bv;
	int i;

	bio_for_each_segment_all(bv, bio, i) {
		struct page *page = bv->bv_page;
		int ret = fscrypt_decrypt_page(page);

		if (ret) {
			WARN_ON_ONCE(1);
			SetPageError(page);
		} else {
			SetPageUptodate(page);
		}
		unlock_page(page);
	}
	fscrypt_release_ctx(ctx);
	bio_put(bio);
}

void fscrypt_decrypt_bio_pages(struct fscrypt_ctx *ctx, struct bio *bio)
{
	INIT_WORK(&ctx->r.work, completion_pages);
	ctx->r.bio = bio;
	queue_work(fscrypt_read_workqueue, &ctx->r.work);
}
EXPORT_SYMBOL(fscrypt_decrypt_bio_pages);
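
/*
 * Illustrative read-path sketch (not compiled here): a filesystem's bio
 * completion handler is assumed to hand the bio and its fscrypt_ctx off to
 * the read workqueue via fscrypt_decrypt_bio_pages() rather than decrypting
 * in interrupt context. Stashing the ctx in bio->bi_private and the handler
 * name are assumptions about the caller, not part of this file:
 *
 *	static void example_read_end_io(struct bio *bio)
 *	{
 *		struct fscrypt_ctx *ctx = bio->bi_private;
 *
 *		if (!bio->bi_error) {
 *			fscrypt_decrypt_bio_pages(ctx, bio);
 *			return;
 *		}
 *		fscrypt_release_ctx(ctx);
 *		... normal error completion handling ...
 *	}
 */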

void fscrypt_pullback_bio_page(struct page **page, bool restore)
{
	struct fscrypt_ctx *ctx;
	struct page *bounce_page;

	/* The bounce data pages are unmapped. */
	if ((*page)->mapping)
		return;

	/* The bounce data page is unmapped. */
	bounce_page = *page;
	ctx = (struct fscrypt_ctx *)page_private(bounce_page);

	/* restore control page */
	*page = ctx->w.control_page;

	if (restore)
		fscrypt_restore_control_page(bounce_page);
}
EXPORT_SYMBOL(fscrypt_pullback_bio_page);

void fscrypt_restore_control_page(struct page *page)
{
	struct fscrypt_ctx *ctx;

	ctx = (struct fscrypt_ctx *)page_private(page);
	set_page_private(page, (unsigned long)NULL);
	ClearPagePrivate(page);
	unlock_page(page);
	fscrypt_release_ctx(ctx);
}
EXPORT_SYMBOL(fscrypt_restore_control_page);

static void fscrypt_destroy(void)
{
	struct fscrypt_ctx *pos, *n;

	list_for_each_entry_safe(pos, n, &fscrypt_free_ctxs, free_list)
		kmem_cache_free(fscrypt_ctx_cachep, pos);
	INIT_LIST_HEAD(&fscrypt_free_ctxs);
	mempool_destroy(fscrypt_bounce_page_pool);
	fscrypt_bounce_page_pool = NULL;
}

/**
 * fscrypt_initialize() - allocate major buffers for fs encryption.
 *
 * We only call this when we start accessing encrypted files, since it
 * results in memory getting allocated that wouldn't otherwise be used.
 *
 * Return: Zero on success, non-zero otherwise.
 */
int fscrypt_initialize(void)
{
	int i, res = -ENOMEM;

	if (fscrypt_bounce_page_pool)
		return 0;

	mutex_lock(&fscrypt_init_mutex);
	if (fscrypt_bounce_page_pool)
		goto already_initialized;

	for (i = 0; i < num_prealloc_crypto_ctxs; i++) {
		struct fscrypt_ctx *ctx;

		ctx = kmem_cache_zalloc(fscrypt_ctx_cachep, GFP_NOFS);
		if (!ctx)
			goto fail;
		list_add(&ctx->free_list, &fscrypt_free_ctxs);
	}

	fscrypt_bounce_page_pool =
		mempool_create_page_pool(num_prealloc_crypto_pages, 0);
	if (!fscrypt_bounce_page_pool)
		goto fail;

already_initialized:
	mutex_unlock(&fscrypt_init_mutex);
	return 0;
fail:
	fscrypt_destroy();
	mutex_unlock(&fscrypt_init_mutex);
	return res;
}
EXPORT_SYMBOL(fscrypt_initialize);

/**
 * fscrypt_init() - Set up for fs encryption.
 */
static int __init fscrypt_init(void)
{
	fscrypt_read_workqueue = alloc_workqueue("fscrypt_read_queue",
							WQ_HIGHPRI, 0);
	if (!fscrypt_read_workqueue)
		goto fail;

	fscrypt_ctx_cachep = KMEM_CACHE(fscrypt_ctx, SLAB_RECLAIM_ACCOUNT);
	if (!fscrypt_ctx_cachep)
		goto fail_free_queue;

	fscrypt_info_cachep = KMEM_CACHE(fscrypt_info, SLAB_RECLAIM_ACCOUNT);
	if (!fscrypt_info_cachep)
		goto fail_free_ctx;

	return 0;

fail_free_ctx:
	kmem_cache_destroy(fscrypt_ctx_cachep);
fail_free_queue:
	destroy_workqueue(fscrypt_read_workqueue);
fail:
	return -ENOMEM;
}
module_init(fscrypt_init)

/**
 * fscrypt_exit() - Shutdown the fs encryption system
 */
static void __exit fscrypt_exit(void)
{
	fscrypt_destroy();

	if (fscrypt_read_workqueue)
		destroy_workqueue(fscrypt_read_workqueue);
	kmem_cache_destroy(fscrypt_ctx_cachep);
	kmem_cache_destroy(fscrypt_info_cachep);
}
module_exit(fscrypt_exit);

MODULE_LICENSE("GPL");