/*
 * This contains encryption functions for per-file encryption.
 *
 * Copyright (C) 2015, Google, Inc.
 * Copyright (C) 2015, Motorola Mobility
 *
 * Written by Michael Halcrow, 2014.
 *
 * Filename encryption additions
 *	Uday Savagaonkar, 2014
 * Encryption policy handling additions
 *	Ildar Muslukhov, 2014
 * Add fscrypt_pullback_bio_page()
 *	Jaegeuk Kim, 2015.
 *
 * This has not yet undergone a rigorous security audit.
 *
 * The usage of AES-XTS should conform to recommendations in NIST
 * Special Publication 800-38E and IEEE P1619/D16.
 */

#include <linux/pagemap.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/ratelimit.h>
#include <linux/bio.h>
#include <linux/dcache.h>
#include <linux/namei.h>
#include <linux/fscrypto.h>

static unsigned int num_prealloc_crypto_pages = 32;
static unsigned int num_prealloc_crypto_ctxs = 128;

module_param(num_prealloc_crypto_pages, uint, 0444);
MODULE_PARM_DESC(num_prealloc_crypto_pages,
		"Number of crypto pages to preallocate");
module_param(num_prealloc_crypto_ctxs, uint, 0444);
MODULE_PARM_DESC(num_prealloc_crypto_ctxs,
		"Number of crypto contexts to preallocate");

static mempool_t *fscrypt_bounce_page_pool = NULL;

static LIST_HEAD(fscrypt_free_ctxs);
static DEFINE_SPINLOCK(fscrypt_ctx_lock);

static struct workqueue_struct *fscrypt_read_workqueue;
static DEFINE_MUTEX(fscrypt_init_mutex);

static struct kmem_cache *fscrypt_ctx_cachep;
struct kmem_cache *fscrypt_info_cachep;

/**
 * fscrypt_release_ctx() - Releases an encryption context
 * @ctx: The encryption context to release.
 *
 * If the encryption context was allocated from the pre-allocated pool, returns
 * it to that pool. Else, frees it.
 *
 * If there's a bounce page in the context, this frees that.
 */
void fscrypt_release_ctx(struct fscrypt_ctx *ctx)
{
	unsigned long flags;

	if (ctx->flags & FS_WRITE_PATH_FL && ctx->w.bounce_page) {
		mempool_free(ctx->w.bounce_page, fscrypt_bounce_page_pool);
		ctx->w.bounce_page = NULL;
	}
	ctx->w.control_page = NULL;
	if (ctx->flags & FS_CTX_REQUIRES_FREE_ENCRYPT_FL) {
		kmem_cache_free(fscrypt_ctx_cachep, ctx);
	} else {
		spin_lock_irqsave(&fscrypt_ctx_lock, flags);
		list_add(&ctx->free_list, &fscrypt_free_ctxs);
		spin_unlock_irqrestore(&fscrypt_ctx_lock, flags);
	}
}
EXPORT_SYMBOL(fscrypt_release_ctx);
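/*
 * Illustrative note (not part of the exported API contract): a read-path
 * caller is expected to pair fscrypt_get_ctx() with either
 * fscrypt_decrypt_bio_pages() on I/O completion or fscrypt_release_ctx()
 * on an error before submission.  A hypothetical caller might look like:
 *
 *	ctx = fscrypt_get_ctx(inode, GFP_NOFS);
 *	if (IS_ERR(ctx))
 *		return PTR_ERR(ctx);
 *	...build and submit the read bio, stashing ctx for the endio path...
 *	if (submit_failed)		// hypothetical error path
 *		fscrypt_release_ctx(ctx);
 *
 * The exact bio plumbing is filesystem-specific and not shown here.
 */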
/**
 * fscrypt_get_ctx() - Gets an encryption context
 * @inode: The inode for which we are doing the crypto
 * @gfp_flags: The gfp flag for memory allocation
 *
 * Allocates and initializes an encryption context.
 *
 * Return: An allocated and initialized encryption context on success; error
 * value or NULL otherwise.
 */
struct fscrypt_ctx *fscrypt_get_ctx(struct inode *inode, gfp_t gfp_flags)
{
	struct fscrypt_ctx *ctx = NULL;
	struct fscrypt_info *ci = inode->i_crypt_info;
	unsigned long flags;

	if (ci == NULL)
		return ERR_PTR(-ENOKEY);

	/*
	 * We first try getting the ctx from a free list because in
	 * the common case the ctx will have an allocated and
	 * initialized crypto tfm, so it's probably a worthwhile
	 * optimization. For the bounce page, we first try getting it
	 * from the kernel allocator because that's just about as fast
	 * as getting it from a list and because a cache of free pages
	 * should generally be a "last resort" option for a filesystem
	 * to be able to do its job.
	 */
	spin_lock_irqsave(&fscrypt_ctx_lock, flags);
	ctx = list_first_entry_or_null(&fscrypt_free_ctxs,
					struct fscrypt_ctx, free_list);
	if (ctx)
		list_del(&ctx->free_list);
	spin_unlock_irqrestore(&fscrypt_ctx_lock, flags);
	if (!ctx) {
		ctx = kmem_cache_zalloc(fscrypt_ctx_cachep, gfp_flags);
		if (!ctx)
			return ERR_PTR(-ENOMEM);
		ctx->flags |= FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
	} else {
		ctx->flags &= ~FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
	}
	ctx->flags &= ~FS_WRITE_PATH_FL;
	return ctx;
}
EXPORT_SYMBOL(fscrypt_get_ctx);

/**
 * page_crypt_complete() - completion callback for page crypto
 * @req: The asynchronous cipher request context
 * @res: The result of the cipher operation
 */
static void page_crypt_complete(struct crypto_async_request *req, int res)
{
	struct fscrypt_completion_result *ecr = req->data;

	if (res == -EINPROGRESS)
		return;
	ecr->res = res;
	complete(&ecr->completion);
}

typedef enum {
	FS_DECRYPT = 0,
	FS_ENCRYPT,
} fscrypt_direction_t;

static int do_page_crypto(struct inode *inode,
			fscrypt_direction_t rw, pgoff_t index,
			struct page *src_page, struct page *dest_page,
			gfp_t gfp_flags)
{
	u8 xts_tweak[FS_XTS_TWEAK_SIZE];
	struct skcipher_request *req = NULL;
	DECLARE_FS_COMPLETION_RESULT(ecr);
	struct scatterlist dst, src;
	struct fscrypt_info *ci = inode->i_crypt_info;
	struct crypto_skcipher *tfm = ci->ci_ctfm;
	int res = 0;

	req = skcipher_request_alloc(tfm, gfp_flags);
	if (!req) {
		printk_ratelimited(KERN_ERR
				"%s: crypto_request_alloc() failed\n",
				__func__);
		return -ENOMEM;
	}

	skcipher_request_set_callback(
		req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
		page_crypt_complete, &ecr);

	BUILD_BUG_ON(FS_XTS_TWEAK_SIZE < sizeof(index));
	memcpy(xts_tweak, &index, sizeof(index));
	memset(&xts_tweak[sizeof(index)], 0,
			FS_XTS_TWEAK_SIZE - sizeof(index));

	sg_init_table(&dst, 1);
	sg_set_page(&dst, dest_page, PAGE_SIZE, 0);
	sg_init_table(&src, 1);
	sg_set_page(&src, src_page, PAGE_SIZE, 0);
	skcipher_request_set_crypt(req, &src, &dst, PAGE_SIZE,
					xts_tweak);
	if (rw == FS_DECRYPT)
		res = crypto_skcipher_decrypt(req);
	else
		res = crypto_skcipher_encrypt(req);
	if (res == -EINPROGRESS || res == -EBUSY) {
		BUG_ON(req->base.data != &ecr);
		wait_for_completion(&ecr.completion);
		res = ecr.res;
	}
	skcipher_request_free(req);
	if (res) {
		printk_ratelimited(KERN_ERR
			"%s: crypto_skcipher_encrypt() returned %d\n",
			__func__, res);
		return res;
	}
	return 0;
}

static struct page *alloc_bounce_page(struct fscrypt_ctx *ctx, gfp_t gfp_flags)
{
	ctx->w.bounce_page = mempool_alloc(fscrypt_bounce_page_pool, gfp_flags);
	if (ctx->w.bounce_page == NULL)
		return ERR_PTR(-ENOMEM);
	ctx->flags |= FS_WRITE_PATH_FL;
	return ctx->w.bounce_page;
}
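/*
 * Illustrative sketch of the write path (hypothetical caller, not the
 * definitive usage): a filesystem typically encrypts the pagecache page
 * into a bounce page, submits the bounce page for I/O, and releases it
 * from its write-completion handler:
 *
 *	ciphertext_page = fscrypt_encrypt_page(inode, page, GFP_NOFS);
 *	if (IS_ERR(ciphertext_page))
 *		return PTR_ERR(ciphertext_page);
 *	...submit ciphertext_page in a REQ_OP_WRITE bio...
 *	// once the write has completed:
 *	fscrypt_restore_control_page(ciphertext_page);
 *
 * The submission and completion plumbing is filesystem-specific and not
 * shown here.
 */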
/**
 * fscrypt_encrypt_page() - Encrypts a page
 * @inode: The inode for which the encryption should take place
 * @plaintext_page: The page to encrypt. Must be locked.
 * @gfp_flags: The gfp flag for memory allocation
 *
 * Allocates a ciphertext page and encrypts plaintext_page into it using the
 * ctx encryption context.
 *
 * Called on the page write path. The caller must call
 * fscrypt_restore_control_page() on the returned ciphertext page to
 * release the bounce buffer and the encryption context.
 *
 * Return: An allocated page with the encrypted content on success. Else, an
 * error value or NULL.
 */
struct page *fscrypt_encrypt_page(struct inode *inode,
				struct page *plaintext_page, gfp_t gfp_flags)
{
	struct fscrypt_ctx *ctx;
	struct page *ciphertext_page = NULL;
	int err;

	BUG_ON(!PageLocked(plaintext_page));

	ctx = fscrypt_get_ctx(inode, gfp_flags);
	if (IS_ERR(ctx))
		return (struct page *)ctx;

	/* The encryption operation will require a bounce page. */
	ciphertext_page = alloc_bounce_page(ctx, gfp_flags);
	if (IS_ERR(ciphertext_page))
		goto errout;

	ctx->w.control_page = plaintext_page;
	err = do_page_crypto(inode, FS_ENCRYPT, plaintext_page->index,
					plaintext_page, ciphertext_page,
					gfp_flags);
	if (err) {
		ciphertext_page = ERR_PTR(err);
		goto errout;
	}
	SetPagePrivate(ciphertext_page);
	set_page_private(ciphertext_page, (unsigned long)ctx);
	lock_page(ciphertext_page);
	return ciphertext_page;

errout:
	fscrypt_release_ctx(ctx);
	return ciphertext_page;
}
EXPORT_SYMBOL(fscrypt_encrypt_page);

/**
 * fscrypt_decrypt_page() - Decrypts a page in-place
 * @page: The page to decrypt. Must be locked.
 *
 * Decrypts page in-place using the ctx encryption context.
 *
 * Called from the read completion callback.
 *
 * Return: Zero on success, non-zero otherwise.
 */
int fscrypt_decrypt_page(struct page *page)
{
	BUG_ON(!PageLocked(page));

	return do_page_crypto(page->mapping->host,
			FS_DECRYPT, page->index, page, page, GFP_NOFS);
}
EXPORT_SYMBOL(fscrypt_decrypt_page);
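/*
 * fscrypt_zeroout_range() writes encrypted zeroes over a range of blocks;
 * this is how a range of an encrypted file is "zeroed" on disk, since
 * literal zero blocks would not decrypt to zeroes.  Illustrative call with
 * hypothetical values:
 *
 *	err = fscrypt_zeroout_range(inode, lblk, pblk, nr_blocks);
 *
 * where lblk and pblk are the starting logical and physical block numbers
 * and nr_blocks is the number of blocks to zero.
 */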
int fscrypt_zeroout_range(struct inode *inode, pgoff_t lblk,
				sector_t pblk, unsigned int len)
{
	struct fscrypt_ctx *ctx;
	struct page *ciphertext_page = NULL;
	struct bio *bio;
	int ret, err = 0;

	BUG_ON(inode->i_sb->s_blocksize != PAGE_SIZE);

	ctx = fscrypt_get_ctx(inode, GFP_NOFS);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ciphertext_page = alloc_bounce_page(ctx, GFP_NOWAIT);
	if (IS_ERR(ciphertext_page)) {
		err = PTR_ERR(ciphertext_page);
		goto errout;
	}

	while (len--) {
		err = do_page_crypto(inode, FS_ENCRYPT, lblk,
					ZERO_PAGE(0), ciphertext_page,
					GFP_NOFS);
		if (err)
			goto errout;

		bio = bio_alloc(GFP_NOWAIT, 1);
		if (!bio) {
			err = -ENOMEM;
			goto errout;
		}
		bio->bi_bdev = inode->i_sb->s_bdev;
		bio->bi_iter.bi_sector =
			pblk << (inode->i_sb->s_blocksize_bits - 9);
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
		ret = bio_add_page(bio, ciphertext_page,
					inode->i_sb->s_blocksize, 0);
		if (ret != inode->i_sb->s_blocksize) {
			/* should never happen! */
			WARN_ON(1);
			bio_put(bio);
			err = -EIO;
			goto errout;
		}
		err = submit_bio_wait(bio);
		if ((err == 0) && bio->bi_error)
			err = -EIO;
		bio_put(bio);
		if (err)
			goto errout;
		lblk++;
		pblk++;
	}
	err = 0;
errout:
	fscrypt_release_ctx(ctx);
	return err;
}
EXPORT_SYMBOL(fscrypt_zeroout_range);

/*
 * Validate dentries for encrypted directories to make sure we aren't
 * potentially caching stale data after a key has been added or
 * removed.
 */
static int fscrypt_d_revalidate(struct dentry *dentry, unsigned int flags)
{
	struct dentry *dir;
	struct fscrypt_info *ci;
	int dir_has_key, cached_with_key;

	if (flags & LOOKUP_RCU)
		return -ECHILD;

	dir = dget_parent(dentry);
	if (!d_inode(dir)->i_sb->s_cop->is_encrypted(d_inode(dir))) {
		dput(dir);
		return 0;
	}

	ci = d_inode(dir)->i_crypt_info;
	if (ci && ci->ci_keyring_key &&
	    (ci->ci_keyring_key->flags & ((1 << KEY_FLAG_INVALIDATED) |
					  (1 << KEY_FLAG_REVOKED) |
					  (1 << KEY_FLAG_DEAD))))
		ci = NULL;

	/* this should eventually be a flag in d_flags */
	spin_lock(&dentry->d_lock);
	cached_with_key = dentry->d_flags & DCACHE_ENCRYPTED_WITH_KEY;
	spin_unlock(&dentry->d_lock);
	dir_has_key = (ci != NULL);
	dput(dir);

	/*
	 * If the dentry was cached without the key, and it is a
	 * negative dentry, it might be a valid name. We can't check
	 * if the key has since been made available due to locking
	 * reasons, so we fail the validation so ext4_lookup() can do
	 * this check.
	 *
	 * We also fail the validation if the dentry was created with
	 * the key present, but we no longer have the key, or vice versa.
	 */
	if ((!cached_with_key && d_is_negative(dentry)) ||
			(!cached_with_key && dir_has_key) ||
			(cached_with_key && !dir_has_key))
		return 0;
	return 1;
}

const struct dentry_operations fscrypt_d_ops = {
	.d_revalidate = fscrypt_d_revalidate,
};
EXPORT_SYMBOL(fscrypt_d_ops);

/*
 * Call fscrypt_decrypt_page on every single page, reusing the encryption
 * context.
 */
static void completion_pages(struct work_struct *work)
{
	struct fscrypt_ctx *ctx =
		container_of(work, struct fscrypt_ctx, r.work);
	struct bio *bio = ctx->r.bio;
	struct bio_vec *bv;
	int i;

	bio_for_each_segment_all(bv, bio, i) {
		struct page *page = bv->bv_page;
		int ret = fscrypt_decrypt_page(page);

		if (ret) {
			WARN_ON_ONCE(1);
			SetPageError(page);
		} else {
			SetPageUptodate(page);
		}
		unlock_page(page);
	}
	fscrypt_release_ctx(ctx);
	bio_put(bio);
}

void fscrypt_decrypt_bio_pages(struct fscrypt_ctx *ctx, struct bio *bio)
{
	INIT_WORK(&ctx->r.work, completion_pages);
	ctx->r.bio = bio;
	queue_work(fscrypt_read_workqueue, &ctx->r.work);
}
EXPORT_SYMBOL(fscrypt_decrypt_bio_pages);
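/*
 * Illustrative note: on the write path the bio carries the bounce page, not
 * the original pagecache ("control") page.  A completion or retry path that
 * needs the pagecache page can map back to it, e.g. (hypothetical caller):
 *
 *	struct page *page = bvec->bv_page;
 *
 *	fscrypt_pullback_bio_page(&page, false);
 *	// 'page' now refers to the control page; passing true would also
 *	// release the bounce page and its encryption context.
 */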
void fscrypt_pullback_bio_page(struct page **page, bool restore)
{
	struct fscrypt_ctx *ctx;
	struct page *bounce_page;

	/* The bounce data pages are unmapped. */
	if ((*page)->mapping)
		return;

	/* The bounce data page is unmapped. */
	bounce_page = *page;
	ctx = (struct fscrypt_ctx *)page_private(bounce_page);

	/* restore control page */
	*page = ctx->w.control_page;

	if (restore)
		fscrypt_restore_control_page(bounce_page);
}
EXPORT_SYMBOL(fscrypt_pullback_bio_page);

void fscrypt_restore_control_page(struct page *page)
{
	struct fscrypt_ctx *ctx;

	ctx = (struct fscrypt_ctx *)page_private(page);
	set_page_private(page, (unsigned long)NULL);
	ClearPagePrivate(page);
	unlock_page(page);
	fscrypt_release_ctx(ctx);
}
EXPORT_SYMBOL(fscrypt_restore_control_page);

static void fscrypt_destroy(void)
{
	struct fscrypt_ctx *pos, *n;

	list_for_each_entry_safe(pos, n, &fscrypt_free_ctxs, free_list)
		kmem_cache_free(fscrypt_ctx_cachep, pos);
	INIT_LIST_HEAD(&fscrypt_free_ctxs);
	mempool_destroy(fscrypt_bounce_page_pool);
	fscrypt_bounce_page_pool = NULL;
}

/**
 * fscrypt_initialize() - allocate major buffers for fs encryption.
 *
 * We only call this when we start accessing encrypted files, since it
 * results in memory getting allocated that wouldn't otherwise be used.
 *
 * Return: Zero on success, non-zero otherwise.
 */
int fscrypt_initialize(void)
{
	int i, res = -ENOMEM;

	if (fscrypt_bounce_page_pool)
		return 0;

	mutex_lock(&fscrypt_init_mutex);
	if (fscrypt_bounce_page_pool)
		goto already_initialized;

	for (i = 0; i < num_prealloc_crypto_ctxs; i++) {
		struct fscrypt_ctx *ctx;

		ctx = kmem_cache_zalloc(fscrypt_ctx_cachep, GFP_NOFS);
		if (!ctx)
			goto fail;
		list_add(&ctx->free_list, &fscrypt_free_ctxs);
	}

	fscrypt_bounce_page_pool =
		mempool_create_page_pool(num_prealloc_crypto_pages, 0);
	if (!fscrypt_bounce_page_pool)
		goto fail;

already_initialized:
	mutex_unlock(&fscrypt_init_mutex);
	return 0;
fail:
	fscrypt_destroy();
	mutex_unlock(&fscrypt_init_mutex);
	return res;
}
EXPORT_SYMBOL(fscrypt_initialize);

/**
 * fscrypt_init() - Set up for fs encryption.
 */
static int __init fscrypt_init(void)
{
	fscrypt_read_workqueue = alloc_workqueue("fscrypt_read_queue",
							WQ_HIGHPRI, 0);
	if (!fscrypt_read_workqueue)
		goto fail;

	fscrypt_ctx_cachep = KMEM_CACHE(fscrypt_ctx, SLAB_RECLAIM_ACCOUNT);
	if (!fscrypt_ctx_cachep)
		goto fail_free_queue;

	fscrypt_info_cachep = KMEM_CACHE(fscrypt_info, SLAB_RECLAIM_ACCOUNT);
	if (!fscrypt_info_cachep)
		goto fail_free_ctx;

	return 0;

fail_free_ctx:
	kmem_cache_destroy(fscrypt_ctx_cachep);
fail_free_queue:
	destroy_workqueue(fscrypt_read_workqueue);
fail:
	return -ENOMEM;
}
module_init(fscrypt_init)

/**
 * fscrypt_exit() - Shut down the fs encryption system
 */
static void __exit fscrypt_exit(void)
{
	fscrypt_destroy();

	if (fscrypt_read_workqueue)
		destroy_workqueue(fscrypt_read_workqueue);
	kmem_cache_destroy(fscrypt_ctx_cachep);
	kmem_cache_destroy(fscrypt_info_cachep);
}
module_exit(fscrypt_exit);

MODULE_LICENSE("GPL");