/*
 * This contains encryption functions for per-file encryption.
 *
 * Copyright (C) 2015, Google, Inc.
 * Copyright (C) 2015, Motorola Mobility
 *
 * Written by Michael Halcrow, 2014.
 *
 * Filename encryption additions
 *	Uday Savagaonkar, 2014
 * Encryption policy handling additions
 *	Ildar Muslukhov, 2014
 * Add fscrypt_pullback_bio_page()
 *	Jaegeuk Kim, 2015.
 *
 * This has not yet undergone a rigorous security audit.
 *
 * The usage of AES-XTS should conform to recommendations in NIST
 * Special Publication 800-38E and IEEE P1619/D16.
 */

#include <linux/pagemap.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/ratelimit.h>
#include <linux/bio.h>
#include <linux/dcache.h>
#include <linux/fscrypto.h>
#include <linux/ecryptfs.h>

static unsigned int num_prealloc_crypto_pages = 32;
static unsigned int num_prealloc_crypto_ctxs = 128;

module_param(num_prealloc_crypto_pages, uint, 0444);
MODULE_PARM_DESC(num_prealloc_crypto_pages,
		"Number of crypto pages to preallocate");
module_param(num_prealloc_crypto_ctxs, uint, 0444);
MODULE_PARM_DESC(num_prealloc_crypto_ctxs,
		"Number of crypto contexts to preallocate");

static mempool_t *fscrypt_bounce_page_pool = NULL;

static LIST_HEAD(fscrypt_free_ctxs);
static DEFINE_SPINLOCK(fscrypt_ctx_lock);

static struct workqueue_struct *fscrypt_read_workqueue;
static DEFINE_MUTEX(fscrypt_init_mutex);

static struct kmem_cache *fscrypt_ctx_cachep;
struct kmem_cache *fscrypt_info_cachep;

/**
 * fscrypt_release_ctx() - Releases an encryption context
 * @ctx: The encryption context to release.
 *
 * If the encryption context was allocated from the pre-allocated pool, returns
 * it to that pool. Else, frees it.
 *
 * If there's a bounce page in the context, this frees that.
 */
void fscrypt_release_ctx(struct fscrypt_ctx *ctx)
{
	unsigned long flags;

	if (ctx->flags & FS_WRITE_PATH_FL && ctx->w.bounce_page) {
		mempool_free(ctx->w.bounce_page, fscrypt_bounce_page_pool);
		ctx->w.bounce_page = NULL;
	}
	ctx->w.control_page = NULL;
	if (ctx->flags & FS_CTX_REQUIRES_FREE_ENCRYPT_FL) {
		kmem_cache_free(fscrypt_ctx_cachep, ctx);
	} else {
		spin_lock_irqsave(&fscrypt_ctx_lock, flags);
		list_add(&ctx->free_list, &fscrypt_free_ctxs);
		spin_unlock_irqrestore(&fscrypt_ctx_lock, flags);
	}
}
EXPORT_SYMBOL(fscrypt_release_ctx);

/**
 * fscrypt_get_ctx() - Gets an encryption context
 * @inode: The inode for which we are doing the crypto
 *
 * Allocates and initializes an encryption context.
 *
 * Return: An allocated and initialized encryption context on success; error
 * value or NULL otherwise.
 */
struct fscrypt_ctx *fscrypt_get_ctx(struct inode *inode)
{
	struct fscrypt_ctx *ctx = NULL;
	struct fscrypt_info *ci = inode->i_crypt_info;
	unsigned long flags;

	if (ci == NULL)
		return ERR_PTR(-ENOKEY);

	/*
	 * We first try getting the ctx from a free list because in
	 * the common case the ctx will have an allocated and
	 * initialized crypto tfm, so it's probably a worthwhile
	 * optimization. For the bounce page, we first try getting it
	 * from the kernel allocator because that's just about as fast
	 * as getting it from a list and because a cache of free pages
	 * should generally be a "last resort" option for a filesystem
	 * to be able to do its job.
	 */
	spin_lock_irqsave(&fscrypt_ctx_lock, flags);
	ctx = list_first_entry_or_null(&fscrypt_free_ctxs,
					struct fscrypt_ctx, free_list);
	if (ctx)
		list_del(&ctx->free_list);
	spin_unlock_irqrestore(&fscrypt_ctx_lock, flags);
	if (!ctx) {
		ctx = kmem_cache_zalloc(fscrypt_ctx_cachep, GFP_NOFS);
		if (!ctx)
			return ERR_PTR(-ENOMEM);
		ctx->flags |= FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
	} else {
		ctx->flags &= ~FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
	}
	ctx->flags &= ~FS_WRITE_PATH_FL;
	return ctx;
}
EXPORT_SYMBOL(fscrypt_get_ctx);
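/*
 * Editorial usage sketch (not part of the original file): a caller
 * typically brackets its crypto work with the pair above. The I/O step
 * in the middle is elided and hypothetical.
 *
 *	struct fscrypt_ctx *ctx = fscrypt_get_ctx(inode);
 *
 *	if (IS_ERR(ctx))
 *		return PTR_ERR(ctx);
 *	// ... use ctx, e.g. stash it in bio->bi_private on the read path ...
 *	fscrypt_release_ctx(ctx);
 */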
/**
 * fscrypt_complete() - The completion callback for page encryption
 * @req: The asynchronous encryption request context
 * @res: The result of the encryption operation
 */
static void fscrypt_complete(struct crypto_async_request *req, int res)
{
	struct fscrypt_completion_result *ecr = req->data;

	if (res == -EINPROGRESS)
		return;
	ecr->res = res;
	complete(&ecr->completion);
}

typedef enum {
	FS_DECRYPT = 0,
	FS_ENCRYPT,
} fscrypt_direction_t;

static int do_page_crypto(struct inode *inode,
			fscrypt_direction_t rw, pgoff_t index,
			struct page *src_page, struct page *dest_page)
{
	u8 xts_tweak[FS_XTS_TWEAK_SIZE];
	struct skcipher_request *req = NULL;
	DECLARE_FS_COMPLETION_RESULT(ecr);
	struct scatterlist dst, src;
	struct fscrypt_info *ci = inode->i_crypt_info;
	struct crypto_skcipher *tfm = ci->ci_ctfm;
	int res = 0;

	req = skcipher_request_alloc(tfm, GFP_NOFS);
	if (!req) {
		printk_ratelimited(KERN_ERR
				"%s: skcipher_request_alloc() failed\n",
				__func__);
		return -ENOMEM;
	}

	skcipher_request_set_callback(
		req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
		fscrypt_complete, &ecr);

	BUILD_BUG_ON(FS_XTS_TWEAK_SIZE < sizeof(index));
	memcpy(xts_tweak, &index, sizeof(index));
	memset(&xts_tweak[sizeof(index)], 0,
			FS_XTS_TWEAK_SIZE - sizeof(index));

	sg_init_table(&dst, 1);
	sg_set_page(&dst, dest_page, PAGE_CACHE_SIZE, 0);
	sg_init_table(&src, 1);
	sg_set_page(&src, src_page, PAGE_CACHE_SIZE, 0);
	skcipher_request_set_crypt(req, &src, &dst, PAGE_CACHE_SIZE,
					xts_tweak);
	if (rw == FS_DECRYPT)
		res = crypto_skcipher_decrypt(req);
	else
		res = crypto_skcipher_encrypt(req);
	if (res == -EINPROGRESS || res == -EBUSY) {
		BUG_ON(req->base.data != &ecr);
		wait_for_completion(&ecr.completion);
		res = ecr.res;
	}
	skcipher_request_free(req);
	if (res) {
		printk_ratelimited(KERN_ERR
			"%s: crypto_skcipher_encrypt() returned %d\n",
			__func__, res);
		return res;
	}
	return 0;
}

static struct page *alloc_bounce_page(struct fscrypt_ctx *ctx)
{
	ctx->w.bounce_page = mempool_alloc(fscrypt_bounce_page_pool,
							GFP_NOWAIT);
	if (ctx->w.bounce_page == NULL)
		return ERR_PTR(-ENOMEM);
	ctx->flags |= FS_WRITE_PATH_FL;
	return ctx->w.bounce_page;
}
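/*
 * Editorial note on the tweak built in do_page_crypto() above (not part
 * of the original file): the FS_XTS_TWEAK_SIZE (16-byte) XTS tweak is the
 * page index copied in native endianness, zero-padded to the tweak size.
 * For example, page index 5 on a 64-bit little-endian machine yields:
 *
 *	05 00 00 00 00 00 00 00  00 00 00 00 00 00 00 00
 *
 * so each page of a file is encrypted under a distinct tweak.
 */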
/**
 * fscrypt_encrypt_page() - Encrypts a page
 * @inode: The inode for which the encryption should take place
 * @plaintext_page: The page to encrypt. Must be locked.
 *
 * Allocates a ciphertext page and encrypts plaintext_page into it using the ctx
 * encryption context.
 *
 * Called on the page write path. The caller must call
 * fscrypt_restore_control_page() on the returned ciphertext page to
 * release the bounce buffer and the encryption context.
 *
 * Return: An allocated page with the encrypted content on success. Else, an
 * error value or NULL.
 */
struct page *fscrypt_encrypt_page(struct inode *inode,
				struct page *plaintext_page)
{
	struct fscrypt_ctx *ctx;
	struct page *ciphertext_page = NULL;
	int err;

	BUG_ON(!PageLocked(plaintext_page));

	ctx = fscrypt_get_ctx(inode);
	if (IS_ERR(ctx))
		return (struct page *)ctx;

	/* The encryption operation will require a bounce page. */
	ciphertext_page = alloc_bounce_page(ctx);
	if (IS_ERR(ciphertext_page))
		goto errout;

	ctx->w.control_page = plaintext_page;
	err = do_page_crypto(inode, FS_ENCRYPT, plaintext_page->index,
					plaintext_page, ciphertext_page);
	if (err) {
		ciphertext_page = ERR_PTR(err);
		goto errout;
	}
	SetPagePrivate(ciphertext_page);
	set_page_private(ciphertext_page, (unsigned long)ctx);
	lock_page(ciphertext_page);
	return ciphertext_page;

errout:
	fscrypt_release_ctx(ctx);
	return ciphertext_page;
}
EXPORT_SYMBOL(fscrypt_encrypt_page);

/**
 * fscrypt_decrypt_page() - Decrypts a page in-place
 * @page: The page to decrypt. Must be locked.
 *
 * Decrypts page in-place using the ctx encryption context.
 *
 * Called from the read completion callback.
 *
 * Return: Zero on success, non-zero otherwise.
 */
int fscrypt_decrypt_page(struct page *page)
{
	BUG_ON(!PageLocked(page));

	return do_page_crypto(page->mapping->host,
			FS_DECRYPT, page->index, page, page);
}
EXPORT_SYMBOL(fscrypt_decrypt_page);

/**
 * fscrypt_zeroout_range() - Writes encrypted zeroes to a range of blocks
 * @inode: The inode the blocks belong to
 * @lblk: The first logical block to zero out
 * @pblk: The first physical (on-disk) block to zero out
 * @len: The number of blocks to zero out
 *
 * Encrypts one zero page per block, using the logical block number as the
 * tweak, and writes it out synchronously, so the on-disk range decrypts
 * to zeroes.
 *
 * Return: Zero on success, non-zero otherwise.
 */
int fscrypt_zeroout_range(struct inode *inode, pgoff_t lblk,
				sector_t pblk, unsigned int len)
{
	struct fscrypt_ctx *ctx;
	struct page *ciphertext_page = NULL;
	struct bio *bio;
	int ret, err = 0;

	BUG_ON(inode->i_sb->s_blocksize != PAGE_CACHE_SIZE);

	ctx = fscrypt_get_ctx(inode);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ciphertext_page = alloc_bounce_page(ctx);
	if (IS_ERR(ciphertext_page)) {
		err = PTR_ERR(ciphertext_page);
		goto errout;
	}

	while (len--) {
		err = do_page_crypto(inode, FS_ENCRYPT, lblk,
					ZERO_PAGE(0), ciphertext_page);
		if (err)
			goto errout;

		bio = bio_alloc(GFP_KERNEL, 1);
		if (!bio) {
			err = -ENOMEM;
			goto errout;
		}
		bio->bi_bdev = inode->i_sb->s_bdev;
		bio->bi_iter.bi_sector =
			pblk << (inode->i_sb->s_blocksize_bits - 9);
		ret = bio_add_page(bio, ciphertext_page,
					inode->i_sb->s_blocksize, 0);
		if (ret != inode->i_sb->s_blocksize) {
			/* should never happen! */
			WARN_ON(1);
			bio_put(bio);
			err = -EIO;
			goto errout;
		}
		err = submit_bio_wait(WRITE, bio);
		if ((err == 0) && bio->bi_error)
			err = -EIO;
		bio_put(bio);
		if (err)
			goto errout;
		lblk++;
		pblk++;
	}
	err = 0;
errout:
	fscrypt_release_ctx(ctx);
	return err;
}
EXPORT_SYMBOL(fscrypt_zeroout_range);
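/*
 * Worked example for the sector arithmetic above (editorial note, not in
 * the original file): with 4096-byte blocks, s_blocksize_bits is 12, so
 *
 *	bi_sector = pblk << (12 - 9) = pblk * 8
 *
 * converting a filesystem block number into 512-byte device sectors.
 */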
/*
 * Validate dentries for encrypted directories to make sure we aren't
 * potentially caching stale data after a key has been added or
 * removed.
 */
static int fscrypt_d_revalidate(struct dentry *dentry, unsigned int flags)
{
	struct inode *dir = d_inode(dentry->d_parent);
	struct fscrypt_info *ci = dir->i_crypt_info;
	int dir_has_key, cached_with_key;

	if (!dir->i_sb->s_cop->is_encrypted(dir))
		return 0;

	if (ci && ci->ci_keyring_key &&
	    (ci->ci_keyring_key->flags & ((1 << KEY_FLAG_INVALIDATED) |
					  (1 << KEY_FLAG_REVOKED) |
					  (1 << KEY_FLAG_DEAD))))
		ci = NULL;

	/* this should eventually be a flag in d_flags */
	spin_lock(&dentry->d_lock);
	cached_with_key = dentry->d_flags & DCACHE_ENCRYPTED_WITH_KEY;
	spin_unlock(&dentry->d_lock);
	dir_has_key = (ci != NULL);

	/*
	 * If the dentry was cached without the key, and it is a
	 * negative dentry, it might be a valid name. We can't check
	 * if the key has since been made available due to locking
	 * reasons, so we fail the validation so the filesystem's
	 * lookup routine (e.g. ext4_lookup()) can do this check.
	 *
	 * We also fail the validation if the dentry was created with
	 * the key present, but we no longer have the key, or vice versa.
	 */
	if ((!cached_with_key && d_is_negative(dentry)) ||
	    (!cached_with_key && dir_has_key) ||
	    (cached_with_key && !dir_has_key))
		return 0;
	return 1;
}

const struct dentry_operations fscrypt_d_ops = {
	.d_revalidate = fscrypt_d_revalidate,
};
EXPORT_SYMBOL(fscrypt_d_ops);

/*
 * Call fscrypt_decrypt_page on every single page, reusing the encryption
 * context.
 */
static void completion_pages(struct work_struct *work)
{
	struct fscrypt_ctx *ctx =
		container_of(work, struct fscrypt_ctx, r.work);
	struct bio *bio = ctx->r.bio;
	struct bio_vec *bv;
	int i;

	bio_for_each_segment_all(bv, bio, i) {
		struct page *page = bv->bv_page;
		int ret = fscrypt_decrypt_page(page);

		if (ret) {
			WARN_ON_ONCE(1);
			SetPageError(page);
		} else {
			SetPageUptodate(page);
		}
		unlock_page(page);
	}
	fscrypt_release_ctx(ctx);
	bio_put(bio);
}

/**
 * fscrypt_decrypt_bio_pages() - Asynchronously decrypts the pages of a bio
 * @ctx: The encryption context carried through the read path
 * @bio: The bio whose pages have just been read
 *
 * Queues the decryption work on the fscrypt read workqueue; the work item
 * decrypts every page in the bio, then releases @ctx and puts the bio.
 */
void fscrypt_decrypt_bio_pages(struct fscrypt_ctx *ctx, struct bio *bio)
{
	INIT_WORK(&ctx->r.work, completion_pages);
	ctx->r.bio = bio;
	queue_work(fscrypt_read_workqueue, &ctx->r.work);
}
EXPORT_SYMBOL(fscrypt_decrypt_bio_pages);

void fscrypt_pullback_bio_page(struct page **page, bool restore)
{
	struct fscrypt_ctx *ctx;
	struct page *bounce_page;

	/* Bounce pages are unmapped; a page with a mapping is not ours. */
	if ((*page)->mapping)
		return;

	/* The page has no mapping, so it must be a bounce page. */
	bounce_page = *page;
	ctx = (struct fscrypt_ctx *)page_private(bounce_page);

	/* restore control page */
	*page = ctx->w.control_page;

	if (restore)
		fscrypt_restore_control_page(bounce_page);
}
EXPORT_SYMBOL(fscrypt_pullback_bio_page);
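/*
 * Editorial usage sketch (not part of the original file): a filesystem's
 * write completion handler can swap each bounce page in a bio back to the
 * pagecache (control) page it shadows. The loop variables below are
 * hypothetical caller code.
 *
 *	bio_for_each_segment_all(bv, bio, i)
 *		fscrypt_pullback_bio_page(&bv->bv_page, true);
 */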
/**
 * fscrypt_restore_control_page() - Releases a bounce page and its context
 * @page: The ciphertext (bounce) page returned by fscrypt_encrypt_page()
 *
 * Detaches the encryption context from the bounce page, unlocks the page,
 * and releases the context, which returns the bounce page to the pool.
 */
void fscrypt_restore_control_page(struct page *page)
{
	struct fscrypt_ctx *ctx;

	ctx = (struct fscrypt_ctx *)page_private(page);
	set_page_private(page, (unsigned long)NULL);
	ClearPagePrivate(page);
	unlock_page(page);
	fscrypt_release_ctx(ctx);
}
EXPORT_SYMBOL(fscrypt_restore_control_page);

static void fscrypt_destroy(void)
{
	struct fscrypt_ctx *pos, *n;

	list_for_each_entry_safe(pos, n, &fscrypt_free_ctxs, free_list)
		kmem_cache_free(fscrypt_ctx_cachep, pos);
	INIT_LIST_HEAD(&fscrypt_free_ctxs);
	mempool_destroy(fscrypt_bounce_page_pool);
	fscrypt_bounce_page_pool = NULL;
}

/**
 * fscrypt_initialize() - allocate major buffers for fs encryption.
 *
 * We only call this when we start accessing encrypted files, since it
 * results in memory getting allocated that wouldn't otherwise be used.
 *
 * Return: Zero on success, non-zero otherwise.
 */
int fscrypt_initialize(void)
{
	int i, res = -ENOMEM;

	if (fscrypt_bounce_page_pool)
		return 0;

	mutex_lock(&fscrypt_init_mutex);
	if (fscrypt_bounce_page_pool)
		goto already_initialized;

	for (i = 0; i < num_prealloc_crypto_ctxs; i++) {
		struct fscrypt_ctx *ctx;

		ctx = kmem_cache_zalloc(fscrypt_ctx_cachep, GFP_NOFS);
		if (!ctx)
			goto fail;
		list_add(&ctx->free_list, &fscrypt_free_ctxs);
	}

	fscrypt_bounce_page_pool =
		mempool_create_page_pool(num_prealloc_crypto_pages, 0);
	if (!fscrypt_bounce_page_pool)
		goto fail;

already_initialized:
	mutex_unlock(&fscrypt_init_mutex);
	return 0;
fail:
	fscrypt_destroy();
	mutex_unlock(&fscrypt_init_mutex);
	return res;
}
EXPORT_SYMBOL(fscrypt_initialize);

/**
 * fscrypt_init() - Set up for fs encryption.
 */
static int __init fscrypt_init(void)
{
	fscrypt_read_workqueue = alloc_workqueue("fscrypt_read_queue",
							WQ_HIGHPRI, 0);
	if (!fscrypt_read_workqueue)
		goto fail;

	fscrypt_ctx_cachep = KMEM_CACHE(fscrypt_ctx, SLAB_RECLAIM_ACCOUNT);
	if (!fscrypt_ctx_cachep)
		goto fail_free_queue;

	fscrypt_info_cachep = KMEM_CACHE(fscrypt_info, SLAB_RECLAIM_ACCOUNT);
	if (!fscrypt_info_cachep)
		goto fail_free_ctx;

	return 0;

fail_free_ctx:
	kmem_cache_destroy(fscrypt_ctx_cachep);
fail_free_queue:
	destroy_workqueue(fscrypt_read_workqueue);
fail:
	return -ENOMEM;
}
module_init(fscrypt_init)

/**
 * fscrypt_exit() - Shutdown the fs encryption system
 */
static void __exit fscrypt_exit(void)
{
	fscrypt_destroy();

	if (fscrypt_read_workqueue)
		destroy_workqueue(fscrypt_read_workqueue);
	kmem_cache_destroy(fscrypt_ctx_cachep);
	kmem_cache_destroy(fscrypt_info_cachep);
}
module_exit(fscrypt_exit);

MODULE_LICENSE("GPL");
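/*
 * End-to-end read-path sketch (editorial addition, not part of the
 * original file; the caller's function name and error handling are
 * hypothetical). A filesystem allocates a context with fscrypt_get_ctx(),
 * stores it in bio->bi_private when submitting the read, and defers
 * decryption to the fscrypt workqueue from its bio completion handler:
 *
 *	static void example_read_endio(struct bio *bio)
 *	{
 *		struct fscrypt_ctx *ctx = bio->bi_private;
 *
 *		if (bio->bi_error) {
 *			fscrypt_release_ctx(ctx);
 *			// mark pages errored, unlock them, bio_put(bio)
 *		} else {
 *			fscrypt_decrypt_bio_pages(ctx, bio);
 *		}
 *	}
 */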