/*
 * This contains encryption functions for per-file encryption.
 *
 * Copyright (C) 2015, Google, Inc.
 * Copyright (C) 2015, Motorola Mobility
 *
 * Written by Michael Halcrow, 2014.
 *
 * Filename encryption additions
 *	Uday Savagaonkar, 2014
 * Encryption policy handling additions
 *	Ildar Muslukhov, 2014
 * Add fscrypt_pullback_bio_page()
 *	Jaegeuk Kim, 2015.
 *
 * This has not yet undergone a rigorous security audit.
 *
 * The usage of AES-XTS should conform to recommendations in NIST
 * Special Publication 800-38E and IEEE P1619/D16.
 */

#include <linux/pagemap.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/ratelimit.h>
#include <linux/dcache.h>
#include <linux/namei.h>
#include <crypto/aes.h>
#include <crypto/skcipher.h>
#include "fscrypt_private.h"

static unsigned int num_prealloc_crypto_pages = 32;
static unsigned int num_prealloc_crypto_ctxs = 128;

module_param(num_prealloc_crypto_pages, uint, 0444);
MODULE_PARM_DESC(num_prealloc_crypto_pages,
		"Number of crypto pages to preallocate");
module_param(num_prealloc_crypto_ctxs, uint, 0444);
MODULE_PARM_DESC(num_prealloc_crypto_ctxs,
		"Number of crypto contexts to preallocate");

static mempool_t *fscrypt_bounce_page_pool = NULL;

static LIST_HEAD(fscrypt_free_ctxs);
static DEFINE_SPINLOCK(fscrypt_ctx_lock);

static struct workqueue_struct *fscrypt_read_workqueue;
static DEFINE_MUTEX(fscrypt_init_mutex);

static struct kmem_cache *fscrypt_ctx_cachep;
struct kmem_cache *fscrypt_info_cachep;

void fscrypt_enqueue_decrypt_work(struct work_struct *work)
{
	queue_work(fscrypt_read_workqueue, work);
}
EXPORT_SYMBOL(fscrypt_enqueue_decrypt_work);

/**
 * fscrypt_release_ctx() - Releases an encryption context
 * @ctx: The encryption context to release.
 *
 * If the encryption context was allocated from the pre-allocated pool, returns
 * it to that pool. Else, frees it.
 *
 * If there's a bounce page in the context, this frees that.
 */
void fscrypt_release_ctx(struct fscrypt_ctx *ctx)
{
	unsigned long flags;

	if (ctx->flags & FS_CTX_HAS_BOUNCE_BUFFER_FL && ctx->w.bounce_page) {
		mempool_free(ctx->w.bounce_page, fscrypt_bounce_page_pool);
		ctx->w.bounce_page = NULL;
	}
	ctx->w.control_page = NULL;
	if (ctx->flags & FS_CTX_REQUIRES_FREE_ENCRYPT_FL) {
		kmem_cache_free(fscrypt_ctx_cachep, ctx);
	} else {
		spin_lock_irqsave(&fscrypt_ctx_lock, flags);
		list_add(&ctx->free_list, &fscrypt_free_ctxs);
		spin_unlock_irqrestore(&fscrypt_ctx_lock, flags);
	}
}
EXPORT_SYMBOL(fscrypt_release_ctx);

/**
 * fscrypt_get_ctx() - Gets an encryption context
 * @gfp_flags: The gfp flag for memory allocation
 *
 * Allocates and initializes an encryption context.
 *
 * Return: A new encryption context on success; an ERR_PTR() otherwise.
 */
struct fscrypt_ctx *fscrypt_get_ctx(gfp_t gfp_flags)
{
	struct fscrypt_ctx *ctx;
	unsigned long flags;

	/*
	 * We first try getting the ctx from a free list because in
	 * the common case the ctx will have an allocated and
	 * initialized crypto tfm, so it's probably a worthwhile
	 * optimization. For the bounce page, we first try getting it
	 * from the kernel allocator because that's just about as fast
	 * as getting it from a list and because a cache of free pages
	 * should generally be a "last resort" option for a filesystem
	 * to be able to do its job.
	 */
	spin_lock_irqsave(&fscrypt_ctx_lock, flags);
	ctx = list_first_entry_or_null(&fscrypt_free_ctxs,
					struct fscrypt_ctx, free_list);
	if (ctx)
		list_del(&ctx->free_list);
	spin_unlock_irqrestore(&fscrypt_ctx_lock, flags);
	if (!ctx) {
		ctx = kmem_cache_zalloc(fscrypt_ctx_cachep, gfp_flags);
		if (!ctx)
			return ERR_PTR(-ENOMEM);
		ctx->flags |= FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
	} else {
		ctx->flags &= ~FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
	}
	ctx->flags &= ~FS_CTX_HAS_BOUNCE_BUFFER_FL;
	return ctx;
}
EXPORT_SYMBOL(fscrypt_get_ctx);
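
/*
 * Illustrative sketch (not compiled): fscrypt_get_ctx() and
 * fscrypt_release_ctx() are used as a simple get/put pair around an
 * encryption or decryption operation. The work done in between is
 * hypothetical here; fscrypt_encrypt_page() below is the real in-tree user.
 *
 *	struct fscrypt_ctx *ctx;
 *
 *	ctx = fscrypt_get_ctx(GFP_NOFS);
 *	if (IS_ERR(ctx))
 *		return PTR_ERR(ctx);
 *
 *	// ... use ctx, e.g. attach a bounce page via
 *	// fscrypt_alloc_bounce_page() and do the page crypto ...
 *
 *	fscrypt_release_ctx(ctx);
 */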
void fscrypt_generate_iv(union fscrypt_iv *iv, u64 lblk_num,
			 const struct fscrypt_info *ci)
{
	memset(iv, 0, ci->ci_mode->ivsize);
	iv->lblk_num = cpu_to_le64(lblk_num);

	if (ci->ci_flags & FS_POLICY_FLAG_DIRECT_KEY)
		memcpy(iv->nonce, ci->ci_nonce, FS_KEY_DERIVATION_NONCE_SIZE);

	if (ci->ci_essiv_tfm != NULL)
		crypto_cipher_encrypt_one(ci->ci_essiv_tfm, iv->raw, iv->raw);
}

int fscrypt_do_page_crypto(const struct inode *inode, fscrypt_direction_t rw,
			   u64 lblk_num, struct page *src_page,
			   struct page *dest_page, unsigned int len,
			   unsigned int offs, gfp_t gfp_flags)
{
	union fscrypt_iv iv;
	struct skcipher_request *req = NULL;
	DECLARE_CRYPTO_WAIT(wait);
	struct scatterlist dst, src;
	struct fscrypt_info *ci = inode->i_crypt_info;
	struct crypto_skcipher *tfm = ci->ci_ctfm;
	int res = 0;

	BUG_ON(len == 0);

	fscrypt_generate_iv(&iv, lblk_num, ci);

	req = skcipher_request_alloc(tfm, gfp_flags);
	if (!req)
		return -ENOMEM;

	skcipher_request_set_callback(
		req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
		crypto_req_done, &wait);

	sg_init_table(&dst, 1);
	sg_set_page(&dst, dest_page, len, offs);
	sg_init_table(&src, 1);
	sg_set_page(&src, src_page, len, offs);
	skcipher_request_set_crypt(req, &src, &dst, len, &iv);
	if (rw == FS_DECRYPT)
		res = crypto_wait_req(crypto_skcipher_decrypt(req), &wait);
	else
		res = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
	skcipher_request_free(req);
	if (res) {
		fscrypt_err(inode->i_sb,
			    "%scryption failed for inode %lu, block %llu: %d",
			    (rw == FS_DECRYPT ? "de" : "en"),
			    inode->i_ino, lblk_num, res);
		return res;
	}
	return 0;
}

struct page *fscrypt_alloc_bounce_page(struct fscrypt_ctx *ctx,
				       gfp_t gfp_flags)
{
	ctx->w.bounce_page = mempool_alloc(fscrypt_bounce_page_pool, gfp_flags);
	if (ctx->w.bounce_page == NULL)
		return ERR_PTR(-ENOMEM);
	ctx->flags |= FS_CTX_HAS_BOUNCE_BUFFER_FL;
	return ctx->w.bounce_page;
}

/**
 * fscrypt_encrypt_page() - Encrypts a page
 * @inode:     The inode for which the encryption should take place
 * @page:      The page to encrypt. Must be locked for bounce-page
 *             encryption.
 * @len:       Length of data to encrypt in @page and encrypted
 *             data in returned page.
 * @offs:      Offset of data within @page and returned
 *             page holding encrypted data.
 * @lblk_num:  Logical block number. This must be unique for multiple
 *             calls with same inode, except when overwriting
 *             previously written data.
 * @gfp_flags: The gfp flag for memory allocation
 *
 * Encrypts @page using the ctx encryption context. Performs encryption
 * either in-place or into a newly allocated bounce page.
 * Called on the page write path.
 *
 * Bounce page allocation is the default.
 * In this case, the contents of @page are encrypted and stored in an
 * allocated bounce page. @page has to be locked and the caller must call
 * fscrypt_restore_control_page() on the returned ciphertext page to
 * release the bounce buffer and the encryption context.
 *
 * In-place encryption is used by setting the FS_CFLG_OWN_PAGES flag in
 * fscrypt_operations. Here, the input-page is returned with its content
 * encrypted.
 *
 * Return: A page with the encrypted content on success. Else, an
 * error value or NULL.
 */
struct page *fscrypt_encrypt_page(const struct inode *inode,
				  struct page *page,
				  unsigned int len,
				  unsigned int offs,
				  u64 lblk_num, gfp_t gfp_flags)
{
	struct fscrypt_ctx *ctx;
	struct page *ciphertext_page = page;
	int err;

	BUG_ON(len % FS_CRYPTO_BLOCK_SIZE != 0);

	if (inode->i_sb->s_cop->flags & FS_CFLG_OWN_PAGES) {
		/* with inplace-encryption we just encrypt the page */
		err = fscrypt_do_page_crypto(inode, FS_ENCRYPT, lblk_num, page,
					     ciphertext_page, len, offs,
					     gfp_flags);
		if (err)
			return ERR_PTR(err);

		return ciphertext_page;
	}

	BUG_ON(!PageLocked(page));

	ctx = fscrypt_get_ctx(gfp_flags);
	if (IS_ERR(ctx))
		return ERR_CAST(ctx);

	/* The encryption operation will require a bounce page. */
	ciphertext_page = fscrypt_alloc_bounce_page(ctx, gfp_flags);
	if (IS_ERR(ciphertext_page))
		goto errout;

	ctx->w.control_page = page;
	err = fscrypt_do_page_crypto(inode, FS_ENCRYPT, lblk_num,
				     page, ciphertext_page, len, offs,
				     gfp_flags);
	if (err) {
		ciphertext_page = ERR_PTR(err);
		goto errout;
	}
	SetPagePrivate(ciphertext_page);
	set_page_private(ciphertext_page, (unsigned long)ctx);
	lock_page(ciphertext_page);
	return ciphertext_page;

errout:
	fscrypt_release_ctx(ctx);
	return ciphertext_page;
}
EXPORT_SYMBOL(fscrypt_encrypt_page);
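
/*
 * Illustrative sketch (not compiled): one way a filesystem that leaves
 * FS_CFLG_OWN_PAGES unset might drive fscrypt_encrypt_page() on its
 * writeback path. my_fs_write_page_sync() is a hypothetical helper that
 * submits the bounce page and waits for the write to finish; only the
 * fscrypt_* calls are part of this API.
 *
 *	static int my_fs_write_encrypted_block(struct inode *inode,
 *					       struct page *page, u64 lblk_num)
 *	{
 *		struct page *ciphertext_page;
 *		int err;
 *
 *		// @page must be locked; the ciphertext lands in a bounce page.
 *		ciphertext_page = fscrypt_encrypt_page(inode, page, PAGE_SIZE,
 *						       0, lblk_num, GFP_NOFS);
 *		if (IS_ERR(ciphertext_page))
 *			return PTR_ERR(ciphertext_page);
 *
 *		err = my_fs_write_page_sync(ciphertext_page);
 *
 *		// Release the bounce page and its encryption context once
 *		// the ciphertext is no longer needed.
 *		fscrypt_restore_control_page(ciphertext_page);
 *		return err;
 *	}
 */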
/**
 * fscrypt_decrypt_page() - Decrypts a page in-place
 * @inode:     The corresponding inode for the page to decrypt.
 * @page:      The page to decrypt. Must be locked in case
 *             it is a writeback page (FS_CFLG_OWN_PAGES unset).
 * @len:       Number of bytes in @page to be decrypted.
 * @offs:      Start of data in @page.
 * @lblk_num:  Logical block number.
 *
 * Decrypts page in-place using the ctx encryption context.
 *
 * Called from the read completion callback.
 *
 * Return: Zero on success, non-zero otherwise.
 */
int fscrypt_decrypt_page(const struct inode *inode, struct page *page,
			 unsigned int len, unsigned int offs, u64 lblk_num)
{
	if (!(inode->i_sb->s_cop->flags & FS_CFLG_OWN_PAGES))
		BUG_ON(!PageLocked(page));

	return fscrypt_do_page_crypto(inode, FS_DECRYPT, lblk_num, page, page,
				      len, offs, GFP_NOFS);
}
EXPORT_SYMBOL(fscrypt_decrypt_page);
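
/*
 * Illustrative sketch (not compiled): decryption is typically deferred from
 * a filesystem's bio completion handler to the fscrypt read workqueue via
 * fscrypt_enqueue_decrypt_work(), and the queued work then decrypts each
 * page in place. The my_fs_read_work structure and its fields are
 * hypothetical.
 *
 *	static void my_fs_decrypt_work(struct work_struct *work)
 *	{
 *		struct my_fs_read_work *rw =
 *			container_of(work, struct my_fs_read_work, work);
 *		int err;
 *
 *		err = fscrypt_decrypt_page(rw->inode, rw->page, PAGE_SIZE, 0,
 *					   rw->lblk_num);
 *		if (err)
 *			SetPageError(rw->page);
 *		else
 *			SetPageUptodate(rw->page);
 *		unlock_page(rw->page);
 *	}
 *
 *	// From the filesystem's bio completion handler:
 *	//	INIT_WORK(&rw->work, my_fs_decrypt_work);
 *	//	fscrypt_enqueue_decrypt_work(&rw->work);
 */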
/*
 * Validate dentries in encrypted directories to make sure we aren't
 * potentially caching stale dentries after a key has been added.
 */
static int fscrypt_d_revalidate(struct dentry *dentry, unsigned int flags)
{
	struct dentry *dir;
	int err;
	int valid;

	/*
	 * Plaintext names are always valid, since fscrypt doesn't support
	 * reverting to ciphertext names without evicting the directory's
	 * inode -- which implies eviction of the dentries in the directory.
	 */
	if (!(dentry->d_flags & DCACHE_ENCRYPTED_NAME))
		return 1;

	/*
	 * Ciphertext name; valid if the directory's key is still unavailable.
	 *
	 * Although fscrypt forbids rename() on ciphertext names, we still must
	 * use dget_parent() here rather than use ->d_parent directly. That's
	 * because a corrupted fs image may contain directory hard links, which
	 * the VFS handles by moving the directory's dentry tree in the dcache
	 * each time ->lookup() finds the directory and it already has a dentry
	 * elsewhere. Thus ->d_parent can be changing, and we must safely grab
	 * a reference to some ->d_parent to prevent it from being freed.
	 */

	if (flags & LOOKUP_RCU)
		return -ECHILD;

	dir = dget_parent(dentry);
	err = fscrypt_get_encryption_info(d_inode(dir));
	valid = !fscrypt_has_encryption_key(d_inode(dir));
	dput(dir);

	if (err < 0)
		return err;

	return valid;
}

const struct dentry_operations fscrypt_d_ops = {
	.d_revalidate = fscrypt_d_revalidate,
};

void fscrypt_restore_control_page(struct page *page)
{
	struct fscrypt_ctx *ctx;

	ctx = (struct fscrypt_ctx *)page_private(page);
	set_page_private(page, (unsigned long)NULL);
	ClearPagePrivate(page);
	unlock_page(page);
	fscrypt_release_ctx(ctx);
}
EXPORT_SYMBOL(fscrypt_restore_control_page);

static void fscrypt_destroy(void)
{
	struct fscrypt_ctx *pos, *n;

	list_for_each_entry_safe(pos, n, &fscrypt_free_ctxs, free_list)
		kmem_cache_free(fscrypt_ctx_cachep, pos);
	INIT_LIST_HEAD(&fscrypt_free_ctxs);
	mempool_destroy(fscrypt_bounce_page_pool);
	fscrypt_bounce_page_pool = NULL;
}

/**
 * fscrypt_initialize() - allocate major buffers for fs encryption.
 * @cop_flags:  fscrypt operations flags
 *
 * We only call this when we start accessing encrypted files, since it
 * results in memory getting allocated that wouldn't otherwise be used.
 *
 * Return: Zero on success, non-zero otherwise.
 */
int fscrypt_initialize(unsigned int cop_flags)
{
	int i, res = -ENOMEM;

	/* No need to allocate a bounce page pool if this FS won't use it. */
	if (cop_flags & FS_CFLG_OWN_PAGES)
		return 0;

	mutex_lock(&fscrypt_init_mutex);
	if (fscrypt_bounce_page_pool)
		goto already_initialized;

	for (i = 0; i < num_prealloc_crypto_ctxs; i++) {
		struct fscrypt_ctx *ctx;

		ctx = kmem_cache_zalloc(fscrypt_ctx_cachep, GFP_NOFS);
		if (!ctx)
			goto fail;
		list_add(&ctx->free_list, &fscrypt_free_ctxs);
	}

	fscrypt_bounce_page_pool =
		mempool_create_page_pool(num_prealloc_crypto_pages, 0);
	if (!fscrypt_bounce_page_pool)
		goto fail;

already_initialized:
	mutex_unlock(&fscrypt_init_mutex);
	return 0;
fail:
	fscrypt_destroy();
	mutex_unlock(&fscrypt_init_mutex);
	return res;
}
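
/*
 * Illustrative sketch (not compiled): fscrypt_initialize() is intended to be
 * called lazily, before the first encrypted file on a filesystem is used,
 * typically from the per-inode key-setup path. The caller below and its
 * surrounding details are assumed, not taken from this file.
 *
 *	int my_setup_encryption_info(struct inode *inode)
 *	{
 *		int res;
 *
 *		// Make sure the ctx cache and bounce page pool exist before
 *		// any per-page encryption work is attempted.
 *		res = fscrypt_initialize(inode->i_sb->s_cop->flags);
 *		if (res)
 *			return res;
 *
 *		// ... derive and install the per-file key ...
 *		return 0;
 *	}
 */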
void fscrypt_msg(struct super_block *sb, const char *level,
		 const char *fmt, ...)
{
	static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);
	struct va_format vaf;
	va_list args;

	if (!__ratelimit(&rs))
		return;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	if (sb)
		printk("%sfscrypt (%s): %pV\n", level, sb->s_id, &vaf);
	else
		printk("%sfscrypt: %pV\n", level, &vaf);
	va_end(args);
}

/**
 * fscrypt_init() - Set up for fs encryption.
 */
static int __init fscrypt_init(void)
{
	/*
	 * Use an unbound workqueue to allow bios to be decrypted in parallel
	 * even when they happen to complete on the same CPU. This sacrifices
	 * locality, but it's worthwhile since decryption is CPU-intensive.
	 *
	 * Also use a high-priority workqueue to prioritize decryption work,
	 * which blocks reads from completing, over regular application tasks.
	 */
	fscrypt_read_workqueue = alloc_workqueue("fscrypt_read_queue",
						 WQ_UNBOUND | WQ_HIGHPRI,
						 num_online_cpus());
	if (!fscrypt_read_workqueue)
		goto fail;

	fscrypt_ctx_cachep = KMEM_CACHE(fscrypt_ctx, SLAB_RECLAIM_ACCOUNT);
	if (!fscrypt_ctx_cachep)
		goto fail_free_queue;

	fscrypt_info_cachep = KMEM_CACHE(fscrypt_info, SLAB_RECLAIM_ACCOUNT);
	if (!fscrypt_info_cachep)
		goto fail_free_ctx;

	return 0;

fail_free_ctx:
	kmem_cache_destroy(fscrypt_ctx_cachep);
fail_free_queue:
	destroy_workqueue(fscrypt_read_workqueue);
fail:
	return -ENOMEM;
}
module_init(fscrypt_init)

/**
 * fscrypt_exit() - Shutdown the fs encryption system
 */
static void __exit fscrypt_exit(void)
{
	fscrypt_destroy();

	if (fscrypt_read_workqueue)
		destroy_workqueue(fscrypt_read_workqueue);
	kmem_cache_destroy(fscrypt_ctx_cachep);
	kmem_cache_destroy(fscrypt_info_cachep);

	fscrypt_essiv_cleanup();
}
module_exit(fscrypt_exit);

MODULE_LICENSE("GPL");
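
/*
 * Illustrative sketch (not compiled): fscrypt_msg() is normally reached
 * through thin wrappers such as fscrypt_err() (used above in
 * fscrypt_do_page_crypto()), which are assumed to be defined in
 * fscrypt_private.h and to pass a KERN_* level string through. A direct,
 * equivalent call would look like:
 *
 *	fscrypt_msg(inode->i_sb, KERN_ERR,
 *		    "decryption failed for inode %lu", inode->i_ino);
 *
 * The message is rate-limited and prefixed with "fscrypt (<device>): " when
 * a super_block is supplied.
 */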