/*
 * This contains encryption functions for per-file encryption.
 *
 * Copyright (C) 2015, Google, Inc.
 * Copyright (C) 2015, Motorola Mobility
 *
 * Written by Michael Halcrow, 2014.
 *
 * Filename encryption additions
 *	Uday Savagaonkar, 2014
 * Encryption policy handling additions
 *	Ildar Muslukhov, 2014
 * Add fscrypt_pullback_bio_page()
 *	Jaegeuk Kim, 2015.
 *
 * This has not yet undergone a rigorous security audit.
 *
 * The usage of AES-XTS should conform to recommendations in NIST
 * Special Publication 800-38E and IEEE P1619/D16.
 */

#include <linux/pagemap.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/ratelimit.h>
#include <linux/dcache.h>
#include <linux/namei.h>
#include <crypto/aes.h>
#include <crypto/skcipher.h>
#include "fscrypt_private.h"

static unsigned int num_prealloc_crypto_pages = 32;
static unsigned int num_prealloc_crypto_ctxs = 128;

module_param(num_prealloc_crypto_pages, uint, 0444);
MODULE_PARM_DESC(num_prealloc_crypto_pages,
		"Number of crypto pages to preallocate");
module_param(num_prealloc_crypto_ctxs, uint, 0444);
MODULE_PARM_DESC(num_prealloc_crypto_ctxs,
		"Number of crypto contexts to preallocate");

static mempool_t *fscrypt_bounce_page_pool = NULL;

static LIST_HEAD(fscrypt_free_ctxs);
static DEFINE_SPINLOCK(fscrypt_ctx_lock);

struct workqueue_struct *fscrypt_read_workqueue;
static DEFINE_MUTEX(fscrypt_init_mutex);

static struct kmem_cache *fscrypt_ctx_cachep;
struct kmem_cache *fscrypt_info_cachep;

/**
 * fscrypt_release_ctx() - Releases an encryption context
 * @ctx: The encryption context to release.
 *
 * If the encryption context was allocated from the pre-allocated pool, returns
 * it to that pool. Else, frees it.
 *
 * If there's a bounce page in the context, this frees that.
 */
void fscrypt_release_ctx(struct fscrypt_ctx *ctx)
{
	unsigned long flags;

	if (ctx->flags & FS_CTX_HAS_BOUNCE_BUFFER_FL && ctx->w.bounce_page) {
		mempool_free(ctx->w.bounce_page, fscrypt_bounce_page_pool);
		ctx->w.bounce_page = NULL;
	}
	ctx->w.control_page = NULL;
	if (ctx->flags & FS_CTX_REQUIRES_FREE_ENCRYPT_FL) {
		kmem_cache_free(fscrypt_ctx_cachep, ctx);
	} else {
		spin_lock_irqsave(&fscrypt_ctx_lock, flags);
		list_add(&ctx->free_list, &fscrypt_free_ctxs);
		spin_unlock_irqrestore(&fscrypt_ctx_lock, flags);
	}
}
EXPORT_SYMBOL(fscrypt_release_ctx);

/**
 * fscrypt_get_ctx() - Gets an encryption context
 * @inode:     The inode for which we are doing the crypto
 * @gfp_flags: The gfp flag for memory allocation
 *
 * Allocates and initializes an encryption context.
 *
 * Return: An allocated and initialized encryption context on success; an
 * error value otherwise (never NULL).
 */
struct fscrypt_ctx *fscrypt_get_ctx(const struct inode *inode, gfp_t gfp_flags)
{
	struct fscrypt_ctx *ctx = NULL;
	struct fscrypt_info *ci = inode->i_crypt_info;
	unsigned long flags;

	if (ci == NULL)
		return ERR_PTR(-ENOKEY);

	/*
	 * We first try getting the ctx from a free list because in
	 * the common case the ctx will have an allocated and
	 * initialized crypto tfm, so it's probably a worthwhile
	 * optimization. For the bounce page, we first try getting it
	 * from the kernel allocator because that's just about as fast
	 * as getting it from a list and because a cache of free pages
	 * should generally be a "last resort" option for a filesystem
	 * to be able to do its job.
	 */
	spin_lock_irqsave(&fscrypt_ctx_lock, flags);
	ctx = list_first_entry_or_null(&fscrypt_free_ctxs,
					struct fscrypt_ctx, free_list);
	if (ctx)
		list_del(&ctx->free_list);
	spin_unlock_irqrestore(&fscrypt_ctx_lock, flags);
	if (!ctx) {
		ctx = kmem_cache_zalloc(fscrypt_ctx_cachep, gfp_flags);
		if (!ctx)
			return ERR_PTR(-ENOMEM);
		ctx->flags |= FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
	} else {
		ctx->flags &= ~FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
	}
	ctx->flags &= ~FS_CTX_HAS_BOUNCE_BUFFER_FL;
	return ctx;
}
EXPORT_SYMBOL(fscrypt_get_ctx);
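
/*
 * Typical caller pattern (illustrative sketch only, not part of the
 * original file; the surrounding error handling is hypothetical):
 *
 *	struct fscrypt_ctx *ctx = fscrypt_get_ctx(inode, GFP_NOFS);
 *
 *	if (IS_ERR(ctx))
 *		return PTR_ERR(ctx);
 *	... use ctx for one read or write operation ...
 *	fscrypt_release_ctx(ctx);
 *
 * Note that fscrypt_get_ctx() reports failure with ERR_PTR() values rather
 * than NULL, so callers must test with IS_ERR(), not a NULL check.
 */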

int fscrypt_do_page_crypto(const struct inode *inode, fscrypt_direction_t rw,
			   u64 lblk_num, struct page *src_page,
			   struct page *dest_page, unsigned int len,
			   unsigned int offs, gfp_t gfp_flags)
{
	struct {
		__le64 index;
		u8 padding[FS_IV_SIZE - sizeof(__le64)];
	} iv;
	struct skcipher_request *req = NULL;
	DECLARE_CRYPTO_WAIT(wait);
	struct scatterlist dst, src;
	struct fscrypt_info *ci = inode->i_crypt_info;
	struct crypto_skcipher *tfm = ci->ci_ctfm;
	int res = 0;

	BUG_ON(len == 0);

	BUILD_BUG_ON(sizeof(iv) != FS_IV_SIZE);
	BUILD_BUG_ON(AES_BLOCK_SIZE != FS_IV_SIZE);
	iv.index = cpu_to_le64(lblk_num);
	memset(iv.padding, 0, sizeof(iv.padding));

	if (ci->ci_essiv_tfm != NULL) {
		crypto_cipher_encrypt_one(ci->ci_essiv_tfm, (u8 *)&iv,
					  (u8 *)&iv);
	}

	req = skcipher_request_alloc(tfm, gfp_flags);
	if (!req) {
		printk_ratelimited(KERN_ERR
				"%s: skcipher_request_alloc() failed\n",
				__func__);
		return -ENOMEM;
	}

	skcipher_request_set_callback(
		req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
		crypto_req_done, &wait);

	sg_init_table(&dst, 1);
	sg_set_page(&dst, dest_page, len, offs);
	sg_init_table(&src, 1);
	sg_set_page(&src, src_page, len, offs);
	skcipher_request_set_crypt(req, &src, &dst, len, &iv);
	if (rw == FS_DECRYPT)
		res = crypto_wait_req(crypto_skcipher_decrypt(req), &wait);
	else
		res = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
	skcipher_request_free(req);
	if (res) {
		printk_ratelimited(KERN_ERR
			"%s: crypto_skcipher_%scrypt() returned %d\n",
			__func__, (rw == FS_DECRYPT) ? "de" : "en", res);
		return res;
	}
	return 0;
}
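
/*
 * IV construction above, illustrated (informative example, not used by the
 * code): for lblk_num == 0x0102, the FS_IV_SIZE (16-byte) IV handed to the
 * skcipher is
 *
 *	02 01 00 00 00 00 00 00  00 00 00 00 00 00 00 00
 *
 * i.e. the logical block number in little-endian order, zero-padded. When
 * ci_essiv_tfm is set (the AES-128-CBC contents encryption mode), this
 * plain IV is additionally encrypted in place with a key derived by
 * hashing the contents encryption key, so IVs are not predictable across
 * files.
 */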

struct page *fscrypt_alloc_bounce_page(struct fscrypt_ctx *ctx,
				       gfp_t gfp_flags)
{
	ctx->w.bounce_page = mempool_alloc(fscrypt_bounce_page_pool, gfp_flags);
	if (ctx->w.bounce_page == NULL)
		return ERR_PTR(-ENOMEM);
	ctx->flags |= FS_CTX_HAS_BOUNCE_BUFFER_FL;
	return ctx->w.bounce_page;
}

/**
 * fscrypt_encrypt_page() - Encrypts a page
 * @inode:     The inode for which the encryption should take place
 * @page:      The page to encrypt. Must be locked for bounce-page
 *             encryption.
 * @len:       Length of data to encrypt in @page and encrypted
 *             data in returned page.
 * @offs:      Offset of data within @page and returned
 *             page holding encrypted data.
 * @lblk_num:  Logical block number. This must be unique for multiple
 *             calls with same inode, except when overwriting
 *             previously written data.
 * @gfp_flags: The gfp flag for memory allocation
 *
 * Encrypts @page using the ctx encryption context. Performs encryption
 * either in-place or into a newly allocated bounce page.
 * Called on the page write path.
 *
 * Bounce page allocation is the default.
 * In this case, the contents of @page are encrypted and stored in an
 * allocated bounce page. @page has to be locked and the caller must call
 * fscrypt_restore_control_page() on the returned ciphertext page to
 * release the bounce buffer and the encryption context.
 *
 * In-place encryption is used by setting the FS_CFLG_OWN_PAGES flag in
 * fscrypt_operations. Here, the input page is returned with its content
 * encrypted.
 *
 * Return: A page with the encrypted content on success. Else, an
 * error value.
 */
struct page *fscrypt_encrypt_page(const struct inode *inode,
				struct page *page,
				unsigned int len,
				unsigned int offs,
				u64 lblk_num, gfp_t gfp_flags)
{
	struct fscrypt_ctx *ctx;
	struct page *ciphertext_page = page;
	int err;

	BUG_ON(len % FS_CRYPTO_BLOCK_SIZE != 0);

	if (inode->i_sb->s_cop->flags & FS_CFLG_OWN_PAGES) {
		/* with inplace-encryption we just encrypt the page */
		err = fscrypt_do_page_crypto(inode, FS_ENCRYPT, lblk_num, page,
					     ciphertext_page, len, offs,
					     gfp_flags);
		if (err)
			return ERR_PTR(err);

		return ciphertext_page;
	}

	BUG_ON(!PageLocked(page));

	ctx = fscrypt_get_ctx(inode, gfp_flags);
	if (IS_ERR(ctx))
		return ERR_CAST(ctx);

	/* The encryption operation will require a bounce page. */
	ciphertext_page = fscrypt_alloc_bounce_page(ctx, gfp_flags);
	if (IS_ERR(ciphertext_page))
		goto errout;

	ctx->w.control_page = page;
	err = fscrypt_do_page_crypto(inode, FS_ENCRYPT, lblk_num,
				     page, ciphertext_page, len, offs,
				     gfp_flags);
	if (err) {
		ciphertext_page = ERR_PTR(err);
		goto errout;
	}
	SetPagePrivate(ciphertext_page);
	set_page_private(ciphertext_page, (unsigned long)ctx);
	lock_page(ciphertext_page);
	return ciphertext_page;

errout:
	fscrypt_release_ctx(ctx);
	return ciphertext_page;
}
EXPORT_SYMBOL(fscrypt_encrypt_page);
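
/*
 * Sketch of the default (bounce-page) write path from a filesystem's point
 * of view (hypothetical code for illustration; example_writepage is a
 * made-up name):
 *
 *	static int example_writepage(struct page *page, u64 lblk_num)
 *	{
 *		struct inode *inode = page->mapping->host;
 *		struct page *cipher_page;
 *
 *		cipher_page = fscrypt_encrypt_page(inode, page, PAGE_SIZE, 0,
 *						   lblk_num, GFP_NOFS);
 *		if (IS_ERR(cipher_page))
 *			return PTR_ERR(cipher_page);
 *		... submit cipher_page for I/O, then call
 *		fscrypt_restore_control_page(cipher_page) on completion ...
 *		return 0;
 *	}
 */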

/**
 * fscrypt_decrypt_page() - Decrypts a page in-place
 * @inode:    The corresponding inode for the page to decrypt.
 * @page:     The page to decrypt. Must be locked in case
 *            it is a writeback page (FS_CFLG_OWN_PAGES unset).
 * @len:      Number of bytes in @page to be decrypted.
 * @offs:     Start of data in @page.
 * @lblk_num: Logical block number.
 *
 * Decrypts page in-place using the ctx encryption context.
 *
 * Called from the read completion callback.
 *
 * Return: Zero on success, non-zero otherwise.
 */
int fscrypt_decrypt_page(const struct inode *inode, struct page *page,
			unsigned int len, unsigned int offs, u64 lblk_num)
{
	if (!(inode->i_sb->s_cop->flags & FS_CFLG_OWN_PAGES))
		BUG_ON(!PageLocked(page));

	return fscrypt_do_page_crypto(inode, FS_DECRYPT, lblk_num, page, page,
				      len, offs, GFP_NOFS);
}
EXPORT_SYMBOL(fscrypt_decrypt_page);

/*
 * Validate dentries for encrypted directories to make sure we aren't
 * potentially caching stale data after a key has been added or
 * removed.
 */
static int fscrypt_d_revalidate(struct dentry *dentry, unsigned int flags)
{
	struct dentry *dir;
	int dir_has_key, cached_with_key;

	if (flags & LOOKUP_RCU)
		return -ECHILD;

	dir = dget_parent(dentry);
	if (!IS_ENCRYPTED(d_inode(dir))) {
		dput(dir);
		return 0;
	}

	/* this should eventually be a flag in d_flags */
	spin_lock(&dentry->d_lock);
	cached_with_key = dentry->d_flags & DCACHE_ENCRYPTED_WITH_KEY;
	spin_unlock(&dentry->d_lock);
	dir_has_key = (d_inode(dir)->i_crypt_info != NULL);
	dput(dir);

	/*
	 * If the dentry was cached without the key, and it is a
	 * negative dentry, it might be a valid name. We can't check
	 * if the key has since been made available due to locking
	 * reasons, so we fail the validation so ext4_lookup() can do
	 * this check.
	 *
	 * We also fail the validation if the dentry was created with
	 * the key present, but we no longer have the key, or vice versa.
	 */
	if ((!cached_with_key && d_is_negative(dentry)) ||
			(!cached_with_key && dir_has_key) ||
			(cached_with_key && !dir_has_key))
		return 0;
	return 1;
}

const struct dentry_operations fscrypt_d_ops = {
	.d_revalidate = fscrypt_d_revalidate,
};
EXPORT_SYMBOL(fscrypt_d_ops);

void fscrypt_restore_control_page(struct page *page)
{
	struct fscrypt_ctx *ctx;

	ctx = (struct fscrypt_ctx *)page_private(page);
	set_page_private(page, (unsigned long)NULL);
	ClearPagePrivate(page);
	unlock_page(page);
	fscrypt_release_ctx(ctx);
}
EXPORT_SYMBOL(fscrypt_restore_control_page);
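
/*
 * fscrypt_restore_control_page() undoes the bookkeeping done at the end of
 * fscrypt_encrypt_page(): page_private() of the ciphertext page holds the
 * fscrypt_ctx, and releasing the ctx also returns the bounce page to the
 * mempool. Illustrative write-completion snippet (hypothetical; assumes
 * cipher_page was returned by fscrypt_encrypt_page()):
 *
 *	struct page *plaintext_page = fscrypt_control_page(cipher_page);
 *
 *	fscrypt_restore_control_page(cipher_page);
 *	end_page_writeback(plaintext_page);
 *
 * Note that the control page must be fetched before the restore call,
 * since the restore frees the ctx that records it.
 */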

static void fscrypt_destroy(void)
{
	struct fscrypt_ctx *pos, *n;

	list_for_each_entry_safe(pos, n, &fscrypt_free_ctxs, free_list)
		kmem_cache_free(fscrypt_ctx_cachep, pos);
	INIT_LIST_HEAD(&fscrypt_free_ctxs);
	mempool_destroy(fscrypt_bounce_page_pool);
	fscrypt_bounce_page_pool = NULL;
}

/**
 * fscrypt_initialize() - allocate major buffers for fs encryption.
 * @cop_flags: fscrypt operations flags
 *
 * We only call this when we start accessing encrypted files, since it
 * results in memory getting allocated that wouldn't otherwise be used.
 *
 * Return: Zero on success, non-zero otherwise.
 */
int fscrypt_initialize(unsigned int cop_flags)
{
	int i, res = -ENOMEM;

	/* No need to allocate a bounce page pool if this FS won't use it. */
	if (cop_flags & FS_CFLG_OWN_PAGES)
		return 0;

	mutex_lock(&fscrypt_init_mutex);
	if (fscrypt_bounce_page_pool)
		goto already_initialized;

	for (i = 0; i < num_prealloc_crypto_ctxs; i++) {
		struct fscrypt_ctx *ctx;

		ctx = kmem_cache_zalloc(fscrypt_ctx_cachep, GFP_NOFS);
		if (!ctx)
			goto fail;
		list_add(&ctx->free_list, &fscrypt_free_ctxs);
	}

	fscrypt_bounce_page_pool =
		mempool_create_page_pool(num_prealloc_crypto_pages, 0);
	if (!fscrypt_bounce_page_pool)
		goto fail;

already_initialized:
	mutex_unlock(&fscrypt_init_mutex);
	return 0;
fail:
	fscrypt_destroy();
	mutex_unlock(&fscrypt_init_mutex);
	return res;
}

/**
 * fscrypt_init() - Set up for fs encryption.
 */
static int __init fscrypt_init(void)
{
	fscrypt_read_workqueue = alloc_workqueue("fscrypt_read_queue",
							WQ_HIGHPRI, 0);
	if (!fscrypt_read_workqueue)
		goto fail;

	fscrypt_ctx_cachep = KMEM_CACHE(fscrypt_ctx, SLAB_RECLAIM_ACCOUNT);
	if (!fscrypt_ctx_cachep)
		goto fail_free_queue;

	fscrypt_info_cachep = KMEM_CACHE(fscrypt_info, SLAB_RECLAIM_ACCOUNT);
	if (!fscrypt_info_cachep)
		goto fail_free_ctx;

	return 0;

fail_free_ctx:
	kmem_cache_destroy(fscrypt_ctx_cachep);
fail_free_queue:
	destroy_workqueue(fscrypt_read_workqueue);
fail:
	return -ENOMEM;
}
module_init(fscrypt_init)

/**
 * fscrypt_exit() - Shutdown the fs encryption system
 */
static void __exit fscrypt_exit(void)
{
	fscrypt_destroy();

	if (fscrypt_read_workqueue)
		destroy_workqueue(fscrypt_read_workqueue);
	kmem_cache_destroy(fscrypt_ctx_cachep);
	kmem_cache_destroy(fscrypt_info_cachep);

	fscrypt_essiv_cleanup();
}
module_exit(fscrypt_exit);

MODULE_LICENSE("GPL");
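
/*
 * Usage note (informative): the two preallocation knobs near the top of
 * this file are 0444 module parameters, so they are read-only at runtime
 * and can only be set at boot or module load time, e.g. (assuming this
 * code is built as the "fscrypto" module):
 *
 *	modprobe fscrypto num_prealloc_crypto_pages=64
 *
 * Their current values are visible under
 * /sys/module/fscrypto/parameters/.
 */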