// SPDX-License-Identifier: GPL-2.0
/*
 * This contains encryption functions for per-file encryption.
 *
 * Copyright (C) 2015, Google, Inc.
 * Copyright (C) 2015, Motorola Mobility
 *
 * Written by Michael Halcrow, 2014.
 *
 * Filename encryption additions
 *	Uday Savagaonkar, 2014
 * Encryption policy handling additions
 *	Ildar Muslukhov, 2014
 * Add fscrypt_pullback_bio_page()
 *	Jaegeuk Kim, 2015.
 *
 * This has not yet undergone a rigorous security audit.
 *
 * The usage of AES-XTS should conform to recommendations in NIST
 * Special Publication 800-38E and IEEE P1619/D16.
 */

#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/namei.h>
#include "fscrypt_private.h"

/*
 * Decrypt the pages of a read bio in place.  If @done, additionally mark
 * each page uptodate on success and unlock it, completing the read.
 */
static void __fscrypt_decrypt_bio(struct bio *bio, bool done)
{
	struct bio_vec *bv;
	int i;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bv, bio, i, iter_all) {
		struct page *page = bv->bv_page;
		int ret = fscrypt_decrypt_page(page->mapping->host, page,
				PAGE_SIZE, 0, page->index);

		if (ret) {
			WARN_ON_ONCE(1);
			SetPageError(page);
		} else if (done) {
			SetPageUptodate(page);
		}
		if (done)
			unlock_page(page);
	}
}

/* Decrypt a read bio; the caller is responsible for the page state. */
void fscrypt_decrypt_bio(struct bio *bio)
{
	__fscrypt_decrypt_bio(bio, false);
}
EXPORT_SYMBOL(fscrypt_decrypt_bio);

/*
 * Work function: decrypt a completed read bio, then release the crypto
 * context and drop the bio reference.
 */
static void completion_pages(struct work_struct *work)
{
	struct fscrypt_ctx *ctx =
		container_of(work, struct fscrypt_ctx, r.work);
	struct bio *bio = ctx->r.bio;

	__fscrypt_decrypt_bio(bio, true);
	fscrypt_release_ctx(ctx);
	bio_put(bio);
}

/*
 * Queue a completed read bio for decryption on the fscrypt workqueue, so
 * that the crypto work runs in process context rather than in the bio
 * completion (interrupt) context.
 */
void fscrypt_enqueue_decrypt_bio(struct fscrypt_ctx *ctx, struct bio *bio)
{
	INIT_WORK(&ctx->r.work, completion_pages);
	ctx->r.bio = bio;
	fscrypt_enqueue_decrypt_work(&ctx->r.work);
}
EXPORT_SYMBOL(fscrypt_enqueue_decrypt_bio);

/*
 * If *page is a bounce (ciphertext) page, replace it with the pagecache
 * page it was encrypted from.  If @restore, also free the bounce page
 * back to the bounce page pool.
 */
void fscrypt_pullback_bio_page(struct page **page, bool restore)
{
	struct fscrypt_ctx *ctx;
	struct page *bounce_page;

	/* The bounce data pages are unmapped. */
	if ((*page)->mapping)
		return;

	/* The bounce data page is unmapped. */
	bounce_page = *page;
	ctx = (struct fscrypt_ctx *)page_private(bounce_page);

	/* restore control page */
	*page = ctx->w.control_page;

	if (restore)
		fscrypt_restore_control_page(bounce_page);
}
EXPORT_SYMBOL(fscrypt_pullback_bio_page);

/*
 * Write encrypted zeroes to @len blocks of @inode, starting at logical
 * block @lblk, which is mapped to physical block @pblk.  Only
 * filesystems whose block size equals PAGE_SIZE are supported.
 */
int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
			  sector_t pblk, unsigned int len)
{
	struct fscrypt_ctx *ctx;
	struct page *ciphertext_page = NULL;
	struct bio *bio;
	int ret, err = 0;

	BUG_ON(inode->i_sb->s_blocksize != PAGE_SIZE);

	ctx = fscrypt_get_ctx(inode, GFP_NOFS);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ciphertext_page = fscrypt_alloc_bounce_page(ctx, GFP_NOWAIT);
	if (IS_ERR(ciphertext_page)) {
		err = PTR_ERR(ciphertext_page);
		goto errout;
	}

	while (len--) {
		err = fscrypt_do_page_crypto(inode, FS_ENCRYPT, lblk,
					     ZERO_PAGE(0), ciphertext_page,
					     PAGE_SIZE, 0, GFP_NOFS);
		if (err)
			goto errout;

		bio = bio_alloc(GFP_NOWAIT, 1);
		if (!bio) {
			err = -ENOMEM;
			goto errout;
		}
		bio_set_dev(bio, inode->i_sb->s_bdev);
		bio->bi_iter.bi_sector =
			pblk << (inode->i_sb->s_blocksize_bits - 9);
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
		ret = bio_add_page(bio, ciphertext_page,
				   inode->i_sb->s_blocksize, 0);
		if (ret != inode->i_sb->s_blocksize) {
			/* should never happen! */
			WARN_ON(1);
			bio_put(bio);
			err = -EIO;
			goto errout;
		}
		err = submit_bio_wait(bio);
		if (err == 0 && bio->bi_status)
			err = -EIO;
		bio_put(bio);
		if (err)
			goto errout;
		lblk++;
		pblk++;
	}
	err = 0;
errout:
	fscrypt_release_ctx(ctx);
	return err;
}
EXPORT_SYMBOL(fscrypt_zeroout_range);
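
/*
 * Example (illustrative sketch, not part of this file): a filesystem's
 * read completion handler handing a bio off for decryption.  This mirrors
 * the pattern ext4's readpage code uses with fscrypt_enqueue_decrypt_bio();
 * stashing the fscrypt_ctx in bi_private and the example_* naming are that
 * caller's convention, not something this file defines.
 *
 *	static void example_read_end_io(struct bio *bio)
 *	{
 *		struct fscrypt_ctx *ctx = bio->bi_private;
 *
 *		if (ctx) {
 *			if (bio->bi_status) {
 *				fscrypt_release_ctx(ctx);
 *			} else {
 *				// completion_pages() will decrypt, unlock
 *				// the pages, and put the bio for us.
 *				fscrypt_enqueue_decrypt_bio(ctx, bio);
 *				return;
 *			}
 *		}
 *		// ... handle the unencrypted or error path here: set the
 *		// page flags, unlock the pages, and bio_put(bio).
 *	}
 */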