// SPDX-License-Identifier: GPL-2.0
/*
 * This contains encryption functions for per-file encryption.
 *
 * Copyright (C) 2015, Google, Inc.
 * Copyright (C) 2015, Motorola Mobility
 *
 * Written by Michael Halcrow, 2014.
 *
 * Filename encryption additions
 *	Uday Savagaonkar, 2014
 * Encryption policy handling additions
 *	Ildar Muslukhov, 2014
 * Add fscrypt_pullback_bio_page()
 *	Jaegeuk Kim, 2015.
 *
 * This has not yet undergone a rigorous security audit.
 *
 * The usage of AES-XTS should conform to recommendations in NIST
 * Special Publication 800-38E and IEEE P1619/D16.
2158ae7468SRichard Weinberger */ 2258ae7468SRichard Weinberger 2358ae7468SRichard Weinberger #include <linux/pagemap.h> 2458ae7468SRichard Weinberger #include <linux/module.h> 2558ae7468SRichard Weinberger #include <linux/bio.h> 2658ae7468SRichard Weinberger #include <linux/namei.h> 2758ae7468SRichard Weinberger #include "fscrypt_private.h" 2858ae7468SRichard Weinberger 290cb8dae4SEric Biggers static void __fscrypt_decrypt_bio(struct bio *bio, bool done) 3058ae7468SRichard Weinberger { 3158ae7468SRichard Weinberger struct bio_vec *bv; 326dc4f100SMing Lei struct bvec_iter_all iter_all; 3358ae7468SRichard Weinberger 34*2b070cfeSChristoph Hellwig bio_for_each_segment_all(bv, bio, iter_all) { 3558ae7468SRichard Weinberger struct page *page = bv->bv_page; 3658ae7468SRichard Weinberger int ret = fscrypt_decrypt_page(page->mapping->host, page, 3758ae7468SRichard Weinberger PAGE_SIZE, 0, page->index); 3858ae7468SRichard Weinberger 3958ae7468SRichard Weinberger if (ret) { 4058ae7468SRichard Weinberger WARN_ON_ONCE(1); 4158ae7468SRichard Weinberger SetPageError(page); 420cb8dae4SEric Biggers } else if (done) { 4358ae7468SRichard Weinberger SetPageUptodate(page); 4458ae7468SRichard Weinberger } 450cb8dae4SEric Biggers if (done) 4658ae7468SRichard Weinberger unlock_page(page); 4758ae7468SRichard Weinberger } 480cb8dae4SEric Biggers } 490cb8dae4SEric Biggers 500cb8dae4SEric Biggers void fscrypt_decrypt_bio(struct bio *bio) 510cb8dae4SEric Biggers { 520cb8dae4SEric Biggers __fscrypt_decrypt_bio(bio, false); 530cb8dae4SEric Biggers } 540cb8dae4SEric Biggers EXPORT_SYMBOL(fscrypt_decrypt_bio); 550cb8dae4SEric Biggers 560cb8dae4SEric Biggers static void completion_pages(struct work_struct *work) 570cb8dae4SEric Biggers { 580cb8dae4SEric Biggers struct fscrypt_ctx *ctx = 590cb8dae4SEric Biggers container_of(work, struct fscrypt_ctx, r.work); 600cb8dae4SEric Biggers struct bio *bio = ctx->r.bio; 610cb8dae4SEric Biggers 620cb8dae4SEric Biggers __fscrypt_decrypt_bio(bio, true); 
6358ae7468SRichard Weinberger fscrypt_release_ctx(ctx); 6458ae7468SRichard Weinberger bio_put(bio); 6558ae7468SRichard Weinberger } 6658ae7468SRichard Weinberger 670cb8dae4SEric Biggers void fscrypt_enqueue_decrypt_bio(struct fscrypt_ctx *ctx, struct bio *bio) 6858ae7468SRichard Weinberger { 6958ae7468SRichard Weinberger INIT_WORK(&ctx->r.work, completion_pages); 7058ae7468SRichard Weinberger ctx->r.bio = bio; 710cb8dae4SEric Biggers fscrypt_enqueue_decrypt_work(&ctx->r.work); 7258ae7468SRichard Weinberger } 730cb8dae4SEric Biggers EXPORT_SYMBOL(fscrypt_enqueue_decrypt_bio); 7458ae7468SRichard Weinberger 7558ae7468SRichard Weinberger void fscrypt_pullback_bio_page(struct page **page, bool restore) 7658ae7468SRichard Weinberger { 7758ae7468SRichard Weinberger struct fscrypt_ctx *ctx; 7858ae7468SRichard Weinberger struct page *bounce_page; 7958ae7468SRichard Weinberger 8058ae7468SRichard Weinberger /* The bounce data pages are unmapped. */ 8158ae7468SRichard Weinberger if ((*page)->mapping) 8258ae7468SRichard Weinberger return; 8358ae7468SRichard Weinberger 8458ae7468SRichard Weinberger /* The bounce data page is unmapped. 
*/ 8558ae7468SRichard Weinberger bounce_page = *page; 8658ae7468SRichard Weinberger ctx = (struct fscrypt_ctx *)page_private(bounce_page); 8758ae7468SRichard Weinberger 8858ae7468SRichard Weinberger /* restore control page */ 8958ae7468SRichard Weinberger *page = ctx->w.control_page; 9058ae7468SRichard Weinberger 9158ae7468SRichard Weinberger if (restore) 9258ae7468SRichard Weinberger fscrypt_restore_control_page(bounce_page); 9358ae7468SRichard Weinberger } 9458ae7468SRichard Weinberger EXPORT_SYMBOL(fscrypt_pullback_bio_page); 9558ae7468SRichard Weinberger 9658ae7468SRichard Weinberger int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk, 9758ae7468SRichard Weinberger sector_t pblk, unsigned int len) 9858ae7468SRichard Weinberger { 9958ae7468SRichard Weinberger struct fscrypt_ctx *ctx; 10058ae7468SRichard Weinberger struct page *ciphertext_page = NULL; 10158ae7468SRichard Weinberger struct bio *bio; 10258ae7468SRichard Weinberger int ret, err = 0; 10358ae7468SRichard Weinberger 10458ae7468SRichard Weinberger BUG_ON(inode->i_sb->s_blocksize != PAGE_SIZE); 10558ae7468SRichard Weinberger 10658ae7468SRichard Weinberger ctx = fscrypt_get_ctx(inode, GFP_NOFS); 10758ae7468SRichard Weinberger if (IS_ERR(ctx)) 10858ae7468SRichard Weinberger return PTR_ERR(ctx); 10958ae7468SRichard Weinberger 11058ae7468SRichard Weinberger ciphertext_page = fscrypt_alloc_bounce_page(ctx, GFP_NOWAIT); 11158ae7468SRichard Weinberger if (IS_ERR(ciphertext_page)) { 11258ae7468SRichard Weinberger err = PTR_ERR(ciphertext_page); 11358ae7468SRichard Weinberger goto errout; 11458ae7468SRichard Weinberger } 11558ae7468SRichard Weinberger 11658ae7468SRichard Weinberger while (len--) { 11758ae7468SRichard Weinberger err = fscrypt_do_page_crypto(inode, FS_ENCRYPT, lblk, 11858ae7468SRichard Weinberger ZERO_PAGE(0), ciphertext_page, 11958ae7468SRichard Weinberger PAGE_SIZE, 0, GFP_NOFS); 12058ae7468SRichard Weinberger if (err) 12158ae7468SRichard Weinberger goto errout; 12258ae7468SRichard 
Weinberger 12358ae7468SRichard Weinberger bio = bio_alloc(GFP_NOWAIT, 1); 12458ae7468SRichard Weinberger if (!bio) { 12558ae7468SRichard Weinberger err = -ENOMEM; 12658ae7468SRichard Weinberger goto errout; 12758ae7468SRichard Weinberger } 12874d46992SChristoph Hellwig bio_set_dev(bio, inode->i_sb->s_bdev); 12958ae7468SRichard Weinberger bio->bi_iter.bi_sector = 13058ae7468SRichard Weinberger pblk << (inode->i_sb->s_blocksize_bits - 9); 13158ae7468SRichard Weinberger bio_set_op_attrs(bio, REQ_OP_WRITE, 0); 13258ae7468SRichard Weinberger ret = bio_add_page(bio, ciphertext_page, 13358ae7468SRichard Weinberger inode->i_sb->s_blocksize, 0); 13458ae7468SRichard Weinberger if (ret != inode->i_sb->s_blocksize) { 13558ae7468SRichard Weinberger /* should never happen! */ 13658ae7468SRichard Weinberger WARN_ON(1); 13758ae7468SRichard Weinberger bio_put(bio); 13858ae7468SRichard Weinberger err = -EIO; 13958ae7468SRichard Weinberger goto errout; 14058ae7468SRichard Weinberger } 14158ae7468SRichard Weinberger err = submit_bio_wait(bio); 1424e4cbee9SChristoph Hellwig if (err == 0 && bio->bi_status) 14358ae7468SRichard Weinberger err = -EIO; 14458ae7468SRichard Weinberger bio_put(bio); 14558ae7468SRichard Weinberger if (err) 14658ae7468SRichard Weinberger goto errout; 14758ae7468SRichard Weinberger lblk++; 14858ae7468SRichard Weinberger pblk++; 14958ae7468SRichard Weinberger } 15058ae7468SRichard Weinberger err = 0; 15158ae7468SRichard Weinberger errout: 15258ae7468SRichard Weinberger fscrypt_release_ctx(ctx); 15358ae7468SRichard Weinberger return err; 15458ae7468SRichard Weinberger } 15558ae7468SRichard Weinberger EXPORT_SYMBOL(fscrypt_zeroout_range); 156