// SPDX-License-Identifier: GPL-2.0
/*
 * Utility functions for file contents encryption/decryption on
 * block device-based filesystems.
 *
 * Copyright (C) 2015, Google, Inc.
 * Copyright (C) 2015, Motorola Mobility
 */

#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/namei.h>
#include "fscrypt_private.h"

/**
 * fscrypt_decrypt_bio() - decrypt the contents of a bio
 * @bio: the bio to decrypt
 *
 * Decrypt the contents of a "read" bio following successful completion of the
 * underlying disk read.  The bio must be reading a whole number of blocks of an
 * encrypted file directly into the page cache.  If the bio is reading the
 * ciphertext into bounce pages instead of the page cache (for example, because
 * the file is also compressed, so decompression is required after decryption),
 * then this function isn't applicable.  This function may sleep, so it must be
 * called from a workqueue rather than from the bio's bi_end_io callback.
 *
 * Return: %true on success; %false on failure.  On failure, bio->bi_status is
 *	   also set to an error status.
 */
bool fscrypt_decrypt_bio(struct bio *bio)
{
	struct folio_iter fi;

	bio_for_each_folio_all(fi, bio) {
		int err = fscrypt_decrypt_pagecache_blocks(fi.folio, fi.length,
							   fi.offset);

		if (err) {
			bio->bi_status = errno_to_blk_status(err);
			return false;
		}
	}
	return true;
}
EXPORT_SYMBOL(fscrypt_decrypt_bio);
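
/*
 * Example (illustrative sketch, not part of this file): one way a block
 * device-based filesystem might defer fscrypt_decrypt_bio() to a workqueue,
 * since it may sleep and therefore can't be called from the bio's bi_end_io
 * callback.  The names my_read_ctx, my_decrypt_work(), my_read_end_io(), and
 * my_finish_read() are hypothetical; real filesystems such as ext4 and f2fs
 * have their own, more elaborate post-read processing, and this sketch omits
 * the check for whether the inode actually uses fs-layer contents encryption.
 *
 *	struct my_read_ctx {
 *		struct work_struct work;
 *		struct bio *bio;
 *	};
 *
 *	static void my_decrypt_work(struct work_struct *work)
 *	{
 *		struct my_read_ctx *ctx =
 *			container_of(work, struct my_read_ctx, work);
 *		struct bio *bio = ctx->bio;
 *
 *		// On failure, fscrypt_decrypt_bio() sets bio->bi_status itself,
 *		// so the normal completion path can report the error.
 *		fscrypt_decrypt_bio(bio);
 *		my_finish_read(bio);	// unlock/mark folios uptodate, bio_put()
 *		kfree(ctx);
 *	}
 *
 *	static void my_read_end_io(struct bio *bio)
 *	{
 *		struct my_read_ctx *ctx = bio->bi_private;
 *
 *		if (bio->bi_status != BLK_STS_OK) {
 *			my_finish_read(bio);	// just report the I/O error
 *			kfree(ctx);
 *			return;
 *		}
 *		INIT_WORK(&ctx->work, my_decrypt_work);
 *		fscrypt_enqueue_decrypt_work(&ctx->work);
 *	}
 */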

static int fscrypt_zeroout_range_inline_crypt(const struct inode *inode,
					      pgoff_t lblk, sector_t pblk,
					      unsigned int len)
{
	const unsigned int blockbits = inode->i_blkbits;
	const unsigned int blocks_per_page = 1 << (PAGE_SHIFT - blockbits);
	struct bio *bio;
	int ret, err = 0;
	int num_pages = 0;

	/* This always succeeds since __GFP_DIRECT_RECLAIM is set. */
	bio = bio_alloc(inode->i_sb->s_bdev, BIO_MAX_VECS, REQ_OP_WRITE,
			GFP_NOFS);

	while (len) {
		unsigned int blocks_this_page = min(len, blocks_per_page);
		unsigned int bytes_this_page = blocks_this_page << blockbits;

		if (num_pages == 0) {
			fscrypt_set_bio_crypt_ctx(bio, inode, lblk, GFP_NOFS);
			bio->bi_iter.bi_sector =
					pblk << (blockbits - SECTOR_SHIFT);
		}
		ret = bio_add_page(bio, ZERO_PAGE(0), bytes_this_page, 0);
		if (WARN_ON_ONCE(ret != bytes_this_page)) {
			err = -EIO;
			goto out;
		}
		num_pages++;
		len -= blocks_this_page;
		lblk += blocks_this_page;
		pblk += blocks_this_page;
		if (num_pages == BIO_MAX_VECS || !len ||
		    !fscrypt_mergeable_bio(bio, inode, lblk)) {
			err = submit_bio_wait(bio);
			if (err)
				goto out;
			bio_reset(bio, inode->i_sb->s_bdev, REQ_OP_WRITE);
			num_pages = 0;
		}
	}
out:
	bio_put(bio);
	return err;
}

/**
 * fscrypt_zeroout_range() - zero out a range of blocks in an encrypted file
 * @inode: the file's inode
 * @lblk: the first file logical block to zero out
 * @pblk: the first filesystem physical block to zero out
 * @len: number of blocks to zero out
 *
 * Zero out filesystem blocks in an encrypted regular file on-disk, i.e. write
 * ciphertext blocks which decrypt to the all-zeroes block.  The blocks must be
 * both logically and physically contiguous.  It's also assumed that the
 * filesystem only uses a single block device, ->s_bdev.
 *
 * Note that since each block uses a different IV, this involves writing a
 * different ciphertext to each block; we can't simply reuse the same one.
 *
 * Return: 0 on success; -errno on failure.
 */
int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
			  sector_t pblk, unsigned int len)
{
	const unsigned int blockbits = inode->i_blkbits;
	const unsigned int blocksize = 1 << blockbits;
	const unsigned int blocks_per_page_bits = PAGE_SHIFT - blockbits;
	const unsigned int blocks_per_page = 1 << blocks_per_page_bits;
	struct page *pages[16]; /* write up to 16 pages at a time */
	unsigned int nr_pages;
	unsigned int i;
	unsigned int offset;
	struct bio *bio;
	int ret, err;

	if (len == 0)
		return 0;

	if (fscrypt_inode_uses_inline_crypto(inode))
		return fscrypt_zeroout_range_inline_crypt(inode, lblk, pblk,
							  len);

	BUILD_BUG_ON(ARRAY_SIZE(pages) > BIO_MAX_VECS);
	nr_pages = min_t(unsigned int, ARRAY_SIZE(pages),
			 (len + blocks_per_page - 1) >> blocks_per_page_bits);

	/*
	 * We need at least one page for ciphertext.  Allocate the first one
	 * from a mempool, with __GFP_DIRECT_RECLAIM set so that it can't fail.
	 *
	 * Any additional page allocations are allowed to fail, as they only
	 * help performance, and waiting on the mempool for them could deadlock.
	 */
	for (i = 0; i < nr_pages; i++) {
		pages[i] = fscrypt_alloc_bounce_page(i == 0 ? GFP_NOFS :
						     GFP_NOWAIT | __GFP_NOWARN);
		if (!pages[i])
			break;
	}
	nr_pages = i;
	if (WARN_ON_ONCE(nr_pages <= 0))
		return -EINVAL;

	/* This always succeeds since __GFP_DIRECT_RECLAIM is set. */
	bio = bio_alloc(inode->i_sb->s_bdev, nr_pages, REQ_OP_WRITE, GFP_NOFS);

	do {
		bio->bi_iter.bi_sector = pblk << (blockbits - 9);

		i = 0;
		offset = 0;
		do {
			err = fscrypt_crypt_block(inode, FS_ENCRYPT, lblk,
						  ZERO_PAGE(0), pages[i],
						  blocksize, offset, GFP_NOFS);
			if (err)
				goto out;
			lblk++;
			pblk++;
			len--;
			offset += blocksize;
			if (offset == PAGE_SIZE || len == 0) {
				ret = bio_add_page(bio, pages[i++], offset, 0);
				if (WARN_ON_ONCE(ret != offset)) {
					err = -EIO;
					goto out;
				}
				offset = 0;
			}
		} while (i != nr_pages && len != 0);

		err = submit_bio_wait(bio);
		if (err)
			goto out;
		bio_reset(bio, inode->i_sb->s_bdev, REQ_OP_WRITE);
	} while (len != 0);
	err = 0;
out:
	bio_put(bio);
	for (i = 0; i < nr_pages; i++)
		fscrypt_free_bounce_page(pages[i]);
	return err;
}
EXPORT_SYMBOL(fscrypt_zeroout_range);
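
/*
 * Example (illustrative sketch, not part of this file): a hypothetical caller
 * of fscrypt_zeroout_range().  A filesystem that has mapped a logically and
 * physically contiguous run of blocks in a regular file and needs them to
 * read back as zeroes might dispatch on encryption like this.  The function
 * name my_zero_blocks() and the helper my_zero_plaintext_blocks() are made up
 * for illustration.
 *
 *	static int my_zero_blocks(struct inode *inode, pgoff_t lblk,
 *				  sector_t pblk, unsigned int nr_blocks)
 *	{
 *		// Encrypted regular file: each block needs its own ciphertext
 *		// that decrypts to all-zeroes, so let fscrypt do the writes.
 *		if (IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode))
 *			return fscrypt_zeroout_range(inode, lblk, pblk,
 *						     nr_blocks);
 *
 *		// Otherwise plain zeroes can be written directly.
 *		return my_zero_plaintext_blocks(inode, pblk, nr_blocks);
 *	}
 */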