// SPDX-License-Identifier: GPL-2.0
/*
 * Inline encryption support for fscrypt
 *
 * Copyright 2019 Google LLC
 */

/*
 * With "inline encryption", the block layer handles the decryption/encryption
 * as part of the bio, instead of the filesystem doing the crypto itself via
 * the crypto API.  See Documentation/block/inline-encryption.rst.  fscrypt
 * still provides the key and IV to use.
 */

#include <linux/blk-crypto.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/uio.h>

#include "fscrypt_private.h"

struct fscrypt_blk_crypto_key {
	struct blk_crypto_key base;
	int num_devs;
	struct request_queue *devs[];
};

static int fscrypt_get_num_devices(struct super_block *sb)
{
	if (sb->s_cop->get_num_devices)
		return sb->s_cop->get_num_devices(sb);
	return 1;
}

static void fscrypt_get_devices(struct super_block *sb, int num_devs,
				struct request_queue **devs)
{
	if (num_devs == 1)
		devs[0] = bdev_get_queue(sb->s_bdev);
	else
		sb->s_cop->get_devices(sb, devs);
}

static unsigned int fscrypt_get_dun_bytes(const struct fscrypt_info *ci)
{
	struct super_block *sb = ci->ci_inode->i_sb;
	unsigned int flags = fscrypt_policy_flags(&ci->ci_policy);
	int ino_bits = 64, lblk_bits = 64;

	if (flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY)
		return offsetofend(union fscrypt_iv, nonce);

	if (flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64)
		return sizeof(__le64);

	if (flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32)
		return sizeof(__le32);

	/* Default case: IVs are just the file logical block number */
	if (sb->s_cop->get_ino_and_lblk_bits)
		sb->s_cop->get_ino_and_lblk_bits(sb, &ino_bits, &lblk_bits);
	return DIV_ROUND_UP(lblk_bits, 8);
}
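
/*
 * Illustrative note (a sketch, not exercised by this file): on a filesystem
 * whose fscrypt_operations report 32 logical block bits, the default case
 * above yields DIV_ROUND_UP(32, 8) = 4 DUN bytes, while IV_INO_LBLK_64
 * always yields 8 bytes, since its DUN packs a 32-bit inode number together
 * with a 32-bit block number.
 */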

/* Enable inline encryption for this file if supported. */
int fscrypt_select_encryption_impl(struct fscrypt_info *ci)
{
	const struct inode *inode = ci->ci_inode;
	struct super_block *sb = inode->i_sb;
	struct blk_crypto_config crypto_cfg;
	int num_devs;
	struct request_queue **devs;
	int i;

	/* The file must need contents encryption, not filenames encryption */
	if (!S_ISREG(inode->i_mode))
		return 0;

	/* The crypto mode must have a blk-crypto counterpart */
	if (ci->ci_mode->blk_crypto_mode == BLK_ENCRYPTION_MODE_INVALID)
		return 0;

	/* The filesystem must be mounted with -o inlinecrypt */
	if (!(sb->s_flags & SB_INLINECRYPT))
		return 0;

	/*
	 * When a page contains multiple logically contiguous filesystem blocks,
	 * some filesystem code only calls fscrypt_mergeable_bio() for the first
	 * block in the page.  This is fine for most of fscrypt's IV generation
	 * strategies, where contiguous blocks imply contiguous IVs.  But it
	 * doesn't work with IV_INO_LBLK_32.  For now, simply exclude
	 * IV_INO_LBLK_32 with blocksize != PAGE_SIZE from inline encryption.
	 */
	if ((fscrypt_policy_flags(&ci->ci_policy) &
	     FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32) &&
	    sb->s_blocksize != PAGE_SIZE)
		return 0;

	/*
	 * On all the filesystem's devices, blk-crypto must support the crypto
	 * configuration that the file would use.
	 */
	crypto_cfg.crypto_mode = ci->ci_mode->blk_crypto_mode;
	crypto_cfg.data_unit_size = sb->s_blocksize;
	crypto_cfg.dun_bytes = fscrypt_get_dun_bytes(ci);
	num_devs = fscrypt_get_num_devices(sb);
	devs = kmalloc_array(num_devs, sizeof(*devs), GFP_KERNEL);
	if (!devs)
		return -ENOMEM;
	fscrypt_get_devices(sb, num_devs, devs);

	for (i = 0; i < num_devs; i++) {
		if (!blk_crypto_config_supported(devs[i], &crypto_cfg))
			goto out_free_devs;
	}

	ci->ci_inlinecrypt = true;
out_free_devs:
	kfree(devs);

	return 0;
}
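
/*
 * Sketch of the superblock hooks used above, for a hypothetical multi-device
 * filesystem (the names and EXAMPLE_SB() are illustrative assumptions; real
 * implementations live in the filesystems themselves):
 *
 *	static int example_get_num_devices(struct super_block *sb)
 *	{
 *		return EXAMPLE_SB(sb)->s_ndevs;
 *	}
 *
 *	static void example_get_devices(struct super_block *sb,
 *					struct request_queue **devs)
 *	{
 *		// fill devs[0..num_devs-1] with each device's request_queue
 *	}
 *
 * A single-device filesystem can leave both hooks NULL; the code above then
 * falls back to the queue of sb->s_bdev.
 */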

int fscrypt_prepare_inline_crypt_key(struct fscrypt_prepared_key *prep_key,
				     const u8 *raw_key,
				     const struct fscrypt_info *ci)
{
	const struct inode *inode = ci->ci_inode;
	struct super_block *sb = inode->i_sb;
	enum blk_crypto_mode_num crypto_mode = ci->ci_mode->blk_crypto_mode;
	int num_devs = fscrypt_get_num_devices(sb);
	int queue_refs = 0;
	struct fscrypt_blk_crypto_key *blk_key;
	int err;
	int i;

	blk_key = kzalloc(struct_size(blk_key, devs, num_devs), GFP_KERNEL);
	if (!blk_key)
		return -ENOMEM;

	blk_key->num_devs = num_devs;
	fscrypt_get_devices(sb, num_devs, blk_key->devs);

	err = blk_crypto_init_key(&blk_key->base, raw_key, crypto_mode,
				  fscrypt_get_dun_bytes(ci), sb->s_blocksize);
	if (err) {
		fscrypt_err(inode, "error %d initializing blk-crypto key", err);
		goto fail;
	}

	/*
	 * We have to start using blk-crypto on all the filesystem's devices.
	 * We also have to save all the request_queues for later so that the
	 * key can be evicted from them.  This is needed because some keys
	 * aren't destroyed until after the filesystem has already been
	 * unmounted (namely, the per-mode keys in struct fscrypt_master_key).
	 */
	for (i = 0; i < num_devs; i++) {
		if (!blk_get_queue(blk_key->devs[i])) {
			fscrypt_err(inode, "couldn't get request_queue");
			err = -EAGAIN;
			goto fail;
		}
		queue_refs++;

		err = blk_crypto_start_using_key(&blk_key->base,
						 blk_key->devs[i]);
		if (err) {
			fscrypt_err(inode,
				    "error %d starting to use blk-crypto", err);
			goto fail;
		}
	}
	/*
	 * Pairs with the smp_load_acquire() in fscrypt_is_key_prepared().
	 * I.e., here we publish ->blk_key with a RELEASE barrier so that
	 * concurrent tasks can ACQUIRE it.  Note that this concurrency is only
	 * possible for per-mode keys, not for per-file keys.
	 */
	smp_store_release(&prep_key->blk_key, blk_key);
	return 0;

fail:
	for (i = 0; i < queue_refs; i++)
		blk_put_queue(blk_key->devs[i]);
	kfree_sensitive(blk_key);
	return err;
}
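
/*
 * For illustration, the ACQUIRE side mentioned above looks roughly like the
 * following (a sketch; the real helper lives in fscrypt_private.h):
 *
 *	if (smp_load_acquire(&prep_key->blk_key))
 *		return true;	// key fully initialized; safe to use
 *
 * A reader that observes a non-NULL ->blk_key is thereby guaranteed to also
 * observe all of the initialization done before the smp_store_release().
 */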

void fscrypt_destroy_inline_crypt_key(struct fscrypt_prepared_key *prep_key)
{
	struct fscrypt_blk_crypto_key *blk_key = prep_key->blk_key;
	int i;

	if (blk_key) {
		for (i = 0; i < blk_key->num_devs; i++) {
			blk_crypto_evict_key(blk_key->devs[i], &blk_key->base);
			blk_put_queue(blk_key->devs[i]);
		}
		kfree_sensitive(blk_key);
	}
}

bool __fscrypt_inode_uses_inline_crypto(const struct inode *inode)
{
	return inode->i_crypt_info->ci_inlinecrypt;
}
EXPORT_SYMBOL_GPL(__fscrypt_inode_uses_inline_crypto);

static void fscrypt_generate_dun(const struct fscrypt_info *ci, u64 lblk_num,
				 u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE])
{
	union fscrypt_iv iv;
	int i;

	fscrypt_generate_iv(&iv, lblk_num, ci);

	BUILD_BUG_ON(FSCRYPT_MAX_IV_SIZE > BLK_CRYPTO_MAX_IV_SIZE);
	memset(dun, 0, BLK_CRYPTO_MAX_IV_SIZE);
	for (i = 0; i < ci->ci_mode->ivsize / sizeof(dun[0]); i++)
		dun[i] = le64_to_cpu(iv.dun[i]);
}
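
/*
 * Worked example (assuming the IV_INO_LBLK_64 policy): for inode number 0x10
 * and lblk_num 5, fscrypt_generate_iv() packs the two 32-bit values into one
 * 64-bit IV word, so dun[0] becomes 0x0000001000000005 and the remaining
 * dun[] words stay zero.
 */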

/**
 * fscrypt_set_bio_crypt_ctx() - prepare a file contents bio for inline crypto
 * @bio: a bio which will eventually be submitted to the file
 * @inode: the file's inode
 * @first_lblk: the first file logical block number in the I/O
 * @gfp_mask: memory allocation flags - these must be a waiting mask so that
 *	      bio_crypt_set_ctx can't fail.
 *
 * If the contents of the file should be encrypted (or decrypted) with inline
 * encryption, then assign the appropriate encryption context to the bio.
 *
 * Normally the bio should be newly allocated (i.e. no pages added yet), as
 * otherwise fscrypt_mergeable_bio() won't work as intended.
 *
 * The encryption context will be freed automatically when the bio is freed.
 */
void fscrypt_set_bio_crypt_ctx(struct bio *bio, const struct inode *inode,
			       u64 first_lblk, gfp_t gfp_mask)
{
	const struct fscrypt_info *ci;
	u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE];

	if (!fscrypt_inode_uses_inline_crypto(inode))
		return;
	ci = inode->i_crypt_info;

	fscrypt_generate_dun(ci, first_lblk, dun);
	bio_crypt_set_ctx(bio, &ci->ci_enc_key.blk_key->base, dun, gfp_mask);
}
EXPORT_SYMBOL_GPL(fscrypt_set_bio_crypt_ctx);
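
/*
 * Usage sketch (hypothetical caller; real callers live in the filesystems):
 *
 *	bio = bio_alloc(...);
 *	fscrypt_set_bio_crypt_ctx(bio, inode, first_lblk, GFP_NOIO);
 *	// ... add the pages starting at first_lblk, then submit the bio ...
 *
 * GFP_NOIO is a waiting mask, so bio_crypt_set_ctx() cannot fail here.
 */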

/* Extract the inode and logical block number from a buffer_head. */
static bool bh_get_inode_and_lblk_num(const struct buffer_head *bh,
				      const struct inode **inode_ret,
				      u64 *lblk_num_ret)
{
	struct page *page = bh->b_page;
	const struct address_space *mapping;
	const struct inode *inode;

	/*
	 * The ext4 journal (jbd2) can submit a buffer_head it directly created
	 * for a non-pagecache page.  fscrypt doesn't care about these.
	 */
	mapping = page_mapping(page);
	if (!mapping)
		return false;
	inode = mapping->host;

	*inode_ret = inode;
	*lblk_num_ret = ((u64)page->index << (PAGE_SHIFT - inode->i_blkbits)) +
			(bh_offset(bh) >> inode->i_blkbits);
	return true;
}
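
/*
 * Worked example (assuming 4096-byte pages and 1024-byte blocks, so
 * i_blkbits == 10): for page->index == 3 and bh_offset(bh) == 2048, the
 * logical block number is (3 << 2) + (2048 >> 10) = 12 + 2 = 14.
 */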

/**
 * fscrypt_set_bio_crypt_ctx_bh() - prepare a file contents bio for inline
 *				    crypto
 * @bio: a bio which will eventually be submitted to the file
 * @first_bh: the first buffer_head for which I/O will be submitted
 * @gfp_mask: memory allocation flags
 *
 * Same as fscrypt_set_bio_crypt_ctx(), except this takes a buffer_head instead
 * of an inode and block number directly.
 */
void fscrypt_set_bio_crypt_ctx_bh(struct bio *bio,
				  const struct buffer_head *first_bh,
				  gfp_t gfp_mask)
{
	const struct inode *inode;
	u64 first_lblk;

	if (bh_get_inode_and_lblk_num(first_bh, &inode, &first_lblk))
		fscrypt_set_bio_crypt_ctx(bio, inode, first_lblk, gfp_mask);
}
EXPORT_SYMBOL_GPL(fscrypt_set_bio_crypt_ctx_bh);

/**
 * fscrypt_mergeable_bio() - test whether data can be added to a bio
 * @bio: the bio being built up
 * @inode: the inode for the next part of the I/O
 * @next_lblk: the next file logical block number in the I/O
 *
 * When building a bio which may contain data which should undergo inline
 * encryption (or decryption) via fscrypt, filesystems should call this function
 * to ensure that the resulting bio contains only contiguous data unit numbers.
 * This will return false if the next part of the I/O cannot be merged with the
 * bio because either the encryption key would be different or the encryption
 * data unit numbers would be discontiguous.
 *
 * fscrypt_set_bio_crypt_ctx() must have already been called on the bio.
 *
 * This function isn't required in cases where crypto-mergeability is ensured in
 * another way, such as I/O targeting only a single file (and thus a single key)
 * combined with fscrypt_limit_io_blocks() to ensure DUN contiguity.
 *
 * Return: true iff the I/O is mergeable
 */
bool fscrypt_mergeable_bio(struct bio *bio, const struct inode *inode,
			   u64 next_lblk)
{
	const struct bio_crypt_ctx *bc = bio->bi_crypt_context;
	u64 next_dun[BLK_CRYPTO_DUN_ARRAY_SIZE];

	if (!!bc != fscrypt_inode_uses_inline_crypto(inode))
		return false;
	if (!bc)
		return true;

	/*
	 * Comparing the key pointers is good enough, as all I/O for each key
	 * uses the same pointer.  I.e., there's currently no need to support
	 * merging requests where the keys are the same but the pointers differ.
	 */
	if (bc->bc_key != &inode->i_crypt_info->ci_enc_key.blk_key->base)
		return false;

	fscrypt_generate_dun(inode->i_crypt_info, next_lblk, next_dun);
	return bio_crypt_dun_is_contiguous(bc, bio->bi_iter.bi_size, next_dun);
}
EXPORT_SYMBOL_GPL(fscrypt_mergeable_bio);
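
/*
 * Usage sketch (hypothetical caller): a filesystem extending a bio block by
 * block would typically check mergeability before each addition:
 *
 *	if (!fscrypt_mergeable_bio(bio, inode, next_lblk)) {
 *		submit_bio(bio);
 *		// start a new bio and call fscrypt_set_bio_crypt_ctx() on it
 *	}
 */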

/**
 * fscrypt_mergeable_bio_bh() - test whether data can be added to a bio
 * @bio: the bio being built up
 * @next_bh: the next buffer_head for which I/O will be submitted
 *
 * Same as fscrypt_mergeable_bio(), except this takes a buffer_head instead of
 * an inode and block number directly.
 *
 * Return: true iff the I/O is mergeable
 */
bool fscrypt_mergeable_bio_bh(struct bio *bio,
			      const struct buffer_head *next_bh)
{
	const struct inode *inode;
	u64 next_lblk;

	if (!bh_get_inode_and_lblk_num(next_bh, &inode, &next_lblk))
		return !bio->bi_crypt_context;

	return fscrypt_mergeable_bio(bio, inode, next_lblk);
}
EXPORT_SYMBOL_GPL(fscrypt_mergeable_bio_bh);

/**
 * fscrypt_dio_supported() - check whether a DIO (direct I/O) request is
 *			     supported as far as encryption is concerned
 * @iocb: the file and position the I/O is targeting
 * @iter: the I/O data segment(s)
 *
 * Return: %true if there are no encryption constraints that prevent DIO from
 *	   being supported; %false if DIO is unsupported.  (Note that in the
 *	   %true case, the filesystem might have other, non-encryption-related
 *	   constraints that prevent DIO from actually being supported.)
 */
bool fscrypt_dio_supported(struct kiocb *iocb, struct iov_iter *iter)
{
	const struct inode *inode = file_inode(iocb->ki_filp);
	const unsigned int blocksize = i_blocksize(inode);

	/* If the file is unencrypted, no veto from us. */
	if (!fscrypt_needs_contents_encryption(inode))
		return true;

	/* We only support DIO with inline crypto, not fs-layer crypto. */
	if (!fscrypt_inode_uses_inline_crypto(inode))
		return false;

	/*
	 * Since the granularity of encryption is filesystem blocks, the file
	 * position and total I/O length must be aligned to the filesystem block
	 * size -- not just to the block device's logical block size as is
	 * traditionally the case for DIO on many filesystems.
	 *
	 * We require that the user-provided memory buffers be filesystem block
	 * aligned too.  It is simpler to have a single alignment value required
	 * for all properties of the I/O, as is normally the case for DIO.
	 * Also, allowing less aligned buffers would imply that data units could
	 * cross bvecs, which would greatly complicate the I/O stack, which
	 * assumes that bios can be split at any bvec boundary.
	 */
	if (!IS_ALIGNED(iocb->ki_pos | iov_iter_alignment(iter), blocksize))
		return false;

	return true;
}
EXPORT_SYMBOL_GPL(fscrypt_dio_supported);
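
/*
 * Worked example (assuming a 4096-byte filesystem block size): a DIO read at
 * ki_pos == 8192 with a 16 KiB, 4 KiB-aligned user buffer passes the check
 * above, whereas ki_pos == 512 fails it even though 512 bytes may satisfy
 * the block device's logical block size, the traditional DIO alignment
 * requirement.
 */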

/**
 * fscrypt_limit_io_blocks() - limit I/O blocks to avoid discontiguous DUNs
 * @inode: the file on which I/O is being done
 * @lblk: the block at which the I/O is being started from
 * @nr_blocks: the number of blocks we want to submit starting at @lblk
 *
 * Determine the limit to the number of blocks that can be submitted in a bio
 * targeting @lblk without causing a data unit number (DUN) discontiguity.
 *
 * This is normally just @nr_blocks, as normally the DUNs just increment along
 * with the logical blocks.  (Or the file is not encrypted.)
 *
 * In rare cases, fscrypt can be using an IV generation method that allows the
 * DUN to wrap around within logically contiguous blocks, and that wraparound
 * will occur.  If this happens, a value less than @nr_blocks will be returned
 * so that the wraparound doesn't occur in the middle of a bio, which would
 * cause encryption/decryption to produce wrong results.
 *
 * Return: the actual number of blocks that can be submitted
 */
u64 fscrypt_limit_io_blocks(const struct inode *inode, u64 lblk, u64 nr_blocks)
{
	const struct fscrypt_info *ci;
	u32 dun;

	if (!fscrypt_inode_uses_inline_crypto(inode))
		return nr_blocks;

	if (nr_blocks <= 1)
		return nr_blocks;

	ci = inode->i_crypt_info;
	if (!(fscrypt_policy_flags(&ci->ci_policy) &
	      FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32))
		return nr_blocks;

	/* With IV_INO_LBLK_32, the DUN can wrap around from U32_MAX to 0. */

	dun = ci->ci_hashed_ino + lblk;

	return min_t(u64, nr_blocks, (u64)U32_MAX + 1 - dun);
}
EXPORT_SYMBOL_GPL(fscrypt_limit_io_blocks);
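
/*
 * Worked example: if ci_hashed_ino + lblk truncates to dun == 0xfffffffe and
 * the caller wants nr_blocks == 8, the return value is
 * min(8, 0x100000000 - 0xfffffffe) = 2, so the blocks at DUNs 0xfffffffe and
 * 0xffffffff share a bio and the wraparound to DUN 0 starts a new one.
 */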