// SPDX-License-Identifier: GPL-2.0
/*
 * Inline encryption support for fscrypt
 *
 * Copyright 2019 Google LLC
 */

/*
 * With "inline encryption", the block layer handles the decryption/encryption
 * as part of the bio, instead of the filesystem doing the crypto itself via
 * the kernel's crypto API.  See Documentation/block/inline-encryption.rst.
 * fscrypt still provides the key and IV to use.
 */

#include <linux/blk-crypto-profile.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/uio.h>

#include "fscrypt_private.h"

struct fscrypt_blk_crypto_key {
	struct blk_crypto_key base;
	int num_devs;
	struct request_queue *devs[];
};

static int fscrypt_get_num_devices(struct super_block *sb)
{
	if (sb->s_cop->get_num_devices)
		return sb->s_cop->get_num_devices(sb);
	return 1;
}

static void fscrypt_get_devices(struct super_block *sb, int num_devs,
				struct request_queue **devs)
{
	if (num_devs == 1)
		devs[0] = bdev_get_queue(sb->s_bdev);
	else
		sb->s_cop->get_devices(sb, devs);
}

static unsigned int fscrypt_get_dun_bytes(const struct fscrypt_info *ci)
{
	struct super_block *sb = ci->ci_inode->i_sb;
	unsigned int flags = fscrypt_policy_flags(&ci->ci_policy);
	int ino_bits = 64, lblk_bits = 64;

	if (flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY)
		return offsetofend(union fscrypt_iv, nonce);

	if (flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64)
		return sizeof(__le64);

	if (flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32)
		return sizeof(__le32);

	/* Default case: IVs are just the file logical block number */
	if (sb->s_cop->get_ino_and_lblk_bits)
		sb->s_cop->get_ino_and_lblk_bits(sb, &ino_bits, &lblk_bits);
	return DIV_ROUND_UP(lblk_bits, 8);
}

/*
 * Log a message when starting to use blk-crypto (native) or blk-crypto-fallback
 * for an encryption mode for the first time.  This is the blk-crypto
 * counterpart to the message logged when starting to use the crypto API for the
 * first time.  A limitation is that these messages don't convey which specific
 * filesystems or files are using each implementation.  However, *usually*
 * systems use just one implementation per mode, which makes these messages
 * helpful for debugging problems where the "wrong" implementation is used.
 */
static void fscrypt_log_blk_crypto_impl(struct fscrypt_mode *mode,
					struct request_queue **devs,
					int num_devs,
					const struct blk_crypto_config *cfg)
{
	int i;

	for (i = 0; i < num_devs; i++) {
		if (!IS_ENABLED(CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK) ||
		    __blk_crypto_cfg_supported(devs[i]->crypto_profile, cfg)) {
			if (!xchg(&mode->logged_blk_crypto_native, 1))
				pr_info("fscrypt: %s using blk-crypto (native)\n",
					mode->friendly_name);
		} else if (!xchg(&mode->logged_blk_crypto_fallback, 1)) {
			pr_info("fscrypt: %s using blk-crypto-fallback\n",
				mode->friendly_name);
		}
	}
}

/* Enable inline encryption for this file if supported. */
int fscrypt_select_encryption_impl(struct fscrypt_info *ci)
{
	const struct inode *inode = ci->ci_inode;
	struct super_block *sb = inode->i_sb;
	struct blk_crypto_config crypto_cfg;
	int num_devs;
	struct request_queue **devs;
	int i;

	/* The file must need contents encryption, not filenames encryption */
	if (!S_ISREG(inode->i_mode))
		return 0;

	/* The crypto mode must have a blk-crypto counterpart */
	if (ci->ci_mode->blk_crypto_mode == BLK_ENCRYPTION_MODE_INVALID)
		return 0;

	/* The filesystem must be mounted with -o inlinecrypt */
	if (!(sb->s_flags & SB_INLINECRYPT))
		return 0;

	/*
	 * When a page contains multiple logically contiguous filesystem blocks,
	 * some filesystem code only calls fscrypt_mergeable_bio() for the first
	 * block in the page.  This is fine for most of fscrypt's IV generation
	 * strategies, where contiguous blocks imply contiguous IVs.  But it
	 * doesn't work with IV_INO_LBLK_32.  For now, simply exclude
	 * IV_INO_LBLK_32 with blocksize != PAGE_SIZE from inline encryption.
	 */
	if ((fscrypt_policy_flags(&ci->ci_policy) &
	     FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32) &&
	    sb->s_blocksize != PAGE_SIZE)
		return 0;

	/*
	 * On all the filesystem's devices, blk-crypto must support the crypto
	 * configuration that the file would use.
	 */
	crypto_cfg.crypto_mode = ci->ci_mode->blk_crypto_mode;
	crypto_cfg.data_unit_size = sb->s_blocksize;
	crypto_cfg.dun_bytes = fscrypt_get_dun_bytes(ci);
	num_devs = fscrypt_get_num_devices(sb);
	devs = kmalloc_array(num_devs, sizeof(*devs), GFP_KERNEL);
	if (!devs)
		return -ENOMEM;
	fscrypt_get_devices(sb, num_devs, devs);

	for (i = 0; i < num_devs; i++) {
		if (!blk_crypto_config_supported(devs[i], &crypto_cfg))
			goto out_free_devs;
	}

	fscrypt_log_blk_crypto_impl(ci->ci_mode, devs, num_devs, &crypto_cfg);

	ci->ci_inlinecrypt = true;
out_free_devs:
	kfree(devs);

	return 0;
}

int fscrypt_prepare_inline_crypt_key(struct fscrypt_prepared_key *prep_key,
				     const u8 *raw_key,
				     const struct fscrypt_info *ci)
{
	const struct inode *inode = ci->ci_inode;
	struct super_block *sb = inode->i_sb;
	enum blk_crypto_mode_num crypto_mode = ci->ci_mode->blk_crypto_mode;
	int num_devs = fscrypt_get_num_devices(sb);
	int queue_refs = 0;
	struct fscrypt_blk_crypto_key *blk_key;
	int err;
	int i;

	blk_key = kzalloc(struct_size(blk_key, devs, num_devs), GFP_KERNEL);
	if (!blk_key)
		return -ENOMEM;

	blk_key->num_devs = num_devs;
	fscrypt_get_devices(sb, num_devs, blk_key->devs);

	err = blk_crypto_init_key(&blk_key->base, raw_key, crypto_mode,
				  fscrypt_get_dun_bytes(ci), sb->s_blocksize);
	if (err) {
		fscrypt_err(inode, "error %d initializing blk-crypto key", err);
		goto fail;
	}

	/*
	 * We have to start using blk-crypto on all the filesystem's devices.
	 * We also have to save all the request_queue's for later so that the
	 * key can be evicted from them.  This is needed because some keys
	 * aren't destroyed until after the filesystem was already unmounted
	 * (namely, the per-mode keys in struct fscrypt_master_key).
	 */
	for (i = 0; i < num_devs; i++) {
		if (!blk_get_queue(blk_key->devs[i])) {
			fscrypt_err(inode, "couldn't get request_queue");
			err = -EAGAIN;
			goto fail;
		}
		queue_refs++;

		err = blk_crypto_start_using_key(&blk_key->base,
						 blk_key->devs[i]);
		if (err) {
			fscrypt_err(inode,
				    "error %d starting to use blk-crypto", err);
			goto fail;
		}
	}
	/*
	 * Pairs with the smp_load_acquire() in fscrypt_is_key_prepared().
	 * I.e., here we publish ->blk_key with a RELEASE barrier so that
	 * concurrent tasks can ACQUIRE it.  Note that this concurrency is only
	 * possible for per-mode keys, not for per-file keys.
	 */
	smp_store_release(&prep_key->blk_key, blk_key);
	return 0;

fail:
	for (i = 0; i < queue_refs; i++)
		blk_put_queue(blk_key->devs[i]);
	kfree_sensitive(blk_key);
	return err;
}
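
/*
 * Illustrative note (a paraphrase, not a verbatim copy of the header): the
 * ACQUIRE side that the smp_store_release() above pairs with lives in
 * fscrypt_is_key_prepared() in fscrypt_private.h, and for inline encryption
 * it amounts to roughly:
 *
 *	return smp_load_acquire(&prep_key->blk_key) != NULL;
 *
 * The release/acquire pairing guarantees that a task which observes a
 * non-NULL ->blk_key also observes the fully initialized key contents.
 */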

void fscrypt_destroy_inline_crypt_key(struct fscrypt_prepared_key *prep_key)
{
	struct fscrypt_blk_crypto_key *blk_key = prep_key->blk_key;
	int i;

	if (blk_key) {
		for (i = 0; i < blk_key->num_devs; i++) {
			blk_crypto_evict_key(blk_key->devs[i], &blk_key->base);
			blk_put_queue(blk_key->devs[i]);
		}
		kfree_sensitive(blk_key);
	}
}

bool __fscrypt_inode_uses_inline_crypto(const struct inode *inode)
{
	return inode->i_crypt_info->ci_inlinecrypt;
}
EXPORT_SYMBOL_GPL(__fscrypt_inode_uses_inline_crypto);

static void fscrypt_generate_dun(const struct fscrypt_info *ci, u64 lblk_num,
				 u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE])
{
	union fscrypt_iv iv;
	int i;

	fscrypt_generate_iv(&iv, lblk_num, ci);

	BUILD_BUG_ON(FSCRYPT_MAX_IV_SIZE > BLK_CRYPTO_MAX_IV_SIZE);
	memset(dun, 0, BLK_CRYPTO_MAX_IV_SIZE);
	for (i = 0; i < ci->ci_mode->ivsize / sizeof(dun[0]); i++)
		dun[i] = le64_to_cpu(iv.dun[i]);
}

/**
 * fscrypt_set_bio_crypt_ctx() - prepare a file contents bio for inline crypto
 * @bio: a bio which will eventually be submitted to the file
 * @inode: the file's inode
 * @first_lblk: the first file logical block number in the I/O
 * @gfp_mask: memory allocation flags - these must be a waiting mask so that
 *	      bio_crypt_set_ctx can't fail.
 *
 * If the contents of the file should be encrypted (or decrypted) with inline
 * encryption, then assign the appropriate encryption context to the bio.
 *
 * Normally the bio should be newly allocated (i.e. no pages added yet), as
 * otherwise fscrypt_mergeable_bio() won't work as intended.
 *
 * The encryption context will be freed automatically when the bio is freed.
 */
void fscrypt_set_bio_crypt_ctx(struct bio *bio, const struct inode *inode,
			       u64 first_lblk, gfp_t gfp_mask)
{
	const struct fscrypt_info *ci;
	u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE];

	if (!fscrypt_inode_uses_inline_crypto(inode))
		return;
	ci = inode->i_crypt_info;

	fscrypt_generate_dun(ci, first_lblk, dun);
	bio_crypt_set_ctx(bio, &ci->ci_enc_key.blk_key->base, dun, gfp_mask);
}
EXPORT_SYMBOL_GPL(fscrypt_set_bio_crypt_ctx);

/* Extract the inode and logical block number from a buffer_head. */
static bool bh_get_inode_and_lblk_num(const struct buffer_head *bh,
				      const struct inode **inode_ret,
				      u64 *lblk_num_ret)
{
	struct page *page = bh->b_page;
	const struct address_space *mapping;
	const struct inode *inode;

	/*
	 * The ext4 journal (jbd2) can submit a buffer_head it directly created
	 * for a non-pagecache page.  fscrypt doesn't care about these.
	 */
	mapping = page_mapping(page);
	if (!mapping)
		return false;
	inode = mapping->host;

	*inode_ret = inode;
	*lblk_num_ret = ((u64)page->index << (PAGE_SHIFT - inode->i_blkbits)) +
			(bh_offset(bh) >> inode->i_blkbits);
	return true;
}

/**
 * fscrypt_set_bio_crypt_ctx_bh() - prepare a file contents bio for inline
 *				    crypto
 * @bio: a bio which will eventually be submitted to the file
 * @first_bh: the first buffer_head for which I/O will be submitted
 * @gfp_mask: memory allocation flags
 *
 * Same as fscrypt_set_bio_crypt_ctx(), except this takes a buffer_head instead
 * of an inode and block number directly.
 */
void fscrypt_set_bio_crypt_ctx_bh(struct bio *bio,
				  const struct buffer_head *first_bh,
				  gfp_t gfp_mask)
{
	const struct inode *inode;
	u64 first_lblk;

	if (bh_get_inode_and_lblk_num(first_bh, &inode, &first_lblk))
		fscrypt_set_bio_crypt_ctx(bio, inode, first_lblk, gfp_mask);
}
EXPORT_SYMBOL_GPL(fscrypt_set_bio_crypt_ctx_bh);

/**
 * fscrypt_mergeable_bio() - test whether data can be added to a bio
 * @bio: the bio being built up
 * @inode: the inode for the next part of the I/O
 * @next_lblk: the next file logical block number in the I/O
 *
 * When building a bio which may contain data which should undergo inline
 * encryption (or decryption) via fscrypt, filesystems should call this function
 * to ensure that the resulting bio contains only contiguous data unit numbers.
 * This will return false if the next part of the I/O cannot be merged with the
 * bio because either the encryption key would be different or the encryption
 * data unit numbers would be discontiguous.
 *
 * fscrypt_set_bio_crypt_ctx() must have already been called on the bio.
 *
 * This function isn't required in cases where crypto-mergeability is ensured in
 * another way, such as I/O targeting only a single file (and thus a single key)
 * combined with fscrypt_limit_io_blocks() to ensure DUN contiguity.
 *
 * Return: true iff the I/O is mergeable
 */
bool fscrypt_mergeable_bio(struct bio *bio, const struct inode *inode,
			   u64 next_lblk)
{
	const struct bio_crypt_ctx *bc = bio->bi_crypt_context;
	u64 next_dun[BLK_CRYPTO_DUN_ARRAY_SIZE];

	if (!!bc != fscrypt_inode_uses_inline_crypto(inode))
		return false;
	if (!bc)
		return true;

	/*
	 * Comparing the key pointers is good enough, as all I/O for each key
	 * uses the same pointer.  I.e., there's currently no need to support
	 * merging requests where the keys are the same but the pointers differ.
	 */
	if (bc->bc_key != &inode->i_crypt_info->ci_enc_key.blk_key->base)
		return false;

	fscrypt_generate_dun(inode->i_crypt_info, next_lblk, next_dun);
	return bio_crypt_dun_is_contiguous(bc, bio->bi_iter.bi_size, next_dun);
}
EXPORT_SYMBOL_GPL(fscrypt_mergeable_bio);

/**
 * fscrypt_mergeable_bio_bh() - test whether data can be added to a bio
 * @bio: the bio being built up
 * @next_bh: the next buffer_head for which I/O will be submitted
 *
 * Same as fscrypt_mergeable_bio(), except this takes a buffer_head instead of
 * an inode and block number directly.
 *
 * Return: true iff the I/O is mergeable
 */
bool fscrypt_mergeable_bio_bh(struct bio *bio,
			      const struct buffer_head *next_bh)
{
	const struct inode *inode;
	u64 next_lblk;

	if (!bh_get_inode_and_lblk_num(next_bh, &inode, &next_lblk))
		return !bio->bi_crypt_context;

	return fscrypt_mergeable_bio(bio, inode, next_lblk);
}
EXPORT_SYMBOL_GPL(fscrypt_mergeable_bio_bh);
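
/*
 * Illustrative sketch (not part of this file's logic): roughly how a
 * filesystem's bio-building code can combine the helpers above.  The names
 * blocks_to_read, next_lblk(), and add_block_to_bio() are hypothetical
 * stand-ins for filesystem-specific code; the overall pattern is loosely
 * modeled on how filesystems such as ext4 and f2fs build bios for encrypted
 * file contents.
 *
 *	bio = bio_alloc(...);
 *	fscrypt_set_bio_crypt_ctx(bio, inode, first_lblk, GFP_NOFS);
 *	for each block in blocks_to_read {
 *		if (!fscrypt_mergeable_bio(bio, inode, next_lblk())) {
 *			(adding this block would mix keys or make the DUNs
 *			 discontiguous, so submit and start a new bio)
 *			submit_bio(bio);
 *			bio = bio_alloc(...);
 *			fscrypt_set_bio_crypt_ctx(bio, inode, next_lblk(),
 *						  GFP_NOFS);
 *		}
 *		add_block_to_bio(bio, block);
 *	}
 *	submit_bio(bio);
 *
 * blk-crypto then carries the key and starting DUN from the bio's
 * bio_crypt_ctx down to the hardware (or to blk-crypto-fallback), so the
 * filesystem itself never touches the ciphertext.
 */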

/**
 * fscrypt_dio_supported() - check whether a DIO (direct I/O) request is
 *			     supported as far as encryption is concerned
 * @iocb: the file and position the I/O is targeting
 * @iter: the I/O data segment(s)
 *
 * Return: %true if there are no encryption constraints that prevent DIO from
 *	   being supported; %false if DIO is unsupported.  (Note that in the
 *	   %true case, the filesystem might have other, non-encryption-related
 *	   constraints that prevent DIO from actually being supported.)
 */
bool fscrypt_dio_supported(struct kiocb *iocb, struct iov_iter *iter)
{
	const struct inode *inode = file_inode(iocb->ki_filp);
	const unsigned int blocksize = i_blocksize(inode);

	/* If the file is unencrypted, no veto from us. */
	if (!fscrypt_needs_contents_encryption(inode))
		return true;

	/* We only support DIO with inline crypto, not fs-layer crypto. */
	if (!fscrypt_inode_uses_inline_crypto(inode))
		return false;

	/*
	 * Since the granularity of encryption is filesystem blocks, the file
	 * position and total I/O length must be aligned to the filesystem block
	 * size -- not just to the block device's logical block size as is
	 * traditionally the case for DIO on many filesystems.
	 *
	 * We require that the user-provided memory buffers be filesystem block
	 * aligned too.  It is simpler to have a single alignment value required
	 * for all properties of the I/O, as is normally the case for DIO.
	 * Also, allowing less aligned buffers would imply that data units could
	 * cross bvecs, which would greatly complicate the I/O stack, which
	 * assumes that bios can be split at any bvec boundary.
	 */
	if (!IS_ALIGNED(iocb->ki_pos | iov_iter_alignment(iter), blocksize))
		return false;

	return true;
}
EXPORT_SYMBOL_GPL(fscrypt_dio_supported);

/**
 * fscrypt_limit_io_blocks() - limit I/O blocks to avoid discontiguous DUNs
 * @inode: the file on which I/O is being done
 * @lblk: the block at which the I/O is being started from
 * @nr_blocks: the number of blocks we want to submit starting at @lblk
 *
 * Determine the limit to the number of blocks that can be submitted in a bio
 * targeting @lblk without causing a data unit number (DUN) discontiguity.
 *
 * This is normally just @nr_blocks, as normally the DUNs just increment along
 * with the logical blocks.  (Or the file is not encrypted.)
 *
 * In rare cases, fscrypt can be using an IV generation method that allows the
 * DUN to wrap around within logically contiguous blocks, and that wraparound
 * will occur.  If this happens, a value less than @nr_blocks will be returned
 * so that the wraparound doesn't occur in the middle of a bio, which would
 * cause encryption/decryption to produce wrong results.
 *
 * Return: the actual number of blocks that can be submitted
 */
u64 fscrypt_limit_io_blocks(const struct inode *inode, u64 lblk, u64 nr_blocks)
{
	const struct fscrypt_info *ci;
	u32 dun;

	if (!fscrypt_inode_uses_inline_crypto(inode))
		return nr_blocks;

	if (nr_blocks <= 1)
		return nr_blocks;

	ci = inode->i_crypt_info;
	if (!(fscrypt_policy_flags(&ci->ci_policy) &
	      FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32))
		return nr_blocks;

	/* With IV_INO_LBLK_32, the DUN can wrap around from U32_MAX to 0. */

	dun = ci->ci_hashed_ino + lblk;

	return min_t(u64, nr_blocks, (u64)U32_MAX + 1 - dun);
}
EXPORT_SYMBOL_GPL(fscrypt_limit_io_blocks);
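
/*
 * Worked example for fscrypt_limit_io_blocks() (illustrative numbers, not
 * taken from any particular workload): with IV_INO_LBLK_32, the 32-bit DUN is
 * (ci_hashed_ino + lblk) mod 2^32.  Suppose that sum works out to 0xfffffffe
 * and the caller wants to submit nr_blocks = 8.  Only DUNs 0xfffffffe and
 * 0xffffffff are contiguous; the next block would wrap around to DUN 0.  So
 * the function returns min(8, U32_MAX + 1 - 0xfffffffe) = 2, and the caller
 * must start a new bio for the remaining blocks.
 */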