// SPDX-License-Identifier: GPL-2.0
/*
 * Inline encryption support for fscrypt
 *
 * Copyright 2019 Google LLC
 */

/*
 * With "inline encryption", the block layer handles the decryption/encryption
 * as part of the bio, instead of the filesystem doing the crypto itself via
 * the crypto API.  See Documentation/block/inline-encryption.rst.  fscrypt
 * still provides the key and IV to use.
 */

#include <linux/blk-crypto.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/sched/mm.h>

#include "fscrypt_private.h"

struct fscrypt_blk_crypto_key {
	struct blk_crypto_key base;
	int num_devs;
	struct request_queue *devs[];
};

static int fscrypt_get_num_devices(struct super_block *sb)
{
	if (sb->s_cop->get_num_devices)
		return sb->s_cop->get_num_devices(sb);
	return 1;
}

static void fscrypt_get_devices(struct super_block *sb, int num_devs,
				struct request_queue **devs)
{
	if (num_devs == 1)
		devs[0] = bdev_get_queue(sb->s_bdev);
	else
		sb->s_cop->get_devices(sb, devs);
}

static unsigned int fscrypt_get_dun_bytes(const struct fscrypt_info *ci)
{
	struct super_block *sb = ci->ci_inode->i_sb;
	unsigned int flags = fscrypt_policy_flags(&ci->ci_policy);
	int ino_bits = 64, lblk_bits = 64;

	if (flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY)
		return offsetofend(union fscrypt_iv, nonce);

	if (flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64)
		return sizeof(__le64);

	if (flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32)
		return sizeof(__le32);

	/* Default case: IVs are just the file logical block number */
	if (sb->s_cop->get_ino_and_lblk_bits)
		sb->s_cop->get_ino_and_lblk_bits(sb, &ino_bits, &lblk_bits);
	return DIV_ROUND_UP(lblk_bits, 8);
}

/* Enable inline encryption for this file if supported. */
int fscrypt_select_encryption_impl(struct fscrypt_info *ci)
{
	const struct inode *inode = ci->ci_inode;
	struct super_block *sb = inode->i_sb;
	struct blk_crypto_config crypto_cfg;
	int num_devs;
	struct request_queue **devs;
	int i;

	/* The file must need contents encryption, not filenames encryption */
	if (!fscrypt_needs_contents_encryption(inode))
		return 0;

	/* The crypto mode must have a blk-crypto counterpart */
	if (ci->ci_mode->blk_crypto_mode == BLK_ENCRYPTION_MODE_INVALID)
		return 0;

	/* The filesystem must be mounted with -o inlinecrypt */
	if (!(sb->s_flags & SB_INLINECRYPT))
		return 0;

	/*
	 * When a page contains multiple logically contiguous filesystem
	 * blocks, some filesystem code only calls fscrypt_mergeable_bio() for
	 * the first block in the page.  This is fine for most of fscrypt's IV
	 * generation strategies, where contiguous blocks imply contiguous
	 * IVs.  But it doesn't work with IV_INO_LBLK_32.  For now, simply
	 * exclude IV_INO_LBLK_32 with blocksize != PAGE_SIZE from inline
	 * encryption.
	 */
	if ((fscrypt_policy_flags(&ci->ci_policy) &
	     FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32) &&
	    sb->s_blocksize != PAGE_SIZE)
		return 0;

	/*
	 * On all the filesystem's devices, blk-crypto must support the crypto
	 * configuration that the file would use.
	 */
	crypto_cfg.crypto_mode = ci->ci_mode->blk_crypto_mode;
	crypto_cfg.data_unit_size = sb->s_blocksize;
	crypto_cfg.dun_bytes = fscrypt_get_dun_bytes(ci);
	num_devs = fscrypt_get_num_devices(sb);
	devs = kmalloc_array(num_devs, sizeof(*devs), GFP_NOFS);
	if (!devs)
		return -ENOMEM;
	fscrypt_get_devices(sb, num_devs, devs);

	for (i = 0; i < num_devs; i++) {
		if (!blk_crypto_config_supported(devs[i], &crypto_cfg))
			goto out_free_devs;
	}

	ci->ci_inlinecrypt = true;
out_free_devs:
	kfree(devs);

	return 0;
}
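
/*
 * Example (an illustrative sketch, not part of fscrypt): the SB_INLINECRYPT
 * check above means each filesystem opts in by setting the flag while
 * parsing its mount options, along these lines:
 *
 *	case Opt_inlinecrypt:
 *		sb->s_flags |= SB_INLINECRYPT;
 *		break;
 *
 * A filesystem built without CONFIG_FS_ENCRYPTION_INLINE_CRYPT would reject
 * the mount option instead.
 */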

int fscrypt_prepare_inline_crypt_key(struct fscrypt_prepared_key *prep_key,
				     const u8 *raw_key,
				     const struct fscrypt_info *ci)
{
	const struct inode *inode = ci->ci_inode;
	struct super_block *sb = inode->i_sb;
	enum blk_crypto_mode_num crypto_mode = ci->ci_mode->blk_crypto_mode;
	int num_devs = fscrypt_get_num_devices(sb);
	int queue_refs = 0;
	struct fscrypt_blk_crypto_key *blk_key;
	int err;
	int i;
	unsigned int flags;

	blk_key = kzalloc(struct_size(blk_key, devs, num_devs), GFP_NOFS);
	if (!blk_key)
		return -ENOMEM;

	blk_key->num_devs = num_devs;
	fscrypt_get_devices(sb, num_devs, blk_key->devs);

	err = blk_crypto_init_key(&blk_key->base, raw_key, crypto_mode,
				  fscrypt_get_dun_bytes(ci), sb->s_blocksize);
	if (err) {
		fscrypt_err(inode, "error %d initializing blk-crypto key", err);
		goto fail;
	}

	/*
	 * We have to start using blk-crypto on all the filesystem's devices.
	 * We also have to save all the request_queue's for later so that the
	 * key can be evicted from them.  This is needed because some keys
	 * aren't destroyed until after the filesystem has already been
	 * unmounted (namely, the per-mode keys in struct fscrypt_master_key).
	 */
	for (i = 0; i < num_devs; i++) {
		if (!blk_get_queue(blk_key->devs[i])) {
			fscrypt_err(inode, "couldn't get request_queue");
			err = -EAGAIN;
			goto fail;
		}
		queue_refs++;

		flags = memalloc_nofs_save();
		err = blk_crypto_start_using_key(&blk_key->base,
						 blk_key->devs[i]);
		memalloc_nofs_restore(flags);
		if (err) {
			fscrypt_err(inode,
				    "error %d starting to use blk-crypto",
				    err);
			goto fail;
		}
	}
	/*
	 * Pairs with the smp_load_acquire() in fscrypt_is_key_prepared().
	 * I.e., here we publish ->blk_key with a RELEASE barrier so that
	 * concurrent tasks can ACQUIRE it.  Note that this concurrency is
	 * only possible for per-mode keys, not for per-file keys.
	 */
	smp_store_release(&prep_key->blk_key, blk_key);
	return 0;

fail:
	for (i = 0; i < queue_refs; i++)
		blk_put_queue(blk_key->devs[i]);
	kzfree(blk_key);
	return err;
}

void fscrypt_destroy_inline_crypt_key(struct fscrypt_prepared_key *prep_key)
{
	struct fscrypt_blk_crypto_key *blk_key = prep_key->blk_key;
	int i;

	if (blk_key) {
		for (i = 0; i < blk_key->num_devs; i++) {
			blk_crypto_evict_key(blk_key->devs[i], &blk_key->base);
			blk_put_queue(blk_key->devs[i]);
		}
		kzfree(blk_key);
	}
}

bool __fscrypt_inode_uses_inline_crypto(const struct inode *inode)
{
	return inode->i_crypt_info->ci_inlinecrypt;
}
EXPORT_SYMBOL_GPL(__fscrypt_inode_uses_inline_crypto);

static void fscrypt_generate_dun(const struct fscrypt_info *ci, u64 lblk_num,
				 u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE])
{
	union fscrypt_iv iv;
	int i;

	fscrypt_generate_iv(&iv, lblk_num, ci);

	BUILD_BUG_ON(FSCRYPT_MAX_IV_SIZE > BLK_CRYPTO_MAX_IV_SIZE);
	memset(dun, 0, BLK_CRYPTO_MAX_IV_SIZE);
	for (i = 0; i < ci->ci_mode->ivsize / sizeof(dun[0]); i++)
		dun[i] = le64_to_cpu(iv.dun[i]);
}

/**
 * fscrypt_set_bio_crypt_ctx() - prepare a file contents bio for inline crypto
 * @bio: a bio which will eventually be submitted to the file
 * @inode: the file's inode
 * @first_lblk: the first file logical block number in the I/O
 * @gfp_mask: memory allocation flags - these must be a waiting mask so that
 *	      bio_crypt_set_ctx can't fail.
 *
 * If the contents of the file should be encrypted (or decrypted) with inline
 * encryption, then assign the appropriate encryption context to the bio.
 *
 * Normally the bio should be newly allocated (i.e. no pages added yet), as
 * otherwise fscrypt_mergeable_bio() won't work as intended.
 *
 * The encryption context will be freed automatically when the bio is freed.
 */
void fscrypt_set_bio_crypt_ctx(struct bio *bio, const struct inode *inode,
			       u64 first_lblk, gfp_t gfp_mask)
{
	const struct fscrypt_info *ci = inode->i_crypt_info;
	u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE];

	if (!fscrypt_inode_uses_inline_crypto(inode))
		return;

	fscrypt_generate_dun(ci, first_lblk, dun);
	bio_crypt_set_ctx(bio, &ci->ci_enc_key.blk_key->base, dun, gfp_mask);
}
EXPORT_SYMBOL_GPL(fscrypt_set_bio_crypt_ctx);
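
/*
 * Example (an illustrative sketch, not part of fscrypt): a filesystem read
 * path might attach the encryption context while building a bio roughly as
 * follows.  example_alloc_read_bio() is a hypothetical helper; the other
 * calls are real block layer and fscrypt APIs.
 */
static struct bio *example_alloc_read_bio(const struct inode *inode,
					  u64 first_lblk, sector_t sector)
{
	/* With a waiting gfp_mask, bio_alloc() can't fail. */
	struct bio *bio = bio_alloc(GFP_KERNEL, BIO_MAX_PAGES);

	bio->bi_opf = REQ_OP_READ;
	bio->bi_iter.bi_sector = sector;
	/*
	 * Must be done before any pages are added, and with a waiting
	 * gfp_mask so that allocating the bio_crypt_ctx can't fail either.
	 */
	fscrypt_set_bio_crypt_ctx(bio, inode, first_lblk, GFP_KERNEL);
	return bio;
}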

/* Extract the inode and logical block number from a buffer_head. */
static bool bh_get_inode_and_lblk_num(const struct buffer_head *bh,
				      const struct inode **inode_ret,
				      u64 *lblk_num_ret)
{
	struct page *page = bh->b_page;
	const struct address_space *mapping;
	const struct inode *inode;

	/*
	 * The ext4 journal (jbd2) can submit a buffer_head it directly
	 * created for a non-pagecache page.  fscrypt doesn't care about
	 * these.
	 */
	mapping = page_mapping(page);
	if (!mapping)
		return false;
	inode = mapping->host;

	*inode_ret = inode;
	*lblk_num_ret = ((u64)page->index << (PAGE_SHIFT - inode->i_blkbits)) +
			(bh_offset(bh) >> inode->i_blkbits);
	return true;
}

/**
 * fscrypt_set_bio_crypt_ctx_bh() - prepare a file contents bio for inline
 *				    crypto
 * @bio: a bio which will eventually be submitted to the file
 * @first_bh: the first buffer_head for which I/O will be submitted
 * @gfp_mask: memory allocation flags
 *
 * Same as fscrypt_set_bio_crypt_ctx(), except this takes a buffer_head
 * instead of an inode and block number directly.
 */
void fscrypt_set_bio_crypt_ctx_bh(struct bio *bio,
				  const struct buffer_head *first_bh,
				  gfp_t gfp_mask)
{
	const struct inode *inode;
	u64 first_lblk;

	if (bh_get_inode_and_lblk_num(first_bh, &inode, &first_lblk))
		fscrypt_set_bio_crypt_ctx(bio, inode, first_lblk, gfp_mask);
}
EXPORT_SYMBOL_GPL(fscrypt_set_bio_crypt_ctx_bh);

/**
 * fscrypt_mergeable_bio() - test whether data can be added to a bio
 * @bio: the bio being built up
 * @inode: the inode for the next part of the I/O
 * @next_lblk: the next file logical block number in the I/O
 *
 * When building a bio which may contain data which should undergo inline
 * encryption (or decryption) via fscrypt, filesystems should call this
 * function to ensure that the resulting bio contains only contiguous data
 * unit numbers.  This will return false if the next part of the I/O cannot
 * be merged with the bio because either the encryption key would be
 * different or the encryption data unit numbers would be discontiguous.
 *
 * fscrypt_set_bio_crypt_ctx() must have already been called on the bio.
 *
 * Return: true iff the I/O is mergeable
 */
bool fscrypt_mergeable_bio(struct bio *bio, const struct inode *inode,
			   u64 next_lblk)
{
	const struct bio_crypt_ctx *bc = bio->bi_crypt_context;
	u64 next_dun[BLK_CRYPTO_DUN_ARRAY_SIZE];

	if (!!bc != fscrypt_inode_uses_inline_crypto(inode))
		return false;
	if (!bc)
		return true;

	/*
	 * Comparing the key pointers is good enough, as all I/O for each key
	 * uses the same pointer.  I.e., there's currently no need to support
	 * merging requests where the keys are the same but the pointers
	 * differ.
	 */
	if (bc->bc_key != &inode->i_crypt_info->ci_enc_key.blk_key->base)
		return false;

	fscrypt_generate_dun(inode->i_crypt_info, next_lblk, next_dun);
	return bio_crypt_dun_is_contiguous(bc, bio->bi_iter.bi_size, next_dun);
}
EXPORT_SYMBOL_GPL(fscrypt_mergeable_bio);
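
/*
 * Example (an illustrative sketch, not part of fscrypt): when building a
 * multi-block bio, a filesystem would check fscrypt_mergeable_bio() before
 * adding each additional block, submitting the bio and starting a new one
 * whenever the block can't be merged.  example_alloc_read_bio() is the
 * hypothetical helper sketched above.
 */
static struct bio *example_add_block(struct bio *bio, struct page *page,
				     const struct inode *inode, u64 lblk,
				     sector_t sector)
{
	if (bio && (!fscrypt_mergeable_bio(bio, inode, lblk) ||
		    bio_add_page(bio, page, PAGE_SIZE, 0) != PAGE_SIZE)) {
		/* Different key, discontiguous DUNs, or bio full: flush. */
		submit_bio(bio);
		bio = NULL;
	}
	if (!bio) {
		bio = example_alloc_read_bio(inode, lblk, sector);
		/* Can't fail on a freshly allocated bio. */
		bio_add_page(bio, page, PAGE_SIZE, 0);
	}
	return bio;
}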

/**
 * fscrypt_mergeable_bio_bh() - test whether data can be added to a bio
 * @bio: the bio being built up
 * @next_bh: the next buffer_head for which I/O will be submitted
 *
 * Same as fscrypt_mergeable_bio(), except this takes a buffer_head instead
 * of an inode and block number directly.
 *
 * Return: true iff the I/O is mergeable
 */
bool fscrypt_mergeable_bio_bh(struct bio *bio,
			      const struct buffer_head *next_bh)
{
	const struct inode *inode;
	u64 next_lblk;

	if (!bh_get_inode_and_lblk_num(next_bh, &inode, &next_lblk))
		return !bio->bi_crypt_context;

	return fscrypt_mergeable_bio(bio, inode, next_lblk);
}
EXPORT_SYMBOL_GPL(fscrypt_mergeable_bio_bh);
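
/*
 * Example (an illustrative sketch, not part of fscrypt): buffer_head-based
 * I/O paths (such as fs/buffer.c's) use the _bh variants instead, since the
 * inode and logical block number can be derived from the buffer_head
 * itself, e.g. roughly:
 *
 *	if (bio && !fscrypt_mergeable_bio_bh(bio, bh)) {
 *		submit_bio(bio);
 *		bio = NULL;
 *	}
 *	if (!bio) {
 *		bio = bio_alloc(GFP_NOIO, 1);
 *		fscrypt_set_bio_crypt_ctx_bh(bio, bh, GFP_NOIO);
 *	}
 */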