// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2019 Google LLC
 */

/*
 * Refer to Documentation/block/inline-encryption.rst for detailed explanation.
 */

#define pr_fmt(fmt) "blk-crypto: " fmt

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-crypto-profile.h>
#include <linux/module.h>
#include <linux/slab.h>

#include "blk-crypto-internal.h"

const struct blk_crypto_mode blk_crypto_modes[] = {
	[BLK_ENCRYPTION_MODE_AES_256_XTS] = {
		.cipher_str = "xts(aes)",
		.keysize = 64,
		.ivsize = 16,
	},
	[BLK_ENCRYPTION_MODE_AES_128_CBC_ESSIV] = {
		.cipher_str = "essiv(cbc(aes),sha256)",
		.keysize = 16,
		.ivsize = 16,
	},
	[BLK_ENCRYPTION_MODE_ADIANTUM] = {
		.cipher_str = "adiantum(xchacha12,aes)",
		.keysize = 32,
		.ivsize = 32,
	},
};

/*
 * This number needs to be at least (the number of threads doing IO
 * concurrently) * (maximum recursive depth of a bio), so that we don't
 * deadlock on crypt_ctx allocations. The default is chosen to be the same
 * as the default number of post read contexts in both EXT4 and F2FS.
 */
static int num_prealloc_crypt_ctxs = 128;

module_param(num_prealloc_crypt_ctxs, int, 0444);
MODULE_PARM_DESC(num_prealloc_crypt_ctxs,
		 "Number of bio crypto contexts to preallocate");

static struct kmem_cache *bio_crypt_ctx_cache;
static mempool_t *bio_crypt_ctx_pool;

static int __init bio_crypt_ctx_init(void)
{
	size_t i;

	bio_crypt_ctx_cache = KMEM_CACHE(bio_crypt_ctx, 0);
	if (!bio_crypt_ctx_cache)
		goto out_no_mem;

	bio_crypt_ctx_pool = mempool_create_slab_pool(num_prealloc_crypt_ctxs,
						      bio_crypt_ctx_cache);
	if (!bio_crypt_ctx_pool)
		goto out_no_mem;

	/* This is assumed in various places. */
	BUILD_BUG_ON(BLK_ENCRYPTION_MODE_INVALID != 0);

	/* Sanity check that no algorithm exceeds the defined limits. */
	for (i = 0; i < BLK_ENCRYPTION_MODE_MAX; i++) {
		BUG_ON(blk_crypto_modes[i].keysize > BLK_CRYPTO_MAX_KEY_SIZE);
		BUG_ON(blk_crypto_modes[i].ivsize > BLK_CRYPTO_MAX_IV_SIZE);
	}

	return 0;
out_no_mem:
	panic("Failed to allocate mem for bio crypt ctxs\n");
}
subsys_initcall(bio_crypt_ctx_init);

void bio_crypt_set_ctx(struct bio *bio, const struct blk_crypto_key *key,
		       const u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE], gfp_t gfp_mask)
{
	struct bio_crypt_ctx *bc;

	/*
	 * The caller must use a gfp_mask that contains __GFP_DIRECT_RECLAIM so
	 * that the mempool_alloc() can't fail.
	 */
	WARN_ON_ONCE(!(gfp_mask & __GFP_DIRECT_RECLAIM));

	bc = mempool_alloc(bio_crypt_ctx_pool, gfp_mask);

	bc->bc_key = key;
	memcpy(bc->bc_dun, dun, sizeof(bc->bc_dun));

	bio->bi_crypt_context = bc;
}

void __bio_crypt_free_ctx(struct bio *bio)
{
	mempool_free(bio->bi_crypt_context, bio_crypt_ctx_pool);
	bio->bi_crypt_context = NULL;
}

int __bio_crypt_clone(struct bio *dst, struct bio *src, gfp_t gfp_mask)
{
	dst->bi_crypt_context = mempool_alloc(bio_crypt_ctx_pool, gfp_mask);
	if (!dst->bi_crypt_context)
		return -ENOMEM;
	*dst->bi_crypt_context = *src->bi_crypt_context;
	return 0;
}
EXPORT_SYMBOL_GPL(__bio_crypt_clone);
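
/*
 * Illustrative sketch (not part of this file): how a submitter such as a
 * filesystem might attach an encryption context to a bio before submission.
 * The helper name example_submit_encrypted_bio() and the single-DUN setup are
 * hypothetical; the key is assumed to have already been prepared with
 * blk_crypto_init_key() and blk_crypto_start_using_key(). Compiled out with
 * "#if 0" since it exists only for illustration.
 */
#if 0
static void example_submit_encrypted_bio(struct bio *bio,
					 const struct blk_crypto_key *key,
					 u64 first_dun)
{
	u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE] = { first_dun };

	/* GFP_NOIO includes __GFP_DIRECT_RECLAIM, so the allocation can't fail. */
	bio_crypt_set_ctx(bio, key, dun, GFP_NOIO);
	submit_bio(bio);
}
#endif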

/*
 * Increments @dun by @inc, treating @dun as a multi-limb integer.
 */
void bio_crypt_dun_increment(u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE],
			     unsigned int inc)
{
	int i;

	for (i = 0; inc && i < BLK_CRYPTO_DUN_ARRAY_SIZE; i++) {
		dun[i] += inc;
		/*
		 * If the addition in this limb overflowed, then we need to
		 * carry 1 into the next limb. Else the carry is 0.
		 */
		if (dun[i] < inc)
			inc = 1;
		else
			inc = 0;
	}
}
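
/*
 * Illustrative sketch (not part of this file): a concrete carry case for
 * bio_crypt_dun_increment(). Assuming BLK_CRYPTO_DUN_ARRAY_SIZE >= 2,
 * incrementing the DUN {U64_MAX, 0} by 1 overflows limb 0 and carries into
 * limb 1, giving {0, 1}. The helper name example_dun_carry() is hypothetical;
 * compiled out with "#if 0".
 */
#if 0
static void example_dun_carry(void)
{
	u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE] = { U64_MAX, 0 };

	bio_crypt_dun_increment(dun, 1);
	/* Now dun[0] == 0 and dun[1] == 1: the overflow carried into limb 1. */
}
#endif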

void __bio_crypt_advance(struct bio *bio, unsigned int bytes)
{
	struct bio_crypt_ctx *bc = bio->bi_crypt_context;

	bio_crypt_dun_increment(bc->bc_dun,
				bytes >> bc->bc_key->data_unit_size_bits);
}

/*
 * Returns true if @bc->bc_dun plus @bytes converted to data units is equal to
 * @next_dun, treating the DUNs as multi-limb integers.
 */
bool bio_crypt_dun_is_contiguous(const struct bio_crypt_ctx *bc,
				 unsigned int bytes,
				 const u64 next_dun[BLK_CRYPTO_DUN_ARRAY_SIZE])
{
	int i;
	unsigned int carry = bytes >> bc->bc_key->data_unit_size_bits;

	for (i = 0; i < BLK_CRYPTO_DUN_ARRAY_SIZE; i++) {
		if (bc->bc_dun[i] + carry != next_dun[i])
			return false;
		/*
		 * If the addition in this limb overflowed, then we need to
		 * carry 1 into the next limb. Else the carry is 0.
		 */
		if ((bc->bc_dun[i] + carry) < carry)
			carry = 1;
		else
			carry = 0;
	}

	/* If the DUN wrapped through 0, don't treat it as contiguous. */
	return carry == 0;
}

/*
 * Checks that two bio crypt contexts are compatible - i.e. that
 * they are mergeable except for data_unit_num continuity.
 */
static bool bio_crypt_ctx_compatible(struct bio_crypt_ctx *bc1,
				     struct bio_crypt_ctx *bc2)
{
	if (!bc1)
		return !bc2;

	return bc2 && bc1->bc_key == bc2->bc_key;
}

bool bio_crypt_rq_ctx_compatible(struct request *rq, struct bio *bio)
{
	return bio_crypt_ctx_compatible(rq->crypt_ctx, bio->bi_crypt_context);
}

/*
 * Checks that two bio crypt contexts are compatible, and also
 * that their data_unit_nums are continuous (and can hence be merged)
 * in the order @bc1 followed by @bc2.
 */
bool bio_crypt_ctx_mergeable(struct bio_crypt_ctx *bc1, unsigned int bc1_bytes,
			     struct bio_crypt_ctx *bc2)
{
	if (!bio_crypt_ctx_compatible(bc1, bc2))
		return false;

	return !bc1 || bio_crypt_dun_is_contiguous(bc1, bc1_bytes, bc2->bc_dun);
}

/* Check that all I/O segments are data unit aligned. */
static bool bio_crypt_check_alignment(struct bio *bio)
{
	const unsigned int data_unit_size =
		bio->bi_crypt_context->bc_key->crypto_cfg.data_unit_size;
	struct bvec_iter iter;
	struct bio_vec bv;

	bio_for_each_segment(bv, bio, iter) {
		if (!IS_ALIGNED(bv.bv_len | bv.bv_offset, data_unit_size))
			return false;
	}

	return true;
}

blk_status_t __blk_crypto_init_request(struct request *rq)
{
	return blk_crypto_get_keyslot(rq->q->crypto_profile,
				      rq->crypt_ctx->bc_key,
				      &rq->crypt_keyslot);
}

/**
 * __blk_crypto_free_request - Uninitialize the crypto fields of a request.
 *
 * @rq: The request whose crypto fields to uninitialize.
 *
 * Completely uninitializes the crypto fields of a request. If a keyslot has
 * been programmed into some inline encryption hardware, that keyslot is
 * released. The rq->crypt_ctx is also freed.
 */
void __blk_crypto_free_request(struct request *rq)
{
	blk_crypto_put_keyslot(rq->crypt_keyslot);
	mempool_free(rq->crypt_ctx, bio_crypt_ctx_pool);
	blk_crypto_rq_set_defaults(rq);
}

/**
 * __blk_crypto_bio_prep - Prepare bio for inline encryption
 *
 * @bio_ptr: pointer to original bio pointer
 *
 * If the bio crypt context provided for the bio is supported by the underlying
 * device's inline encryption hardware, do nothing.
 *
 * Otherwise, try to perform en/decryption for this bio by falling back to the
 * kernel crypto API. When the crypto API fallback is used for encryption,
 * blk-crypto may choose to split the bio into two: the first one will continue
 * to be processed and the second one will be resubmitted via
 * submit_bio_noacct. A bounce bio will be allocated to encrypt the contents
 * of the aforementioned "first one", and *bio_ptr will be updated to this
 * bounce bio.
 *
 * Caller must ensure bio has bio_crypt_ctx.
 *
 * Return: true on success; false on error (and bio->bi_status will be set
 *	   appropriately, and bio_endio() will have been called so bio
 *	   submission should abort).
 */
bool __blk_crypto_bio_prep(struct bio **bio_ptr)
{
	struct bio *bio = *bio_ptr;
	const struct blk_crypto_key *bc_key = bio->bi_crypt_context->bc_key;
	struct blk_crypto_profile *profile;

	/* Error if bio has no data. */
	if (WARN_ON_ONCE(!bio_has_data(bio))) {
		bio->bi_status = BLK_STS_IOERR;
		goto fail;
	}

	if (!bio_crypt_check_alignment(bio)) {
		bio->bi_status = BLK_STS_IOERR;
		goto fail;
	}

	/*
	 * Success if device supports the encryption context, or if we succeeded
	 * in falling back to the crypto API.
	 */
	profile = bdev_get_queue(bio->bi_bdev)->crypto_profile;
	if (__blk_crypto_cfg_supported(profile, &bc_key->crypto_cfg))
		return true;

	if (blk_crypto_fallback_bio_prep(bio_ptr))
		return true;
fail:
	bio_endio(*bio_ptr);
	return false;
}

int __blk_crypto_rq_bio_prep(struct request *rq, struct bio *bio,
			     gfp_t gfp_mask)
{
	if (!rq->crypt_ctx) {
		rq->crypt_ctx = mempool_alloc(bio_crypt_ctx_pool, gfp_mask);
		if (!rq->crypt_ctx)
			return -ENOMEM;
	}
	*rq->crypt_ctx = *bio->bi_crypt_context;
	return 0;
}

/**
 * blk_crypto_init_key() - Prepare a key for use with blk-crypto
 * @blk_key: Pointer to the blk_crypto_key to initialize.
 * @raw_key: Pointer to the raw key. Must be the correct length for the chosen
 *	     @crypto_mode; see blk_crypto_modes[].
 * @crypto_mode: identifier for the encryption algorithm to use
 * @dun_bytes: number of bytes that will be used to specify the DUN when this
 *	       key is used
 * @data_unit_size: the data unit size to use for en/decryption
 *
 * Return: 0 on success, -errno on failure. The caller is responsible for
 *	   zeroizing both blk_key and raw_key when done with them.
 */
int blk_crypto_init_key(struct blk_crypto_key *blk_key, const u8 *raw_key,
			enum blk_crypto_mode_num crypto_mode,
			unsigned int dun_bytes,
			unsigned int data_unit_size)
{
	const struct blk_crypto_mode *mode;

	memset(blk_key, 0, sizeof(*blk_key));

	if (crypto_mode >= ARRAY_SIZE(blk_crypto_modes))
		return -EINVAL;

	mode = &blk_crypto_modes[crypto_mode];
	if (mode->keysize == 0)
		return -EINVAL;

	if (dun_bytes == 0 || dun_bytes > mode->ivsize)
		return -EINVAL;

	if (!is_power_of_2(data_unit_size))
		return -EINVAL;

	blk_key->crypto_cfg.crypto_mode = crypto_mode;
	blk_key->crypto_cfg.dun_bytes = dun_bytes;
	blk_key->crypto_cfg.data_unit_size = data_unit_size;
	blk_key->data_unit_size_bits = ilog2(data_unit_size);
	blk_key->size = mode->keysize;
	memcpy(blk_key->raw, raw_key, mode->keysize);

	return 0;
}
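
/*
 * Illustrative sketch (not part of this file): preparing a blk_crypto_key for
 * AES-256-XTS with an 8-byte DUN and 4096-byte data units. The helper name
 * example_prepare_key() and the caller-supplied raw_key buffer are
 * hypothetical; compiled out with "#if 0".
 */
#if 0
static int example_prepare_key(struct blk_crypto_key *blk_key,
			       const u8 raw_key[64])
{
	/* 64-byte raw key, 8 DUN bytes, 4096-byte data units. */
	return blk_crypto_init_key(blk_key, raw_key,
				   BLK_ENCRYPTION_MODE_AES_256_XTS,
				   8, 4096);
}
#endif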

/*
 * Check if bios with @cfg can be en/decrypted by blk-crypto (i.e. either the
 * request queue it's submitted to supports inline crypto, or the
 * blk-crypto-fallback is enabled and supports the cfg).
 */
bool blk_crypto_config_supported(struct request_queue *q,
				 const struct blk_crypto_config *cfg)
{
	return IS_ENABLED(CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK) ||
	       __blk_crypto_cfg_supported(q->crypto_profile, cfg);
}

/**
 * blk_crypto_start_using_key() - Start using a blk_crypto_key on a device
 * @key: A key to use on the device
 * @q: the request queue for the device
 *
 * Upper layers must call this function to ensure that either the hardware
 * supports the key's crypto settings, or the crypto API fallback has transforms
 * for the needed mode allocated and ready to go. This function may allocate
 * an skcipher, and *should not* be called from the data path, since that might
 * cause a deadlock.
 *
 * Return: 0 on success; -ENOPKG if the hardware doesn't support the key and
 *	   blk-crypto-fallback is either disabled or the needed algorithm
 *	   is disabled in the crypto API; or another -errno code.
 */
int blk_crypto_start_using_key(const struct blk_crypto_key *key,
			       struct request_queue *q)
{
	if (__blk_crypto_cfg_supported(q->crypto_profile, &key->crypto_cfg))
		return 0;
	return blk_crypto_fallback_start_using_mode(key->crypto_cfg.crypto_mode);
}

/**
 * blk_crypto_evict_key() - Evict a key from any inline encryption hardware
 *			    it may have been programmed into
 * @q: The request queue whose associated inline encryption hardware this key
 *     might have been programmed into
 * @key: The key to evict
 *
 * Upper layers (filesystems) must call this function to ensure that a key is
 * evicted from any hardware that it might have been programmed into. The key
 * must not be in use by any in-flight IO when this function is called.
 *
 * Return: 0 on success or if the key wasn't in any keyslot; -errno on error.
 */
int blk_crypto_evict_key(struct request_queue *q,
			 const struct blk_crypto_key *key)
{
	if (__blk_crypto_cfg_supported(q->crypto_profile, &key->crypto_cfg))
		return __blk_crypto_evict_key(q->crypto_profile, key);

	/*
	 * If the request_queue didn't support the key, then blk-crypto-fallback
	 * may have been used, so try to evict the key from blk-crypto-fallback.
	 */
	return blk_crypto_fallback_evict_key(key);
}
EXPORT_SYMBOL_GPL(blk_crypto_evict_key);
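
/*
 * Illustrative sketch (not part of this file): the teardown side of the key
 * lifecycle. Once all I/O using the key has completed, the owner evicts it
 * from any hardware keyslots and then zeroizes its own copy. The helper name
 * example_destroy_key() is hypothetical; compiled out with "#if 0".
 */
#if 0
static void example_destroy_key(struct request_queue *q,
				struct blk_crypto_key *key)
{
	/* No bios using @key may still be in flight at this point. */
	blk_crypto_evict_key(q, key);
	memzero_explicit(key, sizeof(*key));
}
#endif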