// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2019 Google LLC
 */

/*
 * Refer to Documentation/block/inline-encryption.rst for a detailed explanation.
 */

#define pr_fmt(fmt) "blk-crypto-fallback: " fmt

#include <crypto/skcipher.h>
#include <linux/blk-cgroup.h>
#include <linux/blk-crypto.h>
#include <linux/blkdev.h>
#include <linux/crypto.h>
#include <linux/keyslot-manager.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/scatterlist.h>

#include "blk-crypto-internal.h"

static unsigned int num_prealloc_bounce_pg = 32;
module_param(num_prealloc_bounce_pg, uint, 0);
MODULE_PARM_DESC(num_prealloc_bounce_pg,
		 "Number of preallocated bounce pages for the blk-crypto crypto API fallback");

static unsigned int blk_crypto_num_keyslots = 100;
module_param_named(num_keyslots, blk_crypto_num_keyslots, uint, 0);
MODULE_PARM_DESC(num_keyslots,
		 "Number of keyslots for the blk-crypto crypto API fallback");

static unsigned int num_prealloc_fallback_crypt_ctxs = 128;
module_param(num_prealloc_fallback_crypt_ctxs, uint, 0);
MODULE_PARM_DESC(num_prealloc_fallback_crypt_ctxs,
		 "Number of preallocated bio fallback crypto contexts for blk-crypto to use during crypto API fallback");

struct bio_fallback_crypt_ctx {
	struct bio_crypt_ctx crypt_ctx;
	/*
	 * Copy of the bvec_iter when this bio was submitted.
	 * We only want to en/decrypt the part of the bio described by the
	 * bvec_iter at submission time, because the bio might be split before
	 * being resubmitted.
	 */
	struct bvec_iter crypt_iter;
	union {
		struct {
			struct work_struct work;
			struct bio *bio;
		};
		struct {
			void *bi_private_orig;
			bio_end_io_t *bi_end_io_orig;
		};
	};
};

static struct kmem_cache *bio_fallback_crypt_ctx_cache;
static mempool_t *bio_fallback_crypt_ctx_pool;

/*
 * Allocating a crypto tfm during I/O can deadlock, so we have to preallocate
 * all of a mode's tfms when that mode starts being used. Since each mode may
 * need all the keyslots at some point, each mode needs its own tfm for each
 * keyslot; thus, a keyslot may contain tfms for multiple modes.  However, to
 * match the behavior of real inline encryption hardware (which only supports a
 * single encryption context per keyslot), we only allow one tfm per keyslot to
 * be used at a time - the rest of the unused tfms have their keys cleared.
 */
static DEFINE_MUTEX(tfms_init_lock);
static bool tfms_inited[BLK_ENCRYPTION_MODE_MAX];

static struct blk_crypto_fallback_keyslot {
	enum blk_crypto_mode_num crypto_mode;
	struct crypto_skcipher *tfms[BLK_ENCRYPTION_MODE_MAX];
} *blk_crypto_keyslots;

static struct blk_keyslot_manager blk_crypto_ksm;
static struct workqueue_struct *blk_crypto_wq;
static mempool_t *blk_crypto_bounce_page_pool;
static struct bio_set crypto_bio_split;
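
/*
 * How these pieces fit together: blk_crypto_ksm hands out the preallocated
 * keyslots above, blk_crypto_wq runs the read-path decryption work items,
 * blk_crypto_bounce_page_pool supplies the write-path bounce pages, and
 * crypto_bio_split backs the bio_split() call used to keep bios within
 * BIO_MAX_VECS segments.
 */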

/*
 * This is the key we set when evicting a keyslot. This *should* be the all 0's
 * key, but AES-XTS rejects that key, so we use some random bytes instead.
 */
static u8 blank_key[BLK_CRYPTO_MAX_KEY_SIZE];

static void blk_crypto_fallback_evict_keyslot(unsigned int slot)
{
	struct blk_crypto_fallback_keyslot *slotp = &blk_crypto_keyslots[slot];
	enum blk_crypto_mode_num crypto_mode = slotp->crypto_mode;
	int err;

	WARN_ON(slotp->crypto_mode == BLK_ENCRYPTION_MODE_INVALID);

	/* Clear the key in the skcipher */
	err = crypto_skcipher_setkey(slotp->tfms[crypto_mode], blank_key,
				     blk_crypto_modes[crypto_mode].keysize);
	WARN_ON(err);
	slotp->crypto_mode = BLK_ENCRYPTION_MODE_INVALID;
}

static int blk_crypto_fallback_keyslot_program(struct blk_keyslot_manager *ksm,
					       const struct blk_crypto_key *key,
					       unsigned int slot)
{
	struct blk_crypto_fallback_keyslot *slotp = &blk_crypto_keyslots[slot];
	const enum blk_crypto_mode_num crypto_mode =
						key->crypto_cfg.crypto_mode;
	int err;

	if (crypto_mode != slotp->crypto_mode &&
	    slotp->crypto_mode != BLK_ENCRYPTION_MODE_INVALID)
		blk_crypto_fallback_evict_keyslot(slot);

	slotp->crypto_mode = crypto_mode;
	err = crypto_skcipher_setkey(slotp->tfms[crypto_mode], key->raw,
				     key->size);
	if (err) {
		blk_crypto_fallback_evict_keyslot(slot);
		return err;
	}
	return 0;
}

static int blk_crypto_fallback_keyslot_evict(struct blk_keyslot_manager *ksm,
					     const struct blk_crypto_key *key,
					     unsigned int slot)
{
	blk_crypto_fallback_evict_keyslot(slot);
	return 0;
}

/*
 * The crypto API fallback KSM ops - only used for a bio when it specifies a
 * blk_crypto_key that was not supported by the device's inline encryption
 * hardware.
 */
static const struct blk_ksm_ll_ops blk_crypto_ksm_ll_ops = {
	.keyslot_program	= blk_crypto_fallback_keyslot_program,
	.keyslot_evict		= blk_crypto_fallback_keyslot_evict,
};

static void blk_crypto_fallback_encrypt_endio(struct bio *enc_bio)
{
	struct bio *src_bio = enc_bio->bi_private;
	int i;

	for (i = 0; i < enc_bio->bi_vcnt; i++)
		mempool_free(enc_bio->bi_io_vec[i].bv_page,
			     blk_crypto_bounce_page_pool);

	src_bio->bi_status = enc_bio->bi_status;

	bio_put(enc_bio);
	bio_endio(src_bio);
}

static struct bio *blk_crypto_fallback_clone_bio(struct bio *bio_src)
{
	struct bvec_iter iter;
	struct bio_vec bv;
	struct bio *bio;

	bio = bio_kmalloc(GFP_NOIO, bio_segments(bio_src));
	if (!bio)
		return NULL;
	bio->bi_bdev = bio_src->bi_bdev;
	if (bio_flagged(bio_src, BIO_REMAPPED))
		bio_set_flag(bio, BIO_REMAPPED);
	bio->bi_opf = bio_src->bi_opf;
	bio->bi_ioprio = bio_src->bi_ioprio;
	bio->bi_write_hint = bio_src->bi_write_hint;
	bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector;
	bio->bi_iter.bi_size = bio_src->bi_iter.bi_size;

	bio_for_each_segment(bv, bio_src, iter)
		bio->bi_io_vec[bio->bi_vcnt++] = bv;

	bio_clone_blkg_association(bio, bio_src);
	blkcg_bio_issue_init(bio);

	return bio;
}
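
/*
 * Allocate an skcipher_request for the tfm currently programmed into @slot.
 * The request is set up for synchronous use: crypto_req_done() completes
 * @wait, and callers drive each en/decryption through crypto_wait_req().
 */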
static bool
blk_crypto_fallback_alloc_cipher_req(struct blk_ksm_keyslot *slot,
				     struct skcipher_request **ciph_req_ret,
				     struct crypto_wait *wait)
{
	struct skcipher_request *ciph_req;
	const struct blk_crypto_fallback_keyslot *slotp;
	int keyslot_idx = blk_ksm_get_slot_idx(slot);

	slotp = &blk_crypto_keyslots[keyslot_idx];
	ciph_req = skcipher_request_alloc(slotp->tfms[slotp->crypto_mode],
					  GFP_NOIO);
	if (!ciph_req)
		return false;

	skcipher_request_set_callback(ciph_req,
				      CRYPTO_TFM_REQ_MAY_BACKLOG |
				      CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, wait);
	*ciph_req_ret = ciph_req;

	return true;
}

static bool blk_crypto_fallback_split_bio_if_needed(struct bio **bio_ptr)
{
	struct bio *bio = *bio_ptr;
	unsigned int i = 0;
	unsigned int num_sectors = 0;
	struct bio_vec bv;
	struct bvec_iter iter;

	bio_for_each_segment(bv, bio, iter) {
		num_sectors += bv.bv_len >> SECTOR_SHIFT;
		if (++i == BIO_MAX_VECS)
			break;
	}
	if (num_sectors < bio_sectors(bio)) {
		struct bio *split_bio;

		split_bio = bio_split(bio, num_sectors, GFP_NOIO,
				      &crypto_bio_split);
		if (!split_bio) {
			bio->bi_status = BLK_STS_RESOURCE;
			return false;
		}
		bio_chain(split_bio, bio);
		submit_bio_noacct(bio);
		*bio_ptr = split_bio;
	}

	return true;
}

union blk_crypto_iv {
	__le64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
	u8 bytes[BLK_CRYPTO_MAX_IV_SIZE];
};

static void blk_crypto_dun_to_iv(const u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE],
				 union blk_crypto_iv *iv)
{
	int i;

	for (i = 0; i < BLK_CRYPTO_DUN_ARRAY_SIZE; i++)
		iv->dun[i] = cpu_to_le64(dun[i]);
}
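
/*
 * Worked example of the DUN-to-IV mapping above: the DUN words are stored
 * little-endian, with dun[0] occupying the lowest-order IV bytes.  So a DUN
 * array of { 5, 0, ... } produces the IV bytes 05 00 00 ... 00.
 */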

/*
 * The crypto API fallback's encryption routine.
 * Allocate a bounce bio for encryption, encrypt the input bio using the crypto
 * API, and replace *bio_ptr with the bounce bio. May split the input bio if
 * it's too large. Returns true on success. Returns false and sets
 * bio->bi_status on error.
 */
static bool blk_crypto_fallback_encrypt_bio(struct bio **bio_ptr)
{
	struct bio *src_bio, *enc_bio;
	struct bio_crypt_ctx *bc;
	struct blk_ksm_keyslot *slot;
	int data_unit_size;
	struct skcipher_request *ciph_req = NULL;
	DECLARE_CRYPTO_WAIT(wait);
	u64 curr_dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
	struct scatterlist src, dst;
	union blk_crypto_iv iv;
	unsigned int i, j;
	bool ret = false;
	blk_status_t blk_st;

	/* Split the bio if it's too big for a single-page bvec */
	if (!blk_crypto_fallback_split_bio_if_needed(bio_ptr))
		return false;

	src_bio = *bio_ptr;
	bc = src_bio->bi_crypt_context;
	data_unit_size = bc->bc_key->crypto_cfg.data_unit_size;

	/* Allocate bounce bio for encryption */
	enc_bio = blk_crypto_fallback_clone_bio(src_bio);
	if (!enc_bio) {
		src_bio->bi_status = BLK_STS_RESOURCE;
		return false;
	}

	/*
	 * Use the crypto API fallback keyslot manager to get a crypto_skcipher
	 * for the algorithm and key specified for this bio.
	 */
	blk_st = blk_ksm_get_slot_for_key(&blk_crypto_ksm, bc->bc_key, &slot);
	if (blk_st != BLK_STS_OK) {
		src_bio->bi_status = blk_st;
		goto out_put_enc_bio;
	}

	/* and then allocate an skcipher_request for it */
	if (!blk_crypto_fallback_alloc_cipher_req(slot, &ciph_req, &wait)) {
		src_bio->bi_status = BLK_STS_RESOURCE;
		goto out_release_keyslot;
	}

	memcpy(curr_dun, bc->bc_dun, sizeof(curr_dun));
	sg_init_table(&src, 1);
	sg_init_table(&dst, 1);

	skcipher_request_set_crypt(ciph_req, &src, &dst, data_unit_size,
				   iv.bytes);

	/* Encrypt each page in the bounce bio */
	for (i = 0; i < enc_bio->bi_vcnt; i++) {
		struct bio_vec *enc_bvec = &enc_bio->bi_io_vec[i];
		struct page *plaintext_page = enc_bvec->bv_page;
		struct page *ciphertext_page =
			mempool_alloc(blk_crypto_bounce_page_pool, GFP_NOIO);

		enc_bvec->bv_page = ciphertext_page;

		if (!ciphertext_page) {
			src_bio->bi_status = BLK_STS_RESOURCE;
			goto out_free_bounce_pages;
		}

		sg_set_page(&src, plaintext_page, data_unit_size,
			    enc_bvec->bv_offset);
		sg_set_page(&dst, ciphertext_page, data_unit_size,
			    enc_bvec->bv_offset);

		/* Encrypt each data unit in this page */
		for (j = 0; j < enc_bvec->bv_len; j += data_unit_size) {
			blk_crypto_dun_to_iv(curr_dun, &iv);
			if (crypto_wait_req(crypto_skcipher_encrypt(ciph_req),
					    &wait)) {
				i++;
				src_bio->bi_status = BLK_STS_IOERR;
				goto out_free_bounce_pages;
			}
			bio_crypt_dun_increment(curr_dun, 1);
			src.offset += data_unit_size;
			dst.offset += data_unit_size;
		}
	}

	enc_bio->bi_private = src_bio;
	enc_bio->bi_end_io = blk_crypto_fallback_encrypt_endio;
	*bio_ptr = enc_bio;
	ret = true;

	enc_bio = NULL;
	goto out_free_ciph_req;

out_free_bounce_pages:
	while (i > 0)
		mempool_free(enc_bio->bi_io_vec[--i].bv_page,
			     blk_crypto_bounce_page_pool);
out_free_ciph_req:
	skcipher_request_free(ciph_req);
out_release_keyslot:
	blk_ksm_put_slot(slot);
out_put_enc_bio:
	if (enc_bio)
		bio_put(enc_bio);

	return ret;
}
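
/*
 * Read path overview: blk_crypto_fallback_bio_prep() stashes the bio's
 * original bi_private and bi_end_io in a bio_fallback_crypt_ctx and points
 * bi_end_io at blk_crypto_fallback_decrypt_endio(), which restores them and
 * queues the work item below onto blk_crypto_wq once the ciphertext has been
 * read from the device.
 */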

/*
 * The crypto API fallback's main decryption routine.
 * Decrypts the input bio in place, and calls bio_endio on the bio.
 */
static void blk_crypto_fallback_decrypt_bio(struct work_struct *work)
{
	struct bio_fallback_crypt_ctx *f_ctx =
		container_of(work, struct bio_fallback_crypt_ctx, work);
	struct bio *bio = f_ctx->bio;
	struct bio_crypt_ctx *bc = &f_ctx->crypt_ctx;
	struct blk_ksm_keyslot *slot;
	struct skcipher_request *ciph_req = NULL;
	DECLARE_CRYPTO_WAIT(wait);
	u64 curr_dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
	union blk_crypto_iv iv;
	struct scatterlist sg;
	struct bio_vec bv;
	struct bvec_iter iter;
	const int data_unit_size = bc->bc_key->crypto_cfg.data_unit_size;
	unsigned int i;
	blk_status_t blk_st;

	/*
	 * Use the crypto API fallback keyslot manager to get a crypto_skcipher
	 * for the algorithm and key specified for this bio.
	 */
	blk_st = blk_ksm_get_slot_for_key(&blk_crypto_ksm, bc->bc_key, &slot);
	if (blk_st != BLK_STS_OK) {
		bio->bi_status = blk_st;
		goto out_no_keyslot;
	}

	/* and then allocate an skcipher_request for it */
	if (!blk_crypto_fallback_alloc_cipher_req(slot, &ciph_req, &wait)) {
		bio->bi_status = BLK_STS_RESOURCE;
		goto out;
	}

	memcpy(curr_dun, bc->bc_dun, sizeof(curr_dun));
	sg_init_table(&sg, 1);
	skcipher_request_set_crypt(ciph_req, &sg, &sg, data_unit_size,
				   iv.bytes);

	/* Decrypt each segment in the bio */
	__bio_for_each_segment(bv, bio, iter, f_ctx->crypt_iter) {
		struct page *page = bv.bv_page;

		sg_set_page(&sg, page, data_unit_size, bv.bv_offset);

		/* Decrypt each data unit in the segment */
		for (i = 0; i < bv.bv_len; i += data_unit_size) {
			blk_crypto_dun_to_iv(curr_dun, &iv);
			if (crypto_wait_req(crypto_skcipher_decrypt(ciph_req),
					    &wait)) {
				bio->bi_status = BLK_STS_IOERR;
				goto out;
			}
			bio_crypt_dun_increment(curr_dun, 1);
			sg.offset += data_unit_size;
		}
	}

out:
	skcipher_request_free(ciph_req);
	blk_ksm_put_slot(slot);
out_no_keyslot:
	mempool_free(f_ctx, bio_fallback_crypt_ctx_pool);
	bio_endio(bio);
}

/**
 * blk_crypto_fallback_decrypt_endio - queue bio for fallback decryption
 *
 * @bio: the bio to queue
 *
 * Restore bi_private and bi_end_io, and queue the bio for decryption into a
 * workqueue, since this function will be called from an atomic context.
 */
static void blk_crypto_fallback_decrypt_endio(struct bio *bio)
{
	struct bio_fallback_crypt_ctx *f_ctx = bio->bi_private;

	bio->bi_private = f_ctx->bi_private_orig;
	bio->bi_end_io = f_ctx->bi_end_io_orig;

	/* If there was an IO error, don't queue for decrypt. */
	if (bio->bi_status) {
		mempool_free(f_ctx, bio_fallback_crypt_ctx_pool);
		bio_endio(bio);
		return;
	}

	INIT_WORK(&f_ctx->work, blk_crypto_fallback_decrypt_bio);
	f_ctx->bio = bio;
	queue_work(blk_crypto_wq, &f_ctx->work);
}
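
/*
 * Entry point from the blk-crypto core: this is intended to be called (via
 * __blk_crypto_bio_prep() in blk-crypto.c) only when the bio's encryption
 * context can't be handled by the device's own inline encryption hardware.
 */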

/**
 * blk_crypto_fallback_bio_prep - Prepare a bio to use fallback en/decryption
 *
 * @bio_ptr: pointer to the bio to prepare
 *
 * If the bio is doing a WRITE operation, this splits the bio into two parts if
 * it's too big (see blk_crypto_fallback_split_bio_if_needed()). It then
 * allocates a bounce bio for the first part, encrypts it, and updates *bio_ptr
 * to point to the bounce bio.
 *
 * For a READ operation, we mark the bio for decryption by using bi_private and
 * bi_end_io.
 *
 * In either case, this function will make the bio look like a regular bio
 * (i.e. as if no encryption context was ever specified) for the purposes of
 * the rest of the stack except for blk-integrity (blk-integrity and blk-crypto
 * are not currently supported together).
 *
 * Return: true on success. Sets bio->bi_status and returns false on error.
 */
bool blk_crypto_fallback_bio_prep(struct bio **bio_ptr)
{
	struct bio *bio = *bio_ptr;
	struct bio_crypt_ctx *bc = bio->bi_crypt_context;
	struct bio_fallback_crypt_ctx *f_ctx;

	if (WARN_ON_ONCE(!tfms_inited[bc->bc_key->crypto_cfg.crypto_mode])) {
		/* User didn't call blk_crypto_start_using_key() first */
		bio->bi_status = BLK_STS_IOERR;
		return false;
	}

	if (!blk_ksm_crypto_cfg_supported(&blk_crypto_ksm,
					  &bc->bc_key->crypto_cfg)) {
		bio->bi_status = BLK_STS_NOTSUPP;
		return false;
	}

	if (bio_data_dir(bio) == WRITE)
		return blk_crypto_fallback_encrypt_bio(bio_ptr);

	/*
	 * bio READ case: Set up a f_ctx in the bio's bi_private and set the
	 * bi_end_io appropriately to trigger decryption when the bio is ended.
	 */
	f_ctx = mempool_alloc(bio_fallback_crypt_ctx_pool, GFP_NOIO);
	f_ctx->crypt_ctx = *bc;
	f_ctx->crypt_iter = bio->bi_iter;
	f_ctx->bi_private_orig = bio->bi_private;
	f_ctx->bi_end_io_orig = bio->bi_end_io;
	bio->bi_private = (void *)f_ctx;
	bio->bi_end_io = blk_crypto_fallback_decrypt_endio;
	bio_crypt_free_ctx(bio);

	return true;
}

int blk_crypto_fallback_evict_key(const struct blk_crypto_key *key)
{
	return blk_ksm_evict_key(&blk_crypto_ksm, key);
}
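
/*
 * One-time setup of the fallback's shared resources (bio_set, keyslot manager,
 * workqueue, keyslot array, and mempools).  This runs lazily from
 * blk_crypto_fallback_start_using_mode() under tfms_init_lock, so nothing is
 * allocated unless the fallback is actually needed.
 */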
static bool blk_crypto_fallback_inited;
static int blk_crypto_fallback_init(void)
{
	int i;
	int err;

	if (blk_crypto_fallback_inited)
		return 0;

	prandom_bytes(blank_key, BLK_CRYPTO_MAX_KEY_SIZE);

	err = bioset_init(&crypto_bio_split, 64, 0, 0);
	if (err)
		goto out;

	err = blk_ksm_init(&blk_crypto_ksm, blk_crypto_num_keyslots);
	if (err)
		goto fail_free_bioset;
	err = -ENOMEM;

	blk_crypto_ksm.ksm_ll_ops = blk_crypto_ksm_ll_ops;
	blk_crypto_ksm.max_dun_bytes_supported = BLK_CRYPTO_MAX_IV_SIZE;

	/* All blk-crypto modes have a crypto API fallback. */
	for (i = 0; i < BLK_ENCRYPTION_MODE_MAX; i++)
		blk_crypto_ksm.crypto_modes_supported[i] = 0xFFFFFFFF;
	blk_crypto_ksm.crypto_modes_supported[BLK_ENCRYPTION_MODE_INVALID] = 0;

	blk_crypto_wq = alloc_workqueue("blk_crypto_wq",
					WQ_UNBOUND | WQ_HIGHPRI |
					WQ_MEM_RECLAIM, num_online_cpus());
	if (!blk_crypto_wq)
		goto fail_free_ksm;

	blk_crypto_keyslots = kcalloc(blk_crypto_num_keyslots,
				      sizeof(blk_crypto_keyslots[0]),
				      GFP_KERNEL);
	if (!blk_crypto_keyslots)
		goto fail_free_wq;

	blk_crypto_bounce_page_pool =
		mempool_create_page_pool(num_prealloc_bounce_pg, 0);
	if (!blk_crypto_bounce_page_pool)
		goto fail_free_keyslots;

	bio_fallback_crypt_ctx_cache = KMEM_CACHE(bio_fallback_crypt_ctx, 0);
	if (!bio_fallback_crypt_ctx_cache)
		goto fail_free_bounce_page_pool;

	bio_fallback_crypt_ctx_pool =
		mempool_create_slab_pool(num_prealloc_fallback_crypt_ctxs,
					 bio_fallback_crypt_ctx_cache);
	if (!bio_fallback_crypt_ctx_pool)
		goto fail_free_crypt_ctx_cache;

	blk_crypto_fallback_inited = true;

	return 0;
fail_free_crypt_ctx_cache:
	kmem_cache_destroy(bio_fallback_crypt_ctx_cache);
fail_free_bounce_page_pool:
	mempool_destroy(blk_crypto_bounce_page_pool);
fail_free_keyslots:
	kfree(blk_crypto_keyslots);
fail_free_wq:
	destroy_workqueue(blk_crypto_wq);
fail_free_ksm:
	blk_ksm_destroy(&blk_crypto_ksm);
fail_free_bioset:
	bioset_exit(&crypto_bio_split);
out:
	return err;
}

/*
 * Prepare blk-crypto-fallback for the specified crypto mode.
 * Returns -ENOPKG if the needed crypto API support is missing.
 */
int blk_crypto_fallback_start_using_mode(enum blk_crypto_mode_num mode_num)
{
	const char *cipher_str = blk_crypto_modes[mode_num].cipher_str;
	struct blk_crypto_fallback_keyslot *slotp;
	unsigned int i;
	int err = 0;

	/*
	 * Fast path
	 * Ensure that updates to blk_crypto_keyslots[i].tfms[mode_num]
	 * for each i are visible before we try to access them.
	 */
	if (likely(smp_load_acquire(&tfms_inited[mode_num])))
		return 0;

	mutex_lock(&tfms_init_lock);
	if (tfms_inited[mode_num])
		goto out;

	err = blk_crypto_fallback_init();
	if (err)
		goto out;

	for (i = 0; i < blk_crypto_num_keyslots; i++) {
		slotp = &blk_crypto_keyslots[i];
		slotp->tfms[mode_num] = crypto_alloc_skcipher(cipher_str, 0, 0);
		if (IS_ERR(slotp->tfms[mode_num])) {
			err = PTR_ERR(slotp->tfms[mode_num]);
			if (err == -ENOENT) {
				pr_warn_once("Missing crypto API support for \"%s\"\n",
					     cipher_str);
				err = -ENOPKG;
			}
			slotp->tfms[mode_num] = NULL;
			goto out_free_tfms;
		}

		crypto_skcipher_set_flags(slotp->tfms[mode_num],
					  CRYPTO_TFM_REQ_FORBID_WEAK_KEYS);
	}

	/*
	 * Ensure that updates to blk_crypto_keyslots[i].tfms[mode_num]
	 * for each i are visible before we set tfms_inited[mode_num].
	 */
	smp_store_release(&tfms_inited[mode_num], true);
	goto out;

out_free_tfms:
	for (i = 0; i < blk_crypto_num_keyslots; i++) {
		slotp = &blk_crypto_keyslots[i];
		crypto_free_skcipher(slotp->tfms[mode_num]);
		slotp->tfms[mode_num] = NULL;
	}
out:
	mutex_unlock(&tfms_init_lock);
	return err;
}