// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2019 Google LLC
 */

/*
 * Refer to Documentation/block/inline-encryption.rst for detailed explanation.
 */

#define pr_fmt(fmt) "blk-crypto: " fmt

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/keyslot-manager.h>
#include <linux/module.h>
#include <linux/slab.h>

#include "blk-crypto-internal.h"

const struct blk_crypto_mode blk_crypto_modes[] = {
	[BLK_ENCRYPTION_MODE_AES_256_XTS] = {
		.keysize = 64,
		.ivsize = 16,
	},
	[BLK_ENCRYPTION_MODE_AES_128_CBC_ESSIV] = {
		.keysize = 16,
		.ivsize = 16,
	},
	[BLK_ENCRYPTION_MODE_ADIANTUM] = {
		.keysize = 32,
		.ivsize = 32,
	},
};

/*
 * This number needs to be at least (the number of threads doing IO
 * concurrently) * (maximum recursive depth of a bio), so that we don't
 * deadlock on crypt_ctx allocations. The default is chosen to be the same
 * as the default number of post read contexts in both EXT4 and F2FS.
 */
static int num_prealloc_crypt_ctxs = 128;

module_param(num_prealloc_crypt_ctxs, int, 0444);
MODULE_PARM_DESC(num_prealloc_crypt_ctxs,
		"Number of bio crypto contexts to preallocate");

static struct kmem_cache *bio_crypt_ctx_cache;
static mempool_t *bio_crypt_ctx_pool;

static int __init bio_crypt_ctx_init(void)
{
	size_t i;

	bio_crypt_ctx_cache = KMEM_CACHE(bio_crypt_ctx, 0);
	if (!bio_crypt_ctx_cache)
		goto out_no_mem;

	bio_crypt_ctx_pool = mempool_create_slab_pool(num_prealloc_crypt_ctxs,
						      bio_crypt_ctx_cache);
	if (!bio_crypt_ctx_pool)
		goto out_no_mem;

	/* This is assumed in various places. */
	BUILD_BUG_ON(BLK_ENCRYPTION_MODE_INVALID != 0);

	/* Sanity check that no algorithm exceeds the defined limits. */
	for (i = 0; i < BLK_ENCRYPTION_MODE_MAX; i++) {
		BUG_ON(blk_crypto_modes[i].keysize > BLK_CRYPTO_MAX_KEY_SIZE);
		BUG_ON(blk_crypto_modes[i].ivsize > BLK_CRYPTO_MAX_IV_SIZE);
	}

	return 0;
out_no_mem:
	panic("Failed to allocate mem for bio crypt ctxs\n");
}
subsys_initcall(bio_crypt_ctx_init);
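
/**
 * bio_crypt_set_ctx - attach an encryption context to a bio
 * @bio: the bio to attach the context to
 * @key: the key to encrypt/decrypt @bio with
 * @dun: the data unit number of the first data unit in @bio
 * @gfp_mask: memory allocation flags; the allocated context is not
 *	      NULL-checked, so this must include __GFP_DIRECT_RECLAIM so that
 *	      the mempool allocation cannot fail
 */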
void bio_crypt_set_ctx(struct bio *bio, const struct blk_crypto_key *key,
		       const u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE], gfp_t gfp_mask)
{
	struct bio_crypt_ctx *bc = mempool_alloc(bio_crypt_ctx_pool, gfp_mask);

	bc->bc_key = key;
	memcpy(bc->bc_dun, dun, sizeof(bc->bc_dun));

	bio->bi_crypt_context = bc;
}

void __bio_crypt_free_ctx(struct bio *bio)
{
	mempool_free(bio->bi_crypt_context, bio_crypt_ctx_pool);
	bio->bi_crypt_context = NULL;
}

void __bio_crypt_clone(struct bio *dst, struct bio *src, gfp_t gfp_mask)
{
	dst->bi_crypt_context = mempool_alloc(bio_crypt_ctx_pool, gfp_mask);
	*dst->bi_crypt_context = *src->bi_crypt_context;
}
EXPORT_SYMBOL_GPL(__bio_crypt_clone);
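
/*
 * A DUN is carried as BLK_CRYPTO_DUN_ARRAY_SIZE u64 limbs, least significant
 * limb first.  As an illustrative sketch (assuming four limbs): incrementing
 * { 0xFFFFFFFFFFFFFFFF, 0, 0, 0 } by 1 overflows limb 0, and the carry into
 * limb 1 yields { 0, 1, 0, 0 }.
 */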
/* Increments @dun by @inc, treating @dun as a multi-limb integer. */
void bio_crypt_dun_increment(u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE],
			     unsigned int inc)
{
	int i;

	for (i = 0; inc && i < BLK_CRYPTO_DUN_ARRAY_SIZE; i++) {
		dun[i] += inc;
		/*
		 * If the addition in this limb overflowed, then we need to
		 * carry 1 into the next limb. Else the carry is 0.
		 */
		if (dun[i] < inc)
			inc = 1;
		else
			inc = 0;
	}
}
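
/*
 * Advance @bio's encryption context to account for @bio having been advanced
 * by @bytes: e.g. with 4096-byte data units (data_unit_size_bits == 12),
 * advancing by 8192 bytes increments the DUN by 2.
 */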
void __bio_crypt_advance(struct bio *bio, unsigned int bytes)
{
	struct bio_crypt_ctx *bc = bio->bi_crypt_context;

	bio_crypt_dun_increment(bc->bc_dun,
				bytes >> bc->bc_key->data_unit_size_bits);
}

/*
 * Returns true if @bc->bc_dun plus @bytes converted to data units is equal to
 * @next_dun, treating the DUNs as multi-limb integers.
 */
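/*
 * For example, with 4096-byte data units, a context with
 * bc_dun == { 5, 0, 0, 0 } followed by 8192 bytes is contiguous with
 * next_dun == { 7, 0, 0, 0 } (again assuming four DUN limbs).
 */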
bool bio_crypt_dun_is_contiguous(const struct bio_crypt_ctx *bc,
				 unsigned int bytes,
				 const u64 next_dun[BLK_CRYPTO_DUN_ARRAY_SIZE])
{
	int i;
	unsigned int carry = bytes >> bc->bc_key->data_unit_size_bits;

	for (i = 0; i < BLK_CRYPTO_DUN_ARRAY_SIZE; i++) {
		if (bc->bc_dun[i] + carry != next_dun[i])
			return false;
		/*
		 * If the addition in this limb overflowed, then we need to
		 * carry 1 into the next limb. Else the carry is 0.
		 */
		if ((bc->bc_dun[i] + carry) < carry)
			carry = 1;
		else
			carry = 0;
	}

	/* If the DUN wrapped through 0, don't treat it as contiguous. */
	return carry == 0;
}

/*
 * Checks that two bio crypt contexts are compatible - i.e. that
 * they are mergeable except for data_unit_num continuity.
 */
static bool bio_crypt_ctx_compatible(struct bio_crypt_ctx *bc1,
				     struct bio_crypt_ctx *bc2)
{
	if (!bc1)
		return !bc2;

	return bc2 && bc1->bc_key == bc2->bc_key;
}

bool bio_crypt_rq_ctx_compatible(struct request *rq, struct bio *bio)
{
	return bio_crypt_ctx_compatible(rq->crypt_ctx, bio->bi_crypt_context);
}

/*
 * Checks that two bio crypt contexts are compatible, and also
 * that their data_unit_nums are continuous (and can hence be merged)
 * in the order @bc1 followed by @bc2.
 */
bool bio_crypt_ctx_mergeable(struct bio_crypt_ctx *bc1, unsigned int bc1_bytes,
			     struct bio_crypt_ctx *bc2)
{
	if (!bio_crypt_ctx_compatible(bc1, bc2))
		return false;

	return !bc1 || bio_crypt_dun_is_contiguous(bc1, bc1_bytes, bc2->bc_dun);
}

/* Check that all I/O segments are data unit aligned. */
static bool bio_crypt_check_alignment(struct bio *bio)
{
	const unsigned int data_unit_size =
		bio->bi_crypt_context->bc_key->crypto_cfg.data_unit_size;
	struct bvec_iter iter;
	struct bio_vec bv;

	bio_for_each_segment(bv, bio, iter) {
		if (!IS_ALIGNED(bv.bv_len | bv.bv_offset, data_unit_size))
			return false;
	}

	return true;
}
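
/*
 * Program the bio's key into a keyslot in the request_queue's keyslot manager
 * so that the request can be processed by inline encryption hardware.
 */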
blk_status_t __blk_crypto_init_request(struct request *rq)
{
	return blk_ksm_get_slot_for_key(rq->q->ksm, rq->crypt_ctx->bc_key,
					&rq->crypt_keyslot);
}

/**
 * __blk_crypto_free_request - Uninitialize the crypto fields of a request.
 *
 * @rq: The request whose crypto fields to uninitialize.
 *
 * Completely uninitializes the crypto fields of a request. If a keyslot has
 * been programmed into some inline encryption hardware, that keyslot is
 * released. The rq->crypt_ctx is also freed.
 */
void __blk_crypto_free_request(struct request *rq)
{
	blk_ksm_put_slot(rq->crypt_keyslot);
	mempool_free(rq->crypt_ctx, bio_crypt_ctx_pool);
	blk_crypto_rq_set_defaults(rq);
}

/**
 * __blk_crypto_bio_prep - Prepare bio for inline encryption
 *
 * @bio_ptr: pointer to original bio pointer
 *
 * Succeeds if the bio doesn't have inline encryption enabled or if the bio
 * crypt context provided for the bio is supported by the underlying device's
 * inline encryption hardware. Ends the bio with error otherwise.
 *
 * Caller must ensure bio has bio_crypt_ctx.
 *
 * Return: true on success; false on error (and bio->bi_status will be set
 *	   appropriately, and bio_endio() will have been called so bio
 *	   submission should abort).
 */
bool __blk_crypto_bio_prep(struct bio **bio_ptr)
{
	struct bio *bio = *bio_ptr;
	const struct blk_crypto_key *bc_key = bio->bi_crypt_context->bc_key;
	blk_status_t blk_st = BLK_STS_IOERR;

	/* Error if bio has no data. */
	if (WARN_ON_ONCE(!bio_has_data(bio)))
		goto fail;

	if (!bio_crypt_check_alignment(bio))
		goto fail;

	/*
	 * Success if device supports the encryption context.
	 */
	if (!blk_ksm_crypto_cfg_supported(bio->bi_disk->queue->ksm,
					  &bc_key->crypto_cfg)) {
		blk_st = BLK_STS_NOTSUPP;
		goto fail;
	}

	return true;
fail:
	(*bio_ptr)->bi_status = blk_st;
	bio_endio(*bio_ptr);
	return false;
}

/**
 * __blk_crypto_rq_bio_prep - Prepare a request's crypt_ctx when its first bio
 *			      is inserted
 *
 * @rq: The request to prepare
 * @bio: The first bio being inserted into the request
 * @gfp_mask: gfp mask
 */
void __blk_crypto_rq_bio_prep(struct request *rq, struct bio *bio,
			      gfp_t gfp_mask)
{
	if (!rq->crypt_ctx)
		rq->crypt_ctx = mempool_alloc(bio_crypt_ctx_pool, gfp_mask);
	*rq->crypt_ctx = *bio->bi_crypt_context;
}
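
/*
 * A minimal sketch of how an upper layer might drive the key lifecycle
 * (only the blk-crypto and block-layer calls are real API; raw_key, q, and
 * bio are assumed to come from the caller):
 *
 *	struct blk_crypto_key key;
 *	u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE] = { 0 };
 *	int err;
 *
 *	err = blk_crypto_init_key(&key, raw_key,
 *				  BLK_ENCRYPTION_MODE_AES_256_XTS,
 *				  sizeof(u64), 4096);
 *	if (err)
 *		return err;
 *	err = blk_crypto_start_using_key(&key, q);
 *	if (err)
 *		return err;
 *	bio_crypt_set_ctx(bio, &key, dun, GFP_NOIO);
 *	submit_bio(bio);
 *	...
 *	err = blk_crypto_evict_key(q, &key);
 */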

/**
 * blk_crypto_init_key() - Prepare a key for use with blk-crypto
 * @blk_key: Pointer to the blk_crypto_key to initialize.
 * @raw_key: Pointer to the raw key. Must be the correct length for the chosen
 *	     @crypto_mode; see blk_crypto_modes[].
 * @crypto_mode: identifier for the encryption algorithm to use
 * @dun_bytes: number of bytes that will be used to specify the DUN when this
 *	       key is used
 * @data_unit_size: the data unit size to use for en/decryption
 *
 * Return: 0 on success, -errno on failure.  The caller is responsible for
 *	   zeroizing both blk_key and raw_key when done with them.
 */
int blk_crypto_init_key(struct blk_crypto_key *blk_key, const u8 *raw_key,
			enum blk_crypto_mode_num crypto_mode,
			unsigned int dun_bytes,
			unsigned int data_unit_size)
{
	const struct blk_crypto_mode *mode;

	memset(blk_key, 0, sizeof(*blk_key));

	if (crypto_mode >= ARRAY_SIZE(blk_crypto_modes))
		return -EINVAL;

	mode = &blk_crypto_modes[crypto_mode];
	if (mode->keysize == 0)
		return -EINVAL;

	if (dun_bytes == 0 || dun_bytes > BLK_CRYPTO_MAX_IV_SIZE)
		return -EINVAL;

	if (!is_power_of_2(data_unit_size))
		return -EINVAL;

	blk_key->crypto_cfg.crypto_mode = crypto_mode;
	blk_key->crypto_cfg.dun_bytes = dun_bytes;
	blk_key->crypto_cfg.data_unit_size = data_unit_size;
	blk_key->data_unit_size_bits = ilog2(data_unit_size);
	blk_key->size = mode->keysize;
	memcpy(blk_key->raw, raw_key, mode->keysize);

	return 0;
}

bool blk_crypto_config_supported(struct request_queue *q,
				 const struct blk_crypto_config *cfg)
{
	return blk_ksm_crypto_cfg_supported(q->ksm, cfg);
}

/**
 * blk_crypto_start_using_key() - Start using a blk_crypto_key on a device
 * @key: A key to use on the device
 * @q: the request queue for the device
 *
 * Upper layers must call this function to ensure that the hardware supports
 * the key's crypto settings.
 *
 * Return: 0 on success; -ENOPKG if the hardware doesn't support the key
 */
int blk_crypto_start_using_key(const struct blk_crypto_key *key,
			       struct request_queue *q)
{
	if (blk_ksm_crypto_cfg_supported(q->ksm, &key->crypto_cfg))
		return 0;
	return -ENOPKG;
}

/**
 * blk_crypto_evict_key() - Evict a key from any inline encryption hardware
 *			    it may have been programmed into
 * @q: The request queue whose associated inline encryption hardware this key
 *     might have been programmed into
 * @key: The key to evict
 *
 * Upper layers (filesystems) must call this function to ensure that a key is
 * evicted from any hardware that it might have been programmed into.  The key
 * must not be in use by any in-flight IO when this function is called.
 *
 * Return: 0 on success or if key is not present in the q's ksm, -err on error.
 */
int blk_crypto_evict_key(struct request_queue *q,
			 const struct blk_crypto_key *key)
{
	if (blk_ksm_crypto_cfg_supported(q->ksm, &key->crypto_cfg))
		return blk_ksm_evict_key(q->ksm, key);

	return 0;
}