// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2019 Google LLC
 */

/**
 * DOC: blk-crypto profiles
 *
 * 'struct blk_crypto_profile' contains all generic inline encryption-related
 * state for a particular inline encryption device. blk_crypto_profile serves
 * as the way that drivers for inline encryption hardware expose their crypto
 * capabilities and certain functions (e.g., functions to program and evict
 * keys) to upper layers. Device drivers that want to support inline encryption
 * construct a crypto profile, then associate it with the disk's request_queue.
 *
 * If the device has keyslots, then its blk_crypto_profile also handles managing
 * these keyslots in a device-independent way, using the driver-provided
 * functions to program and evict keys as needed. This includes keeping track
 * of which key and how many I/O requests are using each keyslot, getting
 * keyslots for I/O requests, and handling key eviction requests.
 *
 * For more information, see Documentation/block/inline-encryption.rst.
 */
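/*
 * Example (illustrative sketch, not part of this file's API): roughly how a
 * driver with hardware keyslots might set up and register its crypto profile.
 * The "my_*" names, the probe context, and the specific capability values are
 * hypothetical; the blk_crypto_* calls and the profile fields shown are the
 * ones implemented in this file and declared in <linux/blk-crypto-profile.h>.
 *
 *	static int my_driver_init_crypto(struct my_hba *hba,
 *					 struct request_queue *q)
 *	{
 *		struct blk_crypto_profile *profile = &hba->crypto_profile;
 *		int err;
 *
 *		// One keyslot per hardware key register set (count is made up).
 *		err = devm_blk_crypto_profile_init(hba->dev, profile, 32);
 *		if (err)
 *			return err;
 *
 *		// Driver callbacks that actually program/evict hardware keys.
 *		profile->ll_ops.keyslot_program = my_keyslot_program;
 *		profile->ll_ops.keyslot_evict = my_keyslot_evict;
 *		// Needed only if the device uses runtime PM.
 *		profile->dev = hba->dev;
 *
 *		// Advertise capabilities: modes_supported[mode] is a bitmask of
 *		// supported data unit sizes (4096 assumed here), and
 *		// max_dun_bytes_supported bounds the DUN length.
 *		profile->modes_supported[BLK_ENCRYPTION_MODE_AES_256_XTS] |= 4096;
 *		profile->max_dun_bytes_supported = 8;
 *
 *		// Finally expose the profile on the disk's request_queue.
 *		if (!blk_crypto_register(profile, q))
 *			return -EINVAL;
 *		return 0;
 *	}
 */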
#define pr_fmt(fmt) "blk-crypto: " fmt

#include <linux/blk-crypto-profile.h>
#include <linux/device.h>
#include <linux/atomic.h>
#include <linux/mutex.h>
#include <linux/pm_runtime.h>
#include <linux/wait.h>
#include <linux/blkdev.h>
#include <linux/blk-integrity.h>
#include "blk-crypto-internal.h"

struct blk_crypto_keyslot {
	atomic_t slot_refs;
	struct list_head idle_slot_node;
	struct hlist_node hash_node;
	const struct blk_crypto_key *key;
	struct blk_crypto_profile *profile;
};

static inline void blk_crypto_hw_enter(struct blk_crypto_profile *profile)
{
	/*
	 * Calling into the driver requires profile->lock held and the device
	 * resumed. But we must resume the device first, since that can acquire
	 * and release profile->lock via blk_crypto_reprogram_all_keys().
	 */
	if (profile->dev)
		pm_runtime_get_sync(profile->dev);
	down_write(&profile->lock);
}

static inline void blk_crypto_hw_exit(struct blk_crypto_profile *profile)
{
	up_write(&profile->lock);
	if (profile->dev)
		pm_runtime_put_sync(profile->dev);
}

/**
 * blk_crypto_profile_init() - Initialize a blk_crypto_profile
 * @profile: the blk_crypto_profile to initialize
 * @num_slots: the number of keyslots
 *
 * Storage drivers must call this when starting to set up a blk_crypto_profile,
 * before filling in additional fields.
 *
 * Return: 0 on success, or else a negative error code.
 */
int blk_crypto_profile_init(struct blk_crypto_profile *profile,
			    unsigned int num_slots)
{
	unsigned int slot;
	unsigned int i;
	unsigned int slot_hashtable_size;

	memset(profile, 0, sizeof(*profile));
	init_rwsem(&profile->lock);

	if (num_slots == 0)
		return 0;

	/* Initialize keyslot management data. */

	profile->slots = kvcalloc(num_slots, sizeof(profile->slots[0]),
				  GFP_KERNEL);
	if (!profile->slots)
		return -ENOMEM;

	profile->num_slots = num_slots;

	init_waitqueue_head(&profile->idle_slots_wait_queue);
	INIT_LIST_HEAD(&profile->idle_slots);

	for (slot = 0; slot < num_slots; slot++) {
		profile->slots[slot].profile = profile;
		list_add_tail(&profile->slots[slot].idle_slot_node,
			      &profile->idle_slots);
	}

	spin_lock_init(&profile->idle_slots_lock);

	slot_hashtable_size = roundup_pow_of_two(num_slots);
	/*
	 * hash_ptr() assumes bits != 0, so ensure the hash table has at least 2
	 * buckets. This only makes a difference when there is only 1 keyslot.
	 */
	if (slot_hashtable_size < 2)
		slot_hashtable_size = 2;

	profile->log_slot_ht_size = ilog2(slot_hashtable_size);
	profile->slot_hashtable =
		kvmalloc_array(slot_hashtable_size,
			       sizeof(profile->slot_hashtable[0]), GFP_KERNEL);
	if (!profile->slot_hashtable)
		goto err_destroy;
	for (i = 0; i < slot_hashtable_size; i++)
		INIT_HLIST_HEAD(&profile->slot_hashtable[i]);

	return 0;

err_destroy:
	blk_crypto_profile_destroy(profile);
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(blk_crypto_profile_init);

static void blk_crypto_profile_destroy_callback(void *profile)
{
	blk_crypto_profile_destroy(profile);
}

/**
 * devm_blk_crypto_profile_init() - Resource-managed blk_crypto_profile_init()
 * @dev: the device which owns the blk_crypto_profile
 * @profile: the blk_crypto_profile to initialize
 * @num_slots: the number of keyslots
 *
 * Like blk_crypto_profile_init(), but causes blk_crypto_profile_destroy() to be
 * called automatically on driver detach.
 *
 * Return: 0 on success, or else a negative error code.
 */
int devm_blk_crypto_profile_init(struct device *dev,
				 struct blk_crypto_profile *profile,
				 unsigned int num_slots)
{
	int err = blk_crypto_profile_init(profile, num_slots);

	if (err)
		return err;

	return devm_add_action_or_reset(dev,
					blk_crypto_profile_destroy_callback,
					profile);
}
EXPORT_SYMBOL_GPL(devm_blk_crypto_profile_init);

static inline struct hlist_head *
blk_crypto_hash_bucket_for_key(struct blk_crypto_profile *profile,
			       const struct blk_crypto_key *key)
{
	return &profile->slot_hashtable[
			hash_ptr(key, profile->log_slot_ht_size)];
}

static void
blk_crypto_remove_slot_from_lru_list(struct blk_crypto_keyslot *slot)
{
	struct blk_crypto_profile *profile = slot->profile;
	unsigned long flags;

	spin_lock_irqsave(&profile->idle_slots_lock, flags);
	list_del(&slot->idle_slot_node);
	spin_unlock_irqrestore(&profile->idle_slots_lock, flags);
}

static struct blk_crypto_keyslot *
blk_crypto_find_keyslot(struct blk_crypto_profile *profile,
			const struct blk_crypto_key *key)
{
	const struct hlist_head *head =
		blk_crypto_hash_bucket_for_key(profile, key);
	struct blk_crypto_keyslot *slotp;

	hlist_for_each_entry(slotp, head, hash_node) {
		if (slotp->key == key)
			return slotp;
	}
	return NULL;
}

static struct blk_crypto_keyslot *
blk_crypto_find_and_grab_keyslot(struct blk_crypto_profile *profile,
				 const struct blk_crypto_key *key)
{
	struct blk_crypto_keyslot *slot;

	slot = blk_crypto_find_keyslot(profile, key);
	if (!slot)
		return NULL;
	if (atomic_inc_return(&slot->slot_refs) == 1) {
		/* Took first reference to this slot; remove it from LRU list */
		blk_crypto_remove_slot_from_lru_list(slot);
	}
	return slot;
}

/**
 * blk_crypto_keyslot_index() - Get the index of a keyslot
 * @slot: a keyslot that blk_crypto_get_keyslot() returned
 *
 * Return: the 0-based index of the keyslot within the device's keyslots.
 */
unsigned int blk_crypto_keyslot_index(struct blk_crypto_keyslot *slot)
{
	return slot - slot->profile->slots;
}
EXPORT_SYMBOL_GPL(blk_crypto_keyslot_index);
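/*
 * Example (illustrative sketch): the keyslot lifecycle from the caller's side,
 * tying together blk_crypto_get_keyslot(), blk_crypto_keyslot_index(), and
 * blk_crypto_put_keyslot(). The surrounding request-handling code is
 * hypothetical; only the blk_crypto_* calls are real, with the semantics
 * documented on the functions in this file.
 *
 *	struct blk_crypto_keyslot *slot;
 *	blk_status_t status;
 *
 *	status = blk_crypto_get_keyslot(profile, key, &slot);
 *	if (status != BLK_STS_OK)
 *		return status;	// the key could not be programmed
 *
 *	if (slot) {
 *		// Device has keyslots: tell the hardware which one to use.
 *		unsigned int slot_idx = blk_crypto_keyslot_index(slot);
 *		// ... issue the I/O tagged with slot_idx ...
 *	} else {
 *		// Device has no keyslots (num_slots == 0); nothing to program.
 *	}
 *
 *	// ... once the I/O has completed ...
 *	if (slot)
 *		blk_crypto_put_keyslot(slot);
 */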
/**
 * blk_crypto_get_keyslot() - Get a keyslot for a key, if needed.
 * @profile: the crypto profile of the device the key will be used on
 * @key: the key that will be used
 * @slot_ptr: If a keyslot is allocated, an opaque pointer to the keyslot struct
 *	      will be stored here. blk_crypto_put_keyslot() must be called
 *	      later to release it. Otherwise, NULL will be stored here.
 *
 * If the device has keyslots, this gets a keyslot that's been programmed with
 * the specified key. If the key is already in a slot, this reuses it;
 * otherwise this waits for a slot to become idle and programs the key into it.
 *
 * Context: Process context. Takes and releases profile->lock.
 * Return: BLK_STS_OK on success, meaning that either a keyslot was allocated or
 *	   one wasn't needed; or a blk_status_t error on failure.
 */
blk_status_t blk_crypto_get_keyslot(struct blk_crypto_profile *profile,
				    const struct blk_crypto_key *key,
				    struct blk_crypto_keyslot **slot_ptr)
{
	struct blk_crypto_keyslot *slot;
	int slot_idx;
	int err;

	*slot_ptr = NULL;

	/*
	 * If the device has no concept of "keyslots", then there is no need to
	 * get one.
	 */
	if (profile->num_slots == 0)
		return BLK_STS_OK;

	down_read(&profile->lock);
	slot = blk_crypto_find_and_grab_keyslot(profile, key);
	up_read(&profile->lock);
	if (slot)
		goto success;

	for (;;) {
		blk_crypto_hw_enter(profile);
		slot = blk_crypto_find_and_grab_keyslot(profile, key);
		if (slot) {
			blk_crypto_hw_exit(profile);
			goto success;
		}

		/*
		 * If we're here, that means there wasn't a slot that was
		 * already programmed with the key. So try to program it.
		 */
		if (!list_empty(&profile->idle_slots))
			break;

		blk_crypto_hw_exit(profile);
		wait_event(profile->idle_slots_wait_queue,
			   !list_empty(&profile->idle_slots));
	}

	slot = list_first_entry(&profile->idle_slots, struct blk_crypto_keyslot,
				idle_slot_node);
	slot_idx = blk_crypto_keyslot_index(slot);

	err = profile->ll_ops.keyslot_program(profile, key, slot_idx);
	if (err) {
		wake_up(&profile->idle_slots_wait_queue);
		blk_crypto_hw_exit(profile);
		return errno_to_blk_status(err);
	}

	/* Move this slot to the hash list for the new key. */
	if (slot->key)
		hlist_del(&slot->hash_node);
	slot->key = key;
	hlist_add_head(&slot->hash_node,
		       blk_crypto_hash_bucket_for_key(profile, key));

	atomic_set(&slot->slot_refs, 1);

	blk_crypto_remove_slot_from_lru_list(slot);

	blk_crypto_hw_exit(profile);
success:
	*slot_ptr = slot;
	return BLK_STS_OK;
}

/**
 * blk_crypto_put_keyslot() - Release a reference to a keyslot
 * @slot: The keyslot to release the reference of
 *
 * Context: Any context.
 */
void blk_crypto_put_keyslot(struct blk_crypto_keyslot *slot)
{
	struct blk_crypto_profile *profile = slot->profile;
	unsigned long flags;

	if (atomic_dec_and_lock_irqsave(&slot->slot_refs,
					&profile->idle_slots_lock, flags)) {
		list_add_tail(&slot->idle_slot_node, &profile->idle_slots);
		spin_unlock_irqrestore(&profile->idle_slots_lock, flags);
		wake_up(&profile->idle_slots_wait_queue);
	}
}

/**
 * __blk_crypto_cfg_supported() - Check whether the given crypto profile
 *				  supports the given crypto configuration.
 * @profile: the crypto profile to check
 * @cfg: the crypto configuration to check for
 *
 * Return: %true if @profile supports the given @cfg.
 */
bool __blk_crypto_cfg_supported(struct blk_crypto_profile *profile,
				const struct blk_crypto_config *cfg)
{
	if (!profile)
		return false;
	if (!(profile->modes_supported[cfg->crypto_mode] & cfg->data_unit_size))
		return false;
	if (profile->max_dun_bytes_supported < cfg->dun_bytes)
		return false;
	return true;
}
/*
 * This is an internal function that evicts a key from an inline encryption
 * device that can be either a real device or the blk-crypto-fallback "device".
 * It is used only by blk_crypto_evict_key(); see that function for details.
 */
int __blk_crypto_evict_key(struct blk_crypto_profile *profile,
			   const struct blk_crypto_key *key)
{
	struct blk_crypto_keyslot *slot;
	int err;

	if (profile->num_slots == 0) {
		if (profile->ll_ops.keyslot_evict) {
			blk_crypto_hw_enter(profile);
			err = profile->ll_ops.keyslot_evict(profile, key, -1);
			blk_crypto_hw_exit(profile);
			return err;
		}
		return 0;
	}

	blk_crypto_hw_enter(profile);
	slot = blk_crypto_find_keyslot(profile, key);
	if (!slot) {
		/*
		 * Not an error, since a key not in use by I/O is not guaranteed
		 * to be in a keyslot. There can be more keys than keyslots.
		 */
		err = 0;
		goto out;
	}

	if (WARN_ON_ONCE(atomic_read(&slot->slot_refs) != 0)) {
		/* BUG: key is still in use by I/O */
		err = -EBUSY;
		goto out_remove;
	}
	err = profile->ll_ops.keyslot_evict(profile, key,
					    blk_crypto_keyslot_index(slot));
out_remove:
	/*
	 * Callers free the key even on error, so unlink the key from the hash
	 * table and clear slot->key even on error.
	 */
	hlist_del(&slot->hash_node);
	slot->key = NULL;
out:
	blk_crypto_hw_exit(profile);
	return err;
}

/**
 * blk_crypto_reprogram_all_keys() - Re-program all keyslots.
 * @profile: The crypto profile
 *
 * Re-program all keyslots that are supposed to have a key programmed. This is
 * intended only for use by drivers for hardware that loses its keys on reset.
 *
 * Context: Process context. Takes and releases profile->lock.
 */
void blk_crypto_reprogram_all_keys(struct blk_crypto_profile *profile)
{
	unsigned int slot;

	if (profile->num_slots == 0)
		return;

	/* This is for device initialization, so don't resume the device */
	down_write(&profile->lock);
	for (slot = 0; slot < profile->num_slots; slot++) {
		const struct blk_crypto_key *key = profile->slots[slot].key;
		int err;

		if (!key)
			continue;

		err = profile->ll_ops.keyslot_program(profile, key, slot);
		WARN_ON(err);
	}
	up_write(&profile->lock);
}
EXPORT_SYMBOL_GPL(blk_crypto_reprogram_all_keys);
void blk_crypto_profile_destroy(struct blk_crypto_profile *profile)
{
	if (!profile)
		return;
	kvfree(profile->slot_hashtable);
	kvfree_sensitive(profile->slots,
			 sizeof(profile->slots[0]) * profile->num_slots);
	memzero_explicit(profile, sizeof(*profile));
}
EXPORT_SYMBOL_GPL(blk_crypto_profile_destroy);

bool blk_crypto_register(struct blk_crypto_profile *profile,
			 struct request_queue *q)
{
	if (blk_integrity_queue_supports_integrity(q)) {
		pr_warn("Integrity and hardware inline encryption are not supported together. Disabling hardware inline encryption.\n");
		return false;
	}
	q->crypto_profile = profile;
	return true;
}
EXPORT_SYMBOL_GPL(blk_crypto_register);

/**
 * blk_crypto_intersect_capabilities() - restrict supported crypto capabilities
 *					 by child device
 * @parent: the crypto profile for the parent device
 * @child: the crypto profile for the child device, or NULL
 *
 * This clears all crypto capabilities in @parent that aren't set in @child. If
 * @child is NULL, then this clears all parent capabilities.
 *
 * Only use this when setting up the crypto profile for a layered device, before
 * it's been exposed.
 */
void blk_crypto_intersect_capabilities(struct blk_crypto_profile *parent,
				       const struct blk_crypto_profile *child)
{
	if (child) {
		unsigned int i;

		parent->max_dun_bytes_supported =
			min(parent->max_dun_bytes_supported,
			    child->max_dun_bytes_supported);
		for (i = 0; i < ARRAY_SIZE(child->modes_supported); i++)
			parent->modes_supported[i] &= child->modes_supported[i];
	} else {
		parent->max_dun_bytes_supported = 0;
		memset(parent->modes_supported, 0,
		       sizeof(parent->modes_supported));
	}
}
EXPORT_SYMBOL_GPL(blk_crypto_intersect_capabilities);

/**
 * blk_crypto_has_capabilities() - Check whether @target supports at least all
 *				   the crypto capabilities that @reference does.
 * @target: the target profile
 * @reference: the reference profile
 *
 * Return: %true if @target supports all the crypto capabilities of @reference.
 */
bool blk_crypto_has_capabilities(const struct blk_crypto_profile *target,
				 const struct blk_crypto_profile *reference)
{
	int i;

	if (!reference)
		return true;

	if (!target)
		return false;

	for (i = 0; i < ARRAY_SIZE(target->modes_supported); i++) {
		if (reference->modes_supported[i] & ~target->modes_supported[i])
			return false;
	}

	if (reference->max_dun_bytes_supported >
	    target->max_dun_bytes_supported)
		return false;

	return true;
}
EXPORT_SYMBOL_GPL(blk_crypto_has_capabilities);

/**
 * blk_crypto_update_capabilities() - Update the capabilities of a crypto
 *				      profile to match those of another crypto
 *				      profile.
 * @dst: The crypto profile whose capabilities to update.
 * @src: The crypto profile whose capabilities this function will update @dst's
 *	 capabilities to.
 *
 * Blk-crypto requires that crypto capabilities that were
 * advertised when a bio was created continue to be supported by the
 * device until that bio is ended. This in turn means that a device cannot
 * shrink its advertised crypto capabilities without any explicit
 * synchronization with upper layers. So if there's no such explicit
 * synchronization, @src must support all the crypto capabilities that
 * @dst does (i.e. we need blk_crypto_has_capabilities(@src, @dst)).
 *
 * Note also that as long as the crypto capabilities are being expanded, the
 * order of updates becoming visible is not important because it's alright
 * for blk-crypto to see stale values - they only cause blk-crypto to
 * believe that a crypto capability isn't supported when it actually is (which
 * might result in blk-crypto-fallback being used if available, or the bio being
 * failed).
 */
void blk_crypto_update_capabilities(struct blk_crypto_profile *dst,
				    const struct blk_crypto_profile *src)
{
	memcpy(dst->modes_supported, src->modes_supported,
	       sizeof(dst->modes_supported));

	dst->max_dun_bytes_supported = src->max_dun_bytes_supported;
}
EXPORT_SYMBOL_GPL(blk_crypto_update_capabilities);