xref: /openbmc/linux/block/blk-crypto-profile.c (revision 64cf26f0)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2019 Google LLC
 */

/**
 * DOC: blk-crypto profiles
 *
 * 'struct blk_crypto_profile' contains all generic inline encryption-related
 * state for a particular inline encryption device.  blk_crypto_profile serves
 * as the way that drivers for inline encryption hardware expose their crypto
 * capabilities and certain functions (e.g., functions to program and evict
 * keys) to upper layers.  Device drivers that want to support inline encryption
 * construct a crypto profile, then associate it with the disk's request_queue.
 *
 * If the device has keyslots, then its blk_crypto_profile also handles managing
 * these keyslots in a device-independent way, using the driver-provided
 * functions to program and evict keys as needed.  This includes keeping track
 * of which key and how many I/O requests are using each keyslot, getting
 * keyslots for I/O requests, and handling key eviction requests.
 *
 * For more information, see Documentation/block/inline-encryption.rst.
 */
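
/*
 * Illustrative sketch (not part of the original file): a driver for
 * keyslot-based inline encryption hardware would typically wire things up
 * roughly as follows.  The names my_hba, my_keyslot_program() and
 * my_keyslot_evict() are hypothetical placeholders for driver-specific code.
 * modes_supported[] holds a bitmask of supported data unit sizes per mode.
 *
 *	static const struct blk_crypto_ll_ops my_crypto_ll_ops = {
 *		.keyslot_program	= my_keyslot_program,
 *		.keyslot_evict		= my_keyslot_evict,
 *	};
 *
 *	static int my_hba_init_crypto(struct my_hba *hba)
 *	{
 *		int err = blk_crypto_profile_init(&hba->crypto_profile, 32);
 *
 *		if (err)
 *			return err;
 *		hba->crypto_profile.ll_ops = my_crypto_ll_ops;
 *		hba->crypto_profile.dev = hba->dev;
 *		hba->crypto_profile.max_dun_bytes_supported = 8;
 *		hba->crypto_profile.modes_supported[BLK_ENCRYPTION_MODE_AES_256_XTS] |=
 *			512 | 4096;
 *		return 0;
 *	}
 *
 * Once the request_queue exists, the driver would then call
 * blk_crypto_register(&hba->crypto_profile, q) to expose the profile.
 */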

#define pr_fmt(fmt) "blk-crypto: " fmt

#include <linux/blk-crypto-profile.h>
#include <linux/device.h>
#include <linux/atomic.h>
#include <linux/mutex.h>
#include <linux/pm_runtime.h>
#include <linux/wait.h>
#include <linux/blkdev.h>
#include <linux/blk-integrity.h>

struct blk_crypto_keyslot {
	atomic_t slot_refs;
	struct list_head idle_slot_node;
	struct hlist_node hash_node;
	const struct blk_crypto_key *key;
	struct blk_crypto_profile *profile;
};

static inline void blk_crypto_hw_enter(struct blk_crypto_profile *profile)
{
	/*
	 * Calling into the driver requires profile->lock held and the device
	 * resumed.  But we must resume the device first, since that can acquire
	 * and release profile->lock via blk_crypto_reprogram_all_keys().
	 */
	if (profile->dev)
		pm_runtime_get_sync(profile->dev);
	down_write(&profile->lock);
}

static inline void blk_crypto_hw_exit(struct blk_crypto_profile *profile)
{
	up_write(&profile->lock);
	if (profile->dev)
		pm_runtime_put_sync(profile->dev);
}

/**
 * blk_crypto_profile_init() - Initialize a blk_crypto_profile
 * @profile: the blk_crypto_profile to initialize
 * @num_slots: the number of keyslots
 *
 * Storage drivers must call this when starting to set up a blk_crypto_profile,
 * before filling in additional fields.
 *
 * Return: 0 on success, or else a negative error code.
 */
int blk_crypto_profile_init(struct blk_crypto_profile *profile,
			    unsigned int num_slots)
{
	unsigned int slot;
	unsigned int i;
	unsigned int slot_hashtable_size;

	memset(profile, 0, sizeof(*profile));
	init_rwsem(&profile->lock);

	if (num_slots == 0)
		return 0;

	/* Initialize keyslot management data. */

	profile->slots = kvcalloc(num_slots, sizeof(profile->slots[0]),
				  GFP_KERNEL);
	if (!profile->slots)
		return -ENOMEM;

	profile->num_slots = num_slots;

	init_waitqueue_head(&profile->idle_slots_wait_queue);
	INIT_LIST_HEAD(&profile->idle_slots);

	for (slot = 0; slot < num_slots; slot++) {
		profile->slots[slot].profile = profile;
		list_add_tail(&profile->slots[slot].idle_slot_node,
			      &profile->idle_slots);
	}

	spin_lock_init(&profile->idle_slots_lock);

	slot_hashtable_size = roundup_pow_of_two(num_slots);
	/*
	 * hash_ptr() assumes bits != 0, so ensure the hash table has at least 2
	 * buckets.  This only makes a difference when there is only 1 keyslot.
	 */
	if (slot_hashtable_size < 2)
		slot_hashtable_size = 2;

	profile->log_slot_ht_size = ilog2(slot_hashtable_size);
	profile->slot_hashtable =
		kvmalloc_array(slot_hashtable_size,
			       sizeof(profile->slot_hashtable[0]), GFP_KERNEL);
	if (!profile->slot_hashtable)
		goto err_destroy;
	for (i = 0; i < slot_hashtable_size; i++)
		INIT_HLIST_HEAD(&profile->slot_hashtable[i]);

	return 0;

err_destroy:
	blk_crypto_profile_destroy(profile);
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(blk_crypto_profile_init);
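
/*
 * A minimal sketch of the non-device-managed pattern (the surrounding driver
 * code is hypothetical): blk_crypto_profile_init() must eventually be paired
 * with blk_crypto_profile_destroy().
 *
 *	err = blk_crypto_profile_init(&hba->crypto_profile, num_keyslots);
 *	if (err)
 *		return err;
 *
 * and later, on teardown or on a probe error path:
 *
 *	blk_crypto_profile_destroy(&hba->crypto_profile);
 */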

static void blk_crypto_profile_destroy_callback(void *profile)
{
	blk_crypto_profile_destroy(profile);
}

/**
 * devm_blk_crypto_profile_init() - Resource-managed blk_crypto_profile_init()
 * @dev: the device which owns the blk_crypto_profile
 * @profile: the blk_crypto_profile to initialize
 * @num_slots: the number of keyslots
 *
 * Like blk_crypto_profile_init(), but causes blk_crypto_profile_destroy() to be
 * called automatically on driver detach.
 *
 * Return: 0 on success, or else a negative error code.
 */
int devm_blk_crypto_profile_init(struct device *dev,
				 struct blk_crypto_profile *profile,
				 unsigned int num_slots)
{
	int err = blk_crypto_profile_init(profile, num_slots);

	if (err)
		return err;

	return devm_add_action_or_reset(dev,
					blk_crypto_profile_destroy_callback,
					profile);
}
EXPORT_SYMBOL_GPL(devm_blk_crypto_profile_init);
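
/*
 * Sketch of the device-managed variant (hypothetical driver code): the
 * destroy step is registered as a devm action, so no explicit cleanup call
 * is needed on the error or detach paths.
 *
 *	err = devm_blk_crypto_profile_init(hba->dev, &hba->crypto_profile,
 *					   num_keyslots);
 *	if (err)
 *		return err;
 */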

static inline struct hlist_head *
blk_crypto_hash_bucket_for_key(struct blk_crypto_profile *profile,
			       const struct blk_crypto_key *key)
{
	return &profile->slot_hashtable[
			hash_ptr(key, profile->log_slot_ht_size)];
}

static void
blk_crypto_remove_slot_from_lru_list(struct blk_crypto_keyslot *slot)
{
	struct blk_crypto_profile *profile = slot->profile;
	unsigned long flags;

	spin_lock_irqsave(&profile->idle_slots_lock, flags);
	list_del(&slot->idle_slot_node);
	spin_unlock_irqrestore(&profile->idle_slots_lock, flags);
}

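/* Caller must hold profile->lock (for read or for write). */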
static struct blk_crypto_keyslot *
blk_crypto_find_keyslot(struct blk_crypto_profile *profile,
			const struct blk_crypto_key *key)
{
	const struct hlist_head *head =
		blk_crypto_hash_bucket_for_key(profile, key);
	struct blk_crypto_keyslot *slotp;

	hlist_for_each_entry(slotp, head, hash_node) {
		if (slotp->key == key)
			return slotp;
	}
	return NULL;
}

static struct blk_crypto_keyslot *
blk_crypto_find_and_grab_keyslot(struct blk_crypto_profile *profile,
				 const struct blk_crypto_key *key)
{
	struct blk_crypto_keyslot *slot;

	slot = blk_crypto_find_keyslot(profile, key);
	if (!slot)
		return NULL;
	if (atomic_inc_return(&slot->slot_refs) == 1) {
		/* Took first reference to this slot; remove it from LRU list */
		blk_crypto_remove_slot_from_lru_list(slot);
	}
	return slot;
}

/**
 * blk_crypto_keyslot_index() - Get the index of a keyslot
 * @slot: a keyslot that blk_crypto_get_keyslot() returned
 *
 * Return: the 0-based index of the keyslot within the device's keyslots.
 */
unsigned int blk_crypto_keyslot_index(struct blk_crypto_keyslot *slot)
{
	return slot - slot->profile->slots;
}
EXPORT_SYMBOL_GPL(blk_crypto_keyslot_index);
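
/*
 * Illustrative sketch: a driver that has been handed a keyslot pointer (for
 * example, one returned by blk_crypto_get_keyslot()) converts it to the
 * numeric slot it programs into its hardware command.  The command fields
 * below are hypothetical.
 *
 *	unsigned int slot_idx = blk_crypto_keyslot_index(slot);
 *
 *	my_cmd->crypto_enable = true;
 *	my_cmd->crypto_key_index = slot_idx;
 */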

/**
 * blk_crypto_get_keyslot() - Get a keyslot for a key, if needed.
 * @profile: the crypto profile of the device the key will be used on
 * @key: the key that will be used
 * @slot_ptr: If a keyslot is allocated, an opaque pointer to the keyslot struct
 *	      will be stored here; otherwise NULL will be stored here.
 *
 * If the device has keyslots, this gets a keyslot that's been programmed with
 * the specified key.  If the key is already in a slot, this reuses it;
 * otherwise this waits for a slot to become idle and programs the key into it.
 *
 * This must be paired with a call to blk_crypto_put_keyslot().
 *
 * Context: Process context. Takes and releases profile->lock.
 * Return: BLK_STS_OK on success, meaning that either a keyslot was allocated or
 *	   one wasn't needed; or a blk_status_t error on failure.
 */
blk_status_t blk_crypto_get_keyslot(struct blk_crypto_profile *profile,
				    const struct blk_crypto_key *key,
				    struct blk_crypto_keyslot **slot_ptr)
{
	struct blk_crypto_keyslot *slot;
	int slot_idx;
	int err;

	*slot_ptr = NULL;

	/*
	 * If the device has no concept of "keyslots", then there is no need to
	 * get one.
	 */
	if (profile->num_slots == 0)
		return BLK_STS_OK;

	down_read(&profile->lock);
	slot = blk_crypto_find_and_grab_keyslot(profile, key);
	up_read(&profile->lock);
	if (slot)
		goto success;

	for (;;) {
		blk_crypto_hw_enter(profile);
		slot = blk_crypto_find_and_grab_keyslot(profile, key);
		if (slot) {
			blk_crypto_hw_exit(profile);
			goto success;
		}

		/*
		 * If we're here, that means there wasn't a slot that was
		 * already programmed with the key. So try to program it.
		 */
		if (!list_empty(&profile->idle_slots))
			break;

		blk_crypto_hw_exit(profile);
		wait_event(profile->idle_slots_wait_queue,
			   !list_empty(&profile->idle_slots));
	}

	slot = list_first_entry(&profile->idle_slots, struct blk_crypto_keyslot,
				idle_slot_node);
	slot_idx = blk_crypto_keyslot_index(slot);

	err = profile->ll_ops.keyslot_program(profile, key, slot_idx);
	if (err) {
		wake_up(&profile->idle_slots_wait_queue);
		blk_crypto_hw_exit(profile);
		return errno_to_blk_status(err);
	}

	/* Move this slot to the hash list for the new key. */
	if (slot->key)
		hlist_del(&slot->hash_node);
	slot->key = key;
	hlist_add_head(&slot->hash_node,
		       blk_crypto_hash_bucket_for_key(profile, key));

	atomic_set(&slot->slot_refs, 1);

	blk_crypto_remove_slot_from_lru_list(slot);

	blk_crypto_hw_exit(profile);
success:
	*slot_ptr = slot;
	return BLK_STS_OK;
}
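
/*
 * Illustrative sketch of the expected pairing (the caller code here is
 * hypothetical; within the kernel, the blk-crypto core performs this when
 * issuing encrypted I/O):
 *
 *	struct blk_crypto_keyslot *slot;
 *	blk_status_t status;
 *
 *	status = blk_crypto_get_keyslot(profile, key, &slot);
 *	if (status != BLK_STS_OK)
 *		return status;
 *
 * At this point slot may be NULL if the device has no keyslots.  The caller
 * issues the I/O, then releases its reference:
 *
 *	blk_crypto_put_keyslot(slot);
 */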

/**
 * blk_crypto_put_keyslot() - Release a reference to a keyslot
 * @slot: The keyslot to release the reference of (may be NULL).
 *
 * Context: Any context.
 */
void blk_crypto_put_keyslot(struct blk_crypto_keyslot *slot)
{
	struct blk_crypto_profile *profile;
	unsigned long flags;

	if (!slot)
		return;

	profile = slot->profile;

	if (atomic_dec_and_lock_irqsave(&slot->slot_refs,
					&profile->idle_slots_lock, flags)) {
		list_add_tail(&slot->idle_slot_node, &profile->idle_slots);
		spin_unlock_irqrestore(&profile->idle_slots_lock, flags);
		wake_up(&profile->idle_slots_wait_queue);
	}
}

/**
 * __blk_crypto_cfg_supported() - Check whether the given crypto profile
 *				  supports the given crypto configuration.
 * @profile: the crypto profile to check
 * @cfg: the crypto configuration to check for
 *
 * Return: %true if @profile supports the given @cfg.
 */
bool __blk_crypto_cfg_supported(struct blk_crypto_profile *profile,
				const struct blk_crypto_config *cfg)
{
	if (!profile)
		return false;
	if (!(profile->modes_supported[cfg->crypto_mode] & cfg->data_unit_size))
		return false;
	if (profile->max_dun_bytes_supported < cfg->dun_bytes)
		return false;
	return true;
}
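
/*
 * Example of how a configuration is expressed (the values here are made up):
 * modes_supported[] holds a bitmask of supported data unit sizes for each
 * crypto mode, and cfg->data_unit_size is a power of two, so the '&' above
 * simply tests membership in that bitmask.
 *
 *	struct blk_crypto_config cfg = {
 *		.crypto_mode	= BLK_ENCRYPTION_MODE_AES_256_XTS,
 *		.data_unit_size	= 4096,
 *		.dun_bytes	= 8,
 *	};
 *
 *	if (__blk_crypto_cfg_supported(q->crypto_profile, &cfg))
 *		...the hardware can handle this configuration...
 */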

/**
 * __blk_crypto_evict_key() - Evict a key from a device.
 * @profile: the crypto profile of the device
 * @key: the key to evict.  It must not still be used in any I/O.
 *
 * If the device has keyslots, this finds the keyslot (if any) that contains the
 * specified key and calls the driver's keyslot_evict function to evict it.
 *
 * Otherwise, this just calls the driver's keyslot_evict function if it is
 * implemented, passing just the key (without any particular keyslot).  This
 * allows layered devices to evict the key from their underlying devices.
 *
 * Context: Process context. Takes and releases profile->lock.
 * Return: 0 on success or if there's no keyslot with the specified key, -EBUSY
 *	   if the keyslot is still in use, or another -errno value on other
 *	   error.
 */
int __blk_crypto_evict_key(struct blk_crypto_profile *profile,
			   const struct blk_crypto_key *key)
{
	struct blk_crypto_keyslot *slot;
	int err = 0;

	if (profile->num_slots == 0) {
		if (profile->ll_ops.keyslot_evict) {
			blk_crypto_hw_enter(profile);
			err = profile->ll_ops.keyslot_evict(profile, key, -1);
			blk_crypto_hw_exit(profile);
			return err;
		}
		return 0;
	}

	blk_crypto_hw_enter(profile);
	slot = blk_crypto_find_keyslot(profile, key);
	if (!slot)
		goto out_unlock;

	if (WARN_ON_ONCE(atomic_read(&slot->slot_refs) != 0)) {
		err = -EBUSY;
		goto out_unlock;
	}
	err = profile->ll_ops.keyslot_evict(profile, key,
					    blk_crypto_keyslot_index(slot));
	if (err)
		goto out_unlock;

	hlist_del(&slot->hash_node);
	slot->key = NULL;
	err = 0;
out_unlock:
	blk_crypto_hw_exit(profile);
	return err;
}

/**
 * blk_crypto_reprogram_all_keys() - Re-program all keyslots.
 * @profile: The crypto profile
 *
 * Re-program all keyslots that are supposed to have a key programmed.  This is
 * intended only for use by drivers for hardware that loses its keys on reset.
 *
 * Context: Process context. Takes and releases profile->lock.
 */
void blk_crypto_reprogram_all_keys(struct blk_crypto_profile *profile)
{
	unsigned int slot;

	if (profile->num_slots == 0)
		return;

	/* This is for device initialization, so don't resume the device */
	down_write(&profile->lock);
	for (slot = 0; slot < profile->num_slots; slot++) {
		const struct blk_crypto_key *key = profile->slots[slot].key;
		int err;

		if (!key)
			continue;

		err = profile->ll_ops.keyslot_program(profile, key, slot);
		WARN_ON(err);
	}
	up_write(&profile->lock);
}
EXPORT_SYMBOL_GPL(blk_crypto_reprogram_all_keys);
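
/*
 * Illustrative sketch: a driver for hardware that loses its keyslots across
 * a controller reset would call this after re-enabling the crypto engine
 * (the reset handler below is hypothetical):
 *
 *	static void my_hba_crypto_restore(struct my_hba *hba)
 *	{
 *		...re-enable the inline crypto engine...
 *		blk_crypto_reprogram_all_keys(&hba->crypto_profile);
 *	}
 */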

void blk_crypto_profile_destroy(struct blk_crypto_profile *profile)
{
	if (!profile)
		return;
	kvfree(profile->slot_hashtable);
	kvfree_sensitive(profile->slots,
			 sizeof(profile->slots[0]) * profile->num_slots);
	memzero_explicit(profile, sizeof(*profile));
}
EXPORT_SYMBOL_GPL(blk_crypto_profile_destroy);

bool blk_crypto_register(struct blk_crypto_profile *profile,
			 struct request_queue *q)
{
	if (blk_integrity_queue_supports_integrity(q)) {
		pr_warn("Integrity and hardware inline encryption are not supported together. Disabling hardware inline encryption.\n");
		return false;
	}
	q->crypto_profile = profile;
	return true;
}
EXPORT_SYMBOL_GPL(blk_crypto_register);

void blk_crypto_unregister(struct request_queue *q)
{
	q->crypto_profile = NULL;
}

/**
 * blk_crypto_intersect_capabilities() - restrict supported crypto capabilities
 *					 by child device
 * @parent: the crypto profile for the parent device
 * @child: the crypto profile for the child device, or NULL
 *
 * This clears all crypto capabilities in @parent that aren't set in @child.  If
 * @child is NULL, then this clears all parent capabilities.
 *
 * Only use this when setting up the crypto profile for a layered device, before
 * it has been exposed.
 */
void blk_crypto_intersect_capabilities(struct blk_crypto_profile *parent,
				       const struct blk_crypto_profile *child)
{
	if (child) {
		unsigned int i;

		parent->max_dun_bytes_supported =
			min(parent->max_dun_bytes_supported,
			    child->max_dun_bytes_supported);
		for (i = 0; i < ARRAY_SIZE(child->modes_supported); i++)
			parent->modes_supported[i] &= child->modes_supported[i];
	} else {
		parent->max_dun_bytes_supported = 0;
		memset(parent->modes_supported, 0,
		       sizeof(parent->modes_supported));
	}
}
EXPORT_SYMBOL_GPL(blk_crypto_intersect_capabilities);
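
/*
 * Illustrative sketch for a layered driver: start with every capability
 * advertised, then intersect with each underlying device so that only the
 * capabilities supported by all of them remain.  The child iteration below
 * is hypothetical; the memset/UINT_MAX initialization is one way to express
 * a "support everything" starting point.
 *
 *	memset(parent->modes_supported, 0xFF, sizeof(parent->modes_supported));
 *	parent->max_dun_bytes_supported = UINT_MAX;
 *
 *	for_each_child_device(child)
 *		blk_crypto_intersect_capabilities(parent,
 *					child->queue->crypto_profile);
 */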

/**
 * blk_crypto_has_capabilities() - Check whether @target supports at least all
 *				   the crypto capabilities that @reference does.
 * @target: the target profile
 * @reference: the reference profile
 *
 * Return: %true if @target supports all the crypto capabilities of @reference.
 */
bool blk_crypto_has_capabilities(const struct blk_crypto_profile *target,
				 const struct blk_crypto_profile *reference)
{
	int i;

	if (!reference)
		return true;

	if (!target)
		return false;

	for (i = 0; i < ARRAY_SIZE(target->modes_supported); i++) {
		if (reference->modes_supported[i] & ~target->modes_supported[i])
			return false;
	}

	if (reference->max_dun_bytes_supported >
	    target->max_dun_bytes_supported)
		return false;

	return true;
}
EXPORT_SYMBOL_GPL(blk_crypto_has_capabilities);

/**
 * blk_crypto_update_capabilities() - Update the capabilities of a crypto
 *				      profile to match those of another crypto
 *				      profile.
 * @dst: The crypto profile whose capabilities to update.
 * @src: The crypto profile whose capabilities this function will update @dst's
 *	 capabilities to.
 *
 * Blk-crypto requires that crypto capabilities that were
 * advertised when a bio was created continue to be supported by the
 * device until that bio is ended. This in turn means that a device cannot
 * shrink its advertised crypto capabilities without any explicit
 * synchronization with upper layers. So if there's no such explicit
 * synchronization, @src must support all the crypto capabilities that
 * @dst does (i.e. we need blk_crypto_has_capabilities(@src, @dst)).
 *
 * Note also that as long as the crypto capabilities are being expanded, the
 * order of updates becoming visible is not important because it's alright
 * for blk-crypto to see stale values - they only cause blk-crypto to
 * believe that a crypto capability isn't supported when it actually is (which
 * might result in blk-crypto-fallback being used if available, or the bio being
 * failed).
 */
void blk_crypto_update_capabilities(struct blk_crypto_profile *dst,
				    const struct blk_crypto_profile *src)
{
	memcpy(dst->modes_supported, src->modes_supported,
	       sizeof(dst->modes_supported));

	dst->max_dun_bytes_supported = src->max_dun_bytes_supported;
}
EXPORT_SYMBOL_GPL(blk_crypto_update_capabilities);
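
/*
 * Illustrative sketch of the expansion-only update pattern described above
 * (caller code is hypothetical): compute the new capabilities, verify they
 * are a superset of what is currently advertised, then publish them.
 *
 *	if (!blk_crypto_has_capabilities(&new_profile, q->crypto_profile))
 *		return -EINVAL;
 *	blk_crypto_update_capabilities(q->crypto_profile, &new_profile);
 *
 * The -EINVAL path refuses an update that would shrink the currently
 * advertised capabilities without explicit synchronization.
 */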