/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright 2019 Google LLC
 */

#ifndef __LINUX_BLK_CRYPTO_INTERNAL_H
#define __LINUX_BLK_CRYPTO_INTERNAL_H

#include <linux/bio.h>
#include <linux/blk-mq.h>

/* Represents a crypto mode supported by blk-crypto */
struct blk_crypto_mode {
	const char *name; /* name of this mode, shown in sysfs */
	const char *cipher_str; /* crypto API name (for fallback case) */
	unsigned int keysize; /* key size in bytes */
	unsigned int ivsize; /* iv size in bytes */
};

extern const struct blk_crypto_mode blk_crypto_modes[];

#ifdef CONFIG_BLK_INLINE_ENCRYPTION

int blk_crypto_sysfs_register(struct gendisk *disk);

void blk_crypto_sysfs_unregister(struct gendisk *disk);

void bio_crypt_dun_increment(u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE],
			     unsigned int inc);

bool bio_crypt_rq_ctx_compatible(struct request *rq, struct bio *bio);

bool bio_crypt_ctx_mergeable(struct bio_crypt_ctx *bc1, unsigned int bc1_bytes,
			     struct bio_crypt_ctx *bc2);

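/*
 * Returns true iff @bio can be merged onto the back of @req as far as inline
 * encryption is concerned: either neither has an encryption context, or both
 * use the same key and @bio's DUN continues where @req's data ends.
 */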
static inline bool bio_crypt_ctx_back_mergeable(struct request *req,
						struct bio *bio)
{
	return bio_crypt_ctx_mergeable(req->crypt_ctx, blk_rq_bytes(req),
				       bio->bi_crypt_context);
}

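/*
 * The front-merge counterpart: @bio would be prepended, so @bio's context
 * (spanning bio->bi_iter.bi_size bytes) must be contiguously followed by
 * @req's context.
 */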
static inline bool bio_crypt_ctx_front_mergeable(struct request *req,
						 struct bio *bio)
{
	return bio_crypt_ctx_mergeable(bio->bi_crypt_context,
				       bio->bi_iter.bi_size, req->crypt_ctx);
}

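/* Returns true iff @next can be merged after @req w.r.t. inline encryption. */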
static inline bool bio_crypt_ctx_merge_rq(struct request *req,
					  struct request *next)
{
	return bio_crypt_ctx_mergeable(req->crypt_ctx, blk_rq_bytes(req),
				       next->crypt_ctx);
}

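/* Reset a request's inline encryption fields to their unencrypted state. */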
static inline void blk_crypto_rq_set_defaults(struct request *rq)
{
	rq->crypt_ctx = NULL;
	rq->crypt_keyslot = NULL;
}

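/* Returns true iff the request carries an inline encryption context. */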
static inline bool blk_crypto_rq_is_encrypted(struct request *rq)
{
	return rq->crypt_ctx;
}

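/*
 * Keyslot management against the device's crypto profile:
 * blk_crypto_get_keyslot() acquires a keyslot programmed with @key, and
 * blk_crypto_put_keyslot() releases it again.
 */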
blk_status_t blk_crypto_get_keyslot(struct blk_crypto_profile *profile,
				    const struct blk_crypto_key *key,
				    struct blk_crypto_keyslot **slot_ptr);

void blk_crypto_put_keyslot(struct blk_crypto_keyslot *slot);

int __blk_crypto_evict_key(struct blk_crypto_profile *profile,
			   const struct blk_crypto_key *key);

bool __blk_crypto_cfg_supported(struct blk_crypto_profile *profile,
				const struct blk_crypto_config *cfg);

#else /* CONFIG_BLK_INLINE_ENCRYPTION */

static inline int blk_crypto_sysfs_register(struct gendisk *disk)
{
	return 0;
}

static inline void blk_crypto_sysfs_unregister(struct gendisk *disk)
{
}

static inline bool bio_crypt_rq_ctx_compatible(struct request *rq,
					       struct bio *bio)
{
	return true;
}

static inline bool bio_crypt_ctx_front_mergeable(struct request *req,
						 struct bio *bio)
{
	return true;
}

static inline bool bio_crypt_ctx_back_mergeable(struct request *req,
						struct bio *bio)
{
	return true;
}

static inline bool bio_crypt_ctx_merge_rq(struct request *req,
					  struct request *next)
{
	return true;
}

static inline void blk_crypto_rq_set_defaults(struct request *rq) { }

static inline bool blk_crypto_rq_is_encrypted(struct request *rq)
{
	return false;
}

#endif /* CONFIG_BLK_INLINE_ENCRYPTION */

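/*
 * Advance the bio's encryption context (in particular its DUN) after @bytes
 * of its data have been processed.  The inline wrapper keeps the common
 * unencrypted case free of a function call.
 */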
void __bio_crypt_advance(struct bio *bio, unsigned int bytes);
static inline void bio_crypt_advance(struct bio *bio, unsigned int bytes)
{
	if (bio_has_crypt_ctx(bio))
		__bio_crypt_advance(bio, bytes);
}

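/* Free the bio's encryption context, if it has one. */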
void __bio_crypt_free_ctx(struct bio *bio);
static inline void bio_crypt_free_ctx(struct bio *bio)
{
	if (bio_has_crypt_ctx(bio))
		__bio_crypt_free_ctx(bio);
}

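/*
 * After a successful front merge, @bio is the new front of @rq, so @rq's
 * starting DUN must be replaced with @bio's starting DUN.
 */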
137 
138 static inline void bio_crypt_do_front_merge(struct request *rq,
139 					    struct bio *bio)
140 {
141 #ifdef CONFIG_BLK_INLINE_ENCRYPTION
142 	if (bio_has_crypt_ctx(bio))
143 		memcpy(rq->crypt_ctx->bc_dun, bio->bi_crypt_context->bc_dun,
144 		       sizeof(rq->crypt_ctx->bc_dun));
145 #endif
146 }
147 
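/*
 * Prepare a bio for inline encryption before submission.  Returns true if the
 * bio may proceed; otherwise an error status is set on the bio and false is
 * returned.  A no-op for bios without an encryption context.
 */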
bool __blk_crypto_bio_prep(struct bio **bio_ptr);
static inline bool blk_crypto_bio_prep(struct bio **bio_ptr)
{
	if (bio_has_crypt_ctx(*bio_ptr))
		return __blk_crypto_bio_prep(bio_ptr);
	return true;
}

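/*
 * Acquire a keyslot for an encrypted request; a no-op returning BLK_STS_OK
 * for unencrypted requests.
 */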
blk_status_t __blk_crypto_init_request(struct request *rq);
static inline blk_status_t blk_crypto_init_request(struct request *rq)
{
	if (blk_crypto_rq_is_encrypted(rq))
		return __blk_crypto_init_request(rq);
	return BLK_STS_OK;
}

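/* Release the encryption context and keyslot held by an encrypted request. */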
void __blk_crypto_free_request(struct request *rq);
static inline void blk_crypto_free_request(struct request *rq)
{
	if (blk_crypto_rq_is_encrypted(rq))
		__blk_crypto_free_request(rq);
}

int __blk_crypto_rq_bio_prep(struct request *rq, struct bio *bio,
			     gfp_t gfp_mask);
/**
 * blk_crypto_rq_bio_prep - Prepare a request's crypt_ctx when its first bio
 *			    is inserted
 * @rq: The request to prepare
 * @bio: The first bio being inserted into the request
 * @gfp_mask: Memory allocation flags
 *
 * Return: 0 on success, -ENOMEM if out of memory.  -ENOMEM is only possible if
 *	   @gfp_mask doesn't include %__GFP_DIRECT_RECLAIM.
 */
static inline int blk_crypto_rq_bio_prep(struct request *rq, struct bio *bio,
					 gfp_t gfp_mask)
{
	if (bio_has_crypt_ctx(bio))
		return __blk_crypto_rq_bio_prep(rq, bio, gfp_mask);
	return 0;
}

/**
 * blk_crypto_insert_cloned_request - Prepare a cloned request to be inserted
 *				      into a request queue.
 * @rq: the request being queued
 *
 * Return: BLK_STS_OK on success, nonzero on error.
 */
static inline blk_status_t blk_crypto_insert_cloned_request(struct request *rq)
{
	if (blk_crypto_rq_is_encrypted(rq))
		return blk_crypto_init_request(rq);
	return BLK_STS_OK;
}

#ifdef CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK

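/*
 * The crypto API fallback en/decrypts bios in software when the device does
 * not natively support their encryption context; implemented in
 * blk-crypto-fallback.c.
 */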
int blk_crypto_fallback_start_using_mode(enum blk_crypto_mode_num mode_num);

bool blk_crypto_fallback_bio_prep(struct bio **bio_ptr);

int blk_crypto_fallback_evict_key(const struct blk_crypto_key *key);

#else /* CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK */

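/*
 * Stubs for when the fallback is compiled out: bios that would need it are
 * failed with BLK_STS_NOTSUPP.
 */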
static inline int
blk_crypto_fallback_start_using_mode(enum blk_crypto_mode_num mode_num)
{
	pr_warn_once("crypto API fallback is disabled\n");
	return -ENOPKG;
}

static inline bool blk_crypto_fallback_bio_prep(struct bio **bio_ptr)
{
	pr_warn_once("crypto API fallback disabled; failing request.\n");
	(*bio_ptr)->bi_status = BLK_STS_NOTSUPP;
	return false;
}

static inline int
blk_crypto_fallback_evict_key(const struct blk_crypto_key *key)
{
	return 0;
}

#endif /* CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK */

#endif /* __LINUX_BLK_CRYPTO_INTERNAL_H */