/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright 2019 Google LLC
 */

#ifndef __LINUX_BLK_CRYPTO_INTERNAL_H
#define __LINUX_BLK_CRYPTO_INTERNAL_H

#include <linux/bio.h>
#include <linux/blkdev.h>

/* Represents a crypto mode supported by blk-crypto */
struct blk_crypto_mode {
	const char *cipher_str; /* crypto API name (for fallback case) */
	unsigned int keysize; /* key size in bytes */
	unsigned int ivsize; /* iv size in bytes */
};

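/* Table of supported crypto modes, indexed by enum blk_crypto_mode_num */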
extern const struct blk_crypto_mode blk_crypto_modes[];

#ifdef CONFIG_BLK_INLINE_ENCRYPTION

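/*
 * Add @inc to the multi-word data unit number @dun, propagating any carry
 * between its 64-bit words.
 */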
void bio_crypt_dun_increment(u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE],
			     unsigned int inc);

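/*
 * Whether @bio is allowed to be merged into @rq as far as crypto is
 * concerned: true if neither has an encryption context, or both use the
 * same key.
 */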
bool bio_crypt_rq_ctx_compatible(struct request *rq, struct bio *bio);

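/*
 * Whether @bc2 can directly follow @bc1 in one request: the contexts must be
 * compatible, and @bc2's DUN must continue from @bc1's after @bc1_bytes of
 * data.
 */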
bool bio_crypt_ctx_mergeable(struct bio_crypt_ctx *bc1, unsigned int bc1_bytes,
			     struct bio_crypt_ctx *bc2);

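/* Whether @bio can be merged onto the back of @req */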
static inline bool bio_crypt_ctx_back_mergeable(struct request *req,
						struct bio *bio)
{
	return bio_crypt_ctx_mergeable(req->crypt_ctx, blk_rq_bytes(req),
				       bio->bi_crypt_context);
}

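/* Whether @bio can be merged onto the front of @req */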
static inline bool bio_crypt_ctx_front_mergeable(struct request *req,
						 struct bio *bio)
{
	return bio_crypt_ctx_mergeable(bio->bi_crypt_context,
				       bio->bi_iter.bi_size, req->crypt_ctx);
}

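/* Whether @next can be merged onto the back of @req */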
static inline bool bio_crypt_ctx_merge_rq(struct request *req,
					  struct request *next)
{
	return bio_crypt_ctx_mergeable(req->crypt_ctx, blk_rq_bytes(req),
				       next->crypt_ctx);
}

static inline void blk_crypto_rq_set_defaults(struct request *rq)
{
	rq->crypt_ctx = NULL;
	rq->crypt_keyslot = NULL;
}

static inline bool blk_crypto_rq_is_encrypted(struct request *rq)
{
	return rq->crypt_ctx;
}

#else /* CONFIG_BLK_INLINE_ENCRYPTION */

static inline bool bio_crypt_rq_ctx_compatible(struct request *rq,
					       struct bio *bio)
{
	return true;
}

static inline bool bio_crypt_ctx_front_mergeable(struct request *req,
						 struct bio *bio)
{
	return true;
}

static inline bool bio_crypt_ctx_back_mergeable(struct request *req,
						struct bio *bio)
{
	return true;
}

static inline bool bio_crypt_ctx_merge_rq(struct request *req,
					  struct request *next)
{
	return true;
}

static inline void blk_crypto_rq_set_defaults(struct request *rq) { }

static inline bool blk_crypto_rq_is_encrypted(struct request *rq)
{
	return false;
}

#endif /* CONFIG_BLK_INLINE_ENCRYPTION */

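/*
 * Advance @bio's encryption context by @bytes of data, i.e. increment its
 * DUN by the number of data units spanned.
 */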
void __bio_crypt_advance(struct bio *bio, unsigned int bytes);
static inline void bio_crypt_advance(struct bio *bio, unsigned int bytes)
{
	if (bio_has_crypt_ctx(bio))
		__bio_crypt_advance(bio, bytes);
}

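/* Free @bio's encryption context, if it has one */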
void __bio_crypt_free_ctx(struct bio *bio);
static inline void bio_crypt_free_ctx(struct bio *bio)
{
	if (bio_has_crypt_ctx(bio))
		__bio_crypt_free_ctx(bio);
}

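/*
 * After a front merge, @bio is the new start of @rq, so the request takes on
 * the bio's starting DUN.
 */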
static inline void bio_crypt_do_front_merge(struct request *rq,
					    struct bio *bio)
{
#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	if (bio_has_crypt_ctx(bio))
		memcpy(rq->crypt_ctx->bc_dun, bio->bi_crypt_context->bc_dun,
		       sizeof(rq->crypt_ctx->bc_dun));
#endif
}

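/*
 * Prepare a bio with an encryption context for submission: use inline
 * encryption hardware if the device supports the key, else fall back to the
 * crypto API.  On failure the bio is completed with an error status and
 * false is returned; the fallback may repoint *bio_ptr at a bounce bio.
 */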
bool __blk_crypto_bio_prep(struct bio **bio_ptr);
static inline bool blk_crypto_bio_prep(struct bio **bio_ptr)
{
	if (bio_has_crypt_ctx(*bio_ptr))
		return __blk_crypto_bio_prep(bio_ptr);
	return true;
}

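/*
 * If @rq is encrypted, acquire a keyslot for its key so that it can be
 * programmed into the inline encryption hardware.
 */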
blk_status_t __blk_crypto_init_request(struct request *rq);
static inline blk_status_t blk_crypto_init_request(struct request *rq)
{
	if (blk_crypto_rq_is_encrypted(rq))
		return __blk_crypto_init_request(rq);
	return BLK_STS_OK;
}

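/* Release @rq's keyslot (if it holds one) and free its encryption context */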
void __blk_crypto_free_request(struct request *rq);
static inline void blk_crypto_free_request(struct request *rq)
{
	if (blk_crypto_rq_is_encrypted(rq))
		__blk_crypto_free_request(rq);
}

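/*
 * Give @rq an encryption context mirroring @bio's; the context is allocated
 * with @gfp_mask.
 */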
void __blk_crypto_rq_bio_prep(struct request *rq, struct bio *bio,
			      gfp_t gfp_mask);
static inline void blk_crypto_rq_bio_prep(struct request *rq, struct bio *bio,
					  gfp_t gfp_mask)
{
	if (bio_has_crypt_ctx(bio))
		__blk_crypto_rq_bio_prep(rq, bio, gfp_mask);
}

/**
 * blk_crypto_insert_cloned_request - Prepare a cloned request to be inserted
 *				      into a request queue.
 * @rq: the request being queued
 *
 * Return: BLK_STS_OK on success, nonzero on error.
 */
static inline blk_status_t blk_crypto_insert_cloned_request(struct request *rq)
{
	if (blk_crypto_rq_is_encrypted(rq))
		return blk_crypto_init_request(rq);
	return BLK_STS_OK;
}

#ifdef CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK

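/*
 * Ensure the fallback has crypto transforms allocated for @mode_num, so that
 * bios using that mode can be serviced when the device lacks inline support.
 */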
int blk_crypto_fallback_start_using_mode(enum blk_crypto_mode_num mode_num);

bool blk_crypto_fallback_bio_prep(struct bio **bio_ptr);

int blk_crypto_fallback_evict_key(const struct blk_crypto_key *key);

#else /* CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK */

static inline int
blk_crypto_fallback_start_using_mode(enum blk_crypto_mode_num mode_num)
{
	pr_warn_once("crypto API fallback is disabled\n");
	return -ENOPKG;
}

static inline bool blk_crypto_fallback_bio_prep(struct bio **bio_ptr)
{
	pr_warn_once("crypto API fallback disabled; failing request.\n");
	(*bio_ptr)->bi_status = BLK_STS_NOTSUPP;
	return false;
}

static inline int
blk_crypto_fallback_evict_key(const struct blk_crypto_key *key)
{
	return 0;
}

#endif /* CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK */

#endif /* __LINUX_BLK_CRYPTO_INTERNAL_H */