/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Cryptographic API for algorithms (i.e., low-level API).
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 */
#ifndef _CRYPTO_ALGAPI_H
#define _CRYPTO_ALGAPI_H

#include <linux/crypto.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>

/*
 * Maximum values for blocksize and alignmask, used to allocate
 * static buffers that are big enough for any combination of
 * algs and architectures. Ciphers have a lower maximum size.
 */
#define MAX_ALGAPI_BLOCKSIZE		160
#define MAX_ALGAPI_ALIGNMASK		63
#define MAX_CIPHER_BLOCKSIZE		16
#define MAX_CIPHER_ALIGNMASK		15
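
/*
 * Example (illustrative sketch, not part of the original header): these
 * maxima let callers put a worst-case scratch buffer on the stack and
 * align it by hand with PTR_ALIGN().  The function name below is
 * hypothetical.
 */
#if 0
static void example_one_block(struct crypto_cipher *tfm, const u8 *in)
{
	u8 buf[MAX_CIPHER_BLOCKSIZE + MAX_CIPHER_ALIGNMASK];
	unsigned int mask = crypto_tfm_alg_alignmask(crypto_cipher_tfm(tfm));
	u8 *tmp = PTR_ALIGN(&buf[0], mask + 1);

	memcpy(tmp, in, crypto_cipher_blocksize(tfm));
	/* ... operate on the aligned copy in tmp ... */
}
#endif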

struct crypto_aead;
struct crypto_instance;
struct module;
struct rtattr;
struct seq_file;

/*
 * Per-type frontend operations: context sizing, tfm setup, display in
 * /proc/crypto (->show) and reporting over the crypto_user netlink
 * interface (->report).
 */
struct crypto_type {
	unsigned int (*ctxsize)(struct crypto_alg *alg, u32 type, u32 mask);
	unsigned int (*extsize)(struct crypto_alg *alg);
	int (*init)(struct crypto_tfm *tfm, u32 type, u32 mask);
	int (*init_tfm)(struct crypto_tfm *tfm);
	void (*show)(struct seq_file *m, struct crypto_alg *alg);
	int (*report)(struct sk_buff *skb, struct crypto_alg *alg);
	void (*free)(struct crypto_instance *inst);

	unsigned int type;
	unsigned int maskclear;
	unsigned int maskset;
	unsigned int tfmsize;
};

struct crypto_instance {
	struct crypto_alg alg;

	struct crypto_template *tmpl;
	struct hlist_node list;

	void *__ctx[] CRYPTO_MINALIGN_ATTR;
};

struct crypto_template {
	struct list_head list;
	struct hlist_head instances;
	struct module *module;

	struct crypto_instance *(*alloc)(struct rtattr **tb);
	void (*free)(struct crypto_instance *inst);
	int (*create)(struct crypto_template *tmpl, struct rtattr **tb);

	char name[CRYPTO_MAX_ALG_NAME];
};
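
/*
 * Example (illustrative sketch): the usual shape of a template module.
 * All "example_" names and the module boilerplate are hypothetical;
 * real templates fill in ->create() and register at module init.
 */
#if 0
static int example_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	/* parse tb, allocate and register a crypto_instance */
	return -ENOSYS;
}

static struct crypto_template example_tmpl = {
	.name	= "example",
	.create	= example_create,
	.module	= THIS_MODULE,
};

static int __init example_module_init(void)
{
	return crypto_register_template(&example_tmpl);
}

static void __exit example_module_exit(void)
{
	crypto_unregister_template(&example_tmpl);
}
#endif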

struct crypto_spawn {
	struct list_head list;
	struct crypto_alg *alg;
	struct crypto_instance *inst;
	const struct crypto_type *frontend;
	u32 mask;
};

struct crypto_queue {
	struct list_head list;
	struct list_head *backlog;	/* first backlogged entry, or &list if none */

	unsigned int qlen;
	unsigned int max_qlen;
};

struct scatter_walk {
	struct scatterlist *sg;
	unsigned int offset;
};

struct blkcipher_walk {
	union {
		struct {
			struct page *page;
			unsigned long offset;
		} phys;

		struct {
			u8 *page;
			u8 *addr;
		} virt;
	} src, dst;

	struct scatter_walk in;
	unsigned int nbytes;

	struct scatter_walk out;
	unsigned int total;

	void *page;
	u8 *buffer;
	u8 *iv;
	unsigned int ivsize;

	int flags;
	unsigned int walk_blocksize;
	unsigned int cipher_blocksize;
	unsigned int alignmask;
};

struct ablkcipher_walk {
	struct {
		struct page *page;
		unsigned int offset;
	} src, dst;

	struct scatter_walk	in;
	unsigned int		nbytes;
	struct scatter_walk	out;
	unsigned int		total;
	struct list_head	buffers;
	u8			*iv_buffer;
	u8			*iv;
	int			flags;
	unsigned int		blocksize;
};

extern const struct crypto_type crypto_ablkcipher_type;
extern const struct crypto_type crypto_blkcipher_type;

void crypto_mod_put(struct crypto_alg *alg);

int crypto_register_template(struct crypto_template *tmpl);
int crypto_register_templates(struct crypto_template *tmpls, int count);
void crypto_unregister_template(struct crypto_template *tmpl);
void crypto_unregister_templates(struct crypto_template *tmpls, int count);
struct crypto_template *crypto_lookup_template(const char *name);

int crypto_register_instance(struct crypto_template *tmpl,
			     struct crypto_instance *inst);
int crypto_unregister_instance(struct crypto_instance *inst);

int crypto_init_spawn(struct crypto_spawn *spawn, struct crypto_alg *alg,
		      struct crypto_instance *inst, u32 mask);
int crypto_init_spawn2(struct crypto_spawn *spawn, struct crypto_alg *alg,
		       struct crypto_instance *inst,
		       const struct crypto_type *frontend);
int crypto_grab_spawn(struct crypto_spawn *spawn, const char *name,
		      u32 type, u32 mask);

void crypto_drop_spawn(struct crypto_spawn *spawn);
struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type,
				    u32 mask);
void *crypto_spawn_tfm2(struct crypto_spawn *spawn);

static inline void crypto_set_spawn(struct crypto_spawn *spawn,
				    struct crypto_instance *inst)
{
	spawn->inst = inst;
}
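
/*
 * Example (illustrative sketch): the usual spawn lifecycle.  A template's
 * ->create() takes a reference on the underlying algorithm with
 * crypto_init_spawn(), the instance's cra_init turns it into a tfm with
 * crypto_spawn_cipher() (defined later in this header), and the free
 * path releases it with crypto_drop_spawn().  "example_" names are
 * hypothetical.
 */
#if 0
struct example_instance_ctx {
	struct crypto_spawn spawn;
};

static int example_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct example_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct crypto_cipher *cipher;

	cipher = crypto_spawn_cipher(&ictx->spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	/* stash the cipher handle in the tfm context */
	return 0;
}

static void example_free_instance(struct crypto_instance *inst)
{
	struct example_instance_ctx *ictx = crypto_instance_ctx(inst);

	crypto_drop_spawn(&ictx->spawn);
	kfree(inst);
}
#endif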

struct crypto_attr_type *crypto_get_attr_type(struct rtattr **tb);
int crypto_check_attr_type(struct rtattr **tb, u32 type);
const char *crypto_attr_alg_name(struct rtattr *rta);
struct crypto_alg *crypto_attr_alg2(struct rtattr *rta,
				    const struct crypto_type *frontend,
				    u32 type, u32 mask);

static inline struct crypto_alg *crypto_attr_alg(struct rtattr *rta,
						 u32 type, u32 mask)
{
	return crypto_attr_alg2(rta, NULL, type, mask);
}

int crypto_attr_u32(struct rtattr *rta, u32 *num);
int crypto_inst_setname(struct crypto_instance *inst, const char *name,
			struct crypto_alg *alg);
void *crypto_alloc_instance(const char *name, struct crypto_alg *alg,
			    unsigned int head);
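
/*
 * Example (illustrative sketch): attribute handling inside ->create().
 * The template checks the requested type, looks up the wrapped
 * algorithm's name from tb[1], and later derives the instance names
 * with crypto_inst_setname().  Error handling is abbreviated and all
 * "example_" names are hypothetical.
 */
#if 0
static int example_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct crypto_attr_type *algt;
	const char *cipher_name;
	int err;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return PTR_ERR(algt);

	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_CIPHER);
	if (err)
		return err;

	cipher_name = crypto_attr_alg_name(tb[1]);
	if (IS_ERR(cipher_name))
		return PTR_ERR(cipher_name);

	/* ... allocate inst, grab spawns, then:
	 * err = crypto_inst_setname(inst, tmpl->name, alg);
	 */
	return err;
}
#endif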

void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen);
int crypto_enqueue_request(struct crypto_queue *queue,
			   struct crypto_async_request *request);
struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue);
int crypto_tfm_in_queue(struct crypto_queue *queue, struct crypto_tfm *tfm);
static inline unsigned int crypto_queue_len(struct crypto_queue *queue)
{
	return queue->qlen;
}
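
/*
 * Example (illustrative sketch): a driver-side request queue.  The
 * queue has no internal locking, so the caller serialises access; the
 * spinlock and all "example_" names here are hypothetical, and
 * crypto_init_queue(&engine->queue, max_qlen) is assumed to have been
 * called once at setup.
 */
#if 0
struct example_engine {
	struct crypto_queue queue;
	spinlock_t lock;
};

static int example_handle_request(struct example_engine *engine,
				  struct crypto_async_request *req)
{
	struct crypto_async_request *next;
	int err;

	spin_lock_bh(&engine->lock);
	err = crypto_enqueue_request(&engine->queue, req);
	next = crypto_dequeue_request(&engine->queue);
	spin_unlock_bh(&engine->lock);

	if (next)
		; /* hand "next" to the hardware */

	return err;
}
#endif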

/* Increment the size-byte big-endian counter at a (as used by CTR mode). */
void crypto_inc(u8 *a, unsigned int size);
void __crypto_xor(u8 *dst, const u8 *src1, const u8 *src2, unsigned int size);

static inline void crypto_xor(u8 *dst, const u8 *src, unsigned int size)
{
	if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
	    __builtin_constant_p(size) &&
	    (size % sizeof(unsigned long)) == 0) {
		unsigned long *d = (unsigned long *)dst;
		unsigned long *s = (unsigned long *)src;

		while (size > 0) {
			*d++ ^= *s++;
			size -= sizeof(unsigned long);
		}
	} else {
		__crypto_xor(dst, dst, src, size);
	}
}
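
/*
 * Example (illustrative sketch): a CTR-style step using crypto_inc()
 * to bump a big-endian counter block and crypto_xor() to fold the
 * keystream into the data in place.  The 16-byte block size is an
 * assumption for illustration.
 */
#if 0
static void example_ctr_step(u8 *data, const u8 *keystream, u8 *ctrblk)
{
	crypto_xor(data, keystream, 16);	/* data ^= keystream */
	crypto_inc(ctrblk, 16);			/* counter++, big endian */
}
#endif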

static inline void crypto_xor_cpy(u8 *dst, const u8 *src1, const u8 *src2,
				  unsigned int size)
{
	if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
	    __builtin_constant_p(size) &&
	    (size % sizeof(unsigned long)) == 0) {
		unsigned long *d = (unsigned long *)dst;
		unsigned long *s1 = (unsigned long *)src1;
		unsigned long *s2 = (unsigned long *)src2;

		while (size > 0) {
			*d++ = *s1++ ^ *s2++;
			size -= sizeof(unsigned long);
		}
	} else {
		__crypto_xor(dst, src1, src2, size);
	}
}
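
/*
 * Example (illustrative sketch): crypto_xor_cpy() fuses the copy and
 * the XOR, writing dst = src1 ^ src2 without a separate memcpy() into
 * dst first.  Useful when the output buffer differs from both inputs;
 * the function name and 16-byte size are assumptions.
 */
#if 0
static void example_whiten(u8 *out, const u8 *in, const u8 *mask_block)
{
	crypto_xor_cpy(out, in, mask_block, 16);	/* out = in ^ mask_block */
}
#endif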

/* A typical walk loop is sketched after blkcipher_walk_init() below. */
int blkcipher_walk_done(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk, int err);
int blkcipher_walk_virt(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk);
int blkcipher_walk_phys(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk);
int blkcipher_walk_virt_block(struct blkcipher_desc *desc,
			      struct blkcipher_walk *walk,
			      unsigned int blocksize);
int blkcipher_aead_walk_virt_block(struct blkcipher_desc *desc,
				   struct blkcipher_walk *walk,
				   struct crypto_aead *tfm,
				   unsigned int blocksize);

int ablkcipher_walk_done(struct ablkcipher_request *req,
			 struct ablkcipher_walk *walk, int err);
int ablkcipher_walk_phys(struct ablkcipher_request *req,
			 struct ablkcipher_walk *walk);
void __ablkcipher_walk_complete(struct ablkcipher_walk *walk);

static inline void *crypto_tfm_ctx_aligned(struct crypto_tfm *tfm)
{
	return PTR_ALIGN(crypto_tfm_ctx(tfm),
			 crypto_tfm_alg_alignmask(tfm) + 1);
}

static inline struct crypto_instance *crypto_tfm_alg_instance(
	struct crypto_tfm *tfm)
{
	return container_of(tfm->__crt_alg, struct crypto_instance, alg);
}

static inline void *crypto_instance_ctx(struct crypto_instance *inst)
{
	return inst->__ctx;
}

static inline struct ablkcipher_alg *crypto_ablkcipher_alg(
	struct crypto_ablkcipher *tfm)
{
	return &crypto_ablkcipher_tfm(tfm)->__crt_alg->cra_ablkcipher;
}

static inline void *crypto_ablkcipher_ctx(struct crypto_ablkcipher *tfm)
{
	return crypto_tfm_ctx(&tfm->base);
}

static inline void *crypto_ablkcipher_ctx_aligned(struct crypto_ablkcipher *tfm)
{
	return crypto_tfm_ctx_aligned(&tfm->base);
}

static inline struct crypto_blkcipher *crypto_spawn_blkcipher(
	struct crypto_spawn *spawn)
{
	u32 type = CRYPTO_ALG_TYPE_BLKCIPHER;
	u32 mask = CRYPTO_ALG_TYPE_MASK;

	return __crypto_blkcipher_cast(crypto_spawn_tfm(spawn, type, mask));
}

static inline void *crypto_blkcipher_ctx(struct crypto_blkcipher *tfm)
{
	return crypto_tfm_ctx(&tfm->base);
}

static inline void *crypto_blkcipher_ctx_aligned(struct crypto_blkcipher *tfm)
{
	return crypto_tfm_ctx_aligned(&tfm->base);
}

static inline struct crypto_cipher *crypto_spawn_cipher(
	struct crypto_spawn *spawn)
{
	u32 type = CRYPTO_ALG_TYPE_CIPHER;
	u32 mask = CRYPTO_ALG_TYPE_MASK;

	return __crypto_cipher_cast(crypto_spawn_tfm(spawn, type, mask));
}

static inline struct cipher_alg *crypto_cipher_alg(struct crypto_cipher *tfm)
{
	return &crypto_cipher_tfm(tfm)->__crt_alg->cra_cipher;
}

static inline void blkcipher_walk_init(struct blkcipher_walk *walk,
				       struct scatterlist *dst,
				       struct scatterlist *src,
				       unsigned int nbytes)
{
	walk->in.sg = src;
	walk->out.sg = dst;
	walk->total = nbytes;
}
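
/*
 * Example (illustrative sketch): the canonical walk loop for a (legacy)
 * blkcipher implementation.  Each iteration exposes a linear chunk via
 * walk.src/walk.dst, and blkcipher_walk_done() is told how many bytes
 * were left unprocessed.  "example_process" is hypothetical.
 */
#if 0
static int example_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
			 struct scatterlist *src, unsigned int nbytes)
{
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	while (walk.nbytes) {
		unsigned int n = walk.nbytes - (walk.nbytes % walk.walk_blocksize);

		example_process(walk.dst.virt.addr, walk.src.virt.addr, n,
				walk.iv);
		err = blkcipher_walk_done(desc, &walk, walk.nbytes - n);
	}

	return err;
}
#endif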

static inline void ablkcipher_walk_init(struct ablkcipher_walk *walk,
					struct scatterlist *dst,
					struct scatterlist *src,
					unsigned int nbytes)
{
	walk->in.sg = src;
	walk->out.sg = dst;
	walk->total = nbytes;
	INIT_LIST_HEAD(&walk->buffers);
}

static inline void ablkcipher_walk_complete(struct ablkcipher_walk *walk)
{
	if (unlikely(!list_empty(&walk->buffers)))
		__ablkcipher_walk_complete(walk);
}

static inline struct crypto_async_request *crypto_get_backlog(
	struct crypto_queue *queue)
{
	return queue->backlog == &queue->list ? NULL :
	       container_of(queue->backlog, struct crypto_async_request, list);
}
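
/*
 * Example (illustrative sketch): completing a backlogged request.  When
 * a driver dequeues work, a request that had been backlogged must first
 * be notified with -EINPROGRESS so its submitter can stop waiting.
 * Locking is the caller's, and the "example_engine" type is the
 * hypothetical one sketched earlier.
 */
#if 0
static void example_dequeue(struct example_engine *engine)
{
	struct crypto_async_request *req, *backlog;

	spin_lock_bh(&engine->lock);
	backlog = crypto_get_backlog(&engine->queue);
	req = crypto_dequeue_request(&engine->queue);
	spin_unlock_bh(&engine->lock);

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);
	if (req)
		; /* process req */
}
#endif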

static inline int ablkcipher_enqueue_request(struct crypto_queue *queue,
					     struct ablkcipher_request *request)
{
	return crypto_enqueue_request(queue, &request->base);
}

static inline struct ablkcipher_request *ablkcipher_dequeue_request(
	struct crypto_queue *queue)
{
	return ablkcipher_request_cast(crypto_dequeue_request(queue));
}

static inline void *ablkcipher_request_ctx(struct ablkcipher_request *req)
{
	return req->__ctx;
}

static inline int ablkcipher_tfm_in_queue(struct crypto_queue *queue,
					  struct crypto_ablkcipher *tfm)
{
	return crypto_tfm_in_queue(queue, crypto_ablkcipher_tfm(tfm));
}

static inline struct crypto_alg *crypto_get_attr_alg(struct rtattr **tb,
						     u32 type, u32 mask)
{
	return crypto_attr_alg(tb[1], type, mask);
}

static inline int crypto_requires_off(u32 type, u32 mask, u32 off)
{
	return (type ^ off) & mask & off;
}

/*
 * Returns CRYPTO_ALG_ASYNC if the type/mask pair requires a synchronous
 * algorithm (i.e., the caller masked on CRYPTO_ALG_ASYNC with the bit
 * clear in type).  Otherwise returns zero.
 */
static inline int crypto_requires_sync(u32 type, u32 mask)
{
	return crypto_requires_off(type, mask, CRYPTO_ALG_ASYNC);
}
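
/*
 * Example (illustrative sketch): a template propagating the caller's
 * sync requirement into its own algorithm lookup, so a request for a
 * synchronous instance is never satisfied by an async underlying
 * implementation.  The function name is hypothetical.
 */
#if 0
static struct crypto_alg *example_lookup(struct rtattr **tb,
					 struct crypto_attr_type *algt)
{
	u32 mask = crypto_requires_sync(algt->type, algt->mask);

	return crypto_attr_alg(tb[1], CRYPTO_ALG_TYPE_CIPHER,
			       CRYPTO_ALG_TYPE_MASK | mask);
}
#endif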

noinline unsigned long __crypto_memneq(const void *a, const void *b, size_t size);

/**
 * crypto_memneq - Compare two areas of memory without leaking
 *		   timing information.
 *
 * @a: One area of memory
 * @b: Another area of memory
 * @size: The size of the area.
 *
 * Returns 0 when data is equal, 1 otherwise.
 */
static inline int crypto_memneq(const void *a, const void *b, size_t size)
{
	return __crypto_memneq(a, b, size) != 0UL ? 1 : 0;
}
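
/*
 * Example (illustrative sketch): authentication-tag verification.
 * crypto_memneq() is used instead of memcmp() so the comparison takes
 * the same time whether the tags differ in the first or the last byte.
 * The function name is hypothetical.
 */
#if 0
static int example_verify_tag(const u8 *computed, const u8 *received,
			      unsigned int taglen)
{
	return crypto_memneq(computed, received, taglen) ? -EBADMSG : 0;
}
#endif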

static inline void crypto_yield(u32 flags)
{
#if !defined(CONFIG_PREEMPT) || defined(CONFIG_PREEMPT_VOLUNTARY)
	if (flags & CRYPTO_TFM_REQ_MAY_SLEEP)
		cond_resched();
#endif
}
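
/*
 * Example (illustrative sketch): a software implementation processing
 * many blocks calls crypto_yield() between chunks so that, when the
 * request allows sleeping, long operations don't hog the CPU.  The
 * function name is hypothetical.
 */
#if 0
static void example_bulk(struct blkcipher_desc *desc, u8 *buf,
			 unsigned int nchunks)
{
	unsigned int i;

	for (i = 0; i < nchunks; i++) {
		/* ... process one chunk of buf ... */
		crypto_yield(desc->flags);
	}
}
#endif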

int crypto_register_notifier(struct notifier_block *nb);
int crypto_unregister_notifier(struct notifier_block *nb);

/* Crypto notification events. */
enum {
	CRYPTO_MSG_ALG_REQUEST,
	CRYPTO_MSG_ALG_REGISTER,
	CRYPTO_MSG_ALG_LOADED,
};

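/*
 * Example (illustrative sketch): listening for algorithm events.  The
 * callback uses the standard notifier_block signature; the event codes
 * are the CRYPTO_MSG_* values above, and "example_" names are
 * hypothetical.
 */
#if 0
static int example_notify(struct notifier_block *nb, unsigned long msg,
			  void *data)
{
	if (msg == CRYPTO_MSG_ALG_LOADED)
		; /* react to a newly loaded algorithm */
	return NOTIFY_DONE;
}

static struct notifier_block example_nb = {
	.notifier_call = example_notify,
};

/* crypto_register_notifier(&example_nb); called from init */
#endif
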
#endif	/* _CRYPTO_ALGAPI_H */