/*
 * Cryptographic API for algorithms (i.e., low-level API).
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */
#ifndef _CRYPTO_ALGAPI_H
#define _CRYPTO_ALGAPI_H

#include <linux/crypto.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>

/*
 * Maximum values for blocksize and alignmask, used to allocate
 * static buffers that are big enough for any combination of
 * algs and architectures. Ciphers have a lower maximum size.
 */
#define MAX_ALGAPI_BLOCKSIZE		160
#define MAX_ALGAPI_ALIGNMASK		63
#define MAX_CIPHER_BLOCKSIZE		16
#define MAX_CIPHER_ALIGNMASK		15
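
/*
 * A minimal sizing sketch (the buffer and the "alignmask" variable are
 * hypothetical, not part of this API): a static buffer dimensioned with
 * these bounds can hold one block of any conforming algorithm at any
 * supported alignment.
 *
 *	u8 buf[MAX_ALGAPI_BLOCKSIZE + MAX_ALGAPI_ALIGNMASK];
 *	u8 *block = PTR_ALIGN(buf, alignmask + 1);
 */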

struct crypto_aead;
struct crypto_instance;
struct module;
struct rtattr;
struct seq_file;
/*
 * Operations implemented by a transform type (frontend), used by the
 * API core to size, initialise and report transforms of that type.
 */
struct crypto_type {
	unsigned int (*ctxsize)(struct crypto_alg *alg, u32 type, u32 mask);
	unsigned int (*extsize)(struct crypto_alg *alg);
	int (*init)(struct crypto_tfm *tfm, u32 type, u32 mask);
	int (*init_tfm)(struct crypto_tfm *tfm);
	void (*show)(struct seq_file *m, struct crypto_alg *alg);
	int (*report)(struct sk_buff *skb, struct crypto_alg *alg);
	void (*free)(struct crypto_instance *inst);

	unsigned int type;
	unsigned int maskclear;
	unsigned int maskset;
	unsigned int tfmsize;
};

/* An algorithm constructed by a template, embedding the resulting alg. */
struct crypto_instance {
	struct crypto_alg alg;

	struct crypto_template *tmpl;
	struct hlist_node list;

	void *__ctx[] CRYPTO_MINALIGN_ATTR;
};

/* A template such as "cbc" or "hmac" that builds instances around other algorithms. */
struct crypto_template {
	struct list_head list;
	struct hlist_head instances;
	struct module *module;

	struct crypto_instance *(*alloc)(struct rtattr **tb);
	void (*free)(struct crypto_instance *inst);
	int (*create)(struct crypto_template *tmpl, struct rtattr **tb);

	char name[CRYPTO_MAX_ALG_NAME];
};

/* A reference held by an instance to the algorithm it is built on. */
struct crypto_spawn {
	struct list_head list;
	struct crypto_alg *alg;
	struct crypto_instance *inst;
	const struct crypto_type *frontend;
	u32 mask;
};

/* A request queue with backlog support, used by asynchronous drivers. */
struct crypto_queue {
	struct list_head list;
	struct list_head *backlog;

	unsigned int qlen;
	unsigned int max_qlen;
};

/* The current position within a scatterlist being walked. */
struct scatter_walk {
	struct scatterlist *sg;
	unsigned int offset;
};

/* Per-request state for walking the src/dst scatterlists one chunk at a time. */
struct blkcipher_walk {
	union {
		struct {
			struct page *page;
			unsigned long offset;
		} phys;

		struct {
			u8 *page;
			u8 *addr;
		} virt;
	} src, dst;

	struct scatter_walk in;
	unsigned int nbytes;

	struct scatter_walk out;
	unsigned int total;

	void *page;
	u8 *buffer;
	u8 *iv;
	unsigned int ivsize;

	int flags;
	unsigned int walk_blocksize;
	unsigned int cipher_blocksize;
	unsigned int alignmask;
};

struct ablkcipher_walk {
	struct {
		struct page *page;
		unsigned int offset;
	} src, dst;

	struct scatter_walk	in;
	unsigned int		nbytes;
	struct scatter_walk	out;
	unsigned int		total;
	struct list_head	buffers;
	u8			*iv_buffer;
	u8			*iv;
	int			flags;
	unsigned int		blocksize;
};

extern const struct crypto_type crypto_ablkcipher_type;
extern const struct crypto_type crypto_blkcipher_type;

void crypto_mod_put(struct crypto_alg *alg);

int crypto_register_template(struct crypto_template *tmpl);
int crypto_register_templates(struct crypto_template *tmpls, int count);
void crypto_unregister_template(struct crypto_template *tmpl);
void crypto_unregister_templates(struct crypto_template *tmpls, int count);
struct crypto_template *crypto_lookup_template(const char *name);
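
/*
 * A minimal registration sketch (the "example" names are hypothetical):
 * a template is defined once and registered from module init; instances
 * are then created on demand through its ->create() hook.
 *
 *	static struct crypto_template example_tmpl = {
 *		.name = "example",
 *		.create = example_create,
 *		.module = THIS_MODULE,
 *	};
 *
 *	static int __init example_mod_init(void)
 *	{
 *		return crypto_register_template(&example_tmpl);
 *	}
 *
 *	static void __exit example_mod_exit(void)
 *	{
 *		crypto_unregister_template(&example_tmpl);
 *	}
 */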

int crypto_register_instance(struct crypto_template *tmpl,
			     struct crypto_instance *inst);
int crypto_unregister_instance(struct crypto_instance *inst);

int crypto_init_spawn(struct crypto_spawn *spawn, struct crypto_alg *alg,
		      struct crypto_instance *inst, u32 mask);
int crypto_init_spawn2(struct crypto_spawn *spawn, struct crypto_alg *alg,
		       struct crypto_instance *inst,
		       const struct crypto_type *frontend);
int crypto_grab_spawn(struct crypto_spawn *spawn, const char *name,
		      u32 type, u32 mask);

void crypto_drop_spawn(struct crypto_spawn *spawn);
struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type,
				    u32 mask);
void *crypto_spawn_tfm2(struct crypto_spawn *spawn);

static inline void crypto_set_spawn(struct crypto_spawn *spawn,
				    struct crypto_instance *inst)
{
	spawn->inst = inst;
}
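
/*
 * A typical spawn lifecycle, sketched (error handling elided; the ctx
 * and label names are hypothetical): an instance grabs a reference to
 * its underlying algorithm by name at creation time and drops it again
 * when the instance is freed.
 *
 *	err = crypto_grab_spawn(&ctx->spawn, cipher_name, type, mask);
 *	if (err)
 *		goto err_free_inst;
 *	...
 *	crypto_drop_spawn(&ctx->spawn);
 */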

struct crypto_attr_type *crypto_get_attr_type(struct rtattr **tb);
int crypto_check_attr_type(struct rtattr **tb, u32 type);
const char *crypto_attr_alg_name(struct rtattr *rta);
struct crypto_alg *crypto_attr_alg2(struct rtattr *rta,
				    const struct crypto_type *frontend,
				    u32 type, u32 mask);

static inline struct crypto_alg *crypto_attr_alg(struct rtattr *rta,
						 u32 type, u32 mask)
{
	return crypto_attr_alg2(rta, NULL, type, mask);
}

int crypto_attr_u32(struct rtattr *rta, u32 *num);
int crypto_inst_setname(struct crypto_instance *inst, const char *name,
			struct crypto_alg *alg);
void *crypto_alloc_instance(const char *name, struct crypto_alg *alg,
			    unsigned int head);

void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen);
int crypto_enqueue_request(struct crypto_queue *queue,
			   struct crypto_async_request *request);
struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue);
int crypto_tfm_in_queue(struct crypto_queue *queue, struct crypto_tfm *tfm);
static inline unsigned int crypto_queue_len(struct crypto_queue *queue)
{
	return queue->qlen;
}
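
/*
 * Enqueue-side sketch for an asynchronous driver (the priv structure
 * and its lock are hypothetical; drivers serialize queue access with
 * their own locking):
 *
 *	crypto_init_queue(&priv->queue, 50);
 *	...
 *	spin_lock_bh(&priv->lock);
 *	err = crypto_enqueue_request(&priv->queue, &req->base);
 *	spin_unlock_bh(&priv->lock);
 */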

void crypto_inc(u8 *a, unsigned int size);
void __crypto_xor(u8 *dst, const u8 *src1, const u8 *src2, unsigned int size);

static inline void crypto_xor(u8 *dst, const u8 *src, unsigned int size)
{
	if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
	    __builtin_constant_p(size) &&
	    (size % sizeof(unsigned long)) == 0) {
		unsigned long *d = (unsigned long *)dst;
		unsigned long *s = (unsigned long *)src;

		while (size > 0) {
			*d++ ^= *s++;
			size -= sizeof(unsigned long);
		}
	} else {
		__crypto_xor(dst, dst, src, size);
	}
}

static inline void crypto_xor_cpy(u8 *dst, const u8 *src1, const u8 *src2,
				  unsigned int size)
{
	if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
	    __builtin_constant_p(size) &&
	    (size % sizeof(unsigned long)) == 0) {
		unsigned long *d = (unsigned long *)dst;
		unsigned long *s1 = (unsigned long *)src1;
		unsigned long *s2 = (unsigned long *)src2;

		while (size > 0) {
			*d++ = *s1++ ^ *s2++;
			size -= sizeof(unsigned long);
		}
	} else {
		__crypto_xor(dst, src1, src2, size);
	}
}
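
/*
 * Usage sketch (block, prev, dst, src and bsize are hypothetical): CBC
 * style chaining XORs the previous ciphertext block into the next
 * plaintext block, either in place or fused with the copy to dst.
 *
 *	crypto_xor(block, prev, bsize);
 *	crypto_xor_cpy(dst, src, prev, bsize);
 */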

int blkcipher_walk_done(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk, int err);
int blkcipher_walk_virt(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk);
int blkcipher_walk_phys(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk);
int blkcipher_walk_virt_block(struct blkcipher_desc *desc,
			      struct blkcipher_walk *walk,
			      unsigned int blocksize);
int blkcipher_aead_walk_virt_block(struct blkcipher_desc *desc,
				   struct blkcipher_walk *walk,
				   struct crypto_aead *tfm,
				   unsigned int blocksize);
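
/*
 * The canonical consumer loop, sketched (process() and the "processed"
 * accounting are hypothetical placeholders for the cipher-specific
 * work; desc, dst, src and nbytes come from the caller):
 *
 *	struct blkcipher_walk walk;
 *	int err;
 *
 *	blkcipher_walk_init(&walk, dst, src, nbytes);
 *	err = blkcipher_walk_virt(desc, &walk);
 *	while (walk.nbytes) {
 *		processed = process(walk.dst.virt.addr, walk.src.virt.addr,
 *				    walk.nbytes);
 *		err = blkcipher_walk_done(desc, &walk,
 *					  walk.nbytes - processed);
 *	}
 *	return err;
 */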

int ablkcipher_walk_done(struct ablkcipher_request *req,
			 struct ablkcipher_walk *walk, int err);
int ablkcipher_walk_phys(struct ablkcipher_request *req,
			 struct ablkcipher_walk *walk);
void __ablkcipher_walk_complete(struct ablkcipher_walk *walk);

static inline void *crypto_tfm_ctx_aligned(struct crypto_tfm *tfm)
{
	return PTR_ALIGN(crypto_tfm_ctx(tfm),
			 crypto_tfm_alg_alignmask(tfm) + 1);
}

static inline struct crypto_instance *crypto_tfm_alg_instance(
	struct crypto_tfm *tfm)
{
	return container_of(tfm->__crt_alg, struct crypto_instance, alg);
}

static inline void *crypto_instance_ctx(struct crypto_instance *inst)
{
	return inst->__ctx;
}

static inline struct ablkcipher_alg *crypto_ablkcipher_alg(
	struct crypto_ablkcipher *tfm)
{
	return &crypto_ablkcipher_tfm(tfm)->__crt_alg->cra_ablkcipher;
}

static inline void *crypto_ablkcipher_ctx(struct crypto_ablkcipher *tfm)
{
	return crypto_tfm_ctx(&tfm->base);
}

static inline void *crypto_ablkcipher_ctx_aligned(struct crypto_ablkcipher *tfm)
{
	return crypto_tfm_ctx_aligned(&tfm->base);
}

static inline struct crypto_blkcipher *crypto_spawn_blkcipher(
	struct crypto_spawn *spawn)
{
	u32 type = CRYPTO_ALG_TYPE_BLKCIPHER;
	u32 mask = CRYPTO_ALG_TYPE_MASK;

	return __crypto_blkcipher_cast(crypto_spawn_tfm(spawn, type, mask));
}

static inline void *crypto_blkcipher_ctx(struct crypto_blkcipher *tfm)
{
	return crypto_tfm_ctx(&tfm->base);
}

static inline void *crypto_blkcipher_ctx_aligned(struct crypto_blkcipher *tfm)
{
	return crypto_tfm_ctx_aligned(&tfm->base);
}

static inline struct crypto_cipher *crypto_spawn_cipher(
	struct crypto_spawn *spawn)
{
	u32 type = CRYPTO_ALG_TYPE_CIPHER;
	u32 mask = CRYPTO_ALG_TYPE_MASK;

	return __crypto_cipher_cast(crypto_spawn_tfm(spawn, type, mask));
}

static inline struct cipher_alg *crypto_cipher_alg(struct crypto_cipher *tfm)
{
	return &crypto_cipher_tfm(tfm)->__crt_alg->cra_cipher;
}

static inline void blkcipher_walk_init(struct blkcipher_walk *walk,
				       struct scatterlist *dst,
				       struct scatterlist *src,
				       unsigned int nbytes)
{
	walk->in.sg = src;
	walk->out.sg = dst;
	walk->total = nbytes;
}

static inline void ablkcipher_walk_init(struct ablkcipher_walk *walk,
					struct scatterlist *dst,
					struct scatterlist *src,
					unsigned int nbytes)
{
	walk->in.sg = src;
	walk->out.sg = dst;
	walk->total = nbytes;
	INIT_LIST_HEAD(&walk->buffers);
}

static inline void ablkcipher_walk_complete(struct ablkcipher_walk *walk)
{
	if (unlikely(!list_empty(&walk->buffers)))
		__ablkcipher_walk_complete(walk);
}

static inline struct crypto_async_request *crypto_get_backlog(
	struct crypto_queue *queue)
{
	return queue->backlog == &queue->list ? NULL :
	       container_of(queue->backlog, struct crypto_async_request, list);
}
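
/*
 * Dequeue-side sketch (the priv structure and lock are hypothetical):
 * the backlog entry, if any, must be fetched before dequeueing, and is
 * notified with -EINPROGRESS once the driver takes over its request.
 *
 *	spin_lock_bh(&priv->lock);
 *	backlog = crypto_get_backlog(&priv->queue);
 *	req = crypto_dequeue_request(&priv->queue);
 *	spin_unlock_bh(&priv->lock);
 *
 *	if (backlog)
 *		backlog->complete(backlog, -EINPROGRESS);
 */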

static inline int ablkcipher_enqueue_request(struct crypto_queue *queue,
					     struct ablkcipher_request *request)
{
	return crypto_enqueue_request(queue, &request->base);
}

static inline struct ablkcipher_request *ablkcipher_dequeue_request(
	struct crypto_queue *queue)
{
	return ablkcipher_request_cast(crypto_dequeue_request(queue));
}

static inline void *ablkcipher_request_ctx(struct ablkcipher_request *req)
{
	return req->__ctx;
}

static inline int ablkcipher_tfm_in_queue(struct crypto_queue *queue,
					  struct crypto_ablkcipher *tfm)
{
	return crypto_tfm_in_queue(queue, crypto_ablkcipher_tfm(tfm));
}

static inline struct crypto_alg *crypto_get_attr_alg(struct rtattr **tb,
						     u32 type, u32 mask)
{
	return crypto_attr_alg(tb[1], type, mask);
}

static inline int crypto_requires_off(u32 type, u32 mask, u32 off)
{
	return (type ^ off) & mask & off;
}

/*
 * Returns CRYPTO_ALG_ASYNC if type/mask requires the use of sync algorithms.
 * Otherwise returns zero.
 */
static inline int crypto_requires_sync(u32 type, u32 mask)
{
	return crypto_requires_off(type, mask, CRYPTO_ALG_ASYNC);
}
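
/*
 * Typical use in a template's ->create(), sketched (algt and mask are
 * hypothetical locals): propagate the caller's sync requirement into
 * the mask used to look up the underlying algorithm.
 *
 *	mask |= crypto_requires_sync(algt->type, algt->mask);
 */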

noinline unsigned long __crypto_memneq(const void *a, const void *b, size_t size);

/**
 * crypto_memneq - Compare two areas of memory without leaking
 *		   timing information.
 *
 * @a: One area of memory
 * @b: Another area of memory
 * @size: The size of both areas.
 *
 * Returns 0 when data is equal, 1 otherwise.
 */
static inline int crypto_memneq(const void *a, const void *b, size_t size)
{
	return __crypto_memneq(a, b, size) != 0UL ? 1 : 0;
}
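
/*
 * Usage sketch (the tag names and authsize are hypothetical):
 * authentication-tag comparison, where a variable-time memcmp() would
 * leak how many leading bytes matched.
 *
 *	if (crypto_memneq(calculated_tag, received_tag, authsize))
 *		return -EBADMSG;
 */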

static inline void crypto_yield(u32 flags)
{
#if !defined(CONFIG_PREEMPT) || defined(CONFIG_PREEMPT_VOLUNTARY)
	if (flags & CRYPTO_TFM_REQ_MAY_SLEEP)
		cond_resched();
#endif
}

int crypto_register_notifier(struct notifier_block *nb);
int crypto_unregister_notifier(struct notifier_block *nb);

/* Crypto notification events. */
enum {
	CRYPTO_MSG_ALG_REQUEST,
	CRYPTO_MSG_ALG_REGISTER,
	CRYPTO_MSG_ALG_LOADED,
};

#endif	/* _CRYPTO_ALGAPI_H */