xref: /openbmc/linux/include/crypto/algapi.h (revision 62ea22c4)
/*
 * Cryptographic API for algorithms (i.e., low-level API).
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */
#ifndef _CRYPTO_ALGAPI_H
#define _CRYPTO_ALGAPI_H

#include <linux/crypto.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>

struct crypto_aead;
struct crypto_instance;
struct module;
struct rtattr;
struct seq_file;

struct crypto_type {
	unsigned int (*ctxsize)(struct crypto_alg *alg, u32 type, u32 mask);
	unsigned int (*extsize)(struct crypto_alg *alg);
	int (*init)(struct crypto_tfm *tfm, u32 type, u32 mask);
	int (*init_tfm)(struct crypto_tfm *tfm);
	void (*show)(struct seq_file *m, struct crypto_alg *alg);
	int (*report)(struct sk_buff *skb, struct crypto_alg *alg);
	void (*free)(struct crypto_instance *inst);

	unsigned int type;
	unsigned int maskclear;
	unsigned int maskset;
	unsigned int tfmsize;
};
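
/*
 * A rough, illustrative sketch of a crypto_type "frontend" (all "example"
 * names are hypothetical; real frontends live in the crypto core): the
 * mask fields restrict lookups to the right algorithm type, and tfmsize
 * locates the base tfm inside the frontend wrapper.
 *
 *	static const struct crypto_type crypto_example_type = {
 *		.extsize	= crypto_example_extsize,
 *		.init_tfm	= crypto_example_init_tfm,
 *		.maskclear	= ~CRYPTO_ALG_TYPE_MASK,
 *		.maskset	= CRYPTO_ALG_TYPE_MASK,
 *		.type		= CRYPTO_ALG_TYPE_ABLKCIPHER,
 *		.tfmsize	= offsetof(struct crypto_example, base),
 *	};
 */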

struct crypto_instance {
	struct crypto_alg alg;

	struct crypto_template *tmpl;
	struct hlist_node list;

	void *__ctx[] CRYPTO_MINALIGN_ATTR;
};

struct crypto_template {
	struct list_head list;
	struct hlist_head instances;
	struct module *module;

	struct crypto_instance *(*alloc)(struct rtattr **tb);
	void (*free)(struct crypto_instance *inst);
	int (*create)(struct crypto_template *tmpl, struct rtattr **tb);

	char name[CRYPTO_MAX_ALG_NAME];
};
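
/*
 * A minimal, illustrative template (the "example" names are hypothetical):
 * a template is normally registered from module init and builds instances
 * of itself in ->create() (or the older ->alloc()).
 *
 *	static struct crypto_template example_tmpl = {
 *		.name	= "example",
 *		.create	= example_create,
 *		.module	= THIS_MODULE,
 *	};
 *
 *	static int __init example_module_init(void)
 *	{
 *		return crypto_register_template(&example_tmpl);
 *	}
 */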

struct crypto_spawn {
	struct list_head list;
	struct crypto_alg *alg;
	struct crypto_instance *inst;
	const struct crypto_type *frontend;
	u32 mask;
};

struct crypto_queue {
	struct list_head list;
	struct list_head *backlog;

	unsigned int qlen;
	unsigned int max_qlen;
};

struct scatter_walk {
	struct scatterlist *sg;
	unsigned int offset;
};

struct blkcipher_walk {
	union {
		struct {
			struct page *page;
			unsigned long offset;
		} phys;

		struct {
			u8 *page;
			u8 *addr;
		} virt;
	} src, dst;

	struct scatter_walk in;
	unsigned int nbytes;

	struct scatter_walk out;
	unsigned int total;

	void *page;
	u8 *buffer;
	u8 *iv;
	unsigned int ivsize;

	int flags;
	unsigned int walk_blocksize;
	unsigned int cipher_blocksize;
	unsigned int alignmask;
};

struct ablkcipher_walk {
	struct {
		struct page *page;
		unsigned int offset;
	} src, dst;

	struct scatter_walk	in;
	unsigned int		nbytes;
	struct scatter_walk	out;
	unsigned int		total;
	struct list_head	buffers;
	u8			*iv_buffer;
	u8			*iv;
	int			flags;
	unsigned int		blocksize;
};

extern const struct crypto_type crypto_ablkcipher_type;
extern const struct crypto_type crypto_blkcipher_type;

void crypto_mod_put(struct crypto_alg *alg);

int crypto_register_template(struct crypto_template *tmpl);
void crypto_unregister_template(struct crypto_template *tmpl);
struct crypto_template *crypto_lookup_template(const char *name);

int crypto_register_instance(struct crypto_template *tmpl,
			     struct crypto_instance *inst);
int crypto_unregister_instance(struct crypto_instance *inst);

int crypto_init_spawn(struct crypto_spawn *spawn, struct crypto_alg *alg,
		      struct crypto_instance *inst, u32 mask);
int crypto_init_spawn2(struct crypto_spawn *spawn, struct crypto_alg *alg,
		       struct crypto_instance *inst,
		       const struct crypto_type *frontend);
int crypto_grab_spawn(struct crypto_spawn *spawn, const char *name,
		      u32 type, u32 mask);

void crypto_drop_spawn(struct crypto_spawn *spawn);
struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type,
				    u32 mask);
void *crypto_spawn_tfm2(struct crypto_spawn *spawn);
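
/*
 * Illustrative spawn lifecycle (hypothetical names, error handling
 * elided): a template instance typically keeps its spawn in the instance
 * context, binds it to the underlying algorithm while the instance is
 * built, creates a child tfm from it at init time, and drops it when the
 * instance is freed.
 *
 *	struct crypto_spawn *spawn = crypto_instance_ctx(inst);
 *	int err;
 *
 *	err = crypto_init_spawn(spawn, alg, inst, CRYPTO_ALG_TYPE_MASK);
 *	...
 *	child = crypto_spawn_tfm(spawn, CRYPTO_ALG_TYPE_CIPHER,
 *				 CRYPTO_ALG_TYPE_MASK);
 *	...
 *	crypto_drop_spawn(spawn);
 */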

static inline void crypto_set_spawn(struct crypto_spawn *spawn,
				    struct crypto_instance *inst)
{
	spawn->inst = inst;
}

struct crypto_attr_type *crypto_get_attr_type(struct rtattr **tb);
int crypto_check_attr_type(struct rtattr **tb, u32 type);
const char *crypto_attr_alg_name(struct rtattr *rta);
struct crypto_alg *crypto_attr_alg2(struct rtattr *rta,
				    const struct crypto_type *frontend,
				    u32 type, u32 mask);

static inline struct crypto_alg *crypto_attr_alg(struct rtattr *rta,
						 u32 type, u32 mask)
{
	return crypto_attr_alg2(rta, NULL, type, mask);
}

int crypto_attr_u32(struct rtattr *rta, u32 *num);
int crypto_inst_setname(struct crypto_instance *inst, const char *name,
			struct crypto_alg *alg);
void *crypto_alloc_instance2(const char *name, struct crypto_alg *alg,
			     unsigned int head);
struct crypto_instance *crypto_alloc_instance(const char *name,
					      struct crypto_alg *alg);
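
/*
 * A minimal sketch of a legacy ->alloc() callback (hypothetical "example"
 * names) combining the attribute and instance helpers above: look up the
 * underlying algorithm from the template parameters, wrap it in a new
 * instance, then drop the algorithm reference.
 *
 *	static struct crypto_instance *example_alloc(struct rtattr **tb)
 *	{
 *		struct crypto_instance *inst;
 *		struct crypto_alg *alg;
 *
 *		alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER,
 *					  CRYPTO_ALG_TYPE_MASK);
 *		if (IS_ERR(alg))
 *			return ERR_CAST(alg);
 *
 *		inst = crypto_alloc_instance("example", alg);
 *		crypto_mod_put(alg);
 *		return inst;
 *	}
 */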

void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen);
int crypto_enqueue_request(struct crypto_queue *queue,
			   struct crypto_async_request *request);
struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue);
int crypto_tfm_in_queue(struct crypto_queue *queue, struct crypto_tfm *tfm);
static inline unsigned int crypto_queue_len(struct crypto_queue *queue)
{
	return queue->qlen;
}
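
/*
 * Illustrative queue usage (a sketch only; a real driver serialises these
 * calls with its own lock): requests are enqueued from the request path
 * and drained from a worker, with max_qlen bounding the queue (the value
 * below is arbitrary).
 *
 *	crypto_init_queue(&queue, 128);
 *	...
 *	err = crypto_enqueue_request(&queue, &req->base);
 *	...
 *	req = crypto_dequeue_request(&queue);
 */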

void crypto_inc(u8 *a, unsigned int size);
void __crypto_xor(u8 *dst, const u8 *src1, const u8 *src2, unsigned int size);

static inline void crypto_xor(u8 *dst, const u8 *src, unsigned int size)
{
	if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
	    __builtin_constant_p(size) &&
	    (size % sizeof(unsigned long)) == 0) {
		unsigned long *d = (unsigned long *)dst;
		unsigned long *s = (unsigned long *)src;

		while (size > 0) {
			*d++ ^= *s++;
			size -= sizeof(unsigned long);
		}
	} else {
		__crypto_xor(dst, dst, src, size);
	}
}
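
/*
 * Typical crypto_xor() use, e.g. folding an IV into the next plaintext
 * block in a CBC-style mode (names illustrative):
 *
 *	crypto_xor(block, iv, bsize);
 */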

static inline void crypto_xor_cpy(u8 *dst, const u8 *src1, const u8 *src2,
				  unsigned int size)
{
	if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
	    __builtin_constant_p(size) &&
	    (size % sizeof(unsigned long)) == 0) {
		unsigned long *d = (unsigned long *)dst;
		unsigned long *s1 = (unsigned long *)src1;
		unsigned long *s2 = (unsigned long *)src2;

		while (size > 0) {
			*d++ = *s1++ ^ *s2++;
			size -= sizeof(unsigned long);
		}
	} else {
		__crypto_xor(dst, src1, src2, size);
	}
}

int blkcipher_walk_done(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk, int err);
int blkcipher_walk_virt(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk);
int blkcipher_walk_phys(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk);
int blkcipher_walk_virt_block(struct blkcipher_desc *desc,
			      struct blkcipher_walk *walk,
			      unsigned int blocksize);
int blkcipher_aead_walk_virt_block(struct blkcipher_desc *desc,
				   struct blkcipher_walk *walk,
				   struct crypto_aead *tfm,
				   unsigned int blocksize);
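
/*
 * The usual walk pattern for a synchronous blkcipher implementation (a
 * sketch; error handling abbreviated): initialise the walk, then process
 * and complete one chunk per iteration until no bytes remain.
 *
 *	struct blkcipher_walk walk;
 *	int err;
 *
 *	blkcipher_walk_init(&walk, dst, src, nbytes);
 *	err = blkcipher_walk_virt(desc, &walk);
 *
 *	while (walk.nbytes) {
 *		(transform walk.src.virt.addr into walk.dst.virt.addr)
 *		err = blkcipher_walk_done(desc, &walk, 0);
 *	}
 *	return err;
 */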

int ablkcipher_walk_done(struct ablkcipher_request *req,
			 struct ablkcipher_walk *walk, int err);
int ablkcipher_walk_phys(struct ablkcipher_request *req,
			 struct ablkcipher_walk *walk);
void __ablkcipher_walk_complete(struct ablkcipher_walk *walk);

static inline void *crypto_tfm_ctx_aligned(struct crypto_tfm *tfm)
{
	return PTR_ALIGN(crypto_tfm_ctx(tfm),
			 crypto_tfm_alg_alignmask(tfm) + 1);
}

static inline struct crypto_instance *crypto_tfm_alg_instance(
	struct crypto_tfm *tfm)
{
	return container_of(tfm->__crt_alg, struct crypto_instance, alg);
}

static inline void *crypto_instance_ctx(struct crypto_instance *inst)
{
	return inst->__ctx;
}

static inline struct ablkcipher_alg *crypto_ablkcipher_alg(
	struct crypto_ablkcipher *tfm)
{
	return &crypto_ablkcipher_tfm(tfm)->__crt_alg->cra_ablkcipher;
}

static inline void *crypto_ablkcipher_ctx(struct crypto_ablkcipher *tfm)
{
	return crypto_tfm_ctx(&tfm->base);
}

static inline void *crypto_ablkcipher_ctx_aligned(struct crypto_ablkcipher *tfm)
{
	return crypto_tfm_ctx_aligned(&tfm->base);
}

static inline struct crypto_blkcipher *crypto_spawn_blkcipher(
	struct crypto_spawn *spawn)
{
	u32 type = CRYPTO_ALG_TYPE_BLKCIPHER;
	u32 mask = CRYPTO_ALG_TYPE_MASK;

	return __crypto_blkcipher_cast(crypto_spawn_tfm(spawn, type, mask));
}

static inline void *crypto_blkcipher_ctx(struct crypto_blkcipher *tfm)
{
	return crypto_tfm_ctx(&tfm->base);
}

static inline void *crypto_blkcipher_ctx_aligned(struct crypto_blkcipher *tfm)
{
	return crypto_tfm_ctx_aligned(&tfm->base);
}

static inline struct crypto_cipher *crypto_spawn_cipher(
	struct crypto_spawn *spawn)
{
	u32 type = CRYPTO_ALG_TYPE_CIPHER;
	u32 mask = CRYPTO_ALG_TYPE_MASK;

	return __crypto_cipher_cast(crypto_spawn_tfm(spawn, type, mask));
}

static inline struct cipher_alg *crypto_cipher_alg(struct crypto_cipher *tfm)
{
	return &crypto_cipher_tfm(tfm)->__crt_alg->cra_cipher;
}

static inline void blkcipher_walk_init(struct blkcipher_walk *walk,
				       struct scatterlist *dst,
				       struct scatterlist *src,
				       unsigned int nbytes)
{
	walk->in.sg = src;
	walk->out.sg = dst;
	walk->total = nbytes;
}

static inline void ablkcipher_walk_init(struct ablkcipher_walk *walk,
					struct scatterlist *dst,
					struct scatterlist *src,
					unsigned int nbytes)
{
	walk->in.sg = src;
	walk->out.sg = dst;
	walk->total = nbytes;
	INIT_LIST_HEAD(&walk->buffers);
}

static inline void ablkcipher_walk_complete(struct ablkcipher_walk *walk)
{
	if (unlikely(!list_empty(&walk->buffers)))
		__ablkcipher_walk_complete(walk);
}

static inline struct crypto_async_request *crypto_get_backlog(
	struct crypto_queue *queue)
{
	return queue->backlog == &queue->list ? NULL :
	       container_of(queue->backlog, struct crypto_async_request, list);
}
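
/*
 * Sketch of the usual dequeue-with-backlog sequence (locking omitted):
 * the backlogged request, if any, is notified with -EINPROGRESS before
 * the next request is taken up.
 *
 *	backlog = crypto_get_backlog(&queue);
 *	req = crypto_dequeue_request(&queue);
 *
 *	if (backlog)
 *		backlog->complete(backlog, -EINPROGRESS);
 */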

static inline int ablkcipher_enqueue_request(struct crypto_queue *queue,
					     struct ablkcipher_request *request)
{
	return crypto_enqueue_request(queue, &request->base);
}

static inline struct ablkcipher_request *ablkcipher_dequeue_request(
	struct crypto_queue *queue)
{
	return ablkcipher_request_cast(crypto_dequeue_request(queue));
}

static inline void *ablkcipher_request_ctx(struct ablkcipher_request *req)
{
	return req->__ctx;
}

static inline int ablkcipher_tfm_in_queue(struct crypto_queue *queue,
					  struct crypto_ablkcipher *tfm)
{
	return crypto_tfm_in_queue(queue, crypto_ablkcipher_tfm(tfm));
}

static inline struct crypto_alg *crypto_get_attr_alg(struct rtattr **tb,
						     u32 type, u32 mask)
{
	return crypto_attr_alg(tb[1], type, mask);
}

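/*
 * crypto_requires_off() returns the bits of @off that @type/@mask require
 * to be clear in a matching algorithm.  Worked example: with
 * off == CRYPTO_ALG_ASYNC, type == 0 and mask == CRYPTO_ALG_ASYNC,
 * (type ^ off) & mask & off == CRYPTO_ALG_ASYNC, i.e. the caller asked
 * for a synchronous implementation.
 */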
static inline int crypto_requires_off(u32 type, u32 mask, u32 off)
{
	return (type ^ off) & mask & off;
}

/*
 * Returns CRYPTO_ALG_ASYNC if type/mask requires the use of sync algorithms.
 * Otherwise returns zero.
 */
static inline int crypto_requires_sync(u32 type, u32 mask)
{
	return crypto_requires_off(type, mask, CRYPTO_ALG_ASYNC);
}

noinline unsigned long __crypto_memneq(const void *a, const void *b, size_t size);

/**
 * crypto_memneq - Compare two areas of memory without leaking
 *		   timing information.
 *
 * @a: One area of memory
 * @b: Another area of memory
 * @size: The size of the area.
 *
 * Returns 0 when data is equal, 1 otherwise.
 */
static inline int crypto_memneq(const void *a, const void *b, size_t size)
{
	return __crypto_memneq(a, b, size) != 0UL ? 1 : 0;
}
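
/*
 * Typical use when verifying an authentication tag (names illustrative):
 * the constant-time comparison avoids leaking how many leading bytes
 * matched.
 *
 *	if (crypto_memneq(computed_tag, received_tag, authsize))
 *		return -EBADMSG;
 */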

static inline void crypto_yield(u32 flags)
{
#if !defined(CONFIG_PREEMPT) || defined(CONFIG_PREEMPT_VOLUNTARY)
	if (flags & CRYPTO_TFM_REQ_MAY_SLEEP)
		cond_resched();
#endif
}

#endif	/* _CRYPTO_ALGAPI_H */