/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Cryptographic API for algorithms (i.e., low-level API).
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 */
#ifndef _CRYPTO_ALGAPI_H
#define _CRYPTO_ALGAPI_H

#include <linux/crypto.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>

/*
 * Maximum values for blocksize and alignmask, used to allocate
 * static buffers that are big enough for any combination of
 * algs and architectures. Ciphers have a lower maximum size.
 */
#define MAX_ALGAPI_BLOCKSIZE		160
#define MAX_ALGAPI_ALIGNMASK		63
#define MAX_CIPHER_BLOCKSIZE		16
#define MAX_CIPHER_ALIGNMASK		15

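/*
 * Example (illustrative, not part of the original header): these limits
 * let callers keep worst-case scratch buffers on the stack.  The pattern
 * below mirrors what the core cipher code does to hold one block at the
 * algorithm's required alignment ("tfm" stands for any cipher handle):
 *
 *	u8 buffer[MAX_CIPHER_BLOCKSIZE + MAX_CIPHER_ALIGNMASK];
 *	u8 *tmp = (u8 *)ALIGN((unsigned long)buffer,
 *			      crypto_cipher_alignmask(tfm) + 1);
 */
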
struct crypto_aead;
struct crypto_instance;
struct module;
struct rtattr;
struct seq_file;

struct crypto_type {
	unsigned int (*ctxsize)(struct crypto_alg *alg, u32 type, u32 mask);
	unsigned int (*extsize)(struct crypto_alg *alg);
	int (*init)(struct crypto_tfm *tfm, u32 type, u32 mask);
	int (*init_tfm)(struct crypto_tfm *tfm);
	void (*show)(struct seq_file *m, struct crypto_alg *alg);
	int (*report)(struct sk_buff *skb, struct crypto_alg *alg);
	void (*free)(struct crypto_instance *inst);

	unsigned int type;
	unsigned int maskclear;
	unsigned int maskset;
	unsigned int tfmsize;
};

struct crypto_instance {
	struct crypto_alg alg;

	struct crypto_template *tmpl;

	union {
		/* Node in list of instances after registration. */
		struct hlist_node list;
		/* List of attached spawns before registration. */
		struct crypto_spawn *spawns;
	};

	void *__ctx[] CRYPTO_MINALIGN_ATTR;
};

struct crypto_template {
	struct list_head list;
	struct hlist_head instances;
	struct module *module;

	struct crypto_instance *(*alloc)(struct rtattr **tb);
	void (*free)(struct crypto_instance *inst);
	int (*create)(struct crypto_template *tmpl, struct rtattr **tb);

	char name[CRYPTO_MAX_ALG_NAME];
};

struct crypto_spawn {
	struct list_head list;
	struct crypto_alg *alg;
	union {
		/* Back pointer to instance after registration. */
		struct crypto_instance *inst;
		/* Spawn list pointer prior to registration. */
		struct crypto_spawn *next;
	};
	const struct crypto_type *frontend;
	u32 mask;
	bool dead;
	bool dropref;
	bool registered;
};

struct crypto_queue {
	struct list_head list;
	struct list_head *backlog;

	unsigned int qlen;
	unsigned int max_qlen;
};

struct scatter_walk {
	struct scatterlist *sg;
	unsigned int offset;
};

void crypto_mod_put(struct crypto_alg *alg);

int crypto_register_template(struct crypto_template *tmpl);
int crypto_register_templates(struct crypto_template *tmpls, int count);
void crypto_unregister_template(struct crypto_template *tmpl);
void crypto_unregister_templates(struct crypto_template *tmpls, int count);
struct crypto_template *crypto_lookup_template(const char *name);

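/*
 * Example (illustrative sketch, all "example" names hypothetical): a
 * template module registers itself on load and unregisters on unload;
 * instances created from it hang off ->instances until then.
 *
 *	static struct crypto_template example_tmpl = {
 *		.name = "example",
 *		.create = example_create,
 *		.module = THIS_MODULE,
 *	};
 *
 *	static int __init example_module_init(void)
 *	{
 *		return crypto_register_template(&example_tmpl);
 *	}
 *
 *	static void __exit example_module_exit(void)
 *	{
 *		crypto_unregister_template(&example_tmpl);
 *	}
 */
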
int crypto_register_instance(struct crypto_template *tmpl,
			     struct crypto_instance *inst);
void crypto_unregister_instance(struct crypto_instance *inst);

int crypto_init_spawn(struct crypto_spawn *spawn, struct crypto_alg *alg,
		      struct crypto_instance *inst, u32 mask);
int crypto_init_spawn2(struct crypto_spawn *spawn, struct crypto_alg *alg,
		       struct crypto_instance *inst,
		       const struct crypto_type *frontend);
int crypto_grab_spawn(struct crypto_spawn *spawn, const char *name,
		      u32 type, u32 mask);

void crypto_drop_spawn(struct crypto_spawn *spawn);
struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type,
				    u32 mask);
void *crypto_spawn_tfm2(struct crypto_spawn *spawn);

static inline void crypto_set_spawn(struct crypto_spawn *spawn,
				    struct crypto_instance *inst)
{
	spawn->inst = inst;
}

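/*
 * Example (illustrative sketch): at this point in the API's evolution a
 * template's ->create() typically points the spawn at its instance and
 * then grabs the underlying algorithm by name, while ->free() drops the
 * reference again.  "cipher_name" and the error label are hypothetical.
 *
 *	struct crypto_spawn *spawn = crypto_instance_ctx(inst);
 *	int err;
 *
 *	crypto_set_spawn(spawn, inst);
 *	err = crypto_grab_spawn(spawn, cipher_name, CRYPTO_ALG_TYPE_CIPHER,
 *				CRYPTO_ALG_TYPE_MASK);
 *	if (err)
 *		goto err_free_inst;
 *
 * and on the teardown path:
 *
 *	crypto_drop_spawn(spawn);
 */
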
struct crypto_attr_type *crypto_get_attr_type(struct rtattr **tb);
int crypto_check_attr_type(struct rtattr **tb, u32 type);
const char *crypto_attr_alg_name(struct rtattr *rta);
struct crypto_alg *crypto_attr_alg2(struct rtattr *rta,
				    const struct crypto_type *frontend,
				    u32 type, u32 mask);

static inline struct crypto_alg *crypto_attr_alg(struct rtattr *rta,
						 u32 type, u32 mask)
{
	return crypto_attr_alg2(rta, NULL, type, mask);
}

int crypto_attr_u32(struct rtattr *rta, u32 *num);
int crypto_inst_setname(struct crypto_instance *inst, const char *name,
			struct crypto_alg *alg);
void *crypto_alloc_instance(const char *name, struct crypto_alg *alg,
			    unsigned int head);

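/*
 * Example (illustrative sketch): a ->create() implementation usually
 * starts by validating the requested type and pulling the inner
 * algorithm's name out of the attribute list:
 *
 *	const char *name;
 *	int err;
 *
 *	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER);
 *	if (err)
 *		return err;
 *
 *	name = crypto_attr_alg_name(tb[1]);
 *	if (IS_ERR(name))
 *		return PTR_ERR(name);
 *
 * Once the inner algorithm is resolved, crypto_inst_setname(inst,
 * tmpl->name, alg) derives the "template(inner)" style instance names.
 */
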
void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen);
int crypto_enqueue_request(struct crypto_queue *queue,
			   struct crypto_async_request *request);
struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue);
static inline unsigned int crypto_queue_len(struct crypto_queue *queue)
{
	return queue->qlen;
}

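/*
 * Example (illustrative sketch): hardware drivers commonly funnel
 * requests through a crypto_queue under a lock ("priv" is a hypothetical
 * driver context).  crypto_enqueue_request() returns -EBUSY when a
 * MAY_BACKLOG request is parked on the backlog and -ENOSPC when the
 * queue is full and the request may not be backlogged.
 *
 *	crypto_init_queue(&priv->queue, 50);
 *
 *	spin_lock_bh(&priv->lock);
 *	err = crypto_enqueue_request(&priv->queue, &req->base);
 *	spin_unlock_bh(&priv->lock);
 */
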
void crypto_inc(u8 *a, unsigned int size);
void __crypto_xor(u8 *dst, const u8 *src1, const u8 *src2, unsigned int size);

static inline void crypto_xor(u8 *dst, const u8 *src, unsigned int size)
{
	if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
	    __builtin_constant_p(size) &&
	    (size % sizeof(unsigned long)) == 0) {
		unsigned long *d = (unsigned long *)dst;
		unsigned long *s = (unsigned long *)src;

		while (size > 0) {
			*d++ ^= *s++;
			size -= sizeof(unsigned long);
		}
	} else {
		__crypto_xor(dst, dst, src, size);
	}
}

static inline void crypto_xor_cpy(u8 *dst, const u8 *src1, const u8 *src2,
				  unsigned int size)
{
	if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
	    __builtin_constant_p(size) &&
	    (size % sizeof(unsigned long)) == 0) {
		unsigned long *d = (unsigned long *)dst;
		unsigned long *s1 = (unsigned long *)src1;
		unsigned long *s2 = (unsigned long *)src2;

		while (size > 0) {
			*d++ = *s1++ ^ *s2++;
			size -= sizeof(unsigned long);
		}
	} else {
		__crypto_xor(dst, src1, src2, size);
	}
}

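/*
 * Example (illustrative): CBC-style chaining XORs the previous ciphertext
 * block into the current block, and CTR mode steps a big-endian counter.
 * With a compile-time constant, word-multiple size the helpers above
 * inline into plain word operations:
 *
 *	crypto_xor(block, prev, bsize);          (in place: block ^= prev)
 *	crypto_xor_cpy(dst, src1, src2, bsize);  (dst = src1 ^ src2)
 *	crypto_inc(ctrblk, bsize);               (big-endian counter += 1)
 */
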
static inline void *crypto_tfm_ctx_aligned(struct crypto_tfm *tfm)
{
	return PTR_ALIGN(crypto_tfm_ctx(tfm),
			 crypto_tfm_alg_alignmask(tfm) + 1);
}

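/*
 * Example (illustrative, "struct example_ctx" hypothetical): algorithms
 * that declare a nonzero cra_alignmask and reserve alignmask slack in
 * cra_ctxsize fetch their context through the aligned accessor, so that
 * aligned (e.g. SIMD) loads from it are safe:
 *
 *	struct example_ctx *ctx = crypto_tfm_ctx_aligned(tfm);
 */
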
static inline struct crypto_instance *crypto_tfm_alg_instance(
	struct crypto_tfm *tfm)
{
	return container_of(tfm->__crt_alg, struct crypto_instance, alg);
}

static inline void *crypto_instance_ctx(struct crypto_instance *inst)
{
	return inst->__ctx;
}

static inline struct crypto_cipher *crypto_spawn_cipher(
	struct crypto_spawn *spawn)
{
	u32 type = CRYPTO_ALG_TYPE_CIPHER;
	u32 mask = CRYPTO_ALG_TYPE_MASK;

	return __crypto_cipher_cast(crypto_spawn_tfm(spawn, type, mask));
}

static inline struct cipher_alg *crypto_cipher_alg(struct crypto_cipher *tfm)
{
	return &crypto_cipher_tfm(tfm)->__crt_alg->cra_cipher;
}

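/*
 * Example (illustrative sketch, "example" names hypothetical): an
 * instance's ->cra_init typically instantiates the underlying cipher
 * from its spawn and stashes the handle in the tfm context:
 *
 *	static int example_init_tfm(struct crypto_tfm *tfm)
 *	{
 *		struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
 *		struct example_ctx *ctx = crypto_tfm_ctx(tfm);
 *		struct crypto_cipher *cipher;
 *
 *		cipher = crypto_spawn_cipher(crypto_instance_ctx(inst));
 *		if (IS_ERR(cipher))
 *			return PTR_ERR(cipher);
 *
 *		ctx->child = cipher;
 *		return 0;
 *	}
 */
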
static inline struct crypto_async_request *crypto_get_backlog(
	struct crypto_queue *queue)
{
	return queue->backlog == &queue->list ? NULL :
	       container_of(queue->backlog, struct crypto_async_request, list);
}

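/*
 * Example (illustrative): when a driver pulls the next request off its
 * queue it must first peek at the backlog, and, if the dequeue promoted
 * a backlogged request, tell its owner that it is now in progress:
 *
 *	backlog = crypto_get_backlog(&priv->queue);
 *	req = crypto_dequeue_request(&priv->queue);
 *
 *	if (backlog)
 *		backlog->complete(backlog, -EINPROGRESS);
 */
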
static inline struct crypto_alg *crypto_get_attr_alg(struct rtattr **tb,
						     u32 type, u32 mask)
{
	return crypto_attr_alg(tb[1], type, mask);
}

static inline int crypto_requires_off(u32 type, u32 mask, u32 off)
{
	return (type ^ off) & mask & off;
}

/*
 * Returns CRYPTO_ALG_ASYNC if type/mask requires the use of sync algorithms.
 * Otherwise returns zero.
 */
static inline int crypto_requires_sync(u32 type, u32 mask)
{
	return crypto_requires_off(type, mask, CRYPTO_ALG_ASYNC);
}

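/*
 * Example (illustrative): templates fold the result into the lookup mask
 * for their inner algorithm, so that a caller asking for a synchronous
 * instance only ever binds synchronous children:
 *
 *	mask |= crypto_requires_sync(algt->type, algt->mask);
 */
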
noinline unsigned long __crypto_memneq(const void *a, const void *b, size_t size);

/**
 * crypto_memneq - Compare two areas of memory without leaking
 *		   timing information.
 *
 * @a: One area of memory
 * @b: Another area of memory
 * @size: The size of the area.
 *
 * Returns 0 when data is equal, 1 otherwise.
 */
static inline int crypto_memneq(const void *a, const void *b, size_t size)
{
	return __crypto_memneq(a, b, size) != 0UL ? 1 : 0;
}

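/*
 * Example (illustrative): authentication tags must be compared with
 * crypto_memneq() rather than memcmp(), so that the comparison time does
 * not depend on where the first mismatching byte sits:
 *
 *	if (crypto_memneq(computed_tag, received_tag, authsize))
 *		return -EBADMSG;
 */
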
static inline void crypto_yield(u32 flags)
{
	if (flags & CRYPTO_TFM_REQ_MAY_SLEEP)
		cond_resched();
}

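/*
 * Example (illustrative): long-running software implementations call
 * this periodically, e.g. once per processed chunk, so that requests
 * flagged as sleepable do not hog the CPU:
 *
 *	crypto_yield(req->base.flags);
 */
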
int crypto_register_notifier(struct notifier_block *nb);
int crypto_unregister_notifier(struct notifier_block *nb);

/* Crypto notification events. */
enum {
	CRYPTO_MSG_ALG_REQUEST,
	CRYPTO_MSG_ALG_REGISTER,
	CRYPTO_MSG_ALG_LOADED,
};

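/*
 * Example (illustrative sketch, "example" names hypothetical): a listener
 * supplies a notifier_block and switches on the event code:
 *
 *	static int example_notify(struct notifier_block *nb,
 *				  unsigned long msg, void *data)
 *	{
 *		if (msg == CRYPTO_MSG_ALG_LOADED)
 *			pr_debug("algorithm finished self-tests\n");
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block example_nb = {
 *		.notifier_call = example_notify,
 *	};
 *
 *	crypto_register_notifier(&example_nb);
 */
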
#endif	/* _CRYPTO_ALGAPI_H */