/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Cryptographic API for algorithms (i.e., low-level API).
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 */
#ifndef _CRYPTO_ALGAPI_H
#define _CRYPTO_ALGAPI_H

#include <linux/crypto.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>

/*
 * Maximum values for blocksize and alignmask, used to allocate
 * static buffers that are big enough for any combination of
 * algs and architectures. Ciphers have a lower maximum size.
 */
#define MAX_ALGAPI_BLOCKSIZE		160
#define MAX_ALGAPI_ALIGNMASK		63
#define MAX_CIPHER_BLOCKSIZE		16
#define MAX_CIPHER_ALIGNMASK		15

struct crypto_aead;
struct crypto_instance;
struct module;
struct rtattr;
struct seq_file;

struct crypto_type {
	unsigned int (*ctxsize)(struct crypto_alg *alg, u32 type, u32 mask);
	unsigned int (*extsize)(struct crypto_alg *alg);
	int (*init)(struct crypto_tfm *tfm, u32 type, u32 mask);
	int (*init_tfm)(struct crypto_tfm *tfm);
	void (*show)(struct seq_file *m, struct crypto_alg *alg);
	int (*report)(struct sk_buff *skb, struct crypto_alg *alg);
	void (*free)(struct crypto_instance *inst);

	unsigned int type;
	unsigned int maskclear;
	unsigned int maskset;
	unsigned int tfmsize;
};

struct crypto_instance {
	struct crypto_alg alg;

	struct crypto_template *tmpl;
	struct hlist_node list;

	void *__ctx[] CRYPTO_MINALIGN_ATTR;
};

struct crypto_template {
	struct list_head list;
	struct hlist_head instances;
	struct module *module;

	struct crypto_instance *(*alloc)(struct rtattr **tb);
	void (*free)(struct crypto_instance *inst);
	int (*create)(struct crypto_template *tmpl, struct rtattr **tb);

	char name[CRYPTO_MAX_ALG_NAME];
};

struct crypto_spawn {
	struct list_head list;
	struct crypto_alg *alg;
	struct crypto_instance *inst;
	const struct crypto_type *frontend;
	u32 mask;
};

struct crypto_queue {
	struct list_head list;
	struct list_head *backlog;

	unsigned int qlen;
	unsigned int max_qlen;
};

struct scatter_walk {
	struct scatterlist *sg;
	unsigned int offset;
};

struct ablkcipher_walk {
	struct {
		struct page *page;
		unsigned int offset;
	} src, dst;

	struct scatter_walk in;
	unsigned int nbytes;
	struct scatter_walk out;
	unsigned int total;
	struct list_head buffers;
	u8 *iv_buffer;
	u8 *iv;
	int flags;
	unsigned int blocksize;
};

extern const struct crypto_type crypto_ablkcipher_type;

void crypto_mod_put(struct crypto_alg *alg);

int crypto_register_template(struct crypto_template *tmpl);
int crypto_register_templates(struct crypto_template *tmpls, int count);
void crypto_unregister_template(struct crypto_template *tmpl);
void crypto_unregister_templates(struct crypto_template *tmpls, int count);
struct crypto_template *crypto_lookup_template(const char *name);

int crypto_register_instance(struct crypto_template *tmpl,
			     struct crypto_instance *inst);
int crypto_unregister_instance(struct crypto_instance *inst);

int crypto_init_spawn(struct crypto_spawn *spawn, struct crypto_alg *alg,
		      struct crypto_instance *inst, u32 mask);
int crypto_init_spawn2(struct crypto_spawn *spawn, struct crypto_alg *alg,
		       struct crypto_instance *inst,
		       const struct crypto_type *frontend);
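/*
 * Illustrative sketch only, not part of this header's API: a spawn binds an
 * instance built by a template to the algorithm it wraps.  Using only the
 * helpers declared in this file, the usual lifecycle is (error handling
 * abbreviated):
 *
 *	err = crypto_init_spawn(spawn, alg, inst, CRYPTO_ALG_TYPE_MASK);
 *		// or: err = crypto_grab_spawn(spawn, name, type, mask);
 *		// in the template's ->create(), with spawn stored in the
 *		// instance context (see crypto_instance_ctx() below)
 *
 *	tfm = crypto_spawn_tfm(spawn, type, mask);
 *		// in the transform's init callback, to instantiate the
 *		// underlying algorithm; returns ERR_PTR() on failure
 *
 *	crypto_drop_spawn(spawn);
 *		// when the instance is freed, releasing the reference
 */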
int crypto_grab_spawn(struct crypto_spawn *spawn, const char *name,
		      u32 type, u32 mask);

void crypto_drop_spawn(struct crypto_spawn *spawn);
struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type,
				    u32 mask);
void *crypto_spawn_tfm2(struct crypto_spawn *spawn);

static inline void crypto_set_spawn(struct crypto_spawn *spawn,
				    struct crypto_instance *inst)
{
	spawn->inst = inst;
}

struct crypto_attr_type *crypto_get_attr_type(struct rtattr **tb);
int crypto_check_attr_type(struct rtattr **tb, u32 type);
const char *crypto_attr_alg_name(struct rtattr *rta);
struct crypto_alg *crypto_attr_alg2(struct rtattr *rta,
				    const struct crypto_type *frontend,
				    u32 type, u32 mask);

static inline struct crypto_alg *crypto_attr_alg(struct rtattr *rta,
						 u32 type, u32 mask)
{
	return crypto_attr_alg2(rta, NULL, type, mask);
}

int crypto_attr_u32(struct rtattr *rta, u32 *num);
int crypto_inst_setname(struct crypto_instance *inst, const char *name,
			struct crypto_alg *alg);
void *crypto_alloc_instance(const char *name, struct crypto_alg *alg,
			    unsigned int head);

void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen);
int crypto_enqueue_request(struct crypto_queue *queue,
			   struct crypto_async_request *request);
struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue);
static inline unsigned int crypto_queue_len(struct crypto_queue *queue)
{
	return queue->qlen;
}

void crypto_inc(u8 *a, unsigned int size);
void __crypto_xor(u8 *dst, const u8 *src1, const u8 *src2, unsigned int size);

static inline void crypto_xor(u8 *dst, const u8 *src, unsigned int size)
{
	if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
	    __builtin_constant_p(size) &&
	    (size % sizeof(unsigned long)) == 0) {
		unsigned long *d = (unsigned long *)dst;
		unsigned long *s = (unsigned long *)src;

		while (size > 0) {
			*d++ ^= *s++;
			size -= sizeof(unsigned long);
		}
	} else {
		__crypto_xor(dst, dst, src, size);
	}
}

static inline void crypto_xor_cpy(u8 *dst, const u8 *src1, const u8 *src2,
				  unsigned int size)
{
	if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
	    __builtin_constant_p(size) &&
	    (size % sizeof(unsigned long)) == 0) {
		unsigned long *d = (unsigned long *)dst;
		unsigned long *s1 = (unsigned long *)src1;
		unsigned long *s2 = (unsigned long *)src2;

		while (size > 0) {
			*d++ = *s1++ ^ *s2++;
			size -= sizeof(unsigned long);
		}
	} else {
		__crypto_xor(dst, src1, src2, size);
	}
}

int ablkcipher_walk_done(struct ablkcipher_request *req,
			 struct ablkcipher_walk *walk, int err);
int ablkcipher_walk_phys(struct ablkcipher_request *req,
			 struct ablkcipher_walk *walk);
void __ablkcipher_walk_complete(struct ablkcipher_walk *walk);

static inline void *crypto_tfm_ctx_aligned(struct crypto_tfm *tfm)
{
	return PTR_ALIGN(crypto_tfm_ctx(tfm),
			 crypto_tfm_alg_alignmask(tfm) + 1);
}

static inline struct crypto_instance *crypto_tfm_alg_instance(
	struct crypto_tfm *tfm)
{
	return container_of(tfm->__crt_alg, struct crypto_instance, alg);
}

static inline void *crypto_instance_ctx(struct crypto_instance *inst)
{
	return inst->__ctx;
}
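/*
 * Illustrative sketch only, not part of this header's API: inside a
 * transform's init callback, the enclosing instance and its private context
 * (where the template's ->create() typically stored the spawn) are reached
 * via the helpers above.  The function name example_init_tfm and the
 * variable child are hypothetical; only helpers declared in this file are
 * used:
 *
 *	static int example_init_tfm(struct crypto_tfm *tfm)
 *	{
 *		struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
 *		struct crypto_spawn *spawn = crypto_instance_ctx(inst);
 *		struct crypto_tfm *child;
 *
 *		child = crypto_spawn_tfm(spawn, CRYPTO_ALG_TYPE_CIPHER,
 *					 CRYPTO_ALG_TYPE_MASK);
 *		if (IS_ERR(child))
 *			return PTR_ERR(child);
 *
 *		... stash child in crypto_tfm_ctx(tfm) for later use ...
 *		return 0;
 *	}
 */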
static inline struct ablkcipher_alg *crypto_ablkcipher_alg(
	struct crypto_ablkcipher *tfm)
{
	return &crypto_ablkcipher_tfm(tfm)->__crt_alg->cra_ablkcipher;
}

static inline void *crypto_ablkcipher_ctx(struct crypto_ablkcipher *tfm)
{
	return crypto_tfm_ctx(&tfm->base);
}

static inline void *crypto_ablkcipher_ctx_aligned(struct crypto_ablkcipher *tfm)
{
	return crypto_tfm_ctx_aligned(&tfm->base);
}

static inline struct crypto_cipher *crypto_spawn_cipher(
	struct crypto_spawn *spawn)
{
	u32 type = CRYPTO_ALG_TYPE_CIPHER;
	u32 mask = CRYPTO_ALG_TYPE_MASK;

	return __crypto_cipher_cast(crypto_spawn_tfm(spawn, type, mask));
}

static inline struct cipher_alg *crypto_cipher_alg(struct crypto_cipher *tfm)
{
	return &crypto_cipher_tfm(tfm)->__crt_alg->cra_cipher;
}

static inline void ablkcipher_walk_init(struct ablkcipher_walk *walk,
					struct scatterlist *dst,
					struct scatterlist *src,
					unsigned int nbytes)
{
	walk->in.sg = src;
	walk->out.sg = dst;
	walk->total = nbytes;
	INIT_LIST_HEAD(&walk->buffers);
}

static inline void ablkcipher_walk_complete(struct ablkcipher_walk *walk)
{
	if (unlikely(!list_empty(&walk->buffers)))
		__ablkcipher_walk_complete(walk);
}

static inline struct crypto_async_request *crypto_get_backlog(
	struct crypto_queue *queue)
{
	return queue->backlog == &queue->list ? NULL :
	       container_of(queue->backlog, struct crypto_async_request, list);
}

static inline int ablkcipher_enqueue_request(struct crypto_queue *queue,
					     struct ablkcipher_request *request)
{
	return crypto_enqueue_request(queue, &request->base);
}

static inline struct ablkcipher_request *ablkcipher_dequeue_request(
	struct crypto_queue *queue)
{
	return ablkcipher_request_cast(crypto_dequeue_request(queue));
}

static inline void *ablkcipher_request_ctx(struct ablkcipher_request *req)
{
	return req->__ctx;
}

static inline struct crypto_alg *crypto_get_attr_alg(struct rtattr **tb,
						      u32 type, u32 mask)
{
	return crypto_attr_alg(tb[1], type, mask);
}

static inline int crypto_requires_off(u32 type, u32 mask, u32 off)
{
	return (type ^ off) & mask & off;
}

/*
 * Returns CRYPTO_ALG_ASYNC if type/mask requires the use of sync algorithms.
 * Otherwise returns zero.
 */
static inline int crypto_requires_sync(u32 type, u32 mask)
{
	return crypto_requires_off(type, mask, CRYPTO_ALG_ASYNC);
}

noinline unsigned long __crypto_memneq(const void *a, const void *b, size_t size);

/**
 * crypto_memneq - Compare two areas of memory without leaking
 *		   timing information.
 *
 * @a: One area of memory
 * @b: Another area of memory
 * @size: The size of the area.
 *
 * Returns 0 when data is equal, 1 otherwise.
 */
static inline int crypto_memneq(const void *a, const void *b, size_t size)
{
	return __crypto_memneq(a, b, size) != 0UL ? 1 : 0;
}

static inline void crypto_yield(u32 flags)
{
	if (flags & CRYPTO_TFM_REQ_MAY_SLEEP)
		cond_resched();
}

int crypto_register_notifier(struct notifier_block *nb);
int crypto_unregister_notifier(struct notifier_block *nb);

/* Crypto notification events. */
enum {
	CRYPTO_MSG_ALG_REQUEST,
	CRYPTO_MSG_ALG_REGISTER,
	CRYPTO_MSG_ALG_LOADED,
};

#endif	/* _CRYPTO_ALGAPI_H */