/*
 * Scatterlist Cryptographic API.
 *
 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
 * Copyright (c) 2002 David S. Miller (davem@redhat.com)
 * Copyright (c) 2005 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Portions derived from Cryptoapi, by Alexander Kjeldaas <astor@fast.no>
 * and Nettle, by Niels Möller.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */
#ifndef _LINUX_CRYPTO_H
#define _LINUX_CRYPTO_H

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/bug.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>

/*
 * Autoloaded crypto modules should only use a prefixed name to avoid allowing
 * arbitrary modules to be loaded. Loading from userspace may still need the
 * unprefixed names, so those aliases are retained as well.
 * This uses __MODULE_INFO directly instead of MODULE_ALIAS because pre-4.3
 * gcc (e.g. avr32 toolchain) uses __LINE__ for uniqueness, and this macro
 * expands twice on the same line. Instead, use a separate base name for the
 * alias.
 */
#define MODULE_ALIAS_CRYPTO(name)	\
		__MODULE_INFO(alias, alias_userspace, name);	\
		__MODULE_INFO(alias, alias_crypto, "crypto-" name)

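/*
 * Example (illustrative, not part of the original header): a module that
 * implements AES would typically declare both the generic algorithm alias
 * and its driver-name alias:
 *
 *	MODULE_ALIAS_CRYPTO("aes");
 *	MODULE_ALIAS_CRYPTO("aes-generic");
 */
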
/*
 * Algorithm masks and types.
 */
#define CRYPTO_ALG_TYPE_MASK		0x0000000f
#define CRYPTO_ALG_TYPE_CIPHER		0x00000001
#define CRYPTO_ALG_TYPE_COMPRESS	0x00000002
#define CRYPTO_ALG_TYPE_AEAD		0x00000003
#define CRYPTO_ALG_TYPE_BLKCIPHER	0x00000004
#define CRYPTO_ALG_TYPE_ABLKCIPHER	0x00000005
#define CRYPTO_ALG_TYPE_GIVCIPHER	0x00000006
#define CRYPTO_ALG_TYPE_DIGEST		0x00000008
#define CRYPTO_ALG_TYPE_HASH		0x00000008
#define CRYPTO_ALG_TYPE_SHASH		0x00000009
#define CRYPTO_ALG_TYPE_AHASH		0x0000000a
#define CRYPTO_ALG_TYPE_RNG		0x0000000c
#define CRYPTO_ALG_TYPE_AKCIPHER	0x0000000d
#define CRYPTO_ALG_TYPE_PCOMPRESS	0x0000000f

#define CRYPTO_ALG_TYPE_HASH_MASK	0x0000000e
#define CRYPTO_ALG_TYPE_AHASH_MASK	0x0000000c
#define CRYPTO_ALG_TYPE_BLKCIPHER_MASK	0x0000000c

#define CRYPTO_ALG_LARVAL		0x00000010
#define CRYPTO_ALG_DEAD			0x00000020
#define CRYPTO_ALG_DYING		0x00000040
#define CRYPTO_ALG_ASYNC		0x00000080

/*
 * Set this bit if and only if the algorithm requires another algorithm of
 * the same type to handle corner cases.
 */
#define CRYPTO_ALG_NEED_FALLBACK	0x00000100

/*
 * This bit is set for symmetric key ciphers that have already been wrapped
 * with a generic IV generator to prevent them from being wrapped again.
 */
#define CRYPTO_ALG_GENIV		0x00000200

/*
 * Set if the algorithm has passed automated run-time testing.  Note that
 * if there is no run-time testing for a given algorithm it is considered
 * to have passed.
 */
#define CRYPTO_ALG_TESTED		0x00000400

/*
 * Set if the algorithm is an instance that is built from templates.
 */
#define CRYPTO_ALG_INSTANCE		0x00000800

/* Set this bit if the algorithm provided is hardware accelerated but is not
 * directly available to userspace, e.g. through a CPU instruction set.
 */
#define CRYPTO_ALG_KERN_DRIVER_ONLY	0x00001000

/*
 * Mark a cipher as a service implementation only usable by another
 * cipher and never by a normal user of the kernel crypto API.
 */
#define CRYPTO_ALG_INTERNAL		0x00002000

/*
 * Transform masks and values (for crt_flags).
 */
#define CRYPTO_TFM_REQ_MASK		0x000fff00
#define CRYPTO_TFM_RES_MASK		0xfff00000

#define CRYPTO_TFM_REQ_WEAK_KEY		0x00000100
#define CRYPTO_TFM_REQ_MAY_SLEEP	0x00000200
#define CRYPTO_TFM_REQ_MAY_BACKLOG	0x00000400
#define CRYPTO_TFM_RES_WEAK_KEY		0x00100000
#define CRYPTO_TFM_RES_BAD_KEY_LEN	0x00200000
#define CRYPTO_TFM_RES_BAD_KEY_SCHED	0x00400000
#define CRYPTO_TFM_RES_BAD_BLOCK_LEN	0x00800000
#define CRYPTO_TFM_RES_BAD_FLAGS	0x01000000

/*
 * Miscellaneous stuff.
 */
#define CRYPTO_MAX_ALG_NAME		64

/*
 * The macro CRYPTO_MINALIGN_ATTR (along with the void * type in the actual
 * declaration) is used to ensure that the crypto_tfm context structure is
 * aligned correctly for the given architecture so that there are no alignment
 * faults for C data types.  In particular, this is required on platforms such
 * as arm where pointers are 32-bit aligned but there are data types such as
 * u64 which require 64-bit alignment.
 */
#define CRYPTO_MINALIGN ARCH_KMALLOC_MINALIGN

#define CRYPTO_MINALIGN_ATTR __attribute__ ((__aligned__(CRYPTO_MINALIGN)))

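/*
 * Example (illustrative, hypothetical context layout): because the tfm
 * context returned by crypto_tfm_ctx() below is CRYPTO_MINALIGN-aligned,
 * a driver context may safely start with members that need natural u64
 * alignment, even on strict-alignment architectures:
 *
 *	struct myhw_aes_ctx {
 *		u64 key_schedule[60];
 *		unsigned int key_len;
 *	};
 */
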
struct scatterlist;
struct crypto_ablkcipher;
struct crypto_async_request;
struct crypto_blkcipher;
struct crypto_hash;
struct crypto_tfm;
struct crypto_type;
struct skcipher_givcrypt_request;

typedef void (*crypto_completion_t)(struct crypto_async_request *req, int err);

/**
 * DOC: Block Cipher Context Data Structures
 *
 * These data structures define the operating context for each block cipher
 * type.
 */

struct crypto_async_request {
	struct list_head list;
	crypto_completion_t complete;
	void *data;
	struct crypto_tfm *tfm;

	u32 flags;
};

struct ablkcipher_request {
	struct crypto_async_request base;

	unsigned int nbytes;

	void *info;

	struct scatterlist *src;
	struct scatterlist *dst;

	void *__ctx[] CRYPTO_MINALIGN_ATTR;
};

struct blkcipher_desc {
	struct crypto_blkcipher *tfm;
	void *info;
	u32 flags;
};

struct cipher_desc {
	struct crypto_tfm *tfm;
	void (*crfn)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
	unsigned int (*prfn)(const struct cipher_desc *desc, u8 *dst,
			     const u8 *src, unsigned int nbytes);
	void *info;
};

struct hash_desc {
	struct crypto_hash *tfm;
	u32 flags;
};

/**
 * DOC: Block Cipher Algorithm Definitions
 *
 * These data structures define modular crypto algorithm implementations,
 * managed via crypto_register_alg() and crypto_unregister_alg().
 */

/**
 * struct ablkcipher_alg - asynchronous block cipher definition
 * @min_keysize: Minimum key size supported by the transformation. This is the
 *		 smallest key length supported by this transformation algorithm.
 *		 This must be set to one of the pre-defined values as this is
 *		 not hardware specific. Possible values for this field can be
 *		 found via git grep "_MIN_KEY_SIZE" include/crypto/
 * @max_keysize: Maximum key size supported by the transformation. This is the
 *		 largest key length supported by this transformation algorithm.
 *		 This must be set to one of the pre-defined values as this is
 *		 not hardware specific. Possible values for this field can be
 *		 found via git grep "_MAX_KEY_SIZE" include/crypto/
 * @setkey: Set key for the transformation. This function is used to either
 *	    program a supplied key into the hardware or store the key in the
 *	    transformation context for programming it later. Note that this
 *	    function does modify the transformation context. This function can
 *	    be called multiple times during the existence of the transformation
 *	    object, so one must make sure the key is properly reprogrammed into
 *	    the hardware. This function is also responsible for checking the key
 *	    length for validity. In case a software fallback was put in place in
 *	    the @cra_init call, this function might need to use the fallback if
 *	    the algorithm doesn't support all of the key sizes.
 * @encrypt: Encrypt a scatterlist of blocks. This function is used to encrypt
 *	     the supplied scatterlist containing the blocks of data. The crypto
 *	     API consumer is responsible for aligning the entries of the
 *	     scatterlist properly and making sure the chunks are correctly
 *	     sized. In case a software fallback was put in place in the
 *	     @cra_init call, this function might need to use the fallback if
 *	     the algorithm doesn't support all of the key sizes. In case the
 *	     key was stored in transformation context, the key might need to be
 *	     re-programmed into the hardware in this function. This function
 *	     shall not modify the transformation context, as this function may
 *	     be called in parallel with the same transformation object.
 * @decrypt: Decrypt a scatterlist of blocks. This is the reverse counterpart
 *	     to @encrypt and the conditions are exactly the same.
 * @givencrypt: Update the IV for encryption. With this function, a cipher
 *	        implementation may provide the function on how to update the IV
 *	        for encryption.
 * @givdecrypt: Update the IV for decryption. This is the reverse of
 *	        @givencrypt .
 * @geniv: The transformation implementation may use an "IV generator" provided
 *	   by the kernel crypto API. Several use cases have a predefined
 *	   approach for how IVs are to be updated. For such use cases, the
 *	   kernel crypto API provides ready-to-use implementations that can be
 *	   referenced with this variable.
 * @ivsize: IV size applicable for transformation. The consumer must provide an
 *	    IV of exactly that size to perform the encrypt or decrypt operation.
 *
 * All fields except @givencrypt , @givdecrypt , @geniv and @ivsize are
 * mandatory and must be filled.
 */
struct ablkcipher_alg {
	int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key,
	              unsigned int keylen);
	int (*encrypt)(struct ablkcipher_request *req);
	int (*decrypt)(struct ablkcipher_request *req);
	int (*givencrypt)(struct skcipher_givcrypt_request *req);
	int (*givdecrypt)(struct skcipher_givcrypt_request *req);

	const char *geniv;

	unsigned int min_keysize;
	unsigned int max_keysize;
	unsigned int ivsize;
};

/**
 * struct blkcipher_alg - synchronous block cipher definition
 * @min_keysize: see struct ablkcipher_alg
 * @max_keysize: see struct ablkcipher_alg
 * @setkey: see struct ablkcipher_alg
 * @encrypt: see struct ablkcipher_alg
 * @decrypt: see struct ablkcipher_alg
 * @geniv: see struct ablkcipher_alg
 * @ivsize: see struct ablkcipher_alg
 *
 * All fields except @geniv and @ivsize are mandatory and must be filled.
 */
struct blkcipher_alg {
	int (*setkey)(struct crypto_tfm *tfm, const u8 *key,
	              unsigned int keylen);
	int (*encrypt)(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned int nbytes);
	int (*decrypt)(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned int nbytes);

	const char *geniv;

	unsigned int min_keysize;
	unsigned int max_keysize;
	unsigned int ivsize;
};

/**
 * struct cipher_alg - single-block symmetric cipher definition
 * @cia_min_keysize: Minimum key size supported by the transformation. This is
 *		     the smallest key length supported by this transformation
 *		     algorithm. This must be set to one of the pre-defined
 *		     values as this is not hardware specific. Possible values
 *		     for this field can be found via git grep "_MIN_KEY_SIZE"
 *		     include/crypto/
 * @cia_max_keysize: Maximum key size supported by the transformation. This is
 *		    the largest key length supported by this transformation
 *		    algorithm. This must be set to one of the pre-defined values
 *		    as this is not hardware specific. Possible values for this
 *		    field can be found via git grep "_MAX_KEY_SIZE"
 *		    include/crypto/
 * @cia_setkey: Set key for the transformation. This function is used to either
 *	        program a supplied key into the hardware or store the key in the
 *	        transformation context for programming it later. Note that this
 *	        function does modify the transformation context. This function
 *	        can be called multiple times during the existence of the
 *	        transformation object, so one must make sure the key is properly
 *	        reprogrammed into the hardware. This function is also
 *	        responsible for checking the key length for validity.
 * @cia_encrypt: Encrypt a single block. This function is used to encrypt a
 *		 single block of data, which must be @cra_blocksize big. This
 *		 always operates on a full @cra_blocksize and it is not possible
 *		 to encrypt a block of smaller size. The supplied buffers must
 *		 therefore also be at least of @cra_blocksize size. Both the
 *		 input and output buffers are always aligned to @cra_alignmask.
 *		 In case either of the input or output buffer supplied by user
 *		 of the crypto API is not aligned to @cra_alignmask, the crypto
 *		 API will re-align the buffers. The re-alignment means that a
 *		 new buffer will be allocated, the data will be copied into the
 *		 new buffer, then the processing will happen on the new buffer,
 *		 then the data will be copied back into the original buffer and
 *		 finally the new buffer will be freed. In case a software
 *		 fallback was put in place in the @cra_init call, this function
 *		 might need to use the fallback if the algorithm doesn't support
 *		 all of the key sizes. In case the key was stored in
 *		 transformation context, the key might need to be re-programmed
 *		 into the hardware in this function. This function shall not
 *		 modify the transformation context, as this function may be
 *		 called in parallel with the same transformation object.
 * @cia_decrypt: Decrypt a single block. This is a reverse counterpart to
 *		 @cia_encrypt, and the conditions are exactly the same.
 *
 * All fields are mandatory and must be filled.
 */
struct cipher_alg {
	unsigned int cia_min_keysize;
	unsigned int cia_max_keysize;
	int (*cia_setkey)(struct crypto_tfm *tfm, const u8 *key,
	                  unsigned int keylen);
	void (*cia_encrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
	void (*cia_decrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
};

struct compress_alg {
	int (*coa_compress)(struct crypto_tfm *tfm, const u8 *src,
			    unsigned int slen, u8 *dst, unsigned int *dlen);
	int (*coa_decompress)(struct crypto_tfm *tfm, const u8 *src,
			      unsigned int slen, u8 *dst, unsigned int *dlen);
};


#define cra_ablkcipher	cra_u.ablkcipher
#define cra_blkcipher	cra_u.blkcipher
#define cra_cipher	cra_u.cipher
#define cra_compress	cra_u.compress

/**
 * struct crypto_alg - definition of a cryptographic cipher algorithm
 * @cra_flags: Flags describing this transformation. See include/linux/crypto.h
 *	       CRYPTO_ALG_* flags for the flags which go in here. Those are
 *	       used for fine-tuning the description of the transformation
 *	       algorithm.
 * @cra_blocksize: Minimum block size of this transformation. The size in bytes
 *		   of the smallest possible unit which can be transformed with
 *		   this algorithm. The users must respect this value. In case of
 *		   a HASH transformation, it is possible for a smaller block
 *		   than @cra_blocksize to be passed to the crypto API for
 *		   transformation; for any other transformation type, an error
 *		   will be returned upon any attempt to transform smaller than
 *		   @cra_blocksize chunks.
 * @cra_ctxsize: Size of the operational context of the transformation. This
 *		 value informs the kernel crypto API about the memory size
 *		 needed to be allocated for the transformation context.
 * @cra_alignmask: Alignment mask for the input and output data buffer. The data
 *		   buffer containing the input data for the algorithm must be
 *		   aligned to this alignment mask. The data buffer for the
 *		   output data must be aligned to this alignment mask. Note that
 *		   the Crypto API will do the re-alignment in software, but
 *		   only under special conditions and there is a performance hit.
 *		   The re-alignment happens at these occasions for different
 *		   @cra_u types: cipher -- For both input data and output data
 *		   buffer; ahash -- For output hash destination buf; shash --
 *		   For output hash destination buf.
 *		   This is needed on hardware which is flawed by design and
 *		   cannot pick data from arbitrary addresses.
 * @cra_priority: Priority of this transformation implementation. In case
 *		  multiple transformations with the same @cra_name are available
 *		  to the Crypto API, the kernel will use the one with the
 *		  highest @cra_priority.
 * @cra_name: Generic name (usable by multiple implementations) of the
 *	      transformation algorithm. This is the name of the transformation
 *	      itself. This field is used by the kernel when looking up the
 *	      providers of a particular transformation.
 * @cra_driver_name: Unique name of the transformation provider. This is the
 *		     name of the provider of the transformation. This can be any
 *		     arbitrary value, but in the usual case, this contains the
 *		     name of the chip or provider and the name of the
 *		     transformation algorithm.
 * @cra_type: Type of the cryptographic transformation. This is a pointer to
 *	      struct crypto_type, which implements callbacks common for all
 *	      transformation types. There are multiple options:
 *	      &crypto_blkcipher_type, &crypto_ablkcipher_type,
 *	      &crypto_ahash_type, &crypto_rng_type.
 *	      This field might be empty. In that case, there are no common
 *	      callbacks. This is the case for: cipher, compress, shash.
 * @cra_u: Callbacks implementing the transformation. This is a union of
 *	   multiple structures. Depending on the type of transformation selected
 *	   by @cra_type and @cra_flags above, the associated structure must be
 *	   filled with callbacks. This field might be empty. This is the case
 *	   for ahash, shash.
 * @cra_init: Initialize the cryptographic transformation object. This function
 *	      is used to initialize the cryptographic transformation object.
 *	      This function is called only once at the instantiation time, right
 *	      after the transformation context was allocated. In case the
 *	      cryptographic hardware has some special requirements which need to
 *	      be handled by software, this function shall check for the precise
 *	      requirement of the transformation and put any software fallbacks
 *	      in place.
 * @cra_exit: Deinitialize the cryptographic transformation object. This is a
 *	      counterpart to @cra_init, used to remove various changes set in
 *	      @cra_init.
 * @cra_module: Owner of this transformation implementation. Set to THIS_MODULE
 * @cra_list: internally used
 * @cra_users: internally used
 * @cra_refcnt: internally used
 * @cra_destroy: internally used
 *
 * The struct crypto_alg describes a generic Crypto API algorithm and is common
 * for all of the transformations. Any variable not documented here shall not
 * be used by a cipher implementation as it is internal to the Crypto API.
 */
struct crypto_alg {
	struct list_head cra_list;
	struct list_head cra_users;

	u32 cra_flags;
	unsigned int cra_blocksize;
	unsigned int cra_ctxsize;
	unsigned int cra_alignmask;

	int cra_priority;
	atomic_t cra_refcnt;

	char cra_name[CRYPTO_MAX_ALG_NAME];
	char cra_driver_name[CRYPTO_MAX_ALG_NAME];

	const struct crypto_type *cra_type;

	union {
		struct ablkcipher_alg ablkcipher;
		struct blkcipher_alg blkcipher;
		struct cipher_alg cipher;
		struct compress_alg compress;
	} cra_u;

	int (*cra_init)(struct crypto_tfm *tfm);
	void (*cra_exit)(struct crypto_tfm *tfm);
	void (*cra_destroy)(struct crypto_alg *alg);

	struct module *cra_module;
} CRYPTO_MINALIGN_ATTR;

/*
 * Algorithm registration interface.
 */
int crypto_register_alg(struct crypto_alg *alg);
int crypto_unregister_alg(struct crypto_alg *alg);
int crypto_register_algs(struct crypto_alg *algs, int count);
int crypto_unregister_algs(struct crypto_alg *algs, int count);

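/*
 * Example (illustrative sketch, hypothetical driver): filling a struct
 * crypto_alg for a single-block cipher implementation and registering it.
 * All myhw_* names and field values are placeholders.
 *
 *	static struct crypto_alg myhw_aes_alg = {
 *		.cra_name		= "aes",
 *		.cra_driver_name	= "aes-myhw",
 *		.cra_priority		= 300,
 *		.cra_flags		= CRYPTO_ALG_TYPE_CIPHER,
 *		.cra_blocksize		= 16,
 *		.cra_ctxsize		= sizeof(struct myhw_aes_ctx),
 *		.cra_module		= THIS_MODULE,
 *		.cra_u			= { .cipher = {
 *			.cia_min_keysize	= 16,
 *			.cia_max_keysize	= 32,
 *			.cia_setkey		= myhw_aes_setkey,
 *			.cia_encrypt		= myhw_aes_encrypt,
 *			.cia_decrypt		= myhw_aes_decrypt,
 *		} },
 *	};
 *
 *	static int __init myhw_aes_mod_init(void)
 *	{
 *		return crypto_register_alg(&myhw_aes_alg);
 *	}
 */
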
/*
 * Algorithm query interface.
 */
int crypto_has_alg(const char *name, u32 type, u32 mask);

/*
 * Transforms: user-instantiated objects which encapsulate algorithms
 * and core processing logic.  Managed via crypto_alloc_*() and
 * crypto_free_*(), as well as the various helpers below.
 */

struct ablkcipher_tfm {
	int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key,
	              unsigned int keylen);
	int (*encrypt)(struct ablkcipher_request *req);
	int (*decrypt)(struct ablkcipher_request *req);
	int (*givencrypt)(struct skcipher_givcrypt_request *req);
	int (*givdecrypt)(struct skcipher_givcrypt_request *req);

	struct crypto_ablkcipher *base;

	unsigned int ivsize;
	unsigned int reqsize;
};

struct blkcipher_tfm {
	void *iv;
	int (*setkey)(struct crypto_tfm *tfm, const u8 *key,
		      unsigned int keylen);
	int (*encrypt)(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes);
	int (*decrypt)(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes);
};

struct cipher_tfm {
	int (*cit_setkey)(struct crypto_tfm *tfm,
	                  const u8 *key, unsigned int keylen);
	void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
	void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
};

struct hash_tfm {
	int (*init)(struct hash_desc *desc);
	int (*update)(struct hash_desc *desc,
		      struct scatterlist *sg, unsigned int nsg);
	int (*final)(struct hash_desc *desc, u8 *out);
	int (*digest)(struct hash_desc *desc, struct scatterlist *sg,
		      unsigned int nsg, u8 *out);
	int (*setkey)(struct crypto_hash *tfm, const u8 *key,
		      unsigned int keylen);
	unsigned int digestsize;
};

struct compress_tfm {
	int (*cot_compress)(struct crypto_tfm *tfm,
	                    const u8 *src, unsigned int slen,
	                    u8 *dst, unsigned int *dlen);
	int (*cot_decompress)(struct crypto_tfm *tfm,
	                      const u8 *src, unsigned int slen,
	                      u8 *dst, unsigned int *dlen);
};

#define crt_ablkcipher	crt_u.ablkcipher
#define crt_blkcipher	crt_u.blkcipher
#define crt_cipher	crt_u.cipher
#define crt_hash	crt_u.hash
#define crt_compress	crt_u.compress

struct crypto_tfm {

	u32 crt_flags;

	union {
		struct ablkcipher_tfm ablkcipher;
		struct blkcipher_tfm blkcipher;
		struct cipher_tfm cipher;
		struct hash_tfm hash;
		struct compress_tfm compress;
	} crt_u;

	void (*exit)(struct crypto_tfm *tfm);

	struct crypto_alg *__crt_alg;

	void *__crt_ctx[] CRYPTO_MINALIGN_ATTR;
};

struct crypto_ablkcipher {
	struct crypto_tfm base;
};

struct crypto_blkcipher {
	struct crypto_tfm base;
};

struct crypto_cipher {
	struct crypto_tfm base;
};

struct crypto_comp {
	struct crypto_tfm base;
};

struct crypto_hash {
	struct crypto_tfm base;
};

enum {
	CRYPTOA_UNSPEC,
	CRYPTOA_ALG,
	CRYPTOA_TYPE,
	CRYPTOA_U32,
	__CRYPTOA_MAX,
};

#define CRYPTOA_MAX (__CRYPTOA_MAX - 1)

/* Maximum number of (rtattr) parameters for each template. */
#define CRYPTO_MAX_ATTRS 32

struct crypto_attr_alg {
	char name[CRYPTO_MAX_ALG_NAME];
};

struct crypto_attr_type {
	u32 type;
	u32 mask;
};

struct crypto_attr_u32 {
	u32 num;
};

/*
 * Transform user interface.
 */

struct crypto_tfm *crypto_alloc_base(const char *alg_name, u32 type, u32 mask);
void crypto_destroy_tfm(void *mem, struct crypto_tfm *tfm);

static inline void crypto_free_tfm(struct crypto_tfm *tfm)
{
	return crypto_destroy_tfm(tfm, tfm);
}

int alg_test(const char *driver, const char *alg, u32 type, u32 mask);

/*
 * Transform helpers which query the underlying algorithm.
 */
static inline const char *crypto_tfm_alg_name(struct crypto_tfm *tfm)
{
	return tfm->__crt_alg->cra_name;
}

static inline const char *crypto_tfm_alg_driver_name(struct crypto_tfm *tfm)
{
	return tfm->__crt_alg->cra_driver_name;
}

static inline int crypto_tfm_alg_priority(struct crypto_tfm *tfm)
{
	return tfm->__crt_alg->cra_priority;
}

static inline u32 crypto_tfm_alg_type(struct crypto_tfm *tfm)
{
	return tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK;
}

static inline unsigned int crypto_tfm_alg_blocksize(struct crypto_tfm *tfm)
{
	return tfm->__crt_alg->cra_blocksize;
}

static inline unsigned int crypto_tfm_alg_alignmask(struct crypto_tfm *tfm)
{
	return tfm->__crt_alg->cra_alignmask;
}

static inline u32 crypto_tfm_get_flags(struct crypto_tfm *tfm)
{
	return tfm->crt_flags;
}

static inline void crypto_tfm_set_flags(struct crypto_tfm *tfm, u32 flags)
{
	tfm->crt_flags |= flags;
}

static inline void crypto_tfm_clear_flags(struct crypto_tfm *tfm, u32 flags)
{
	tfm->crt_flags &= ~flags;
}

static inline void *crypto_tfm_ctx(struct crypto_tfm *tfm)
{
	return tfm->__crt_ctx;
}

static inline unsigned int crypto_tfm_ctx_alignment(void)
{
	struct crypto_tfm *tfm;
	return __alignof__(tfm->__crt_ctx);
}

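/*
 * Example (illustrative, hypothetical driver): the private context is
 * usually fetched from the tfm in the @cra_init callback; the context
 * memory itself is allocated and aligned by the crypto API.
 *
 *	static int myhw_aes_init(struct crypto_tfm *tfm)
 *	{
 *		struct myhw_aes_ctx *ctx = crypto_tfm_ctx(tfm);
 *
 *		ctx->key_len = 0;
 *		return 0;
 *	}
 */
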
/*
 * API wrappers.
 */
static inline struct crypto_ablkcipher *__crypto_ablkcipher_cast(
	struct crypto_tfm *tfm)
{
	return (struct crypto_ablkcipher *)tfm;
}

static inline u32 crypto_skcipher_type(u32 type)
{
	type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
	type |= CRYPTO_ALG_TYPE_BLKCIPHER;
	return type;
}

static inline u32 crypto_skcipher_mask(u32 mask)
{
	mask &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
	mask |= CRYPTO_ALG_TYPE_BLKCIPHER_MASK;
	return mask;
}

/**
 * DOC: Asynchronous Block Cipher API
 *
 * Asynchronous block cipher API is used with the ciphers of type
 * CRYPTO_ALG_TYPE_ABLKCIPHER (listed as type "ablkcipher" in /proc/crypto).
 *
 * Asynchronous cipher operations imply that the function invocation for a
 * cipher request returns immediately before the completion of the operation.
 * The cipher request is scheduled as a separate kernel thread and therefore
 * load-balanced on the different CPUs via the process scheduler. To allow
 * the kernel crypto API to inform the caller about the completion of a cipher
 * request, the caller must provide a callback function. That function is
 * invoked with the cipher handle when the request completes.
 *
 * To support the asynchronous operation, more information than just the
 * cipher handle must be supplied to the kernel crypto API. That additional
 * information is given by filling in the ablkcipher_request data structure.
 *
 * For the asynchronous block cipher API, the state is maintained with the tfm
 * cipher handle. A single tfm can be used across multiple calls and in
 * parallel. For asynchronous block cipher calls, context data supplied and
 * only used by the caller can be referenced in the request data structure in
 * addition to the IV used for the cipher request. The maintenance of such
 * state information would be important for a crypto driver implementer to
 * have, because when calling the callback function upon completion of the
 * cipher operation, that callback function may need some information about
 * which operation just finished, if multiple operations were invoked in
 * parallel. This state information is unused by the kernel crypto API.
 */

/**
 * crypto_alloc_ablkcipher() - allocate asynchronous block cipher handle
 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
 *	      ablkcipher cipher
 * @type: specifies the type of the cipher
 * @mask: specifies the mask for the cipher
 *
 * Allocate a cipher handle for an ablkcipher. The returned struct
 * crypto_ablkcipher is the cipher handle that is required for any subsequent
 * API invocation for that ablkcipher.
 *
 * Return: allocated cipher handle in case of success; IS_ERR() is true in case
 *	   of an error, PTR_ERR() returns the error code.
 */
struct crypto_ablkcipher *crypto_alloc_ablkcipher(const char *alg_name,
						  u32 type, u32 mask);

static inline struct crypto_tfm *crypto_ablkcipher_tfm(
	struct crypto_ablkcipher *tfm)
{
	return &tfm->base;
}

/**
 * crypto_free_ablkcipher() - zeroize and free cipher handle
 * @tfm: cipher handle to be freed
 */
static inline void crypto_free_ablkcipher(struct crypto_ablkcipher *tfm)
{
	crypto_free_tfm(crypto_ablkcipher_tfm(tfm));
}

/**
 * crypto_has_ablkcipher() - Search for the availability of an ablkcipher
 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
 *	      ablkcipher
 * @type: specifies the type of the cipher
 * @mask: specifies the mask for the cipher
 *
 * Return: true when the ablkcipher is known to the kernel crypto API; false
 *	   otherwise
 */
static inline int crypto_has_ablkcipher(const char *alg_name, u32 type,
					u32 mask)
{
	return crypto_has_alg(alg_name, crypto_skcipher_type(type),
			      crypto_skcipher_mask(mask));
}

static inline struct ablkcipher_tfm *crypto_ablkcipher_crt(
	struct crypto_ablkcipher *tfm)
{
	return &crypto_ablkcipher_tfm(tfm)->crt_ablkcipher;
}

/**
 * crypto_ablkcipher_ivsize() - obtain IV size
 * @tfm: cipher handle
 *
 * The size of the IV for the ablkcipher referenced by the cipher handle is
 * returned. This IV size may be zero if the cipher does not need an IV.
 *
 * Return: IV size in bytes
 */
static inline unsigned int crypto_ablkcipher_ivsize(
	struct crypto_ablkcipher *tfm)
{
	return crypto_ablkcipher_crt(tfm)->ivsize;
}

/**
 * crypto_ablkcipher_blocksize() - obtain block size of cipher
 * @tfm: cipher handle
 *
 * The block size for the ablkcipher referenced with the cipher handle is
 * returned. The caller may use that information to allocate appropriate
 * memory for the data returned by the encryption or decryption operation.
 *
 * Return: block size of cipher
 */
static inline unsigned int crypto_ablkcipher_blocksize(
	struct crypto_ablkcipher *tfm)
{
	return crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(tfm));
}

static inline unsigned int crypto_ablkcipher_alignmask(
	struct crypto_ablkcipher *tfm)
{
	return crypto_tfm_alg_alignmask(crypto_ablkcipher_tfm(tfm));
}

static inline u32 crypto_ablkcipher_get_flags(struct crypto_ablkcipher *tfm)
{
	return crypto_tfm_get_flags(crypto_ablkcipher_tfm(tfm));
}

static inline void crypto_ablkcipher_set_flags(struct crypto_ablkcipher *tfm,
					       u32 flags)
{
	crypto_tfm_set_flags(crypto_ablkcipher_tfm(tfm), flags);
}

static inline void crypto_ablkcipher_clear_flags(struct crypto_ablkcipher *tfm,
						 u32 flags)
{
	crypto_tfm_clear_flags(crypto_ablkcipher_tfm(tfm), flags);
}

/**
 * crypto_ablkcipher_setkey() - set key for cipher
 * @tfm: cipher handle
 * @key: buffer holding the key
 * @keylen: length of the key in bytes
 *
 * The caller provided key is set for the ablkcipher referenced by the cipher
 * handle.
 *
 * Note, the key length determines the cipher variant. Many block ciphers come
 * in different variants depending on the key size, such as AES-128 vs AES-192
 * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128
 * is performed.
 *
 * Return: 0 if the setting of the key was successful; < 0 if an error occurred
 */
static inline int crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
					   const u8 *key, unsigned int keylen)
{
	struct ablkcipher_tfm *crt = crypto_ablkcipher_crt(tfm);

	return crt->setkey(crt->base, key, keylen);
}

/**
 * crypto_ablkcipher_reqtfm() - obtain cipher handle from request
 * @req: ablkcipher_request out of which the cipher handle is to be obtained
 *
 * Return the crypto_ablkcipher handle when furnishing an ablkcipher_request
 * data structure.
 *
 * Return: crypto_ablkcipher handle
 */
static inline struct crypto_ablkcipher *crypto_ablkcipher_reqtfm(
	struct ablkcipher_request *req)
{
	return __crypto_ablkcipher_cast(req->base.tfm);
}

/**
 * crypto_ablkcipher_encrypt() - encrypt plaintext
 * @req: reference to the ablkcipher_request handle that holds all information
 *	 needed to perform the cipher operation
 *
 * Encrypt plaintext data using the ablkcipher_request handle. That data
 * structure and how it is filled with data is discussed with the
 * ablkcipher_request_* functions.
 *
 * Return: 0 if the cipher operation was successful; < 0 if an error occurred
 */
static inline int crypto_ablkcipher_encrypt(struct ablkcipher_request *req)
{
	struct ablkcipher_tfm *crt =
		crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req));
	return crt->encrypt(req);
}

/**
 * crypto_ablkcipher_decrypt() - decrypt ciphertext
 * @req: reference to the ablkcipher_request handle that holds all information
 *	 needed to perform the cipher operation
 *
 * Decrypt ciphertext data using the ablkcipher_request handle. That data
 * structure and how it is filled with data is discussed with the
 * ablkcipher_request_* functions.
 *
 * Return: 0 if the cipher operation was successful; < 0 if an error occurred
 */
static inline int crypto_ablkcipher_decrypt(struct ablkcipher_request *req)
{
	struct ablkcipher_tfm *crt =
		crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req));
	return crt->decrypt(req);
}

/**
 * DOC: Asynchronous Cipher Request Handle
 *
 * The ablkcipher_request data structure contains all pointers to data
 * required for the asynchronous cipher operation. This includes the cipher
 * handle (which can be used by multiple ablkcipher_request instances), pointer
 * to plaintext and ciphertext, asynchronous callback function, etc. It acts
 * as a handle to the ablkcipher_request_* API calls in a similar way as
 * ablkcipher handle to the crypto_ablkcipher_* API calls.
 */

/**
 * crypto_ablkcipher_reqsize() - obtain size of the request data structure
 * @tfm: cipher handle
 *
 * Return: number of bytes
 */
static inline unsigned int crypto_ablkcipher_reqsize(
	struct crypto_ablkcipher *tfm)
{
	return crypto_ablkcipher_crt(tfm)->reqsize;
}

/**
 * ablkcipher_request_set_tfm() - update cipher handle reference in request
 * @req: request handle to be modified
 * @tfm: cipher handle that shall be added to the request handle
 *
 * Allow the caller to replace the existing ablkcipher handle in the request
 * data structure with a different one.
 */
static inline void ablkcipher_request_set_tfm(
	struct ablkcipher_request *req, struct crypto_ablkcipher *tfm)
{
	req->base.tfm = crypto_ablkcipher_tfm(crypto_ablkcipher_crt(tfm)->base);
}

static inline struct ablkcipher_request *ablkcipher_request_cast(
	struct crypto_async_request *req)
{
	return container_of(req, struct ablkcipher_request, base);
}

/**
 * ablkcipher_request_alloc() - allocate request data structure
 * @tfm: cipher handle to be registered with the request
 * @gfp: memory allocation flag that is handed to kmalloc by the API call.
 *
 * Allocate the request data structure that must be used with the ablkcipher
 * encrypt and decrypt API calls. During the allocation, the provided ablkcipher
 * handle is registered in the request data structure.
 *
 * Return: allocated request handle in case of success, or NULL in case the
 *	   allocation failed
 */
static inline struct ablkcipher_request *ablkcipher_request_alloc(
	struct crypto_ablkcipher *tfm, gfp_t gfp)
{
	struct ablkcipher_request *req;

	req = kmalloc(sizeof(struct ablkcipher_request) +
		      crypto_ablkcipher_reqsize(tfm), gfp);

	if (likely(req))
		ablkcipher_request_set_tfm(req, tfm);

	return req;
}

/**
 * ablkcipher_request_free() - zeroize and free request data structure
 * @req: request data structure cipher handle to be freed
 */
static inline void ablkcipher_request_free(struct ablkcipher_request *req)
{
	kzfree(req);
}

/**
 * ablkcipher_request_set_callback() - set asynchronous callback function
 * @req: request handle
 * @flags: specify zero or an OR of the flags CRYPTO_TFM_REQ_MAY_BACKLOG (the
 *	   request queue may back log and increase the wait queue beyond the
 *	   initial maximum size) and CRYPTO_TFM_REQ_MAY_SLEEP (the request
 *	   processing may sleep)
 * @compl: callback function pointer to be registered with the request handle
 * @data: The data pointer refers to memory that is not used by the kernel
 *	  crypto API, but provided to the callback function for it to use. Here,
 *	  the caller can provide a reference to memory the callback function can
 *	  operate on. As the callback function is invoked asynchronously to the
 *	  related functionality, it may need to access data structures of the
 *	  related functionality which can be referenced using this pointer. The
 *	  callback function can access the memory via the "data" field in the
 *	  crypto_async_request data structure provided to the callback function.
 *
 * This function allows setting the callback function that is triggered once
 * the cipher operation completes.
 *
 * The callback function is registered with the ablkcipher_request handle and
 * must comply with the following template:
 *
 *	void callback_function(struct crypto_async_request *req, int error)
 */
static inline void ablkcipher_request_set_callback(
	struct ablkcipher_request *req,
	u32 flags, crypto_completion_t compl, void *data)
{
	req->base.complete = compl;
	req->base.data = data;
	req->base.flags = flags;
}

/**
 * ablkcipher_request_set_crypt() - set data buffers
 * @req: request handle
 * @src: source scatter / gather list
 * @dst: destination scatter / gather list
 * @nbytes: number of bytes to process from @src
 * @iv: IV for the cipher operation which must comply with the IV size defined
 *      by crypto_ablkcipher_ivsize
 *
 * This function allows setting of the source data and destination data
 * scatter / gather lists.
 *
 * For encryption, the source is treated as the plaintext and the
 * destination is the ciphertext. For a decryption operation, the use is
 * reversed - the source is the ciphertext and the destination is the plaintext.
 */
static inline void ablkcipher_request_set_crypt(
	struct ablkcipher_request *req,
	struct scatterlist *src, struct scatterlist *dst,
	unsigned int nbytes, void *iv)
{
	req->src = src;
	req->dst = dst;
	req->nbytes = nbytes;
	req->info = iv;
}

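/*
 * Example (illustrative sketch, not part of the original header): one
 * asynchronous encryption, waited for synchronously via a completion.
 * The my_* names, the key/iv/sg/len variables and the abbreviated error
 * handling are placeholders; the -EINPROGRESS/-EBUSY handling shows one
 * common pattern, not the only possible one.
 *
 *	struct my_result {
 *		struct completion done;
 *		int err;
 *	};
 *
 *	static void my_complete(struct crypto_async_request *req, int err)
 *	{
 *		struct my_result *res = req->data;
 *
 *		if (err == -EINPROGRESS)
 *			return;
 *		res->err = err;
 *		complete(&res->done);
 *	}
 *
 *	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
 *	crypto_ablkcipher_setkey(tfm, key, 16);
 *
 *	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
 *	init_completion(&res.done);
 *	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *					my_complete, &res);
 *	ablkcipher_request_set_crypt(req, sg, sg, len, iv);
 *
 *	err = crypto_ablkcipher_encrypt(req);
 *	if (err == -EINPROGRESS || err == -EBUSY) {
 *		wait_for_completion(&res.done);
 *		err = res.err;
 *	}
 *
 *	ablkcipher_request_free(req);
 *	crypto_free_ablkcipher(tfm);
 */
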
/**
 * DOC: Synchronous Block Cipher API
 *
 * The synchronous block cipher API is used with the ciphers of type
 * CRYPTO_ALG_TYPE_BLKCIPHER (listed as type "blkcipher" in /proc/crypto)
 *
 * Synchronous calls have a context in the tfm. But since a single tfm can be
 * used in multiple calls and in parallel, this info should not be changeable
 * (unless a lock is used). This applies, for example, to the symmetric key.
 * However, the IV is changeable, so there is an iv field in the blkcipher_tfm
 * structure for the synchronous blkcipher API. It is the only state
 * information that can be kept for synchronous calls without using a big lock
 * across a tfm.
 *
 * The block cipher API allows the use of a complete cipher, i.e. a cipher
 * consisting of a template (a block chaining mode) and a single block cipher
 * primitive (e.g. AES).
 *
 * The plaintext data buffer and the ciphertext data buffer are pointed to
 * by using scatter/gather lists. The cipher operation is performed
 * on all segments of the provided scatter/gather lists.
 *
 * The kernel crypto API supports a cipher operation "in-place" which means that
 * the caller may provide the same scatter/gather list for the plaintext and
 * ciphertext. After the completion of the cipher operation, the plaintext
 * data is replaced with the ciphertext data in case of an encryption and vice
 * versa for a decryption. The caller must ensure that the scatter/gather lists
 * for the output data point to sufficiently large buffers, i.e. multiples of
 * the block size of the cipher.
 */

static inline struct crypto_blkcipher *__crypto_blkcipher_cast(
	struct crypto_tfm *tfm)
{
	return (struct crypto_blkcipher *)tfm;
}

static inline struct crypto_blkcipher *crypto_blkcipher_cast(
	struct crypto_tfm *tfm)
{
	BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_BLKCIPHER);
	return __crypto_blkcipher_cast(tfm);
}

/**
 * crypto_alloc_blkcipher() - allocate synchronous block cipher handle
 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
 *	      blkcipher cipher
 * @type: specifies the type of the cipher
 * @mask: specifies the mask for the cipher
 *
 * Allocate a cipher handle for a block cipher. The returned struct
 * crypto_blkcipher is the cipher handle that is required for any subsequent
 * API invocation for that block cipher.
 *
 * Return: allocated cipher handle in case of success; IS_ERR() is true in case
 *	   of an error, PTR_ERR() returns the error code.
 */
static inline struct crypto_blkcipher *crypto_alloc_blkcipher(
	const char *alg_name, u32 type, u32 mask)
{
	type &= ~CRYPTO_ALG_TYPE_MASK;
	type |= CRYPTO_ALG_TYPE_BLKCIPHER;
	mask |= CRYPTO_ALG_TYPE_MASK;

	return __crypto_blkcipher_cast(crypto_alloc_base(alg_name, type, mask));
}

static inline struct crypto_tfm *crypto_blkcipher_tfm(
	struct crypto_blkcipher *tfm)
{
	return &tfm->base;
}

/**
 * crypto_free_blkcipher() - zeroize and free the block cipher handle
 * @tfm: cipher handle to be freed
 */
static inline void crypto_free_blkcipher(struct crypto_blkcipher *tfm)
{
	crypto_free_tfm(crypto_blkcipher_tfm(tfm));
}

/**
 * crypto_has_blkcipher() - Search for the availability of a block cipher
 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
 *	      block cipher
 * @type: specifies the type of the cipher
 * @mask: specifies the mask for the cipher
 *
 * Return: true when the block cipher is known to the kernel crypto API; false
 *	   otherwise
 */
static inline int crypto_has_blkcipher(const char *alg_name, u32 type, u32 mask)
{
	type &= ~CRYPTO_ALG_TYPE_MASK;
	type |= CRYPTO_ALG_TYPE_BLKCIPHER;
	mask |= CRYPTO_ALG_TYPE_MASK;

	return crypto_has_alg(alg_name, type, mask);
}

/**
 * crypto_blkcipher_name() - return the name / cra_name from the cipher handle
 * @tfm: cipher handle
 *
 * Return: The character string holding the name of the cipher
 */
static inline const char *crypto_blkcipher_name(struct crypto_blkcipher *tfm)
{
	return crypto_tfm_alg_name(crypto_blkcipher_tfm(tfm));
}

static inline struct blkcipher_tfm *crypto_blkcipher_crt(
	struct crypto_blkcipher *tfm)
{
	return &crypto_blkcipher_tfm(tfm)->crt_blkcipher;
}

static inline struct blkcipher_alg *crypto_blkcipher_alg(
	struct crypto_blkcipher *tfm)
{
	return &crypto_blkcipher_tfm(tfm)->__crt_alg->cra_blkcipher;
}

/**
 * crypto_blkcipher_ivsize() - obtain IV size
 * @tfm: cipher handle
 *
 * The size of the IV for the block cipher referenced by the cipher handle is
 * returned. This IV size may be zero if the cipher does not need an IV.
 *
 * Return: IV size in bytes
 */
static inline unsigned int crypto_blkcipher_ivsize(struct crypto_blkcipher *tfm)
{
	return crypto_blkcipher_alg(tfm)->ivsize;
}

/**
 * crypto_blkcipher_blocksize() - obtain block size of cipher
 * @tfm: cipher handle
 *
 * The block size for the block cipher referenced with the cipher handle is
 * returned. The caller may use that information to allocate appropriate
 * memory for the data returned by the encryption or decryption operation.
 *
 * Return: block size of cipher
 */
static inline unsigned int crypto_blkcipher_blocksize(
	struct crypto_blkcipher *tfm)
{
	return crypto_tfm_alg_blocksize(crypto_blkcipher_tfm(tfm));
}

static inline unsigned int crypto_blkcipher_alignmask(
	struct crypto_blkcipher *tfm)
{
	return crypto_tfm_alg_alignmask(crypto_blkcipher_tfm(tfm));
}

static inline u32 crypto_blkcipher_get_flags(struct crypto_blkcipher *tfm)
{
	return crypto_tfm_get_flags(crypto_blkcipher_tfm(tfm));
}

static inline void crypto_blkcipher_set_flags(struct crypto_blkcipher *tfm,
					      u32 flags)
{
	crypto_tfm_set_flags(crypto_blkcipher_tfm(tfm), flags);
}

static inline void crypto_blkcipher_clear_flags(struct crypto_blkcipher *tfm,
						u32 flags)
{
	crypto_tfm_clear_flags(crypto_blkcipher_tfm(tfm), flags);
}

/**
 * crypto_blkcipher_setkey() - set key for cipher
 * @tfm: cipher handle
 * @key: buffer holding the key
 * @keylen: length of the key in bytes
 *
 * The caller provided key is set for the block cipher referenced by the cipher
 * handle.
 *
 * Note, the key length determines the cipher variant. Many block ciphers come
 * in different variants depending on the key size, such as AES-128 vs AES-192
 * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128
 * is performed.
 *
 * Return: 0 if the setting of the key was successful; < 0 if an error occurred
 */
static inline int crypto_blkcipher_setkey(struct crypto_blkcipher *tfm,
					  const u8 *key, unsigned int keylen)
{
	return crypto_blkcipher_crt(tfm)->setkey(crypto_blkcipher_tfm(tfm),
						 key, keylen);
}

/**
 * crypto_blkcipher_encrypt() - encrypt plaintext
 * @desc: reference to the block cipher handle with meta data
 * @dst: scatter/gather list that is filled by the cipher operation with the
 *	ciphertext
 * @src: scatter/gather list that holds the plaintext
 * @nbytes: number of bytes of the plaintext to encrypt.
 *
 * Encrypt plaintext data using the IV set by the caller with a preceding
 * call of crypto_blkcipher_set_iv.
 *
 * The blkcipher_desc data structure must be filled by the caller and can
 * reside on the stack. The caller must fill desc as follows: desc.tfm is filled
 * with the block cipher handle; desc.flags is filled with either
 * CRYPTO_TFM_REQ_MAY_SLEEP or 0.
 *
 * Return: 0 if the cipher operation was successful; < 0 if an error occurred
 */
static inline int crypto_blkcipher_encrypt(struct blkcipher_desc *desc,
					   struct scatterlist *dst,
					   struct scatterlist *src,
					   unsigned int nbytes)
{
	desc->info = crypto_blkcipher_crt(desc->tfm)->iv;
	return crypto_blkcipher_crt(desc->tfm)->encrypt(desc, dst, src, nbytes);
}

/**
 * crypto_blkcipher_encrypt_iv() - encrypt plaintext with dedicated IV
 * @desc: reference to the block cipher handle with meta data
 * @dst: scatter/gather list that is filled by the cipher operation with the
 *	ciphertext
 * @src: scatter/gather list that holds the plaintext
 * @nbytes: number of bytes of the plaintext to encrypt.
 *
 * Encrypt plaintext data with the use of an IV that is solely used for this
 * cipher operation. Any previously set IV is not used.
 *
 * The blkcipher_desc data structure must be filled by the caller and can
 * reside on the stack. The caller must fill desc as follows: desc.tfm is filled
 * with the block cipher handle; desc.info is filled with the IV to be used for
 * the current operation; desc.flags is filled with either
 * CRYPTO_TFM_REQ_MAY_SLEEP or 0.
 *
 * Return: 0 if the cipher operation was successful; < 0 if an error occurred
 */
static inline int crypto_blkcipher_encrypt_iv(struct blkcipher_desc *desc,
					      struct scatterlist *dst,
					      struct scatterlist *src,
					      unsigned int nbytes)
{
	return crypto_blkcipher_crt(desc->tfm)->encrypt(desc, dst, src, nbytes);
}

/**
 * crypto_blkcipher_decrypt() - decrypt ciphertext
 * @desc: reference to the block cipher handle with meta data
 * @dst: scatter/gather list that is filled by the cipher operation with the
 *	plaintext
 * @src: scatter/gather list that holds the ciphertext
 * @nbytes: number of bytes of the ciphertext to decrypt.
 *
 * Decrypt ciphertext data using the IV set by the caller with a preceding
 * call of crypto_blkcipher_set_iv.
 *
 * The blkcipher_desc data structure must be filled by the caller as documented
 * for the crypto_blkcipher_encrypt call above.
 *
 * Return: 0 if the cipher operation was successful; < 0 if an error occurred
 */
static inline int crypto_blkcipher_decrypt(struct blkcipher_desc *desc,
					   struct scatterlist *dst,
					   struct scatterlist *src,
					   unsigned int nbytes)
{
	desc->info = crypto_blkcipher_crt(desc->tfm)->iv;
	return crypto_blkcipher_crt(desc->tfm)->decrypt(desc, dst, src, nbytes);
}

/**
 * crypto_blkcipher_decrypt_iv() - decrypt ciphertext with dedicated IV
 * @desc: reference to the block cipher handle with meta data
 * @dst: scatter/gather list that is filled by the cipher operation with the
 *	plaintext
 * @src: scatter/gather list that holds the ciphertext
 * @nbytes: number of bytes of the ciphertext to decrypt.
 *
 * Decrypt ciphertext data with the use of an IV that is solely used for this
 * cipher operation. Any previously set IV is not used.
 *
 * The blkcipher_desc data structure must be filled by the caller as documented
 * for the crypto_blkcipher_encrypt_iv call above.
 *
 * Return: 0 if the cipher operation was successful; < 0 if an error occurred
 */
static inline int crypto_blkcipher_decrypt_iv(struct blkcipher_desc *desc,
					      struct scatterlist *dst,
					      struct scatterlist *src,
					      unsigned int nbytes)
{
	return crypto_blkcipher_crt(desc->tfm)->decrypt(desc, dst, src, nbytes);
}

/**
 * crypto_blkcipher_set_iv() - set IV for cipher
 * @tfm: cipher handle
 * @src: buffer holding the IV
 * @len: length of the IV in bytes
 *
 * The caller provided IV is set for the block cipher referenced by the cipher
 * handle.
 */
static inline void crypto_blkcipher_set_iv(struct crypto_blkcipher *tfm,
					   const u8 *src, unsigned int len)
{
	memcpy(crypto_blkcipher_crt(tfm)->iv, src, len);
}

/**
 * crypto_blkcipher_get_iv() - obtain IV from cipher
 * @tfm: cipher handle
 * @dst: buffer filled with the IV
 * @len: length of the buffer dst
 *
 * The caller can obtain the IV set for the block cipher referenced by the
 * cipher handle and store it into the user-provided buffer. If the buffer
 * has insufficient space, the IV is truncated to fit the buffer.
 */
static inline void crypto_blkcipher_get_iv(struct crypto_blkcipher *tfm,
					   u8 *dst, unsigned int len)
{
	memcpy(dst, crypto_blkcipher_crt(tfm)->iv, len);
}

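/*
 * Example (illustrative sketch, not part of the original header):
 * synchronous in-place CBC-AES encryption of one scatterlist; key, iv,
 * sg and len are placeholders and error handling is abbreviated.
 *
 *	struct crypto_blkcipher *tfm;
 *	struct blkcipher_desc desc;
 *
 *	tfm = crypto_alloc_blkcipher("cbc(aes)", 0, 0);
 *	crypto_blkcipher_setkey(tfm, key, 16);
 *	crypto_blkcipher_set_iv(tfm, iv, crypto_blkcipher_ivsize(tfm));
 *
 *	desc.tfm = tfm;
 *	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
 *	err = crypto_blkcipher_encrypt(&desc, sg, sg, len);
 *
 *	crypto_free_blkcipher(tfm);
 */
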
/**
 * DOC: Single Block Cipher API
 *
 * The single block cipher API is used with the ciphers of type
 * CRYPTO_ALG_TYPE_CIPHER (listed as type "cipher" in /proc/crypto).
 *
 * Using the single block cipher API calls, operations with the basic cipher
 * primitive can be implemented. These cipher primitives exclude any block
 * chaining operations including IV handling.
 *
 * The purpose of this single block cipher API is to support the implementation
 * of templates or other concepts that only need to perform the cipher operation
 * on one block at a time. Templates invoke the underlying cipher primitive
 * block-wise and process either the input or the output data of these cipher
 * operations.
 */

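/*
 * Example (illustrative sketch, not part of the original header):
 * encrypting exactly one 16-byte block in place; key and block are
 * placeholders, and crypto_cipher_encrypt_one() is the one-block helper
 * defined later in this header.
 *
 *	struct crypto_cipher *tfm;
 *
 *	tfm = crypto_alloc_cipher("aes", 0, 0);
 *	crypto_cipher_setkey(tfm, key, 16);
 *	crypto_cipher_encrypt_one(tfm, block, block);
 *	crypto_free_cipher(tfm);
 */
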
1415 static inline struct crypto_cipher *__crypto_cipher_cast(struct crypto_tfm *tfm)
1416 {
1417 	return (struct crypto_cipher *)tfm;
1418 }
1419 
1420 static inline struct crypto_cipher *crypto_cipher_cast(struct crypto_tfm *tfm)
1421 {
1422 	BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
1423 	return __crypto_cipher_cast(tfm);
1424 }
1425 
1426 /**
1427  * crypto_alloc_cipher() - allocate single block cipher handle
1428  * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
1429  *	     single block cipher
1430  * @type: specifies the type of the cipher
1431  * @mask: specifies the mask for the cipher
1432  *
1433  * Allocate a cipher handle for a single block cipher. The returned struct
1434  * crypto_cipher is the cipher handle that is required for any subsequent API
1435  * invocation for that single block cipher.
1436  *
1437  * Return: allocated cipher handle in case of success; IS_ERR() is true in case
1438  *	   of an error, PTR_ERR() returns the error code.
1439  */
1440 static inline struct crypto_cipher *crypto_alloc_cipher(const char *alg_name,
1441 							u32 type, u32 mask)
1442 {
1443 	type &= ~CRYPTO_ALG_TYPE_MASK;
1444 	type |= CRYPTO_ALG_TYPE_CIPHER;
1445 	mask |= CRYPTO_ALG_TYPE_MASK;
1446 
1447 	return __crypto_cipher_cast(crypto_alloc_base(alg_name, type, mask));
1448 }
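
/*
 * Allocation example (an illustrative sketch): obtain a handle for the
 * "aes" primitive -- the algorithm name is an assumption of the example --
 * and release it again once the operations are done.
 *
 *	struct crypto_cipher *tfm;
 *
 *	tfm = crypto_alloc_cipher("aes", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	...
 *	crypto_free_cipher(tfm);
 */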
1449 
1450 static inline struct crypto_tfm *crypto_cipher_tfm(struct crypto_cipher *tfm)
1451 {
1452 	return &tfm->base;
1453 }
1454 
1455 /**
1456  * crypto_free_cipher() - zeroize and free the single block cipher handle
1457  * @tfm: cipher handle to be freed
1458  */
1459 static inline void crypto_free_cipher(struct crypto_cipher *tfm)
1460 {
1461 	crypto_free_tfm(crypto_cipher_tfm(tfm));
1462 }
1463 
1464 /**
1465  * crypto_has_cipher() - Search for the availability of a single block cipher
1466  * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
1467  *	     single block cipher
1468  * @type: specifies the type of the cipher
1469  * @mask: specifies the mask for the cipher
1470  *
1471  * Return: true when the single block cipher is known to the kernel crypto API;
1472  *	   false otherwise
1473  */
1474 static inline int crypto_has_cipher(const char *alg_name, u32 type, u32 mask)
1475 {
1476 	type &= ~CRYPTO_ALG_TYPE_MASK;
1477 	type |= CRYPTO_ALG_TYPE_CIPHER;
1478 	mask |= CRYPTO_ALG_TYPE_MASK;
1479 
1480 	return crypto_has_alg(alg_name, type, mask);
1481 }
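
/*
 * Example (an illustrative sketch): probe for an algorithm before
 * depending on it; the "serpent" name is an assumption of the example.
 *
 *	if (!crypto_has_cipher("serpent", 0, 0))
 *		return -ENOENT;
 */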
1482 
1483 static inline struct cipher_tfm *crypto_cipher_crt(struct crypto_cipher *tfm)
1484 {
1485 	return &crypto_cipher_tfm(tfm)->crt_cipher;
1486 }
1487 
1488 /**
1489  * crypto_cipher_blocksize() - obtain block size for cipher
1490  * @tfm: cipher handle
1491  *
1492  * The block size for the single block cipher referenced with the cipher handle
1493  * tfm is returned. The caller may use that information to allocate appropriate
1494  * memory for the data returned by the encryption or decryption operation.
1495  *
1496  * Return: block size of cipher
1497  */
1498 static inline unsigned int crypto_cipher_blocksize(struct crypto_cipher *tfm)
1499 {
1500 	return crypto_tfm_alg_blocksize(crypto_cipher_tfm(tfm));
1501 }
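
/*
 * Example (an illustrative sketch, assuming a previously allocated
 * handle tfm): size a scratch buffer from the cipher block size rather
 * than hard-coding it.
 *
 *	unsigned int bs = crypto_cipher_blocksize(tfm);
 *	u8 *buf = kmalloc(bs, GFP_KERNEL);
 *
 *	if (!buf)
 *		return -ENOMEM;
 */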
1502 
1503 static inline unsigned int crypto_cipher_alignmask(struct crypto_cipher *tfm)
1504 {
1505 	return crypto_tfm_alg_alignmask(crypto_cipher_tfm(tfm));
1506 }
1507 
1508 static inline u32 crypto_cipher_get_flags(struct crypto_cipher *tfm)
1509 {
1510 	return crypto_tfm_get_flags(crypto_cipher_tfm(tfm));
1511 }
1512 
1513 static inline void crypto_cipher_set_flags(struct crypto_cipher *tfm,
1514 					   u32 flags)
1515 {
1516 	crypto_tfm_set_flags(crypto_cipher_tfm(tfm), flags);
1517 }
1518 
1519 static inline void crypto_cipher_clear_flags(struct crypto_cipher *tfm,
1520 					     u32 flags)
1521 {
1522 	crypto_tfm_clear_flags(crypto_cipher_tfm(tfm), flags);
1523 }
1524 
1525 /**
1526  * crypto_cipher_setkey() - set key for cipher
1527  * @tfm: cipher handle
1528  * @key: buffer holding the key
1529  * @keylen: length of the key in bytes
1530  *
1531  * The caller-provided key is set for the single block cipher referenced by the
1532  * cipher handle.
1533  *
1534  * Note: the key length determines the cipher variant. Many block ciphers come
1535  * in different variants depending on the key size, such as AES-128 vs. AES-192
1536  * vs. AES-256; these are key-size variants, not cipher modes. When a 16-byte
1537  * key is provided for an AES cipher handle, AES-128 is performed.
1538  *
1539  * Return: 0 if the setting of the key was successful; < 0 if an error occurred
1540  */
1541 static inline int crypto_cipher_setkey(struct crypto_cipher *tfm,
1542                                        const u8 *key, unsigned int keylen)
1543 {
1544 	return crypto_cipher_crt(tfm)->cit_setkey(crypto_cipher_tfm(tfm),
1545 						  key, keylen);
1546 }
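
/*
 * Example (an illustrative sketch, assuming a previously allocated
 * "aes" handle tfm): a 16-byte key selects AES-128.  The all-zero key
 * is a placeholder; on failure the tfm flags (e.g.
 * CRYPTO_TFM_RES_BAD_KEY_LEN) give the cause.
 *
 *	static const u8 key[16] = { 0 };
 *	int err;
 *
 *	err = crypto_cipher_setkey(tfm, key, sizeof(key));
 *	if (err)
 *		pr_err("setkey failed: %d (flags %x)\n", err,
 *		       crypto_cipher_get_flags(tfm));
 */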
1547 
1548 /**
1549  * crypto_cipher_encrypt_one() - encrypt one block of plaintext
1550  * @tfm: cipher handle
1551  * @dst: points to the buffer that will be filled with the ciphertext
1552  * @src: buffer holding the plaintext to be encrypted
1553  *
1554  * Invoke the encryption operation of one block. The caller must ensure that
1555  * the plaintext and ciphertext buffers are at least one block in size.
1556  */
1557 static inline void crypto_cipher_encrypt_one(struct crypto_cipher *tfm,
1558 					     u8 *dst, const u8 *src)
1559 {
1560 	crypto_cipher_crt(tfm)->cit_encrypt_one(crypto_cipher_tfm(tfm),
1561 						dst, src);
1562 }
1563 
1564 /**
1565  * crypto_cipher_decrypt_one() - decrypt one block of ciphertext
1566  * @tfm: cipher handle
1567  * @dst: points to the buffer that will be filled with the plaintext
1568  * @src: buffer holding the ciphertext to be decrypted
1569  *
1570  * Invoke the decryption operation of one block. The caller must ensure that
1571  * the plaintext and ciphertext buffers are at least one block in size.
1572  */
1573 static inline void crypto_cipher_decrypt_one(struct crypto_cipher *tfm,
1574 					     u8 *dst, const u8 *src)
1575 {
1576 	crypto_cipher_crt(tfm)->cit_decrypt_one(crypto_cipher_tfm(tfm),
1577 						dst, src);
1578 }
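
/*
 * Putting the single block cipher API together (an illustrative sketch
 * assuming the 16-byte AES block size): allocate a handle, key it, run
 * one encrypt/decrypt round trip and free the handle again.
 *
 *	struct crypto_cipher *tfm;
 *	u8 key[16] = { 0 };
 *	u8 pt[16] = "single block in";
 *	u8 ct[16], out[16];
 *	int err;
 *
 *	tfm = crypto_alloc_cipher("aes", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	err = crypto_cipher_setkey(tfm, key, sizeof(key));
 *	if (err)
 *		goto out_free;
 *	crypto_cipher_encrypt_one(tfm, ct, pt);
 *	crypto_cipher_decrypt_one(tfm, out, ct);
 *	WARN_ON(memcmp(out, pt, sizeof(pt)));
 * out_free:
 *	crypto_free_cipher(tfm);
 */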
1579 
1580 /**
1581  * DOC: Synchronous Message Digest API
1582  *
1583  * The synchronous message digest API is used with the ciphers of type
1584  * CRYPTO_ALG_TYPE_HASH (listed as type "hash" in /proc/crypto).
1585  */
1586 
1587 static inline struct crypto_hash *__crypto_hash_cast(struct crypto_tfm *tfm)
1588 {
1589 	return (struct crypto_hash *)tfm;
1590 }
1591 
1592 static inline struct crypto_hash *crypto_hash_cast(struct crypto_tfm *tfm)
1593 {
1594 	BUG_ON((crypto_tfm_alg_type(tfm) ^ CRYPTO_ALG_TYPE_HASH) &
1595 	       CRYPTO_ALG_TYPE_HASH_MASK);
1596 	return __crypto_hash_cast(tfm);
1597 }
1598 
1599 /**
1600  * crypto_alloc_hash() - allocate synchronous message digest handle
1601  * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
1602  *	      message digest cipher
1603  * @type: specifies the type of the cipher
1604  * @mask: specifies the mask for the cipher
1605  *
1606  * Allocate a cipher handle for a message digest. The returned struct
1607  * crypto_hash is the cipher handle that is required for any subsequent
1608  * API invocation for that message digest.
1609  *
1610  * Return: allocated cipher handle in case of success; IS_ERR() is true in case
1611  *	   of an error, PTR_ERR() returns the error code.
1612  */
1613 static inline struct crypto_hash *crypto_alloc_hash(const char *alg_name,
1614 						    u32 type, u32 mask)
1615 {
1616 	type &= ~CRYPTO_ALG_TYPE_MASK;
1617 	mask &= ~CRYPTO_ALG_TYPE_MASK;
1618 	type |= CRYPTO_ALG_TYPE_HASH;
1619 	mask |= CRYPTO_ALG_TYPE_HASH_MASK;
1620 
1621 	return __crypto_hash_cast(crypto_alloc_base(alg_name, type, mask));
1622 }
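
/*
 * Allocation example (an illustrative sketch): request a synchronous
 * "sha1" hash.  Setting CRYPTO_ALG_ASYNC in the mask while leaving it
 * clear in the type restricts the lookup to synchronous
 * implementations; the algorithm name is an assumption of the example.
 *
 *	struct crypto_hash *tfm;
 *
 *	tfm = crypto_alloc_hash("sha1", 0, CRYPTO_ALG_ASYNC);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 */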
1623 
1624 static inline struct crypto_tfm *crypto_hash_tfm(struct crypto_hash *tfm)
1625 {
1626 	return &tfm->base;
1627 }
1628 
1629 /**
1630  * crypto_free_hash() - zeroize and free message digest handle
1631  * @tfm: cipher handle to be freed
1632  */
1633 static inline void crypto_free_hash(struct crypto_hash *tfm)
1634 {
1635 	crypto_free_tfm(crypto_hash_tfm(tfm));
1636 }
1637 
1638 /**
1639  * crypto_has_hash() - Search for the availability of a message digest
1640  * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
1641  *	      message digest cipher
1642  * @type: specifies the type of the cipher
1643  * @mask: specifies the mask for the cipher
1644  *
1645  * Return: true when the message digest cipher is known to the kernel crypto
1646  *	   API; false otherwise
1647  */
1648 static inline int crypto_has_hash(const char *alg_name, u32 type, u32 mask)
1649 {
1650 	type &= ~CRYPTO_ALG_TYPE_MASK;
1651 	mask &= ~CRYPTO_ALG_TYPE_MASK;
1652 	type |= CRYPTO_ALG_TYPE_HASH;
1653 	mask |= CRYPTO_ALG_TYPE_HASH_MASK;
1654 
1655 	return crypto_has_alg(alg_name, type, mask);
1656 }
1657 
1658 static inline struct hash_tfm *crypto_hash_crt(struct crypto_hash *tfm)
1659 {
1660 	return &crypto_hash_tfm(tfm)->crt_hash;
1661 }
1662 
1663 /**
1664  * crypto_hash_blocksize() - obtain block size for message digest
1665  * @tfm: cipher handle
1666  *
1667  * The block size for the message digest cipher referenced with the cipher
1668  * handle is returned.
1669  *
1670  * Return: block size of cipher
1671  */
1672 static inline unsigned int crypto_hash_blocksize(struct crypto_hash *tfm)
1673 {
1674 	return crypto_tfm_alg_blocksize(crypto_hash_tfm(tfm));
1675 }
1676 
1677 static inline unsigned int crypto_hash_alignmask(struct crypto_hash *tfm)
1678 {
1679 	return crypto_tfm_alg_alignmask(crypto_hash_tfm(tfm));
1680 }
1681 
1682 /**
1683  * crypto_hash_digestsize() - obtain message digest size
1684  * @tfm: cipher handle
1685  *
1686  * The size for the message digest created by the message digest cipher
1687  * referenced with the cipher handle is returned.
1688  *
1689  * Return: message digest size
1690  */
1691 static inline unsigned int crypto_hash_digestsize(struct crypto_hash *tfm)
1692 {
1693 	return crypto_hash_crt(tfm)->digestsize;
1694 }
1695 
1696 static inline u32 crypto_hash_get_flags(struct crypto_hash *tfm)
1697 {
1698 	return crypto_tfm_get_flags(crypto_hash_tfm(tfm));
1699 }
1700 
1701 static inline void crypto_hash_set_flags(struct crypto_hash *tfm, u32 flags)
1702 {
1703 	crypto_tfm_set_flags(crypto_hash_tfm(tfm), flags);
1704 }
1705 
1706 static inline void crypto_hash_clear_flags(struct crypto_hash *tfm, u32 flags)
1707 {
1708 	crypto_tfm_clear_flags(crypto_hash_tfm(tfm), flags);
1709 }
1710 
1711 /**
1712  * crypto_hash_init() - (re)initialize message digest handle
1713  * @desc: cipher request handle that is to be filled by the caller --
1714  *	  desc.tfm is filled with the hash cipher handle;
1715  *	  desc.flags is filled with either CRYPTO_TFM_REQ_MAY_SLEEP or 0.
1716  *
1717  * The call (re-)initializes the message digest referenced by the hash cipher
1718  * request handle. Any potentially existing state created by previous
1719  * operations is discarded.
1720  *
1721  * Return: 0 if the message digest initialization was successful; < 0 if an
1722  *	   error occurred
1723  */
1724 static inline int crypto_hash_init(struct hash_desc *desc)
1725 {
1726 	return crypto_hash_crt(desc->tfm)->init(desc);
1727 }
1728 
1729 /**
1730  * crypto_hash_update() - add data to message digest for processing
1731  * @desc: cipher request handle
1732  * @sg: scatter / gather list pointing to the data to be added to the message
1733  *      digest
1734  * @nbytes: number of bytes to be processed from @sg
1735  *
1736  * Updates the message digest state of the cipher handle pointed to by the
1737  * hash cipher request handle with the input data pointed to by the
1738  * scatter/gather list.
1739  *
1740  * Return: 0 if the message digest update was successful; < 0 if an error
1741  *	   occurred
1742  */
1743 static inline int crypto_hash_update(struct hash_desc *desc,
1744 				     struct scatterlist *sg,
1745 				     unsigned int nbytes)
1746 {
1747 	return crypto_hash_crt(desc->tfm)->update(desc, sg, nbytes);
1748 }
1749 
1750 /**
1751  * crypto_hash_final() - calculate message digest
1752  * @desc: cipher request handle
1753  * @out: message digest output buffer -- the caller must ensure that the out
1754  *	 buffer is of sufficient size (e.g. by using the crypto_hash_digestsize
1755  *	 function).
1756  *
1757  * Finalize the message digest operation and create the message digest
1758  * based on all data added to the cipher handle. The message digest is placed
1759  * into the output buffer.
1760  *
1761  * Return: 0 if the message digest creation was successful; < 0 if an error
1762  *	   occurred
1763  */
1764 static inline int crypto_hash_final(struct hash_desc *desc, u8 *out)
1765 {
1766 	return crypto_hash_crt(desc->tfm)->final(desc, out);
1767 }
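
/*
 * Example (an illustrative sketch, assuming a "sha1" handle tfm as
 * allocated above and its 20-byte digest size; real code should use
 * crypto_hash_digestsize()): the classic init/update/final sequence
 * over a linear buffer mapped by a one-entry scatterlist.
 *
 *	struct hash_desc desc;
 *	struct scatterlist sg;
 *	u8 data[64] = { 0 };
 *	u8 md[20];
 *	int err;
 *
 *	desc.tfm = tfm;
 *	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
 *	sg_init_one(&sg, data, sizeof(data));
 *
 *	err = crypto_hash_init(&desc);
 *	if (!err)
 *		err = crypto_hash_update(&desc, &sg, sizeof(data));
 *	if (!err)
 *		err = crypto_hash_final(&desc, md);
 */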
1768 
1769 /**
1770  * crypto_hash_digest() - calculate message digest for a buffer
1771  * @desc: see crypto_hash_final()
1772  * @sg: see crypto_hash_update()
1773  * @nbytes:  see crypto_hash_update()
1774  * @out: see crypto_hash_final()
1775  *
1776  * This function is a shorthand for the sequence of crypto_hash_init,
1777  * crypto_hash_update and crypto_hash_final calls. The parameters have the
1778  * same meaning as discussed for those three separate functions.
1779  *
1780  * Return: 0 if the message digest creation was successful; < 0 if an error
1781  *	   occurred
1782  */
1783 static inline int crypto_hash_digest(struct hash_desc *desc,
1784 				     struct scatterlist *sg,
1785 				     unsigned int nbytes, u8 *out)
1786 {
1787 	return crypto_hash_crt(desc->tfm)->digest(desc, sg, nbytes, out);
1788 }
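
/*
 * Example (an illustrative sketch, continuing the variables of the
 * init/update/final sketch above): the same digest computed in a
 * single call.
 *
 *	sg_init_one(&sg, data, sizeof(data));
 *	err = crypto_hash_digest(&desc, &sg, sizeof(data), md);
 */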
1789 
1790 /**
1791  * crypto_hash_setkey() - set key for message digest
1792  * @hash: cipher handle
1793  * @key: buffer holding the key
1794  * @keylen: length of the key in bytes
1795  *
1796  * The caller-provided key is set for the message digest cipher. The cipher
1797  * handle must point to a keyed hash in order for this function to succeed.
1798  *
1799  * Return: 0 if the setting of the key was successful; < 0 if an error occurred
1800  */
1801 static inline int crypto_hash_setkey(struct crypto_hash *hash,
1802 				     const u8 *key, unsigned int keylen)
1803 {
1804 	return crypto_hash_crt(hash)->setkey(hash, key, keylen);
1805 }
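
/*
 * Keyed hash example (an illustrative sketch): compute an HMAC by
 * keying an "hmac(sha1)" handle before the digest operation.  The
 * algorithm name and the all-zero placeholder key are assumptions of
 * the example.
 *
 *	struct crypto_hash *hmac;
 *	static const u8 hkey[20] = { 0 };
 *	int err;
 *
 *	hmac = crypto_alloc_hash("hmac(sha1)", 0, CRYPTO_ALG_ASYNC);
 *	if (IS_ERR(hmac))
 *		return PTR_ERR(hmac);
 *	err = crypto_hash_setkey(hmac, hkey, sizeof(hkey));
 */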
1806 
1807 static inline struct crypto_comp *__crypto_comp_cast(struct crypto_tfm *tfm)
1808 {
1809 	return (struct crypto_comp *)tfm;
1810 }
1811 
1812 static inline struct crypto_comp *crypto_comp_cast(struct crypto_tfm *tfm)
1813 {
1814 	BUG_ON((crypto_tfm_alg_type(tfm) ^ CRYPTO_ALG_TYPE_COMPRESS) &
1815 	       CRYPTO_ALG_TYPE_MASK);
1816 	return __crypto_comp_cast(tfm);
1817 }
1818 
1819 static inline struct crypto_comp *crypto_alloc_comp(const char *alg_name,
1820 						    u32 type, u32 mask)
1821 {
1822 	type &= ~CRYPTO_ALG_TYPE_MASK;
1823 	type |= CRYPTO_ALG_TYPE_COMPRESS;
1824 	mask |= CRYPTO_ALG_TYPE_MASK;
1825 
1826 	return __crypto_comp_cast(crypto_alloc_base(alg_name, type, mask));
1827 }
1828 
1829 static inline struct crypto_tfm *crypto_comp_tfm(struct crypto_comp *tfm)
1830 {
1831 	return &tfm->base;
1832 }
1833 
1834 static inline void crypto_free_comp(struct crypto_comp *tfm)
1835 {
1836 	crypto_free_tfm(crypto_comp_tfm(tfm));
1837 }
1838 
1839 static inline int crypto_has_comp(const char *alg_name, u32 type, u32 mask)
1840 {
1841 	type &= ~CRYPTO_ALG_TYPE_MASK;
1842 	type |= CRYPTO_ALG_TYPE_COMPRESS;
1843 	mask |= CRYPTO_ALG_TYPE_MASK;
1844 
1845 	return crypto_has_alg(alg_name, type, mask);
1846 }
1847 
1848 static inline const char *crypto_comp_name(struct crypto_comp *tfm)
1849 {
1850 	return crypto_tfm_alg_name(crypto_comp_tfm(tfm));
1851 }
1852 
1853 static inline struct compress_tfm *crypto_comp_crt(struct crypto_comp *tfm)
1854 {
1855 	return &crypto_comp_tfm(tfm)->crt_compress;
1856 }
1857 
1858 static inline int crypto_comp_compress(struct crypto_comp *tfm,
1859                                        const u8 *src, unsigned int slen,
1860                                        u8 *dst, unsigned int *dlen)
1861 {
1862 	return crypto_comp_crt(tfm)->cot_compress(crypto_comp_tfm(tfm),
1863 						  src, slen, dst, dlen);
1864 }
1865 
1866 static inline int crypto_comp_decompress(struct crypto_comp *tfm,
1867                                          const u8 *src, unsigned int slen,
1868                                          u8 *dst, unsigned int *dlen)
1869 {
1870 	return crypto_comp_crt(tfm)->cot_decompress(crypto_comp_tfm(tfm),
1871 						    src, slen, dst, dlen);
1872 }
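
/*
 * Compression example (an illustrative sketch): one compress/decompress
 * round trip through the "deflate" algorithm.  The algorithm name and
 * the buffer sizes are assumptions of the example.  On entry, *dlen
 * holds the capacity of the destination buffer; on return it holds the
 * number of bytes actually produced.
 *
 *	struct crypto_comp *tfm;
 *	u8 src[256] = { 0 }, zip[512], out[256];
 *	unsigned int zlen = sizeof(zip), olen = sizeof(out);
 *	int err;
 *
 *	tfm = crypto_alloc_comp("deflate", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	err = crypto_comp_compress(tfm, src, sizeof(src), zip, &zlen);
 *	if (!err)
 *		err = crypto_comp_decompress(tfm, zip, zlen, out, &olen);
 *	crypto_free_comp(tfm);
 */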
1873 
1874 #endif	/* _LINUX_CRYPTO_H */
1875 