/*
 * Scatterlist Cryptographic API.
 *
 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
 * Copyright (c) 2002 David S. Miller (davem@redhat.com)
 * Copyright (c) 2005 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Portions derived from Cryptoapi, by Alexander Kjeldaas <astor@fast.no>
 * and Nettle, by Niels Möller.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */
#ifndef _LINUX_CRYPTO_H
#define _LINUX_CRYPTO_H

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/bug.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/completion.h>

/*
 * Autoloaded crypto modules should only use a prefixed name to avoid allowing
 * arbitrary modules to be loaded. Loading from userspace may still need the
 * unprefixed names, so retain those aliases as well.
 * This uses __MODULE_INFO directly instead of MODULE_ALIAS because pre-4.3
 * gcc (e.g. avr32 toolchain) uses __LINE__ for uniqueness, and this macro
 * expands twice on the same line. Instead, use a separate base name for the
 * alias.
 */
#define MODULE_ALIAS_CRYPTO(name)	\
		__MODULE_INFO(alias, alias_userspace, name);	\
		__MODULE_INFO(alias, alias_crypto, "crypto-" name)

/*
 * Algorithm masks and types.
 */
#define CRYPTO_ALG_TYPE_MASK		0x0000000f
#define CRYPTO_ALG_TYPE_CIPHER		0x00000001
#define CRYPTO_ALG_TYPE_COMPRESS	0x00000002
#define CRYPTO_ALG_TYPE_AEAD		0x00000003
#define CRYPTO_ALG_TYPE_BLKCIPHER	0x00000004
#define CRYPTO_ALG_TYPE_ABLKCIPHER	0x00000005
#define CRYPTO_ALG_TYPE_SKCIPHER	0x00000005
#define CRYPTO_ALG_TYPE_GIVCIPHER	0x00000006
#define CRYPTO_ALG_TYPE_KPP		0x00000008
#define CRYPTO_ALG_TYPE_ACOMPRESS	0x0000000a
#define CRYPTO_ALG_TYPE_SCOMPRESS	0x0000000b
#define CRYPTO_ALG_TYPE_RNG		0x0000000c
#define CRYPTO_ALG_TYPE_AKCIPHER	0x0000000d
#define CRYPTO_ALG_TYPE_DIGEST		0x0000000e
#define CRYPTO_ALG_TYPE_HASH		0x0000000e
#define CRYPTO_ALG_TYPE_SHASH		0x0000000e
#define CRYPTO_ALG_TYPE_AHASH		0x0000000f

#define CRYPTO_ALG_TYPE_HASH_MASK	0x0000000e
#define CRYPTO_ALG_TYPE_AHASH_MASK	0x0000000e
#define CRYPTO_ALG_TYPE_BLKCIPHER_MASK	0x0000000c
#define CRYPTO_ALG_TYPE_ACOMPRESS_MASK	0x0000000e

#define CRYPTO_ALG_LARVAL		0x00000010
#define CRYPTO_ALG_DEAD			0x00000020
#define CRYPTO_ALG_DYING		0x00000040
#define CRYPTO_ALG_ASYNC		0x00000080

/*
 * Set this bit if and only if the algorithm requires another algorithm of
 * the same type to handle corner cases.
 */
#define CRYPTO_ALG_NEED_FALLBACK	0x00000100

/*
 * This bit is set for symmetric key ciphers that have already been wrapped
 * with a generic IV generator to prevent them from being wrapped again.
 */
#define CRYPTO_ALG_GENIV		0x00000200

/*
 * Set if the algorithm has passed automated run-time testing. Note that
 * if there is no run-time testing for a given algorithm it is considered
 * to have passed.
 */

#define CRYPTO_ALG_TESTED		0x00000400

/*
 * Set if the algorithm is an instance that is built from templates.
 */
#define CRYPTO_ALG_INSTANCE		0x00000800

/* Set this bit if the algorithm provided is hardware accelerated but
 * not available to userspace via instruction set or so.
 */
#define CRYPTO_ALG_KERN_DRIVER_ONLY	0x00001000

/*
 * Mark a cipher as a service implementation only usable by another
 * cipher and never by a normal user of the kernel crypto API
 */
#define CRYPTO_ALG_INTERNAL		0x00002000

/*
 * Set if the algorithm has a ->setkey() method but can be used without
 * calling it first, i.e. there is a default key.
 */
#define CRYPTO_ALG_OPTIONAL_KEY		0x00004000

/*
 * Don't trigger module loading
 */
#define CRYPTO_NOLOAD			0x00008000

/*
 * Transform masks and values (for crt_flags).
 */
#define CRYPTO_TFM_NEED_KEY		0x00000001

#define CRYPTO_TFM_REQ_MASK		0x000fff00
#define CRYPTO_TFM_RES_MASK		0xfff00000

#define CRYPTO_TFM_REQ_WEAK_KEY		0x00000100
#define CRYPTO_TFM_REQ_MAY_SLEEP	0x00000200
#define CRYPTO_TFM_REQ_MAY_BACKLOG	0x00000400
#define CRYPTO_TFM_RES_WEAK_KEY		0x00100000
#define CRYPTO_TFM_RES_BAD_KEY_LEN	0x00200000
#define CRYPTO_TFM_RES_BAD_KEY_SCHED	0x00400000
#define CRYPTO_TFM_RES_BAD_BLOCK_LEN	0x00800000
#define CRYPTO_TFM_RES_BAD_FLAGS	0x01000000

/*
 * Miscellaneous stuff.
 */
#define CRYPTO_MAX_ALG_NAME		128

/*
 * The macro CRYPTO_MINALIGN_ATTR (along with the void * type in the actual
 * declaration) is used to ensure that the crypto_tfm context structure is
 * aligned correctly for the given architecture so that there are no alignment
 * faults for C data types. In particular, this is required on platforms such
 * as arm where pointers are 32-bit aligned but there are data types such as
 * u64 which require 64-bit alignment.
 */
#define CRYPTO_MINALIGN ARCH_KMALLOC_MINALIGN

#define CRYPTO_MINALIGN_ATTR __attribute__ ((__aligned__(CRYPTO_MINALIGN)))

struct scatterlist;
struct crypto_ablkcipher;
struct crypto_async_request;
struct crypto_blkcipher;
struct crypto_tfm;
struct crypto_type;
struct skcipher_givcrypt_request;

typedef void (*crypto_completion_t)(struct crypto_async_request *req, int err);

/**
 * DOC: Block Cipher Context Data Structures
 *
 * These data structures define the operating context for each block cipher
 * type.
 */

struct crypto_async_request {
	struct list_head list;
	crypto_completion_t complete;
	void *data;
	struct crypto_tfm *tfm;

	u32 flags;
};

struct ablkcipher_request {
	struct crypto_async_request base;

	unsigned int nbytes;

	void *info;

	struct scatterlist *src;
	struct scatterlist *dst;

	void *__ctx[] CRYPTO_MINALIGN_ATTR;
};

struct blkcipher_desc {
	struct crypto_blkcipher *tfm;
	void *info;
	u32 flags;
};

struct cipher_desc {
	struct crypto_tfm *tfm;
	void (*crfn)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
	unsigned int (*prfn)(const struct cipher_desc *desc, u8 *dst,
			     const u8 *src, unsigned int nbytes);
	void *info;
};

/**
 * DOC: Block Cipher Algorithm Definitions
 *
 * These data structures define modular crypto algorithm implementations,
 * managed via crypto_register_alg() and crypto_unregister_alg().
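 *
 * As a rough illustration only (a minimal sketch; the "my_" names and module
 * hooks are hypothetical and not part of this API), an implementation
 * typically registers its algorithm from module init code and unregisters it
 * on exit::
 *
 *	static struct crypto_alg my_alg;
 *
 *	static int __init my_module_init(void)
 *	{
 *		return crypto_register_alg(&my_alg);
 *	}
 *
 *	static void __exit my_module_exit(void)
 *	{
 *		crypto_unregister_alg(&my_alg);
 *	}
 *
 *	module_init(my_module_init);
 *	module_exit(my_module_exit);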
 */

/**
 * struct ablkcipher_alg - asynchronous block cipher definition
 * @min_keysize: Minimum key size supported by the transformation. This is the
 *	smallest key length supported by this transformation algorithm.
 *	This must be set to one of the pre-defined values as this is
 *	not hardware specific. Possible values for this field can be
 *	found via git grep "_MIN_KEY_SIZE" include/crypto/
 * @max_keysize: Maximum key size supported by the transformation. This is the
 *	largest key length supported by this transformation algorithm.
 *	This must be set to one of the pre-defined values as this is
 *	not hardware specific. Possible values for this field can be
 *	found via git grep "_MAX_KEY_SIZE" include/crypto/
 * @setkey: Set key for the transformation. This function is used to either
 *	program a supplied key into the hardware or store the key in the
 *	transformation context for programming it later. Note that this
 *	function does modify the transformation context. This function can
 *	be called multiple times during the existence of the transformation
 *	object, so one must make sure the key is properly reprogrammed into
 *	the hardware. This function is also responsible for checking the key
 *	length for validity. In case a software fallback was put in place in
 *	the @cra_init call, this function might need to use the fallback if
 *	the algorithm doesn't support all of the key sizes.
 * @encrypt: Encrypt a scatterlist of blocks. This function is used to encrypt
 *	the supplied scatterlist containing the blocks of data. The crypto
 *	API consumer is responsible for aligning the entries of the
 *	scatterlist properly and making sure the chunks are correctly
 *	sized. In case a software fallback was put in place in the
 *	@cra_init call, this function might need to use the fallback if
 *	the algorithm doesn't support all of the key sizes. In case the
 *	key was stored in transformation context, the key might need to be
 *	re-programmed into the hardware in this function. This function
 *	shall not modify the transformation context, as this function may
 *	be called in parallel with the same transformation object.
 * @decrypt: Decrypt a single block. This is a reverse counterpart to @encrypt
 *	and the conditions are exactly the same.
 * @givencrypt: Update the IV for encryption. With this function, a cipher
 *	implementation may provide the function on how to update the IV
 *	for encryption.
 * @givdecrypt: Update the IV for decryption. This is the reverse of
 *	@givencrypt .
 * @geniv: The transformation implementation may use an "IV generator" provided
 *	by the kernel crypto API. Several use cases have a predefined
 *	approach how IVs are to be updated. For such use cases, the kernel
 *	crypto API provides ready-to-use implementations that can be
 *	referenced with this variable.
 * @ivsize: IV size applicable for transformation. The consumer must provide an
 *	IV of exactly that size to perform the encrypt or decrypt operation.
 *
 * All fields except @givencrypt , @givdecrypt , @geniv and @ivsize are
 * mandatory and must be filled.
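 *
 * A minimal definition could look like the sketch below (illustrative only;
 * the "my_" callbacks and the AES-like key/IV sizes are assumptions, not
 * requirements)::
 *
 *	static struct ablkcipher_alg my_ablkcipher_alg = {
 *		.setkey		= my_setkey,
 *		.encrypt	= my_encrypt,
 *		.decrypt	= my_decrypt,
 *		.min_keysize	= 16,
 *		.max_keysize	= 32,
 *		.ivsize		= 16,
 *	};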
 */
struct ablkcipher_alg {
	int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key,
		      unsigned int keylen);
	int (*encrypt)(struct ablkcipher_request *req);
	int (*decrypt)(struct ablkcipher_request *req);
	int (*givencrypt)(struct skcipher_givcrypt_request *req);
	int (*givdecrypt)(struct skcipher_givcrypt_request *req);

	const char *geniv;

	unsigned int min_keysize;
	unsigned int max_keysize;
	unsigned int ivsize;
};

/**
 * struct blkcipher_alg - synchronous block cipher definition
 * @min_keysize: see struct ablkcipher_alg
 * @max_keysize: see struct ablkcipher_alg
 * @setkey: see struct ablkcipher_alg
 * @encrypt: see struct ablkcipher_alg
 * @decrypt: see struct ablkcipher_alg
 * @geniv: see struct ablkcipher_alg
 * @ivsize: see struct ablkcipher_alg
 *
 * All fields except @geniv and @ivsize are mandatory and must be filled.
 */
struct blkcipher_alg {
	int (*setkey)(struct crypto_tfm *tfm, const u8 *key,
		      unsigned int keylen);
	int (*encrypt)(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned int nbytes);
	int (*decrypt)(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned int nbytes);

	const char *geniv;

	unsigned int min_keysize;
	unsigned int max_keysize;
	unsigned int ivsize;
};

/**
 * struct cipher_alg - single-block symmetric ciphers definition
 * @cia_min_keysize: Minimum key size supported by the transformation. This is
 *	the smallest key length supported by this transformation
 *	algorithm. This must be set to one of the pre-defined
 *	values as this is not hardware specific. Possible values
 *	for this field can be found via git grep "_MIN_KEY_SIZE"
 *	include/crypto/
 * @cia_max_keysize: Maximum key size supported by the transformation. This is
 *	the largest key length supported by this transformation
 *	algorithm. This must be set to one of the pre-defined values
 *	as this is not hardware specific. Possible values for this
 *	field can be found via git grep "_MAX_KEY_SIZE"
 *	include/crypto/
 * @cia_setkey: Set key for the transformation. This function is used to either
 *	program a supplied key into the hardware or store the key in the
 *	transformation context for programming it later. Note that this
 *	function does modify the transformation context. This function
 *	can be called multiple times during the existence of the
 *	transformation object, so one must make sure the key is properly
 *	reprogrammed into the hardware. This function is also
 *	responsible for checking the key length for validity.
 * @cia_encrypt: Encrypt a single block. This function is used to encrypt a
 *	single block of data, which must be @cra_blocksize big. This
 *	always operates on a full @cra_blocksize and it is not possible
 *	to encrypt a block of smaller size. The supplied buffers must
 *	therefore also be at least of @cra_blocksize size. Both the
 *	input and output buffers are always aligned to @cra_alignmask.
 *	In case either of the input or output buffer supplied by user
 *	of the crypto API is not aligned to @cra_alignmask, the crypto
 *	API will re-align the buffers.
 *	The re-alignment means that a new buffer will be allocated, the
 *	data will be copied into the new buffer, then the processing will
 *	happen on the new buffer, then the data will be copied back into
 *	the original buffer and finally the new buffer will be freed. In
 *	case a software fallback was put in place in the @cra_init call,
 *	this function might need to use the fallback if the algorithm
 *	doesn't support all of the key sizes. In case the key was stored
 *	in transformation context, the key might need to be re-programmed
 *	into the hardware in this function. This function shall not
 *	modify the transformation context, as this function may be
 *	called in parallel with the same transformation object.
 * @cia_decrypt: Decrypt a single block. This is a reverse counterpart to
 *	@cia_encrypt, and the conditions are exactly the same.
 *
 * All fields are mandatory and must be filled.
 */
struct cipher_alg {
	unsigned int cia_min_keysize;
	unsigned int cia_max_keysize;
	int (*cia_setkey)(struct crypto_tfm *tfm, const u8 *key,
			  unsigned int keylen);
	void (*cia_encrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
	void (*cia_decrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
};

struct compress_alg {
	int (*coa_compress)(struct crypto_tfm *tfm, const u8 *src,
			    unsigned int slen, u8 *dst, unsigned int *dlen);
	int (*coa_decompress)(struct crypto_tfm *tfm, const u8 *src,
			      unsigned int slen, u8 *dst, unsigned int *dlen);
};


#define cra_ablkcipher	cra_u.ablkcipher
#define cra_blkcipher	cra_u.blkcipher
#define cra_cipher	cra_u.cipher
#define cra_compress	cra_u.compress

/**
 * struct crypto_alg - definition of a cryptographic cipher algorithm
 * @cra_flags: Flags describing this transformation. See include/linux/crypto.h
 *	CRYPTO_ALG_* flags for the flags which go in here. Those are
 *	used for fine-tuning the description of the transformation
 *	algorithm.
 * @cra_blocksize: Minimum block size of this transformation. The size in bytes
 *	of the smallest possible unit which can be transformed with
 *	this algorithm. The users must respect this value.
 *	In case of HASH transformation, it is possible for a smaller
 *	block than @cra_blocksize to be passed to the crypto API for
 *	transformation, in case of any other transformation type, an
 *	error will be returned upon any attempt to transform smaller
 *	than @cra_blocksize chunks.
 * @cra_ctxsize: Size of the operational context of the transformation. This
 *	value informs the kernel crypto API about the memory size
 *	needed to be allocated for the transformation context.
 * @cra_alignmask: Alignment mask for the input and output data buffer. The
 *	data buffer containing the input data for the algorithm must be
 *	aligned to this alignment mask. The data buffer for the
 *	output data must be aligned to this alignment mask. Note that
 *	the Crypto API will do the re-alignment in software, but
 *	only under special conditions and there is a performance hit.
 *	The re-alignment happens at these occasions for different
 *	@cra_u types: cipher -- For both input data and output data
 *	buffer; ahash -- For output hash destination buf; shash --
 *	For output hash destination buf.
 *	This is needed on hardware which is flawed by design and
 *	cannot pick data from arbitrary addresses.
 * @cra_priority: Priority of this transformation implementation. In case
 *	multiple transformations with same @cra_name are available to
 *	the Crypto API, the kernel will use the one with highest
 *	@cra_priority.
 * @cra_name: Generic name (usable by multiple implementations) of the
 *	transformation algorithm. This is the name of the transformation
 *	itself. This field is used by the kernel when looking up the
 *	providers of particular transformation.
 * @cra_driver_name: Unique name of the transformation provider. This is the
 *	name of the provider of the transformation. This can be any
 *	arbitrary value, but in the usual case, this contains the
 *	name of the chip or provider and the name of the
 *	transformation algorithm.
 * @cra_type: Type of the cryptographic transformation. This is a pointer to
 *	struct crypto_type, which implements callbacks common for all
 *	transformation types. There are multiple options:
 *	&crypto_blkcipher_type, &crypto_ablkcipher_type,
 *	&crypto_ahash_type, &crypto_rng_type.
 *	This field might be empty. In that case, there are no common
 *	callbacks. This is the case for: cipher, compress, shash.
 * @cra_u: Callbacks implementing the transformation. This is a union of
 *	multiple structures. Depending on the type of transformation selected
 *	by @cra_type and @cra_flags above, the associated structure must be
 *	filled with callbacks. This field might be empty. This is the case
 *	for ahash, shash.
 * @cra_init: Initialize the cryptographic transformation object. This function
 *	is used to initialize the cryptographic transformation object.
 *	This function is called only once at the instantiation time, right
 *	after the transformation context was allocated. In case the
 *	cryptographic hardware has some special requirements which need to
 *	be handled by software, this function shall check for the precise
 *	requirement of the transformation and put any software fallbacks
 *	in place.
 * @cra_exit: Deinitialize the cryptographic transformation object. This is a
 *	counterpart to @cra_init, used to remove various changes set in
 *	@cra_init.
 * @cra_u.ablkcipher: Union member which contains an asynchronous block cipher
 *	definition. See @struct @ablkcipher_alg.
 * @cra_u.blkcipher: Union member which contains a synchronous block cipher
 *	definition. See @struct @blkcipher_alg.
 * @cra_u.cipher: Union member which contains a single-block symmetric cipher
 *	definition. See @struct @cipher_alg.
 * @cra_u.compress: Union member which contains a (de)compression algorithm.
 *	See @struct @compress_alg.
 * @cra_module: Owner of this transformation implementation. Set to THIS_MODULE
 * @cra_list: internally used
 * @cra_users: internally used
 * @cra_refcnt: internally used
 * @cra_destroy: internally used
 *
 * The struct crypto_alg describes a generic Crypto API algorithm and is common
 * for all of the transformations. Any variable not documented here shall not
 * be used by a cipher implementation as it is internal to the Crypto API.
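 *
 * For illustration only (a minimal sketch for a single-block cipher; the
 * "my_" identifiers, name strings and sizes are made-up assumptions), such a
 * definition could look like::
 *
 *	static struct crypto_alg my_crypto_alg = {
 *		.cra_name	 = "my-cipher",
 *		.cra_driver_name = "my-cipher-generic",
 *		.cra_priority	 = 100,
 *		.cra_flags	 = CRYPTO_ALG_TYPE_CIPHER,
 *		.cra_blocksize	 = 16,
 *		.cra_ctxsize	 = sizeof(struct my_cipher_ctx),
 *		.cra_module	 = THIS_MODULE,
 *		.cra_u		 = { .cipher = {
 *			.cia_min_keysize = 16,
 *			.cia_max_keysize = 32,
 *			.cia_setkey	 = my_setkey,
 *			.cia_encrypt	 = my_encrypt_one,
 *			.cia_decrypt	 = my_decrypt_one,
 *		} },
 *	};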
 */
struct crypto_alg {
	struct list_head cra_list;
	struct list_head cra_users;

	u32 cra_flags;
	unsigned int cra_blocksize;
	unsigned int cra_ctxsize;
	unsigned int cra_alignmask;

	int cra_priority;
	refcount_t cra_refcnt;

	char cra_name[CRYPTO_MAX_ALG_NAME];
	char cra_driver_name[CRYPTO_MAX_ALG_NAME];

	const struct crypto_type *cra_type;

	union {
		struct ablkcipher_alg ablkcipher;
		struct blkcipher_alg blkcipher;
		struct cipher_alg cipher;
		struct compress_alg compress;
	} cra_u;

	int (*cra_init)(struct crypto_tfm *tfm);
	void (*cra_exit)(struct crypto_tfm *tfm);
	void (*cra_destroy)(struct crypto_alg *alg);

	struct module *cra_module;
} CRYPTO_MINALIGN_ATTR;

/*
 * A helper struct for waiting for completion of async crypto ops
 */
struct crypto_wait {
	struct completion completion;
	int err;
};

/*
 * Macro for declaring a crypto op async wait object on stack
 */
#define DECLARE_CRYPTO_WAIT(_wait) \
	struct crypto_wait _wait = { \
		COMPLETION_INITIALIZER_ONSTACK((_wait).completion), 0 }

/*
 * Async ops completion helper functions
 */
void crypto_req_done(struct crypto_async_request *req, int err);

static inline int crypto_wait_req(int err, struct crypto_wait *wait)
{
	switch (err) {
	case -EINPROGRESS:
	case -EBUSY:
		wait_for_completion(&wait->completion);
		reinit_completion(&wait->completion);
		err = wait->err;
		break;
	}

	return err;
}

static inline void crypto_init_wait(struct crypto_wait *wait)
{
	init_completion(&wait->completion);
}

/*
 * Algorithm registration interface.
 */
int crypto_register_alg(struct crypto_alg *alg);
int crypto_unregister_alg(struct crypto_alg *alg);
int crypto_register_algs(struct crypto_alg *algs, int count);
int crypto_unregister_algs(struct crypto_alg *algs, int count);

/*
 * Algorithm query interface.
 */
int crypto_has_alg(const char *name, u32 type, u32 mask);

/*
 * Transforms: user-instantiated objects which encapsulate algorithms
 * and core processing logic. Managed via crypto_alloc_*() and
 * crypto_free_*(), as well as the various helpers below.
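 *
 * A minimal allocation sketch, assuming the generic "aes" single-block
 * implementation is available (in practice the type-specific wrappers below,
 * such as crypto_alloc_cipher() or crypto_alloc_blkcipher(), are preferred
 * over calling crypto_alloc_base() directly):
 *
 *	struct crypto_tfm *tfm;
 *
 *	tfm = crypto_alloc_base("aes", CRYPTO_ALG_TYPE_CIPHER,
 *				CRYPTO_ALG_TYPE_MASK);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	...
 *	crypto_free_tfm(tfm);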
 */

struct ablkcipher_tfm {
	int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key,
		      unsigned int keylen);
	int (*encrypt)(struct ablkcipher_request *req);
	int (*decrypt)(struct ablkcipher_request *req);

	struct crypto_ablkcipher *base;

	unsigned int ivsize;
	unsigned int reqsize;
};

struct blkcipher_tfm {
	void *iv;
	int (*setkey)(struct crypto_tfm *tfm, const u8 *key,
		      unsigned int keylen);
	int (*encrypt)(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes);
	int (*decrypt)(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes);
};

struct cipher_tfm {
	int (*cit_setkey)(struct crypto_tfm *tfm,
			  const u8 *key, unsigned int keylen);
	void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
	void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
};

struct compress_tfm {
	int (*cot_compress)(struct crypto_tfm *tfm,
			    const u8 *src, unsigned int slen,
			    u8 *dst, unsigned int *dlen);
	int (*cot_decompress)(struct crypto_tfm *tfm,
			      const u8 *src, unsigned int slen,
			      u8 *dst, unsigned int *dlen);
};

#define crt_ablkcipher	crt_u.ablkcipher
#define crt_blkcipher	crt_u.blkcipher
#define crt_cipher	crt_u.cipher
#define crt_compress	crt_u.compress

struct crypto_tfm {

	u32 crt_flags;

	union {
		struct ablkcipher_tfm ablkcipher;
		struct blkcipher_tfm blkcipher;
		struct cipher_tfm cipher;
		struct compress_tfm compress;
	} crt_u;

	void (*exit)(struct crypto_tfm *tfm);

	struct crypto_alg *__crt_alg;

	void *__crt_ctx[] CRYPTO_MINALIGN_ATTR;
};

struct crypto_ablkcipher {
	struct crypto_tfm base;
};

struct crypto_blkcipher {
	struct crypto_tfm base;
};

struct crypto_cipher {
	struct crypto_tfm base;
};

struct crypto_comp {
	struct crypto_tfm base;
};

enum {
	CRYPTOA_UNSPEC,
	CRYPTOA_ALG,
	CRYPTOA_TYPE,
	CRYPTOA_U32,
	__CRYPTOA_MAX,
};

#define CRYPTOA_MAX (__CRYPTOA_MAX - 1)

/* Maximum number of (rtattr) parameters for each template. */
#define CRYPTO_MAX_ATTRS 32

struct crypto_attr_alg {
	char name[CRYPTO_MAX_ALG_NAME];
};

struct crypto_attr_type {
	u32 type;
	u32 mask;
};

struct crypto_attr_u32 {
	u32 num;
};

/*
 * Transform user interface.
 */

struct crypto_tfm *crypto_alloc_base(const char *alg_name, u32 type, u32 mask);
void crypto_destroy_tfm(void *mem, struct crypto_tfm *tfm);

static inline void crypto_free_tfm(struct crypto_tfm *tfm)
{
	return crypto_destroy_tfm(tfm, tfm);
}

int alg_test(const char *driver, const char *alg, u32 type, u32 mask);

/*
 * Transform helpers which query the underlying algorithm.
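 *
 * For example (an illustrative sketch only, assuming a valid tfm), a caller
 * may log which implementation was picked and its block size:
 *
 *	pr_debug("using %s (%s), block size %u\n",
 *		 crypto_tfm_alg_name(tfm),
 *		 crypto_tfm_alg_driver_name(tfm),
 *		 crypto_tfm_alg_blocksize(tfm));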
 */
static inline const char *crypto_tfm_alg_name(struct crypto_tfm *tfm)
{
	return tfm->__crt_alg->cra_name;
}

static inline const char *crypto_tfm_alg_driver_name(struct crypto_tfm *tfm)
{
	return tfm->__crt_alg->cra_driver_name;
}

static inline int crypto_tfm_alg_priority(struct crypto_tfm *tfm)
{
	return tfm->__crt_alg->cra_priority;
}

static inline u32 crypto_tfm_alg_type(struct crypto_tfm *tfm)
{
	return tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK;
}

static inline unsigned int crypto_tfm_alg_blocksize(struct crypto_tfm *tfm)
{
	return tfm->__crt_alg->cra_blocksize;
}

static inline unsigned int crypto_tfm_alg_alignmask(struct crypto_tfm *tfm)
{
	return tfm->__crt_alg->cra_alignmask;
}

static inline u32 crypto_tfm_get_flags(struct crypto_tfm *tfm)
{
	return tfm->crt_flags;
}

static inline void crypto_tfm_set_flags(struct crypto_tfm *tfm, u32 flags)
{
	tfm->crt_flags |= flags;
}

static inline void crypto_tfm_clear_flags(struct crypto_tfm *tfm, u32 flags)
{
	tfm->crt_flags &= ~flags;
}

static inline void *crypto_tfm_ctx(struct crypto_tfm *tfm)
{
	return tfm->__crt_ctx;
}

static inline unsigned int crypto_tfm_ctx_alignment(void)
{
	struct crypto_tfm *tfm;
	return __alignof__(tfm->__crt_ctx);
}

/*
 * API wrappers.
 */
static inline struct crypto_ablkcipher *__crypto_ablkcipher_cast(
	struct crypto_tfm *tfm)
{
	return (struct crypto_ablkcipher *)tfm;
}

static inline u32 crypto_skcipher_type(u32 type)
{
	type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
	type |= CRYPTO_ALG_TYPE_BLKCIPHER;
	return type;
}

static inline u32 crypto_skcipher_mask(u32 mask)
{
	mask &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
	mask |= CRYPTO_ALG_TYPE_BLKCIPHER_MASK;
	return mask;
}

/**
 * DOC: Asynchronous Block Cipher API
 *
 * Asynchronous block cipher API is used with the ciphers of type
 * CRYPTO_ALG_TYPE_ABLKCIPHER (listed as type "ablkcipher" in /proc/crypto).
 *
 * Asynchronous cipher operations imply that the function invocation for a
 * cipher request returns immediately before the completion of the operation.
 * The cipher request is scheduled as a separate kernel thread and therefore
 * load-balanced on the different CPUs via the process scheduler. To allow
 * the kernel crypto API to inform the caller about the completion of a cipher
 * request, the caller must provide a callback function. That function is
 * invoked with the cipher handle when the request completes.
 *
 * To support the asynchronous operation, more information than just the
 * cipher handle must be supplied to the kernel crypto API. That additional
 * information is given by filling in the ablkcipher_request data structure.
 *
 * For the asynchronous block cipher API, the state is maintained with the tfm
 * cipher handle. A single tfm can be used across multiple calls and in
 * parallel. For asynchronous block cipher calls, context data supplied and
 * only used by the caller can be referenced in the request data structure in
 * addition to the IV used for the cipher request.
 * The maintenance of such state information would be important for a crypto
 * driver implementer to have, because when calling the callback function upon
 * completion of the cipher operation, that callback function may need some
 * information about which operation just finished if multiple operations were
 * invoked in parallel. This state information is unused by the kernel crypto
 * API.
 */

static inline struct crypto_tfm *crypto_ablkcipher_tfm(
	struct crypto_ablkcipher *tfm)
{
	return &tfm->base;
}

/**
 * crypto_free_ablkcipher() - zeroize and free cipher handle
 * @tfm: cipher handle to be freed
 */
static inline void crypto_free_ablkcipher(struct crypto_ablkcipher *tfm)
{
	crypto_free_tfm(crypto_ablkcipher_tfm(tfm));
}

/**
 * crypto_has_ablkcipher() - Search for the availability of an ablkcipher.
 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
 *	      ablkcipher
 * @type: specifies the type of the cipher
 * @mask: specifies the mask for the cipher
 *
 * Return: true when the ablkcipher is known to the kernel crypto API; false
 *	   otherwise
 */
static inline int crypto_has_ablkcipher(const char *alg_name, u32 type,
					u32 mask)
{
	return crypto_has_alg(alg_name, crypto_skcipher_type(type),
			      crypto_skcipher_mask(mask));
}

static inline struct ablkcipher_tfm *crypto_ablkcipher_crt(
	struct crypto_ablkcipher *tfm)
{
	return &crypto_ablkcipher_tfm(tfm)->crt_ablkcipher;
}

/**
 * crypto_ablkcipher_ivsize() - obtain IV size
 * @tfm: cipher handle
 *
 * The size of the IV for the ablkcipher referenced by the cipher handle is
 * returned. This IV size may be zero if the cipher does not need an IV.
 *
 * Return: IV size in bytes
 */
static inline unsigned int crypto_ablkcipher_ivsize(
	struct crypto_ablkcipher *tfm)
{
	return crypto_ablkcipher_crt(tfm)->ivsize;
}

/**
 * crypto_ablkcipher_blocksize() - obtain block size of cipher
 * @tfm: cipher handle
 *
 * The block size for the ablkcipher referenced with the cipher handle is
 * returned. The caller may use that information to allocate appropriate
 * memory for the data returned by the encryption or decryption operation.
 *
 * Return: block size of cipher
 */
static inline unsigned int crypto_ablkcipher_blocksize(
	struct crypto_ablkcipher *tfm)
{
	return crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(tfm));
}

static inline unsigned int crypto_ablkcipher_alignmask(
	struct crypto_ablkcipher *tfm)
{
	return crypto_tfm_alg_alignmask(crypto_ablkcipher_tfm(tfm));
}

static inline u32 crypto_ablkcipher_get_flags(struct crypto_ablkcipher *tfm)
{
	return crypto_tfm_get_flags(crypto_ablkcipher_tfm(tfm));
}

static inline void crypto_ablkcipher_set_flags(struct crypto_ablkcipher *tfm,
					       u32 flags)
{
	crypto_tfm_set_flags(crypto_ablkcipher_tfm(tfm), flags);
}

static inline void crypto_ablkcipher_clear_flags(struct crypto_ablkcipher *tfm,
						 u32 flags)
{
	crypto_tfm_clear_flags(crypto_ablkcipher_tfm(tfm), flags);
}

/**
 * crypto_ablkcipher_setkey() - set key for cipher
 * @tfm: cipher handle
 * @key: buffer holding the key
 * @keylen: length of the key in bytes
 *
 * The caller provided key is set for the ablkcipher referenced by the cipher
 * handle.
 *
 * Note, the key length determines the cipher variant. Many block ciphers
 * support several key sizes, such as AES-128 vs. AES-192 vs. AES-256. When
 * providing a 16 byte key for an AES cipher handle, AES-128 is performed.
 *
 * Return: 0 if the setting of the key was successful; < 0 if an error occurred
 */
static inline int crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
					   const u8 *key, unsigned int keylen)
{
	struct ablkcipher_tfm *crt = crypto_ablkcipher_crt(tfm);

	return crt->setkey(crt->base, key, keylen);
}

/**
 * crypto_ablkcipher_reqtfm() - obtain cipher handle from request
 * @req: ablkcipher_request out of which the cipher handle is to be obtained
 *
 * Return the crypto_ablkcipher handle when furnishing an ablkcipher_request
 * data structure.
 *
 * Return: crypto_ablkcipher handle
 */
static inline struct crypto_ablkcipher *crypto_ablkcipher_reqtfm(
	struct ablkcipher_request *req)
{
	return __crypto_ablkcipher_cast(req->base.tfm);
}

/**
 * crypto_ablkcipher_encrypt() - encrypt plaintext
 * @req: reference to the ablkcipher_request handle that holds all information
 *	 needed to perform the cipher operation
 *
 * Encrypt plaintext data using the ablkcipher_request handle. That data
 * structure and how it is filled with data is discussed with the
 * ablkcipher_request_* functions.
 *
 * Return: 0 if the cipher operation was successful; < 0 if an error occurred
 */
static inline int crypto_ablkcipher_encrypt(struct ablkcipher_request *req)
{
	struct ablkcipher_tfm *crt =
		crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req));
	return crt->encrypt(req);
}

/**
 * crypto_ablkcipher_decrypt() - decrypt ciphertext
 * @req: reference to the ablkcipher_request handle that holds all information
 *	 needed to perform the cipher operation
 *
 * Decrypt ciphertext data using the ablkcipher_request handle. That data
 * structure and how it is filled with data is discussed with the
 * ablkcipher_request_* functions.
 *
 * Return: 0 if the cipher operation was successful; < 0 if an error occurred
 */
static inline int crypto_ablkcipher_decrypt(struct ablkcipher_request *req)
{
	struct ablkcipher_tfm *crt =
		crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req));
	return crt->decrypt(req);
}

/**
 * DOC: Asynchronous Cipher Request Handle
 *
 * The ablkcipher_request data structure contains all pointers to data
 * required for the asynchronous cipher operation. This includes the cipher
 * handle (which can be used by multiple ablkcipher_request instances), pointer
 * to plaintext and ciphertext, asynchronous callback function, etc. It acts
 * as a handle to the ablkcipher_request_* API calls in a similar way as
 * ablkcipher handle to the crypto_ablkcipher_* API calls.
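 *
 * A typical call sequence, shown only as an abbreviated sketch (the tfm, the
 * scatterlists src_sg/dst_sg, the IV buffer and the key handling are assumed
 * to be set up elsewhere by the caller), could look like::
 *
 *	DECLARE_CRYPTO_WAIT(wait);
 *	struct ablkcipher_request *req;
 *	int err;
 *
 *	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
 *	if (!req)
 *		return -ENOMEM;
 *
 *	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
 *					CRYPTO_TFM_REQ_MAY_SLEEP,
 *					crypto_req_done, &wait);
 *	ablkcipher_request_set_crypt(req, src_sg, dst_sg, nbytes, iv);
 *	err = crypto_wait_req(crypto_ablkcipher_encrypt(req), &wait);
 *	ablkcipher_request_free(req);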
 */

/**
 * crypto_ablkcipher_reqsize() - obtain size of the request data structure
 * @tfm: cipher handle
 *
 * Return: number of bytes
 */
static inline unsigned int crypto_ablkcipher_reqsize(
	struct crypto_ablkcipher *tfm)
{
	return crypto_ablkcipher_crt(tfm)->reqsize;
}

/**
 * ablkcipher_request_set_tfm() - update cipher handle reference in request
 * @req: request handle to be modified
 * @tfm: cipher handle that shall be added to the request handle
 *
 * Allow the caller to replace the existing ablkcipher handle in the request
 * data structure with a different one.
 */
static inline void ablkcipher_request_set_tfm(
	struct ablkcipher_request *req, struct crypto_ablkcipher *tfm)
{
	req->base.tfm = crypto_ablkcipher_tfm(crypto_ablkcipher_crt(tfm)->base);
}

static inline struct ablkcipher_request *ablkcipher_request_cast(
	struct crypto_async_request *req)
{
	return container_of(req, struct ablkcipher_request, base);
}

/**
 * ablkcipher_request_alloc() - allocate request data structure
 * @tfm: cipher handle to be registered with the request
 * @gfp: memory allocation flag that is handed to kmalloc by the API call.
 *
 * Allocate the request data structure that must be used with the ablkcipher
 * encrypt and decrypt API calls. During the allocation, the provided ablkcipher
 * handle is registered in the request data structure.
 *
 * Return: allocated request handle in case of success, or NULL if out of memory
 */
static inline struct ablkcipher_request *ablkcipher_request_alloc(
	struct crypto_ablkcipher *tfm, gfp_t gfp)
{
	struct ablkcipher_request *req;

	req = kmalloc(sizeof(struct ablkcipher_request) +
		      crypto_ablkcipher_reqsize(tfm), gfp);

	if (likely(req))
		ablkcipher_request_set_tfm(req, tfm);

	return req;
}

/**
 * ablkcipher_request_free() - zeroize and free request data structure
 * @req: request data structure cipher handle to be freed
 */
static inline void ablkcipher_request_free(struct ablkcipher_request *req)
{
	kzfree(req);
}

/**
 * ablkcipher_request_set_callback() - set asynchronous callback function
 * @req: request handle
 * @flags: specify zero or an ORing of the flags
 *	   CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may back log and
 *	   increase the wait queue beyond the initial maximum size;
 *	   CRYPTO_TFM_REQ_MAY_SLEEP the request processing may sleep
 * @compl: callback function pointer to be registered with the request handle
 * @data: The data pointer refers to memory that is not used by the kernel
 *	  crypto API, but provided to the callback function for it to use. Here,
 *	  the caller can provide a reference to memory the callback function can
 *	  operate on. As the callback function is invoked asynchronously to the
 *	  related functionality, it may need to access data structures of the
 *	  related functionality which can be referenced using this pointer. The
 *	  callback function can access the memory via the "data" field in the
 *	  crypto_async_request data structure provided to the callback function.
 *
 * This function allows setting the callback function that is triggered once the
 * cipher operation completes.
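 *
 * Callers which simply want to wait synchronously for completion can, as a
 * common pattern (not a requirement), pass crypto_req_done() as @compl and a
 * struct crypto_wait as @data, and then collect the final result with
 * crypto_wait_req().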
 *
 * The callback function is registered with the ablkcipher_request handle and
 * must comply with the following template::
 *
 *	void callback_function(struct crypto_async_request *req, int error)
 */
static inline void ablkcipher_request_set_callback(
	struct ablkcipher_request *req,
	u32 flags, crypto_completion_t compl, void *data)
{
	req->base.complete = compl;
	req->base.data = data;
	req->base.flags = flags;
}

/**
 * ablkcipher_request_set_crypt() - set data buffers
 * @req: request handle
 * @src: source scatter / gather list
 * @dst: destination scatter / gather list
 * @nbytes: number of bytes to process from @src
 * @iv: IV for the cipher operation which must comply with the IV size defined
 *	by crypto_ablkcipher_ivsize
 *
 * This function allows setting of the source data and destination data
 * scatter / gather lists.
 *
 * For encryption, the source is treated as the plaintext and the
 * destination is the ciphertext. For a decryption operation, the use is
 * reversed - the source is the ciphertext and the destination is the plaintext.
 */
static inline void ablkcipher_request_set_crypt(
	struct ablkcipher_request *req,
	struct scatterlist *src, struct scatterlist *dst,
	unsigned int nbytes, void *iv)
{
	req->src = src;
	req->dst = dst;
	req->nbytes = nbytes;
	req->info = iv;
}

/**
 * DOC: Synchronous Block Cipher API
 *
 * The synchronous block cipher API is used with the ciphers of type
 * CRYPTO_ALG_TYPE_BLKCIPHER (listed as type "blkcipher" in /proc/crypto)
 *
 * Synchronous calls have a context in the tfm. But since a single tfm can be
 * used in multiple calls and in parallel, this info should not be changeable
 * (unless a lock is used). This applies, for example, to the symmetric key.
 * However, the IV is changeable, so there is an iv field in the blkcipher_tfm
 * structure for the synchronous blkcipher API. It is thus the only state
 * information that can be kept for synchronous calls without using a big lock
 * across a tfm.
 *
 * The block cipher API allows the use of a complete cipher, i.e. a cipher
 * consisting of a template (a block chaining mode) and a single block cipher
 * primitive (e.g. AES).
 *
 * The plaintext data buffer and the ciphertext data buffer are pointed to
 * by using scatter/gather lists. The cipher operation is performed
 * on all segments of the provided scatter/gather lists.
 *
 * The kernel crypto API supports a cipher operation "in-place" which means that
 * the caller may provide the same scatter/gather list for the plaintext and
 * cipher text. After the completion of the cipher operation, the plaintext
 * data is replaced with the ciphertext data in case of an encryption and vice
 * versa for a decryption. The caller must ensure that the scatter/gather lists
 * for the output data point to sufficiently large buffers, i.e. multiples of
 * the block size of the cipher.
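 *
 * As a rough usage sketch (error handling abbreviated; the algorithm name
 * "cbc(aes)", the key, the IV and the scatterlists are assumptions supplied
 * by the caller)::
 *
 *	struct crypto_blkcipher *tfm;
 *	struct blkcipher_desc desc;
 *	int err;
 *
 *	tfm = crypto_alloc_blkcipher("cbc(aes)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *
 *	crypto_blkcipher_setkey(tfm, key, keylen);
 *	crypto_blkcipher_set_iv(tfm, iv, crypto_blkcipher_ivsize(tfm));
 *
 *	desc.tfm = tfm;
 *	desc.flags = 0;
 *	err = crypto_blkcipher_encrypt(&desc, dst_sg, src_sg, nbytes);
 *
 *	crypto_free_blkcipher(tfm);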
 */

static inline struct crypto_blkcipher *__crypto_blkcipher_cast(
	struct crypto_tfm *tfm)
{
	return (struct crypto_blkcipher *)tfm;
}

static inline struct crypto_blkcipher *crypto_blkcipher_cast(
	struct crypto_tfm *tfm)
{
	BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_BLKCIPHER);
	return __crypto_blkcipher_cast(tfm);
}

/**
 * crypto_alloc_blkcipher() - allocate synchronous block cipher handle
 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
 *	      blkcipher cipher
 * @type: specifies the type of the cipher
 * @mask: specifies the mask for the cipher
 *
 * Allocate a cipher handle for a block cipher. The returned struct
 * crypto_blkcipher is the cipher handle that is required for any subsequent
 * API invocation for that block cipher.
 *
 * Return: allocated cipher handle in case of success; IS_ERR() is true in case
 *	   of an error, PTR_ERR() returns the error code.
 */
static inline struct crypto_blkcipher *crypto_alloc_blkcipher(
	const char *alg_name, u32 type, u32 mask)
{
	type &= ~CRYPTO_ALG_TYPE_MASK;
	type |= CRYPTO_ALG_TYPE_BLKCIPHER;
	mask |= CRYPTO_ALG_TYPE_MASK;

	return __crypto_blkcipher_cast(crypto_alloc_base(alg_name, type, mask));
}

static inline struct crypto_tfm *crypto_blkcipher_tfm(
	struct crypto_blkcipher *tfm)
{
	return &tfm->base;
}

/**
 * crypto_free_blkcipher() - zeroize and free the block cipher handle
 * @tfm: cipher handle to be freed
 */
static inline void crypto_free_blkcipher(struct crypto_blkcipher *tfm)
{
	crypto_free_tfm(crypto_blkcipher_tfm(tfm));
}

/**
 * crypto_has_blkcipher() - Search for the availability of a block cipher
 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
 *	      block cipher
 * @type: specifies the type of the cipher
 * @mask: specifies the mask for the cipher
 *
 * Return: true when the block cipher is known to the kernel crypto API; false
 *	   otherwise
 */
static inline int crypto_has_blkcipher(const char *alg_name, u32 type, u32 mask)
{
	type &= ~CRYPTO_ALG_TYPE_MASK;
	type |= CRYPTO_ALG_TYPE_BLKCIPHER;
	mask |= CRYPTO_ALG_TYPE_MASK;

	return crypto_has_alg(alg_name, type, mask);
}

/**
 * crypto_blkcipher_name() - return the name / cra_name from the cipher handle
 * @tfm: cipher handle
 *
 * Return: The character string holding the name of the cipher
 */
static inline const char *crypto_blkcipher_name(struct crypto_blkcipher *tfm)
{
	return crypto_tfm_alg_name(crypto_blkcipher_tfm(tfm));
}

static inline struct blkcipher_tfm *crypto_blkcipher_crt(
	struct crypto_blkcipher *tfm)
{
	return &crypto_blkcipher_tfm(tfm)->crt_blkcipher;
}

static inline struct blkcipher_alg *crypto_blkcipher_alg(
	struct crypto_blkcipher *tfm)
{
	return &crypto_blkcipher_tfm(tfm)->__crt_alg->cra_blkcipher;
}

/**
 * crypto_blkcipher_ivsize() - obtain IV size
 * @tfm: cipher handle
 *
 * The size of the IV for the block cipher referenced by the cipher handle is
 * returned. This IV size may be zero if the cipher does not need an IV.
 *
 * Return: IV size in bytes
 */
static inline unsigned int crypto_blkcipher_ivsize(struct crypto_blkcipher *tfm)
{
	return crypto_blkcipher_alg(tfm)->ivsize;
}

/**
 * crypto_blkcipher_blocksize() - obtain block size of cipher
 * @tfm: cipher handle
 *
 * The block size for the block cipher referenced with the cipher handle is
 * returned. The caller may use that information to allocate appropriate
 * memory for the data returned by the encryption or decryption operation.
 *
 * Return: block size of cipher
 */
static inline unsigned int crypto_blkcipher_blocksize(
	struct crypto_blkcipher *tfm)
{
	return crypto_tfm_alg_blocksize(crypto_blkcipher_tfm(tfm));
}

static inline unsigned int crypto_blkcipher_alignmask(
	struct crypto_blkcipher *tfm)
{
	return crypto_tfm_alg_alignmask(crypto_blkcipher_tfm(tfm));
}

static inline u32 crypto_blkcipher_get_flags(struct crypto_blkcipher *tfm)
{
	return crypto_tfm_get_flags(crypto_blkcipher_tfm(tfm));
}

static inline void crypto_blkcipher_set_flags(struct crypto_blkcipher *tfm,
					      u32 flags)
{
	crypto_tfm_set_flags(crypto_blkcipher_tfm(tfm), flags);
}

static inline void crypto_blkcipher_clear_flags(struct crypto_blkcipher *tfm,
						u32 flags)
{
	crypto_tfm_clear_flags(crypto_blkcipher_tfm(tfm), flags);
}

/**
 * crypto_blkcipher_setkey() - set key for cipher
 * @tfm: cipher handle
 * @key: buffer holding the key
 * @keylen: length of the key in bytes
 *
 * The caller provided key is set for the block cipher referenced by the cipher
 * handle.
 *
 * Note, the key length determines the cipher variant. Many block ciphers
 * support several key sizes, such as AES-128 vs. AES-192 vs. AES-256. When
 * providing a 16 byte key for an AES cipher handle, AES-128 is performed.
 *
 * Return: 0 if the setting of the key was successful; < 0 if an error occurred
 */
static inline int crypto_blkcipher_setkey(struct crypto_blkcipher *tfm,
					  const u8 *key, unsigned int keylen)
{
	return crypto_blkcipher_crt(tfm)->setkey(crypto_blkcipher_tfm(tfm),
						 key, keylen);
}

/**
 * crypto_blkcipher_encrypt() - encrypt plaintext
 * @desc: reference to the block cipher handle with meta data
 * @dst: scatter/gather list that is filled by the cipher operation with the
 *	 ciphertext
 * @src: scatter/gather list that holds the plaintext
 * @nbytes: number of bytes of the plaintext to encrypt.
 *
 * Encrypt plaintext data using the IV set by the caller with a preceding
 * call of crypto_blkcipher_set_iv.
 *
 * The blkcipher_desc data structure must be filled by the caller and can
 * reside on the stack. The caller must fill desc as follows: desc.tfm is filled
 * with the block cipher handle; desc.flags is filled with either
 * CRYPTO_TFM_REQ_MAY_SLEEP or 0.
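 *
 * For instance (illustrative only, assuming a valid tfm)::
 *
 *	struct blkcipher_desc desc = {
 *		.tfm	= tfm,
 *		.flags	= CRYPTO_TFM_REQ_MAY_SLEEP,
 *	};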
 *
 * Return: 0 if the cipher operation was successful; < 0 if an error occurred
 */
static inline int crypto_blkcipher_encrypt(struct blkcipher_desc *desc,
					   struct scatterlist *dst,
					   struct scatterlist *src,
					   unsigned int nbytes)
{
	desc->info = crypto_blkcipher_crt(desc->tfm)->iv;
	return crypto_blkcipher_crt(desc->tfm)->encrypt(desc, dst, src, nbytes);
}

/**
 * crypto_blkcipher_encrypt_iv() - encrypt plaintext with dedicated IV
 * @desc: reference to the block cipher handle with meta data
 * @dst: scatter/gather list that is filled by the cipher operation with the
 *	 ciphertext
 * @src: scatter/gather list that holds the plaintext
 * @nbytes: number of bytes of the plaintext to encrypt.
 *
 * Encrypt plaintext data with the use of an IV that is solely used for this
 * cipher operation. Any previously set IV is not used.
 *
 * The blkcipher_desc data structure must be filled by the caller and can
 * reside on the stack. The caller must fill desc as follows: desc.tfm is filled
 * with the block cipher handle; desc.info is filled with the IV to be used for
 * the current operation; desc.flags is filled with either
 * CRYPTO_TFM_REQ_MAY_SLEEP or 0.
 *
 * Return: 0 if the cipher operation was successful; < 0 if an error occurred
 */
static inline int crypto_blkcipher_encrypt_iv(struct blkcipher_desc *desc,
					      struct scatterlist *dst,
					      struct scatterlist *src,
					      unsigned int nbytes)
{
	return crypto_blkcipher_crt(desc->tfm)->encrypt(desc, dst, src, nbytes);
}

/**
 * crypto_blkcipher_decrypt() - decrypt ciphertext
 * @desc: reference to the block cipher handle with meta data
 * @dst: scatter/gather list that is filled by the cipher operation with the
 *	 plaintext
 * @src: scatter/gather list that holds the ciphertext
 * @nbytes: number of bytes of the ciphertext to decrypt.
 *
 * Decrypt ciphertext data using the IV set by the caller with a preceding
 * call of crypto_blkcipher_set_iv.
 *
 * The blkcipher_desc data structure must be filled by the caller as documented
 * for the crypto_blkcipher_encrypt call above.
 *
 * Return: 0 if the cipher operation was successful; < 0 if an error occurred
 *
 */
static inline int crypto_blkcipher_decrypt(struct blkcipher_desc *desc,
					   struct scatterlist *dst,
					   struct scatterlist *src,
					   unsigned int nbytes)
{
	desc->info = crypto_blkcipher_crt(desc->tfm)->iv;
	return crypto_blkcipher_crt(desc->tfm)->decrypt(desc, dst, src, nbytes);
}

/**
 * crypto_blkcipher_decrypt_iv() - decrypt ciphertext with dedicated IV
 * @desc: reference to the block cipher handle with meta data
 * @dst: scatter/gather list that is filled by the cipher operation with the
 *	 plaintext
 * @src: scatter/gather list that holds the ciphertext
 * @nbytes: number of bytes of the ciphertext to decrypt.
 *
 * Decrypt ciphertext data with the use of an IV that is solely used for this
 * cipher operation. Any previously set IV is not used.
 *
 * The blkcipher_desc data structure must be filled by the caller as documented
 * for the crypto_blkcipher_encrypt_iv call above.
 *
 * Return: 0 if the cipher operation was successful; < 0 if an error occurred
 */
static inline int crypto_blkcipher_decrypt_iv(struct blkcipher_desc *desc,
					      struct scatterlist *dst,
					      struct scatterlist *src,
					      unsigned int nbytes)
{
	return crypto_blkcipher_crt(desc->tfm)->decrypt(desc, dst, src, nbytes);
}

/**
 * crypto_blkcipher_set_iv() - set IV for cipher
 * @tfm: cipher handle
 * @src: buffer holding the IV
 * @len: length of the IV in bytes
 *
 * The caller provided IV is set for the block cipher referenced by the cipher
 * handle.
 */
static inline void crypto_blkcipher_set_iv(struct crypto_blkcipher *tfm,
					   const u8 *src, unsigned int len)
{
	memcpy(crypto_blkcipher_crt(tfm)->iv, src, len);
}

/**
 * crypto_blkcipher_get_iv() - obtain IV from cipher
 * @tfm: cipher handle
 * @dst: buffer filled with the IV
 * @len: length of the buffer dst
 *
 * The caller can obtain the IV set for the block cipher referenced by the
 * cipher handle and store it into the user-provided buffer. If the buffer
 * has an insufficient space, the IV is truncated to fit the buffer.
 */
static inline void crypto_blkcipher_get_iv(struct crypto_blkcipher *tfm,
					   u8 *dst, unsigned int len)
{
	memcpy(dst, crypto_blkcipher_crt(tfm)->iv, len);
}

/**
 * DOC: Single Block Cipher API
 *
 * The single block cipher API is used with the ciphers of type
 * CRYPTO_ALG_TYPE_CIPHER (listed as type "cipher" in /proc/crypto).
 *
 * Using the single block cipher API calls, operations with the basic cipher
 * primitive can be implemented. These cipher primitives exclude any block
 * chaining operations including IV handling.
 *
 * The purpose of this single block cipher API is to support the implementation
 * of templates or other concepts that only need to perform the cipher operation
 * on one block at a time. Templates invoke the underlying cipher primitive
 * block-wise and process either the input or the output data of these cipher
 * operations.
 */

static inline struct crypto_cipher *__crypto_cipher_cast(struct crypto_tfm *tfm)
{
	return (struct crypto_cipher *)tfm;
}

static inline struct crypto_cipher *crypto_cipher_cast(struct crypto_tfm *tfm)
{
	BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
	return __crypto_cipher_cast(tfm);
}

/**
 * crypto_alloc_cipher() - allocate single block cipher handle
 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
 *	      single block cipher
 * @type: specifies the type of the cipher
 * @mask: specifies the mask for the cipher
 *
 * Allocate a cipher handle for a single block cipher. The returned struct
 * crypto_cipher is the cipher handle that is required for any subsequent API
 * invocation for that single block cipher.
 *
 * Return: allocated cipher handle in case of success; IS_ERR() is true in case
 *	   of an error, PTR_ERR() returns the error code.
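 *
 * A brief usage sketch (illustrative only; the algorithm name "aes" and the
 * key/block buffers are assumptions supplied by the caller)::
 *
 *	struct crypto_cipher *tfm;
 *
 *	tfm = crypto_alloc_cipher("aes", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *
 *	crypto_cipher_setkey(tfm, key, keylen);
 *	crypto_cipher_encrypt_one(tfm, dst, src);
 *	crypto_free_cipher(tfm);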
 */
static inline struct crypto_cipher *crypto_alloc_cipher(const char *alg_name,
							u32 type, u32 mask)
{
	type &= ~CRYPTO_ALG_TYPE_MASK;
	type |= CRYPTO_ALG_TYPE_CIPHER;
	mask |= CRYPTO_ALG_TYPE_MASK;

	return __crypto_cipher_cast(crypto_alloc_base(alg_name, type, mask));
}

static inline struct crypto_tfm *crypto_cipher_tfm(struct crypto_cipher *tfm)
{
	return &tfm->base;
}

/**
 * crypto_free_cipher() - zeroize and free the single block cipher handle
 * @tfm: cipher handle to be freed
 */
static inline void crypto_free_cipher(struct crypto_cipher *tfm)
{
	crypto_free_tfm(crypto_cipher_tfm(tfm));
}

/**
 * crypto_has_cipher() - Search for the availability of a single block cipher
 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
 *	      single block cipher
 * @type: specifies the type of the cipher
 * @mask: specifies the mask for the cipher
 *
 * Return: true when the single block cipher is known to the kernel crypto API;
 *	   false otherwise
 */
static inline int crypto_has_cipher(const char *alg_name, u32 type, u32 mask)
{
	type &= ~CRYPTO_ALG_TYPE_MASK;
	type |= CRYPTO_ALG_TYPE_CIPHER;
	mask |= CRYPTO_ALG_TYPE_MASK;

	return crypto_has_alg(alg_name, type, mask);
}

static inline struct cipher_tfm *crypto_cipher_crt(struct crypto_cipher *tfm)
{
	return &crypto_cipher_tfm(tfm)->crt_cipher;
}

/**
 * crypto_cipher_blocksize() - obtain block size for cipher
 * @tfm: cipher handle
 *
 * The block size for the single block cipher referenced with the cipher handle
 * tfm is returned. The caller may use that information to allocate appropriate
 * memory for the data returned by the encryption or decryption operation.
 *
 * Return: block size of cipher
 */
static inline unsigned int crypto_cipher_blocksize(struct crypto_cipher *tfm)
{
	return crypto_tfm_alg_blocksize(crypto_cipher_tfm(tfm));
}

static inline unsigned int crypto_cipher_alignmask(struct crypto_cipher *tfm)
{
	return crypto_tfm_alg_alignmask(crypto_cipher_tfm(tfm));
}

static inline u32 crypto_cipher_get_flags(struct crypto_cipher *tfm)
{
	return crypto_tfm_get_flags(crypto_cipher_tfm(tfm));
}

static inline void crypto_cipher_set_flags(struct crypto_cipher *tfm,
					   u32 flags)
{
	crypto_tfm_set_flags(crypto_cipher_tfm(tfm), flags);
}

static inline void crypto_cipher_clear_flags(struct crypto_cipher *tfm,
					     u32 flags)
{
	crypto_tfm_clear_flags(crypto_cipher_tfm(tfm), flags);
}

/**
 * crypto_cipher_setkey() - set key for cipher
 * @tfm: cipher handle
 * @key: buffer holding the key
 * @keylen: length of the key in bytes
 *
 * The caller provided key is set for the single block cipher referenced by the
 * cipher handle.
 *
 * Note, the key length determines the cipher variant. Many block ciphers
 * support several key sizes, such as AES-128 vs. AES-192 vs. AES-256. When
 * providing a 16 byte key for an AES cipher handle, AES-128 is performed.
 *
 * Return: 0 if the setting of the key was successful; < 0 if an error occurred
 */
static inline int crypto_cipher_setkey(struct crypto_cipher *tfm,
				       const u8 *key, unsigned int keylen)
{
	return crypto_cipher_crt(tfm)->cit_setkey(crypto_cipher_tfm(tfm),
						  key, keylen);
}

/**
 * crypto_cipher_encrypt_one() - encrypt one block of plaintext
 * @tfm: cipher handle
 * @dst: points to the buffer that will be filled with the ciphertext
 * @src: buffer holding the plaintext to be encrypted
 *
 * Invoke the encryption operation of one block. The caller must ensure that
 * the plaintext and ciphertext buffers are at least one block in size.
 */
static inline void crypto_cipher_encrypt_one(struct crypto_cipher *tfm,
					     u8 *dst, const u8 *src)
{
	crypto_cipher_crt(tfm)->cit_encrypt_one(crypto_cipher_tfm(tfm),
						dst, src);
}

/**
 * crypto_cipher_decrypt_one() - decrypt one block of ciphertext
 * @tfm: cipher handle
 * @dst: points to the buffer that will be filled with the plaintext
 * @src: buffer holding the ciphertext to be decrypted
 *
 * Invoke the decryption operation of one block. The caller must ensure that
 * the plaintext and ciphertext buffers are at least one block in size.
 */
static inline void crypto_cipher_decrypt_one(struct crypto_cipher *tfm,
					     u8 *dst, const u8 *src)
{
	crypto_cipher_crt(tfm)->cit_decrypt_one(crypto_cipher_tfm(tfm),
						dst, src);
}

static inline struct crypto_comp *__crypto_comp_cast(struct crypto_tfm *tfm)
{
	return (struct crypto_comp *)tfm;
}

static inline struct crypto_comp *crypto_comp_cast(struct crypto_tfm *tfm)
{
	BUG_ON((crypto_tfm_alg_type(tfm) ^ CRYPTO_ALG_TYPE_COMPRESS) &
	       CRYPTO_ALG_TYPE_MASK);
	return __crypto_comp_cast(tfm);
}

static inline struct crypto_comp *crypto_alloc_comp(const char *alg_name,
						    u32 type, u32 mask)
{
	type &= ~CRYPTO_ALG_TYPE_MASK;
	type |= CRYPTO_ALG_TYPE_COMPRESS;
	mask |= CRYPTO_ALG_TYPE_MASK;

	return __crypto_comp_cast(crypto_alloc_base(alg_name, type, mask));
}

static inline struct crypto_tfm *crypto_comp_tfm(struct crypto_comp *tfm)
{
	return &tfm->base;
}

static inline void crypto_free_comp(struct crypto_comp *tfm)
{
	crypto_free_tfm(crypto_comp_tfm(tfm));
}

static inline int crypto_has_comp(const char *alg_name, u32 type, u32 mask)
{
	type &= ~CRYPTO_ALG_TYPE_MASK;
	type |= CRYPTO_ALG_TYPE_COMPRESS;
	mask |= CRYPTO_ALG_TYPE_MASK;

	return crypto_has_alg(alg_name, type, mask);
}

static inline const char *crypto_comp_name(struct crypto_comp *tfm)
{
	return crypto_tfm_alg_name(crypto_comp_tfm(tfm));
}

static inline struct compress_tfm *crypto_comp_crt(struct crypto_comp *tfm)
{
	return &crypto_comp_tfm(tfm)->crt_compress;
}

static inline int crypto_comp_compress(struct crypto_comp *tfm,
				       const u8 *src, unsigned int slen,
				       u8 *dst, unsigned int *dlen)
{
	return crypto_comp_crt(tfm)->cot_compress(crypto_comp_tfm(tfm),
						  src, slen, dst, dlen);
}

static inline int crypto_comp_decompress(struct crypto_comp *tfm,
					 const u8 *src, unsigned int slen,
					 u8 *dst, unsigned int *dlen)
{
	return crypto_comp_crt(tfm)->cot_decompress(crypto_comp_tfm(tfm),
						    src, slen, dst,
						    dlen);
}

#endif	/* _LINUX_CRYPTO_H */