// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Shared crypto simd helpers
 *
 * Copyright (c) 2012 Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
 * Copyright (c) 2016 Herbert Xu <herbert@gondor.apana.org.au>
 * Copyright (c) 2019 Google LLC
 *
 * Based on aesni-intel_glue.c by:
 *  Copyright (C) 2008, Intel Corp.
 *    Author: Huang Ying <ying.huang@intel.com>
 */

/*
 * Shared crypto SIMD helpers.  These functions dynamically create and register
 * an skcipher or AEAD algorithm that wraps another, internal algorithm.  The
 * wrapper ensures that the internal algorithm is only executed in a context
 * where SIMD instructions are usable, i.e. where may_use_simd() returns true.
 * If SIMD is already usable, the wrapper directly calls the internal
 * algorithm.  Otherwise it defers execution to a workqueue via cryptd.
 *
 * This is an alternative to the internal algorithm implementing a fallback
 * for the !may_use_simd() case itself.
 *
 * Note that the wrapper algorithm is asynchronous, i.e. it has the
 * CRYPTO_ALG_ASYNC flag set.  Therefore it won't be found by users who
 * explicitly allocate a synchronous algorithm.
 */

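/*
 * Typical usage, as a rough sketch rather than code from any particular
 * driver: a glue module first registers its SIMD-only implementation under
 * a "__"-prefixed name with CRYPTO_ALG_INTERNAL set, then wraps it.  The
 * names "cbc-aes-myarch" and "__cbc-aes-myarch" below are hypothetical.
 *
 *	struct simd_skcipher_alg *simd;
 *
 *	simd = simd_skcipher_create_compat("cbc(aes)", "cbc-aes-myarch",
 *					   "__cbc-aes-myarch");
 *	if (IS_ERR(simd))
 *		return PTR_ERR(simd);
 *	...
 *	simd_skcipher_free(simd);
 *
 * Drivers with several internal algorithms can instead use the bulk helpers
 * simd_register_skciphers_compat() / simd_register_aeads_compat() below.
 */
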
#include <crypto/cryptd.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/preempt.h>
#include <asm/simd.h>

/* skcipher support */

struct simd_skcipher_alg {
	const char *ialg_name;
	struct skcipher_alg alg;
};

struct simd_skcipher_ctx {
	struct cryptd_skcipher *cryptd_tfm;
};

static int simd_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
				unsigned int key_len)
{
	struct simd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_skcipher *child = &ctx->cryptd_tfm->base;

	crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(tfm) &
					 CRYPTO_TFM_REQ_MASK);
	return crypto_skcipher_setkey(child, key, key_len);
}

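/*
 * Dispatch rule shared by the encrypt and decrypt paths: when SIMD is
 * usable, the subrequest is normally handed straight to the internal child
 * algorithm in the caller's context.  It is routed through the cryptd tfm
 * instead when SIMD is not usable, or when the caller is atomic while
 * cryptd still has requests queued, so that requests deferred earlier are
 * not overtaken and completed out of order.
 */
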
static int simd_skcipher_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct simd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_request *subreq;
	struct crypto_skcipher *child;

	subreq = skcipher_request_ctx(req);
	*subreq = *req;

	if (!crypto_simd_usable() ||
	    (in_atomic() && cryptd_skcipher_queued(ctx->cryptd_tfm)))
		child = &ctx->cryptd_tfm->base;
	else
		child = cryptd_skcipher_child(ctx->cryptd_tfm);

	skcipher_request_set_tfm(subreq, child);

	return crypto_skcipher_encrypt(subreq);
}

static int simd_skcipher_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct simd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_request *subreq;
	struct crypto_skcipher *child;

	subreq = skcipher_request_ctx(req);
	*subreq = *req;

	if (!crypto_simd_usable() ||
	    (in_atomic() && cryptd_skcipher_queued(ctx->cryptd_tfm)))
		child = &ctx->cryptd_tfm->base;
	else
		child = cryptd_skcipher_child(ctx->cryptd_tfm);

	skcipher_request_set_tfm(subreq, child);

	return crypto_skcipher_decrypt(subreq);
}

static void simd_skcipher_exit(struct crypto_skcipher *tfm)
{
	struct simd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);

	cryptd_free_skcipher(ctx->cryptd_tfm);
}

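/*
 * The wrapper's request context embeds the subrequest, which may target
 * either the bare internal tfm or the cryptd tfm depending on the dispatch
 * above, so it must be sized for the larger of the two child reqsizes.
 */
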
static int simd_skcipher_init(struct crypto_skcipher *tfm)
{
	struct simd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct cryptd_skcipher *cryptd_tfm;
	struct simd_skcipher_alg *salg;
	struct skcipher_alg *alg;
	unsigned int reqsize;

	alg = crypto_skcipher_alg(tfm);
	salg = container_of(alg, struct simd_skcipher_alg, alg);

	cryptd_tfm = cryptd_alloc_skcipher(salg->ialg_name,
					   CRYPTO_ALG_INTERNAL,
					   CRYPTO_ALG_INTERNAL);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);

	ctx->cryptd_tfm = cryptd_tfm;

	reqsize = crypto_skcipher_reqsize(cryptd_skcipher_child(cryptd_tfm));
	reqsize = max(reqsize, crypto_skcipher_reqsize(&cryptd_tfm->base));
	reqsize += sizeof(struct skcipher_request);

	crypto_skcipher_set_reqsize(tfm, reqsize);

	return 0;
}

struct simd_skcipher_alg *simd_skcipher_create_compat(const char *algname,
						      const char *drvname,
						      const char *basename)
{
	struct simd_skcipher_alg *salg;
	struct crypto_skcipher *tfm;
	struct skcipher_alg *ialg;
	struct skcipher_alg *alg;
	int err;

	tfm = crypto_alloc_skcipher(basename, CRYPTO_ALG_INTERNAL,
				    CRYPTO_ALG_INTERNAL | CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);

	ialg = crypto_skcipher_alg(tfm);

	salg = kzalloc(sizeof(*salg), GFP_KERNEL);
	if (!salg) {
		salg = ERR_PTR(-ENOMEM);
		goto out_put_tfm;
	}

	salg->ialg_name = basename;
	alg = &salg->alg;

	err = -ENAMETOOLONG;
	if (snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", algname) >=
	    CRYPTO_MAX_ALG_NAME)
		goto out_free_salg;

	if (snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		     drvname) >= CRYPTO_MAX_ALG_NAME)
		goto out_free_salg;

	alg->base.cra_flags = CRYPTO_ALG_ASYNC;
	alg->base.cra_priority = ialg->base.cra_priority;
	alg->base.cra_blocksize = ialg->base.cra_blocksize;
	alg->base.cra_alignmask = ialg->base.cra_alignmask;
	alg->base.cra_module = ialg->base.cra_module;
	alg->base.cra_ctxsize = sizeof(struct simd_skcipher_ctx);

	alg->ivsize = ialg->ivsize;
	alg->chunksize = ialg->chunksize;
	alg->min_keysize = ialg->min_keysize;
	alg->max_keysize = ialg->max_keysize;

	alg->init = simd_skcipher_init;
	alg->exit = simd_skcipher_exit;

	alg->setkey = simd_skcipher_setkey;
	alg->encrypt = simd_skcipher_encrypt;
	alg->decrypt = simd_skcipher_decrypt;

	err = crypto_register_skcipher(alg);
	if (err)
		goto out_free_salg;

out_put_tfm:
	crypto_free_skcipher(tfm);
	return salg;

out_free_salg:
	kfree(salg);
	salg = ERR_PTR(err);
	goto out_put_tfm;
}
EXPORT_SYMBOL_GPL(simd_skcipher_create_compat);

struct simd_skcipher_alg *simd_skcipher_create(const char *algname,
					       const char *basename)
{
	char drvname[CRYPTO_MAX_ALG_NAME];

	if (snprintf(drvname, CRYPTO_MAX_ALG_NAME, "simd-%s", basename) >=
	    CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-ENAMETOOLONG);

	return simd_skcipher_create_compat(algname, drvname, basename);
}
EXPORT_SYMBOL_GPL(simd_skcipher_create);

void simd_skcipher_free(struct simd_skcipher_alg *salg)
{
	crypto_unregister_skcipher(&salg->alg);
	kfree(salg);
}
EXPORT_SYMBOL_GPL(simd_skcipher_free);

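/*
 * Bulk registration helper: registers the internal algorithms themselves,
 * then creates a SIMD wrapper for each one.  The internal algorithms are
 * expected to carry a "__" prefix on both cra_name and cra_driver_name
 * (checked by the WARN_ONs below); the wrapper's names are derived by
 * stripping that prefix.
 */
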
int simd_register_skciphers_compat(struct skcipher_alg *algs, int count,
				   struct simd_skcipher_alg **simd_algs)
{
	int err;
	int i;
	const char *algname;
	const char *drvname;
	const char *basename;
	struct simd_skcipher_alg *simd;

	err = crypto_register_skciphers(algs, count);
	if (err)
		return err;

	for (i = 0; i < count; i++) {
		WARN_ON(strncmp(algs[i].base.cra_name, "__", 2));
		WARN_ON(strncmp(algs[i].base.cra_driver_name, "__", 2));
		algname = algs[i].base.cra_name + 2;
		drvname = algs[i].base.cra_driver_name + 2;
		basename = algs[i].base.cra_driver_name;
		simd = simd_skcipher_create_compat(algname, drvname, basename);
		err = PTR_ERR(simd);
		if (IS_ERR(simd))
			goto err_unregister;
		simd_algs[i] = simd;
	}
	return 0;

err_unregister:
	simd_unregister_skciphers(algs, count, simd_algs);
	return err;
}
EXPORT_SYMBOL_GPL(simd_register_skciphers_compat);

void simd_unregister_skciphers(struct skcipher_alg *algs, int count,
			       struct simd_skcipher_alg **simd_algs)
{
	int i;

	crypto_unregister_skciphers(algs, count);

	for (i = 0; i < count; i++) {
		if (simd_algs[i]) {
			simd_skcipher_free(simd_algs[i]);
			simd_algs[i] = NULL;
		}
	}
}
EXPORT_SYMBOL_GPL(simd_unregister_skciphers);

/* AEAD support */

struct simd_aead_alg {
	const char *ialg_name;
	struct aead_alg alg;
};

struct simd_aead_ctx {
	struct cryptd_aead *cryptd_tfm;
};

static int simd_aead_setkey(struct crypto_aead *tfm, const u8 *key,
			    unsigned int key_len)
{
	struct simd_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct crypto_aead *child = &ctx->cryptd_tfm->base;

	crypto_aead_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_aead_set_flags(child, crypto_aead_get_flags(tfm) &
				     CRYPTO_TFM_REQ_MASK);
	return crypto_aead_setkey(child, key, key_len);
}

static int simd_aead_setauthsize(struct crypto_aead *tfm,
				 unsigned int authsize)
{
	struct simd_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct crypto_aead *child = &ctx->cryptd_tfm->base;

	return crypto_aead_setauthsize(child, authsize);
}

static int simd_aead_encrypt(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct simd_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct aead_request *subreq;
	struct crypto_aead *child;

	subreq = aead_request_ctx(req);
	*subreq = *req;

	if (!crypto_simd_usable() ||
	    (in_atomic() && cryptd_aead_queued(ctx->cryptd_tfm)))
		child = &ctx->cryptd_tfm->base;
	else
		child = cryptd_aead_child(ctx->cryptd_tfm);

	aead_request_set_tfm(subreq, child);

	return crypto_aead_encrypt(subreq);
}

static int simd_aead_decrypt(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct simd_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct aead_request *subreq;
	struct crypto_aead *child;

	subreq = aead_request_ctx(req);
	*subreq = *req;

	if (!crypto_simd_usable() ||
	    (in_atomic() && cryptd_aead_queued(ctx->cryptd_tfm)))
		child = &ctx->cryptd_tfm->base;
	else
		child = cryptd_aead_child(ctx->cryptd_tfm);

	aead_request_set_tfm(subreq, child);

	return crypto_aead_decrypt(subreq);
}

static void simd_aead_exit(struct crypto_aead *tfm)
{
	struct simd_aead_ctx *ctx = crypto_aead_ctx(tfm);

	cryptd_free_aead(ctx->cryptd_tfm);
}

static int simd_aead_init(struct crypto_aead *tfm)
{
	struct simd_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct cryptd_aead *cryptd_tfm;
	struct simd_aead_alg *salg;
	struct aead_alg *alg;
	unsigned int reqsize;

	alg = crypto_aead_alg(tfm);
	salg = container_of(alg, struct simd_aead_alg, alg);

	cryptd_tfm = cryptd_alloc_aead(salg->ialg_name, CRYPTO_ALG_INTERNAL,
				       CRYPTO_ALG_INTERNAL);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);

	ctx->cryptd_tfm = cryptd_tfm;

	reqsize = crypto_aead_reqsize(cryptd_aead_child(cryptd_tfm));
	reqsize = max(reqsize, crypto_aead_reqsize(&cryptd_tfm->base));
	reqsize += sizeof(struct aead_request);

	crypto_aead_set_reqsize(tfm, reqsize);

	return 0;
}

struct simd_aead_alg *simd_aead_create_compat(const char *algname,
					      const char *drvname,
					      const char *basename)
{
	struct simd_aead_alg *salg;
	struct crypto_aead *tfm;
	struct aead_alg *ialg;
	struct aead_alg *alg;
	int err;

	tfm = crypto_alloc_aead(basename, CRYPTO_ALG_INTERNAL,
				CRYPTO_ALG_INTERNAL | CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);

	ialg = crypto_aead_alg(tfm);

	salg = kzalloc(sizeof(*salg), GFP_KERNEL);
	if (!salg) {
		salg = ERR_PTR(-ENOMEM);
		goto out_put_tfm;
	}

	salg->ialg_name = basename;
	alg = &salg->alg;

	err = -ENAMETOOLONG;
	if (snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", algname) >=
	    CRYPTO_MAX_ALG_NAME)
		goto out_free_salg;

	if (snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		     drvname) >= CRYPTO_MAX_ALG_NAME)
		goto out_free_salg;

	alg->base.cra_flags = CRYPTO_ALG_ASYNC;
	alg->base.cra_priority = ialg->base.cra_priority;
	alg->base.cra_blocksize = ialg->base.cra_blocksize;
	alg->base.cra_alignmask = ialg->base.cra_alignmask;
	alg->base.cra_module = ialg->base.cra_module;
	alg->base.cra_ctxsize = sizeof(struct simd_aead_ctx);

	alg->ivsize = ialg->ivsize;
	alg->maxauthsize = ialg->maxauthsize;
	alg->chunksize = ialg->chunksize;

	alg->init = simd_aead_init;
	alg->exit = simd_aead_exit;

	alg->setkey = simd_aead_setkey;
	alg->setauthsize = simd_aead_setauthsize;
	alg->encrypt = simd_aead_encrypt;
	alg->decrypt = simd_aead_decrypt;

	err = crypto_register_aead(alg);
	if (err)
		goto out_free_salg;

out_put_tfm:
	crypto_free_aead(tfm);
	return salg;

out_free_salg:
	kfree(salg);
	salg = ERR_PTR(err);
	goto out_put_tfm;
}
EXPORT_SYMBOL_GPL(simd_aead_create_compat);

struct simd_aead_alg *simd_aead_create(const char *algname,
				       const char *basename)
{
	char drvname[CRYPTO_MAX_ALG_NAME];

	if (snprintf(drvname, CRYPTO_MAX_ALG_NAME, "simd-%s", basename) >=
	    CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-ENAMETOOLONG);

	return simd_aead_create_compat(algname, drvname, basename);
}
EXPORT_SYMBOL_GPL(simd_aead_create);

void simd_aead_free(struct simd_aead_alg *salg)
{
	crypto_unregister_aead(&salg->alg);
	kfree(salg);
}
EXPORT_SYMBOL_GPL(simd_aead_free);

int simd_register_aeads_compat(struct aead_alg *algs, int count,
			       struct simd_aead_alg **simd_algs)
{
	int err;
	int i;
	const char *algname;
	const char *drvname;
	const char *basename;
	struct simd_aead_alg *simd;

	err = crypto_register_aeads(algs, count);
	if (err)
		return err;

	for (i = 0; i < count; i++) {
		WARN_ON(strncmp(algs[i].base.cra_name, "__", 2));
		WARN_ON(strncmp(algs[i].base.cra_driver_name, "__", 2));
		algname = algs[i].base.cra_name + 2;
		drvname = algs[i].base.cra_driver_name + 2;
		basename = algs[i].base.cra_driver_name;
		simd = simd_aead_create_compat(algname, drvname, basename);
		err = PTR_ERR(simd);
		if (IS_ERR(simd))
			goto err_unregister;
		simd_algs[i] = simd;
	}
	return 0;

err_unregister:
	simd_unregister_aeads(algs, count, simd_algs);
	return err;
}
EXPORT_SYMBOL_GPL(simd_register_aeads_compat);

void simd_unregister_aeads(struct aead_alg *algs, int count,
			   struct simd_aead_alg **simd_algs)
{
	int i;

	crypto_unregister_aeads(algs, count);

	for (i = 0; i < count; i++) {
		if (simd_algs[i]) {
			simd_aead_free(simd_algs[i]);
			simd_algs[i] = NULL;
		}
	}
}
EXPORT_SYMBOL_GPL(simd_unregister_aeads);

MODULE_LICENSE("GPL");