/*
 * Shared crypto SIMD helpers
 *
 * Copyright (c) 2012 Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
 * Copyright (c) 2016 Herbert Xu <herbert@gondor.apana.org.au>
 * Copyright (c) 2019 Google LLC
 *
 * Based on aesni-intel_glue.c by:
 *  Copyright (C) 2008, Intel Corp.
 *    Author: Huang Ying <ying.huang@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * Shared crypto SIMD helpers.  These functions dynamically create and register
 * an skcipher or AEAD algorithm that wraps another, internal algorithm.  The
 * wrapper ensures that the internal algorithm is only executed in a context
 * where SIMD instructions are usable, i.e. where may_use_simd() returns true.
 * If SIMD is already usable, the wrapper directly calls the internal
 * algorithm.  Otherwise it defers execution to a workqueue via cryptd.
 *
 * This is an alternative to the internal algorithm implementing a fallback
 * for the !may_use_simd() case itself.
 *
 * Note that the wrapper algorithm is asynchronous, i.e. it has the
 * CRYPTO_ALG_ASYNC flag set.  Therefore it won't be found by users who
 * explicitly allocate a synchronous algorithm.
 */

#include <crypto/cryptd.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/preempt.h>
#include <asm/simd.h>

/* skcipher support */

struct simd_skcipher_alg {
	const char *ialg_name;
	struct skcipher_alg alg;
};

struct simd_skcipher_ctx {
	struct cryptd_skcipher *cryptd_tfm;
};

static int simd_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
				unsigned int key_len)
{
	struct simd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_skcipher *child = &ctx->cryptd_tfm->base;
	int err;

	crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(tfm) &
					 CRYPTO_TFM_REQ_MASK);
	err = crypto_skcipher_setkey(child, key, key_len);
	crypto_skcipher_set_flags(tfm, crypto_skcipher_get_flags(child) &
				       CRYPTO_TFM_RES_MASK);
	return err;
}

static int simd_skcipher_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct simd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_request *subreq;
	struct crypto_skcipher *child;

	subreq = skcipher_request_ctx(req);
	*subreq = *req;

	/*
	 * Defer to cryptd if SIMD is unusable, or if cryptd already has
	 * requests queued and we are in atomic context, so that requests
	 * are not reordered with respect to those already in the queue.
	 */
	if (!crypto_simd_usable() ||
	    (in_atomic() && cryptd_skcipher_queued(ctx->cryptd_tfm)))
		child = &ctx->cryptd_tfm->base;
	else
		child = cryptd_skcipher_child(ctx->cryptd_tfm);

	skcipher_request_set_tfm(subreq, child);

	return crypto_skcipher_encrypt(subreq);
}

static int simd_skcipher_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct simd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_request *subreq;
	struct crypto_skcipher *child;

	subreq = skcipher_request_ctx(req);
	*subreq = *req;

	if (!crypto_simd_usable() ||
	    (in_atomic() && cryptd_skcipher_queued(ctx->cryptd_tfm)))
		child = &ctx->cryptd_tfm->base;
	else
		child = cryptd_skcipher_child(ctx->cryptd_tfm);

	skcipher_request_set_tfm(subreq, child);

	return crypto_skcipher_decrypt(subreq);
}

static void simd_skcipher_exit(struct crypto_skcipher *tfm)
{
	struct simd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);

	cryptd_free_skcipher(ctx->cryptd_tfm);
}

static int simd_skcipher_init(struct crypto_skcipher *tfm)
{
	struct simd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct cryptd_skcipher *cryptd_tfm;
	struct simd_skcipher_alg *salg;
	struct skcipher_alg *alg;
	unsigned reqsize;

	alg = crypto_skcipher_alg(tfm);
	salg = container_of(alg, struct simd_skcipher_alg, alg);

	cryptd_tfm = cryptd_alloc_skcipher(salg->ialg_name,
					   CRYPTO_ALG_INTERNAL,
					   CRYPTO_ALG_INTERNAL);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);

	ctx->cryptd_tfm = cryptd_tfm;

	/*
	 * The subrequest may be handed to either the cryptd tfm or its
	 * child, so reserve room for the larger of the two request sizes.
	 */
	reqsize = crypto_skcipher_reqsize(cryptd_skcipher_child(cryptd_tfm));
	reqsize = max(reqsize, crypto_skcipher_reqsize(&cryptd_tfm->base));
	reqsize += sizeof(struct skcipher_request);

	crypto_skcipher_set_reqsize(tfm, reqsize);

	return 0;
}

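/*
 * Illustrative sketch (not part of this file): because the wrapper is
 * asynchronous, callers must be prepared for -EINPROGRESS/-EBUSY and wait
 * for completion, e.g. with the crypto_wait_req() helper.  The algorithm
 * name and the src/dst/nbytes/iv buffers below are hypothetical.
 *
 *	DECLARE_CRYPTO_WAIT(wait);
 *	struct crypto_skcipher *tfm;
 *	struct skcipher_request *req;
 *	int err;
 *
 *	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *
 *	req = skcipher_request_alloc(tfm, GFP_KERNEL);
 *	...
 *	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
 *					   CRYPTO_TFM_REQ_MAY_SLEEP,
 *				      crypto_req_done, &wait);
 *	skcipher_request_set_crypt(req, src, dst, nbytes, iv);
 *	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
 */
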
struct simd_skcipher_alg *simd_skcipher_create_compat(const char *algname,
						      const char *drvname,
						      const char *basename)
{
	struct simd_skcipher_alg *salg;
	struct crypto_skcipher *tfm;
	struct skcipher_alg *ialg;
	struct skcipher_alg *alg;
	int err;

	tfm = crypto_alloc_skcipher(basename, CRYPTO_ALG_INTERNAL,
				    CRYPTO_ALG_INTERNAL | CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);

	ialg = crypto_skcipher_alg(tfm);

	salg = kzalloc(sizeof(*salg), GFP_KERNEL);
	if (!salg) {
		salg = ERR_PTR(-ENOMEM);
		goto out_put_tfm;
	}

	salg->ialg_name = basename;
	alg = &salg->alg;

	err = -ENAMETOOLONG;
	if (snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", algname) >=
	    CRYPTO_MAX_ALG_NAME)
		goto out_free_salg;

	if (snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		     drvname) >= CRYPTO_MAX_ALG_NAME)
		goto out_free_salg;

	alg->base.cra_flags = CRYPTO_ALG_ASYNC;
	alg->base.cra_priority = ialg->base.cra_priority;
	alg->base.cra_blocksize = ialg->base.cra_blocksize;
	alg->base.cra_alignmask = ialg->base.cra_alignmask;
	alg->base.cra_module = ialg->base.cra_module;
	alg->base.cra_ctxsize = sizeof(struct simd_skcipher_ctx);

	alg->ivsize = ialg->ivsize;
	alg->chunksize = ialg->chunksize;
	alg->min_keysize = ialg->min_keysize;
	alg->max_keysize = ialg->max_keysize;

	alg->init = simd_skcipher_init;
	alg->exit = simd_skcipher_exit;

	alg->setkey = simd_skcipher_setkey;
	alg->encrypt = simd_skcipher_encrypt;
	alg->decrypt = simd_skcipher_decrypt;

	err = crypto_register_skcipher(alg);
	if (err)
		goto out_free_salg;

out_put_tfm:
	/* The probe tfm is freed on both the success and error paths. */
	crypto_free_skcipher(tfm);
	return salg;

out_free_salg:
	kfree(salg);
	salg = ERR_PTR(err);
	goto out_put_tfm;
}
EXPORT_SYMBOL_GPL(simd_skcipher_create_compat);

struct simd_skcipher_alg *simd_skcipher_create(const char *algname,
					       const char *basename)
{
	char drvname[CRYPTO_MAX_ALG_NAME];

	if (snprintf(drvname, CRYPTO_MAX_ALG_NAME, "simd-%s", basename) >=
	    CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-ENAMETOOLONG);

	return simd_skcipher_create_compat(algname, drvname, basename);
}
EXPORT_SYMBOL_GPL(simd_skcipher_create);

void simd_skcipher_free(struct simd_skcipher_alg *salg)
{
	crypto_unregister_skcipher(&salg->alg);
	kfree(salg);
}
EXPORT_SYMBOL_GPL(simd_skcipher_free);

int simd_register_skciphers_compat(struct skcipher_alg *algs, int count,
				   struct simd_skcipher_alg **simd_algs)
{
	int err;
	int i;
	const char *algname;
	const char *drvname;
	const char *basename;
	struct simd_skcipher_alg *simd;

	err = crypto_register_skciphers(algs, count);
	if (err)
		return err;

	for (i = 0; i < count; i++) {
		WARN_ON(strncmp(algs[i].base.cra_name, "__", 2));
		WARN_ON(strncmp(algs[i].base.cra_driver_name, "__", 2));
		algname = algs[i].base.cra_name + 2;
		drvname = algs[i].base.cra_driver_name + 2;
		basename = algs[i].base.cra_driver_name;
		simd = simd_skcipher_create_compat(algname, drvname, basename);
		err = PTR_ERR(simd);
		if (IS_ERR(simd))
			goto err_unregister;
		simd_algs[i] = simd;
	}
	return 0;

err_unregister:
	simd_unregister_skciphers(algs, count, simd_algs);
	return err;
}
EXPORT_SYMBOL_GPL(simd_register_skciphers_compat);

void simd_unregister_skciphers(struct skcipher_alg *algs, int count,
			       struct simd_skcipher_alg **simd_algs)
{
	int i;

	crypto_unregister_skciphers(algs, count);

	for (i = 0; i < count; i++) {
		if (simd_algs[i]) {
			simd_skcipher_free(simd_algs[i]);
			simd_algs[i] = NULL;
		}
	}
}
EXPORT_SYMBOL_GPL(simd_unregister_skciphers);

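/*
 * Illustrative sketch (not part of this file; the driver and algorithm
 * names are hypothetical): a typical user declares its internal algorithms
 * with "__"-prefixed names and the CRYPTO_ALG_INTERNAL flag, then registers
 * the internal algorithms and their SIMD wrappers in one call.
 *
 *	static struct skcipher_alg my_algs[] = { {
 *		.base.cra_name		= "__cbc(aes)",
 *		.base.cra_driver_name	= "__cbc-aes-mydrv",
 *		.base.cra_flags		= CRYPTO_ALG_INTERNAL,
 *		...
 *	} };
 *
 *	static struct simd_skcipher_alg *my_simd_algs[ARRAY_SIZE(my_algs)];
 *
 *	static int __init mydrv_init(void)
 *	{
 *		return simd_register_skciphers_compat(my_algs,
 *						      ARRAY_SIZE(my_algs),
 *						      my_simd_algs);
 *	}
 *
 *	static void __exit mydrv_exit(void)
 *	{
 *		simd_unregister_skciphers(my_algs, ARRAY_SIZE(my_algs),
 *					  my_simd_algs);
 *	}
 */
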
/* AEAD support */

struct simd_aead_alg {
	const char *ialg_name;
	struct aead_alg alg;
};

struct simd_aead_ctx {
	struct cryptd_aead *cryptd_tfm;
};

static int simd_aead_setkey(struct crypto_aead *tfm, const u8 *key,
			    unsigned int key_len)
{
	struct simd_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct crypto_aead *child = &ctx->cryptd_tfm->base;
	int err;

	crypto_aead_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_aead_set_flags(child, crypto_aead_get_flags(tfm) &
				     CRYPTO_TFM_REQ_MASK);
	err = crypto_aead_setkey(child, key, key_len);
	crypto_aead_set_flags(tfm, crypto_aead_get_flags(child) &
				   CRYPTO_TFM_RES_MASK);
	return err;
}

static int simd_aead_setauthsize(struct crypto_aead *tfm,
				 unsigned int authsize)
{
	struct simd_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct crypto_aead *child = &ctx->cryptd_tfm->base;

	return crypto_aead_setauthsize(child, authsize);
}

static int simd_aead_encrypt(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct simd_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct aead_request *subreq;
	struct crypto_aead *child;

	subreq = aead_request_ctx(req);
	*subreq = *req;

	/* Same dispatch rule as the skcipher wrapper above. */
	if (!crypto_simd_usable() ||
	    (in_atomic() && cryptd_aead_queued(ctx->cryptd_tfm)))
		child = &ctx->cryptd_tfm->base;
	else
		child = cryptd_aead_child(ctx->cryptd_tfm);

	aead_request_set_tfm(subreq, child);

	return crypto_aead_encrypt(subreq);
}

static int simd_aead_decrypt(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct simd_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct aead_request *subreq;
	struct crypto_aead *child;

	subreq = aead_request_ctx(req);
	*subreq = *req;

	if (!crypto_simd_usable() ||
	    (in_atomic() && cryptd_aead_queued(ctx->cryptd_tfm)))
		child = &ctx->cryptd_tfm->base;
	else
		child = cryptd_aead_child(ctx->cryptd_tfm);

	aead_request_set_tfm(subreq, child);

	return crypto_aead_decrypt(subreq);
}

static void simd_aead_exit(struct crypto_aead *tfm)
{
	struct simd_aead_ctx *ctx = crypto_aead_ctx(tfm);

	cryptd_free_aead(ctx->cryptd_tfm);
}

static int simd_aead_init(struct crypto_aead *tfm)
{
	struct simd_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct cryptd_aead *cryptd_tfm;
	struct simd_aead_alg *salg;
	struct aead_alg *alg;
	unsigned reqsize;

	alg = crypto_aead_alg(tfm);
	salg = container_of(alg, struct simd_aead_alg, alg);

	cryptd_tfm = cryptd_alloc_aead(salg->ialg_name, CRYPTO_ALG_INTERNAL,
				       CRYPTO_ALG_INTERNAL);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);

	ctx->cryptd_tfm = cryptd_tfm;

	reqsize = crypto_aead_reqsize(cryptd_aead_child(cryptd_tfm));
	reqsize = max(reqsize, crypto_aead_reqsize(&cryptd_tfm->base));
	reqsize += sizeof(struct aead_request);

	crypto_aead_set_reqsize(tfm, reqsize);

	return 0;
}

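/*
 * Illustrative sketch (not part of this file; the driver name is
 * hypothetical): a single internal AEAD implementation can be wrapped with
 * simd_aead_create(), which derives the driver name by prefixing the
 * basename with "simd-":
 *
 *	static struct simd_aead_alg *my_simd_aead;
 *
 *	my_simd_aead = simd_aead_create("gcm(aes)", "__gcm-aes-mydrv");
 *	if (IS_ERR(my_simd_aead))
 *		return PTR_ERR(my_simd_aead);
 *	...
 *	simd_aead_free(my_simd_aead);
 */
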
struct simd_aead_alg *simd_aead_create_compat(const char *algname,
					      const char *drvname,
					      const char *basename)
{
	struct simd_aead_alg *salg;
	struct crypto_aead *tfm;
	struct aead_alg *ialg;
	struct aead_alg *alg;
	int err;

	tfm = crypto_alloc_aead(basename, CRYPTO_ALG_INTERNAL,
				CRYPTO_ALG_INTERNAL | CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);

	ialg = crypto_aead_alg(tfm);

	salg = kzalloc(sizeof(*salg), GFP_KERNEL);
	if (!salg) {
		salg = ERR_PTR(-ENOMEM);
		goto out_put_tfm;
	}

	salg->ialg_name = basename;
	alg = &salg->alg;

	err = -ENAMETOOLONG;
	if (snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", algname) >=
	    CRYPTO_MAX_ALG_NAME)
		goto out_free_salg;

	if (snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		     drvname) >= CRYPTO_MAX_ALG_NAME)
		goto out_free_salg;

	alg->base.cra_flags = CRYPTO_ALG_ASYNC;
	alg->base.cra_priority = ialg->base.cra_priority;
	alg->base.cra_blocksize = ialg->base.cra_blocksize;
	alg->base.cra_alignmask = ialg->base.cra_alignmask;
	alg->base.cra_module = ialg->base.cra_module;
	alg->base.cra_ctxsize = sizeof(struct simd_aead_ctx);

	alg->ivsize = ialg->ivsize;
	alg->maxauthsize = ialg->maxauthsize;
	alg->chunksize = ialg->chunksize;

	alg->init = simd_aead_init;
	alg->exit = simd_aead_exit;

	alg->setkey = simd_aead_setkey;
	alg->setauthsize = simd_aead_setauthsize;
	alg->encrypt = simd_aead_encrypt;
	alg->decrypt = simd_aead_decrypt;

	err = crypto_register_aead(alg);
	if (err)
		goto out_free_salg;

out_put_tfm:
	crypto_free_aead(tfm);
	return salg;

out_free_salg:
	kfree(salg);
	salg = ERR_PTR(err);
	goto out_put_tfm;
}
EXPORT_SYMBOL_GPL(simd_aead_create_compat);

struct simd_aead_alg *simd_aead_create(const char *algname,
				       const char *basename)
{
	char drvname[CRYPTO_MAX_ALG_NAME];

	if (snprintf(drvname, CRYPTO_MAX_ALG_NAME, "simd-%s", basename) >=
	    CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-ENAMETOOLONG);

	return simd_aead_create_compat(algname, drvname, basename);
}
EXPORT_SYMBOL_GPL(simd_aead_create);

void simd_aead_free(struct simd_aead_alg *salg)
{
	crypto_unregister_aead(&salg->alg);
	kfree(salg);
}
EXPORT_SYMBOL_GPL(simd_aead_free);

int simd_register_aeads_compat(struct aead_alg *algs, int count,
			       struct simd_aead_alg **simd_algs)
{
	int err;
	int i;
	const char *algname;
	const char *drvname;
	const char *basename;
	struct simd_aead_alg *simd;

	err = crypto_register_aeads(algs, count);
	if (err)
		return err;

	for (i = 0; i < count; i++) {
		WARN_ON(strncmp(algs[i].base.cra_name, "__", 2));
		WARN_ON(strncmp(algs[i].base.cra_driver_name, "__", 2));
		algname = algs[i].base.cra_name + 2;
		drvname = algs[i].base.cra_driver_name + 2;
		basename = algs[i].base.cra_driver_name;
		simd = simd_aead_create_compat(algname, drvname, basename);
		err = PTR_ERR(simd);
		if (IS_ERR(simd))
			goto err_unregister;
		simd_algs[i] = simd;
	}
	return 0;

err_unregister:
	simd_unregister_aeads(algs, count, simd_algs);
	return err;
}
EXPORT_SYMBOL_GPL(simd_register_aeads_compat);

void simd_unregister_aeads(struct aead_alg *algs, int count,
			   struct simd_aead_alg **simd_algs)
{
	int i;

	crypto_unregister_aeads(algs, count);

	for (i = 0; i < count; i++) {
		if (simd_algs[i]) {
			simd_aead_free(simd_algs[i]);
			simd_algs[i] = NULL;
		}
	}
}
EXPORT_SYMBOL_GPL(simd_unregister_aeads);

MODULE_LICENSE("GPL");