// SPDX-License-Identifier: GPL-2.0+
/*
 * Cryptographic API.
 *
 * s390 implementation of the AES Cipher Algorithm.
 *
 * s390 Version:
 *   Copyright IBM Corp. 2005, 2017
 *   Author(s): Jan Glauber (jang@de.ibm.com)
 *		Sebastian Siewior <sebastian@breakpoint.cc> SW-Fallback
 *		Patrick Steuer <patrick.steuer@de.ibm.com>
 *		Harald Freudenberger <freude@de.ibm.com>
 *
 * Derived from "crypto/aes_generic.c"
 */

#define KMSG_COMPONENT "aes_s390"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/ghash.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/cpufeature.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/fips.h>
#include <linux/string.h>
#include <crypto/xts.h>
#include <asm/cpacf.h>

static u8 *ctrblk;
static DEFINE_MUTEX(ctrblk_lock);

static cpacf_mask_t km_functions, kmc_functions, kmctr_functions,
		    kma_functions;

struct s390_aes_ctx {
	u8 key[AES_MAX_KEY_SIZE];
	int key_len;
	unsigned long fc;
	union {
		struct crypto_skcipher *skcipher;
		struct crypto_cipher *cip;
	} fallback;
};

struct s390_xts_ctx {
	u8 key[32];
	u8 pcc_key[32];
	int key_len;
	unsigned long fc;
	struct crypto_skcipher *fallback;
};

struct gcm_sg_walk {
	struct scatter_walk walk;
	unsigned int walk_bytes;
	u8 *walk_ptr;
	unsigned int walk_bytes_remain;
	u8 buf[AES_BLOCK_SIZE];
	unsigned int buf_bytes;
	u8 *ptr;
	unsigned int nbytes;
};

static int setkey_fallback_cip(struct crypto_tfm *tfm, const u8 *in_key,
			       unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	sctx->fallback.cip->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
	sctx->fallback.cip->base.crt_flags |= (tfm->crt_flags &
					       CRYPTO_TFM_REQ_MASK);

	return crypto_cipher_setkey(sctx->fallback.cip, in_key, key_len);
}

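/*
 * Common setkey pattern for the CPACF-backed algorithms below: the key
 * length selects a CPACF function code, cpacf_test_func() checks it against
 * the facility mask queried at module init, and if the machine does not
 * provide that function the key is handed to the software fallback instead.
 */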
static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
		       unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
	unsigned long fc;

	/* Pick the correct function code based on the key length */
	fc = (key_len == 16) ? CPACF_KM_AES_128 :
	     (key_len == 24) ? CPACF_KM_AES_192 :
	     (key_len == 32) ? CPACF_KM_AES_256 : 0;

	/* Check if the function code is available */
	sctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
	if (!sctx->fc)
		return setkey_fallback_cip(tfm, in_key, key_len);

	sctx->key_len = key_len;
	memcpy(sctx->key, in_key, key_len);
	return 0;
}

static void crypto_aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	if (unlikely(!sctx->fc)) {
		crypto_cipher_encrypt_one(sctx->fallback.cip, out, in);
		return;
	}
	cpacf_km(sctx->fc, &sctx->key, out, in, AES_BLOCK_SIZE);
}

static void crypto_aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	if (unlikely(!sctx->fc)) {
		crypto_cipher_decrypt_one(sctx->fallback.cip, out, in);
		return;
	}
	cpacf_km(sctx->fc | CPACF_DECRYPT,
		 &sctx->key, out, in, AES_BLOCK_SIZE);
}

static int fallback_init_cip(struct crypto_tfm *tfm)
{
	const char *name = tfm->__crt_alg->cra_name;
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	sctx->fallback.cip = crypto_alloc_cipher(name, 0,
						 CRYPTO_ALG_NEED_FALLBACK);

	if (IS_ERR(sctx->fallback.cip)) {
		pr_err("Allocating AES fallback algorithm %s failed\n",
		       name);
		return PTR_ERR(sctx->fallback.cip);
	}

	return 0;
}

static void fallback_exit_cip(struct crypto_tfm *tfm)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	crypto_free_cipher(sctx->fallback.cip);
	sctx->fallback.cip = NULL;
}

static struct crypto_alg aes_alg = {
	.cra_name = "aes",
	.cra_driver_name = "aes-s390",
	.cra_priority = 300,
	.cra_flags = CRYPTO_ALG_TYPE_CIPHER |
		     CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct s390_aes_ctx),
	.cra_module = THIS_MODULE,
	.cra_init = fallback_init_cip,
	.cra_exit = fallback_exit_cip,
	.cra_u = {
		.cipher = {
			.cia_min_keysize = AES_MIN_KEY_SIZE,
			.cia_max_keysize = AES_MAX_KEY_SIZE,
			.cia_setkey = aes_set_key,
			.cia_encrypt = crypto_aes_encrypt,
			.cia_decrypt = crypto_aes_decrypt,
		}
	}
};

static int setkey_fallback_skcipher(struct crypto_skcipher *tfm, const u8 *key,
				    unsigned int len)
{
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);

	crypto_skcipher_clear_flags(sctx->fallback.skcipher,
				    CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(sctx->fallback.skcipher,
				  crypto_skcipher_get_flags(tfm) &
				  CRYPTO_TFM_REQ_MASK);
	return crypto_skcipher_setkey(sctx->fallback.skcipher, key, len);
}

static int fallback_skcipher_crypt(struct s390_aes_ctx *sctx,
				   struct skcipher_request *req,
				   unsigned long modifier)
{
	struct skcipher_request *subreq = skcipher_request_ctx(req);

	*subreq = *req;
	skcipher_request_set_tfm(subreq, sctx->fallback.skcipher);
	return (modifier & CPACF_DECRYPT) ?
		crypto_skcipher_decrypt(subreq) :
		crypto_skcipher_encrypt(subreq);
}

static int ecb_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
			   unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
	unsigned long fc;

	/* Pick the correct function code based on the key length */
	fc = (key_len == 16) ? CPACF_KM_AES_128 :
	     (key_len == 24) ? CPACF_KM_AES_192 :
	     (key_len == 32) ? CPACF_KM_AES_256 : 0;

	/* Check if the function code is available */
	sctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
	if (!sctx->fc)
		return setkey_fallback_skcipher(tfm, in_key, key_len);

	sctx->key_len = key_len;
	memcpy(sctx->key, in_key, key_len);
	return 0;
}

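/*
 * ECB: walk the request with the skcipher walk API and feed only complete
 * AES blocks to the KM instruction; any partial tail is returned to the
 * walk via skcipher_walk_done().
 */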
static int ecb_aes_crypt(struct skcipher_request *req, unsigned long modifier)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int nbytes, n;
	int ret;

	if (unlikely(!sctx->fc))
		return fallback_skcipher_crypt(sctx, req, modifier);

	ret = skcipher_walk_virt(&walk, req, false);
	while ((nbytes = walk.nbytes) != 0) {
		/* only use complete blocks */
		n = nbytes & ~(AES_BLOCK_SIZE - 1);
		cpacf_km(sctx->fc | modifier, sctx->key,
			 walk.dst.virt.addr, walk.src.virt.addr, n);
		ret = skcipher_walk_done(&walk, nbytes - n);
	}
	return ret;
}

static int ecb_aes_encrypt(struct skcipher_request *req)
{
	return ecb_aes_crypt(req, 0);
}

static int ecb_aes_decrypt(struct skcipher_request *req)
{
	return ecb_aes_crypt(req, CPACF_DECRYPT);
}

static int fallback_init_skcipher(struct crypto_skcipher *tfm)
{
	const char *name = crypto_tfm_alg_name(&tfm->base);
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);

	sctx->fallback.skcipher = crypto_alloc_skcipher(name, 0,
				CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC);

	if (IS_ERR(sctx->fallback.skcipher)) {
		pr_err("Allocating AES fallback algorithm %s failed\n",
		       name);
		return PTR_ERR(sctx->fallback.skcipher);
	}

	crypto_skcipher_set_reqsize(tfm, sizeof(struct skcipher_request) +
				    crypto_skcipher_reqsize(sctx->fallback.skcipher));
	return 0;
}

static void fallback_exit_skcipher(struct crypto_skcipher *tfm)
{
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);

	crypto_free_skcipher(sctx->fallback.skcipher);
}

static struct skcipher_alg ecb_aes_alg = {
	.base.cra_name = "ecb(aes)",
	.base.cra_driver_name = "ecb-aes-s390",
	.base.cra_priority = 401, /* combo: aes + ecb + 1 */
	.base.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
	.base.cra_blocksize = AES_BLOCK_SIZE,
	.base.cra_ctxsize = sizeof(struct s390_aes_ctx),
	.base.cra_module = THIS_MODULE,
	.init = fallback_init_skcipher,
	.exit = fallback_exit_skcipher,
	.min_keysize = AES_MIN_KEY_SIZE,
	.max_keysize = AES_MAX_KEY_SIZE,
	.setkey = ecb_aes_set_key,
	.encrypt = ecb_aes_encrypt,
	.decrypt = ecb_aes_decrypt,
};

static int cbc_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
			   unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
	unsigned long fc;

	/* Pick the correct function code based on the key length */
	fc = (key_len == 16) ? CPACF_KMC_AES_128 :
	     (key_len == 24) ? CPACF_KMC_AES_192 :
	     (key_len == 32) ? CPACF_KMC_AES_256 : 0;

	/* Check if the function code is available */
	sctx->fc = (fc && cpacf_test_func(&kmc_functions, fc)) ? fc : 0;
	if (!sctx->fc)
		return setkey_fallback_skcipher(tfm, in_key, key_len);

	sctx->key_len = key_len;
	memcpy(sctx->key, in_key, key_len);
	return 0;
}

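/*
 * CBC: the KMC instruction takes a parameter block containing the chaining
 * value (IV) followed by the key. The chaining value updated by the
 * instruction is copied back to walk.iv after each chunk so the request IV
 * stays consistent across walk steps.
 */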
static int cbc_aes_crypt(struct skcipher_request *req, unsigned long modifier)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int nbytes, n;
	int ret;
	struct {
		u8 iv[AES_BLOCK_SIZE];
		u8 key[AES_MAX_KEY_SIZE];
	} param;

	if (unlikely(!sctx->fc))
		return fallback_skcipher_crypt(sctx, req, modifier);

	ret = skcipher_walk_virt(&walk, req, false);
	if (ret)
		return ret;
	memcpy(param.iv, walk.iv, AES_BLOCK_SIZE);
	memcpy(param.key, sctx->key, sctx->key_len);
	while ((nbytes = walk.nbytes) != 0) {
		/* only use complete blocks */
		n = nbytes & ~(AES_BLOCK_SIZE - 1);
		cpacf_kmc(sctx->fc | modifier, &param,
			  walk.dst.virt.addr, walk.src.virt.addr, n);
		memcpy(walk.iv, param.iv, AES_BLOCK_SIZE);
		ret = skcipher_walk_done(&walk, nbytes - n);
	}
	return ret;
}

static int cbc_aes_encrypt(struct skcipher_request *req)
{
	return cbc_aes_crypt(req, 0);
}

static int cbc_aes_decrypt(struct skcipher_request *req)
{
	return cbc_aes_crypt(req, CPACF_DECRYPT);
}

static struct skcipher_alg cbc_aes_alg = {
	.base.cra_name = "cbc(aes)",
	.base.cra_driver_name = "cbc-aes-s390",
	.base.cra_priority = 402, /* ecb-aes-s390 + 1 */
	.base.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
	.base.cra_blocksize = AES_BLOCK_SIZE,
	.base.cra_ctxsize = sizeof(struct s390_aes_ctx),
	.base.cra_module = THIS_MODULE,
	.init = fallback_init_skcipher,
	.exit = fallback_exit_skcipher,
	.min_keysize = AES_MIN_KEY_SIZE,
	.max_keysize = AES_MAX_KEY_SIZE,
	.ivsize = AES_BLOCK_SIZE,
	.setkey = cbc_aes_set_key,
	.encrypt = cbc_aes_encrypt,
	.decrypt = cbc_aes_decrypt,
};

static int xts_fallback_setkey(struct crypto_skcipher *tfm, const u8 *key,
			       unsigned int len)
{
	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);

	crypto_skcipher_clear_flags(xts_ctx->fallback, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(xts_ctx->fallback,
				  crypto_skcipher_get_flags(tfm) &
				  CRYPTO_TFM_REQ_MASK);
	return crypto_skcipher_setkey(xts_ctx->fallback, key, len);
}

static int xts_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
			   unsigned int key_len)
{
	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);
	unsigned long fc;
	int err;

	err = xts_fallback_setkey(tfm, in_key, key_len);
	if (err)
		return err;

	/* In fips mode only 128 bit or 256 bit keys are valid */
	if (fips_enabled && key_len != 32 && key_len != 64)
		return -EINVAL;

	/* Pick the correct function code based on the key length */
	fc = (key_len == 32) ? CPACF_KM_XTS_128 :
	     (key_len == 64) ? CPACF_KM_XTS_256 : 0;

	/* Check if the function code is available */
	xts_ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
	if (!xts_ctx->fc)
		return 0;

	/* Split the XTS key into the two subkeys */
	key_len = key_len / 2;
	xts_ctx->key_len = key_len;
	memcpy(xts_ctx->key, in_key, key_len);
	memcpy(xts_ctx->pcc_key, in_key + key_len, key_len);
	return 0;
}

static int xts_aes_crypt(struct skcipher_request *req, unsigned long modifier)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int offset, nbytes, n;
	int ret;
	struct {
		u8 key[32];
		u8 tweak[16];
		u8 block[16];
		u8 bit[16];
		u8 xts[16];
	} pcc_param;
	struct {
		u8 key[32];
		u8 init[16];
	} xts_param;

	if (req->cryptlen < AES_BLOCK_SIZE)
		return -EINVAL;

	if (unlikely(!xts_ctx->fc || (req->cryptlen % AES_BLOCK_SIZE) != 0)) {
		struct skcipher_request *subreq = skcipher_request_ctx(req);

		*subreq = *req;
		skcipher_request_set_tfm(subreq, xts_ctx->fallback);
		return (modifier & CPACF_DECRYPT) ?
			crypto_skcipher_decrypt(subreq) :
			crypto_skcipher_encrypt(subreq);
	}

	ret = skcipher_walk_virt(&walk, req, false);
	if (ret)
		return ret;
	offset = xts_ctx->key_len & 0x10;
	memset(pcc_param.block, 0, sizeof(pcc_param.block));
	memset(pcc_param.bit, 0, sizeof(pcc_param.bit));
	memset(pcc_param.xts, 0, sizeof(pcc_param.xts));
	memcpy(pcc_param.tweak, walk.iv, sizeof(pcc_param.tweak));
	memcpy(pcc_param.key + offset, xts_ctx->pcc_key, xts_ctx->key_len);
	cpacf_pcc(xts_ctx->fc, pcc_param.key + offset);

	memcpy(xts_param.key + offset, xts_ctx->key, xts_ctx->key_len);
	memcpy(xts_param.init, pcc_param.xts, 16);

	while ((nbytes = walk.nbytes) != 0) {
		/* only use complete blocks */
		n = nbytes & ~(AES_BLOCK_SIZE - 1);
		cpacf_km(xts_ctx->fc | modifier, xts_param.key + offset,
			 walk.dst.virt.addr, walk.src.virt.addr, n);
		ret = skcipher_walk_done(&walk, nbytes - n);
	}
	return ret;
}

static int xts_aes_encrypt(struct skcipher_request *req)
{
	return xts_aes_crypt(req, 0);
}

static int xts_aes_decrypt(struct skcipher_request *req)
{
	return xts_aes_crypt(req, CPACF_DECRYPT);
}

static int xts_fallback_init(struct crypto_skcipher *tfm)
{
	const char *name = crypto_tfm_alg_name(&tfm->base);
	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);

	xts_ctx->fallback = crypto_alloc_skcipher(name, 0,
				CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC);

	if (IS_ERR(xts_ctx->fallback)) {
		pr_err("Allocating XTS fallback algorithm %s failed\n",
		       name);
		return PTR_ERR(xts_ctx->fallback);
	}
	crypto_skcipher_set_reqsize(tfm, sizeof(struct skcipher_request) +
				    crypto_skcipher_reqsize(xts_ctx->fallback));
	return 0;
}

static void xts_fallback_exit(struct crypto_skcipher *tfm)
{
	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);

	crypto_free_skcipher(xts_ctx->fallback);
}

static struct skcipher_alg xts_aes_alg = {
	.base.cra_name = "xts(aes)",
	.base.cra_driver_name = "xts-aes-s390",
	.base.cra_priority = 402, /* ecb-aes-s390 + 1 */
	.base.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
	.base.cra_blocksize = AES_BLOCK_SIZE,
	.base.cra_ctxsize = sizeof(struct s390_xts_ctx),
	.base.cra_module = THIS_MODULE,
	.init = xts_fallback_init,
	.exit = xts_fallback_exit,
	.min_keysize = 2 * AES_MIN_KEY_SIZE,
	.max_keysize = 2 * AES_MAX_KEY_SIZE,
	.ivsize = AES_BLOCK_SIZE,
	.setkey = xts_aes_set_key,
	.encrypt = xts_aes_encrypt,
	.decrypt = xts_aes_decrypt,
};

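/*
 * CTR: a single page (ctrblk), shared by all tfms, is pre-filled with
 * consecutive counter values so that KMCTR can process many blocks per
 * invocation. ctrblk_lock serializes its users; if the mutex cannot be
 * taken, the code falls back to processing one block at a time using the
 * request IV directly.
 */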
static int ctr_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
			   unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
	unsigned long fc;

	/* Pick the correct function code based on the key length */
	fc = (key_len == 16) ? CPACF_KMCTR_AES_128 :
	     (key_len == 24) ? CPACF_KMCTR_AES_192 :
	     (key_len == 32) ? CPACF_KMCTR_AES_256 : 0;

	/* Check if the function code is available */
	sctx->fc = (fc && cpacf_test_func(&kmctr_functions, fc)) ? fc : 0;
	if (!sctx->fc)
		return setkey_fallback_skcipher(tfm, in_key, key_len);

	sctx->key_len = key_len;
	memcpy(sctx->key, in_key, key_len);
	return 0;
}

static unsigned int __ctrblk_init(u8 *ctrptr, u8 *iv, unsigned int nbytes)
{
	unsigned int i, n;

	/* only use complete blocks, max. PAGE_SIZE */
	memcpy(ctrptr, iv, AES_BLOCK_SIZE);
	n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(AES_BLOCK_SIZE - 1);
	for (i = (n / AES_BLOCK_SIZE) - 1; i > 0; i--) {
		memcpy(ctrptr + AES_BLOCK_SIZE, ctrptr, AES_BLOCK_SIZE);
		crypto_inc(ctrptr + AES_BLOCK_SIZE, AES_BLOCK_SIZE);
		ctrptr += AES_BLOCK_SIZE;
	}
	return n;
}

static int ctr_aes_crypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
	u8 buf[AES_BLOCK_SIZE], *ctrptr;
	struct skcipher_walk walk;
	unsigned int n, nbytes;
	int ret, locked;

	if (unlikely(!sctx->fc))
		return fallback_skcipher_crypt(sctx, req, 0);

	locked = mutex_trylock(&ctrblk_lock);

	ret = skcipher_walk_virt(&walk, req, false);
	while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
		n = AES_BLOCK_SIZE;

		if (nbytes >= 2*AES_BLOCK_SIZE && locked)
			n = __ctrblk_init(ctrblk, walk.iv, nbytes);
		ctrptr = (n > AES_BLOCK_SIZE) ? ctrblk : walk.iv;
		cpacf_kmctr(sctx->fc, sctx->key, walk.dst.virt.addr,
			    walk.src.virt.addr, n, ctrptr);
		if (ctrptr == ctrblk)
			memcpy(walk.iv, ctrptr + n - AES_BLOCK_SIZE,
			       AES_BLOCK_SIZE);
		crypto_inc(walk.iv, AES_BLOCK_SIZE);
		ret = skcipher_walk_done(&walk, nbytes - n);
	}
	if (locked)
		mutex_unlock(&ctrblk_lock);
	/*
	 * final block may be < AES_BLOCK_SIZE, copy only nbytes
	 */
	if (nbytes) {
		cpacf_kmctr(sctx->fc, sctx->key, buf, walk.src.virt.addr,
			    AES_BLOCK_SIZE, walk.iv);
		memcpy(walk.dst.virt.addr, buf, nbytes);
		crypto_inc(walk.iv, AES_BLOCK_SIZE);
		ret = skcipher_walk_done(&walk, 0);
	}

	return ret;
}

static struct skcipher_alg ctr_aes_alg = {
	.base.cra_name = "ctr(aes)",
	.base.cra_driver_name = "ctr-aes-s390",
	.base.cra_priority = 402, /* ecb-aes-s390 + 1 */
	.base.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
	.base.cra_blocksize = 1,
	.base.cra_ctxsize = sizeof(struct s390_aes_ctx),
	.base.cra_module = THIS_MODULE,
	.init = fallback_init_skcipher,
	.exit = fallback_exit_skcipher,
	.min_keysize = AES_MIN_KEY_SIZE,
	.max_keysize = AES_MAX_KEY_SIZE,
	.ivsize = AES_BLOCK_SIZE,
	.setkey = ctr_aes_set_key,
	.encrypt = ctr_aes_crypt,
	.decrypt = ctr_aes_crypt,
	.chunksize = AES_BLOCK_SIZE,
};

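/*
 * GCM is implemented on top of the KMA instruction. The gcm_sg_walk
 * helpers below walk the source and destination scatterlists and buffer
 * partial blocks in gcm_sg_walk.buf, so that AAD and plain-/ciphertext can
 * be passed to KMA in suitably sized chunks.
 */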
static int gcm_aes_setkey(struct crypto_aead *tfm, const u8 *key,
			  unsigned int keylen)
{
	struct s390_aes_ctx *ctx = crypto_aead_ctx(tfm);

	switch (keylen) {
	case AES_KEYSIZE_128:
		ctx->fc = CPACF_KMA_GCM_AES_128;
		break;
	case AES_KEYSIZE_192:
		ctx->fc = CPACF_KMA_GCM_AES_192;
		break;
	case AES_KEYSIZE_256:
		ctx->fc = CPACF_KMA_GCM_AES_256;
		break;
	default:
		return -EINVAL;
	}

	memcpy(ctx->key, key, keylen);
	ctx->key_len = keylen;
	return 0;
}

static int gcm_aes_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
	switch (authsize) {
	case 4:
	case 8:
	case 12:
	case 13:
	case 14:
	case 15:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static void gcm_walk_start(struct gcm_sg_walk *gw, struct scatterlist *sg,
			   unsigned int len)
{
	memset(gw, 0, sizeof(*gw));
	gw->walk_bytes_remain = len;
	scatterwalk_start(&gw->walk, sg);
}

static inline unsigned int _gcm_sg_clamp_and_map(struct gcm_sg_walk *gw)
{
	struct scatterlist *nextsg;

	gw->walk_bytes = scatterwalk_clamp(&gw->walk, gw->walk_bytes_remain);
	while (!gw->walk_bytes) {
		nextsg = sg_next(gw->walk.sg);
		if (!nextsg)
			return 0;
		scatterwalk_start(&gw->walk, nextsg);
		gw->walk_bytes = scatterwalk_clamp(&gw->walk,
						   gw->walk_bytes_remain);
	}
	gw->walk_ptr = scatterwalk_map(&gw->walk);
	return gw->walk_bytes;
}

static inline void _gcm_sg_unmap_and_advance(struct gcm_sg_walk *gw,
					     unsigned int nbytes)
{
	gw->walk_bytes_remain -= nbytes;
	scatterwalk_unmap(&gw->walk);
	scatterwalk_advance(&gw->walk, nbytes);
	scatterwalk_done(&gw->walk, 0, gw->walk_bytes_remain);
	gw->walk_ptr = NULL;
}

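/*
 * gcm_in_walk_go/gcm_out_walk_go return in gw->ptr a contiguous region of
 * at least minbytesneeded bytes where possible. When a scatterlist entry is
 * shorter than that, the per-walk buffer is used instead (collected from the
 * input, or offered as the output destination), and the matching *_walk_done
 * helper consumes the bytes actually processed.
 */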
static int gcm_in_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
{
	int n;

	if (gw->buf_bytes && gw->buf_bytes >= minbytesneeded) {
		gw->ptr = gw->buf;
		gw->nbytes = gw->buf_bytes;
		goto out;
	}

	if (gw->walk_bytes_remain == 0) {
		gw->ptr = NULL;
		gw->nbytes = 0;
		goto out;
	}

	if (!_gcm_sg_clamp_and_map(gw)) {
		gw->ptr = NULL;
		gw->nbytes = 0;
		goto out;
	}

	if (!gw->buf_bytes && gw->walk_bytes >= minbytesneeded) {
		gw->ptr = gw->walk_ptr;
		gw->nbytes = gw->walk_bytes;
		goto out;
	}

	while (1) {
		n = min(gw->walk_bytes, AES_BLOCK_SIZE - gw->buf_bytes);
		memcpy(gw->buf + gw->buf_bytes, gw->walk_ptr, n);
		gw->buf_bytes += n;
		_gcm_sg_unmap_and_advance(gw, n);
		if (gw->buf_bytes >= minbytesneeded) {
			gw->ptr = gw->buf;
			gw->nbytes = gw->buf_bytes;
			goto out;
		}
		if (!_gcm_sg_clamp_and_map(gw)) {
			gw->ptr = NULL;
			gw->nbytes = 0;
			goto out;
		}
	}

out:
	return gw->nbytes;
}

static int gcm_out_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
{
	if (gw->walk_bytes_remain == 0) {
		gw->ptr = NULL;
		gw->nbytes = 0;
		goto out;
	}

	if (!_gcm_sg_clamp_and_map(gw)) {
		gw->ptr = NULL;
		gw->nbytes = 0;
		goto out;
	}

	if (gw->walk_bytes >= minbytesneeded) {
		gw->ptr = gw->walk_ptr;
		gw->nbytes = gw->walk_bytes;
		goto out;
	}

	scatterwalk_unmap(&gw->walk);
	gw->walk_ptr = NULL;

	gw->ptr = gw->buf;
	gw->nbytes = sizeof(gw->buf);

out:
	return gw->nbytes;
}

static int gcm_in_walk_done(struct gcm_sg_walk *gw, unsigned int bytesdone)
{
	if (gw->ptr == NULL)
		return 0;

	if (gw->ptr == gw->buf) {
		int n = gw->buf_bytes - bytesdone;

		if (n > 0) {
			memmove(gw->buf, gw->buf + bytesdone, n);
			gw->buf_bytes = n;
		} else
			gw->buf_bytes = 0;
	} else
		_gcm_sg_unmap_and_advance(gw, bytesdone);

	return bytesdone;
}

static int gcm_out_walk_done(struct gcm_sg_walk *gw, unsigned int bytesdone)
{
	int i, n;

	if (gw->ptr == NULL)
		return 0;

	if (gw->ptr == gw->buf) {
		for (i = 0; i < bytesdone; i += n) {
			if (!_gcm_sg_clamp_and_map(gw))
				return i;
			n = min(gw->walk_bytes, bytesdone - i);
			memcpy(gw->walk_ptr, gw->buf + i, n);
			_gcm_sg_unmap_and_advance(gw, n);
		}
	} else
		_gcm_sg_unmap_and_advance(gw, bytesdone);

	return bytesdone;
}

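/*
 * Main GCM routine: each loop iteration feeds KMA as much block-aligned AAD
 * and payload as both walks can currently provide. CPACF_KMA_LAAD and
 * CPACF_KMA_LPC are set once the chunk contains the last of the AAD resp.
 * the last of the plain-/ciphertext.
 */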
static int gcm_aes_crypt(struct aead_request *req, unsigned int flags)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct s390_aes_ctx *ctx = crypto_aead_ctx(tfm);
	unsigned int ivsize = crypto_aead_ivsize(tfm);
	unsigned int taglen = crypto_aead_authsize(tfm);
	unsigned int aadlen = req->assoclen;
	unsigned int pclen = req->cryptlen;
	int ret = 0;

	unsigned int n, len, in_bytes, out_bytes,
		     min_bytes, bytes, aad_bytes, pc_bytes;
	struct gcm_sg_walk gw_in, gw_out;
	u8 tag[GHASH_DIGEST_SIZE];

	struct {
		u32 _[3];		/* reserved */
		u32 cv;			/* Counter Value */
		u8 t[GHASH_DIGEST_SIZE];/* Tag */
		u8 h[AES_BLOCK_SIZE];	/* Hash-subkey */
		u64 taadl;		/* Total AAD Length */
		u64 tpcl;		/* Total Plain-/Cipher-text Length */
		u8 j0[GHASH_BLOCK_SIZE];/* initial counter value */
		u8 k[AES_MAX_KEY_SIZE];	/* Key */
	} param;

	/*
	 * encrypt
	 *   req->src: aad||plaintext
	 *   req->dst: aad||ciphertext||tag
	 * decrypt
	 *   req->src: aad||ciphertext||tag
	 *   req->dst: aad||plaintext, return 0 or -EBADMSG
	 * aad, plaintext and ciphertext may be empty.
	 */
	if (flags & CPACF_DECRYPT)
		pclen -= taglen;
	len = aadlen + pclen;

	memset(&param, 0, sizeof(param));
	param.cv = 1;
	param.taadl = aadlen * 8;
	param.tpcl = pclen * 8;
	memcpy(param.j0, req->iv, ivsize);
	*(u32 *)(param.j0 + ivsize) = 1;
	memcpy(param.k, ctx->key, ctx->key_len);

	gcm_walk_start(&gw_in, req->src, len);
	gcm_walk_start(&gw_out, req->dst, len);

	do {
		min_bytes = min_t(unsigned int,
				  aadlen > 0 ? aadlen : pclen, AES_BLOCK_SIZE);
		in_bytes = gcm_in_walk_go(&gw_in, min_bytes);
		out_bytes = gcm_out_walk_go(&gw_out, min_bytes);
		bytes = min(in_bytes, out_bytes);

		if (aadlen + pclen <= bytes) {
			aad_bytes = aadlen;
			pc_bytes = pclen;
			flags |= CPACF_KMA_LAAD | CPACF_KMA_LPC;
		} else {
			if (aadlen <= bytes) {
				aad_bytes = aadlen;
				pc_bytes = (bytes - aadlen) &
					   ~(AES_BLOCK_SIZE - 1);
				flags |= CPACF_KMA_LAAD;
			} else {
				aad_bytes = bytes & ~(AES_BLOCK_SIZE - 1);
				pc_bytes = 0;
			}
		}

		if (aad_bytes > 0)
			memcpy(gw_out.ptr, gw_in.ptr, aad_bytes);

		cpacf_kma(ctx->fc | flags, &param,
			  gw_out.ptr + aad_bytes,
			  gw_in.ptr + aad_bytes, pc_bytes,
			  gw_in.ptr, aad_bytes);

		n = aad_bytes + pc_bytes;
		if (gcm_in_walk_done(&gw_in, n) != n)
			return -ENOMEM;
		if (gcm_out_walk_done(&gw_out, n) != n)
			return -ENOMEM;
		aadlen -= aad_bytes;
		pclen -= pc_bytes;
	} while (aadlen + pclen > 0);

	if (flags & CPACF_DECRYPT) {
		scatterwalk_map_and_copy(tag, req->src, len, taglen, 0);
		if (crypto_memneq(tag, param.t, taglen))
			ret = -EBADMSG;
	} else
		scatterwalk_map_and_copy(param.t, req->dst, len, taglen, 1);

	memzero_explicit(&param, sizeof(param));
	return ret;
}

static int gcm_aes_encrypt(struct aead_request *req)
{
	return gcm_aes_crypt(req, CPACF_ENCRYPT);
}

static int gcm_aes_decrypt(struct aead_request *req)
{
	return gcm_aes_crypt(req, CPACF_DECRYPT);
}

static struct aead_alg gcm_aes_aead = {
	.setkey = gcm_aes_setkey,
	.setauthsize = gcm_aes_setauthsize,
	.encrypt = gcm_aes_encrypt,
	.decrypt = gcm_aes_decrypt,

	.ivsize = GHASH_BLOCK_SIZE - sizeof(u32),
	.maxauthsize = GHASH_DIGEST_SIZE,
	.chunksize = AES_BLOCK_SIZE,

	.base = {
		.cra_blocksize = 1,
		.cra_ctxsize = sizeof(struct s390_aes_ctx),
		.cra_priority = 900,
		.cra_name = "gcm(aes)",
		.cra_driver_name = "gcm-aes-s390",
		.cra_module = THIS_MODULE,
	},
};

static struct crypto_alg *aes_s390_alg;
static struct skcipher_alg *aes_s390_skcipher_algs[4];
static int aes_s390_skciphers_num;
static struct aead_alg *aes_s390_aead_alg;

static int aes_s390_register_skcipher(struct skcipher_alg *alg)
{
	int ret;

	ret = crypto_register_skcipher(alg);
	if (!ret)
		aes_s390_skcipher_algs[aes_s390_skciphers_num++] = alg;
	return ret;
}

static void aes_s390_fini(void)
{
	if (aes_s390_alg)
		crypto_unregister_alg(aes_s390_alg);
	while (aes_s390_skciphers_num--)
		crypto_unregister_skcipher(aes_s390_skcipher_algs[aes_s390_skciphers_num]);
	if (ctrblk)
		free_page((unsigned long) ctrblk);

	if (aes_s390_aead_alg)
		crypto_unregister_aead(aes_s390_aead_alg);
}

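/*
 * Module init: query the CPACF facility masks once and register each
 * algorithm only if at least one matching function code is available.
 * Any registration failure unwinds everything registered so far via
 * aes_s390_fini().
 */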
static int __init aes_s390_init(void)
{
	int ret;

	/* Query available functions for KM, KMC, KMCTR and KMA */
	cpacf_query(CPACF_KM, &km_functions);
	cpacf_query(CPACF_KMC, &kmc_functions);
	cpacf_query(CPACF_KMCTR, &kmctr_functions);
	cpacf_query(CPACF_KMA, &kma_functions);

	if (cpacf_test_func(&km_functions, CPACF_KM_AES_128) ||
	    cpacf_test_func(&km_functions, CPACF_KM_AES_192) ||
	    cpacf_test_func(&km_functions, CPACF_KM_AES_256)) {
		ret = crypto_register_alg(&aes_alg);
		if (ret)
			goto out_err;
		aes_s390_alg = &aes_alg;
		ret = aes_s390_register_skcipher(&ecb_aes_alg);
		if (ret)
			goto out_err;
	}

	if (cpacf_test_func(&kmc_functions, CPACF_KMC_AES_128) ||
	    cpacf_test_func(&kmc_functions, CPACF_KMC_AES_192) ||
	    cpacf_test_func(&kmc_functions, CPACF_KMC_AES_256)) {
		ret = aes_s390_register_skcipher(&cbc_aes_alg);
		if (ret)
			goto out_err;
	}

	if (cpacf_test_func(&km_functions, CPACF_KM_XTS_128) ||
	    cpacf_test_func(&km_functions, CPACF_KM_XTS_256)) {
		ret = aes_s390_register_skcipher(&xts_aes_alg);
		if (ret)
			goto out_err;
	}

	if (cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_128) ||
	    cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_192) ||
	    cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_256)) {
		ctrblk = (u8 *) __get_free_page(GFP_KERNEL);
		if (!ctrblk) {
			ret = -ENOMEM;
			goto out_err;
		}
		ret = aes_s390_register_skcipher(&ctr_aes_alg);
		if (ret)
			goto out_err;
	}

	if (cpacf_test_func(&kma_functions, CPACF_KMA_GCM_AES_128) ||
	    cpacf_test_func(&kma_functions, CPACF_KMA_GCM_AES_192) ||
	    cpacf_test_func(&kma_functions, CPACF_KMA_GCM_AES_256)) {
		ret = crypto_register_aead(&gcm_aes_aead);
		if (ret)
			goto out_err;
		aes_s390_aead_alg = &gcm_aes_aead;
	}

	return 0;
out_err:
	aes_s390_fini();
	return ret;
}

module_cpu_feature_match(MSA, aes_s390_init);
module_exit(aes_s390_fini);

MODULE_ALIAS_CRYPTO("aes-all");

MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm");
MODULE_LICENSE("GPL");