// SPDX-License-Identifier: GPL-2.0
/*
 * Cryptographic API.
 *
 * s390 implementation of the AES Cipher Algorithm with protected keys.
 *
 * s390 Version:
 *   Copyright IBM Corp. 2017, 2023
 *   Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 *		Harald Freudenberger <freude@de.ibm.com>
 */

#define KMSG_COMPONENT "paes_s390"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <linux/bug.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/cpufeature.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <crypto/internal/skcipher.h>
#include <crypto/xts.h>
#include <asm/cpacf.h>
#include <asm/pkey.h>

/*
 * Key blobs smaller/bigger than these defines are rejected
 * by the common code even before the individual setkey function
 * is called. As paes can handle different kinds of key blobs
 * and padding is also possible, the limits need to be generous.
 */
#define PAES_MIN_KEYSIZE 16
#define PAES_MAX_KEYSIZE MAXEP11AESKEYBLOBSIZE

static u8 *ctrblk;
static DEFINE_MUTEX(ctrblk_lock);

static cpacf_mask_t km_functions, kmc_functions, kmctr_functions;

struct key_blob {
	/*
	 * Small keys will be stored in the keybuf. Larger keys are
	 * stored in extra allocated memory. In both cases, key points
	 * to the memory where the key is stored.
	 * The code distinguishes by checking keylen against
	 * sizeof(keybuf). See the two following helper functions.
	 */
	u8 *key;
	u8 keybuf[128];
	unsigned int keylen;
};

static inline int _key_to_kb(struct key_blob *kb,
			     const u8 *key,
			     unsigned int keylen)
{
	struct clearkey_header {
		u8  type;
		u8  res0[3];
		u8  version;
		u8  res1[3];
		u32 keytype;
		u32 len;
	} __packed * h;

	switch (keylen) {
	case 16:
	case 24:
	case 32:
		/* clear key value, prepare pkey clear key token in keybuf */
		memset(kb->keybuf, 0, sizeof(kb->keybuf));
		h = (struct clearkey_header *) kb->keybuf;
		h->version = 0x02; /* TOKVER_CLEAR_KEY */
		h->keytype = (keylen - 8) >> 3;
		h->len = keylen;
		memcpy(kb->keybuf + sizeof(*h), key, keylen);
		kb->keylen = sizeof(*h) + keylen;
		kb->key = kb->keybuf;
		break;
	default:
		/* other key material, let pkey handle this */
		if (keylen <= sizeof(kb->keybuf))
			kb->key = kb->keybuf;
		else {
			kb->key = kmalloc(keylen, GFP_KERNEL);
			if (!kb->key)
				return -ENOMEM;
		}
		memcpy(kb->key, key, keylen);
		kb->keylen = keylen;
		break;
	}

	return 0;
}

static inline void _free_kb_keybuf(struct key_blob *kb)
{
	if (kb->key && kb->key != kb->keybuf
	    && kb->keylen > sizeof(kb->keybuf)) {
		kfree_sensitive(kb->key);
		kb->key = NULL;
	}
}

struct s390_paes_ctx {
	struct key_blob kb;
	struct pkey_protkey pk;
	spinlock_t pk_lock;
	unsigned long fc;
};

struct s390_pxts_ctx {
	struct key_blob kb[2];
	struct pkey_protkey pk[2];
	spinlock_t pk_lock;
	unsigned long fc;
};

static inline int __paes_keyblob2pkey(struct key_blob *kb,
				      struct pkey_protkey *pk)
{
	int i, ret;

	/* try three times in case of failure */
	for (i = 0; i < 3; i++) {
		if (i > 0 && ret == -EAGAIN && in_task())
			if (msleep_interruptible(1000))
				return -EINTR;
		ret = pkey_keyblob2pkey(kb->key, kb->keylen,
					pk->protkey, &pk->len, &pk->type);
		if (ret == 0)
			break;
	}

	return ret;
}
static inline int __paes_convert_key(struct s390_paes_ctx *ctx)
{
	int ret;
	struct pkey_protkey pkey;

	pkey.len = sizeof(pkey.protkey);
	ret = __paes_keyblob2pkey(&ctx->kb, &pkey);
	if (ret)
		return ret;

	spin_lock_bh(&ctx->pk_lock);
	memcpy(&ctx->pk, &pkey, sizeof(pkey));
	spin_unlock_bh(&ctx->pk_lock);

	return 0;
}

static int ecb_paes_init(struct crypto_skcipher *tfm)
{
	struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);

	ctx->kb.key = NULL;
	spin_lock_init(&ctx->pk_lock);

	return 0;
}

static void ecb_paes_exit(struct crypto_skcipher *tfm)
{
	struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);

	_free_kb_keybuf(&ctx->kb);
}

static inline int __ecb_paes_set_key(struct s390_paes_ctx *ctx)
{
	int rc;
	unsigned long fc;

	rc = __paes_convert_key(ctx);
	if (rc)
		return rc;

	/* Pick the correct function code based on the protected key type */
	fc = (ctx->pk.type == PKEY_KEYTYPE_AES_128) ? CPACF_KM_PAES_128 :
		(ctx->pk.type == PKEY_KEYTYPE_AES_192) ? CPACF_KM_PAES_192 :
		(ctx->pk.type == PKEY_KEYTYPE_AES_256) ? CPACF_KM_PAES_256 : 0;

	/* Check if the function code is available */
	ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;

	return ctx->fc ? 0 : -EINVAL;
}

static int ecb_paes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
			    unsigned int key_len)
{
	int rc;
	struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);

	_free_kb_keybuf(&ctx->kb);
	rc = _key_to_kb(&ctx->kb, in_key, key_len);
	if (rc)
		return rc;

	return __ecb_paes_set_key(ctx);
}

static int ecb_paes_crypt(struct skcipher_request *req, unsigned long modifier)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int nbytes, n, k;
	int ret;
	struct {
		u8 key[MAXPROTKEYSIZE];
	} param;

	ret = skcipher_walk_virt(&walk, req, false);
	if (ret)
		return ret;

	spin_lock_bh(&ctx->pk_lock);
	memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);
	spin_unlock_bh(&ctx->pk_lock);

	while ((nbytes = walk.nbytes) != 0) {
		/* only use complete blocks */
		n = nbytes & ~(AES_BLOCK_SIZE - 1);
		k = cpacf_km(ctx->fc | modifier, &param,
			     walk.dst.virt.addr, walk.src.virt.addr, n);
		if (k)
			ret = skcipher_walk_done(&walk, nbytes - k);
		if (k < n) {
			if (__paes_convert_key(ctx))
				return skcipher_walk_done(&walk, -EIO);
			spin_lock_bh(&ctx->pk_lock);
			memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);
			spin_unlock_bh(&ctx->pk_lock);
		}
	}
	return ret;
}

static int ecb_paes_encrypt(struct skcipher_request *req)
{
	return ecb_paes_crypt(req, 0);
}

static int ecb_paes_decrypt(struct skcipher_request *req)
{
	return ecb_paes_crypt(req, CPACF_DECRYPT);
}

static struct skcipher_alg ecb_paes_alg = {
	.base.cra_name = "ecb(paes)",
	.base.cra_driver_name = "ecb-paes-s390",
	.base.cra_priority = 401,	/* combo: aes + ecb + 1 */
	.base.cra_blocksize = AES_BLOCK_SIZE,
	.base.cra_ctxsize = sizeof(struct s390_paes_ctx),
	.base.cra_module = THIS_MODULE,
	.base.cra_list = LIST_HEAD_INIT(ecb_paes_alg.base.cra_list),
	.init = ecb_paes_init,
	.exit = ecb_paes_exit,
	.min_keysize = PAES_MIN_KEYSIZE,
	.max_keysize = PAES_MAX_KEYSIZE,
	.setkey = ecb_paes_set_key,
	.encrypt = ecb_paes_encrypt,
	.decrypt = ecb_paes_decrypt,
};
static int cbc_paes_init(struct crypto_skcipher *tfm)
{
	struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);

	ctx->kb.key = NULL;
	spin_lock_init(&ctx->pk_lock);

	return 0;
}

static void cbc_paes_exit(struct crypto_skcipher *tfm)
{
	struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);

	_free_kb_keybuf(&ctx->kb);
}

static inline int __cbc_paes_set_key(struct s390_paes_ctx *ctx)
{
	int rc;
	unsigned long fc;

	rc = __paes_convert_key(ctx);
	if (rc)
		return rc;

	/* Pick the correct function code based on the protected key type */
	fc = (ctx->pk.type == PKEY_KEYTYPE_AES_128) ? CPACF_KMC_PAES_128 :
		(ctx->pk.type == PKEY_KEYTYPE_AES_192) ? CPACF_KMC_PAES_192 :
		(ctx->pk.type == PKEY_KEYTYPE_AES_256) ? CPACF_KMC_PAES_256 : 0;

	/* Check if the function code is available */
	ctx->fc = (fc && cpacf_test_func(&kmc_functions, fc)) ? fc : 0;

	return ctx->fc ? 0 : -EINVAL;
}

static int cbc_paes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
			    unsigned int key_len)
{
	int rc;
	struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);

	_free_kb_keybuf(&ctx->kb);
	rc = _key_to_kb(&ctx->kb, in_key, key_len);
	if (rc)
		return rc;

	return __cbc_paes_set_key(ctx);
}

static int cbc_paes_crypt(struct skcipher_request *req, unsigned long modifier)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int nbytes, n, k;
	int ret;
	struct {
		u8 iv[AES_BLOCK_SIZE];
		u8 key[MAXPROTKEYSIZE];
	} param;

	ret = skcipher_walk_virt(&walk, req, false);
	if (ret)
		return ret;

	memcpy(param.iv, walk.iv, AES_BLOCK_SIZE);
	spin_lock_bh(&ctx->pk_lock);
	memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);
	spin_unlock_bh(&ctx->pk_lock);

	while ((nbytes = walk.nbytes) != 0) {
		/* only use complete blocks */
		n = nbytes & ~(AES_BLOCK_SIZE - 1);
		k = cpacf_kmc(ctx->fc | modifier, &param,
			      walk.dst.virt.addr, walk.src.virt.addr, n);
		if (k) {
			memcpy(walk.iv, param.iv, AES_BLOCK_SIZE);
			ret = skcipher_walk_done(&walk, nbytes - k);
		}
		if (k < n) {
			if (__paes_convert_key(ctx))
				return skcipher_walk_done(&walk, -EIO);
			spin_lock_bh(&ctx->pk_lock);
			memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);
			spin_unlock_bh(&ctx->pk_lock);
		}
	}
	return ret;
}

static int cbc_paes_encrypt(struct skcipher_request *req)
{
	return cbc_paes_crypt(req, 0);
}

static int cbc_paes_decrypt(struct skcipher_request *req)
{
	return cbc_paes_crypt(req, CPACF_DECRYPT);
}

static struct skcipher_alg cbc_paes_alg = {
	.base.cra_name = "cbc(paes)",
	.base.cra_driver_name = "cbc-paes-s390",
	.base.cra_priority = 402,	/* ecb-paes-s390 + 1 */
	.base.cra_blocksize = AES_BLOCK_SIZE,
	.base.cra_ctxsize = sizeof(struct s390_paes_ctx),
	.base.cra_module = THIS_MODULE,
	.base.cra_list = LIST_HEAD_INIT(cbc_paes_alg.base.cra_list),
	.init = cbc_paes_init,
	.exit = cbc_paes_exit,
	.min_keysize = PAES_MIN_KEYSIZE,
	.max_keysize = PAES_MAX_KEYSIZE,
	.ivsize = AES_BLOCK_SIZE,
	.setkey = cbc_paes_set_key,
	.encrypt = cbc_paes_encrypt,
	.decrypt = cbc_paes_decrypt,
};
static int xts_paes_init(struct crypto_skcipher *tfm)
{
	struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm);

	ctx->kb[0].key = NULL;
	ctx->kb[1].key = NULL;
	spin_lock_init(&ctx->pk_lock);

	return 0;
}

static void xts_paes_exit(struct crypto_skcipher *tfm)
{
	struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm);

	_free_kb_keybuf(&ctx->kb[0]);
	_free_kb_keybuf(&ctx->kb[1]);
}

static inline int __xts_paes_convert_key(struct s390_pxts_ctx *ctx)
{
	struct pkey_protkey pkey0, pkey1;

	pkey0.len = sizeof(pkey0.protkey);
	pkey1.len = sizeof(pkey1.protkey);

	if (__paes_keyblob2pkey(&ctx->kb[0], &pkey0) ||
	    __paes_keyblob2pkey(&ctx->kb[1], &pkey1))
		return -EINVAL;

	spin_lock_bh(&ctx->pk_lock);
	memcpy(&ctx->pk[0], &pkey0, sizeof(pkey0));
	memcpy(&ctx->pk[1], &pkey1, sizeof(pkey1));
	spin_unlock_bh(&ctx->pk_lock);

	return 0;
}

static inline int __xts_paes_set_key(struct s390_pxts_ctx *ctx)
{
	unsigned long fc;

	if (__xts_paes_convert_key(ctx))
		return -EINVAL;

	if (ctx->pk[0].type != ctx->pk[1].type)
		return -EINVAL;

	/* Pick the correct function code based on the protected key type */
	fc = (ctx->pk[0].type == PKEY_KEYTYPE_AES_128) ? CPACF_KM_PXTS_128 :
		(ctx->pk[0].type == PKEY_KEYTYPE_AES_256) ?
		CPACF_KM_PXTS_256 : 0;

	/* Check if the function code is available */
	ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;

	return ctx->fc ? 0 : -EINVAL;
}

static int xts_paes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
			    unsigned int xts_key_len)
{
	int rc;
	struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm);
	u8 ckey[2 * AES_MAX_KEY_SIZE];
	unsigned int ckey_len, key_len;

	if (xts_key_len % 2)
		return -EINVAL;

	key_len = xts_key_len / 2;

	_free_kb_keybuf(&ctx->kb[0]);
	_free_kb_keybuf(&ctx->kb[1]);
	rc = _key_to_kb(&ctx->kb[0], in_key, key_len);
	if (rc)
		return rc;
	rc = _key_to_kb(&ctx->kb[1], in_key + key_len, key_len);
	if (rc)
		return rc;

	rc = __xts_paes_set_key(ctx);
	if (rc)
		return rc;

	/*
	 * xts_verify_key verifies the key length is not odd and makes
	 * sure that the two keys are not the same. This can be done
	 * on the two protected keys as well.
	 */
	ckey_len = (ctx->pk[0].type == PKEY_KEYTYPE_AES_128) ?
		AES_KEYSIZE_128 : AES_KEYSIZE_256;
	memcpy(ckey, ctx->pk[0].protkey, ckey_len);
	memcpy(ckey + ckey_len, ctx->pk[1].protkey, ckey_len);
	return xts_verify_key(tfm, ckey, 2*ckey_len);
}

static int xts_paes_crypt(struct skcipher_request *req, unsigned long modifier)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int keylen, offset, nbytes, n, k;
	int ret;
	struct {
		u8 key[MAXPROTKEYSIZE];	/* key + verification pattern */
		u8 tweak[16];
		u8 block[16];
		u8 bit[16];
		u8 xts[16];
	} pcc_param;
	struct {
		u8 key[MAXPROTKEYSIZE];	/* key + verification pattern */
		u8 init[16];
	} xts_param;

	ret = skcipher_walk_virt(&walk, req, false);
	if (ret)
		return ret;

	keylen = (ctx->pk[0].type == PKEY_KEYTYPE_AES_128) ? 48 : 64;
	offset = (ctx->pk[0].type == PKEY_KEYTYPE_AES_128) ? 16 : 0;

	memset(&pcc_param, 0, sizeof(pcc_param));
	memcpy(pcc_param.tweak, walk.iv, sizeof(pcc_param.tweak));
	spin_lock_bh(&ctx->pk_lock);
	memcpy(pcc_param.key + offset, ctx->pk[1].protkey, keylen);
	memcpy(xts_param.key + offset, ctx->pk[0].protkey, keylen);
	spin_unlock_bh(&ctx->pk_lock);
	cpacf_pcc(ctx->fc, pcc_param.key + offset);
	memcpy(xts_param.init, pcc_param.xts, 16);

	while ((nbytes = walk.nbytes) != 0) {
		/* only use complete blocks */
		n = nbytes & ~(AES_BLOCK_SIZE - 1);
		k = cpacf_km(ctx->fc | modifier, xts_param.key + offset,
			     walk.dst.virt.addr, walk.src.virt.addr, n);
		if (k)
			ret = skcipher_walk_done(&walk, nbytes - k);
		if (k < n) {
			if (__xts_paes_convert_key(ctx))
				return skcipher_walk_done(&walk, -EIO);
			spin_lock_bh(&ctx->pk_lock);
			memcpy(xts_param.key + offset,
			       ctx->pk[0].protkey, keylen);
			spin_unlock_bh(&ctx->pk_lock);
		}
	}

	return ret;
}

static int xts_paes_encrypt(struct skcipher_request *req)
{
	return xts_paes_crypt(req, 0);
}

static int xts_paes_decrypt(struct skcipher_request *req)
{
	return xts_paes_crypt(req, CPACF_DECRYPT);
}

static struct skcipher_alg xts_paes_alg = {
	.base.cra_name = "xts(paes)",
	.base.cra_driver_name = "xts-paes-s390",
	.base.cra_priority = 402,	/* ecb-paes-s390 + 1 */
	.base.cra_blocksize = AES_BLOCK_SIZE,
	.base.cra_ctxsize = sizeof(struct s390_pxts_ctx),
	.base.cra_module = THIS_MODULE,
	.base.cra_list = LIST_HEAD_INIT(xts_paes_alg.base.cra_list),
	.init = xts_paes_init,
	.exit = xts_paes_exit,
	.min_keysize = 2 * PAES_MIN_KEYSIZE,
	.max_keysize = 2 * PAES_MAX_KEYSIZE,
	.ivsize = AES_BLOCK_SIZE,
	.setkey = xts_paes_set_key,
	.encrypt = xts_paes_encrypt,
	.decrypt = xts_paes_decrypt,
};

static int ctr_paes_init(struct crypto_skcipher *tfm)
{
	struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);

	ctx->kb.key = NULL;
	spin_lock_init(&ctx->pk_lock);

	return 0;
}

static void ctr_paes_exit(struct crypto_skcipher *tfm)
{
	struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);

	_free_kb_keybuf(&ctx->kb);
}

static inline int __ctr_paes_set_key(struct s390_paes_ctx *ctx)
{
	int rc;
	unsigned long fc;

	rc = __paes_convert_key(ctx);
	if (rc)
		return rc;

	/* Pick the correct function code based on the protected key type */
	fc = (ctx->pk.type == PKEY_KEYTYPE_AES_128) ? CPACF_KMCTR_PAES_128 :
		(ctx->pk.type == PKEY_KEYTYPE_AES_192) ? CPACF_KMCTR_PAES_192 :
		(ctx->pk.type == PKEY_KEYTYPE_AES_256) ?
		CPACF_KMCTR_PAES_256 : 0;

	/* Check if the function code is available */
	ctx->fc = (fc && cpacf_test_func(&kmctr_functions, fc)) ? fc : 0;

	return ctx->fc ? 0 : -EINVAL;
}

static int ctr_paes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
			    unsigned int key_len)
{
	int rc;
	struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);

	_free_kb_keybuf(&ctx->kb);
	rc = _key_to_kb(&ctx->kb, in_key, key_len);
	if (rc)
		return rc;

	return __ctr_paes_set_key(ctx);
}

static unsigned int __ctrblk_init(u8 *ctrptr, u8 *iv, unsigned int nbytes)
{
	unsigned int i, n;

	/* only use complete blocks, max. PAGE_SIZE */
	memcpy(ctrptr, iv, AES_BLOCK_SIZE);
	n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(AES_BLOCK_SIZE - 1);
	for (i = (n / AES_BLOCK_SIZE) - 1; i > 0; i--) {
		memcpy(ctrptr + AES_BLOCK_SIZE, ctrptr, AES_BLOCK_SIZE);
		crypto_inc(ctrptr + AES_BLOCK_SIZE, AES_BLOCK_SIZE);
		ctrptr += AES_BLOCK_SIZE;
	}
	return n;
}

static int ctr_paes_crypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
	u8 buf[AES_BLOCK_SIZE], *ctrptr;
	struct skcipher_walk walk;
	unsigned int nbytes, n, k;
	int ret, locked;
	struct {
		u8 key[MAXPROTKEYSIZE];
	} param;

	ret = skcipher_walk_virt(&walk, req, false);
	if (ret)
		return ret;

	spin_lock_bh(&ctx->pk_lock);
	memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);
	spin_unlock_bh(&ctx->pk_lock);

	locked = mutex_trylock(&ctrblk_lock);

	while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
		n = AES_BLOCK_SIZE;
		if (nbytes >= 2*AES_BLOCK_SIZE && locked)
			n = __ctrblk_init(ctrblk, walk.iv, nbytes);
		ctrptr = (n > AES_BLOCK_SIZE) ? ctrblk : walk.iv;
		k = cpacf_kmctr(ctx->fc, &param, walk.dst.virt.addr,
				walk.src.virt.addr, n, ctrptr);
		if (k) {
			if (ctrptr == ctrblk)
				memcpy(walk.iv, ctrptr + k - AES_BLOCK_SIZE,
				       AES_BLOCK_SIZE);
			crypto_inc(walk.iv, AES_BLOCK_SIZE);
			ret = skcipher_walk_done(&walk, nbytes - k);
		}
		if (k < n) {
			if (__paes_convert_key(ctx)) {
				if (locked)
					mutex_unlock(&ctrblk_lock);
				return skcipher_walk_done(&walk, -EIO);
			}
			spin_lock_bh(&ctx->pk_lock);
			memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);
			spin_unlock_bh(&ctx->pk_lock);
		}
	}
	if (locked)
		mutex_unlock(&ctrblk_lock);
	/*
	 * final block may be < AES_BLOCK_SIZE, copy only nbytes
	 */
	if (nbytes) {
		memset(buf, 0, AES_BLOCK_SIZE);
		memcpy(buf, walk.src.virt.addr, nbytes);
		while (1) {
			if (cpacf_kmctr(ctx->fc, &param, buf,
					buf, AES_BLOCK_SIZE,
					walk.iv) == AES_BLOCK_SIZE)
				break;
			if (__paes_convert_key(ctx))
				return skcipher_walk_done(&walk, -EIO);
			spin_lock_bh(&ctx->pk_lock);
			memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);
			spin_unlock_bh(&ctx->pk_lock);
		}
		memcpy(walk.dst.virt.addr, buf, nbytes);
		crypto_inc(walk.iv, AES_BLOCK_SIZE);
		ret = skcipher_walk_done(&walk, nbytes);
	}

	return ret;
}

static struct skcipher_alg ctr_paes_alg = {
	.base.cra_name = "ctr(paes)",
	.base.cra_driver_name = "ctr-paes-s390",
	.base.cra_priority = 402,	/* ecb-paes-s390 + 1 */
	.base.cra_blocksize = 1,
	.base.cra_ctxsize = sizeof(struct s390_paes_ctx),
	.base.cra_module = THIS_MODULE,
	.base.cra_list = LIST_HEAD_INIT(ctr_paes_alg.base.cra_list),
	.init = ctr_paes_init,
	.exit = ctr_paes_exit,
	.min_keysize = PAES_MIN_KEYSIZE,
	.max_keysize = PAES_MAX_KEYSIZE,
	.ivsize = AES_BLOCK_SIZE,
	.setkey = ctr_paes_set_key,
	.encrypt = ctr_paes_crypt,
	.decrypt = ctr_paes_crypt,
	.chunksize = AES_BLOCK_SIZE,
};

static inline void __crypto_unregister_skcipher(struct skcipher_alg *alg)
{
	if (!list_empty(&alg->base.cra_list))
		crypto_unregister_skcipher(alg);
}

static void paes_s390_fini(void)
{
	__crypto_unregister_skcipher(&ctr_paes_alg);
	__crypto_unregister_skcipher(&xts_paes_alg);
	__crypto_unregister_skcipher(&cbc_paes_alg);
	__crypto_unregister_skcipher(&ecb_paes_alg);
	if (ctrblk)
		free_page((unsigned long) ctrblk);
}
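
/*
 * Module init (descriptive comment added by the editor, not upstream text):
 * query the available CPACF function codes for the KM, KMC and KMCTR
 * instructions and register only those paes algorithms for which the
 * required protected-key function code is present. CTR mode additionally
 * needs a page-sized counter block buffer.
 */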
static int __init paes_s390_init(void)
{
	int ret;

	/* Query available functions for KM, KMC and KMCTR */
	cpacf_query(CPACF_KM, &km_functions);
	cpacf_query(CPACF_KMC, &kmc_functions);
	cpacf_query(CPACF_KMCTR, &kmctr_functions);

	if (cpacf_test_func(&km_functions, CPACF_KM_PAES_128) ||
	    cpacf_test_func(&km_functions, CPACF_KM_PAES_192) ||
	    cpacf_test_func(&km_functions, CPACF_KM_PAES_256)) {
		ret = crypto_register_skcipher(&ecb_paes_alg);
		if (ret)
			goto out_err;
	}

	if (cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_128) ||
	    cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_192) ||
	    cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_256)) {
		ret = crypto_register_skcipher(&cbc_paes_alg);
		if (ret)
			goto out_err;
	}

	if (cpacf_test_func(&km_functions, CPACF_KM_PXTS_128) ||
	    cpacf_test_func(&km_functions, CPACF_KM_PXTS_256)) {
		ret = crypto_register_skcipher(&xts_paes_alg);
		if (ret)
			goto out_err;
	}

	if (cpacf_test_func(&kmctr_functions, CPACF_KMCTR_PAES_128) ||
	    cpacf_test_func(&kmctr_functions, CPACF_KMCTR_PAES_192) ||
	    cpacf_test_func(&kmctr_functions, CPACF_KMCTR_PAES_256)) {
		ctrblk = (u8 *) __get_free_page(GFP_KERNEL);
		if (!ctrblk) {
			ret = -ENOMEM;
			goto out_err;
		}
		ret = crypto_register_skcipher(&ctr_paes_alg);
		if (ret)
			goto out_err;
	}

	return 0;
out_err:
	paes_s390_fini();
	return ret;
}

module_init(paes_s390_init);
module_exit(paes_s390_fini);

MODULE_ALIAS_CRYPTO("paes");

MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm with protected keys");
MODULE_LICENSE("GPL");