1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * AMD Cryptographic Coprocessor (CCP) driver 4 * 5 * Copyright (C) 2013-2019 Advanced Micro Devices, Inc. 6 * 7 * Author: Tom Lendacky <thomas.lendacky@amd.com> 8 * Author: Gary R Hook <gary.hook@amd.com> 9 */ 10 11 #include <linux/module.h> 12 #include <linux/kernel.h> 13 #include <linux/interrupt.h> 14 #include <crypto/scatterwalk.h> 15 #include <crypto/des.h> 16 #include <linux/ccp.h> 17 18 #include "ccp-dev.h" 19 20 /* SHA initial context values */ 21 static const __be32 ccp_sha1_init[SHA1_DIGEST_SIZE / sizeof(__be32)] = { 22 cpu_to_be32(SHA1_H0), cpu_to_be32(SHA1_H1), 23 cpu_to_be32(SHA1_H2), cpu_to_be32(SHA1_H3), 24 cpu_to_be32(SHA1_H4), 25 }; 26 27 static const __be32 ccp_sha224_init[SHA256_DIGEST_SIZE / sizeof(__be32)] = { 28 cpu_to_be32(SHA224_H0), cpu_to_be32(SHA224_H1), 29 cpu_to_be32(SHA224_H2), cpu_to_be32(SHA224_H3), 30 cpu_to_be32(SHA224_H4), cpu_to_be32(SHA224_H5), 31 cpu_to_be32(SHA224_H6), cpu_to_be32(SHA224_H7), 32 }; 33 34 static const __be32 ccp_sha256_init[SHA256_DIGEST_SIZE / sizeof(__be32)] = { 35 cpu_to_be32(SHA256_H0), cpu_to_be32(SHA256_H1), 36 cpu_to_be32(SHA256_H2), cpu_to_be32(SHA256_H3), 37 cpu_to_be32(SHA256_H4), cpu_to_be32(SHA256_H5), 38 cpu_to_be32(SHA256_H6), cpu_to_be32(SHA256_H7), 39 }; 40 41 static const __be64 ccp_sha384_init[SHA512_DIGEST_SIZE / sizeof(__be64)] = { 42 cpu_to_be64(SHA384_H0), cpu_to_be64(SHA384_H1), 43 cpu_to_be64(SHA384_H2), cpu_to_be64(SHA384_H3), 44 cpu_to_be64(SHA384_H4), cpu_to_be64(SHA384_H5), 45 cpu_to_be64(SHA384_H6), cpu_to_be64(SHA384_H7), 46 }; 47 48 static const __be64 ccp_sha512_init[SHA512_DIGEST_SIZE / sizeof(__be64)] = { 49 cpu_to_be64(SHA512_H0), cpu_to_be64(SHA512_H1), 50 cpu_to_be64(SHA512_H2), cpu_to_be64(SHA512_H3), 51 cpu_to_be64(SHA512_H4), cpu_to_be64(SHA512_H5), 52 cpu_to_be64(SHA512_H6), cpu_to_be64(SHA512_H7), 53 }; 54 55 #define CCP_NEW_JOBID(ccp) ((ccp->vdata->version == CCP_VERSION(3, 0)) ? 
\ 56 ccp_gen_jobid(ccp) : 0) 57 58 static u32 ccp_gen_jobid(struct ccp_device *ccp) 59 { 60 return atomic_inc_return(&ccp->current_id) & CCP_JOBID_MASK; 61 } 62 63 static void ccp_sg_free(struct ccp_sg_workarea *wa) 64 { 65 if (wa->dma_count) 66 dma_unmap_sg(wa->dma_dev, wa->dma_sg, wa->nents, wa->dma_dir); 67 68 wa->dma_count = 0; 69 } 70 71 static int ccp_init_sg_workarea(struct ccp_sg_workarea *wa, struct device *dev, 72 struct scatterlist *sg, u64 len, 73 enum dma_data_direction dma_dir) 74 { 75 memset(wa, 0, sizeof(*wa)); 76 77 wa->sg = sg; 78 if (!sg) 79 return 0; 80 81 wa->nents = sg_nents_for_len(sg, len); 82 if (wa->nents < 0) 83 return wa->nents; 84 85 wa->bytes_left = len; 86 wa->sg_used = 0; 87 88 if (len == 0) 89 return 0; 90 91 if (dma_dir == DMA_NONE) 92 return 0; 93 94 wa->dma_sg = sg; 95 wa->dma_dev = dev; 96 wa->dma_dir = dma_dir; 97 wa->dma_count = dma_map_sg(dev, sg, wa->nents, dma_dir); 98 if (!wa->dma_count) 99 return -ENOMEM; 100 101 return 0; 102 } 103 104 static void ccp_update_sg_workarea(struct ccp_sg_workarea *wa, unsigned int len) 105 { 106 unsigned int nbytes = min_t(u64, len, wa->bytes_left); 107 108 if (!wa->sg) 109 return; 110 111 wa->sg_used += nbytes; 112 wa->bytes_left -= nbytes; 113 if (wa->sg_used == wa->sg->length) { 114 wa->sg = sg_next(wa->sg); 115 wa->sg_used = 0; 116 } 117 } 118 119 static void ccp_dm_free(struct ccp_dm_workarea *wa) 120 { 121 if (wa->length <= CCP_DMAPOOL_MAX_SIZE) { 122 if (wa->address) 123 dma_pool_free(wa->dma_pool, wa->address, 124 wa->dma.address); 125 } else { 126 if (wa->dma.address) 127 dma_unmap_single(wa->dev, wa->dma.address, wa->length, 128 wa->dma.dir); 129 kfree(wa->address); 130 } 131 132 wa->address = NULL; 133 wa->dma.address = 0; 134 } 135 136 static int ccp_init_dm_workarea(struct ccp_dm_workarea *wa, 137 struct ccp_cmd_queue *cmd_q, 138 unsigned int len, 139 enum dma_data_direction dir) 140 { 141 memset(wa, 0, sizeof(*wa)); 142 143 if (!len) 144 return 0; 145 146 wa->dev = cmd_q->ccp->dev; 147 wa->length = len; 148 149 if (len <= CCP_DMAPOOL_MAX_SIZE) { 150 wa->dma_pool = cmd_q->dma_pool; 151 152 wa->address = dma_pool_zalloc(wa->dma_pool, GFP_KERNEL, 153 &wa->dma.address); 154 if (!wa->address) 155 return -ENOMEM; 156 157 wa->dma.length = CCP_DMAPOOL_MAX_SIZE; 158 159 } else { 160 wa->address = kzalloc(len, GFP_KERNEL); 161 if (!wa->address) 162 return -ENOMEM; 163 164 wa->dma.address = dma_map_single(wa->dev, wa->address, len, 165 dir); 166 if (dma_mapping_error(wa->dev, wa->dma.address)) 167 return -ENOMEM; 168 169 wa->dma.length = len; 170 } 171 wa->dma.dir = dir; 172 173 return 0; 174 } 175 176 static int ccp_set_dm_area(struct ccp_dm_workarea *wa, unsigned int wa_offset, 177 struct scatterlist *sg, unsigned int sg_offset, 178 unsigned int len) 179 { 180 WARN_ON(!wa->address); 181 182 if (len > (wa->length - wa_offset)) 183 return -EINVAL; 184 185 scatterwalk_map_and_copy(wa->address + wa_offset, sg, sg_offset, len, 186 0); 187 return 0; 188 } 189 190 static void ccp_get_dm_area(struct ccp_dm_workarea *wa, unsigned int wa_offset, 191 struct scatterlist *sg, unsigned int sg_offset, 192 unsigned int len) 193 { 194 WARN_ON(!wa->address); 195 196 scatterwalk_map_and_copy(wa->address + wa_offset, sg, sg_offset, len, 197 1); 198 } 199 200 static int ccp_reverse_set_dm_area(struct ccp_dm_workarea *wa, 201 unsigned int wa_offset, 202 struct scatterlist *sg, 203 unsigned int sg_offset, 204 unsigned int len) 205 { 206 u8 *p, *q; 207 int rc; 208 209 rc = ccp_set_dm_area(wa, wa_offset, sg, sg_offset, len); 210 if 
(rc) 211 return rc; 212 213 p = wa->address + wa_offset; 214 q = p + len - 1; 215 while (p < q) { 216 *p = *p ^ *q; 217 *q = *p ^ *q; 218 *p = *p ^ *q; 219 p++; 220 q--; 221 } 222 return 0; 223 } 224 225 static void ccp_reverse_get_dm_area(struct ccp_dm_workarea *wa, 226 unsigned int wa_offset, 227 struct scatterlist *sg, 228 unsigned int sg_offset, 229 unsigned int len) 230 { 231 u8 *p, *q; 232 233 p = wa->address + wa_offset; 234 q = p + len - 1; 235 while (p < q) { 236 *p = *p ^ *q; 237 *q = *p ^ *q; 238 *p = *p ^ *q; 239 p++; 240 q--; 241 } 242 243 ccp_get_dm_area(wa, wa_offset, sg, sg_offset, len); 244 } 245 246 static void ccp_free_data(struct ccp_data *data, struct ccp_cmd_queue *cmd_q) 247 { 248 ccp_dm_free(&data->dm_wa); 249 ccp_sg_free(&data->sg_wa); 250 } 251 252 static int ccp_init_data(struct ccp_data *data, struct ccp_cmd_queue *cmd_q, 253 struct scatterlist *sg, u64 sg_len, 254 unsigned int dm_len, 255 enum dma_data_direction dir) 256 { 257 int ret; 258 259 memset(data, 0, sizeof(*data)); 260 261 ret = ccp_init_sg_workarea(&data->sg_wa, cmd_q->ccp->dev, sg, sg_len, 262 dir); 263 if (ret) 264 goto e_err; 265 266 ret = ccp_init_dm_workarea(&data->dm_wa, cmd_q, dm_len, dir); 267 if (ret) 268 goto e_err; 269 270 return 0; 271 272 e_err: 273 ccp_free_data(data, cmd_q); 274 275 return ret; 276 } 277 278 static unsigned int ccp_queue_buf(struct ccp_data *data, unsigned int from) 279 { 280 struct ccp_sg_workarea *sg_wa = &data->sg_wa; 281 struct ccp_dm_workarea *dm_wa = &data->dm_wa; 282 unsigned int buf_count, nbytes; 283 284 /* Clear the buffer if setting it */ 285 if (!from) 286 memset(dm_wa->address, 0, dm_wa->length); 287 288 if (!sg_wa->sg) 289 return 0; 290 291 /* Perform the copy operation 292 * nbytes will always be <= UINT_MAX because dm_wa->length is 293 * an unsigned int 294 */ 295 nbytes = min_t(u64, sg_wa->bytes_left, dm_wa->length); 296 scatterwalk_map_and_copy(dm_wa->address, sg_wa->sg, sg_wa->sg_used, 297 nbytes, from); 298 299 /* Update the structures and generate the count */ 300 buf_count = 0; 301 while (sg_wa->bytes_left && (buf_count < dm_wa->length)) { 302 nbytes = min(sg_wa->sg->length - sg_wa->sg_used, 303 dm_wa->length - buf_count); 304 nbytes = min_t(u64, sg_wa->bytes_left, nbytes); 305 306 buf_count += nbytes; 307 ccp_update_sg_workarea(sg_wa, nbytes); 308 } 309 310 return buf_count; 311 } 312 313 static unsigned int ccp_fill_queue_buf(struct ccp_data *data) 314 { 315 return ccp_queue_buf(data, 0); 316 } 317 318 static unsigned int ccp_empty_queue_buf(struct ccp_data *data) 319 { 320 return ccp_queue_buf(data, 1); 321 } 322 323 static void ccp_prepare_data(struct ccp_data *src, struct ccp_data *dst, 324 struct ccp_op *op, unsigned int block_size, 325 bool blocksize_op) 326 { 327 unsigned int sg_src_len, sg_dst_len, op_len; 328 329 /* The CCP can only DMA from/to one address each per operation. This 330 * requires that we find the smallest DMA area between the source 331 * and destination. The resulting len values will always be <= UINT_MAX 332 * because the dma length is an unsigned int. 
333 */ 334 sg_src_len = sg_dma_len(src->sg_wa.sg) - src->sg_wa.sg_used; 335 sg_src_len = min_t(u64, src->sg_wa.bytes_left, sg_src_len); 336 337 if (dst) { 338 sg_dst_len = sg_dma_len(dst->sg_wa.sg) - dst->sg_wa.sg_used; 339 sg_dst_len = min_t(u64, src->sg_wa.bytes_left, sg_dst_len); 340 op_len = min(sg_src_len, sg_dst_len); 341 } else { 342 op_len = sg_src_len; 343 } 344 345 /* The data operation length will be at least block_size in length 346 * or the smaller of available sg room remaining for the source or 347 * the destination 348 */ 349 op_len = max(op_len, block_size); 350 351 /* Unless we have to buffer data, there's no reason to wait */ 352 op->soc = 0; 353 354 if (sg_src_len < block_size) { 355 /* Not enough data in the sg element, so it 356 * needs to be buffered into a blocksize chunk 357 */ 358 int cp_len = ccp_fill_queue_buf(src); 359 360 op->soc = 1; 361 op->src.u.dma.address = src->dm_wa.dma.address; 362 op->src.u.dma.offset = 0; 363 op->src.u.dma.length = (blocksize_op) ? block_size : cp_len; 364 } else { 365 /* Enough data in the sg element, but we need to 366 * adjust for any previously copied data 367 */ 368 op->src.u.dma.address = sg_dma_address(src->sg_wa.sg); 369 op->src.u.dma.offset = src->sg_wa.sg_used; 370 op->src.u.dma.length = op_len & ~(block_size - 1); 371 372 ccp_update_sg_workarea(&src->sg_wa, op->src.u.dma.length); 373 } 374 375 if (dst) { 376 if (sg_dst_len < block_size) { 377 /* Not enough room in the sg element or we're on the 378 * last piece of data (when using padding), so the 379 * output needs to be buffered into a blocksize chunk 380 */ 381 op->soc = 1; 382 op->dst.u.dma.address = dst->dm_wa.dma.address; 383 op->dst.u.dma.offset = 0; 384 op->dst.u.dma.length = op->src.u.dma.length; 385 } else { 386 /* Enough room in the sg element, but we need to 387 * adjust for any previously used area 388 */ 389 op->dst.u.dma.address = sg_dma_address(dst->sg_wa.sg); 390 op->dst.u.dma.offset = dst->sg_wa.sg_used; 391 op->dst.u.dma.length = op->src.u.dma.length; 392 } 393 } 394 } 395 396 static void ccp_process_data(struct ccp_data *src, struct ccp_data *dst, 397 struct ccp_op *op) 398 { 399 op->init = 0; 400 401 if (dst) { 402 if (op->dst.u.dma.address == dst->dm_wa.dma.address) 403 ccp_empty_queue_buf(dst); 404 else 405 ccp_update_sg_workarea(&dst->sg_wa, 406 op->dst.u.dma.length); 407 } 408 } 409 410 static int ccp_copy_to_from_sb(struct ccp_cmd_queue *cmd_q, 411 struct ccp_dm_workarea *wa, u32 jobid, u32 sb, 412 u32 byte_swap, bool from) 413 { 414 struct ccp_op op; 415 416 memset(&op, 0, sizeof(op)); 417 418 op.cmd_q = cmd_q; 419 op.jobid = jobid; 420 op.eom = 1; 421 422 if (from) { 423 op.soc = 1; 424 op.src.type = CCP_MEMTYPE_SB; 425 op.src.u.sb = sb; 426 op.dst.type = CCP_MEMTYPE_SYSTEM; 427 op.dst.u.dma.address = wa->dma.address; 428 op.dst.u.dma.length = wa->length; 429 } else { 430 op.src.type = CCP_MEMTYPE_SYSTEM; 431 op.src.u.dma.address = wa->dma.address; 432 op.src.u.dma.length = wa->length; 433 op.dst.type = CCP_MEMTYPE_SB; 434 op.dst.u.sb = sb; 435 } 436 437 op.u.passthru.byte_swap = byte_swap; 438 439 return cmd_q->ccp->vdata->perform->passthru(&op); 440 } 441 442 static int ccp_copy_to_sb(struct ccp_cmd_queue *cmd_q, 443 struct ccp_dm_workarea *wa, u32 jobid, u32 sb, 444 u32 byte_swap) 445 { 446 return ccp_copy_to_from_sb(cmd_q, wa, jobid, sb, byte_swap, false); 447 } 448 449 static int ccp_copy_from_sb(struct ccp_cmd_queue *cmd_q, 450 struct ccp_dm_workarea *wa, u32 jobid, u32 sb, 451 u32 byte_swap) 452 { 453 return ccp_copy_to_from_sb(cmd_q, wa, 
jobid, sb, byte_swap, true); 454 } 455 456 static noinline_for_stack int 457 ccp_run_aes_cmac_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) 458 { 459 struct ccp_aes_engine *aes = &cmd->u.aes; 460 struct ccp_dm_workarea key, ctx; 461 struct ccp_data src; 462 struct ccp_op op; 463 unsigned int dm_offset; 464 int ret; 465 466 if (!((aes->key_len == AES_KEYSIZE_128) || 467 (aes->key_len == AES_KEYSIZE_192) || 468 (aes->key_len == AES_KEYSIZE_256))) 469 return -EINVAL; 470 471 if (aes->src_len & (AES_BLOCK_SIZE - 1)) 472 return -EINVAL; 473 474 if (aes->iv_len != AES_BLOCK_SIZE) 475 return -EINVAL; 476 477 if (!aes->key || !aes->iv || !aes->src) 478 return -EINVAL; 479 480 if (aes->cmac_final) { 481 if (aes->cmac_key_len != AES_BLOCK_SIZE) 482 return -EINVAL; 483 484 if (!aes->cmac_key) 485 return -EINVAL; 486 } 487 488 BUILD_BUG_ON(CCP_AES_KEY_SB_COUNT != 1); 489 BUILD_BUG_ON(CCP_AES_CTX_SB_COUNT != 1); 490 491 ret = -EIO; 492 memset(&op, 0, sizeof(op)); 493 op.cmd_q = cmd_q; 494 op.jobid = CCP_NEW_JOBID(cmd_q->ccp); 495 op.sb_key = cmd_q->sb_key; 496 op.sb_ctx = cmd_q->sb_ctx; 497 op.init = 1; 498 op.u.aes.type = aes->type; 499 op.u.aes.mode = aes->mode; 500 op.u.aes.action = aes->action; 501 502 /* All supported key sizes fit in a single (32-byte) SB entry 503 * and must be in little endian format. Use the 256-bit byte 504 * swap passthru option to convert from big endian to little 505 * endian. 506 */ 507 ret = ccp_init_dm_workarea(&key, cmd_q, 508 CCP_AES_KEY_SB_COUNT * CCP_SB_BYTES, 509 DMA_TO_DEVICE); 510 if (ret) 511 return ret; 512 513 dm_offset = CCP_SB_BYTES - aes->key_len; 514 ret = ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len); 515 if (ret) 516 goto e_key; 517 ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key, 518 CCP_PASSTHRU_BYTESWAP_256BIT); 519 if (ret) { 520 cmd->engine_error = cmd_q->cmd_error; 521 goto e_key; 522 } 523 524 /* The AES context fits in a single (32-byte) SB entry and 525 * must be in little endian format. Use the 256-bit byte swap 526 * passthru option to convert from big endian to little endian. 
527 */ 528 ret = ccp_init_dm_workarea(&ctx, cmd_q, 529 CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES, 530 DMA_BIDIRECTIONAL); 531 if (ret) 532 goto e_key; 533 534 dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE; 535 ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len); 536 if (ret) 537 goto e_ctx; 538 ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, 539 CCP_PASSTHRU_BYTESWAP_256BIT); 540 if (ret) { 541 cmd->engine_error = cmd_q->cmd_error; 542 goto e_ctx; 543 } 544 545 /* Send data to the CCP AES engine */ 546 ret = ccp_init_data(&src, cmd_q, aes->src, aes->src_len, 547 AES_BLOCK_SIZE, DMA_TO_DEVICE); 548 if (ret) 549 goto e_ctx; 550 551 while (src.sg_wa.bytes_left) { 552 ccp_prepare_data(&src, NULL, &op, AES_BLOCK_SIZE, true); 553 if (aes->cmac_final && !src.sg_wa.bytes_left) { 554 op.eom = 1; 555 556 /* Push the K1/K2 key to the CCP now */ 557 ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, 558 op.sb_ctx, 559 CCP_PASSTHRU_BYTESWAP_256BIT); 560 if (ret) { 561 cmd->engine_error = cmd_q->cmd_error; 562 goto e_src; 563 } 564 565 ret = ccp_set_dm_area(&ctx, 0, aes->cmac_key, 0, 566 aes->cmac_key_len); 567 if (ret) 568 goto e_src; 569 ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, 570 CCP_PASSTHRU_BYTESWAP_256BIT); 571 if (ret) { 572 cmd->engine_error = cmd_q->cmd_error; 573 goto e_src; 574 } 575 } 576 577 ret = cmd_q->ccp->vdata->perform->aes(&op); 578 if (ret) { 579 cmd->engine_error = cmd_q->cmd_error; 580 goto e_src; 581 } 582 583 ccp_process_data(&src, NULL, &op); 584 } 585 586 /* Retrieve the AES context - convert from LE to BE using 587 * 32-byte (256-bit) byteswapping 588 */ 589 ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, 590 CCP_PASSTHRU_BYTESWAP_256BIT); 591 if (ret) { 592 cmd->engine_error = cmd_q->cmd_error; 593 goto e_src; 594 } 595 596 /* ...but we only need AES_BLOCK_SIZE bytes */ 597 dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE; 598 ccp_get_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len); 599 600 e_src: 601 ccp_free_data(&src, cmd_q); 602 603 e_ctx: 604 ccp_dm_free(&ctx); 605 606 e_key: 607 ccp_dm_free(&key); 608 609 return ret; 610 } 611 612 static noinline_for_stack int 613 ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) 614 { 615 struct ccp_aes_engine *aes = &cmd->u.aes; 616 struct ccp_dm_workarea key, ctx, final_wa, tag; 617 struct ccp_data src, dst; 618 struct ccp_data aad; 619 struct ccp_op op; 620 621 unsigned long long *final; 622 unsigned int dm_offset; 623 unsigned int jobid; 624 unsigned int ilen; 625 bool in_place = true; /* Default value */ 626 int ret; 627 628 struct scatterlist *p_inp, sg_inp[2]; 629 struct scatterlist *p_tag, sg_tag[2]; 630 struct scatterlist *p_outp, sg_outp[2]; 631 struct scatterlist *p_aad; 632 633 if (!aes->iv) 634 return -EINVAL; 635 636 if (!((aes->key_len == AES_KEYSIZE_128) || 637 (aes->key_len == AES_KEYSIZE_192) || 638 (aes->key_len == AES_KEYSIZE_256))) 639 return -EINVAL; 640 641 if (!aes->key) /* Gotta have a key SGL */ 642 return -EINVAL; 643 644 /* First, decompose the source buffer into AAD & PT, 645 * and the destination buffer into AAD, CT & tag, or 646 * the input into CT & tag. 647 * It is expected that the input and output SGs will 648 * be valid, even if the AAD and input lengths are 0. 
649 */ 650 p_aad = aes->src; 651 p_inp = scatterwalk_ffwd(sg_inp, aes->src, aes->aad_len); 652 p_outp = scatterwalk_ffwd(sg_outp, aes->dst, aes->aad_len); 653 if (aes->action == CCP_AES_ACTION_ENCRYPT) { 654 ilen = aes->src_len; 655 p_tag = scatterwalk_ffwd(sg_tag, p_outp, ilen); 656 } else { 657 /* Input length for decryption includes tag */ 658 ilen = aes->src_len - AES_BLOCK_SIZE; 659 p_tag = scatterwalk_ffwd(sg_tag, p_inp, ilen); 660 } 661 662 jobid = CCP_NEW_JOBID(cmd_q->ccp); 663 664 memset(&op, 0, sizeof(op)); 665 op.cmd_q = cmd_q; 666 op.jobid = jobid; 667 op.sb_key = cmd_q->sb_key; /* Pre-allocated */ 668 op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */ 669 op.init = 1; 670 op.u.aes.type = aes->type; 671 672 /* Copy the key to the LSB */ 673 ret = ccp_init_dm_workarea(&key, cmd_q, 674 CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES, 675 DMA_TO_DEVICE); 676 if (ret) 677 return ret; 678 679 dm_offset = CCP_SB_BYTES - aes->key_len; 680 ret = ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len); 681 if (ret) 682 goto e_key; 683 ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key, 684 CCP_PASSTHRU_BYTESWAP_256BIT); 685 if (ret) { 686 cmd->engine_error = cmd_q->cmd_error; 687 goto e_key; 688 } 689 690 /* Copy the context (IV) to the LSB. 691 * There is an assumption here that the IV is 96 bits in length, plus 692 * a nonce of 32 bits. If no IV is present, use a zeroed buffer. 693 */ 694 ret = ccp_init_dm_workarea(&ctx, cmd_q, 695 CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES, 696 DMA_BIDIRECTIONAL); 697 if (ret) 698 goto e_key; 699 700 dm_offset = CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES - aes->iv_len; 701 ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len); 702 if (ret) 703 goto e_ctx; 704 705 ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, 706 CCP_PASSTHRU_BYTESWAP_256BIT); 707 if (ret) { 708 cmd->engine_error = cmd_q->cmd_error; 709 goto e_ctx; 710 } 711 712 op.init = 1; 713 if (aes->aad_len > 0) { 714 /* Step 1: Run a GHASH over the Additional Authenticated Data */ 715 ret = ccp_init_data(&aad, cmd_q, p_aad, aes->aad_len, 716 AES_BLOCK_SIZE, 717 DMA_TO_DEVICE); 718 if (ret) 719 goto e_ctx; 720 721 op.u.aes.mode = CCP_AES_MODE_GHASH; 722 op.u.aes.action = CCP_AES_GHASHAAD; 723 724 while (aad.sg_wa.bytes_left) { 725 ccp_prepare_data(&aad, NULL, &op, AES_BLOCK_SIZE, true); 726 727 ret = cmd_q->ccp->vdata->perform->aes(&op); 728 if (ret) { 729 cmd->engine_error = cmd_q->cmd_error; 730 goto e_aad; 731 } 732 733 ccp_process_data(&aad, NULL, &op); 734 op.init = 0; 735 } 736 } 737 738 op.u.aes.mode = CCP_AES_MODE_GCTR; 739 op.u.aes.action = aes->action; 740 741 if (ilen > 0) { 742 /* Step 2: Run a GCTR over the plaintext */ 743 in_place = (sg_virt(p_inp) == sg_virt(p_outp)) ? true : false; 744 745 ret = ccp_init_data(&src, cmd_q, p_inp, ilen, 746 AES_BLOCK_SIZE, 747 in_place ? 
DMA_BIDIRECTIONAL 748 : DMA_TO_DEVICE); 749 if (ret) 750 goto e_ctx; 751 752 if (in_place) { 753 dst = src; 754 } else { 755 ret = ccp_init_data(&dst, cmd_q, p_outp, ilen, 756 AES_BLOCK_SIZE, DMA_FROM_DEVICE); 757 if (ret) 758 goto e_src; 759 } 760 761 op.soc = 0; 762 op.eom = 0; 763 op.init = 1; 764 while (src.sg_wa.bytes_left) { 765 ccp_prepare_data(&src, &dst, &op, AES_BLOCK_SIZE, true); 766 if (!src.sg_wa.bytes_left) { 767 unsigned int nbytes = aes->src_len 768 % AES_BLOCK_SIZE; 769 770 if (nbytes) { 771 op.eom = 1; 772 op.u.aes.size = (nbytes * 8) - 1; 773 } 774 } 775 776 ret = cmd_q->ccp->vdata->perform->aes(&op); 777 if (ret) { 778 cmd->engine_error = cmd_q->cmd_error; 779 goto e_dst; 780 } 781 782 ccp_process_data(&src, &dst, &op); 783 op.init = 0; 784 } 785 } 786 787 /* Step 3: Update the IV portion of the context with the original IV */ 788 ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, 789 CCP_PASSTHRU_BYTESWAP_256BIT); 790 if (ret) { 791 cmd->engine_error = cmd_q->cmd_error; 792 goto e_dst; 793 } 794 795 ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len); 796 if (ret) 797 goto e_dst; 798 799 ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, 800 CCP_PASSTHRU_BYTESWAP_256BIT); 801 if (ret) { 802 cmd->engine_error = cmd_q->cmd_error; 803 goto e_dst; 804 } 805 806 /* Step 4: Concatenate the lengths of the AAD and source, and 807 * hash that 16 byte buffer. 808 */ 809 ret = ccp_init_dm_workarea(&final_wa, cmd_q, AES_BLOCK_SIZE, 810 DMA_BIDIRECTIONAL); 811 if (ret) 812 goto e_dst; 813 final = (unsigned long long *) final_wa.address; 814 final[0] = cpu_to_be64(aes->aad_len * 8); 815 final[1] = cpu_to_be64(ilen * 8); 816 817 memset(&op, 0, sizeof(op)); 818 op.cmd_q = cmd_q; 819 op.jobid = jobid; 820 op.sb_key = cmd_q->sb_key; /* Pre-allocated */ 821 op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */ 822 op.init = 1; 823 op.u.aes.type = aes->type; 824 op.u.aes.mode = CCP_AES_MODE_GHASH; 825 op.u.aes.action = CCP_AES_GHASHFINAL; 826 op.src.type = CCP_MEMTYPE_SYSTEM; 827 op.src.u.dma.address = final_wa.dma.address; 828 op.src.u.dma.length = AES_BLOCK_SIZE; 829 op.dst.type = CCP_MEMTYPE_SYSTEM; 830 op.dst.u.dma.address = final_wa.dma.address; 831 op.dst.u.dma.length = AES_BLOCK_SIZE; 832 op.eom = 1; 833 op.u.aes.size = 0; 834 ret = cmd_q->ccp->vdata->perform->aes(&op); 835 if (ret) 836 goto e_dst; 837 838 if (aes->action == CCP_AES_ACTION_ENCRYPT) { 839 /* Put the ciphered tag after the ciphertext. */ 840 ccp_get_dm_area(&final_wa, 0, p_tag, 0, AES_BLOCK_SIZE); 841 } else { 842 /* Does this ciphered tag match the input? */ 843 ret = ccp_init_dm_workarea(&tag, cmd_q, AES_BLOCK_SIZE, 844 DMA_BIDIRECTIONAL); 845 if (ret) 846 goto e_tag; 847 ret = ccp_set_dm_area(&tag, 0, p_tag, 0, AES_BLOCK_SIZE); 848 if (ret) 849 goto e_tag; 850 851 ret = crypto_memneq(tag.address, final_wa.address, 852 AES_BLOCK_SIZE) ? 
-EBADMSG : 0; 853 ccp_dm_free(&tag); 854 } 855 856 e_tag: 857 ccp_dm_free(&final_wa); 858 859 e_dst: 860 if (aes->src_len && !in_place) 861 ccp_free_data(&dst, cmd_q); 862 863 e_src: 864 if (aes->src_len) 865 ccp_free_data(&src, cmd_q); 866 867 e_aad: 868 if (aes->aad_len) 869 ccp_free_data(&aad, cmd_q); 870 871 e_ctx: 872 ccp_dm_free(&ctx); 873 874 e_key: 875 ccp_dm_free(&key); 876 877 return ret; 878 } 879 880 static noinline_for_stack int 881 ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) 882 { 883 struct ccp_aes_engine *aes = &cmd->u.aes; 884 struct ccp_dm_workarea key, ctx; 885 struct ccp_data src, dst; 886 struct ccp_op op; 887 unsigned int dm_offset; 888 bool in_place = false; 889 int ret; 890 891 if (!((aes->key_len == AES_KEYSIZE_128) || 892 (aes->key_len == AES_KEYSIZE_192) || 893 (aes->key_len == AES_KEYSIZE_256))) 894 return -EINVAL; 895 896 if (((aes->mode == CCP_AES_MODE_ECB) || 897 (aes->mode == CCP_AES_MODE_CBC)) && 898 (aes->src_len & (AES_BLOCK_SIZE - 1))) 899 return -EINVAL; 900 901 if (!aes->key || !aes->src || !aes->dst) 902 return -EINVAL; 903 904 if (aes->mode != CCP_AES_MODE_ECB) { 905 if (aes->iv_len != AES_BLOCK_SIZE) 906 return -EINVAL; 907 908 if (!aes->iv) 909 return -EINVAL; 910 } 911 912 BUILD_BUG_ON(CCP_AES_KEY_SB_COUNT != 1); 913 BUILD_BUG_ON(CCP_AES_CTX_SB_COUNT != 1); 914 915 ret = -EIO; 916 memset(&op, 0, sizeof(op)); 917 op.cmd_q = cmd_q; 918 op.jobid = CCP_NEW_JOBID(cmd_q->ccp); 919 op.sb_key = cmd_q->sb_key; 920 op.sb_ctx = cmd_q->sb_ctx; 921 op.init = (aes->mode == CCP_AES_MODE_ECB) ? 0 : 1; 922 op.u.aes.type = aes->type; 923 op.u.aes.mode = aes->mode; 924 op.u.aes.action = aes->action; 925 926 /* All supported key sizes fit in a single (32-byte) SB entry 927 * and must be in little endian format. Use the 256-bit byte 928 * swap passthru option to convert from big endian to little 929 * endian. 930 */ 931 ret = ccp_init_dm_workarea(&key, cmd_q, 932 CCP_AES_KEY_SB_COUNT * CCP_SB_BYTES, 933 DMA_TO_DEVICE); 934 if (ret) 935 return ret; 936 937 dm_offset = CCP_SB_BYTES - aes->key_len; 938 ret = ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len); 939 if (ret) 940 goto e_key; 941 ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key, 942 CCP_PASSTHRU_BYTESWAP_256BIT); 943 if (ret) { 944 cmd->engine_error = cmd_q->cmd_error; 945 goto e_key; 946 } 947 948 /* The AES context fits in a single (32-byte) SB entry and 949 * must be in little endian format. Use the 256-bit byte swap 950 * passthru option to convert from big endian to little endian. 951 */ 952 ret = ccp_init_dm_workarea(&ctx, cmd_q, 953 CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES, 954 DMA_BIDIRECTIONAL); 955 if (ret) 956 goto e_key; 957 958 if (aes->mode != CCP_AES_MODE_ECB) { 959 /* Load the AES context - convert to LE */ 960 dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE; 961 ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len); 962 if (ret) 963 goto e_ctx; 964 ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, 965 CCP_PASSTHRU_BYTESWAP_256BIT); 966 if (ret) { 967 cmd->engine_error = cmd_q->cmd_error; 968 goto e_ctx; 969 } 970 } 971 switch (aes->mode) { 972 case CCP_AES_MODE_CFB: /* CFB128 only */ 973 case CCP_AES_MODE_CTR: 974 op.u.aes.size = AES_BLOCK_SIZE * BITS_PER_BYTE - 1; 975 break; 976 default: 977 op.u.aes.size = 0; 978 } 979 980 /* Prepare the input and output data workareas. For in-place 981 * operations we need to set the dma direction to BIDIRECTIONAL 982 * and copy the src workarea to the dst workarea. 
983 */ 984 if (sg_virt(aes->src) == sg_virt(aes->dst)) 985 in_place = true; 986 987 ret = ccp_init_data(&src, cmd_q, aes->src, aes->src_len, 988 AES_BLOCK_SIZE, 989 in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE); 990 if (ret) 991 goto e_ctx; 992 993 if (in_place) { 994 dst = src; 995 } else { 996 ret = ccp_init_data(&dst, cmd_q, aes->dst, aes->src_len, 997 AES_BLOCK_SIZE, DMA_FROM_DEVICE); 998 if (ret) 999 goto e_src; 1000 } 1001 1002 /* Send data to the CCP AES engine */ 1003 while (src.sg_wa.bytes_left) { 1004 ccp_prepare_data(&src, &dst, &op, AES_BLOCK_SIZE, true); 1005 if (!src.sg_wa.bytes_left) { 1006 op.eom = 1; 1007 1008 /* Since we don't retrieve the AES context in ECB 1009 * mode we have to wait for the operation to complete 1010 * on the last piece of data 1011 */ 1012 if (aes->mode == CCP_AES_MODE_ECB) 1013 op.soc = 1; 1014 } 1015 1016 ret = cmd_q->ccp->vdata->perform->aes(&op); 1017 if (ret) { 1018 cmd->engine_error = cmd_q->cmd_error; 1019 goto e_dst; 1020 } 1021 1022 ccp_process_data(&src, &dst, &op); 1023 } 1024 1025 if (aes->mode != CCP_AES_MODE_ECB) { 1026 /* Retrieve the AES context - convert from LE to BE using 1027 * 32-byte (256-bit) byteswapping 1028 */ 1029 ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, 1030 CCP_PASSTHRU_BYTESWAP_256BIT); 1031 if (ret) { 1032 cmd->engine_error = cmd_q->cmd_error; 1033 goto e_dst; 1034 } 1035 1036 /* ...but we only need AES_BLOCK_SIZE bytes */ 1037 dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE; 1038 ccp_get_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len); 1039 } 1040 1041 e_dst: 1042 if (!in_place) 1043 ccp_free_data(&dst, cmd_q); 1044 1045 e_src: 1046 ccp_free_data(&src, cmd_q); 1047 1048 e_ctx: 1049 ccp_dm_free(&ctx); 1050 1051 e_key: 1052 ccp_dm_free(&key); 1053 1054 return ret; 1055 } 1056 1057 static noinline_for_stack int 1058 ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) 1059 { 1060 struct ccp_xts_aes_engine *xts = &cmd->u.xts; 1061 struct ccp_dm_workarea key, ctx; 1062 struct ccp_data src, dst; 1063 struct ccp_op op; 1064 unsigned int unit_size, dm_offset; 1065 bool in_place = false; 1066 unsigned int sb_count; 1067 enum ccp_aes_type aestype; 1068 int ret; 1069 1070 switch (xts->unit_size) { 1071 case CCP_XTS_AES_UNIT_SIZE_16: 1072 unit_size = 16; 1073 break; 1074 case CCP_XTS_AES_UNIT_SIZE_512: 1075 unit_size = 512; 1076 break; 1077 case CCP_XTS_AES_UNIT_SIZE_1024: 1078 unit_size = 1024; 1079 break; 1080 case CCP_XTS_AES_UNIT_SIZE_2048: 1081 unit_size = 2048; 1082 break; 1083 case CCP_XTS_AES_UNIT_SIZE_4096: 1084 unit_size = 4096; 1085 break; 1086 1087 default: 1088 return -EINVAL; 1089 } 1090 1091 if (xts->key_len == AES_KEYSIZE_128) 1092 aestype = CCP_AES_TYPE_128; 1093 else if (xts->key_len == AES_KEYSIZE_256) 1094 aestype = CCP_AES_TYPE_256; 1095 else 1096 return -EINVAL; 1097 1098 if (!xts->final && (xts->src_len & (AES_BLOCK_SIZE - 1))) 1099 return -EINVAL; 1100 1101 if (xts->iv_len != AES_BLOCK_SIZE) 1102 return -EINVAL; 1103 1104 if (!xts->key || !xts->iv || !xts->src || !xts->dst) 1105 return -EINVAL; 1106 1107 BUILD_BUG_ON(CCP_XTS_AES_KEY_SB_COUNT != 1); 1108 BUILD_BUG_ON(CCP_XTS_AES_CTX_SB_COUNT != 1); 1109 1110 ret = -EIO; 1111 memset(&op, 0, sizeof(op)); 1112 op.cmd_q = cmd_q; 1113 op.jobid = CCP_NEW_JOBID(cmd_q->ccp); 1114 op.sb_key = cmd_q->sb_key; 1115 op.sb_ctx = cmd_q->sb_ctx; 1116 op.init = 1; 1117 op.u.xts.type = aestype; 1118 op.u.xts.action = xts->action; 1119 op.u.xts.unit_size = xts->unit_size; 1120 1121 /* A version 3 device only supports 128-bit keys, which fits into a 
1122 * single SB entry. A version 5 device uses a 512-bit vector, so two 1123 * SB entries. 1124 */ 1125 if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0)) 1126 sb_count = CCP_XTS_AES_KEY_SB_COUNT; 1127 else 1128 sb_count = CCP5_XTS_AES_KEY_SB_COUNT; 1129 ret = ccp_init_dm_workarea(&key, cmd_q, 1130 sb_count * CCP_SB_BYTES, 1131 DMA_TO_DEVICE); 1132 if (ret) 1133 return ret; 1134 1135 if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0)) { 1136 /* All supported key sizes must be in little endian format. 1137 * Use the 256-bit byte swap passthru option to convert from 1138 * big endian to little endian. 1139 */ 1140 dm_offset = CCP_SB_BYTES - AES_KEYSIZE_128; 1141 ret = ccp_set_dm_area(&key, dm_offset, xts->key, 0, xts->key_len); 1142 if (ret) 1143 goto e_key; 1144 ret = ccp_set_dm_area(&key, 0, xts->key, xts->key_len, xts->key_len); 1145 if (ret) 1146 goto e_key; 1147 } else { 1148 /* Version 5 CCPs use a 512-bit space for the key: each portion 1149 * occupies 256 bits, or one entire slot, and is zero-padded. 1150 */ 1151 unsigned int pad; 1152 1153 dm_offset = CCP_SB_BYTES; 1154 pad = dm_offset - xts->key_len; 1155 ret = ccp_set_dm_area(&key, pad, xts->key, 0, xts->key_len); 1156 if (ret) 1157 goto e_key; 1158 ret = ccp_set_dm_area(&key, dm_offset + pad, xts->key, 1159 xts->key_len, xts->key_len); 1160 if (ret) 1161 goto e_key; 1162 } 1163 ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key, 1164 CCP_PASSTHRU_BYTESWAP_256BIT); 1165 if (ret) { 1166 cmd->engine_error = cmd_q->cmd_error; 1167 goto e_key; 1168 } 1169 1170 /* The AES context fits in a single (32-byte) SB entry and 1171 * for XTS is already in little endian format so no byte swapping 1172 * is needed. 1173 */ 1174 ret = ccp_init_dm_workarea(&ctx, cmd_q, 1175 CCP_XTS_AES_CTX_SB_COUNT * CCP_SB_BYTES, 1176 DMA_BIDIRECTIONAL); 1177 if (ret) 1178 goto e_key; 1179 1180 ret = ccp_set_dm_area(&ctx, 0, xts->iv, 0, xts->iv_len); 1181 if (ret) 1182 goto e_ctx; 1183 ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, 1184 CCP_PASSTHRU_BYTESWAP_NOOP); 1185 if (ret) { 1186 cmd->engine_error = cmd_q->cmd_error; 1187 goto e_ctx; 1188 } 1189 1190 /* Prepare the input and output data workareas. For in-place 1191 * operations we need to set the dma direction to BIDIRECTIONAL 1192 * and copy the src workarea to the dst workarea. 1193 */ 1194 if (sg_virt(xts->src) == sg_virt(xts->dst)) 1195 in_place = true; 1196 1197 ret = ccp_init_data(&src, cmd_q, xts->src, xts->src_len, 1198 unit_size, 1199 in_place ? 
DMA_BIDIRECTIONAL : DMA_TO_DEVICE); 1200 if (ret) 1201 goto e_ctx; 1202 1203 if (in_place) { 1204 dst = src; 1205 } else { 1206 ret = ccp_init_data(&dst, cmd_q, xts->dst, xts->src_len, 1207 unit_size, DMA_FROM_DEVICE); 1208 if (ret) 1209 goto e_src; 1210 } 1211 1212 /* Send data to the CCP AES engine */ 1213 while (src.sg_wa.bytes_left) { 1214 ccp_prepare_data(&src, &dst, &op, unit_size, true); 1215 if (!src.sg_wa.bytes_left) 1216 op.eom = 1; 1217 1218 ret = cmd_q->ccp->vdata->perform->xts_aes(&op); 1219 if (ret) { 1220 cmd->engine_error = cmd_q->cmd_error; 1221 goto e_dst; 1222 } 1223 1224 ccp_process_data(&src, &dst, &op); 1225 } 1226 1227 /* Retrieve the AES context - convert from LE to BE using 1228 * 32-byte (256-bit) byteswapping 1229 */ 1230 ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, 1231 CCP_PASSTHRU_BYTESWAP_256BIT); 1232 if (ret) { 1233 cmd->engine_error = cmd_q->cmd_error; 1234 goto e_dst; 1235 } 1236 1237 /* ...but we only need AES_BLOCK_SIZE bytes */ 1238 dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE; 1239 ccp_get_dm_area(&ctx, dm_offset, xts->iv, 0, xts->iv_len); 1240 1241 e_dst: 1242 if (!in_place) 1243 ccp_free_data(&dst, cmd_q); 1244 1245 e_src: 1246 ccp_free_data(&src, cmd_q); 1247 1248 e_ctx: 1249 ccp_dm_free(&ctx); 1250 1251 e_key: 1252 ccp_dm_free(&key); 1253 1254 return ret; 1255 } 1256 1257 static noinline_for_stack int 1258 ccp_run_des3_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) 1259 { 1260 struct ccp_des3_engine *des3 = &cmd->u.des3; 1261 1262 struct ccp_dm_workarea key, ctx; 1263 struct ccp_data src, dst; 1264 struct ccp_op op; 1265 unsigned int dm_offset; 1266 unsigned int len_singlekey; 1267 bool in_place = false; 1268 int ret; 1269 1270 /* Error checks */ 1271 if (cmd_q->ccp->vdata->version < CCP_VERSION(5, 0)) 1272 return -EINVAL; 1273 1274 if (!cmd_q->ccp->vdata->perform->des3) 1275 return -EINVAL; 1276 1277 if (des3->key_len != DES3_EDE_KEY_SIZE) 1278 return -EINVAL; 1279 1280 if (((des3->mode == CCP_DES3_MODE_ECB) || 1281 (des3->mode == CCP_DES3_MODE_CBC)) && 1282 (des3->src_len & (DES3_EDE_BLOCK_SIZE - 1))) 1283 return -EINVAL; 1284 1285 if (!des3->key || !des3->src || !des3->dst) 1286 return -EINVAL; 1287 1288 if (des3->mode != CCP_DES3_MODE_ECB) { 1289 if (des3->iv_len != DES3_EDE_BLOCK_SIZE) 1290 return -EINVAL; 1291 1292 if (!des3->iv) 1293 return -EINVAL; 1294 } 1295 1296 ret = -EIO; 1297 /* Zero out all the fields of the command desc */ 1298 memset(&op, 0, sizeof(op)); 1299 1300 /* Set up the Function field */ 1301 op.cmd_q = cmd_q; 1302 op.jobid = CCP_NEW_JOBID(cmd_q->ccp); 1303 op.sb_key = cmd_q->sb_key; 1304 1305 op.init = (des3->mode == CCP_DES3_MODE_ECB) ? 0 : 1; 1306 op.u.des3.type = des3->type; 1307 op.u.des3.mode = des3->mode; 1308 op.u.des3.action = des3->action; 1309 1310 /* 1311 * All supported key sizes fit in a single (32-byte) KSB entry and 1312 * (like AES) must be in little endian format. Use the 256-bit byte 1313 * swap passthru option to convert from big endian to little endian. 1314 */ 1315 ret = ccp_init_dm_workarea(&key, cmd_q, 1316 CCP_DES3_KEY_SB_COUNT * CCP_SB_BYTES, 1317 DMA_TO_DEVICE); 1318 if (ret) 1319 return ret; 1320 1321 /* 1322 * The contents of the key triplet are in the reverse order of what 1323 * is required by the engine. Copy the 3 pieces individually to put 1324 * them where they belong. 
1325 */ 1326 dm_offset = CCP_SB_BYTES - des3->key_len; /* Basic offset */ 1327 1328 len_singlekey = des3->key_len / 3; 1329 ret = ccp_set_dm_area(&key, dm_offset + 2 * len_singlekey, 1330 des3->key, 0, len_singlekey); 1331 if (ret) 1332 goto e_key; 1333 ret = ccp_set_dm_area(&key, dm_offset + len_singlekey, 1334 des3->key, len_singlekey, len_singlekey); 1335 if (ret) 1336 goto e_key; 1337 ret = ccp_set_dm_area(&key, dm_offset, 1338 des3->key, 2 * len_singlekey, len_singlekey); 1339 if (ret) 1340 goto e_key; 1341 1342 /* Copy the key to the SB */ 1343 ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key, 1344 CCP_PASSTHRU_BYTESWAP_256BIT); 1345 if (ret) { 1346 cmd->engine_error = cmd_q->cmd_error; 1347 goto e_key; 1348 } 1349 1350 /* 1351 * The DES3 context fits in a single (32-byte) KSB entry and 1352 * must be in little endian format. Use the 256-bit byte swap 1353 * passthru option to convert from big endian to little endian. 1354 */ 1355 if (des3->mode != CCP_DES3_MODE_ECB) { 1356 op.sb_ctx = cmd_q->sb_ctx; 1357 1358 ret = ccp_init_dm_workarea(&ctx, cmd_q, 1359 CCP_DES3_CTX_SB_COUNT * CCP_SB_BYTES, 1360 DMA_BIDIRECTIONAL); 1361 if (ret) 1362 goto e_key; 1363 1364 /* Load the context into the LSB */ 1365 dm_offset = CCP_SB_BYTES - des3->iv_len; 1366 ret = ccp_set_dm_area(&ctx, dm_offset, des3->iv, 0, 1367 des3->iv_len); 1368 if (ret) 1369 goto e_ctx; 1370 1371 ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, 1372 CCP_PASSTHRU_BYTESWAP_256BIT); 1373 if (ret) { 1374 cmd->engine_error = cmd_q->cmd_error; 1375 goto e_ctx; 1376 } 1377 } 1378 1379 /* 1380 * Prepare the input and output data workareas. For in-place 1381 * operations we need to set the dma direction to BIDIRECTIONAL 1382 * and copy the src workarea to the dst workarea. 1383 */ 1384 if (sg_virt(des3->src) == sg_virt(des3->dst)) 1385 in_place = true; 1386 1387 ret = ccp_init_data(&src, cmd_q, des3->src, des3->src_len, 1388 DES3_EDE_BLOCK_SIZE, 1389 in_place ? 
DMA_BIDIRECTIONAL : DMA_TO_DEVICE); 1390 if (ret) 1391 goto e_ctx; 1392 1393 if (in_place) 1394 dst = src; 1395 else { 1396 ret = ccp_init_data(&dst, cmd_q, des3->dst, des3->src_len, 1397 DES3_EDE_BLOCK_SIZE, DMA_FROM_DEVICE); 1398 if (ret) 1399 goto e_src; 1400 } 1401 1402 /* Send data to the CCP DES3 engine */ 1403 while (src.sg_wa.bytes_left) { 1404 ccp_prepare_data(&src, &dst, &op, DES3_EDE_BLOCK_SIZE, true); 1405 if (!src.sg_wa.bytes_left) { 1406 op.eom = 1; 1407 1408 /* Since we don't retrieve the context in ECB mode 1409 * we have to wait for the operation to complete 1410 * on the last piece of data 1411 */ 1412 op.soc = 0; 1413 } 1414 1415 ret = cmd_q->ccp->vdata->perform->des3(&op); 1416 if (ret) { 1417 cmd->engine_error = cmd_q->cmd_error; 1418 goto e_dst; 1419 } 1420 1421 ccp_process_data(&src, &dst, &op); 1422 } 1423 1424 if (des3->mode != CCP_DES3_MODE_ECB) { 1425 /* Retrieve the context and make BE */ 1426 ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, 1427 CCP_PASSTHRU_BYTESWAP_256BIT); 1428 if (ret) { 1429 cmd->engine_error = cmd_q->cmd_error; 1430 goto e_dst; 1431 } 1432 1433 /* ...but we only need the last DES3_EDE_BLOCK_SIZE bytes */ 1434 ccp_get_dm_area(&ctx, dm_offset, des3->iv, 0, 1435 DES3_EDE_BLOCK_SIZE); 1436 } 1437 e_dst: 1438 if (!in_place) 1439 ccp_free_data(&dst, cmd_q); 1440 1441 e_src: 1442 ccp_free_data(&src, cmd_q); 1443 1444 e_ctx: 1445 if (des3->mode != CCP_DES3_MODE_ECB) 1446 ccp_dm_free(&ctx); 1447 1448 e_key: 1449 ccp_dm_free(&key); 1450 1451 return ret; 1452 } 1453 1454 static noinline_for_stack int 1455 ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) 1456 { 1457 struct ccp_sha_engine *sha = &cmd->u.sha; 1458 struct ccp_dm_workarea ctx; 1459 struct ccp_data src; 1460 struct ccp_op op; 1461 unsigned int ioffset, ooffset; 1462 unsigned int digest_size; 1463 int sb_count; 1464 const void *init; 1465 u64 block_size; 1466 int ctx_size; 1467 int ret; 1468 1469 switch (sha->type) { 1470 case CCP_SHA_TYPE_1: 1471 if (sha->ctx_len < SHA1_DIGEST_SIZE) 1472 return -EINVAL; 1473 block_size = SHA1_BLOCK_SIZE; 1474 break; 1475 case CCP_SHA_TYPE_224: 1476 if (sha->ctx_len < SHA224_DIGEST_SIZE) 1477 return -EINVAL; 1478 block_size = SHA224_BLOCK_SIZE; 1479 break; 1480 case CCP_SHA_TYPE_256: 1481 if (sha->ctx_len < SHA256_DIGEST_SIZE) 1482 return -EINVAL; 1483 block_size = SHA256_BLOCK_SIZE; 1484 break; 1485 case CCP_SHA_TYPE_384: 1486 if (cmd_q->ccp->vdata->version < CCP_VERSION(4, 0) 1487 || sha->ctx_len < SHA384_DIGEST_SIZE) 1488 return -EINVAL; 1489 block_size = SHA384_BLOCK_SIZE; 1490 break; 1491 case CCP_SHA_TYPE_512: 1492 if (cmd_q->ccp->vdata->version < CCP_VERSION(4, 0) 1493 || sha->ctx_len < SHA512_DIGEST_SIZE) 1494 return -EINVAL; 1495 block_size = SHA512_BLOCK_SIZE; 1496 break; 1497 default: 1498 return -EINVAL; 1499 } 1500 1501 if (!sha->ctx) 1502 return -EINVAL; 1503 1504 if (!sha->final && (sha->src_len & (block_size - 1))) 1505 return -EINVAL; 1506 1507 /* The version 3 device can't handle zero-length input */ 1508 if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0)) { 1509 1510 if (!sha->src_len) { 1511 unsigned int digest_len; 1512 const u8 *sha_zero; 1513 1514 /* Not final, just return */ 1515 if (!sha->final) 1516 return 0; 1517 1518 /* CCP can't do a zero length sha operation so the 1519 * caller must buffer the data. 1520 */ 1521 if (sha->msg_bits) 1522 return -EINVAL; 1523 1524 /* The CCP cannot perform zero-length sha operations 1525 * so the caller is required to buffer data for the 1526 * final operation. 
However, a sha operation for a 1527 * message with a total length of zero is valid so 1528 * known values are required to supply the result. 1529 */ 1530 switch (sha->type) { 1531 case CCP_SHA_TYPE_1: 1532 sha_zero = sha1_zero_message_hash; 1533 digest_len = SHA1_DIGEST_SIZE; 1534 break; 1535 case CCP_SHA_TYPE_224: 1536 sha_zero = sha224_zero_message_hash; 1537 digest_len = SHA224_DIGEST_SIZE; 1538 break; 1539 case CCP_SHA_TYPE_256: 1540 sha_zero = sha256_zero_message_hash; 1541 digest_len = SHA256_DIGEST_SIZE; 1542 break; 1543 default: 1544 return -EINVAL; 1545 } 1546 1547 scatterwalk_map_and_copy((void *)sha_zero, sha->ctx, 0, 1548 digest_len, 1); 1549 1550 return 0; 1551 } 1552 } 1553 1554 /* Set variables used throughout */ 1555 switch (sha->type) { 1556 case CCP_SHA_TYPE_1: 1557 digest_size = SHA1_DIGEST_SIZE; 1558 init = (void *) ccp_sha1_init; 1559 ctx_size = SHA1_DIGEST_SIZE; 1560 sb_count = 1; 1561 if (cmd_q->ccp->vdata->version != CCP_VERSION(3, 0)) 1562 ooffset = ioffset = CCP_SB_BYTES - SHA1_DIGEST_SIZE; 1563 else 1564 ooffset = ioffset = 0; 1565 break; 1566 case CCP_SHA_TYPE_224: 1567 digest_size = SHA224_DIGEST_SIZE; 1568 init = (void *) ccp_sha224_init; 1569 ctx_size = SHA256_DIGEST_SIZE; 1570 sb_count = 1; 1571 ioffset = 0; 1572 if (cmd_q->ccp->vdata->version != CCP_VERSION(3, 0)) 1573 ooffset = CCP_SB_BYTES - SHA224_DIGEST_SIZE; 1574 else 1575 ooffset = 0; 1576 break; 1577 case CCP_SHA_TYPE_256: 1578 digest_size = SHA256_DIGEST_SIZE; 1579 init = (void *) ccp_sha256_init; 1580 ctx_size = SHA256_DIGEST_SIZE; 1581 sb_count = 1; 1582 ooffset = ioffset = 0; 1583 break; 1584 case CCP_SHA_TYPE_384: 1585 digest_size = SHA384_DIGEST_SIZE; 1586 init = (void *) ccp_sha384_init; 1587 ctx_size = SHA512_DIGEST_SIZE; 1588 sb_count = 2; 1589 ioffset = 0; 1590 ooffset = 2 * CCP_SB_BYTES - SHA384_DIGEST_SIZE; 1591 break; 1592 case CCP_SHA_TYPE_512: 1593 digest_size = SHA512_DIGEST_SIZE; 1594 init = (void *) ccp_sha512_init; 1595 ctx_size = SHA512_DIGEST_SIZE; 1596 sb_count = 2; 1597 ooffset = ioffset = 0; 1598 break; 1599 default: 1600 ret = -EINVAL; 1601 goto e_data; 1602 } 1603 1604 /* For zero-length plaintext the src pointer is ignored; 1605 * otherwise both parts must be valid 1606 */ 1607 if (sha->src_len && !sha->src) 1608 return -EINVAL; 1609 1610 memset(&op, 0, sizeof(op)); 1611 op.cmd_q = cmd_q; 1612 op.jobid = CCP_NEW_JOBID(cmd_q->ccp); 1613 op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */ 1614 op.u.sha.type = sha->type; 1615 op.u.sha.msg_bits = sha->msg_bits; 1616 1617 /* For SHA1/224/256 the context fits in a single (32-byte) SB entry; 1618 * SHA384/512 require 2 adjacent SB slots, with the right half in the 1619 * first slot, and the left half in the second. Each portion must then 1620 * be in little endian format: use the 256-bit byte swap option. 
1621 */ 1622 ret = ccp_init_dm_workarea(&ctx, cmd_q, sb_count * CCP_SB_BYTES, 1623 DMA_BIDIRECTIONAL); 1624 if (ret) 1625 return ret; 1626 if (sha->first) { 1627 switch (sha->type) { 1628 case CCP_SHA_TYPE_1: 1629 case CCP_SHA_TYPE_224: 1630 case CCP_SHA_TYPE_256: 1631 memcpy(ctx.address + ioffset, init, ctx_size); 1632 break; 1633 case CCP_SHA_TYPE_384: 1634 case CCP_SHA_TYPE_512: 1635 memcpy(ctx.address + ctx_size / 2, init, 1636 ctx_size / 2); 1637 memcpy(ctx.address, init + ctx_size / 2, 1638 ctx_size / 2); 1639 break; 1640 default: 1641 ret = -EINVAL; 1642 goto e_ctx; 1643 } 1644 } else { 1645 /* Restore the context */ 1646 ret = ccp_set_dm_area(&ctx, 0, sha->ctx, 0, 1647 sb_count * CCP_SB_BYTES); 1648 if (ret) 1649 goto e_ctx; 1650 } 1651 1652 ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, 1653 CCP_PASSTHRU_BYTESWAP_256BIT); 1654 if (ret) { 1655 cmd->engine_error = cmd_q->cmd_error; 1656 goto e_ctx; 1657 } 1658 1659 if (sha->src) { 1660 /* Send data to the CCP SHA engine; block_size is set above */ 1661 ret = ccp_init_data(&src, cmd_q, sha->src, sha->src_len, 1662 block_size, DMA_TO_DEVICE); 1663 if (ret) 1664 goto e_ctx; 1665 1666 while (src.sg_wa.bytes_left) { 1667 ccp_prepare_data(&src, NULL, &op, block_size, false); 1668 if (sha->final && !src.sg_wa.bytes_left) 1669 op.eom = 1; 1670 1671 ret = cmd_q->ccp->vdata->perform->sha(&op); 1672 if (ret) { 1673 cmd->engine_error = cmd_q->cmd_error; 1674 goto e_data; 1675 } 1676 1677 ccp_process_data(&src, NULL, &op); 1678 } 1679 } else { 1680 op.eom = 1; 1681 ret = cmd_q->ccp->vdata->perform->sha(&op); 1682 if (ret) { 1683 cmd->engine_error = cmd_q->cmd_error; 1684 goto e_data; 1685 } 1686 } 1687 1688 /* Retrieve the SHA context - convert from LE to BE using 1689 * 32-byte (256-bit) byteswapping to BE 1690 */ 1691 ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, 1692 CCP_PASSTHRU_BYTESWAP_256BIT); 1693 if (ret) { 1694 cmd->engine_error = cmd_q->cmd_error; 1695 goto e_data; 1696 } 1697 1698 if (sha->final) { 1699 /* Finishing up, so get the digest */ 1700 switch (sha->type) { 1701 case CCP_SHA_TYPE_1: 1702 case CCP_SHA_TYPE_224: 1703 case CCP_SHA_TYPE_256: 1704 ccp_get_dm_area(&ctx, ooffset, 1705 sha->ctx, 0, 1706 digest_size); 1707 break; 1708 case CCP_SHA_TYPE_384: 1709 case CCP_SHA_TYPE_512: 1710 ccp_get_dm_area(&ctx, 0, 1711 sha->ctx, LSB_ITEM_SIZE - ooffset, 1712 LSB_ITEM_SIZE); 1713 ccp_get_dm_area(&ctx, LSB_ITEM_SIZE + ooffset, 1714 sha->ctx, 0, 1715 LSB_ITEM_SIZE - ooffset); 1716 break; 1717 default: 1718 ret = -EINVAL; 1719 goto e_ctx; 1720 } 1721 } else { 1722 /* Stash the context */ 1723 ccp_get_dm_area(&ctx, 0, sha->ctx, 0, 1724 sb_count * CCP_SB_BYTES); 1725 } 1726 1727 if (sha->final && sha->opad) { 1728 /* HMAC operation, recursively perform final SHA */ 1729 struct ccp_cmd hmac_cmd; 1730 struct scatterlist sg; 1731 u8 *hmac_buf; 1732 1733 if (sha->opad_len != block_size) { 1734 ret = -EINVAL; 1735 goto e_data; 1736 } 1737 1738 hmac_buf = kmalloc(block_size + digest_size, GFP_KERNEL); 1739 if (!hmac_buf) { 1740 ret = -ENOMEM; 1741 goto e_data; 1742 } 1743 sg_init_one(&sg, hmac_buf, block_size + digest_size); 1744 1745 scatterwalk_map_and_copy(hmac_buf, sha->opad, 0, block_size, 0); 1746 switch (sha->type) { 1747 case CCP_SHA_TYPE_1: 1748 case CCP_SHA_TYPE_224: 1749 case CCP_SHA_TYPE_256: 1750 memcpy(hmac_buf + block_size, 1751 ctx.address + ooffset, 1752 digest_size); 1753 break; 1754 case CCP_SHA_TYPE_384: 1755 case CCP_SHA_TYPE_512: 1756 memcpy(hmac_buf + block_size, 1757 ctx.address + LSB_ITEM_SIZE + 
ooffset, 1758 LSB_ITEM_SIZE); 1759 memcpy(hmac_buf + block_size + 1760 (LSB_ITEM_SIZE - ooffset), 1761 ctx.address, 1762 LSB_ITEM_SIZE); 1763 break; 1764 default: 1765 ret = -EINVAL; 1766 goto e_ctx; 1767 } 1768 1769 memset(&hmac_cmd, 0, sizeof(hmac_cmd)); 1770 hmac_cmd.engine = CCP_ENGINE_SHA; 1771 hmac_cmd.u.sha.type = sha->type; 1772 hmac_cmd.u.sha.ctx = sha->ctx; 1773 hmac_cmd.u.sha.ctx_len = sha->ctx_len; 1774 hmac_cmd.u.sha.src = &sg; 1775 hmac_cmd.u.sha.src_len = block_size + digest_size; 1776 hmac_cmd.u.sha.opad = NULL; 1777 hmac_cmd.u.sha.opad_len = 0; 1778 hmac_cmd.u.sha.first = 1; 1779 hmac_cmd.u.sha.final = 1; 1780 hmac_cmd.u.sha.msg_bits = (block_size + digest_size) << 3; 1781 1782 ret = ccp_run_sha_cmd(cmd_q, &hmac_cmd); 1783 if (ret) 1784 cmd->engine_error = hmac_cmd.engine_error; 1785 1786 kfree(hmac_buf); 1787 } 1788 1789 e_data: 1790 if (sha->src) 1791 ccp_free_data(&src, cmd_q); 1792 1793 e_ctx: 1794 ccp_dm_free(&ctx); 1795 1796 return ret; 1797 } 1798 1799 static noinline_for_stack int 1800 ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) 1801 { 1802 struct ccp_rsa_engine *rsa = &cmd->u.rsa; 1803 struct ccp_dm_workarea exp, src, dst; 1804 struct ccp_op op; 1805 unsigned int sb_count, i_len, o_len; 1806 int ret; 1807 1808 /* Check against the maximum allowable size, in bits */ 1809 if (rsa->key_size > cmd_q->ccp->vdata->rsamax) 1810 return -EINVAL; 1811 1812 if (!rsa->exp || !rsa->mod || !rsa->src || !rsa->dst) 1813 return -EINVAL; 1814 1815 memset(&op, 0, sizeof(op)); 1816 op.cmd_q = cmd_q; 1817 op.jobid = CCP_NEW_JOBID(cmd_q->ccp); 1818 1819 /* The RSA modulus must precede the message being acted upon, so 1820 * it must be copied to a DMA area where the message and the 1821 * modulus can be concatenated. Therefore the input buffer 1822 * length required is twice the output buffer length (which 1823 * must be a multiple of 256-bits). Compute o_len, i_len in bytes. 1824 * Buffer sizes must be a multiple of 32 bytes; rounding up may be 1825 * required. 1826 */ 1827 o_len = 32 * ((rsa->key_size + 255) / 256); 1828 i_len = o_len * 2; 1829 1830 sb_count = 0; 1831 if (cmd_q->ccp->vdata->version < CCP_VERSION(5, 0)) { 1832 /* sb_count is the number of storage block slots required 1833 * for the modulus. 1834 */ 1835 sb_count = o_len / CCP_SB_BYTES; 1836 op.sb_key = cmd_q->ccp->vdata->perform->sballoc(cmd_q, 1837 sb_count); 1838 if (!op.sb_key) 1839 return -EIO; 1840 } else { 1841 /* A version 5 device allows a modulus size that will not fit 1842 * in the LSB, so the command will transfer it from memory. 1843 * Set the sb key to the default, even though it's not used. 1844 */ 1845 op.sb_key = cmd_q->sb_key; 1846 } 1847 1848 /* The RSA exponent must be in little endian format. Reverse its 1849 * byte order. 1850 */ 1851 ret = ccp_init_dm_workarea(&exp, cmd_q, o_len, DMA_TO_DEVICE); 1852 if (ret) 1853 goto e_sb; 1854 1855 ret = ccp_reverse_set_dm_area(&exp, 0, rsa->exp, 0, rsa->exp_len); 1856 if (ret) 1857 goto e_exp; 1858 1859 if (cmd_q->ccp->vdata->version < CCP_VERSION(5, 0)) { 1860 /* Copy the exponent to the local storage block, using 1861 * as many 32-byte blocks as were allocated above. It's 1862 * already little endian, so no further change is required. 1863 */ 1864 ret = ccp_copy_to_sb(cmd_q, &exp, op.jobid, op.sb_key, 1865 CCP_PASSTHRU_BYTESWAP_NOOP); 1866 if (ret) { 1867 cmd->engine_error = cmd_q->cmd_error; 1868 goto e_exp; 1869 } 1870 } else { 1871 /* The exponent can be retrieved from memory via DMA. 
*/ 1872 op.exp.u.dma.address = exp.dma.address; 1873 op.exp.u.dma.offset = 0; 1874 } 1875 1876 /* Concatenate the modulus and the message. Both the modulus and 1877 * the operands must be in little endian format. Since the input 1878 * is in big endian format it must be converted. 1879 */ 1880 ret = ccp_init_dm_workarea(&src, cmd_q, i_len, DMA_TO_DEVICE); 1881 if (ret) 1882 goto e_exp; 1883 1884 ret = ccp_reverse_set_dm_area(&src, 0, rsa->mod, 0, rsa->mod_len); 1885 if (ret) 1886 goto e_src; 1887 ret = ccp_reverse_set_dm_area(&src, o_len, rsa->src, 0, rsa->src_len); 1888 if (ret) 1889 goto e_src; 1890 1891 /* Prepare the output area for the operation */ 1892 ret = ccp_init_dm_workarea(&dst, cmd_q, o_len, DMA_FROM_DEVICE); 1893 if (ret) 1894 goto e_src; 1895 1896 op.soc = 1; 1897 op.src.u.dma.address = src.dma.address; 1898 op.src.u.dma.offset = 0; 1899 op.src.u.dma.length = i_len; 1900 op.dst.u.dma.address = dst.dma.address; 1901 op.dst.u.dma.offset = 0; 1902 op.dst.u.dma.length = o_len; 1903 1904 op.u.rsa.mod_size = rsa->key_size; 1905 op.u.rsa.input_len = i_len; 1906 1907 ret = cmd_q->ccp->vdata->perform->rsa(&op); 1908 if (ret) { 1909 cmd->engine_error = cmd_q->cmd_error; 1910 goto e_dst; 1911 } 1912 1913 ccp_reverse_get_dm_area(&dst, 0, rsa->dst, 0, rsa->mod_len); 1914 1915 e_dst: 1916 ccp_dm_free(&dst); 1917 1918 e_src: 1919 ccp_dm_free(&src); 1920 1921 e_exp: 1922 ccp_dm_free(&exp); 1923 1924 e_sb: 1925 if (sb_count) 1926 cmd_q->ccp->vdata->perform->sbfree(cmd_q, op.sb_key, sb_count); 1927 1928 return ret; 1929 } 1930 1931 static noinline_for_stack int 1932 ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) 1933 { 1934 struct ccp_passthru_engine *pt = &cmd->u.passthru; 1935 struct ccp_dm_workarea mask; 1936 struct ccp_data src, dst; 1937 struct ccp_op op; 1938 bool in_place = false; 1939 unsigned int i; 1940 int ret = 0; 1941 1942 if (!pt->final && (pt->src_len & (CCP_PASSTHRU_BLOCKSIZE - 1))) 1943 return -EINVAL; 1944 1945 if (!pt->src || !pt->dst) 1946 return -EINVAL; 1947 1948 if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) { 1949 if (pt->mask_len != CCP_PASSTHRU_MASKSIZE) 1950 return -EINVAL; 1951 if (!pt->mask) 1952 return -EINVAL; 1953 } 1954 1955 BUILD_BUG_ON(CCP_PASSTHRU_SB_COUNT != 1); 1956 1957 memset(&op, 0, sizeof(op)); 1958 op.cmd_q = cmd_q; 1959 op.jobid = CCP_NEW_JOBID(cmd_q->ccp); 1960 1961 if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) { 1962 /* Load the mask */ 1963 op.sb_key = cmd_q->sb_key; 1964 1965 ret = ccp_init_dm_workarea(&mask, cmd_q, 1966 CCP_PASSTHRU_SB_COUNT * 1967 CCP_SB_BYTES, 1968 DMA_TO_DEVICE); 1969 if (ret) 1970 return ret; 1971 1972 ret = ccp_set_dm_area(&mask, 0, pt->mask, 0, pt->mask_len); 1973 if (ret) 1974 goto e_mask; 1975 ret = ccp_copy_to_sb(cmd_q, &mask, op.jobid, op.sb_key, 1976 CCP_PASSTHRU_BYTESWAP_NOOP); 1977 if (ret) { 1978 cmd->engine_error = cmd_q->cmd_error; 1979 goto e_mask; 1980 } 1981 } 1982 1983 /* Prepare the input and output data workareas. For in-place 1984 * operations we need to set the dma direction to BIDIRECTIONAL 1985 * and copy the src workarea to the dst workarea. 1986 */ 1987 if (sg_virt(pt->src) == sg_virt(pt->dst)) 1988 in_place = true; 1989 1990 ret = ccp_init_data(&src, cmd_q, pt->src, pt->src_len, 1991 CCP_PASSTHRU_MASKSIZE, 1992 in_place ? 
DMA_BIDIRECTIONAL : DMA_TO_DEVICE); 1993 if (ret) 1994 goto e_mask; 1995 1996 if (in_place) { 1997 dst = src; 1998 } else { 1999 ret = ccp_init_data(&dst, cmd_q, pt->dst, pt->src_len, 2000 CCP_PASSTHRU_MASKSIZE, DMA_FROM_DEVICE); 2001 if (ret) 2002 goto e_src; 2003 } 2004 2005 /* Send data to the CCP Passthru engine 2006 * Because the CCP engine works on a single source and destination 2007 * dma address at a time, each entry in the source scatterlist 2008 * (after the dma_map_sg call) must be less than or equal to the 2009 * (remaining) length in the destination scatterlist entry and the 2010 * length must be a multiple of CCP_PASSTHRU_BLOCKSIZE 2011 */ 2012 dst.sg_wa.sg_used = 0; 2013 for (i = 1; i <= src.sg_wa.dma_count; i++) { 2014 if (!dst.sg_wa.sg || 2015 (dst.sg_wa.sg->length < src.sg_wa.sg->length)) { 2016 ret = -EINVAL; 2017 goto e_dst; 2018 } 2019 2020 if (i == src.sg_wa.dma_count) { 2021 op.eom = 1; 2022 op.soc = 1; 2023 } 2024 2025 op.src.type = CCP_MEMTYPE_SYSTEM; 2026 op.src.u.dma.address = sg_dma_address(src.sg_wa.sg); 2027 op.src.u.dma.offset = 0; 2028 op.src.u.dma.length = sg_dma_len(src.sg_wa.sg); 2029 2030 op.dst.type = CCP_MEMTYPE_SYSTEM; 2031 op.dst.u.dma.address = sg_dma_address(dst.sg_wa.sg); 2032 op.dst.u.dma.offset = dst.sg_wa.sg_used; 2033 op.dst.u.dma.length = op.src.u.dma.length; 2034 2035 ret = cmd_q->ccp->vdata->perform->passthru(&op); 2036 if (ret) { 2037 cmd->engine_error = cmd_q->cmd_error; 2038 goto e_dst; 2039 } 2040 2041 dst.sg_wa.sg_used += src.sg_wa.sg->length; 2042 if (dst.sg_wa.sg_used == dst.sg_wa.sg->length) { 2043 dst.sg_wa.sg = sg_next(dst.sg_wa.sg); 2044 dst.sg_wa.sg_used = 0; 2045 } 2046 src.sg_wa.sg = sg_next(src.sg_wa.sg); 2047 } 2048 2049 e_dst: 2050 if (!in_place) 2051 ccp_free_data(&dst, cmd_q); 2052 2053 e_src: 2054 ccp_free_data(&src, cmd_q); 2055 2056 e_mask: 2057 if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) 2058 ccp_dm_free(&mask); 2059 2060 return ret; 2061 } 2062 2063 static noinline_for_stack int 2064 ccp_run_passthru_nomap_cmd(struct ccp_cmd_queue *cmd_q, 2065 struct ccp_cmd *cmd) 2066 { 2067 struct ccp_passthru_nomap_engine *pt = &cmd->u.passthru_nomap; 2068 struct ccp_dm_workarea mask; 2069 struct ccp_op op; 2070 int ret; 2071 2072 if (!pt->final && (pt->src_len & (CCP_PASSTHRU_BLOCKSIZE - 1))) 2073 return -EINVAL; 2074 2075 if (!pt->src_dma || !pt->dst_dma) 2076 return -EINVAL; 2077 2078 if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) { 2079 if (pt->mask_len != CCP_PASSTHRU_MASKSIZE) 2080 return -EINVAL; 2081 if (!pt->mask) 2082 return -EINVAL; 2083 } 2084 2085 BUILD_BUG_ON(CCP_PASSTHRU_SB_COUNT != 1); 2086 2087 memset(&op, 0, sizeof(op)); 2088 op.cmd_q = cmd_q; 2089 op.jobid = CCP_NEW_JOBID(cmd_q->ccp); 2090 2091 if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) { 2092 /* Load the mask */ 2093 op.sb_key = cmd_q->sb_key; 2094 2095 mask.length = pt->mask_len; 2096 mask.dma.address = pt->mask; 2097 mask.dma.length = pt->mask_len; 2098 2099 ret = ccp_copy_to_sb(cmd_q, &mask, op.jobid, op.sb_key, 2100 CCP_PASSTHRU_BYTESWAP_NOOP); 2101 if (ret) { 2102 cmd->engine_error = cmd_q->cmd_error; 2103 return ret; 2104 } 2105 } 2106 2107 /* Send data to the CCP Passthru engine */ 2108 op.eom = 1; 2109 op.soc = 1; 2110 2111 op.src.type = CCP_MEMTYPE_SYSTEM; 2112 op.src.u.dma.address = pt->src_dma; 2113 op.src.u.dma.offset = 0; 2114 op.src.u.dma.length = pt->src_len; 2115 2116 op.dst.type = CCP_MEMTYPE_SYSTEM; 2117 op.dst.u.dma.address = pt->dst_dma; 2118 op.dst.u.dma.offset = 0; 2119 op.dst.u.dma.length = pt->src_len; 2120 2121 ret = 
static noinline_for_stack int
ccp_run_passthru_nomap_cmd(struct ccp_cmd_queue *cmd_q,
			   struct ccp_cmd *cmd)
{
	struct ccp_passthru_nomap_engine *pt = &cmd->u.passthru_nomap;
	struct ccp_dm_workarea mask;
	struct ccp_op op;
	int ret;

	if (!pt->final && (pt->src_len & (CCP_PASSTHRU_BLOCKSIZE - 1)))
		return -EINVAL;

	if (!pt->src_dma || !pt->dst_dma)
		return -EINVAL;

	if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
		if (pt->mask_len != CCP_PASSTHRU_MASKSIZE)
			return -EINVAL;
		if (!pt->mask)
			return -EINVAL;
	}

	BUILD_BUG_ON(CCP_PASSTHRU_SB_COUNT != 1);

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);

	if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
		/* Load the mask */
		op.sb_key = cmd_q->sb_key;

		mask.length = pt->mask_len;
		mask.dma.address = pt->mask;
		mask.dma.length = pt->mask_len;

		ret = ccp_copy_to_sb(cmd_q, &mask, op.jobid, op.sb_key,
				     CCP_PASSTHRU_BYTESWAP_NOOP);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			return ret;
		}
	}

	/* Send data to the CCP Passthru engine */
	op.eom = 1;
	op.soc = 1;

	op.src.type = CCP_MEMTYPE_SYSTEM;
	op.src.u.dma.address = pt->src_dma;
	op.src.u.dma.offset = 0;
	op.src.u.dma.length = pt->src_len;

	op.dst.type = CCP_MEMTYPE_SYSTEM;
	op.dst.u.dma.address = pt->dst_dma;
	op.dst.u.dma.offset = 0;
	op.dst.u.dma.length = pt->src_len;

	ret = cmd_q->ccp->vdata->perform->passthru(&op);
	if (ret)
		cmd->engine_error = cmd_q->cmd_error;

	return ret;
}

static int ccp_run_ecc_mm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_ecc_engine *ecc = &cmd->u.ecc;
	struct ccp_dm_workarea src, dst;
	struct ccp_op op;
	int ret;
	u8 *save;

	if (!ecc->u.mm.operand_1 ||
	    (ecc->u.mm.operand_1_len > CCP_ECC_MODULUS_BYTES))
		return -EINVAL;

	if (ecc->function != CCP_ECC_FUNCTION_MINV_384BIT)
		if (!ecc->u.mm.operand_2 ||
		    (ecc->u.mm.operand_2_len > CCP_ECC_MODULUS_BYTES))
			return -EINVAL;

	if (!ecc->u.mm.result ||
	    (ecc->u.mm.result_len < CCP_ECC_MODULUS_BYTES))
		return -EINVAL;

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);

	/* Concatenate the modulus and the operands. Both the modulus and
	 * the operands must be in little endian format. Since the input
	 * is in big endian format it must be converted and placed in a
	 * fixed length buffer.
	 */
	ret = ccp_init_dm_workarea(&src, cmd_q, CCP_ECC_SRC_BUF_SIZE,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	/* Save the workarea address since it is updated in order to perform
	 * the concatenation
	 */
	save = src.address;

	/* Copy the ECC modulus */
	ret = ccp_reverse_set_dm_area(&src, 0, ecc->mod, 0, ecc->mod_len);
	if (ret)
		goto e_src;
	src.address += CCP_ECC_OPERAND_SIZE;

	/* Copy the first operand */
	ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.mm.operand_1, 0,
				      ecc->u.mm.operand_1_len);
	if (ret)
		goto e_src;
	src.address += CCP_ECC_OPERAND_SIZE;

	if (ecc->function != CCP_ECC_FUNCTION_MINV_384BIT) {
		/* Copy the second operand */
		ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.mm.operand_2, 0,
					      ecc->u.mm.operand_2_len);
		if (ret)
			goto e_src;
		src.address += CCP_ECC_OPERAND_SIZE;
	}

	/* Restore the workarea address */
	src.address = save;

	/* Prepare the output area for the operation */
	ret = ccp_init_dm_workarea(&dst, cmd_q, CCP_ECC_DST_BUF_SIZE,
				   DMA_FROM_DEVICE);
	if (ret)
		goto e_src;

	op.soc = 1;
	op.src.u.dma.address = src.dma.address;
	op.src.u.dma.offset = 0;
	op.src.u.dma.length = src.length;
	op.dst.u.dma.address = dst.dma.address;
	op.dst.u.dma.offset = 0;
	op.dst.u.dma.length = dst.length;

	op.u.ecc.function = cmd->u.ecc.function;

	ret = cmd_q->ccp->vdata->perform->ecc(&op);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_dst;
	}

	ecc->ecc_result = le16_to_cpup(
		(const __le16 *)(dst.address + CCP_ECC_RESULT_OFFSET));
	if (!(ecc->ecc_result & CCP_ECC_RESULT_SUCCESS)) {
		ret = -EIO;
		goto e_dst;
	}

	/* Save the ECC result */
	ccp_reverse_get_dm_area(&dst, 0, ecc->u.mm.result, 0,
				CCP_ECC_MODULUS_BYTES);

e_dst:
	ccp_dm_free(&dst);

e_src:
	ccp_dm_free(&src);

	return ret;
}

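/* Illustrative layout of the fixed-size source buffer assembled by
 * ccp_run_ecc_mm_cmd() above.  Each slot is CCP_ECC_OPERAND_SIZE bytes and
 * every value is byte-reversed into little endian:
 *
 *	offset 0                        : modulus
 *	offset CCP_ECC_OPERAND_SIZE     : operand 1
 *	offset 2 * CCP_ECC_OPERAND_SIZE : operand 2 (not copied for
 *					  CCP_ECC_FUNCTION_MINV_384BIT)
 */
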
static int ccp_run_ecc_pm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_ecc_engine *ecc = &cmd->u.ecc;
	struct ccp_dm_workarea src, dst;
	struct ccp_op op;
	int ret;
	u8 *save;

	if (!ecc->u.pm.point_1.x ||
	    (ecc->u.pm.point_1.x_len > CCP_ECC_MODULUS_BYTES) ||
	    !ecc->u.pm.point_1.y ||
	    (ecc->u.pm.point_1.y_len > CCP_ECC_MODULUS_BYTES))
		return -EINVAL;

	if (ecc->function == CCP_ECC_FUNCTION_PADD_384BIT) {
		if (!ecc->u.pm.point_2.x ||
		    (ecc->u.pm.point_2.x_len > CCP_ECC_MODULUS_BYTES) ||
		    !ecc->u.pm.point_2.y ||
		    (ecc->u.pm.point_2.y_len > CCP_ECC_MODULUS_BYTES))
			return -EINVAL;
	} else {
		if (!ecc->u.pm.domain_a ||
		    (ecc->u.pm.domain_a_len > CCP_ECC_MODULUS_BYTES))
			return -EINVAL;

		if (ecc->function == CCP_ECC_FUNCTION_PMUL_384BIT)
			if (!ecc->u.pm.scalar ||
			    (ecc->u.pm.scalar_len > CCP_ECC_MODULUS_BYTES))
				return -EINVAL;
	}

	if (!ecc->u.pm.result.x ||
	    (ecc->u.pm.result.x_len < CCP_ECC_MODULUS_BYTES) ||
	    !ecc->u.pm.result.y ||
	    (ecc->u.pm.result.y_len < CCP_ECC_MODULUS_BYTES))
		return -EINVAL;

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);

	/* Concatenate the modulus and the operands. Both the modulus and
	 * the operands must be in little endian format. Since the input
	 * is in big endian format it must be converted and placed in a
	 * fixed length buffer.
	 */
	ret = ccp_init_dm_workarea(&src, cmd_q, CCP_ECC_SRC_BUF_SIZE,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	/* Save the workarea address since it is updated in order to perform
	 * the concatenation
	 */
	save = src.address;

	/* Copy the ECC modulus */
	ret = ccp_reverse_set_dm_area(&src, 0, ecc->mod, 0, ecc->mod_len);
	if (ret)
		goto e_src;
	src.address += CCP_ECC_OPERAND_SIZE;

	/* Copy the first point X and Y coordinate */
	ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.point_1.x, 0,
				      ecc->u.pm.point_1.x_len);
	if (ret)
		goto e_src;
	src.address += CCP_ECC_OPERAND_SIZE;
	ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.point_1.y, 0,
				      ecc->u.pm.point_1.y_len);
	if (ret)
		goto e_src;
	src.address += CCP_ECC_OPERAND_SIZE;

	/* Set the first point Z coordinate to 1 */
	*src.address = 0x01;
	src.address += CCP_ECC_OPERAND_SIZE;

	if (ecc->function == CCP_ECC_FUNCTION_PADD_384BIT) {
		/* Copy the second point X and Y coordinate */
		ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.point_2.x, 0,
					      ecc->u.pm.point_2.x_len);
		if (ret)
			goto e_src;
		src.address += CCP_ECC_OPERAND_SIZE;
		ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.point_2.y, 0,
					      ecc->u.pm.point_2.y_len);
		if (ret)
			goto e_src;
		src.address += CCP_ECC_OPERAND_SIZE;

		/* Set the second point Z coordinate to 1 */
		*src.address = 0x01;
		src.address += CCP_ECC_OPERAND_SIZE;
	} else {
		/* Copy the Domain "a" parameter */
		ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.domain_a, 0,
					      ecc->u.pm.domain_a_len);
		if (ret)
			goto e_src;
		src.address += CCP_ECC_OPERAND_SIZE;

		if (ecc->function == CCP_ECC_FUNCTION_PMUL_384BIT) {
			/* Copy the scalar value */
			ret = ccp_reverse_set_dm_area(&src, 0,
						      ecc->u.pm.scalar, 0,
						      ecc->u.pm.scalar_len);
			if (ret)
				goto e_src;
			src.address += CCP_ECC_OPERAND_SIZE;
		}
	}

	/* Restore the workarea address */
	src.address = save;

	/* Prepare the output area for the operation */
	ret = ccp_init_dm_workarea(&dst, cmd_q, CCP_ECC_DST_BUF_SIZE,
				   DMA_FROM_DEVICE);
	if (ret)
		goto e_src;

	op.soc = 1;
	op.src.u.dma.address = src.dma.address;
	op.src.u.dma.offset = 0;
	op.src.u.dma.length = src.length;
	op.dst.u.dma.address = dst.dma.address;
	op.dst.u.dma.offset = 0;
	op.dst.u.dma.length = dst.length;

	op.u.ecc.function = cmd->u.ecc.function;

	ret = cmd_q->ccp->vdata->perform->ecc(&op);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_dst;
	}

	ecc->ecc_result = le16_to_cpup(
		(const __le16 *)(dst.address + CCP_ECC_RESULT_OFFSET));
	if (!(ecc->ecc_result & CCP_ECC_RESULT_SUCCESS)) {
		ret = -EIO;
		goto e_dst;
	}

	/* Save the workarea address since it is updated as we walk through
	 * to copy the point math result
	 */
	save = dst.address;

	/* Save the ECC result X and Y coordinates */
	ccp_reverse_get_dm_area(&dst, 0, ecc->u.pm.result.x, 0,
				CCP_ECC_MODULUS_BYTES);
	dst.address += CCP_ECC_OUTPUT_SIZE;
	ccp_reverse_get_dm_area(&dst, 0, ecc->u.pm.result.y, 0,
				CCP_ECC_MODULUS_BYTES);
	dst.address += CCP_ECC_OUTPUT_SIZE;

	/* Restore the workarea address */
	dst.address = save;

e_dst:
	ccp_dm_free(&dst);

e_src:
	ccp_dm_free(&src);

	return ret;
}

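/* Illustrative layout of the source buffer assembled by
 * ccp_run_ecc_pm_cmd() above.  Each slot is CCP_ECC_OPERAND_SIZE bytes and
 * every value is byte-reversed into little endian:
 *
 *	[ modulus ][ P1.x ][ P1.y ][ Z = 1 ]  followed by either
 *	[ P2.x ][ P2.y ][ Z = 1 ]             for point add (PADD), or
 *	[ domain a ][ scalar ]                for point multiply (PMUL);
 *	                                      the scalar slot is skipped
 *	                                      for point double (PDBL).
 */
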
static noinline_for_stack int
ccp_run_ecc_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_ecc_engine *ecc = &cmd->u.ecc;

	ecc->ecc_result = 0;

	if (!ecc->mod ||
	    (ecc->mod_len > CCP_ECC_MODULUS_BYTES))
		return -EINVAL;

	switch (ecc->function) {
	case CCP_ECC_FUNCTION_MMUL_384BIT:
	case CCP_ECC_FUNCTION_MADD_384BIT:
	case CCP_ECC_FUNCTION_MINV_384BIT:
		return ccp_run_ecc_mm_cmd(cmd_q, cmd);

	case CCP_ECC_FUNCTION_PADD_384BIT:
	case CCP_ECC_FUNCTION_PMUL_384BIT:
	case CCP_ECC_FUNCTION_PDBL_384BIT:
		return ccp_run_ecc_pm_cmd(cmd_q, cmd);

	default:
		return -EINVAL;
	}
}

int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	int ret;

	cmd->engine_error = 0;
	cmd_q->cmd_error = 0;
	cmd_q->int_rcvd = 0;
	cmd_q->free_slots = cmd_q->ccp->vdata->perform->get_free_slots(cmd_q);

	switch (cmd->engine) {
	case CCP_ENGINE_AES:
		switch (cmd->u.aes.mode) {
		case CCP_AES_MODE_CMAC:
			ret = ccp_run_aes_cmac_cmd(cmd_q, cmd);
			break;
		case CCP_AES_MODE_GCM:
			ret = ccp_run_aes_gcm_cmd(cmd_q, cmd);
			break;
		default:
			ret = ccp_run_aes_cmd(cmd_q, cmd);
			break;
		}
		break;
	case CCP_ENGINE_XTS_AES_128:
		ret = ccp_run_xts_aes_cmd(cmd_q, cmd);
		break;
	case CCP_ENGINE_DES3:
		ret = ccp_run_des3_cmd(cmd_q, cmd);
		break;
	case CCP_ENGINE_SHA:
		ret = ccp_run_sha_cmd(cmd_q, cmd);
		break;
	case CCP_ENGINE_RSA:
		ret = ccp_run_rsa_cmd(cmd_q, cmd);
		break;
	case CCP_ENGINE_PASSTHRU:
		if (cmd->flags & CCP_CMD_PASSTHRU_NO_DMA_MAP)
			ret = ccp_run_passthru_nomap_cmd(cmd_q, cmd);
		else
			ret = ccp_run_passthru_cmd(cmd_q, cmd);
		break;
	case CCP_ENGINE_ECC:
		ret = ccp_run_ecc_cmd(cmd_q, cmd);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
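
/* Illustrative sketch (not compiled as part of this file) of an ECC
 * modular-multiply request that would be dispatched through ccp_run_cmd()
 * and ccp_run_ecc_cmd() above.  The ccp_enqueue_cmd() entry point and the
 * struct field names are assumed from <linux/ccp.h>; the buffers, lengths
 * and completion handler are hypothetical.  All operands are supplied big
 * endian, as the handlers above convert them to little endian internally.
 *
 *	struct scatterlist sg_mod, sg_op1, sg_op2, sg_res;
 *	struct ccp_cmd cmd;
 *
 *	sg_init_one(&sg_mod, mod_be, mod_len);
 *	sg_init_one(&sg_op1, op1_be, op1_len);
 *	sg_init_one(&sg_op2, op2_be, op2_len);
 *	sg_init_one(&sg_res, result, result_len);
 *
 *	memset(&cmd, 0, sizeof(cmd));
 *	cmd.engine = CCP_ENGINE_ECC;
 *	cmd.u.ecc.function = CCP_ECC_FUNCTION_MMUL_384BIT;
 *	cmd.u.ecc.mod = &sg_mod;
 *	cmd.u.ecc.mod_len = mod_len;
 *	cmd.u.ecc.u.mm.operand_1 = &sg_op1;
 *	cmd.u.ecc.u.mm.operand_1_len = op1_len;
 *	cmd.u.ecc.u.mm.operand_2 = &sg_op2;
 *	cmd.u.ecc.u.mm.operand_2_len = op2_len;
 *	cmd.u.ecc.u.mm.result = &sg_res;
 *	cmd.u.ecc.u.mm.result_len = result_len;
 *	cmd.callback = my_done;
 *	cmd.data = my_ctx;
 *
 *	ret = ccp_enqueue_cmd(&cmd);
 *
 * The modulus and operand lengths must not exceed CCP_ECC_MODULUS_BYTES
 * (48 bytes for the 384-bit functions) and the result buffer must be at
 * least that large, per the checks in ccp_run_ecc_cmd() and
 * ccp_run_ecc_mm_cmd() above.
 */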