1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * AMD Cryptographic Coprocessor (CCP) driver 4 * 5 * Copyright (C) 2013-2019 Advanced Micro Devices, Inc. 6 * 7 * Author: Tom Lendacky <thomas.lendacky@amd.com> 8 * Author: Gary R Hook <gary.hook@amd.com> 9 */ 10 11 #include <linux/module.h> 12 #include <linux/kernel.h> 13 #include <linux/pci.h> 14 #include <linux/interrupt.h> 15 #include <crypto/scatterwalk.h> 16 #include <crypto/des.h> 17 #include <linux/ccp.h> 18 19 #include "ccp-dev.h" 20 21 /* SHA initial context values */ 22 static const __be32 ccp_sha1_init[SHA1_DIGEST_SIZE / sizeof(__be32)] = { 23 cpu_to_be32(SHA1_H0), cpu_to_be32(SHA1_H1), 24 cpu_to_be32(SHA1_H2), cpu_to_be32(SHA1_H3), 25 cpu_to_be32(SHA1_H4), 26 }; 27 28 static const __be32 ccp_sha224_init[SHA256_DIGEST_SIZE / sizeof(__be32)] = { 29 cpu_to_be32(SHA224_H0), cpu_to_be32(SHA224_H1), 30 cpu_to_be32(SHA224_H2), cpu_to_be32(SHA224_H3), 31 cpu_to_be32(SHA224_H4), cpu_to_be32(SHA224_H5), 32 cpu_to_be32(SHA224_H6), cpu_to_be32(SHA224_H7), 33 }; 34 35 static const __be32 ccp_sha256_init[SHA256_DIGEST_SIZE / sizeof(__be32)] = { 36 cpu_to_be32(SHA256_H0), cpu_to_be32(SHA256_H1), 37 cpu_to_be32(SHA256_H2), cpu_to_be32(SHA256_H3), 38 cpu_to_be32(SHA256_H4), cpu_to_be32(SHA256_H5), 39 cpu_to_be32(SHA256_H6), cpu_to_be32(SHA256_H7), 40 }; 41 42 static const __be64 ccp_sha384_init[SHA512_DIGEST_SIZE / sizeof(__be64)] = { 43 cpu_to_be64(SHA384_H0), cpu_to_be64(SHA384_H1), 44 cpu_to_be64(SHA384_H2), cpu_to_be64(SHA384_H3), 45 cpu_to_be64(SHA384_H4), cpu_to_be64(SHA384_H5), 46 cpu_to_be64(SHA384_H6), cpu_to_be64(SHA384_H7), 47 }; 48 49 static const __be64 ccp_sha512_init[SHA512_DIGEST_SIZE / sizeof(__be64)] = { 50 cpu_to_be64(SHA512_H0), cpu_to_be64(SHA512_H1), 51 cpu_to_be64(SHA512_H2), cpu_to_be64(SHA512_H3), 52 cpu_to_be64(SHA512_H4), cpu_to_be64(SHA512_H5), 53 cpu_to_be64(SHA512_H6), cpu_to_be64(SHA512_H7), 54 }; 55 56 #define CCP_NEW_JOBID(ccp) ((ccp->vdata->version == CCP_VERSION(3, 0)) ? \ 57 ccp_gen_jobid(ccp) : 0) 58 59 static u32 ccp_gen_jobid(struct ccp_device *ccp) 60 { 61 return atomic_inc_return(&ccp->current_id) & CCP_JOBID_MASK; 62 } 63 64 static void ccp_sg_free(struct ccp_sg_workarea *wa) 65 { 66 if (wa->dma_count) 67 dma_unmap_sg(wa->dma_dev, wa->dma_sg, wa->nents, wa->dma_dir); 68 69 wa->dma_count = 0; 70 } 71 72 static int ccp_init_sg_workarea(struct ccp_sg_workarea *wa, struct device *dev, 73 struct scatterlist *sg, u64 len, 74 enum dma_data_direction dma_dir) 75 { 76 memset(wa, 0, sizeof(*wa)); 77 78 wa->sg = sg; 79 if (!sg) 80 return 0; 81 82 wa->nents = sg_nents_for_len(sg, len); 83 if (wa->nents < 0) 84 return wa->nents; 85 86 wa->bytes_left = len; 87 wa->sg_used = 0; 88 89 if (len == 0) 90 return 0; 91 92 if (dma_dir == DMA_NONE) 93 return 0; 94 95 wa->dma_sg = sg; 96 wa->dma_dev = dev; 97 wa->dma_dir = dma_dir; 98 wa->dma_count = dma_map_sg(dev, sg, wa->nents, dma_dir); 99 if (!wa->dma_count) 100 return -ENOMEM; 101 102 return 0; 103 } 104 105 static void ccp_update_sg_workarea(struct ccp_sg_workarea *wa, unsigned int len) 106 { 107 unsigned int nbytes = min_t(u64, len, wa->bytes_left); 108 109 if (!wa->sg) 110 return; 111 112 wa->sg_used += nbytes; 113 wa->bytes_left -= nbytes; 114 if (wa->sg_used == wa->sg->length) { 115 wa->sg = sg_next(wa->sg); 116 wa->sg_used = 0; 117 } 118 } 119 120 static void ccp_dm_free(struct ccp_dm_workarea *wa) 121 { 122 if (wa->length <= CCP_DMAPOOL_MAX_SIZE) { 123 if (wa->address) 124 dma_pool_free(wa->dma_pool, wa->address, 125 wa->dma.address); 126 } else { 127 if (wa->dma.address) 128 dma_unmap_single(wa->dev, wa->dma.address, wa->length, 129 wa->dma.dir); 130 kfree(wa->address); 131 } 132 133 wa->address = NULL; 134 wa->dma.address = 0; 135 } 136 137 static int ccp_init_dm_workarea(struct ccp_dm_workarea *wa, 138 struct ccp_cmd_queue *cmd_q, 139 unsigned int len, 140 enum dma_data_direction dir) 141 { 142 memset(wa, 0, sizeof(*wa)); 143 144 if (!len) 145 return 0; 146 147 wa->dev = cmd_q->ccp->dev; 148 wa->length = len; 149 150 if (len <= CCP_DMAPOOL_MAX_SIZE) { 151 wa->dma_pool = cmd_q->dma_pool; 152 153 wa->address = dma_pool_alloc(wa->dma_pool, GFP_KERNEL, 154 &wa->dma.address); 155 if (!wa->address) 156 return -ENOMEM; 157 158 wa->dma.length = CCP_DMAPOOL_MAX_SIZE; 159 160 memset(wa->address, 0, CCP_DMAPOOL_MAX_SIZE); 161 } else { 162 wa->address = kzalloc(len, GFP_KERNEL); 163 if (!wa->address) 164 return -ENOMEM; 165 166 wa->dma.address = dma_map_single(wa->dev, wa->address, len, 167 dir); 168 if (dma_mapping_error(wa->dev, wa->dma.address)) 169 return -ENOMEM; 170 171 wa->dma.length = len; 172 } 173 wa->dma.dir = dir; 174 175 return 0; 176 } 177 178 static int ccp_set_dm_area(struct ccp_dm_workarea *wa, unsigned int wa_offset, 179 struct scatterlist *sg, unsigned int sg_offset, 180 unsigned int len) 181 { 182 WARN_ON(!wa->address); 183 184 if (len > (wa->length - wa_offset)) 185 return -EINVAL; 186 187 scatterwalk_map_and_copy(wa->address + wa_offset, sg, sg_offset, len, 188 0); 189 return 0; 190 } 191 192 static void ccp_get_dm_area(struct ccp_dm_workarea *wa, unsigned int wa_offset, 193 struct scatterlist *sg, unsigned int sg_offset, 194 unsigned int len) 195 { 196 WARN_ON(!wa->address); 197 198 scatterwalk_map_and_copy(wa->address + wa_offset, sg, sg_offset, len, 199 1); 200 } 201 202 static int ccp_reverse_set_dm_area(struct ccp_dm_workarea *wa, 203 unsigned int wa_offset, 204 struct scatterlist *sg, 205 unsigned int sg_offset, 206 unsigned int len) 207 { 208 u8 *p, *q; 209 int rc; 210 211 rc = ccp_set_dm_area(wa, wa_offset, sg, sg_offset, len); 212 if (rc) 213 return rc; 214 215 p = wa->address + wa_offset; 216 q = p + len - 1; 217 while (p < q) { 218 *p = *p ^ *q; 219 *q = *p ^ *q; 220 *p = *p ^ *q; 221 p++; 222 q--; 223 } 224 return 0; 225 } 226 227 static void ccp_reverse_get_dm_area(struct ccp_dm_workarea *wa, 228 unsigned int wa_offset, 229 struct scatterlist *sg, 230 unsigned int sg_offset, 231 unsigned int len) 232 { 233 u8 *p, *q; 234 235 p = wa->address + wa_offset; 236 q = p + len - 1; 237 while (p < q) { 238 *p = *p ^ *q; 239 *q = *p ^ *q; 240 *p = *p ^ *q; 241 p++; 242 q--; 243 } 244 245 ccp_get_dm_area(wa, wa_offset, sg, sg_offset, len); 246 } 247 248 static void ccp_free_data(struct ccp_data *data, struct ccp_cmd_queue *cmd_q) 249 { 250 ccp_dm_free(&data->dm_wa); 251 ccp_sg_free(&data->sg_wa); 252 } 253 254 static int ccp_init_data(struct ccp_data *data, struct ccp_cmd_queue *cmd_q, 255 struct scatterlist *sg, u64 sg_len, 256 unsigned int dm_len, 257 enum dma_data_direction dir) 258 { 259 int ret; 260 261 memset(data, 0, sizeof(*data)); 262 263 ret = ccp_init_sg_workarea(&data->sg_wa, cmd_q->ccp->dev, sg, sg_len, 264 dir); 265 if (ret) 266 goto e_err; 267 268 ret = ccp_init_dm_workarea(&data->dm_wa, cmd_q, dm_len, dir); 269 if (ret) 270 goto e_err; 271 272 return 0; 273 274 e_err: 275 ccp_free_data(data, cmd_q); 276 277 return ret; 278 } 279 280 static unsigned int ccp_queue_buf(struct ccp_data *data, unsigned int from) 281 { 282 struct ccp_sg_workarea *sg_wa = &data->sg_wa; 283 struct ccp_dm_workarea *dm_wa = &data->dm_wa; 284 unsigned int buf_count, nbytes; 285 286 /* Clear the buffer if setting it */ 287 if (!from) 288 memset(dm_wa->address, 0, dm_wa->length); 289 290 if (!sg_wa->sg) 291 return 0; 292 293 /* Perform the copy operation 294 * nbytes will always be <= UINT_MAX because dm_wa->length is 295 * an unsigned int 296 */ 297 nbytes = min_t(u64, sg_wa->bytes_left, dm_wa->length); 298 scatterwalk_map_and_copy(dm_wa->address, sg_wa->sg, sg_wa->sg_used, 299 nbytes, from); 300 301 /* Update the structures and generate the count */ 302 buf_count = 0; 303 while (sg_wa->bytes_left && (buf_count < dm_wa->length)) { 304 nbytes = min(sg_wa->sg->length - sg_wa->sg_used, 305 dm_wa->length - buf_count); 306 nbytes = min_t(u64, sg_wa->bytes_left, nbytes); 307 308 buf_count += nbytes; 309 ccp_update_sg_workarea(sg_wa, nbytes); 310 } 311 312 return buf_count; 313 } 314 315 static unsigned int ccp_fill_queue_buf(struct ccp_data *data) 316 { 317 return ccp_queue_buf(data, 0); 318 } 319 320 static unsigned int ccp_empty_queue_buf(struct ccp_data *data) 321 { 322 return ccp_queue_buf(data, 1); 323 } 324 325 static void ccp_prepare_data(struct ccp_data *src, struct ccp_data *dst, 326 struct ccp_op *op, unsigned int block_size, 327 bool blocksize_op) 328 { 329 unsigned int sg_src_len, sg_dst_len, op_len; 330 331 /* The CCP can only DMA from/to one address each per operation. This 332 * requires that we find the smallest DMA area between the source 333 * and destination. The resulting len values will always be <= UINT_MAX 334 * because the dma length is an unsigned int. 335 */ 336 sg_src_len = sg_dma_len(src->sg_wa.sg) - src->sg_wa.sg_used; 337 sg_src_len = min_t(u64, src->sg_wa.bytes_left, sg_src_len); 338 339 if (dst) { 340 sg_dst_len = sg_dma_len(dst->sg_wa.sg) - dst->sg_wa.sg_used; 341 sg_dst_len = min_t(u64, src->sg_wa.bytes_left, sg_dst_len); 342 op_len = min(sg_src_len, sg_dst_len); 343 } else { 344 op_len = sg_src_len; 345 } 346 347 /* The data operation length will be at least block_size in length 348 * or the smaller of available sg room remaining for the source or 349 * the destination 350 */ 351 op_len = max(op_len, block_size); 352 353 /* Unless we have to buffer data, there's no reason to wait */ 354 op->soc = 0; 355 356 if (sg_src_len < block_size) { 357 /* Not enough data in the sg element, so it 358 * needs to be buffered into a blocksize chunk 359 */ 360 int cp_len = ccp_fill_queue_buf(src); 361 362 op->soc = 1; 363 op->src.u.dma.address = src->dm_wa.dma.address; 364 op->src.u.dma.offset = 0; 365 op->src.u.dma.length = (blocksize_op) ? block_size : cp_len; 366 } else { 367 /* Enough data in the sg element, but we need to 368 * adjust for any previously copied data 369 */ 370 op->src.u.dma.address = sg_dma_address(src->sg_wa.sg); 371 op->src.u.dma.offset = src->sg_wa.sg_used; 372 op->src.u.dma.length = op_len & ~(block_size - 1); 373 374 ccp_update_sg_workarea(&src->sg_wa, op->src.u.dma.length); 375 } 376 377 if (dst) { 378 if (sg_dst_len < block_size) { 379 /* Not enough room in the sg element or we're on the 380 * last piece of data (when using padding), so the 381 * output needs to be buffered into a blocksize chunk 382 */ 383 op->soc = 1; 384 op->dst.u.dma.address = dst->dm_wa.dma.address; 385 op->dst.u.dma.offset = 0; 386 op->dst.u.dma.length = op->src.u.dma.length; 387 } else { 388 /* Enough room in the sg element, but we need to 389 * adjust for any previously used area 390 */ 391 op->dst.u.dma.address = sg_dma_address(dst->sg_wa.sg); 392 op->dst.u.dma.offset = dst->sg_wa.sg_used; 393 op->dst.u.dma.length = op->src.u.dma.length; 394 } 395 } 396 } 397 398 static void ccp_process_data(struct ccp_data *src, struct ccp_data *dst, 399 struct ccp_op *op) 400 { 401 op->init = 0; 402 403 if (dst) { 404 if (op->dst.u.dma.address == dst->dm_wa.dma.address) 405 ccp_empty_queue_buf(dst); 406 else 407 ccp_update_sg_workarea(&dst->sg_wa, 408 op->dst.u.dma.length); 409 } 410 } 411 412 static int ccp_copy_to_from_sb(struct ccp_cmd_queue *cmd_q, 413 struct ccp_dm_workarea *wa, u32 jobid, u32 sb, 414 u32 byte_swap, bool from) 415 { 416 struct ccp_op op; 417 418 memset(&op, 0, sizeof(op)); 419 420 op.cmd_q = cmd_q; 421 op.jobid = jobid; 422 op.eom = 1; 423 424 if (from) { 425 op.soc = 1; 426 op.src.type = CCP_MEMTYPE_SB; 427 op.src.u.sb = sb; 428 op.dst.type = CCP_MEMTYPE_SYSTEM; 429 op.dst.u.dma.address = wa->dma.address; 430 op.dst.u.dma.length = wa->length; 431 } else { 432 op.src.type = CCP_MEMTYPE_SYSTEM; 433 op.src.u.dma.address = wa->dma.address; 434 op.src.u.dma.length = wa->length; 435 op.dst.type = CCP_MEMTYPE_SB; 436 op.dst.u.sb = sb; 437 } 438 439 op.u.passthru.byte_swap = byte_swap; 440 441 return cmd_q->ccp->vdata->perform->passthru(&op); 442 } 443 444 static int ccp_copy_to_sb(struct ccp_cmd_queue *cmd_q, 445 struct ccp_dm_workarea *wa, u32 jobid, u32 sb, 446 u32 byte_swap) 447 { 448 return ccp_copy_to_from_sb(cmd_q, wa, jobid, sb, byte_swap, false); 449 } 450 451 static int ccp_copy_from_sb(struct ccp_cmd_queue *cmd_q, 452 struct ccp_dm_workarea *wa, u32 jobid, u32 sb, 453 u32 byte_swap) 454 { 455 return ccp_copy_to_from_sb(cmd_q, wa, jobid, sb, byte_swap, true); 456 } 457 458 static noinline_for_stack int 459 ccp_run_aes_cmac_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) 460 { 461 struct ccp_aes_engine *aes = &cmd->u.aes; 462 struct ccp_dm_workarea key, ctx; 463 struct ccp_data src; 464 struct ccp_op op; 465 unsigned int dm_offset; 466 int ret; 467 468 if (!((aes->key_len == AES_KEYSIZE_128) || 469 (aes->key_len == AES_KEYSIZE_192) || 470 (aes->key_len == AES_KEYSIZE_256))) 471 return -EINVAL; 472 473 if (aes->src_len & (AES_BLOCK_SIZE - 1)) 474 return -EINVAL; 475 476 if (aes->iv_len != AES_BLOCK_SIZE) 477 return -EINVAL; 478 479 if (!aes->key || !aes->iv || !aes->src) 480 return -EINVAL; 481 482 if (aes->cmac_final) { 483 if (aes->cmac_key_len != AES_BLOCK_SIZE) 484 return -EINVAL; 485 486 if (!aes->cmac_key) 487 return -EINVAL; 488 } 489 490 BUILD_BUG_ON(CCP_AES_KEY_SB_COUNT != 1); 491 BUILD_BUG_ON(CCP_AES_CTX_SB_COUNT != 1); 492 493 ret = -EIO; 494 memset(&op, 0, sizeof(op)); 495 op.cmd_q = cmd_q; 496 op.jobid = CCP_NEW_JOBID(cmd_q->ccp); 497 op.sb_key = cmd_q->sb_key; 498 op.sb_ctx = cmd_q->sb_ctx; 499 op.init = 1; 500 op.u.aes.type = aes->type; 501 op.u.aes.mode = aes->mode; 502 op.u.aes.action = aes->action; 503 504 /* All supported key sizes fit in a single (32-byte) SB entry 505 * and must be in little endian format. Use the 256-bit byte 506 * swap passthru option to convert from big endian to little 507 * endian. 508 */ 509 ret = ccp_init_dm_workarea(&key, cmd_q, 510 CCP_AES_KEY_SB_COUNT * CCP_SB_BYTES, 511 DMA_TO_DEVICE); 512 if (ret) 513 return ret; 514 515 dm_offset = CCP_SB_BYTES - aes->key_len; 516 ret = ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len); 517 if (ret) 518 goto e_key; 519 ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key, 520 CCP_PASSTHRU_BYTESWAP_256BIT); 521 if (ret) { 522 cmd->engine_error = cmd_q->cmd_error; 523 goto e_key; 524 } 525 526 /* The AES context fits in a single (32-byte) SB entry and 527 * must be in little endian format. Use the 256-bit byte swap 528 * passthru option to convert from big endian to little endian. 529 */ 530 ret = ccp_init_dm_workarea(&ctx, cmd_q, 531 CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES, 532 DMA_BIDIRECTIONAL); 533 if (ret) 534 goto e_key; 535 536 dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE; 537 ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len); 538 if (ret) 539 goto e_ctx; 540 ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, 541 CCP_PASSTHRU_BYTESWAP_256BIT); 542 if (ret) { 543 cmd->engine_error = cmd_q->cmd_error; 544 goto e_ctx; 545 } 546 547 /* Send data to the CCP AES engine */ 548 ret = ccp_init_data(&src, cmd_q, aes->src, aes->src_len, 549 AES_BLOCK_SIZE, DMA_TO_DEVICE); 550 if (ret) 551 goto e_ctx; 552 553 while (src.sg_wa.bytes_left) { 554 ccp_prepare_data(&src, NULL, &op, AES_BLOCK_SIZE, true); 555 if (aes->cmac_final && !src.sg_wa.bytes_left) { 556 op.eom = 1; 557 558 /* Push the K1/K2 key to the CCP now */ 559 ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, 560 op.sb_ctx, 561 CCP_PASSTHRU_BYTESWAP_256BIT); 562 if (ret) { 563 cmd->engine_error = cmd_q->cmd_error; 564 goto e_src; 565 } 566 567 ret = ccp_set_dm_area(&ctx, 0, aes->cmac_key, 0, 568 aes->cmac_key_len); 569 if (ret) 570 goto e_src; 571 ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, 572 CCP_PASSTHRU_BYTESWAP_256BIT); 573 if (ret) { 574 cmd->engine_error = cmd_q->cmd_error; 575 goto e_src; 576 } 577 } 578 579 ret = cmd_q->ccp->vdata->perform->aes(&op); 580 if (ret) { 581 cmd->engine_error = cmd_q->cmd_error; 582 goto e_src; 583 } 584 585 ccp_process_data(&src, NULL, &op); 586 } 587 588 /* Retrieve the AES context - convert from LE to BE using 589 * 32-byte (256-bit) byteswapping 590 */ 591 ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, 592 CCP_PASSTHRU_BYTESWAP_256BIT); 593 if (ret) { 594 cmd->engine_error = cmd_q->cmd_error; 595 goto e_src; 596 } 597 598 /* ...but we only need AES_BLOCK_SIZE bytes */ 599 dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE; 600 ccp_get_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len); 601 602 e_src: 603 ccp_free_data(&src, cmd_q); 604 605 e_ctx: 606 ccp_dm_free(&ctx); 607 608 e_key: 609 ccp_dm_free(&key); 610 611 return ret; 612 } 613 614 static noinline_for_stack int 615 ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) 616 { 617 struct ccp_aes_engine *aes = &cmd->u.aes; 618 struct ccp_dm_workarea key, ctx, final_wa, tag; 619 struct ccp_data src, dst; 620 struct ccp_data aad; 621 struct ccp_op op; 622 623 unsigned long long *final; 624 unsigned int dm_offset; 625 unsigned int jobid; 626 unsigned int ilen; 627 bool in_place = true; /* Default value */ 628 int ret; 629 630 struct scatterlist *p_inp, sg_inp[2]; 631 struct scatterlist *p_tag, sg_tag[2]; 632 struct scatterlist *p_outp, sg_outp[2]; 633 struct scatterlist *p_aad; 634 635 if (!aes->iv) 636 return -EINVAL; 637 638 if (!((aes->key_len == AES_KEYSIZE_128) || 639 (aes->key_len == AES_KEYSIZE_192) || 640 (aes->key_len == AES_KEYSIZE_256))) 641 return -EINVAL; 642 643 if (!aes->key) /* Gotta have a key SGL */ 644 return -EINVAL; 645 646 /* First, decompose the source buffer into AAD & PT, 647 * and the destination buffer into AAD, CT & tag, or 648 * the input into CT & tag. 649 * It is expected that the input and output SGs will 650 * be valid, even if the AAD and input lengths are 0. 651 */ 652 p_aad = aes->src; 653 p_inp = scatterwalk_ffwd(sg_inp, aes->src, aes->aad_len); 654 p_outp = scatterwalk_ffwd(sg_outp, aes->dst, aes->aad_len); 655 if (aes->action == CCP_AES_ACTION_ENCRYPT) { 656 ilen = aes->src_len; 657 p_tag = scatterwalk_ffwd(sg_tag, p_outp, ilen); 658 } else { 659 /* Input length for decryption includes tag */ 660 ilen = aes->src_len - AES_BLOCK_SIZE; 661 p_tag = scatterwalk_ffwd(sg_tag, p_inp, ilen); 662 } 663 664 jobid = CCP_NEW_JOBID(cmd_q->ccp); 665 666 memset(&op, 0, sizeof(op)); 667 op.cmd_q = cmd_q; 668 op.jobid = jobid; 669 op.sb_key = cmd_q->sb_key; /* Pre-allocated */ 670 op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */ 671 op.init = 1; 672 op.u.aes.type = aes->type; 673 674 /* Copy the key to the LSB */ 675 ret = ccp_init_dm_workarea(&key, cmd_q, 676 CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES, 677 DMA_TO_DEVICE); 678 if (ret) 679 return ret; 680 681 dm_offset = CCP_SB_BYTES - aes->key_len; 682 ret = ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len); 683 if (ret) 684 goto e_key; 685 ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key, 686 CCP_PASSTHRU_BYTESWAP_256BIT); 687 if (ret) { 688 cmd->engine_error = cmd_q->cmd_error; 689 goto e_key; 690 } 691 692 /* Copy the context (IV) to the LSB. 693 * There is an assumption here that the IV is 96 bits in length, plus 694 * a nonce of 32 bits. If no IV is present, use a zeroed buffer. 695 */ 696 ret = ccp_init_dm_workarea(&ctx, cmd_q, 697 CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES, 698 DMA_BIDIRECTIONAL); 699 if (ret) 700 goto e_key; 701 702 dm_offset = CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES - aes->iv_len; 703 ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len); 704 if (ret) 705 goto e_ctx; 706 707 ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, 708 CCP_PASSTHRU_BYTESWAP_256BIT); 709 if (ret) { 710 cmd->engine_error = cmd_q->cmd_error; 711 goto e_ctx; 712 } 713 714 op.init = 1; 715 if (aes->aad_len > 0) { 716 /* Step 1: Run a GHASH over the Additional Authenticated Data */ 717 ret = ccp_init_data(&aad, cmd_q, p_aad, aes->aad_len, 718 AES_BLOCK_SIZE, 719 DMA_TO_DEVICE); 720 if (ret) 721 goto e_ctx; 722 723 op.u.aes.mode = CCP_AES_MODE_GHASH; 724 op.u.aes.action = CCP_AES_GHASHAAD; 725 726 while (aad.sg_wa.bytes_left) { 727 ccp_prepare_data(&aad, NULL, &op, AES_BLOCK_SIZE, true); 728 729 ret = cmd_q->ccp->vdata->perform->aes(&op); 730 if (ret) { 731 cmd->engine_error = cmd_q->cmd_error; 732 goto e_aad; 733 } 734 735 ccp_process_data(&aad, NULL, &op); 736 op.init = 0; 737 } 738 } 739 740 op.u.aes.mode = CCP_AES_MODE_GCTR; 741 op.u.aes.action = aes->action; 742 743 if (ilen > 0) { 744 /* Step 2: Run a GCTR over the plaintext */ 745 in_place = (sg_virt(p_inp) == sg_virt(p_outp)) ? true : false; 746 747 ret = ccp_init_data(&src, cmd_q, p_inp, ilen, 748 AES_BLOCK_SIZE, 749 in_place ? DMA_BIDIRECTIONAL 750 : DMA_TO_DEVICE); 751 if (ret) 752 goto e_ctx; 753 754 if (in_place) { 755 dst = src; 756 } else { 757 ret = ccp_init_data(&dst, cmd_q, p_outp, ilen, 758 AES_BLOCK_SIZE, DMA_FROM_DEVICE); 759 if (ret) 760 goto e_src; 761 } 762 763 op.soc = 0; 764 op.eom = 0; 765 op.init = 1; 766 while (src.sg_wa.bytes_left) { 767 ccp_prepare_data(&src, &dst, &op, AES_BLOCK_SIZE, true); 768 if (!src.sg_wa.bytes_left) { 769 unsigned int nbytes = aes->src_len 770 % AES_BLOCK_SIZE; 771 772 if (nbytes) { 773 op.eom = 1; 774 op.u.aes.size = (nbytes * 8) - 1; 775 } 776 } 777 778 ret = cmd_q->ccp->vdata->perform->aes(&op); 779 if (ret) { 780 cmd->engine_error = cmd_q->cmd_error; 781 goto e_dst; 782 } 783 784 ccp_process_data(&src, &dst, &op); 785 op.init = 0; 786 } 787 } 788 789 /* Step 3: Update the IV portion of the context with the original IV */ 790 ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, 791 CCP_PASSTHRU_BYTESWAP_256BIT); 792 if (ret) { 793 cmd->engine_error = cmd_q->cmd_error; 794 goto e_dst; 795 } 796 797 ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len); 798 if (ret) 799 goto e_dst; 800 801 ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, 802 CCP_PASSTHRU_BYTESWAP_256BIT); 803 if (ret) { 804 cmd->engine_error = cmd_q->cmd_error; 805 goto e_dst; 806 } 807 808 /* Step 4: Concatenate the lengths of the AAD and source, and 809 * hash that 16 byte buffer. 810 */ 811 ret = ccp_init_dm_workarea(&final_wa, cmd_q, AES_BLOCK_SIZE, 812 DMA_BIDIRECTIONAL); 813 if (ret) 814 goto e_dst; 815 final = (unsigned long long *) final_wa.address; 816 final[0] = cpu_to_be64(aes->aad_len * 8); 817 final[1] = cpu_to_be64(ilen * 8); 818 819 memset(&op, 0, sizeof(op)); 820 op.cmd_q = cmd_q; 821 op.jobid = jobid; 822 op.sb_key = cmd_q->sb_key; /* Pre-allocated */ 823 op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */ 824 op.init = 1; 825 op.u.aes.type = aes->type; 826 op.u.aes.mode = CCP_AES_MODE_GHASH; 827 op.u.aes.action = CCP_AES_GHASHFINAL; 828 op.src.type = CCP_MEMTYPE_SYSTEM; 829 op.src.u.dma.address = final_wa.dma.address; 830 op.src.u.dma.length = AES_BLOCK_SIZE; 831 op.dst.type = CCP_MEMTYPE_SYSTEM; 832 op.dst.u.dma.address = final_wa.dma.address; 833 op.dst.u.dma.length = AES_BLOCK_SIZE; 834 op.eom = 1; 835 op.u.aes.size = 0; 836 ret = cmd_q->ccp->vdata->perform->aes(&op); 837 if (ret) 838 goto e_dst; 839 840 if (aes->action == CCP_AES_ACTION_ENCRYPT) { 841 /* Put the ciphered tag after the ciphertext. */ 842 ccp_get_dm_area(&final_wa, 0, p_tag, 0, AES_BLOCK_SIZE); 843 } else { 844 /* Does this ciphered tag match the input? */ 845 ret = ccp_init_dm_workarea(&tag, cmd_q, AES_BLOCK_SIZE, 846 DMA_BIDIRECTIONAL); 847 if (ret) 848 goto e_tag; 849 ret = ccp_set_dm_area(&tag, 0, p_tag, 0, AES_BLOCK_SIZE); 850 if (ret) 851 goto e_tag; 852 853 ret = crypto_memneq(tag.address, final_wa.address, 854 AES_BLOCK_SIZE) ? -EBADMSG : 0; 855 ccp_dm_free(&tag); 856 } 857 858 e_tag: 859 ccp_dm_free(&final_wa); 860 861 e_dst: 862 if (aes->src_len && !in_place) 863 ccp_free_data(&dst, cmd_q); 864 865 e_src: 866 if (aes->src_len) 867 ccp_free_data(&src, cmd_q); 868 869 e_aad: 870 if (aes->aad_len) 871 ccp_free_data(&aad, cmd_q); 872 873 e_ctx: 874 ccp_dm_free(&ctx); 875 876 e_key: 877 ccp_dm_free(&key); 878 879 return ret; 880 } 881 882 static noinline_for_stack int 883 ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) 884 { 885 struct ccp_aes_engine *aes = &cmd->u.aes; 886 struct ccp_dm_workarea key, ctx; 887 struct ccp_data src, dst; 888 struct ccp_op op; 889 unsigned int dm_offset; 890 bool in_place = false; 891 int ret; 892 893 if (!((aes->key_len == AES_KEYSIZE_128) || 894 (aes->key_len == AES_KEYSIZE_192) || 895 (aes->key_len == AES_KEYSIZE_256))) 896 return -EINVAL; 897 898 if (((aes->mode == CCP_AES_MODE_ECB) || 899 (aes->mode == CCP_AES_MODE_CBC)) && 900 (aes->src_len & (AES_BLOCK_SIZE - 1))) 901 return -EINVAL; 902 903 if (!aes->key || !aes->src || !aes->dst) 904 return -EINVAL; 905 906 if (aes->mode != CCP_AES_MODE_ECB) { 907 if (aes->iv_len != AES_BLOCK_SIZE) 908 return -EINVAL; 909 910 if (!aes->iv) 911 return -EINVAL; 912 } 913 914 BUILD_BUG_ON(CCP_AES_KEY_SB_COUNT != 1); 915 BUILD_BUG_ON(CCP_AES_CTX_SB_COUNT != 1); 916 917 ret = -EIO; 918 memset(&op, 0, sizeof(op)); 919 op.cmd_q = cmd_q; 920 op.jobid = CCP_NEW_JOBID(cmd_q->ccp); 921 op.sb_key = cmd_q->sb_key; 922 op.sb_ctx = cmd_q->sb_ctx; 923 op.init = (aes->mode == CCP_AES_MODE_ECB) ? 0 : 1; 924 op.u.aes.type = aes->type; 925 op.u.aes.mode = aes->mode; 926 op.u.aes.action = aes->action; 927 928 /* All supported key sizes fit in a single (32-byte) SB entry 929 * and must be in little endian format. Use the 256-bit byte 930 * swap passthru option to convert from big endian to little 931 * endian. 932 */ 933 ret = ccp_init_dm_workarea(&key, cmd_q, 934 CCP_AES_KEY_SB_COUNT * CCP_SB_BYTES, 935 DMA_TO_DEVICE); 936 if (ret) 937 return ret; 938 939 dm_offset = CCP_SB_BYTES - aes->key_len; 940 ret = ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len); 941 if (ret) 942 goto e_key; 943 ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key, 944 CCP_PASSTHRU_BYTESWAP_256BIT); 945 if (ret) { 946 cmd->engine_error = cmd_q->cmd_error; 947 goto e_key; 948 } 949 950 /* The AES context fits in a single (32-byte) SB entry and 951 * must be in little endian format. Use the 256-bit byte swap 952 * passthru option to convert from big endian to little endian. 953 */ 954 ret = ccp_init_dm_workarea(&ctx, cmd_q, 955 CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES, 956 DMA_BIDIRECTIONAL); 957 if (ret) 958 goto e_key; 959 960 if (aes->mode != CCP_AES_MODE_ECB) { 961 /* Load the AES context - convert to LE */ 962 dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE; 963 ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len); 964 if (ret) 965 goto e_ctx; 966 ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, 967 CCP_PASSTHRU_BYTESWAP_256BIT); 968 if (ret) { 969 cmd->engine_error = cmd_q->cmd_error; 970 goto e_ctx; 971 } 972 } 973 switch (aes->mode) { 974 case CCP_AES_MODE_CFB: /* CFB128 only */ 975 case CCP_AES_MODE_CTR: 976 op.u.aes.size = AES_BLOCK_SIZE * BITS_PER_BYTE - 1; 977 break; 978 default: 979 op.u.aes.size = 0; 980 } 981 982 /* Prepare the input and output data workareas. For in-place 983 * operations we need to set the dma direction to BIDIRECTIONAL 984 * and copy the src workarea to the dst workarea. 985 */ 986 if (sg_virt(aes->src) == sg_virt(aes->dst)) 987 in_place = true; 988 989 ret = ccp_init_data(&src, cmd_q, aes->src, aes->src_len, 990 AES_BLOCK_SIZE, 991 in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE); 992 if (ret) 993 goto e_ctx; 994 995 if (in_place) { 996 dst = src; 997 } else { 998 ret = ccp_init_data(&dst, cmd_q, aes->dst, aes->src_len, 999 AES_BLOCK_SIZE, DMA_FROM_DEVICE); 1000 if (ret) 1001 goto e_src; 1002 } 1003 1004 /* Send data to the CCP AES engine */ 1005 while (src.sg_wa.bytes_left) { 1006 ccp_prepare_data(&src, &dst, &op, AES_BLOCK_SIZE, true); 1007 if (!src.sg_wa.bytes_left) { 1008 op.eom = 1; 1009 1010 /* Since we don't retrieve the AES context in ECB 1011 * mode we have to wait for the operation to complete 1012 * on the last piece of data 1013 */ 1014 if (aes->mode == CCP_AES_MODE_ECB) 1015 op.soc = 1; 1016 } 1017 1018 ret = cmd_q->ccp->vdata->perform->aes(&op); 1019 if (ret) { 1020 cmd->engine_error = cmd_q->cmd_error; 1021 goto e_dst; 1022 } 1023 1024 ccp_process_data(&src, &dst, &op); 1025 } 1026 1027 if (aes->mode != CCP_AES_MODE_ECB) { 1028 /* Retrieve the AES context - convert from LE to BE using 1029 * 32-byte (256-bit) byteswapping 1030 */ 1031 ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, 1032 CCP_PASSTHRU_BYTESWAP_256BIT); 1033 if (ret) { 1034 cmd->engine_error = cmd_q->cmd_error; 1035 goto e_dst; 1036 } 1037 1038 /* ...but we only need AES_BLOCK_SIZE bytes */ 1039 dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE; 1040 ccp_get_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len); 1041 } 1042 1043 e_dst: 1044 if (!in_place) 1045 ccp_free_data(&dst, cmd_q); 1046 1047 e_src: 1048 ccp_free_data(&src, cmd_q); 1049 1050 e_ctx: 1051 ccp_dm_free(&ctx); 1052 1053 e_key: 1054 ccp_dm_free(&key); 1055 1056 return ret; 1057 } 1058 1059 static noinline_for_stack int 1060 ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) 1061 { 1062 struct ccp_xts_aes_engine *xts = &cmd->u.xts; 1063 struct ccp_dm_workarea key, ctx; 1064 struct ccp_data src, dst; 1065 struct ccp_op op; 1066 unsigned int unit_size, dm_offset; 1067 bool in_place = false; 1068 unsigned int sb_count; 1069 enum ccp_aes_type aestype; 1070 int ret; 1071 1072 switch (xts->unit_size) { 1073 case CCP_XTS_AES_UNIT_SIZE_16: 1074 unit_size = 16; 1075 break; 1076 case CCP_XTS_AES_UNIT_SIZE_512: 1077 unit_size = 512; 1078 break; 1079 case CCP_XTS_AES_UNIT_SIZE_1024: 1080 unit_size = 1024; 1081 break; 1082 case CCP_XTS_AES_UNIT_SIZE_2048: 1083 unit_size = 2048; 1084 break; 1085 case CCP_XTS_AES_UNIT_SIZE_4096: 1086 unit_size = 4096; 1087 break; 1088 1089 default: 1090 return -EINVAL; 1091 } 1092 1093 if (xts->key_len == AES_KEYSIZE_128) 1094 aestype = CCP_AES_TYPE_128; 1095 else if (xts->key_len == AES_KEYSIZE_256) 1096 aestype = CCP_AES_TYPE_256; 1097 else 1098 return -EINVAL; 1099 1100 if (!xts->final && (xts->src_len & (AES_BLOCK_SIZE - 1))) 1101 return -EINVAL; 1102 1103 if (xts->iv_len != AES_BLOCK_SIZE) 1104 return -EINVAL; 1105 1106 if (!xts->key || !xts->iv || !xts->src || !xts->dst) 1107 return -EINVAL; 1108 1109 BUILD_BUG_ON(CCP_XTS_AES_KEY_SB_COUNT != 1); 1110 BUILD_BUG_ON(CCP_XTS_AES_CTX_SB_COUNT != 1); 1111 1112 ret = -EIO; 1113 memset(&op, 0, sizeof(op)); 1114 op.cmd_q = cmd_q; 1115 op.jobid = CCP_NEW_JOBID(cmd_q->ccp); 1116 op.sb_key = cmd_q->sb_key; 1117 op.sb_ctx = cmd_q->sb_ctx; 1118 op.init = 1; 1119 op.u.xts.type = aestype; 1120 op.u.xts.action = xts->action; 1121 op.u.xts.unit_size = xts->unit_size; 1122 1123 /* A version 3 device only supports 128-bit keys, which fits into a 1124 * single SB entry. A version 5 device uses a 512-bit vector, so two 1125 * SB entries. 1126 */ 1127 if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0)) 1128 sb_count = CCP_XTS_AES_KEY_SB_COUNT; 1129 else 1130 sb_count = CCP5_XTS_AES_KEY_SB_COUNT; 1131 ret = ccp_init_dm_workarea(&key, cmd_q, 1132 sb_count * CCP_SB_BYTES, 1133 DMA_TO_DEVICE); 1134 if (ret) 1135 return ret; 1136 1137 if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0)) { 1138 /* All supported key sizes must be in little endian format. 1139 * Use the 256-bit byte swap passthru option to convert from 1140 * big endian to little endian. 1141 */ 1142 dm_offset = CCP_SB_BYTES - AES_KEYSIZE_128; 1143 ret = ccp_set_dm_area(&key, dm_offset, xts->key, 0, xts->key_len); 1144 if (ret) 1145 goto e_key; 1146 ret = ccp_set_dm_area(&key, 0, xts->key, xts->key_len, xts->key_len); 1147 if (ret) 1148 goto e_key; 1149 } else { 1150 /* Version 5 CCPs use a 512-bit space for the key: each portion 1151 * occupies 256 bits, or one entire slot, and is zero-padded. 1152 */ 1153 unsigned int pad; 1154 1155 dm_offset = CCP_SB_BYTES; 1156 pad = dm_offset - xts->key_len; 1157 ret = ccp_set_dm_area(&key, pad, xts->key, 0, xts->key_len); 1158 if (ret) 1159 goto e_key; 1160 ret = ccp_set_dm_area(&key, dm_offset + pad, xts->key, 1161 xts->key_len, xts->key_len); 1162 if (ret) 1163 goto e_key; 1164 } 1165 ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key, 1166 CCP_PASSTHRU_BYTESWAP_256BIT); 1167 if (ret) { 1168 cmd->engine_error = cmd_q->cmd_error; 1169 goto e_key; 1170 } 1171 1172 /* The AES context fits in a single (32-byte) SB entry and 1173 * for XTS is already in little endian format so no byte swapping 1174 * is needed. 1175 */ 1176 ret = ccp_init_dm_workarea(&ctx, cmd_q, 1177 CCP_XTS_AES_CTX_SB_COUNT * CCP_SB_BYTES, 1178 DMA_BIDIRECTIONAL); 1179 if (ret) 1180 goto e_key; 1181 1182 ret = ccp_set_dm_area(&ctx, 0, xts->iv, 0, xts->iv_len); 1183 if (ret) 1184 goto e_ctx; 1185 ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, 1186 CCP_PASSTHRU_BYTESWAP_NOOP); 1187 if (ret) { 1188 cmd->engine_error = cmd_q->cmd_error; 1189 goto e_ctx; 1190 } 1191 1192 /* Prepare the input and output data workareas. For in-place 1193 * operations we need to set the dma direction to BIDIRECTIONAL 1194 * and copy the src workarea to the dst workarea. 1195 */ 1196 if (sg_virt(xts->src) == sg_virt(xts->dst)) 1197 in_place = true; 1198 1199 ret = ccp_init_data(&src, cmd_q, xts->src, xts->src_len, 1200 unit_size, 1201 in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE); 1202 if (ret) 1203 goto e_ctx; 1204 1205 if (in_place) { 1206 dst = src; 1207 } else { 1208 ret = ccp_init_data(&dst, cmd_q, xts->dst, xts->src_len, 1209 unit_size, DMA_FROM_DEVICE); 1210 if (ret) 1211 goto e_src; 1212 } 1213 1214 /* Send data to the CCP AES engine */ 1215 while (src.sg_wa.bytes_left) { 1216 ccp_prepare_data(&src, &dst, &op, unit_size, true); 1217 if (!src.sg_wa.bytes_left) 1218 op.eom = 1; 1219 1220 ret = cmd_q->ccp->vdata->perform->xts_aes(&op); 1221 if (ret) { 1222 cmd->engine_error = cmd_q->cmd_error; 1223 goto e_dst; 1224 } 1225 1226 ccp_process_data(&src, &dst, &op); 1227 } 1228 1229 /* Retrieve the AES context - convert from LE to BE using 1230 * 32-byte (256-bit) byteswapping 1231 */ 1232 ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, 1233 CCP_PASSTHRU_BYTESWAP_256BIT); 1234 if (ret) { 1235 cmd->engine_error = cmd_q->cmd_error; 1236 goto e_dst; 1237 } 1238 1239 /* ...but we only need AES_BLOCK_SIZE bytes */ 1240 dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE; 1241 ccp_get_dm_area(&ctx, dm_offset, xts->iv, 0, xts->iv_len); 1242 1243 e_dst: 1244 if (!in_place) 1245 ccp_free_data(&dst, cmd_q); 1246 1247 e_src: 1248 ccp_free_data(&src, cmd_q); 1249 1250 e_ctx: 1251 ccp_dm_free(&ctx); 1252 1253 e_key: 1254 ccp_dm_free(&key); 1255 1256 return ret; 1257 } 1258 1259 static noinline_for_stack int 1260 ccp_run_des3_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) 1261 { 1262 struct ccp_des3_engine *des3 = &cmd->u.des3; 1263 1264 struct ccp_dm_workarea key, ctx; 1265 struct ccp_data src, dst; 1266 struct ccp_op op; 1267 unsigned int dm_offset; 1268 unsigned int len_singlekey; 1269 bool in_place = false; 1270 int ret; 1271 1272 /* Error checks */ 1273 if (cmd_q->ccp->vdata->version < CCP_VERSION(5, 0)) 1274 return -EINVAL; 1275 1276 if (!cmd_q->ccp->vdata->perform->des3) 1277 return -EINVAL; 1278 1279 if (des3->key_len != DES3_EDE_KEY_SIZE) 1280 return -EINVAL; 1281 1282 if (((des3->mode == CCP_DES3_MODE_ECB) || 1283 (des3->mode == CCP_DES3_MODE_CBC)) && 1284 (des3->src_len & (DES3_EDE_BLOCK_SIZE - 1))) 1285 return -EINVAL; 1286 1287 if (!des3->key || !des3->src || !des3->dst) 1288 return -EINVAL; 1289 1290 if (des3->mode != CCP_DES3_MODE_ECB) { 1291 if (des3->iv_len != DES3_EDE_BLOCK_SIZE) 1292 return -EINVAL; 1293 1294 if (!des3->iv) 1295 return -EINVAL; 1296 } 1297 1298 ret = -EIO; 1299 /* Zero out all the fields of the command desc */ 1300 memset(&op, 0, sizeof(op)); 1301 1302 /* Set up the Function field */ 1303 op.cmd_q = cmd_q; 1304 op.jobid = CCP_NEW_JOBID(cmd_q->ccp); 1305 op.sb_key = cmd_q->sb_key; 1306 1307 op.init = (des3->mode == CCP_DES3_MODE_ECB) ? 0 : 1; 1308 op.u.des3.type = des3->type; 1309 op.u.des3.mode = des3->mode; 1310 op.u.des3.action = des3->action; 1311 1312 /* 1313 * All supported key sizes fit in a single (32-byte) KSB entry and 1314 * (like AES) must be in little endian format. Use the 256-bit byte 1315 * swap passthru option to convert from big endian to little endian. 1316 */ 1317 ret = ccp_init_dm_workarea(&key, cmd_q, 1318 CCP_DES3_KEY_SB_COUNT * CCP_SB_BYTES, 1319 DMA_TO_DEVICE); 1320 if (ret) 1321 return ret; 1322 1323 /* 1324 * The contents of the key triplet are in the reverse order of what 1325 * is required by the engine. Copy the 3 pieces individually to put 1326 * them where they belong. 1327 */ 1328 dm_offset = CCP_SB_BYTES - des3->key_len; /* Basic offset */ 1329 1330 len_singlekey = des3->key_len / 3; 1331 ret = ccp_set_dm_area(&key, dm_offset + 2 * len_singlekey, 1332 des3->key, 0, len_singlekey); 1333 if (ret) 1334 goto e_key; 1335 ret = ccp_set_dm_area(&key, dm_offset + len_singlekey, 1336 des3->key, len_singlekey, len_singlekey); 1337 if (ret) 1338 goto e_key; 1339 ret = ccp_set_dm_area(&key, dm_offset, 1340 des3->key, 2 * len_singlekey, len_singlekey); 1341 if (ret) 1342 goto e_key; 1343 1344 /* Copy the key to the SB */ 1345 ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key, 1346 CCP_PASSTHRU_BYTESWAP_256BIT); 1347 if (ret) { 1348 cmd->engine_error = cmd_q->cmd_error; 1349 goto e_key; 1350 } 1351 1352 /* 1353 * The DES3 context fits in a single (32-byte) KSB entry and 1354 * must be in little endian format. Use the 256-bit byte swap 1355 * passthru option to convert from big endian to little endian. 1356 */ 1357 if (des3->mode != CCP_DES3_MODE_ECB) { 1358 op.sb_ctx = cmd_q->sb_ctx; 1359 1360 ret = ccp_init_dm_workarea(&ctx, cmd_q, 1361 CCP_DES3_CTX_SB_COUNT * CCP_SB_BYTES, 1362 DMA_BIDIRECTIONAL); 1363 if (ret) 1364 goto e_key; 1365 1366 /* Load the context into the LSB */ 1367 dm_offset = CCP_SB_BYTES - des3->iv_len; 1368 ret = ccp_set_dm_area(&ctx, dm_offset, des3->iv, 0, 1369 des3->iv_len); 1370 if (ret) 1371 goto e_ctx; 1372 1373 ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, 1374 CCP_PASSTHRU_BYTESWAP_256BIT); 1375 if (ret) { 1376 cmd->engine_error = cmd_q->cmd_error; 1377 goto e_ctx; 1378 } 1379 } 1380 1381 /* 1382 * Prepare the input and output data workareas. For in-place 1383 * operations we need to set the dma direction to BIDIRECTIONAL 1384 * and copy the src workarea to the dst workarea. 1385 */ 1386 if (sg_virt(des3->src) == sg_virt(des3->dst)) 1387 in_place = true; 1388 1389 ret = ccp_init_data(&src, cmd_q, des3->src, des3->src_len, 1390 DES3_EDE_BLOCK_SIZE, 1391 in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE); 1392 if (ret) 1393 goto e_ctx; 1394 1395 if (in_place) 1396 dst = src; 1397 else { 1398 ret = ccp_init_data(&dst, cmd_q, des3->dst, des3->src_len, 1399 DES3_EDE_BLOCK_SIZE, DMA_FROM_DEVICE); 1400 if (ret) 1401 goto e_src; 1402 } 1403 1404 /* Send data to the CCP DES3 engine */ 1405 while (src.sg_wa.bytes_left) { 1406 ccp_prepare_data(&src, &dst, &op, DES3_EDE_BLOCK_SIZE, true); 1407 if (!src.sg_wa.bytes_left) { 1408 op.eom = 1; 1409 1410 /* Since we don't retrieve the context in ECB mode 1411 * we have to wait for the operation to complete 1412 * on the last piece of data 1413 */ 1414 op.soc = 0; 1415 } 1416 1417 ret = cmd_q->ccp->vdata->perform->des3(&op); 1418 if (ret) { 1419 cmd->engine_error = cmd_q->cmd_error; 1420 goto e_dst; 1421 } 1422 1423 ccp_process_data(&src, &dst, &op); 1424 } 1425 1426 if (des3->mode != CCP_DES3_MODE_ECB) { 1427 /* Retrieve the context and make BE */ 1428 ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, 1429 CCP_PASSTHRU_BYTESWAP_256BIT); 1430 if (ret) { 1431 cmd->engine_error = cmd_q->cmd_error; 1432 goto e_dst; 1433 } 1434 1435 /* ...but we only need the last DES3_EDE_BLOCK_SIZE bytes */ 1436 ccp_get_dm_area(&ctx, dm_offset, des3->iv, 0, 1437 DES3_EDE_BLOCK_SIZE); 1438 } 1439 e_dst: 1440 if (!in_place) 1441 ccp_free_data(&dst, cmd_q); 1442 1443 e_src: 1444 ccp_free_data(&src, cmd_q); 1445 1446 e_ctx: 1447 if (des3->mode != CCP_DES3_MODE_ECB) 1448 ccp_dm_free(&ctx); 1449 1450 e_key: 1451 ccp_dm_free(&key); 1452 1453 return ret; 1454 } 1455 1456 static noinline_for_stack int 1457 ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) 1458 { 1459 struct ccp_sha_engine *sha = &cmd->u.sha; 1460 struct ccp_dm_workarea ctx; 1461 struct ccp_data src; 1462 struct ccp_op op; 1463 unsigned int ioffset, ooffset; 1464 unsigned int digest_size; 1465 int sb_count; 1466 const void *init; 1467 u64 block_size; 1468 int ctx_size; 1469 int ret; 1470 1471 switch (sha->type) { 1472 case CCP_SHA_TYPE_1: 1473 if (sha->ctx_len < SHA1_DIGEST_SIZE) 1474 return -EINVAL; 1475 block_size = SHA1_BLOCK_SIZE; 1476 break; 1477 case CCP_SHA_TYPE_224: 1478 if (sha->ctx_len < SHA224_DIGEST_SIZE) 1479 return -EINVAL; 1480 block_size = SHA224_BLOCK_SIZE; 1481 break; 1482 case CCP_SHA_TYPE_256: 1483 if (sha->ctx_len < SHA256_DIGEST_SIZE) 1484 return -EINVAL; 1485 block_size = SHA256_BLOCK_SIZE; 1486 break; 1487 case CCP_SHA_TYPE_384: 1488 if (cmd_q->ccp->vdata->version < CCP_VERSION(4, 0) 1489 || sha->ctx_len < SHA384_DIGEST_SIZE) 1490 return -EINVAL; 1491 block_size = SHA384_BLOCK_SIZE; 1492 break; 1493 case CCP_SHA_TYPE_512: 1494 if (cmd_q->ccp->vdata->version < CCP_VERSION(4, 0) 1495 || sha->ctx_len < SHA512_DIGEST_SIZE) 1496 return -EINVAL; 1497 block_size = SHA512_BLOCK_SIZE; 1498 break; 1499 default: 1500 return -EINVAL; 1501 } 1502 1503 if (!sha->ctx) 1504 return -EINVAL; 1505 1506 if (!sha->final && (sha->src_len & (block_size - 1))) 1507 return -EINVAL; 1508 1509 /* The version 3 device can't handle zero-length input */ 1510 if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0)) { 1511 1512 if (!sha->src_len) { 1513 unsigned int digest_len; 1514 const u8 *sha_zero; 1515 1516 /* Not final, just return */ 1517 if (!sha->final) 1518 return 0; 1519 1520 /* CCP can't do a zero length sha operation so the 1521 * caller must buffer the data. 1522 */ 1523 if (sha->msg_bits) 1524 return -EINVAL; 1525 1526 /* The CCP cannot perform zero-length sha operations 1527 * so the caller is required to buffer data for the 1528 * final operation. However, a sha operation for a 1529 * message with a total length of zero is valid so 1530 * known values are required to supply the result. 1531 */ 1532 switch (sha->type) { 1533 case CCP_SHA_TYPE_1: 1534 sha_zero = sha1_zero_message_hash; 1535 digest_len = SHA1_DIGEST_SIZE; 1536 break; 1537 case CCP_SHA_TYPE_224: 1538 sha_zero = sha224_zero_message_hash; 1539 digest_len = SHA224_DIGEST_SIZE; 1540 break; 1541 case CCP_SHA_TYPE_256: 1542 sha_zero = sha256_zero_message_hash; 1543 digest_len = SHA256_DIGEST_SIZE; 1544 break; 1545 default: 1546 return -EINVAL; 1547 } 1548 1549 scatterwalk_map_and_copy((void *)sha_zero, sha->ctx, 0, 1550 digest_len, 1); 1551 1552 return 0; 1553 } 1554 } 1555 1556 /* Set variables used throughout */ 1557 switch (sha->type) { 1558 case CCP_SHA_TYPE_1: 1559 digest_size = SHA1_DIGEST_SIZE; 1560 init = (void *) ccp_sha1_init; 1561 ctx_size = SHA1_DIGEST_SIZE; 1562 sb_count = 1; 1563 if (cmd_q->ccp->vdata->version != CCP_VERSION(3, 0)) 1564 ooffset = ioffset = CCP_SB_BYTES - SHA1_DIGEST_SIZE; 1565 else 1566 ooffset = ioffset = 0; 1567 break; 1568 case CCP_SHA_TYPE_224: 1569 digest_size = SHA224_DIGEST_SIZE; 1570 init = (void *) ccp_sha224_init; 1571 ctx_size = SHA256_DIGEST_SIZE; 1572 sb_count = 1; 1573 ioffset = 0; 1574 if (cmd_q->ccp->vdata->version != CCP_VERSION(3, 0)) 1575 ooffset = CCP_SB_BYTES - SHA224_DIGEST_SIZE; 1576 else 1577 ooffset = 0; 1578 break; 1579 case CCP_SHA_TYPE_256: 1580 digest_size = SHA256_DIGEST_SIZE; 1581 init = (void *) ccp_sha256_init; 1582 ctx_size = SHA256_DIGEST_SIZE; 1583 sb_count = 1; 1584 ooffset = ioffset = 0; 1585 break; 1586 case CCP_SHA_TYPE_384: 1587 digest_size = SHA384_DIGEST_SIZE; 1588 init = (void *) ccp_sha384_init; 1589 ctx_size = SHA512_DIGEST_SIZE; 1590 sb_count = 2; 1591 ioffset = 0; 1592 ooffset = 2 * CCP_SB_BYTES - SHA384_DIGEST_SIZE; 1593 break; 1594 case CCP_SHA_TYPE_512: 1595 digest_size = SHA512_DIGEST_SIZE; 1596 init = (void *) ccp_sha512_init; 1597 ctx_size = SHA512_DIGEST_SIZE; 1598 sb_count = 2; 1599 ooffset = ioffset = 0; 1600 break; 1601 default: 1602 ret = -EINVAL; 1603 goto e_data; 1604 } 1605 1606 /* For zero-length plaintext the src pointer is ignored; 1607 * otherwise both parts must be valid 1608 */ 1609 if (sha->src_len && !sha->src) 1610 return -EINVAL; 1611 1612 memset(&op, 0, sizeof(op)); 1613 op.cmd_q = cmd_q; 1614 op.jobid = CCP_NEW_JOBID(cmd_q->ccp); 1615 op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */ 1616 op.u.sha.type = sha->type; 1617 op.u.sha.msg_bits = sha->msg_bits; 1618 1619 /* For SHA1/224/256 the context fits in a single (32-byte) SB entry; 1620 * SHA384/512 require 2 adjacent SB slots, with the right half in the 1621 * first slot, and the left half in the second. Each portion must then 1622 * be in little endian format: use the 256-bit byte swap option. 1623 */ 1624 ret = ccp_init_dm_workarea(&ctx, cmd_q, sb_count * CCP_SB_BYTES, 1625 DMA_BIDIRECTIONAL); 1626 if (ret) 1627 return ret; 1628 if (sha->first) { 1629 switch (sha->type) { 1630 case CCP_SHA_TYPE_1: 1631 case CCP_SHA_TYPE_224: 1632 case CCP_SHA_TYPE_256: 1633 memcpy(ctx.address + ioffset, init, ctx_size); 1634 break; 1635 case CCP_SHA_TYPE_384: 1636 case CCP_SHA_TYPE_512: 1637 memcpy(ctx.address + ctx_size / 2, init, 1638 ctx_size / 2); 1639 memcpy(ctx.address, init + ctx_size / 2, 1640 ctx_size / 2); 1641 break; 1642 default: 1643 ret = -EINVAL; 1644 goto e_ctx; 1645 } 1646 } else { 1647 /* Restore the context */ 1648 ret = ccp_set_dm_area(&ctx, 0, sha->ctx, 0, 1649 sb_count * CCP_SB_BYTES); 1650 if (ret) 1651 goto e_ctx; 1652 } 1653 1654 ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, 1655 CCP_PASSTHRU_BYTESWAP_256BIT); 1656 if (ret) { 1657 cmd->engine_error = cmd_q->cmd_error; 1658 goto e_ctx; 1659 } 1660 1661 if (sha->src) { 1662 /* Send data to the CCP SHA engine; block_size is set above */ 1663 ret = ccp_init_data(&src, cmd_q, sha->src, sha->src_len, 1664 block_size, DMA_TO_DEVICE); 1665 if (ret) 1666 goto e_ctx; 1667 1668 while (src.sg_wa.bytes_left) { 1669 ccp_prepare_data(&src, NULL, &op, block_size, false); 1670 if (sha->final && !src.sg_wa.bytes_left) 1671 op.eom = 1; 1672 1673 ret = cmd_q->ccp->vdata->perform->sha(&op); 1674 if (ret) { 1675 cmd->engine_error = cmd_q->cmd_error; 1676 goto e_data; 1677 } 1678 1679 ccp_process_data(&src, NULL, &op); 1680 } 1681 } else { 1682 op.eom = 1; 1683 ret = cmd_q->ccp->vdata->perform->sha(&op); 1684 if (ret) { 1685 cmd->engine_error = cmd_q->cmd_error; 1686 goto e_data; 1687 } 1688 } 1689 1690 /* Retrieve the SHA context - convert from LE to BE using 1691 * 32-byte (256-bit) byteswapping to BE 1692 */ 1693 ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, 1694 CCP_PASSTHRU_BYTESWAP_256BIT); 1695 if (ret) { 1696 cmd->engine_error = cmd_q->cmd_error; 1697 goto e_data; 1698 } 1699 1700 if (sha->final) { 1701 /* Finishing up, so get the digest */ 1702 switch (sha->type) { 1703 case CCP_SHA_TYPE_1: 1704 case CCP_SHA_TYPE_224: 1705 case CCP_SHA_TYPE_256: 1706 ccp_get_dm_area(&ctx, ooffset, 1707 sha->ctx, 0, 1708 digest_size); 1709 break; 1710 case CCP_SHA_TYPE_384: 1711 case CCP_SHA_TYPE_512: 1712 ccp_get_dm_area(&ctx, 0, 1713 sha->ctx, LSB_ITEM_SIZE - ooffset, 1714 LSB_ITEM_SIZE); 1715 ccp_get_dm_area(&ctx, LSB_ITEM_SIZE + ooffset, 1716 sha->ctx, 0, 1717 LSB_ITEM_SIZE - ooffset); 1718 break; 1719 default: 1720 ret = -EINVAL; 1721 goto e_ctx; 1722 } 1723 } else { 1724 /* Stash the context */ 1725 ccp_get_dm_area(&ctx, 0, sha->ctx, 0, 1726 sb_count * CCP_SB_BYTES); 1727 } 1728 1729 if (sha->final && sha->opad) { 1730 /* HMAC operation, recursively perform final SHA */ 1731 struct ccp_cmd hmac_cmd; 1732 struct scatterlist sg; 1733 u8 *hmac_buf; 1734 1735 if (sha->opad_len != block_size) { 1736 ret = -EINVAL; 1737 goto e_data; 1738 } 1739 1740 hmac_buf = kmalloc(block_size + digest_size, GFP_KERNEL); 1741 if (!hmac_buf) { 1742 ret = -ENOMEM; 1743 goto e_data; 1744 } 1745 sg_init_one(&sg, hmac_buf, block_size + digest_size); 1746 1747 scatterwalk_map_and_copy(hmac_buf, sha->opad, 0, block_size, 0); 1748 switch (sha->type) { 1749 case CCP_SHA_TYPE_1: 1750 case CCP_SHA_TYPE_224: 1751 case CCP_SHA_TYPE_256: 1752 memcpy(hmac_buf + block_size, 1753 ctx.address + ooffset, 1754 digest_size); 1755 break; 1756 case CCP_SHA_TYPE_384: 1757 case CCP_SHA_TYPE_512: 1758 memcpy(hmac_buf + block_size, 1759 ctx.address + LSB_ITEM_SIZE + ooffset, 1760 LSB_ITEM_SIZE); 1761 memcpy(hmac_buf + block_size + 1762 (LSB_ITEM_SIZE - ooffset), 1763 ctx.address, 1764 LSB_ITEM_SIZE); 1765 break; 1766 default: 1767 ret = -EINVAL; 1768 goto e_ctx; 1769 } 1770 1771 memset(&hmac_cmd, 0, sizeof(hmac_cmd)); 1772 hmac_cmd.engine = CCP_ENGINE_SHA; 1773 hmac_cmd.u.sha.type = sha->type; 1774 hmac_cmd.u.sha.ctx = sha->ctx; 1775 hmac_cmd.u.sha.ctx_len = sha->ctx_len; 1776 hmac_cmd.u.sha.src = &sg; 1777 hmac_cmd.u.sha.src_len = block_size + digest_size; 1778 hmac_cmd.u.sha.opad = NULL; 1779 hmac_cmd.u.sha.opad_len = 0; 1780 hmac_cmd.u.sha.first = 1; 1781 hmac_cmd.u.sha.final = 1; 1782 hmac_cmd.u.sha.msg_bits = (block_size + digest_size) << 3; 1783 1784 ret = ccp_run_sha_cmd(cmd_q, &hmac_cmd); 1785 if (ret) 1786 cmd->engine_error = hmac_cmd.engine_error; 1787 1788 kfree(hmac_buf); 1789 } 1790 1791 e_data: 1792 if (sha->src) 1793 ccp_free_data(&src, cmd_q); 1794 1795 e_ctx: 1796 ccp_dm_free(&ctx); 1797 1798 return ret; 1799 } 1800 1801 static noinline_for_stack int 1802 ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) 1803 { 1804 struct ccp_rsa_engine *rsa = &cmd->u.rsa; 1805 struct ccp_dm_workarea exp, src, dst; 1806 struct ccp_op op; 1807 unsigned int sb_count, i_len, o_len; 1808 int ret; 1809 1810 /* Check against the maximum allowable size, in bits */ 1811 if (rsa->key_size > cmd_q->ccp->vdata->rsamax) 1812 return -EINVAL; 1813 1814 if (!rsa->exp || !rsa->mod || !rsa->src || !rsa->dst) 1815 return -EINVAL; 1816 1817 memset(&op, 0, sizeof(op)); 1818 op.cmd_q = cmd_q; 1819 op.jobid = CCP_NEW_JOBID(cmd_q->ccp); 1820 1821 /* The RSA modulus must precede the message being acted upon, so 1822 * it must be copied to a DMA area where the message and the 1823 * modulus can be concatenated. Therefore the input buffer 1824 * length required is twice the output buffer length (which 1825 * must be a multiple of 256-bits). Compute o_len, i_len in bytes. 1826 * Buffer sizes must be a multiple of 32 bytes; rounding up may be 1827 * required. 1828 */ 1829 o_len = 32 * ((rsa->key_size + 255) / 256); 1830 i_len = o_len * 2; 1831 1832 sb_count = 0; 1833 if (cmd_q->ccp->vdata->version < CCP_VERSION(5, 0)) { 1834 /* sb_count is the number of storage block slots required 1835 * for the modulus. 1836 */ 1837 sb_count = o_len / CCP_SB_BYTES; 1838 op.sb_key = cmd_q->ccp->vdata->perform->sballoc(cmd_q, 1839 sb_count); 1840 if (!op.sb_key) 1841 return -EIO; 1842 } else { 1843 /* A version 5 device allows a modulus size that will not fit 1844 * in the LSB, so the command will transfer it from memory. 1845 * Set the sb key to the default, even though it's not used. 1846 */ 1847 op.sb_key = cmd_q->sb_key; 1848 } 1849 1850 /* The RSA exponent must be in little endian format. Reverse its 1851 * byte order. 1852 */ 1853 ret = ccp_init_dm_workarea(&exp, cmd_q, o_len, DMA_TO_DEVICE); 1854 if (ret) 1855 goto e_sb; 1856 1857 ret = ccp_reverse_set_dm_area(&exp, 0, rsa->exp, 0, rsa->exp_len); 1858 if (ret) 1859 goto e_exp; 1860 1861 if (cmd_q->ccp->vdata->version < CCP_VERSION(5, 0)) { 1862 /* Copy the exponent to the local storage block, using 1863 * as many 32-byte blocks as were allocated above. It's 1864 * already little endian, so no further change is required. 1865 */ 1866 ret = ccp_copy_to_sb(cmd_q, &exp, op.jobid, op.sb_key, 1867 CCP_PASSTHRU_BYTESWAP_NOOP); 1868 if (ret) { 1869 cmd->engine_error = cmd_q->cmd_error; 1870 goto e_exp; 1871 } 1872 } else { 1873 /* The exponent can be retrieved from memory via DMA. */ 1874 op.exp.u.dma.address = exp.dma.address; 1875 op.exp.u.dma.offset = 0; 1876 } 1877 1878 /* Concatenate the modulus and the message. Both the modulus and 1879 * the operands must be in little endian format. Since the input 1880 * is in big endian format it must be converted. 1881 */ 1882 ret = ccp_init_dm_workarea(&src, cmd_q, i_len, DMA_TO_DEVICE); 1883 if (ret) 1884 goto e_exp; 1885 1886 ret = ccp_reverse_set_dm_area(&src, 0, rsa->mod, 0, rsa->mod_len); 1887 if (ret) 1888 goto e_src; 1889 ret = ccp_reverse_set_dm_area(&src, o_len, rsa->src, 0, rsa->src_len); 1890 if (ret) 1891 goto e_src; 1892 1893 /* Prepare the output area for the operation */ 1894 ret = ccp_init_dm_workarea(&dst, cmd_q, o_len, DMA_FROM_DEVICE); 1895 if (ret) 1896 goto e_src; 1897 1898 op.soc = 1; 1899 op.src.u.dma.address = src.dma.address; 1900 op.src.u.dma.offset = 0; 1901 op.src.u.dma.length = i_len; 1902 op.dst.u.dma.address = dst.dma.address; 1903 op.dst.u.dma.offset = 0; 1904 op.dst.u.dma.length = o_len; 1905 1906 op.u.rsa.mod_size = rsa->key_size; 1907 op.u.rsa.input_len = i_len; 1908 1909 ret = cmd_q->ccp->vdata->perform->rsa(&op); 1910 if (ret) { 1911 cmd->engine_error = cmd_q->cmd_error; 1912 goto e_dst; 1913 } 1914 1915 ccp_reverse_get_dm_area(&dst, 0, rsa->dst, 0, rsa->mod_len); 1916 1917 e_dst: 1918 ccp_dm_free(&dst); 1919 1920 e_src: 1921 ccp_dm_free(&src); 1922 1923 e_exp: 1924 ccp_dm_free(&exp); 1925 1926 e_sb: 1927 if (sb_count) 1928 cmd_q->ccp->vdata->perform->sbfree(cmd_q, op.sb_key, sb_count); 1929 1930 return ret; 1931 } 1932 1933 static noinline_for_stack int 1934 ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) 1935 { 1936 struct ccp_passthru_engine *pt = &cmd->u.passthru; 1937 struct ccp_dm_workarea mask; 1938 struct ccp_data src, dst; 1939 struct ccp_op op; 1940 bool in_place = false; 1941 unsigned int i; 1942 int ret = 0; 1943 1944 if (!pt->final && (pt->src_len & (CCP_PASSTHRU_BLOCKSIZE - 1))) 1945 return -EINVAL; 1946 1947 if (!pt->src || !pt->dst) 1948 return -EINVAL; 1949 1950 if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) { 1951 if (pt->mask_len != CCP_PASSTHRU_MASKSIZE) 1952 return -EINVAL; 1953 if (!pt->mask) 1954 return -EINVAL; 1955 } 1956 1957 BUILD_BUG_ON(CCP_PASSTHRU_SB_COUNT != 1); 1958 1959 memset(&op, 0, sizeof(op)); 1960 op.cmd_q = cmd_q; 1961 op.jobid = CCP_NEW_JOBID(cmd_q->ccp); 1962 1963 if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) { 1964 /* Load the mask */ 1965 op.sb_key = cmd_q->sb_key; 1966 1967 ret = ccp_init_dm_workarea(&mask, cmd_q, 1968 CCP_PASSTHRU_SB_COUNT * 1969 CCP_SB_BYTES, 1970 DMA_TO_DEVICE); 1971 if (ret) 1972 return ret; 1973 1974 ret = ccp_set_dm_area(&mask, 0, pt->mask, 0, pt->mask_len); 1975 if (ret) 1976 goto e_mask; 1977 ret = ccp_copy_to_sb(cmd_q, &mask, op.jobid, op.sb_key, 1978 CCP_PASSTHRU_BYTESWAP_NOOP); 1979 if (ret) { 1980 cmd->engine_error = cmd_q->cmd_error; 1981 goto e_mask; 1982 } 1983 } 1984 1985 /* Prepare the input and output data workareas. For in-place 1986 * operations we need to set the dma direction to BIDIRECTIONAL 1987 * and copy the src workarea to the dst workarea. 1988 */ 1989 if (sg_virt(pt->src) == sg_virt(pt->dst)) 1990 in_place = true; 1991 1992 ret = ccp_init_data(&src, cmd_q, pt->src, pt->src_len, 1993 CCP_PASSTHRU_MASKSIZE, 1994 in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE); 1995 if (ret) 1996 goto e_mask; 1997 1998 if (in_place) { 1999 dst = src; 2000 } else { 2001 ret = ccp_init_data(&dst, cmd_q, pt->dst, pt->src_len, 2002 CCP_PASSTHRU_MASKSIZE, DMA_FROM_DEVICE); 2003 if (ret) 2004 goto e_src; 2005 } 2006 2007 /* Send data to the CCP Passthru engine 2008 * Because the CCP engine works on a single source and destination 2009 * dma address at a time, each entry in the source scatterlist 2010 * (after the dma_map_sg call) must be less than or equal to the 2011 * (remaining) length in the destination scatterlist entry and the 2012 * length must be a multiple of CCP_PASSTHRU_BLOCKSIZE 2013 */ 2014 dst.sg_wa.sg_used = 0; 2015 for (i = 1; i <= src.sg_wa.dma_count; i++) { 2016 if (!dst.sg_wa.sg || 2017 (dst.sg_wa.sg->length < src.sg_wa.sg->length)) { 2018 ret = -EINVAL; 2019 goto e_dst; 2020 } 2021 2022 if (i == src.sg_wa.dma_count) { 2023 op.eom = 1; 2024 op.soc = 1; 2025 } 2026 2027 op.src.type = CCP_MEMTYPE_SYSTEM; 2028 op.src.u.dma.address = sg_dma_address(src.sg_wa.sg); 2029 op.src.u.dma.offset = 0; 2030 op.src.u.dma.length = sg_dma_len(src.sg_wa.sg); 2031 2032 op.dst.type = CCP_MEMTYPE_SYSTEM; 2033 op.dst.u.dma.address = sg_dma_address(dst.sg_wa.sg); 2034 op.dst.u.dma.offset = dst.sg_wa.sg_used; 2035 op.dst.u.dma.length = op.src.u.dma.length; 2036 2037 ret = cmd_q->ccp->vdata->perform->passthru(&op); 2038 if (ret) { 2039 cmd->engine_error = cmd_q->cmd_error; 2040 goto e_dst; 2041 } 2042 2043 dst.sg_wa.sg_used += src.sg_wa.sg->length; 2044 if (dst.sg_wa.sg_used == dst.sg_wa.sg->length) { 2045 dst.sg_wa.sg = sg_next(dst.sg_wa.sg); 2046 dst.sg_wa.sg_used = 0; 2047 } 2048 src.sg_wa.sg = sg_next(src.sg_wa.sg); 2049 } 2050 2051 e_dst: 2052 if (!in_place) 2053 ccp_free_data(&dst, cmd_q); 2054 2055 e_src: 2056 ccp_free_data(&src, cmd_q); 2057 2058 e_mask: 2059 if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) 2060 ccp_dm_free(&mask); 2061 2062 return ret; 2063 } 2064 2065 static noinline_for_stack int 2066 ccp_run_passthru_nomap_cmd(struct ccp_cmd_queue *cmd_q, 2067 struct ccp_cmd *cmd) 2068 { 2069 struct ccp_passthru_nomap_engine *pt = &cmd->u.passthru_nomap; 2070 struct ccp_dm_workarea mask; 2071 struct ccp_op op; 2072 int ret; 2073 2074 if (!pt->final && (pt->src_len & (CCP_PASSTHRU_BLOCKSIZE - 1))) 2075 return -EINVAL; 2076 2077 if (!pt->src_dma || !pt->dst_dma) 2078 return -EINVAL; 2079 2080 if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) { 2081 if (pt->mask_len != CCP_PASSTHRU_MASKSIZE) 2082 return -EINVAL; 2083 if (!pt->mask) 2084 return -EINVAL; 2085 } 2086 2087 BUILD_BUG_ON(CCP_PASSTHRU_SB_COUNT != 1); 2088 2089 memset(&op, 0, sizeof(op)); 2090 op.cmd_q = cmd_q; 2091 op.jobid = CCP_NEW_JOBID(cmd_q->ccp); 2092 2093 if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) { 2094 /* Load the mask */ 2095 op.sb_key = cmd_q->sb_key; 2096 2097 mask.length = pt->mask_len; 2098 mask.dma.address = pt->mask; 2099 mask.dma.length = pt->mask_len; 2100 2101 ret = ccp_copy_to_sb(cmd_q, &mask, op.jobid, op.sb_key, 2102 CCP_PASSTHRU_BYTESWAP_NOOP); 2103 if (ret) { 2104 cmd->engine_error = cmd_q->cmd_error; 2105 return ret; 2106 } 2107 } 2108 2109 /* Send data to the CCP Passthru engine */ 2110 op.eom = 1; 2111 op.soc = 1; 2112 2113 op.src.type = CCP_MEMTYPE_SYSTEM; 2114 op.src.u.dma.address = pt->src_dma; 2115 op.src.u.dma.offset = 0; 2116 op.src.u.dma.length = pt->src_len; 2117 2118 op.dst.type = CCP_MEMTYPE_SYSTEM; 2119 op.dst.u.dma.address = pt->dst_dma; 2120 op.dst.u.dma.offset = 0; 2121 op.dst.u.dma.length = pt->src_len; 2122 2123 ret = cmd_q->ccp->vdata->perform->passthru(&op); 2124 if (ret) 2125 cmd->engine_error = cmd_q->cmd_error; 2126 2127 return ret; 2128 } 2129 2130 static int ccp_run_ecc_mm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) 2131 { 2132 struct ccp_ecc_engine *ecc = &cmd->u.ecc; 2133 struct ccp_dm_workarea src, dst; 2134 struct ccp_op op; 2135 int ret; 2136 u8 *save; 2137 2138 if (!ecc->u.mm.operand_1 || 2139 (ecc->u.mm.operand_1_len > CCP_ECC_MODULUS_BYTES)) 2140 return -EINVAL; 2141 2142 if (ecc->function != CCP_ECC_FUNCTION_MINV_384BIT) 2143 if (!ecc->u.mm.operand_2 || 2144 (ecc->u.mm.operand_2_len > CCP_ECC_MODULUS_BYTES)) 2145 return -EINVAL; 2146 2147 if (!ecc->u.mm.result || 2148 (ecc->u.mm.result_len < CCP_ECC_MODULUS_BYTES)) 2149 return -EINVAL; 2150 2151 memset(&op, 0, sizeof(op)); 2152 op.cmd_q = cmd_q; 2153 op.jobid = CCP_NEW_JOBID(cmd_q->ccp); 2154 2155 /* Concatenate the modulus and the operands. Both the modulus and 2156 * the operands must be in little endian format. Since the input 2157 * is in big endian format it must be converted and placed in a 2158 * fixed length buffer. 2159 */ 2160 ret = ccp_init_dm_workarea(&src, cmd_q, CCP_ECC_SRC_BUF_SIZE, 2161 DMA_TO_DEVICE); 2162 if (ret) 2163 return ret; 2164 2165 /* Save the workarea address since it is updated in order to perform 2166 * the concatenation 2167 */ 2168 save = src.address; 2169 2170 /* Copy the ECC modulus */ 2171 ret = ccp_reverse_set_dm_area(&src, 0, ecc->mod, 0, ecc->mod_len); 2172 if (ret) 2173 goto e_src; 2174 src.address += CCP_ECC_OPERAND_SIZE; 2175 2176 /* Copy the first operand */ 2177 ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.mm.operand_1, 0, 2178 ecc->u.mm.operand_1_len); 2179 if (ret) 2180 goto e_src; 2181 src.address += CCP_ECC_OPERAND_SIZE; 2182 2183 if (ecc->function != CCP_ECC_FUNCTION_MINV_384BIT) { 2184 /* Copy the second operand */ 2185 ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.mm.operand_2, 0, 2186 ecc->u.mm.operand_2_len); 2187 if (ret) 2188 goto e_src; 2189 src.address += CCP_ECC_OPERAND_SIZE; 2190 } 2191 2192 /* Restore the workarea address */ 2193 src.address = save; 2194 2195 /* Prepare the output area for the operation */ 2196 ret = ccp_init_dm_workarea(&dst, cmd_q, CCP_ECC_DST_BUF_SIZE, 2197 DMA_FROM_DEVICE); 2198 if (ret) 2199 goto e_src; 2200 2201 op.soc = 1; 2202 op.src.u.dma.address = src.dma.address; 2203 op.src.u.dma.offset = 0; 2204 op.src.u.dma.length = src.length; 2205 op.dst.u.dma.address = dst.dma.address; 2206 op.dst.u.dma.offset = 0; 2207 op.dst.u.dma.length = dst.length; 2208 2209 op.u.ecc.function = cmd->u.ecc.function; 2210 2211 ret = cmd_q->ccp->vdata->perform->ecc(&op); 2212 if (ret) { 2213 cmd->engine_error = cmd_q->cmd_error; 2214 goto e_dst; 2215 } 2216 2217 ecc->ecc_result = le16_to_cpup( 2218 (const __le16 *)(dst.address + CCP_ECC_RESULT_OFFSET)); 2219 if (!(ecc->ecc_result & CCP_ECC_RESULT_SUCCESS)) { 2220 ret = -EIO; 2221 goto e_dst; 2222 } 2223 2224 /* Save the ECC result */ 2225 ccp_reverse_get_dm_area(&dst, 0, ecc->u.mm.result, 0, 2226 CCP_ECC_MODULUS_BYTES); 2227 2228 e_dst: 2229 ccp_dm_free(&dst); 2230 2231 e_src: 2232 ccp_dm_free(&src); 2233 2234 return ret; 2235 } 2236 2237 static int ccp_run_ecc_pm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) 2238 { 2239 struct ccp_ecc_engine *ecc = &cmd->u.ecc; 2240 struct ccp_dm_workarea src, dst; 2241 struct ccp_op op; 2242 int ret; 2243 u8 *save; 2244 2245 if (!ecc->u.pm.point_1.x || 2246 (ecc->u.pm.point_1.x_len > CCP_ECC_MODULUS_BYTES) || 2247 !ecc->u.pm.point_1.y || 2248 (ecc->u.pm.point_1.y_len > CCP_ECC_MODULUS_BYTES)) 2249 return -EINVAL; 2250 2251 if (ecc->function == CCP_ECC_FUNCTION_PADD_384BIT) { 2252 if (!ecc->u.pm.point_2.x || 2253 (ecc->u.pm.point_2.x_len > CCP_ECC_MODULUS_BYTES) || 2254 !ecc->u.pm.point_2.y || 2255 (ecc->u.pm.point_2.y_len > CCP_ECC_MODULUS_BYTES)) 2256 return -EINVAL; 2257 } else { 2258 if (!ecc->u.pm.domain_a || 2259 (ecc->u.pm.domain_a_len > CCP_ECC_MODULUS_BYTES)) 2260 return -EINVAL; 2261 2262 if (ecc->function == CCP_ECC_FUNCTION_PMUL_384BIT) 2263 if (!ecc->u.pm.scalar || 2264 (ecc->u.pm.scalar_len > CCP_ECC_MODULUS_BYTES)) 2265 return -EINVAL; 2266 } 2267 2268 if (!ecc->u.pm.result.x || 2269 (ecc->u.pm.result.x_len < CCP_ECC_MODULUS_BYTES) || 2270 !ecc->u.pm.result.y || 2271 (ecc->u.pm.result.y_len < CCP_ECC_MODULUS_BYTES)) 2272 return -EINVAL; 2273 2274 memset(&op, 0, sizeof(op)); 2275 op.cmd_q = cmd_q; 2276 op.jobid = CCP_NEW_JOBID(cmd_q->ccp); 2277 2278 /* Concatenate the modulus and the operands. Both the modulus and 2279 * the operands must be in little endian format. Since the input 2280 * is in big endian format it must be converted and placed in a 2281 * fixed length buffer. 2282 */ 2283 ret = ccp_init_dm_workarea(&src, cmd_q, CCP_ECC_SRC_BUF_SIZE, 2284 DMA_TO_DEVICE); 2285 if (ret) 2286 return ret; 2287 2288 /* Save the workarea address since it is updated in order to perform 2289 * the concatenation 2290 */ 2291 save = src.address; 2292 2293 /* Copy the ECC modulus */ 2294 ret = ccp_reverse_set_dm_area(&src, 0, ecc->mod, 0, ecc->mod_len); 2295 if (ret) 2296 goto e_src; 2297 src.address += CCP_ECC_OPERAND_SIZE; 2298 2299 /* Copy the first point X and Y coordinate */ 2300 ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.point_1.x, 0, 2301 ecc->u.pm.point_1.x_len); 2302 if (ret) 2303 goto e_src; 2304 src.address += CCP_ECC_OPERAND_SIZE; 2305 ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.point_1.y, 0, 2306 ecc->u.pm.point_1.y_len); 2307 if (ret) 2308 goto e_src; 2309 src.address += CCP_ECC_OPERAND_SIZE; 2310 2311 /* Set the first point Z coordinate to 1 */ 2312 *src.address = 0x01; 2313 src.address += CCP_ECC_OPERAND_SIZE; 2314 2315 if (ecc->function == CCP_ECC_FUNCTION_PADD_384BIT) { 2316 /* Copy the second point X and Y coordinate */ 2317 ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.point_2.x, 0, 2318 ecc->u.pm.point_2.x_len); 2319 if (ret) 2320 goto e_src; 2321 src.address += CCP_ECC_OPERAND_SIZE; 2322 ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.point_2.y, 0, 2323 ecc->u.pm.point_2.y_len); 2324 if (ret) 2325 goto e_src; 2326 src.address += CCP_ECC_OPERAND_SIZE; 2327 2328 /* Set the second point Z coordinate to 1 */ 2329 *src.address = 0x01; 2330 src.address += CCP_ECC_OPERAND_SIZE; 2331 } else { 2332 /* Copy the Domain "a" parameter */ 2333 ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.domain_a, 0, 2334 ecc->u.pm.domain_a_len); 2335 if (ret) 2336 goto e_src; 2337 src.address += CCP_ECC_OPERAND_SIZE; 2338 2339 if (ecc->function == CCP_ECC_FUNCTION_PMUL_384BIT) { 2340 /* Copy the scalar value */ 2341 ret = ccp_reverse_set_dm_area(&src, 0, 2342 ecc->u.pm.scalar, 0, 2343 ecc->u.pm.scalar_len); 2344 if (ret) 2345 goto e_src; 2346 src.address += CCP_ECC_OPERAND_SIZE; 2347 } 2348 } 2349 2350 /* Restore the workarea address */ 2351 src.address = save; 2352 2353 /* Prepare the output area for the operation */ 2354 ret = ccp_init_dm_workarea(&dst, cmd_q, CCP_ECC_DST_BUF_SIZE, 2355 DMA_FROM_DEVICE); 2356 if (ret) 2357 goto e_src; 2358 2359 op.soc = 1; 2360 op.src.u.dma.address = src.dma.address; 2361 op.src.u.dma.offset = 0; 2362 op.src.u.dma.length = src.length; 2363 op.dst.u.dma.address = dst.dma.address; 2364 op.dst.u.dma.offset = 0; 2365 op.dst.u.dma.length = dst.length; 2366 2367 op.u.ecc.function = cmd->u.ecc.function; 2368 2369 ret = cmd_q->ccp->vdata->perform->ecc(&op); 2370 if (ret) { 2371 cmd->engine_error = cmd_q->cmd_error; 2372 goto e_dst; 2373 } 2374 2375 ecc->ecc_result = le16_to_cpup( 2376 (const __le16 *)(dst.address + CCP_ECC_RESULT_OFFSET)); 2377 if (!(ecc->ecc_result & CCP_ECC_RESULT_SUCCESS)) { 2378 ret = -EIO; 2379 goto e_dst; 2380 } 2381 2382 /* Save the workarea address since it is updated as we walk through 2383 * to copy the point math result 2384 */ 2385 save = dst.address; 2386 2387 /* Save the ECC result X and Y coordinates */ 2388 ccp_reverse_get_dm_area(&dst, 0, ecc->u.pm.result.x, 0, 2389 CCP_ECC_MODULUS_BYTES); 2390 dst.address += CCP_ECC_OUTPUT_SIZE; 2391 ccp_reverse_get_dm_area(&dst, 0, ecc->u.pm.result.y, 0, 2392 CCP_ECC_MODULUS_BYTES); 2393 dst.address += CCP_ECC_OUTPUT_SIZE; 2394 2395 /* Restore the workarea address */ 2396 dst.address = save; 2397 2398 e_dst: 2399 ccp_dm_free(&dst); 2400 2401 e_src: 2402 ccp_dm_free(&src); 2403 2404 return ret; 2405 } 2406 2407 static noinline_for_stack int 2408 ccp_run_ecc_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) 2409 { 2410 struct ccp_ecc_engine *ecc = &cmd->u.ecc; 2411 2412 ecc->ecc_result = 0; 2413 2414 if (!ecc->mod || 2415 (ecc->mod_len > CCP_ECC_MODULUS_BYTES)) 2416 return -EINVAL; 2417 2418 switch (ecc->function) { 2419 case CCP_ECC_FUNCTION_MMUL_384BIT: 2420 case CCP_ECC_FUNCTION_MADD_384BIT: 2421 case CCP_ECC_FUNCTION_MINV_384BIT: 2422 return ccp_run_ecc_mm_cmd(cmd_q, cmd); 2423 2424 case CCP_ECC_FUNCTION_PADD_384BIT: 2425 case CCP_ECC_FUNCTION_PMUL_384BIT: 2426 case CCP_ECC_FUNCTION_PDBL_384BIT: 2427 return ccp_run_ecc_pm_cmd(cmd_q, cmd); 2428 2429 default: 2430 return -EINVAL; 2431 } 2432 } 2433 2434 int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) 2435 { 2436 int ret; 2437 2438 cmd->engine_error = 0; 2439 cmd_q->cmd_error = 0; 2440 cmd_q->int_rcvd = 0; 2441 cmd_q->free_slots = cmd_q->ccp->vdata->perform->get_free_slots(cmd_q); 2442 2443 switch (cmd->engine) { 2444 case CCP_ENGINE_AES: 2445 switch (cmd->u.aes.mode) { 2446 case CCP_AES_MODE_CMAC: 2447 ret = ccp_run_aes_cmac_cmd(cmd_q, cmd); 2448 break; 2449 case CCP_AES_MODE_GCM: 2450 ret = ccp_run_aes_gcm_cmd(cmd_q, cmd); 2451 break; 2452 default: 2453 ret = ccp_run_aes_cmd(cmd_q, cmd); 2454 break; 2455 } 2456 break; 2457 case CCP_ENGINE_XTS_AES_128: 2458 ret = ccp_run_xts_aes_cmd(cmd_q, cmd); 2459 break; 2460 case CCP_ENGINE_DES3: 2461 ret = ccp_run_des3_cmd(cmd_q, cmd); 2462 break; 2463 case CCP_ENGINE_SHA: 2464 ret = ccp_run_sha_cmd(cmd_q, cmd); 2465 break; 2466 case CCP_ENGINE_RSA: 2467 ret = ccp_run_rsa_cmd(cmd_q, cmd); 2468 break; 2469 case CCP_ENGINE_PASSTHRU: 2470 if (cmd->flags & CCP_CMD_PASSTHRU_NO_DMA_MAP) 2471 ret = ccp_run_passthru_nomap_cmd(cmd_q, cmd); 2472 else 2473 ret = ccp_run_passthru_cmd(cmd_q, cmd); 2474 break; 2475 case CCP_ENGINE_ECC: 2476 ret = ccp_run_ecc_cmd(cmd_q, cmd); 2477 break; 2478 default: 2479 ret = -EINVAL; 2480 } 2481 2482 return ret; 2483 } 2484