/*
 * AMD Cryptographic Coprocessor (CCP) driver
 *
 * Copyright (C) 2013,2017 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 * Author: Gary R Hook <gary.hook@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <crypto/scatterwalk.h>
#include <crypto/des.h>
#include <linux/ccp.h>

#include "ccp-dev.h"

/* SHA initial context values */
static const __be32 ccp_sha1_init[SHA1_DIGEST_SIZE / sizeof(__be32)] = {
	cpu_to_be32(SHA1_H0), cpu_to_be32(SHA1_H1),
	cpu_to_be32(SHA1_H2), cpu_to_be32(SHA1_H3),
	cpu_to_be32(SHA1_H4),
};

static const __be32 ccp_sha224_init[SHA256_DIGEST_SIZE / sizeof(__be32)] = {
	cpu_to_be32(SHA224_H0), cpu_to_be32(SHA224_H1),
	cpu_to_be32(SHA224_H2), cpu_to_be32(SHA224_H3),
	cpu_to_be32(SHA224_H4), cpu_to_be32(SHA224_H5),
	cpu_to_be32(SHA224_H6), cpu_to_be32(SHA224_H7),
};

static const __be32 ccp_sha256_init[SHA256_DIGEST_SIZE / sizeof(__be32)] = {
	cpu_to_be32(SHA256_H0), cpu_to_be32(SHA256_H1),
	cpu_to_be32(SHA256_H2), cpu_to_be32(SHA256_H3),
	cpu_to_be32(SHA256_H4), cpu_to_be32(SHA256_H5),
	cpu_to_be32(SHA256_H6), cpu_to_be32(SHA256_H7),
};

static const __be64 ccp_sha384_init[SHA512_DIGEST_SIZE / sizeof(__be64)] = {
	cpu_to_be64(SHA384_H0), cpu_to_be64(SHA384_H1),
	cpu_to_be64(SHA384_H2), cpu_to_be64(SHA384_H3),
	cpu_to_be64(SHA384_H4), cpu_to_be64(SHA384_H5),
	cpu_to_be64(SHA384_H6), cpu_to_be64(SHA384_H7),
};

static const __be64 ccp_sha512_init[SHA512_DIGEST_SIZE / sizeof(__be64)] = {
	cpu_to_be64(SHA512_H0), cpu_to_be64(SHA512_H1),
	cpu_to_be64(SHA512_H2), cpu_to_be64(SHA512_H3),
	cpu_to_be64(SHA512_H4), cpu_to_be64(SHA512_H5),
	cpu_to_be64(SHA512_H6), cpu_to_be64(SHA512_H7),
};

#define	CCP_NEW_JOBID(ccp)	((ccp->vdata->version == CCP_VERSION(3, 0)) ?
\ 60 ccp_gen_jobid(ccp) : 0) 61 62 static u32 ccp_gen_jobid(struct ccp_device *ccp) 63 { 64 return atomic_inc_return(&ccp->current_id) & CCP_JOBID_MASK; 65 } 66 67 static void ccp_sg_free(struct ccp_sg_workarea *wa) 68 { 69 if (wa->dma_count) 70 dma_unmap_sg(wa->dma_dev, wa->dma_sg, wa->nents, wa->dma_dir); 71 72 wa->dma_count = 0; 73 } 74 75 static int ccp_init_sg_workarea(struct ccp_sg_workarea *wa, struct device *dev, 76 struct scatterlist *sg, u64 len, 77 enum dma_data_direction dma_dir) 78 { 79 memset(wa, 0, sizeof(*wa)); 80 81 wa->sg = sg; 82 if (!sg) 83 return 0; 84 85 wa->nents = sg_nents_for_len(sg, len); 86 if (wa->nents < 0) 87 return wa->nents; 88 89 wa->bytes_left = len; 90 wa->sg_used = 0; 91 92 if (len == 0) 93 return 0; 94 95 if (dma_dir == DMA_NONE) 96 return 0; 97 98 wa->dma_sg = sg; 99 wa->dma_dev = dev; 100 wa->dma_dir = dma_dir; 101 wa->dma_count = dma_map_sg(dev, sg, wa->nents, dma_dir); 102 if (!wa->dma_count) 103 return -ENOMEM; 104 105 return 0; 106 } 107 108 static void ccp_update_sg_workarea(struct ccp_sg_workarea *wa, unsigned int len) 109 { 110 unsigned int nbytes = min_t(u64, len, wa->bytes_left); 111 112 if (!wa->sg) 113 return; 114 115 wa->sg_used += nbytes; 116 wa->bytes_left -= nbytes; 117 if (wa->sg_used == wa->sg->length) { 118 wa->sg = sg_next(wa->sg); 119 wa->sg_used = 0; 120 } 121 } 122 123 static void ccp_dm_free(struct ccp_dm_workarea *wa) 124 { 125 if (wa->length <= CCP_DMAPOOL_MAX_SIZE) { 126 if (wa->address) 127 dma_pool_free(wa->dma_pool, wa->address, 128 wa->dma.address); 129 } else { 130 if (wa->dma.address) 131 dma_unmap_single(wa->dev, wa->dma.address, wa->length, 132 wa->dma.dir); 133 kfree(wa->address); 134 } 135 136 wa->address = NULL; 137 wa->dma.address = 0; 138 } 139 140 static int ccp_init_dm_workarea(struct ccp_dm_workarea *wa, 141 struct ccp_cmd_queue *cmd_q, 142 unsigned int len, 143 enum dma_data_direction dir) 144 { 145 memset(wa, 0, sizeof(*wa)); 146 147 if (!len) 148 return 0; 149 150 wa->dev = cmd_q->ccp->dev; 151 wa->length = len; 152 153 if (len <= CCP_DMAPOOL_MAX_SIZE) { 154 wa->dma_pool = cmd_q->dma_pool; 155 156 wa->address = dma_pool_alloc(wa->dma_pool, GFP_KERNEL, 157 &wa->dma.address); 158 if (!wa->address) 159 return -ENOMEM; 160 161 wa->dma.length = CCP_DMAPOOL_MAX_SIZE; 162 163 memset(wa->address, 0, CCP_DMAPOOL_MAX_SIZE); 164 } else { 165 wa->address = kzalloc(len, GFP_KERNEL); 166 if (!wa->address) 167 return -ENOMEM; 168 169 wa->dma.address = dma_map_single(wa->dev, wa->address, len, 170 dir); 171 if (dma_mapping_error(wa->dev, wa->dma.address)) 172 return -ENOMEM; 173 174 wa->dma.length = len; 175 } 176 wa->dma.dir = dir; 177 178 return 0; 179 } 180 181 static void ccp_set_dm_area(struct ccp_dm_workarea *wa, unsigned int wa_offset, 182 struct scatterlist *sg, unsigned int sg_offset, 183 unsigned int len) 184 { 185 WARN_ON(!wa->address); 186 187 scatterwalk_map_and_copy(wa->address + wa_offset, sg, sg_offset, len, 188 0); 189 } 190 191 static void ccp_get_dm_area(struct ccp_dm_workarea *wa, unsigned int wa_offset, 192 struct scatterlist *sg, unsigned int sg_offset, 193 unsigned int len) 194 { 195 WARN_ON(!wa->address); 196 197 scatterwalk_map_and_copy(wa->address + wa_offset, sg, sg_offset, len, 198 1); 199 } 200 201 static int ccp_reverse_set_dm_area(struct ccp_dm_workarea *wa, 202 unsigned int wa_offset, 203 struct scatterlist *sg, 204 unsigned int sg_offset, 205 unsigned int len) 206 { 207 u8 *p, *q; 208 209 ccp_set_dm_area(wa, wa_offset, sg, sg_offset, len); 210 211 p = wa->address + wa_offset; 212 q = p + 
len - 1; 213 while (p < q) { 214 *p = *p ^ *q; 215 *q = *p ^ *q; 216 *p = *p ^ *q; 217 p++; 218 q--; 219 } 220 return 0; 221 } 222 223 static void ccp_reverse_get_dm_area(struct ccp_dm_workarea *wa, 224 unsigned int wa_offset, 225 struct scatterlist *sg, 226 unsigned int sg_offset, 227 unsigned int len) 228 { 229 u8 *p, *q; 230 231 p = wa->address + wa_offset; 232 q = p + len - 1; 233 while (p < q) { 234 *p = *p ^ *q; 235 *q = *p ^ *q; 236 *p = *p ^ *q; 237 p++; 238 q--; 239 } 240 241 ccp_get_dm_area(wa, wa_offset, sg, sg_offset, len); 242 } 243 244 static void ccp_free_data(struct ccp_data *data, struct ccp_cmd_queue *cmd_q) 245 { 246 ccp_dm_free(&data->dm_wa); 247 ccp_sg_free(&data->sg_wa); 248 } 249 250 static int ccp_init_data(struct ccp_data *data, struct ccp_cmd_queue *cmd_q, 251 struct scatterlist *sg, u64 sg_len, 252 unsigned int dm_len, 253 enum dma_data_direction dir) 254 { 255 int ret; 256 257 memset(data, 0, sizeof(*data)); 258 259 ret = ccp_init_sg_workarea(&data->sg_wa, cmd_q->ccp->dev, sg, sg_len, 260 dir); 261 if (ret) 262 goto e_err; 263 264 ret = ccp_init_dm_workarea(&data->dm_wa, cmd_q, dm_len, dir); 265 if (ret) 266 goto e_err; 267 268 return 0; 269 270 e_err: 271 ccp_free_data(data, cmd_q); 272 273 return ret; 274 } 275 276 static unsigned int ccp_queue_buf(struct ccp_data *data, unsigned int from) 277 { 278 struct ccp_sg_workarea *sg_wa = &data->sg_wa; 279 struct ccp_dm_workarea *dm_wa = &data->dm_wa; 280 unsigned int buf_count, nbytes; 281 282 /* Clear the buffer if setting it */ 283 if (!from) 284 memset(dm_wa->address, 0, dm_wa->length); 285 286 if (!sg_wa->sg) 287 return 0; 288 289 /* Perform the copy operation 290 * nbytes will always be <= UINT_MAX because dm_wa->length is 291 * an unsigned int 292 */ 293 nbytes = min_t(u64, sg_wa->bytes_left, dm_wa->length); 294 scatterwalk_map_and_copy(dm_wa->address, sg_wa->sg, sg_wa->sg_used, 295 nbytes, from); 296 297 /* Update the structures and generate the count */ 298 buf_count = 0; 299 while (sg_wa->bytes_left && (buf_count < dm_wa->length)) { 300 nbytes = min(sg_wa->sg->length - sg_wa->sg_used, 301 dm_wa->length - buf_count); 302 nbytes = min_t(u64, sg_wa->bytes_left, nbytes); 303 304 buf_count += nbytes; 305 ccp_update_sg_workarea(sg_wa, nbytes); 306 } 307 308 return buf_count; 309 } 310 311 static unsigned int ccp_fill_queue_buf(struct ccp_data *data) 312 { 313 return ccp_queue_buf(data, 0); 314 } 315 316 static unsigned int ccp_empty_queue_buf(struct ccp_data *data) 317 { 318 return ccp_queue_buf(data, 1); 319 } 320 321 static void ccp_prepare_data(struct ccp_data *src, struct ccp_data *dst, 322 struct ccp_op *op, unsigned int block_size, 323 bool blocksize_op) 324 { 325 unsigned int sg_src_len, sg_dst_len, op_len; 326 327 /* The CCP can only DMA from/to one address each per operation. This 328 * requires that we find the smallest DMA area between the source 329 * and destination. The resulting len values will always be <= UINT_MAX 330 * because the dma length is an unsigned int. 
331 */ 332 sg_src_len = sg_dma_len(src->sg_wa.sg) - src->sg_wa.sg_used; 333 sg_src_len = min_t(u64, src->sg_wa.bytes_left, sg_src_len); 334 335 if (dst) { 336 sg_dst_len = sg_dma_len(dst->sg_wa.sg) - dst->sg_wa.sg_used; 337 sg_dst_len = min_t(u64, src->sg_wa.bytes_left, sg_dst_len); 338 op_len = min(sg_src_len, sg_dst_len); 339 } else { 340 op_len = sg_src_len; 341 } 342 343 /* The data operation length will be at least block_size in length 344 * or the smaller of available sg room remaining for the source or 345 * the destination 346 */ 347 op_len = max(op_len, block_size); 348 349 /* Unless we have to buffer data, there's no reason to wait */ 350 op->soc = 0; 351 352 if (sg_src_len < block_size) { 353 /* Not enough data in the sg element, so it 354 * needs to be buffered into a blocksize chunk 355 */ 356 int cp_len = ccp_fill_queue_buf(src); 357 358 op->soc = 1; 359 op->src.u.dma.address = src->dm_wa.dma.address; 360 op->src.u.dma.offset = 0; 361 op->src.u.dma.length = (blocksize_op) ? block_size : cp_len; 362 } else { 363 /* Enough data in the sg element, but we need to 364 * adjust for any previously copied data 365 */ 366 op->src.u.dma.address = sg_dma_address(src->sg_wa.sg); 367 op->src.u.dma.offset = src->sg_wa.sg_used; 368 op->src.u.dma.length = op_len & ~(block_size - 1); 369 370 ccp_update_sg_workarea(&src->sg_wa, op->src.u.dma.length); 371 } 372 373 if (dst) { 374 if (sg_dst_len < block_size) { 375 /* Not enough room in the sg element or we're on the 376 * last piece of data (when using padding), so the 377 * output needs to be buffered into a blocksize chunk 378 */ 379 op->soc = 1; 380 op->dst.u.dma.address = dst->dm_wa.dma.address; 381 op->dst.u.dma.offset = 0; 382 op->dst.u.dma.length = op->src.u.dma.length; 383 } else { 384 /* Enough room in the sg element, but we need to 385 * adjust for any previously used area 386 */ 387 op->dst.u.dma.address = sg_dma_address(dst->sg_wa.sg); 388 op->dst.u.dma.offset = dst->sg_wa.sg_used; 389 op->dst.u.dma.length = op->src.u.dma.length; 390 } 391 } 392 } 393 394 static void ccp_process_data(struct ccp_data *src, struct ccp_data *dst, 395 struct ccp_op *op) 396 { 397 op->init = 0; 398 399 if (dst) { 400 if (op->dst.u.dma.address == dst->dm_wa.dma.address) 401 ccp_empty_queue_buf(dst); 402 else 403 ccp_update_sg_workarea(&dst->sg_wa, 404 op->dst.u.dma.length); 405 } 406 } 407 408 static int ccp_copy_to_from_sb(struct ccp_cmd_queue *cmd_q, 409 struct ccp_dm_workarea *wa, u32 jobid, u32 sb, 410 u32 byte_swap, bool from) 411 { 412 struct ccp_op op; 413 414 memset(&op, 0, sizeof(op)); 415 416 op.cmd_q = cmd_q; 417 op.jobid = jobid; 418 op.eom = 1; 419 420 if (from) { 421 op.soc = 1; 422 op.src.type = CCP_MEMTYPE_SB; 423 op.src.u.sb = sb; 424 op.dst.type = CCP_MEMTYPE_SYSTEM; 425 op.dst.u.dma.address = wa->dma.address; 426 op.dst.u.dma.length = wa->length; 427 } else { 428 op.src.type = CCP_MEMTYPE_SYSTEM; 429 op.src.u.dma.address = wa->dma.address; 430 op.src.u.dma.length = wa->length; 431 op.dst.type = CCP_MEMTYPE_SB; 432 op.dst.u.sb = sb; 433 } 434 435 op.u.passthru.byte_swap = byte_swap; 436 437 return cmd_q->ccp->vdata->perform->passthru(&op); 438 } 439 440 static int ccp_copy_to_sb(struct ccp_cmd_queue *cmd_q, 441 struct ccp_dm_workarea *wa, u32 jobid, u32 sb, 442 u32 byte_swap) 443 { 444 return ccp_copy_to_from_sb(cmd_q, wa, jobid, sb, byte_swap, false); 445 } 446 447 static int ccp_copy_from_sb(struct ccp_cmd_queue *cmd_q, 448 struct ccp_dm_workarea *wa, u32 jobid, u32 sb, 449 u32 byte_swap) 450 { 451 return ccp_copy_to_from_sb(cmd_q, wa, 
jobid, sb, byte_swap, true); 452 } 453 454 static int ccp_run_aes_cmac_cmd(struct ccp_cmd_queue *cmd_q, 455 struct ccp_cmd *cmd) 456 { 457 struct ccp_aes_engine *aes = &cmd->u.aes; 458 struct ccp_dm_workarea key, ctx; 459 struct ccp_data src; 460 struct ccp_op op; 461 unsigned int dm_offset; 462 int ret; 463 464 if (!((aes->key_len == AES_KEYSIZE_128) || 465 (aes->key_len == AES_KEYSIZE_192) || 466 (aes->key_len == AES_KEYSIZE_256))) 467 return -EINVAL; 468 469 if (aes->src_len & (AES_BLOCK_SIZE - 1)) 470 return -EINVAL; 471 472 if (aes->iv_len != AES_BLOCK_SIZE) 473 return -EINVAL; 474 475 if (!aes->key || !aes->iv || !aes->src) 476 return -EINVAL; 477 478 if (aes->cmac_final) { 479 if (aes->cmac_key_len != AES_BLOCK_SIZE) 480 return -EINVAL; 481 482 if (!aes->cmac_key) 483 return -EINVAL; 484 } 485 486 BUILD_BUG_ON(CCP_AES_KEY_SB_COUNT != 1); 487 BUILD_BUG_ON(CCP_AES_CTX_SB_COUNT != 1); 488 489 ret = -EIO; 490 memset(&op, 0, sizeof(op)); 491 op.cmd_q = cmd_q; 492 op.jobid = CCP_NEW_JOBID(cmd_q->ccp); 493 op.sb_key = cmd_q->sb_key; 494 op.sb_ctx = cmd_q->sb_ctx; 495 op.init = 1; 496 op.u.aes.type = aes->type; 497 op.u.aes.mode = aes->mode; 498 op.u.aes.action = aes->action; 499 500 /* All supported key sizes fit in a single (32-byte) SB entry 501 * and must be in little endian format. Use the 256-bit byte 502 * swap passthru option to convert from big endian to little 503 * endian. 504 */ 505 ret = ccp_init_dm_workarea(&key, cmd_q, 506 CCP_AES_KEY_SB_COUNT * CCP_SB_BYTES, 507 DMA_TO_DEVICE); 508 if (ret) 509 return ret; 510 511 dm_offset = CCP_SB_BYTES - aes->key_len; 512 ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len); 513 ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key, 514 CCP_PASSTHRU_BYTESWAP_256BIT); 515 if (ret) { 516 cmd->engine_error = cmd_q->cmd_error; 517 goto e_key; 518 } 519 520 /* The AES context fits in a single (32-byte) SB entry and 521 * must be in little endian format. Use the 256-bit byte swap 522 * passthru option to convert from big endian to little endian. 
523 */ 524 ret = ccp_init_dm_workarea(&ctx, cmd_q, 525 CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES, 526 DMA_BIDIRECTIONAL); 527 if (ret) 528 goto e_key; 529 530 dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE; 531 ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len); 532 ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, 533 CCP_PASSTHRU_BYTESWAP_256BIT); 534 if (ret) { 535 cmd->engine_error = cmd_q->cmd_error; 536 goto e_ctx; 537 } 538 539 /* Send data to the CCP AES engine */ 540 ret = ccp_init_data(&src, cmd_q, aes->src, aes->src_len, 541 AES_BLOCK_SIZE, DMA_TO_DEVICE); 542 if (ret) 543 goto e_ctx; 544 545 while (src.sg_wa.bytes_left) { 546 ccp_prepare_data(&src, NULL, &op, AES_BLOCK_SIZE, true); 547 if (aes->cmac_final && !src.sg_wa.bytes_left) { 548 op.eom = 1; 549 550 /* Push the K1/K2 key to the CCP now */ 551 ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, 552 op.sb_ctx, 553 CCP_PASSTHRU_BYTESWAP_256BIT); 554 if (ret) { 555 cmd->engine_error = cmd_q->cmd_error; 556 goto e_src; 557 } 558 559 ccp_set_dm_area(&ctx, 0, aes->cmac_key, 0, 560 aes->cmac_key_len); 561 ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, 562 CCP_PASSTHRU_BYTESWAP_256BIT); 563 if (ret) { 564 cmd->engine_error = cmd_q->cmd_error; 565 goto e_src; 566 } 567 } 568 569 ret = cmd_q->ccp->vdata->perform->aes(&op); 570 if (ret) { 571 cmd->engine_error = cmd_q->cmd_error; 572 goto e_src; 573 } 574 575 ccp_process_data(&src, NULL, &op); 576 } 577 578 /* Retrieve the AES context - convert from LE to BE using 579 * 32-byte (256-bit) byteswapping 580 */ 581 ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, 582 CCP_PASSTHRU_BYTESWAP_256BIT); 583 if (ret) { 584 cmd->engine_error = cmd_q->cmd_error; 585 goto e_src; 586 } 587 588 /* ...but we only need AES_BLOCK_SIZE bytes */ 589 dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE; 590 ccp_get_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len); 591 592 e_src: 593 ccp_free_data(&src, cmd_q); 594 595 e_ctx: 596 ccp_dm_free(&ctx); 597 598 e_key: 599 ccp_dm_free(&key); 600 601 return ret; 602 } 603 604 static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, 605 struct ccp_cmd *cmd) 606 { 607 struct ccp_aes_engine *aes = &cmd->u.aes; 608 struct ccp_dm_workarea key, ctx, final_wa, tag; 609 struct ccp_data src, dst; 610 struct ccp_data aad; 611 struct ccp_op op; 612 613 unsigned long long *final; 614 unsigned int dm_offset; 615 unsigned int ilen; 616 bool in_place = true; /* Default value */ 617 int ret; 618 619 struct scatterlist *p_inp, sg_inp[2]; 620 struct scatterlist *p_tag, sg_tag[2]; 621 struct scatterlist *p_outp, sg_outp[2]; 622 struct scatterlist *p_aad; 623 624 if (!aes->iv) 625 return -EINVAL; 626 627 if (!((aes->key_len == AES_KEYSIZE_128) || 628 (aes->key_len == AES_KEYSIZE_192) || 629 (aes->key_len == AES_KEYSIZE_256))) 630 return -EINVAL; 631 632 if (!aes->key) /* Gotta have a key SGL */ 633 return -EINVAL; 634 635 /* First, decompose the source buffer into AAD & PT, 636 * and the destination buffer into AAD, CT & tag, or 637 * the input into CT & tag. 638 * It is expected that the input and output SGs will 639 * be valid, even if the AAD and input lengths are 0. 
640 */ 641 p_aad = aes->src; 642 p_inp = scatterwalk_ffwd(sg_inp, aes->src, aes->aad_len); 643 p_outp = scatterwalk_ffwd(sg_outp, aes->dst, aes->aad_len); 644 if (aes->action == CCP_AES_ACTION_ENCRYPT) { 645 ilen = aes->src_len; 646 p_tag = scatterwalk_ffwd(sg_tag, p_outp, ilen); 647 } else { 648 /* Input length for decryption includes tag */ 649 ilen = aes->src_len - AES_BLOCK_SIZE; 650 p_tag = scatterwalk_ffwd(sg_tag, p_inp, ilen); 651 } 652 653 memset(&op, 0, sizeof(op)); 654 op.cmd_q = cmd_q; 655 op.jobid = CCP_NEW_JOBID(cmd_q->ccp); 656 op.sb_key = cmd_q->sb_key; /* Pre-allocated */ 657 op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */ 658 op.init = 1; 659 op.u.aes.type = aes->type; 660 661 /* Copy the key to the LSB */ 662 ret = ccp_init_dm_workarea(&key, cmd_q, 663 CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES, 664 DMA_TO_DEVICE); 665 if (ret) 666 return ret; 667 668 dm_offset = CCP_SB_BYTES - aes->key_len; 669 ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len); 670 ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key, 671 CCP_PASSTHRU_BYTESWAP_256BIT); 672 if (ret) { 673 cmd->engine_error = cmd_q->cmd_error; 674 goto e_key; 675 } 676 677 /* Copy the context (IV) to the LSB. 678 * There is an assumption here that the IV is 96 bits in length, plus 679 * a nonce of 32 bits. If no IV is present, use a zeroed buffer. 680 */ 681 ret = ccp_init_dm_workarea(&ctx, cmd_q, 682 CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES, 683 DMA_BIDIRECTIONAL); 684 if (ret) 685 goto e_key; 686 687 dm_offset = CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES - aes->iv_len; 688 ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len); 689 690 ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, 691 CCP_PASSTHRU_BYTESWAP_256BIT); 692 if (ret) { 693 cmd->engine_error = cmd_q->cmd_error; 694 goto e_ctx; 695 } 696 697 op.init = 1; 698 if (aes->aad_len > 0) { 699 /* Step 1: Run a GHASH over the Additional Authenticated Data */ 700 ret = ccp_init_data(&aad, cmd_q, p_aad, aes->aad_len, 701 AES_BLOCK_SIZE, 702 DMA_TO_DEVICE); 703 if (ret) 704 goto e_ctx; 705 706 op.u.aes.mode = CCP_AES_MODE_GHASH; 707 op.u.aes.action = CCP_AES_GHASHAAD; 708 709 while (aad.sg_wa.bytes_left) { 710 ccp_prepare_data(&aad, NULL, &op, AES_BLOCK_SIZE, true); 711 712 ret = cmd_q->ccp->vdata->perform->aes(&op); 713 if (ret) { 714 cmd->engine_error = cmd_q->cmd_error; 715 goto e_aad; 716 } 717 718 ccp_process_data(&aad, NULL, &op); 719 op.init = 0; 720 } 721 } 722 723 op.u.aes.mode = CCP_AES_MODE_GCTR; 724 op.u.aes.action = aes->action; 725 726 if (ilen > 0) { 727 /* Step 2: Run a GCTR over the plaintext */ 728 in_place = (sg_virt(p_inp) == sg_virt(p_outp)) ? true : false; 729 730 ret = ccp_init_data(&src, cmd_q, p_inp, ilen, 731 AES_BLOCK_SIZE, 732 in_place ? 
DMA_BIDIRECTIONAL 733 : DMA_TO_DEVICE); 734 if (ret) 735 goto e_ctx; 736 737 if (in_place) { 738 dst = src; 739 } else { 740 ret = ccp_init_data(&dst, cmd_q, p_outp, ilen, 741 AES_BLOCK_SIZE, DMA_FROM_DEVICE); 742 if (ret) 743 goto e_src; 744 } 745 746 op.soc = 0; 747 op.eom = 0; 748 op.init = 1; 749 while (src.sg_wa.bytes_left) { 750 ccp_prepare_data(&src, &dst, &op, AES_BLOCK_SIZE, true); 751 if (!src.sg_wa.bytes_left) { 752 unsigned int nbytes = aes->src_len 753 % AES_BLOCK_SIZE; 754 755 if (nbytes) { 756 op.eom = 1; 757 op.u.aes.size = (nbytes * 8) - 1; 758 } 759 } 760 761 ret = cmd_q->ccp->vdata->perform->aes(&op); 762 if (ret) { 763 cmd->engine_error = cmd_q->cmd_error; 764 goto e_dst; 765 } 766 767 ccp_process_data(&src, &dst, &op); 768 op.init = 0; 769 } 770 } 771 772 /* Step 3: Update the IV portion of the context with the original IV */ 773 ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, 774 CCP_PASSTHRU_BYTESWAP_256BIT); 775 if (ret) { 776 cmd->engine_error = cmd_q->cmd_error; 777 goto e_dst; 778 } 779 780 ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len); 781 782 ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, 783 CCP_PASSTHRU_BYTESWAP_256BIT); 784 if (ret) { 785 cmd->engine_error = cmd_q->cmd_error; 786 goto e_dst; 787 } 788 789 /* Step 4: Concatenate the lengths of the AAD and source, and 790 * hash that 16 byte buffer. 791 */ 792 ret = ccp_init_dm_workarea(&final_wa, cmd_q, AES_BLOCK_SIZE, 793 DMA_BIDIRECTIONAL); 794 if (ret) 795 goto e_dst; 796 final = (unsigned long long *) final_wa.address; 797 final[0] = cpu_to_be64(aes->aad_len * 8); 798 final[1] = cpu_to_be64(ilen * 8); 799 800 op.u.aes.mode = CCP_AES_MODE_GHASH; 801 op.u.aes.action = CCP_AES_GHASHFINAL; 802 op.src.type = CCP_MEMTYPE_SYSTEM; 803 op.src.u.dma.address = final_wa.dma.address; 804 op.src.u.dma.length = AES_BLOCK_SIZE; 805 op.dst.type = CCP_MEMTYPE_SYSTEM; 806 op.dst.u.dma.address = final_wa.dma.address; 807 op.dst.u.dma.length = AES_BLOCK_SIZE; 808 op.eom = 1; 809 op.u.aes.size = 0; 810 ret = cmd_q->ccp->vdata->perform->aes(&op); 811 if (ret) 812 goto e_dst; 813 814 if (aes->action == CCP_AES_ACTION_ENCRYPT) { 815 /* Put the ciphered tag after the ciphertext. */ 816 ccp_get_dm_area(&final_wa, 0, p_tag, 0, AES_BLOCK_SIZE); 817 } else { 818 /* Does this ciphered tag match the input? 
*/ 819 ret = ccp_init_dm_workarea(&tag, cmd_q, AES_BLOCK_SIZE, 820 DMA_BIDIRECTIONAL); 821 if (ret) 822 goto e_tag; 823 ccp_set_dm_area(&tag, 0, p_tag, 0, AES_BLOCK_SIZE); 824 825 ret = memcmp(tag.address, final_wa.address, AES_BLOCK_SIZE); 826 ccp_dm_free(&tag); 827 } 828 829 e_tag: 830 ccp_dm_free(&final_wa); 831 832 e_dst: 833 if (aes->src_len && !in_place) 834 ccp_free_data(&dst, cmd_q); 835 836 e_src: 837 if (aes->src_len) 838 ccp_free_data(&src, cmd_q); 839 840 e_aad: 841 if (aes->aad_len) 842 ccp_free_data(&aad, cmd_q); 843 844 e_ctx: 845 ccp_dm_free(&ctx); 846 847 e_key: 848 ccp_dm_free(&key); 849 850 return ret; 851 } 852 853 static int ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) 854 { 855 struct ccp_aes_engine *aes = &cmd->u.aes; 856 struct ccp_dm_workarea key, ctx; 857 struct ccp_data src, dst; 858 struct ccp_op op; 859 unsigned int dm_offset; 860 bool in_place = false; 861 int ret; 862 863 if (aes->mode == CCP_AES_MODE_CMAC) 864 return ccp_run_aes_cmac_cmd(cmd_q, cmd); 865 866 if (aes->mode == CCP_AES_MODE_GCM) 867 return ccp_run_aes_gcm_cmd(cmd_q, cmd); 868 869 if (!((aes->key_len == AES_KEYSIZE_128) || 870 (aes->key_len == AES_KEYSIZE_192) || 871 (aes->key_len == AES_KEYSIZE_256))) 872 return -EINVAL; 873 874 if (((aes->mode == CCP_AES_MODE_ECB) || 875 (aes->mode == CCP_AES_MODE_CBC) || 876 (aes->mode == CCP_AES_MODE_CFB)) && 877 (aes->src_len & (AES_BLOCK_SIZE - 1))) 878 return -EINVAL; 879 880 if (!aes->key || !aes->src || !aes->dst) 881 return -EINVAL; 882 883 if (aes->mode != CCP_AES_MODE_ECB) { 884 if (aes->iv_len != AES_BLOCK_SIZE) 885 return -EINVAL; 886 887 if (!aes->iv) 888 return -EINVAL; 889 } 890 891 BUILD_BUG_ON(CCP_AES_KEY_SB_COUNT != 1); 892 BUILD_BUG_ON(CCP_AES_CTX_SB_COUNT != 1); 893 894 ret = -EIO; 895 memset(&op, 0, sizeof(op)); 896 op.cmd_q = cmd_q; 897 op.jobid = CCP_NEW_JOBID(cmd_q->ccp); 898 op.sb_key = cmd_q->sb_key; 899 op.sb_ctx = cmd_q->sb_ctx; 900 op.init = (aes->mode == CCP_AES_MODE_ECB) ? 0 : 1; 901 op.u.aes.type = aes->type; 902 op.u.aes.mode = aes->mode; 903 op.u.aes.action = aes->action; 904 905 /* All supported key sizes fit in a single (32-byte) SB entry 906 * and must be in little endian format. Use the 256-bit byte 907 * swap passthru option to convert from big endian to little 908 * endian. 909 */ 910 ret = ccp_init_dm_workarea(&key, cmd_q, 911 CCP_AES_KEY_SB_COUNT * CCP_SB_BYTES, 912 DMA_TO_DEVICE); 913 if (ret) 914 return ret; 915 916 dm_offset = CCP_SB_BYTES - aes->key_len; 917 ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len); 918 ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key, 919 CCP_PASSTHRU_BYTESWAP_256BIT); 920 if (ret) { 921 cmd->engine_error = cmd_q->cmd_error; 922 goto e_key; 923 } 924 925 /* The AES context fits in a single (32-byte) SB entry and 926 * must be in little endian format. Use the 256-bit byte swap 927 * passthru option to convert from big endian to little endian. 
928 */ 929 ret = ccp_init_dm_workarea(&ctx, cmd_q, 930 CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES, 931 DMA_BIDIRECTIONAL); 932 if (ret) 933 goto e_key; 934 935 if (aes->mode != CCP_AES_MODE_ECB) { 936 /* Load the AES context - convert to LE */ 937 dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE; 938 ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len); 939 ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, 940 CCP_PASSTHRU_BYTESWAP_256BIT); 941 if (ret) { 942 cmd->engine_error = cmd_q->cmd_error; 943 goto e_ctx; 944 } 945 } 946 switch (aes->mode) { 947 case CCP_AES_MODE_CFB: /* CFB128 only */ 948 case CCP_AES_MODE_CTR: 949 op.u.aes.size = AES_BLOCK_SIZE * BITS_PER_BYTE - 1; 950 break; 951 default: 952 op.u.aes.size = 0; 953 } 954 955 /* Prepare the input and output data workareas. For in-place 956 * operations we need to set the dma direction to BIDIRECTIONAL 957 * and copy the src workarea to the dst workarea. 958 */ 959 if (sg_virt(aes->src) == sg_virt(aes->dst)) 960 in_place = true; 961 962 ret = ccp_init_data(&src, cmd_q, aes->src, aes->src_len, 963 AES_BLOCK_SIZE, 964 in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE); 965 if (ret) 966 goto e_ctx; 967 968 if (in_place) { 969 dst = src; 970 } else { 971 ret = ccp_init_data(&dst, cmd_q, aes->dst, aes->src_len, 972 AES_BLOCK_SIZE, DMA_FROM_DEVICE); 973 if (ret) 974 goto e_src; 975 } 976 977 /* Send data to the CCP AES engine */ 978 while (src.sg_wa.bytes_left) { 979 ccp_prepare_data(&src, &dst, &op, AES_BLOCK_SIZE, true); 980 if (!src.sg_wa.bytes_left) { 981 op.eom = 1; 982 983 /* Since we don't retrieve the AES context in ECB 984 * mode we have to wait for the operation to complete 985 * on the last piece of data 986 */ 987 if (aes->mode == CCP_AES_MODE_ECB) 988 op.soc = 1; 989 } 990 991 ret = cmd_q->ccp->vdata->perform->aes(&op); 992 if (ret) { 993 cmd->engine_error = cmd_q->cmd_error; 994 goto e_dst; 995 } 996 997 ccp_process_data(&src, &dst, &op); 998 } 999 1000 if (aes->mode != CCP_AES_MODE_ECB) { 1001 /* Retrieve the AES context - convert from LE to BE using 1002 * 32-byte (256-bit) byteswapping 1003 */ 1004 ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, 1005 CCP_PASSTHRU_BYTESWAP_256BIT); 1006 if (ret) { 1007 cmd->engine_error = cmd_q->cmd_error; 1008 goto e_dst; 1009 } 1010 1011 /* ...but we only need AES_BLOCK_SIZE bytes */ 1012 dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE; 1013 ccp_get_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len); 1014 } 1015 1016 e_dst: 1017 if (!in_place) 1018 ccp_free_data(&dst, cmd_q); 1019 1020 e_src: 1021 ccp_free_data(&src, cmd_q); 1022 1023 e_ctx: 1024 ccp_dm_free(&ctx); 1025 1026 e_key: 1027 ccp_dm_free(&key); 1028 1029 return ret; 1030 } 1031 1032 static int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q, 1033 struct ccp_cmd *cmd) 1034 { 1035 struct ccp_xts_aes_engine *xts = &cmd->u.xts; 1036 struct ccp_dm_workarea key, ctx; 1037 struct ccp_data src, dst; 1038 struct ccp_op op; 1039 unsigned int unit_size, dm_offset; 1040 bool in_place = false; 1041 unsigned int sb_count; 1042 enum ccp_aes_type aestype; 1043 int ret; 1044 1045 switch (xts->unit_size) { 1046 case CCP_XTS_AES_UNIT_SIZE_16: 1047 unit_size = 16; 1048 break; 1049 case CCP_XTS_AES_UNIT_SIZE_512: 1050 unit_size = 512; 1051 break; 1052 case CCP_XTS_AES_UNIT_SIZE_1024: 1053 unit_size = 1024; 1054 break; 1055 case CCP_XTS_AES_UNIT_SIZE_2048: 1056 unit_size = 2048; 1057 break; 1058 case CCP_XTS_AES_UNIT_SIZE_4096: 1059 unit_size = 4096; 1060 break; 1061 1062 default: 1063 return -EINVAL; 1064 } 1065 1066 if (xts->key_len == AES_KEYSIZE_128) 
1067 aestype = CCP_AES_TYPE_128; 1068 else if (xts->key_len == AES_KEYSIZE_256) 1069 aestype = CCP_AES_TYPE_256; 1070 else 1071 return -EINVAL; 1072 1073 if (!xts->final && (xts->src_len & (AES_BLOCK_SIZE - 1))) 1074 return -EINVAL; 1075 1076 if (xts->iv_len != AES_BLOCK_SIZE) 1077 return -EINVAL; 1078 1079 if (!xts->key || !xts->iv || !xts->src || !xts->dst) 1080 return -EINVAL; 1081 1082 BUILD_BUG_ON(CCP_XTS_AES_KEY_SB_COUNT != 1); 1083 BUILD_BUG_ON(CCP_XTS_AES_CTX_SB_COUNT != 1); 1084 1085 ret = -EIO; 1086 memset(&op, 0, sizeof(op)); 1087 op.cmd_q = cmd_q; 1088 op.jobid = CCP_NEW_JOBID(cmd_q->ccp); 1089 op.sb_key = cmd_q->sb_key; 1090 op.sb_ctx = cmd_q->sb_ctx; 1091 op.init = 1; 1092 op.u.xts.type = aestype; 1093 op.u.xts.action = xts->action; 1094 op.u.xts.unit_size = xts->unit_size; 1095 1096 /* A version 3 device only supports 128-bit keys, which fits into a 1097 * single SB entry. A version 5 device uses a 512-bit vector, so two 1098 * SB entries. 1099 */ 1100 if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0)) 1101 sb_count = CCP_XTS_AES_KEY_SB_COUNT; 1102 else 1103 sb_count = CCP5_XTS_AES_KEY_SB_COUNT; 1104 ret = ccp_init_dm_workarea(&key, cmd_q, 1105 sb_count * CCP_SB_BYTES, 1106 DMA_TO_DEVICE); 1107 if (ret) 1108 return ret; 1109 1110 if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0)) { 1111 /* All supported key sizes must be in little endian format. 1112 * Use the 256-bit byte swap passthru option to convert from 1113 * big endian to little endian. 1114 */ 1115 dm_offset = CCP_SB_BYTES - AES_KEYSIZE_128; 1116 ccp_set_dm_area(&key, dm_offset, xts->key, 0, xts->key_len); 1117 ccp_set_dm_area(&key, 0, xts->key, xts->key_len, xts->key_len); 1118 } else { 1119 /* Version 5 CCPs use a 512-bit space for the key: each portion 1120 * occupies 256 bits, or one entire slot, and is zero-padded. 1121 */ 1122 unsigned int pad; 1123 1124 dm_offset = CCP_SB_BYTES; 1125 pad = dm_offset - xts->key_len; 1126 ccp_set_dm_area(&key, pad, xts->key, 0, xts->key_len); 1127 ccp_set_dm_area(&key, dm_offset + pad, xts->key, xts->key_len, 1128 xts->key_len); 1129 } 1130 ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key, 1131 CCP_PASSTHRU_BYTESWAP_256BIT); 1132 if (ret) { 1133 cmd->engine_error = cmd_q->cmd_error; 1134 goto e_key; 1135 } 1136 1137 /* The AES context fits in a single (32-byte) SB entry and 1138 * for XTS is already in little endian format so no byte swapping 1139 * is needed. 1140 */ 1141 ret = ccp_init_dm_workarea(&ctx, cmd_q, 1142 CCP_XTS_AES_CTX_SB_COUNT * CCP_SB_BYTES, 1143 DMA_BIDIRECTIONAL); 1144 if (ret) 1145 goto e_key; 1146 1147 ccp_set_dm_area(&ctx, 0, xts->iv, 0, xts->iv_len); 1148 ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, 1149 CCP_PASSTHRU_BYTESWAP_NOOP); 1150 if (ret) { 1151 cmd->engine_error = cmd_q->cmd_error; 1152 goto e_ctx; 1153 } 1154 1155 /* Prepare the input and output data workareas. For in-place 1156 * operations we need to set the dma direction to BIDIRECTIONAL 1157 * and copy the src workarea to the dst workarea. 1158 */ 1159 if (sg_virt(xts->src) == sg_virt(xts->dst)) 1160 in_place = true; 1161 1162 ret = ccp_init_data(&src, cmd_q, xts->src, xts->src_len, 1163 unit_size, 1164 in_place ? 
DMA_BIDIRECTIONAL : DMA_TO_DEVICE); 1165 if (ret) 1166 goto e_ctx; 1167 1168 if (in_place) { 1169 dst = src; 1170 } else { 1171 ret = ccp_init_data(&dst, cmd_q, xts->dst, xts->src_len, 1172 unit_size, DMA_FROM_DEVICE); 1173 if (ret) 1174 goto e_src; 1175 } 1176 1177 /* Send data to the CCP AES engine */ 1178 while (src.sg_wa.bytes_left) { 1179 ccp_prepare_data(&src, &dst, &op, unit_size, true); 1180 if (!src.sg_wa.bytes_left) 1181 op.eom = 1; 1182 1183 ret = cmd_q->ccp->vdata->perform->xts_aes(&op); 1184 if (ret) { 1185 cmd->engine_error = cmd_q->cmd_error; 1186 goto e_dst; 1187 } 1188 1189 ccp_process_data(&src, &dst, &op); 1190 } 1191 1192 /* Retrieve the AES context - convert from LE to BE using 1193 * 32-byte (256-bit) byteswapping 1194 */ 1195 ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, 1196 CCP_PASSTHRU_BYTESWAP_256BIT); 1197 if (ret) { 1198 cmd->engine_error = cmd_q->cmd_error; 1199 goto e_dst; 1200 } 1201 1202 /* ...but we only need AES_BLOCK_SIZE bytes */ 1203 dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE; 1204 ccp_get_dm_area(&ctx, dm_offset, xts->iv, 0, xts->iv_len); 1205 1206 e_dst: 1207 if (!in_place) 1208 ccp_free_data(&dst, cmd_q); 1209 1210 e_src: 1211 ccp_free_data(&src, cmd_q); 1212 1213 e_ctx: 1214 ccp_dm_free(&ctx); 1215 1216 e_key: 1217 ccp_dm_free(&key); 1218 1219 return ret; 1220 } 1221 1222 static int ccp_run_des3_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) 1223 { 1224 struct ccp_des3_engine *des3 = &cmd->u.des3; 1225 1226 struct ccp_dm_workarea key, ctx; 1227 struct ccp_data src, dst; 1228 struct ccp_op op; 1229 unsigned int dm_offset; 1230 unsigned int len_singlekey; 1231 bool in_place = false; 1232 int ret; 1233 1234 /* Error checks */ 1235 if (!cmd_q->ccp->vdata->perform->des3) 1236 return -EINVAL; 1237 1238 if (des3->key_len != DES3_EDE_KEY_SIZE) 1239 return -EINVAL; 1240 1241 if (((des3->mode == CCP_DES3_MODE_ECB) || 1242 (des3->mode == CCP_DES3_MODE_CBC)) && 1243 (des3->src_len & (DES3_EDE_BLOCK_SIZE - 1))) 1244 return -EINVAL; 1245 1246 if (!des3->key || !des3->src || !des3->dst) 1247 return -EINVAL; 1248 1249 if (des3->mode != CCP_DES3_MODE_ECB) { 1250 if (des3->iv_len != DES3_EDE_BLOCK_SIZE) 1251 return -EINVAL; 1252 1253 if (!des3->iv) 1254 return -EINVAL; 1255 } 1256 1257 ret = -EIO; 1258 /* Zero out all the fields of the command desc */ 1259 memset(&op, 0, sizeof(op)); 1260 1261 /* Set up the Function field */ 1262 op.cmd_q = cmd_q; 1263 op.jobid = CCP_NEW_JOBID(cmd_q->ccp); 1264 op.sb_key = cmd_q->sb_key; 1265 1266 op.init = (des3->mode == CCP_DES3_MODE_ECB) ? 0 : 1; 1267 op.u.des3.type = des3->type; 1268 op.u.des3.mode = des3->mode; 1269 op.u.des3.action = des3->action; 1270 1271 /* 1272 * All supported key sizes fit in a single (32-byte) KSB entry and 1273 * (like AES) must be in little endian format. Use the 256-bit byte 1274 * swap passthru option to convert from big endian to little endian. 1275 */ 1276 ret = ccp_init_dm_workarea(&key, cmd_q, 1277 CCP_DES3_KEY_SB_COUNT * CCP_SB_BYTES, 1278 DMA_TO_DEVICE); 1279 if (ret) 1280 return ret; 1281 1282 /* 1283 * The contents of the key triplet are in the reverse order of what 1284 * is required by the engine. Copy the 3 pieces individually to put 1285 * them where they belong. 
1286 */ 1287 dm_offset = CCP_SB_BYTES - des3->key_len; /* Basic offset */ 1288 1289 len_singlekey = des3->key_len / 3; 1290 ccp_set_dm_area(&key, dm_offset + 2 * len_singlekey, 1291 des3->key, 0, len_singlekey); 1292 ccp_set_dm_area(&key, dm_offset + len_singlekey, 1293 des3->key, len_singlekey, len_singlekey); 1294 ccp_set_dm_area(&key, dm_offset, 1295 des3->key, 2 * len_singlekey, len_singlekey); 1296 1297 /* Copy the key to the SB */ 1298 ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key, 1299 CCP_PASSTHRU_BYTESWAP_256BIT); 1300 if (ret) { 1301 cmd->engine_error = cmd_q->cmd_error; 1302 goto e_key; 1303 } 1304 1305 /* 1306 * The DES3 context fits in a single (32-byte) KSB entry and 1307 * must be in little endian format. Use the 256-bit byte swap 1308 * passthru option to convert from big endian to little endian. 1309 */ 1310 if (des3->mode != CCP_DES3_MODE_ECB) { 1311 u32 load_mode; 1312 1313 op.sb_ctx = cmd_q->sb_ctx; 1314 1315 ret = ccp_init_dm_workarea(&ctx, cmd_q, 1316 CCP_DES3_CTX_SB_COUNT * CCP_SB_BYTES, 1317 DMA_BIDIRECTIONAL); 1318 if (ret) 1319 goto e_key; 1320 1321 /* Load the context into the LSB */ 1322 dm_offset = CCP_SB_BYTES - des3->iv_len; 1323 ccp_set_dm_area(&ctx, dm_offset, des3->iv, 0, des3->iv_len); 1324 1325 if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0)) 1326 load_mode = CCP_PASSTHRU_BYTESWAP_NOOP; 1327 else 1328 load_mode = CCP_PASSTHRU_BYTESWAP_256BIT; 1329 ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, 1330 load_mode); 1331 if (ret) { 1332 cmd->engine_error = cmd_q->cmd_error; 1333 goto e_ctx; 1334 } 1335 } 1336 1337 /* 1338 * Prepare the input and output data workareas. For in-place 1339 * operations we need to set the dma direction to BIDIRECTIONAL 1340 * and copy the src workarea to the dst workarea. 1341 */ 1342 if (sg_virt(des3->src) == sg_virt(des3->dst)) 1343 in_place = true; 1344 1345 ret = ccp_init_data(&src, cmd_q, des3->src, des3->src_len, 1346 DES3_EDE_BLOCK_SIZE, 1347 in_place ? 
DMA_BIDIRECTIONAL : DMA_TO_DEVICE); 1348 if (ret) 1349 goto e_ctx; 1350 1351 if (in_place) 1352 dst = src; 1353 else { 1354 ret = ccp_init_data(&dst, cmd_q, des3->dst, des3->src_len, 1355 DES3_EDE_BLOCK_SIZE, DMA_FROM_DEVICE); 1356 if (ret) 1357 goto e_src; 1358 } 1359 1360 /* Send data to the CCP DES3 engine */ 1361 while (src.sg_wa.bytes_left) { 1362 ccp_prepare_data(&src, &dst, &op, DES3_EDE_BLOCK_SIZE, true); 1363 if (!src.sg_wa.bytes_left) { 1364 op.eom = 1; 1365 1366 /* Since we don't retrieve the context in ECB mode 1367 * we have to wait for the operation to complete 1368 * on the last piece of data 1369 */ 1370 op.soc = 0; 1371 } 1372 1373 ret = cmd_q->ccp->vdata->perform->des3(&op); 1374 if (ret) { 1375 cmd->engine_error = cmd_q->cmd_error; 1376 goto e_dst; 1377 } 1378 1379 ccp_process_data(&src, &dst, &op); 1380 } 1381 1382 if (des3->mode != CCP_DES3_MODE_ECB) { 1383 /* Retrieve the context and make BE */ 1384 ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, 1385 CCP_PASSTHRU_BYTESWAP_256BIT); 1386 if (ret) { 1387 cmd->engine_error = cmd_q->cmd_error; 1388 goto e_dst; 1389 } 1390 1391 /* ...but we only need the last DES3_EDE_BLOCK_SIZE bytes */ 1392 if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0)) 1393 dm_offset = CCP_SB_BYTES - des3->iv_len; 1394 else 1395 dm_offset = 0; 1396 ccp_get_dm_area(&ctx, dm_offset, des3->iv, 0, 1397 DES3_EDE_BLOCK_SIZE); 1398 } 1399 e_dst: 1400 if (!in_place) 1401 ccp_free_data(&dst, cmd_q); 1402 1403 e_src: 1404 ccp_free_data(&src, cmd_q); 1405 1406 e_ctx: 1407 if (des3->mode != CCP_DES3_MODE_ECB) 1408 ccp_dm_free(&ctx); 1409 1410 e_key: 1411 ccp_dm_free(&key); 1412 1413 return ret; 1414 } 1415 1416 static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) 1417 { 1418 struct ccp_sha_engine *sha = &cmd->u.sha; 1419 struct ccp_dm_workarea ctx; 1420 struct ccp_data src; 1421 struct ccp_op op; 1422 unsigned int ioffset, ooffset; 1423 unsigned int digest_size; 1424 int sb_count; 1425 const void *init; 1426 u64 block_size; 1427 int ctx_size; 1428 int ret; 1429 1430 switch (sha->type) { 1431 case CCP_SHA_TYPE_1: 1432 if (sha->ctx_len < SHA1_DIGEST_SIZE) 1433 return -EINVAL; 1434 block_size = SHA1_BLOCK_SIZE; 1435 break; 1436 case CCP_SHA_TYPE_224: 1437 if (sha->ctx_len < SHA224_DIGEST_SIZE) 1438 return -EINVAL; 1439 block_size = SHA224_BLOCK_SIZE; 1440 break; 1441 case CCP_SHA_TYPE_256: 1442 if (sha->ctx_len < SHA256_DIGEST_SIZE) 1443 return -EINVAL; 1444 block_size = SHA256_BLOCK_SIZE; 1445 break; 1446 case CCP_SHA_TYPE_384: 1447 if (cmd_q->ccp->vdata->version < CCP_VERSION(4, 0) 1448 || sha->ctx_len < SHA384_DIGEST_SIZE) 1449 return -EINVAL; 1450 block_size = SHA384_BLOCK_SIZE; 1451 break; 1452 case CCP_SHA_TYPE_512: 1453 if (cmd_q->ccp->vdata->version < CCP_VERSION(4, 0) 1454 || sha->ctx_len < SHA512_DIGEST_SIZE) 1455 return -EINVAL; 1456 block_size = SHA512_BLOCK_SIZE; 1457 break; 1458 default: 1459 return -EINVAL; 1460 } 1461 1462 if (!sha->ctx) 1463 return -EINVAL; 1464 1465 if (!sha->final && (sha->src_len & (block_size - 1))) 1466 return -EINVAL; 1467 1468 /* The version 3 device can't handle zero-length input */ 1469 if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0)) { 1470 1471 if (!sha->src_len) { 1472 unsigned int digest_len; 1473 const u8 *sha_zero; 1474 1475 /* Not final, just return */ 1476 if (!sha->final) 1477 return 0; 1478 1479 /* CCP can't do a zero length sha operation so the 1480 * caller must buffer the data. 
1481 */ 1482 if (sha->msg_bits) 1483 return -EINVAL; 1484 1485 /* The CCP cannot perform zero-length sha operations 1486 * so the caller is required to buffer data for the 1487 * final operation. However, a sha operation for a 1488 * message with a total length of zero is valid so 1489 * known values are required to supply the result. 1490 */ 1491 switch (sha->type) { 1492 case CCP_SHA_TYPE_1: 1493 sha_zero = sha1_zero_message_hash; 1494 digest_len = SHA1_DIGEST_SIZE; 1495 break; 1496 case CCP_SHA_TYPE_224: 1497 sha_zero = sha224_zero_message_hash; 1498 digest_len = SHA224_DIGEST_SIZE; 1499 break; 1500 case CCP_SHA_TYPE_256: 1501 sha_zero = sha256_zero_message_hash; 1502 digest_len = SHA256_DIGEST_SIZE; 1503 break; 1504 default: 1505 return -EINVAL; 1506 } 1507 1508 scatterwalk_map_and_copy((void *)sha_zero, sha->ctx, 0, 1509 digest_len, 1); 1510 1511 return 0; 1512 } 1513 } 1514 1515 /* Set variables used throughout */ 1516 switch (sha->type) { 1517 case CCP_SHA_TYPE_1: 1518 digest_size = SHA1_DIGEST_SIZE; 1519 init = (void *) ccp_sha1_init; 1520 ctx_size = SHA1_DIGEST_SIZE; 1521 sb_count = 1; 1522 if (cmd_q->ccp->vdata->version != CCP_VERSION(3, 0)) 1523 ooffset = ioffset = CCP_SB_BYTES - SHA1_DIGEST_SIZE; 1524 else 1525 ooffset = ioffset = 0; 1526 break; 1527 case CCP_SHA_TYPE_224: 1528 digest_size = SHA224_DIGEST_SIZE; 1529 init = (void *) ccp_sha224_init; 1530 ctx_size = SHA256_DIGEST_SIZE; 1531 sb_count = 1; 1532 ioffset = 0; 1533 if (cmd_q->ccp->vdata->version != CCP_VERSION(3, 0)) 1534 ooffset = CCP_SB_BYTES - SHA224_DIGEST_SIZE; 1535 else 1536 ooffset = 0; 1537 break; 1538 case CCP_SHA_TYPE_256: 1539 digest_size = SHA256_DIGEST_SIZE; 1540 init = (void *) ccp_sha256_init; 1541 ctx_size = SHA256_DIGEST_SIZE; 1542 sb_count = 1; 1543 ooffset = ioffset = 0; 1544 break; 1545 case CCP_SHA_TYPE_384: 1546 digest_size = SHA384_DIGEST_SIZE; 1547 init = (void *) ccp_sha384_init; 1548 ctx_size = SHA512_DIGEST_SIZE; 1549 sb_count = 2; 1550 ioffset = 0; 1551 ooffset = 2 * CCP_SB_BYTES - SHA384_DIGEST_SIZE; 1552 break; 1553 case CCP_SHA_TYPE_512: 1554 digest_size = SHA512_DIGEST_SIZE; 1555 init = (void *) ccp_sha512_init; 1556 ctx_size = SHA512_DIGEST_SIZE; 1557 sb_count = 2; 1558 ooffset = ioffset = 0; 1559 break; 1560 default: 1561 ret = -EINVAL; 1562 goto e_data; 1563 } 1564 1565 /* For zero-length plaintext the src pointer is ignored; 1566 * otherwise both parts must be valid 1567 */ 1568 if (sha->src_len && !sha->src) 1569 return -EINVAL; 1570 1571 memset(&op, 0, sizeof(op)); 1572 op.cmd_q = cmd_q; 1573 op.jobid = CCP_NEW_JOBID(cmd_q->ccp); 1574 op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */ 1575 op.u.sha.type = sha->type; 1576 op.u.sha.msg_bits = sha->msg_bits; 1577 1578 /* For SHA1/224/256 the context fits in a single (32-byte) SB entry; 1579 * SHA384/512 require 2 adjacent SB slots, with the right half in the 1580 * first slot, and the left half in the second. Each portion must then 1581 * be in little endian format: use the 256-bit byte swap option. 
1582 */ 1583 ret = ccp_init_dm_workarea(&ctx, cmd_q, sb_count * CCP_SB_BYTES, 1584 DMA_BIDIRECTIONAL); 1585 if (ret) 1586 return ret; 1587 if (sha->first) { 1588 switch (sha->type) { 1589 case CCP_SHA_TYPE_1: 1590 case CCP_SHA_TYPE_224: 1591 case CCP_SHA_TYPE_256: 1592 memcpy(ctx.address + ioffset, init, ctx_size); 1593 break; 1594 case CCP_SHA_TYPE_384: 1595 case CCP_SHA_TYPE_512: 1596 memcpy(ctx.address + ctx_size / 2, init, 1597 ctx_size / 2); 1598 memcpy(ctx.address, init + ctx_size / 2, 1599 ctx_size / 2); 1600 break; 1601 default: 1602 ret = -EINVAL; 1603 goto e_ctx; 1604 } 1605 } else { 1606 /* Restore the context */ 1607 ccp_set_dm_area(&ctx, 0, sha->ctx, 0, 1608 sb_count * CCP_SB_BYTES); 1609 } 1610 1611 ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, 1612 CCP_PASSTHRU_BYTESWAP_256BIT); 1613 if (ret) { 1614 cmd->engine_error = cmd_q->cmd_error; 1615 goto e_ctx; 1616 } 1617 1618 if (sha->src) { 1619 /* Send data to the CCP SHA engine; block_size is set above */ 1620 ret = ccp_init_data(&src, cmd_q, sha->src, sha->src_len, 1621 block_size, DMA_TO_DEVICE); 1622 if (ret) 1623 goto e_ctx; 1624 1625 while (src.sg_wa.bytes_left) { 1626 ccp_prepare_data(&src, NULL, &op, block_size, false); 1627 if (sha->final && !src.sg_wa.bytes_left) 1628 op.eom = 1; 1629 1630 ret = cmd_q->ccp->vdata->perform->sha(&op); 1631 if (ret) { 1632 cmd->engine_error = cmd_q->cmd_error; 1633 goto e_data; 1634 } 1635 1636 ccp_process_data(&src, NULL, &op); 1637 } 1638 } else { 1639 op.eom = 1; 1640 ret = cmd_q->ccp->vdata->perform->sha(&op); 1641 if (ret) { 1642 cmd->engine_error = cmd_q->cmd_error; 1643 goto e_data; 1644 } 1645 } 1646 1647 /* Retrieve the SHA context - convert from LE to BE using 1648 * 32-byte (256-bit) byteswapping to BE 1649 */ 1650 ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, 1651 CCP_PASSTHRU_BYTESWAP_256BIT); 1652 if (ret) { 1653 cmd->engine_error = cmd_q->cmd_error; 1654 goto e_data; 1655 } 1656 1657 if (sha->final) { 1658 /* Finishing up, so get the digest */ 1659 switch (sha->type) { 1660 case CCP_SHA_TYPE_1: 1661 case CCP_SHA_TYPE_224: 1662 case CCP_SHA_TYPE_256: 1663 ccp_get_dm_area(&ctx, ooffset, 1664 sha->ctx, 0, 1665 digest_size); 1666 break; 1667 case CCP_SHA_TYPE_384: 1668 case CCP_SHA_TYPE_512: 1669 ccp_get_dm_area(&ctx, 0, 1670 sha->ctx, LSB_ITEM_SIZE - ooffset, 1671 LSB_ITEM_SIZE); 1672 ccp_get_dm_area(&ctx, LSB_ITEM_SIZE + ooffset, 1673 sha->ctx, 0, 1674 LSB_ITEM_SIZE - ooffset); 1675 break; 1676 default: 1677 ret = -EINVAL; 1678 goto e_ctx; 1679 } 1680 } else { 1681 /* Stash the context */ 1682 ccp_get_dm_area(&ctx, 0, sha->ctx, 0, 1683 sb_count * CCP_SB_BYTES); 1684 } 1685 1686 if (sha->final && sha->opad) { 1687 /* HMAC operation, recursively perform final SHA */ 1688 struct ccp_cmd hmac_cmd; 1689 struct scatterlist sg; 1690 u8 *hmac_buf; 1691 1692 if (sha->opad_len != block_size) { 1693 ret = -EINVAL; 1694 goto e_data; 1695 } 1696 1697 hmac_buf = kmalloc(block_size + digest_size, GFP_KERNEL); 1698 if (!hmac_buf) { 1699 ret = -ENOMEM; 1700 goto e_data; 1701 } 1702 sg_init_one(&sg, hmac_buf, block_size + digest_size); 1703 1704 scatterwalk_map_and_copy(hmac_buf, sha->opad, 0, block_size, 0); 1705 switch (sha->type) { 1706 case CCP_SHA_TYPE_1: 1707 case CCP_SHA_TYPE_224: 1708 case CCP_SHA_TYPE_256: 1709 memcpy(hmac_buf + block_size, 1710 ctx.address + ooffset, 1711 digest_size); 1712 break; 1713 case CCP_SHA_TYPE_384: 1714 case CCP_SHA_TYPE_512: 1715 memcpy(hmac_buf + block_size, 1716 ctx.address + LSB_ITEM_SIZE + ooffset, 1717 LSB_ITEM_SIZE); 1718 
memcpy(hmac_buf + block_size + 1719 (LSB_ITEM_SIZE - ooffset), 1720 ctx.address, 1721 LSB_ITEM_SIZE); 1722 break; 1723 default: 1724 ret = -EINVAL; 1725 goto e_ctx; 1726 } 1727 1728 memset(&hmac_cmd, 0, sizeof(hmac_cmd)); 1729 hmac_cmd.engine = CCP_ENGINE_SHA; 1730 hmac_cmd.u.sha.type = sha->type; 1731 hmac_cmd.u.sha.ctx = sha->ctx; 1732 hmac_cmd.u.sha.ctx_len = sha->ctx_len; 1733 hmac_cmd.u.sha.src = &sg; 1734 hmac_cmd.u.sha.src_len = block_size + digest_size; 1735 hmac_cmd.u.sha.opad = NULL; 1736 hmac_cmd.u.sha.opad_len = 0; 1737 hmac_cmd.u.sha.first = 1; 1738 hmac_cmd.u.sha.final = 1; 1739 hmac_cmd.u.sha.msg_bits = (block_size + digest_size) << 3; 1740 1741 ret = ccp_run_sha_cmd(cmd_q, &hmac_cmd); 1742 if (ret) 1743 cmd->engine_error = hmac_cmd.engine_error; 1744 1745 kfree(hmac_buf); 1746 } 1747 1748 e_data: 1749 if (sha->src) 1750 ccp_free_data(&src, cmd_q); 1751 1752 e_ctx: 1753 ccp_dm_free(&ctx); 1754 1755 return ret; 1756 } 1757 1758 static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) 1759 { 1760 struct ccp_rsa_engine *rsa = &cmd->u.rsa; 1761 struct ccp_dm_workarea exp, src, dst; 1762 struct ccp_op op; 1763 unsigned int sb_count, i_len, o_len; 1764 int ret; 1765 1766 /* Check against the maximum allowable size, in bits */ 1767 if (rsa->key_size > cmd_q->ccp->vdata->rsamax) 1768 return -EINVAL; 1769 1770 if (!rsa->exp || !rsa->mod || !rsa->src || !rsa->dst) 1771 return -EINVAL; 1772 1773 memset(&op, 0, sizeof(op)); 1774 op.cmd_q = cmd_q; 1775 op.jobid = CCP_NEW_JOBID(cmd_q->ccp); 1776 1777 /* The RSA modulus must precede the message being acted upon, so 1778 * it must be copied to a DMA area where the message and the 1779 * modulus can be concatenated. Therefore the input buffer 1780 * length required is twice the output buffer length (which 1781 * must be a multiple of 256-bits). Compute o_len, i_len in bytes. 1782 * Buffer sizes must be a multiple of 32 bytes; rounding up may be 1783 * required. 1784 */ 1785 o_len = 32 * ((rsa->key_size + 255) / 256); 1786 i_len = o_len * 2; 1787 1788 sb_count = 0; 1789 if (cmd_q->ccp->vdata->version < CCP_VERSION(5, 0)) { 1790 /* sb_count is the number of storage block slots required 1791 * for the modulus. 1792 */ 1793 sb_count = o_len / CCP_SB_BYTES; 1794 op.sb_key = cmd_q->ccp->vdata->perform->sballoc(cmd_q, 1795 sb_count); 1796 if (!op.sb_key) 1797 return -EIO; 1798 } else { 1799 /* A version 5 device allows a modulus size that will not fit 1800 * in the LSB, so the command will transfer it from memory. 1801 * Set the sb key to the default, even though it's not used. 1802 */ 1803 op.sb_key = cmd_q->sb_key; 1804 } 1805 1806 /* The RSA exponent must be in little endian format. Reverse its 1807 * byte order. 1808 */ 1809 ret = ccp_init_dm_workarea(&exp, cmd_q, o_len, DMA_TO_DEVICE); 1810 if (ret) 1811 goto e_sb; 1812 1813 ret = ccp_reverse_set_dm_area(&exp, 0, rsa->exp, 0, rsa->exp_len); 1814 if (ret) 1815 goto e_exp; 1816 1817 if (cmd_q->ccp->vdata->version < CCP_VERSION(5, 0)) { 1818 /* Copy the exponent to the local storage block, using 1819 * as many 32-byte blocks as were allocated above. It's 1820 * already little endian, so no further change is required. 1821 */ 1822 ret = ccp_copy_to_sb(cmd_q, &exp, op.jobid, op.sb_key, 1823 CCP_PASSTHRU_BYTESWAP_NOOP); 1824 if (ret) { 1825 cmd->engine_error = cmd_q->cmd_error; 1826 goto e_exp; 1827 } 1828 } else { 1829 /* The exponent can be retrieved from memory via DMA. 
*/ 1830 op.exp.u.dma.address = exp.dma.address; 1831 op.exp.u.dma.offset = 0; 1832 } 1833 1834 /* Concatenate the modulus and the message. Both the modulus and 1835 * the operands must be in little endian format. Since the input 1836 * is in big endian format it must be converted. 1837 */ 1838 ret = ccp_init_dm_workarea(&src, cmd_q, i_len, DMA_TO_DEVICE); 1839 if (ret) 1840 goto e_exp; 1841 1842 ret = ccp_reverse_set_dm_area(&src, 0, rsa->mod, 0, rsa->mod_len); 1843 if (ret) 1844 goto e_src; 1845 ret = ccp_reverse_set_dm_area(&src, o_len, rsa->src, 0, rsa->src_len); 1846 if (ret) 1847 goto e_src; 1848 1849 /* Prepare the output area for the operation */ 1850 ret = ccp_init_dm_workarea(&dst, cmd_q, o_len, DMA_FROM_DEVICE); 1851 if (ret) 1852 goto e_src; 1853 1854 op.soc = 1; 1855 op.src.u.dma.address = src.dma.address; 1856 op.src.u.dma.offset = 0; 1857 op.src.u.dma.length = i_len; 1858 op.dst.u.dma.address = dst.dma.address; 1859 op.dst.u.dma.offset = 0; 1860 op.dst.u.dma.length = o_len; 1861 1862 op.u.rsa.mod_size = rsa->key_size; 1863 op.u.rsa.input_len = i_len; 1864 1865 ret = cmd_q->ccp->vdata->perform->rsa(&op); 1866 if (ret) { 1867 cmd->engine_error = cmd_q->cmd_error; 1868 goto e_dst; 1869 } 1870 1871 ccp_reverse_get_dm_area(&dst, 0, rsa->dst, 0, rsa->mod_len); 1872 1873 e_dst: 1874 ccp_dm_free(&dst); 1875 1876 e_src: 1877 ccp_dm_free(&src); 1878 1879 e_exp: 1880 ccp_dm_free(&exp); 1881 1882 e_sb: 1883 if (sb_count) 1884 cmd_q->ccp->vdata->perform->sbfree(cmd_q, op.sb_key, sb_count); 1885 1886 return ret; 1887 } 1888 1889 static int ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q, 1890 struct ccp_cmd *cmd) 1891 { 1892 struct ccp_passthru_engine *pt = &cmd->u.passthru; 1893 struct ccp_dm_workarea mask; 1894 struct ccp_data src, dst; 1895 struct ccp_op op; 1896 bool in_place = false; 1897 unsigned int i; 1898 int ret = 0; 1899 1900 if (!pt->final && (pt->src_len & (CCP_PASSTHRU_BLOCKSIZE - 1))) 1901 return -EINVAL; 1902 1903 if (!pt->src || !pt->dst) 1904 return -EINVAL; 1905 1906 if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) { 1907 if (pt->mask_len != CCP_PASSTHRU_MASKSIZE) 1908 return -EINVAL; 1909 if (!pt->mask) 1910 return -EINVAL; 1911 } 1912 1913 BUILD_BUG_ON(CCP_PASSTHRU_SB_COUNT != 1); 1914 1915 memset(&op, 0, sizeof(op)); 1916 op.cmd_q = cmd_q; 1917 op.jobid = CCP_NEW_JOBID(cmd_q->ccp); 1918 1919 if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) { 1920 /* Load the mask */ 1921 op.sb_key = cmd_q->sb_key; 1922 1923 ret = ccp_init_dm_workarea(&mask, cmd_q, 1924 CCP_PASSTHRU_SB_COUNT * 1925 CCP_SB_BYTES, 1926 DMA_TO_DEVICE); 1927 if (ret) 1928 return ret; 1929 1930 ccp_set_dm_area(&mask, 0, pt->mask, 0, pt->mask_len); 1931 ret = ccp_copy_to_sb(cmd_q, &mask, op.jobid, op.sb_key, 1932 CCP_PASSTHRU_BYTESWAP_NOOP); 1933 if (ret) { 1934 cmd->engine_error = cmd_q->cmd_error; 1935 goto e_mask; 1936 } 1937 } 1938 1939 /* Prepare the input and output data workareas. For in-place 1940 * operations we need to set the dma direction to BIDIRECTIONAL 1941 * and copy the src workarea to the dst workarea. 1942 */ 1943 if (sg_virt(pt->src) == sg_virt(pt->dst)) 1944 in_place = true; 1945 1946 ret = ccp_init_data(&src, cmd_q, pt->src, pt->src_len, 1947 CCP_PASSTHRU_MASKSIZE, 1948 in_place ? 
DMA_BIDIRECTIONAL : DMA_TO_DEVICE); 1949 if (ret) 1950 goto e_mask; 1951 1952 if (in_place) { 1953 dst = src; 1954 } else { 1955 ret = ccp_init_data(&dst, cmd_q, pt->dst, pt->src_len, 1956 CCP_PASSTHRU_MASKSIZE, DMA_FROM_DEVICE); 1957 if (ret) 1958 goto e_src; 1959 } 1960 1961 /* Send data to the CCP Passthru engine 1962 * Because the CCP engine works on a single source and destination 1963 * dma address at a time, each entry in the source scatterlist 1964 * (after the dma_map_sg call) must be less than or equal to the 1965 * (remaining) length in the destination scatterlist entry and the 1966 * length must be a multiple of CCP_PASSTHRU_BLOCKSIZE 1967 */ 1968 dst.sg_wa.sg_used = 0; 1969 for (i = 1; i <= src.sg_wa.dma_count; i++) { 1970 if (!dst.sg_wa.sg || 1971 (dst.sg_wa.sg->length < src.sg_wa.sg->length)) { 1972 ret = -EINVAL; 1973 goto e_dst; 1974 } 1975 1976 if (i == src.sg_wa.dma_count) { 1977 op.eom = 1; 1978 op.soc = 1; 1979 } 1980 1981 op.src.type = CCP_MEMTYPE_SYSTEM; 1982 op.src.u.dma.address = sg_dma_address(src.sg_wa.sg); 1983 op.src.u.dma.offset = 0; 1984 op.src.u.dma.length = sg_dma_len(src.sg_wa.sg); 1985 1986 op.dst.type = CCP_MEMTYPE_SYSTEM; 1987 op.dst.u.dma.address = sg_dma_address(dst.sg_wa.sg); 1988 op.dst.u.dma.offset = dst.sg_wa.sg_used; 1989 op.dst.u.dma.length = op.src.u.dma.length; 1990 1991 ret = cmd_q->ccp->vdata->perform->passthru(&op); 1992 if (ret) { 1993 cmd->engine_error = cmd_q->cmd_error; 1994 goto e_dst; 1995 } 1996 1997 dst.sg_wa.sg_used += src.sg_wa.sg->length; 1998 if (dst.sg_wa.sg_used == dst.sg_wa.sg->length) { 1999 dst.sg_wa.sg = sg_next(dst.sg_wa.sg); 2000 dst.sg_wa.sg_used = 0; 2001 } 2002 src.sg_wa.sg = sg_next(src.sg_wa.sg); 2003 } 2004 2005 e_dst: 2006 if (!in_place) 2007 ccp_free_data(&dst, cmd_q); 2008 2009 e_src: 2010 ccp_free_data(&src, cmd_q); 2011 2012 e_mask: 2013 if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) 2014 ccp_dm_free(&mask); 2015 2016 return ret; 2017 } 2018 2019 static int ccp_run_passthru_nomap_cmd(struct ccp_cmd_queue *cmd_q, 2020 struct ccp_cmd *cmd) 2021 { 2022 struct ccp_passthru_nomap_engine *pt = &cmd->u.passthru_nomap; 2023 struct ccp_dm_workarea mask; 2024 struct ccp_op op; 2025 int ret; 2026 2027 if (!pt->final && (pt->src_len & (CCP_PASSTHRU_BLOCKSIZE - 1))) 2028 return -EINVAL; 2029 2030 if (!pt->src_dma || !pt->dst_dma) 2031 return -EINVAL; 2032 2033 if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) { 2034 if (pt->mask_len != CCP_PASSTHRU_MASKSIZE) 2035 return -EINVAL; 2036 if (!pt->mask) 2037 return -EINVAL; 2038 } 2039 2040 BUILD_BUG_ON(CCP_PASSTHRU_SB_COUNT != 1); 2041 2042 memset(&op, 0, sizeof(op)); 2043 op.cmd_q = cmd_q; 2044 op.jobid = CCP_NEW_JOBID(cmd_q->ccp); 2045 2046 if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) { 2047 /* Load the mask */ 2048 op.sb_key = cmd_q->sb_key; 2049 2050 mask.length = pt->mask_len; 2051 mask.dma.address = pt->mask; 2052 mask.dma.length = pt->mask_len; 2053 2054 ret = ccp_copy_to_sb(cmd_q, &mask, op.jobid, op.sb_key, 2055 CCP_PASSTHRU_BYTESWAP_NOOP); 2056 if (ret) { 2057 cmd->engine_error = cmd_q->cmd_error; 2058 return ret; 2059 } 2060 } 2061 2062 /* Send data to the CCP Passthru engine */ 2063 op.eom = 1; 2064 op.soc = 1; 2065 2066 op.src.type = CCP_MEMTYPE_SYSTEM; 2067 op.src.u.dma.address = pt->src_dma; 2068 op.src.u.dma.offset = 0; 2069 op.src.u.dma.length = pt->src_len; 2070 2071 op.dst.type = CCP_MEMTYPE_SYSTEM; 2072 op.dst.u.dma.address = pt->dst_dma; 2073 op.dst.u.dma.offset = 0; 2074 op.dst.u.dma.length = pt->src_len; 2075 2076 ret = 

static int ccp_run_passthru_nomap_cmd(struct ccp_cmd_queue *cmd_q,
				       struct ccp_cmd *cmd)
{
	struct ccp_passthru_nomap_engine *pt = &cmd->u.passthru_nomap;
	struct ccp_dm_workarea mask;
	struct ccp_op op;
	int ret;

	if (!pt->final && (pt->src_len & (CCP_PASSTHRU_BLOCKSIZE - 1)))
		return -EINVAL;

	if (!pt->src_dma || !pt->dst_dma)
		return -EINVAL;

	if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
		if (pt->mask_len != CCP_PASSTHRU_MASKSIZE)
			return -EINVAL;
		if (!pt->mask)
			return -EINVAL;
	}

	BUILD_BUG_ON(CCP_PASSTHRU_SB_COUNT != 1);

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);

	if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
		/* Load the mask */
		op.sb_key = cmd_q->sb_key;

		mask.length = pt->mask_len;
		mask.dma.address = pt->mask;
		mask.dma.length = pt->mask_len;

		ret = ccp_copy_to_sb(cmd_q, &mask, op.jobid, op.sb_key,
				     CCP_PASSTHRU_BYTESWAP_NOOP);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			return ret;
		}
	}

	/* Send data to the CCP Passthru engine */
	op.eom = 1;
	op.soc = 1;

	op.src.type = CCP_MEMTYPE_SYSTEM;
	op.src.u.dma.address = pt->src_dma;
	op.src.u.dma.offset = 0;
	op.src.u.dma.length = pt->src_len;

	op.dst.type = CCP_MEMTYPE_SYSTEM;
	op.dst.u.dma.address = pt->dst_dma;
	op.dst.u.dma.offset = 0;
	op.dst.u.dma.length = pt->src_len;

	ret = cmd_q->ccp->vdata->perform->passthru(&op);
	if (ret)
		cmd->engine_error = cmd_q->cmd_error;

	return ret;
}

static int ccp_run_ecc_mm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_ecc_engine *ecc = &cmd->u.ecc;
	struct ccp_dm_workarea src, dst;
	struct ccp_op op;
	int ret;
	u8 *save;

	if (!ecc->u.mm.operand_1 ||
	    (ecc->u.mm.operand_1_len > CCP_ECC_MODULUS_BYTES))
		return -EINVAL;

	if (ecc->function != CCP_ECC_FUNCTION_MINV_384BIT)
		if (!ecc->u.mm.operand_2 ||
		    (ecc->u.mm.operand_2_len > CCP_ECC_MODULUS_BYTES))
			return -EINVAL;

	if (!ecc->u.mm.result ||
	    (ecc->u.mm.result_len < CCP_ECC_MODULUS_BYTES))
		return -EINVAL;

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);

	/* Concatenate the modulus and the operands. Both the modulus and
	 * the operands must be in little endian format. Since the input
	 * is in big endian format it must be converted and placed in a
	 * fixed length buffer.
	 */
	ret = ccp_init_dm_workarea(&src, cmd_q, CCP_ECC_SRC_BUF_SIZE,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	/* Save the workarea address since it is updated in order to perform
	 * the concatenation
	 */
	save = src.address;

	/* Copy the ECC modulus */
	ret = ccp_reverse_set_dm_area(&src, 0, ecc->mod, 0, ecc->mod_len);
	if (ret)
		goto e_src;
	src.address += CCP_ECC_OPERAND_SIZE;

	/* Copy the first operand */
	ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.mm.operand_1, 0,
				      ecc->u.mm.operand_1_len);
	if (ret)
		goto e_src;
	src.address += CCP_ECC_OPERAND_SIZE;

	if (ecc->function != CCP_ECC_FUNCTION_MINV_384BIT) {
		/* Copy the second operand */
		ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.mm.operand_2, 0,
					      ecc->u.mm.operand_2_len);
		if (ret)
			goto e_src;
		src.address += CCP_ECC_OPERAND_SIZE;
	}

	/* Restore the workarea address */
	src.address = save;

	/* Prepare the output area for the operation */
	ret = ccp_init_dm_workarea(&dst, cmd_q, CCP_ECC_DST_BUF_SIZE,
				   DMA_FROM_DEVICE);
	if (ret)
		goto e_src;

	op.soc = 1;
	op.src.u.dma.address = src.dma.address;
	op.src.u.dma.offset = 0;
	op.src.u.dma.length = src.length;
	op.dst.u.dma.address = dst.dma.address;
	op.dst.u.dma.offset = 0;
	op.dst.u.dma.length = dst.length;

	op.u.ecc.function = cmd->u.ecc.function;

	ret = cmd_q->ccp->vdata->perform->ecc(&op);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_dst;
	}

	ecc->ecc_result = le16_to_cpup(
		(const __le16 *)(dst.address + CCP_ECC_RESULT_OFFSET));
	if (!(ecc->ecc_result & CCP_ECC_RESULT_SUCCESS)) {
		ret = -EIO;
		goto e_dst;
	}

	/* Save the ECC result */
	ccp_reverse_get_dm_area(&dst, 0, ecc->u.mm.result, 0,
				CCP_ECC_MODULUS_BYTES);

e_dst:
	ccp_dm_free(&dst);

e_src:
	ccp_dm_free(&src);

	return ret;
}
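/* Layout sketch (illustrative) of the source workarea built by
 * ccp_run_ecc_mm_cmd() above: each value is reversed into little endian
 * and placed in its own CCP_ECC_OPERAND_SIZE slot, with the unused tail of
 * each slot left zeroed by ccp_init_dm_workarea():
 *
 *	slot 0: modulus
 *	slot 1: operand_1
 *	slot 2: operand_2   (omitted for CCP_ECC_FUNCTION_MINV_384BIT)
 */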

static int ccp_run_ecc_pm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_ecc_engine *ecc = &cmd->u.ecc;
	struct ccp_dm_workarea src, dst;
	struct ccp_op op;
	int ret;
	u8 *save;

	if (!ecc->u.pm.point_1.x ||
	    (ecc->u.pm.point_1.x_len > CCP_ECC_MODULUS_BYTES) ||
	    !ecc->u.pm.point_1.y ||
	    (ecc->u.pm.point_1.y_len > CCP_ECC_MODULUS_BYTES))
		return -EINVAL;

	if (ecc->function == CCP_ECC_FUNCTION_PADD_384BIT) {
		if (!ecc->u.pm.point_2.x ||
		    (ecc->u.pm.point_2.x_len > CCP_ECC_MODULUS_BYTES) ||
		    !ecc->u.pm.point_2.y ||
		    (ecc->u.pm.point_2.y_len > CCP_ECC_MODULUS_BYTES))
			return -EINVAL;
	} else {
		if (!ecc->u.pm.domain_a ||
		    (ecc->u.pm.domain_a_len > CCP_ECC_MODULUS_BYTES))
			return -EINVAL;

		if (ecc->function == CCP_ECC_FUNCTION_PMUL_384BIT)
			if (!ecc->u.pm.scalar ||
			    (ecc->u.pm.scalar_len > CCP_ECC_MODULUS_BYTES))
				return -EINVAL;
	}

	if (!ecc->u.pm.result.x ||
	    (ecc->u.pm.result.x_len < CCP_ECC_MODULUS_BYTES) ||
	    !ecc->u.pm.result.y ||
	    (ecc->u.pm.result.y_len < CCP_ECC_MODULUS_BYTES))
		return -EINVAL;

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);

	/* Concatenate the modulus and the operands. Both the modulus and
	 * the operands must be in little endian format. Since the input
	 * is in big endian format it must be converted and placed in a
	 * fixed length buffer.
	 */
	ret = ccp_init_dm_workarea(&src, cmd_q, CCP_ECC_SRC_BUF_SIZE,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	/* Save the workarea address since it is updated in order to perform
	 * the concatenation
	 */
	save = src.address;

	/* Copy the ECC modulus */
	ret = ccp_reverse_set_dm_area(&src, 0, ecc->mod, 0, ecc->mod_len);
	if (ret)
		goto e_src;
	src.address += CCP_ECC_OPERAND_SIZE;

	/* Copy the first point X and Y coordinate */
	ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.point_1.x, 0,
				      ecc->u.pm.point_1.x_len);
	if (ret)
		goto e_src;
	src.address += CCP_ECC_OPERAND_SIZE;
	ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.point_1.y, 0,
				      ecc->u.pm.point_1.y_len);
	if (ret)
		goto e_src;
	src.address += CCP_ECC_OPERAND_SIZE;

	/* Set the first point Z coordinate to 1 */
	*src.address = 0x01;
	src.address += CCP_ECC_OPERAND_SIZE;

	if (ecc->function == CCP_ECC_FUNCTION_PADD_384BIT) {
		/* Copy the second point X and Y coordinate */
		ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.point_2.x, 0,
					      ecc->u.pm.point_2.x_len);
		if (ret)
			goto e_src;
		src.address += CCP_ECC_OPERAND_SIZE;
		ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.point_2.y, 0,
					      ecc->u.pm.point_2.y_len);
		if (ret)
			goto e_src;
		src.address += CCP_ECC_OPERAND_SIZE;

		/* Set the second point Z coordinate to 1 */
		*src.address = 0x01;
		src.address += CCP_ECC_OPERAND_SIZE;
	} else {
		/* Copy the Domain "a" parameter */
		ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.domain_a, 0,
					      ecc->u.pm.domain_a_len);
		if (ret)
			goto e_src;
		src.address += CCP_ECC_OPERAND_SIZE;

		if (ecc->function == CCP_ECC_FUNCTION_PMUL_384BIT) {
			/* Copy the scalar value */
			ret = ccp_reverse_set_dm_area(&src, 0,
						      ecc->u.pm.scalar, 0,
						      ecc->u.pm.scalar_len);
			if (ret)
				goto e_src;
			src.address += CCP_ECC_OPERAND_SIZE;
		}
	}

	/* Restore the workarea address */
	src.address = save;

	/* Prepare the output area for the operation */
	ret = ccp_init_dm_workarea(&dst, cmd_q, CCP_ECC_DST_BUF_SIZE,
				   DMA_FROM_DEVICE);
	if (ret)
		goto e_src;

	op.soc = 1;
	op.src.u.dma.address = src.dma.address;
	op.src.u.dma.offset = 0;
	op.src.u.dma.length = src.length;
	op.dst.u.dma.address = dst.dma.address;
	op.dst.u.dma.offset = 0;
	op.dst.u.dma.length = dst.length;

	op.u.ecc.function = cmd->u.ecc.function;

	ret = cmd_q->ccp->vdata->perform->ecc(&op);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_dst;
	}

	ecc->ecc_result = le16_to_cpup(
		(const __le16 *)(dst.address + CCP_ECC_RESULT_OFFSET));
	if (!(ecc->ecc_result & CCP_ECC_RESULT_SUCCESS)) {
		ret = -EIO;
		goto e_dst;
	}

	/* Save the workarea address since it is updated as we walk through
	 * to copy the point math result
	 */
	save = dst.address;

	/* Save the ECC result X and Y coordinates */
	ccp_reverse_get_dm_area(&dst, 0, ecc->u.pm.result.x, 0,
				CCP_ECC_MODULUS_BYTES);
	dst.address += CCP_ECC_OUTPUT_SIZE;
	ccp_reverse_get_dm_area(&dst, 0, ecc->u.pm.result.y, 0,
				CCP_ECC_MODULUS_BYTES);
	dst.address += CCP_ECC_OUTPUT_SIZE;

	/* Restore the workarea address */
	dst.address = save;

e_dst:
	ccp_dm_free(&dst);

e_src:
	ccp_dm_free(&src);

	return ret;
}
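/* Illustrative only -- a minimal sketch of the engine-specific fields a
 * caller might fill in for a 384-bit point multiply, matching the checks in
 * ccp_run_ecc_pm_cmd() above. All operands are scatterlists holding big
 * endian values of at most CCP_ECC_MODULUS_BYTES (result buffers at least
 * that long); the names curve_p, gx, gy, a, k, rx and ry are placeholders:
 *
 *	struct ccp_cmd cmd;
 *
 *	memset(&cmd, 0, sizeof(cmd));
 *	cmd.engine = CCP_ENGINE_ECC;
 *	cmd.u.ecc.function = CCP_ECC_FUNCTION_PMUL_384BIT;
 *	cmd.u.ecc.mod = curve_p;
 *	cmd.u.ecc.mod_len = curve_p_len;
 *	cmd.u.ecc.u.pm.point_1.x = gx;
 *	cmd.u.ecc.u.pm.point_1.x_len = gx_len;
 *	cmd.u.ecc.u.pm.point_1.y = gy;
 *	cmd.u.ecc.u.pm.point_1.y_len = gy_len;
 *	cmd.u.ecc.u.pm.domain_a = a;
 *	cmd.u.ecc.u.pm.domain_a_len = a_len;
 *	cmd.u.ecc.u.pm.scalar = k;
 *	cmd.u.ecc.u.pm.scalar_len = k_len;
 *	cmd.u.ecc.u.pm.result.x = rx;
 *	cmd.u.ecc.u.pm.result.x_len = rx_len;
 *	cmd.u.ecc.u.pm.result.y = ry;
 *	cmd.u.ecc.u.pm.result.y_len = ry_len;
 *
 * On completion the raw engine status is left in cmd.u.ecc.ecc_result and
 * must have CCP_ECC_RESULT_SUCCESS set for the result to be valid.
 */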

static int ccp_run_ecc_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_ecc_engine *ecc = &cmd->u.ecc;

	ecc->ecc_result = 0;

	if (!ecc->mod ||
	    (ecc->mod_len > CCP_ECC_MODULUS_BYTES))
		return -EINVAL;

	switch (ecc->function) {
	case CCP_ECC_FUNCTION_MMUL_384BIT:
	case CCP_ECC_FUNCTION_MADD_384BIT:
	case CCP_ECC_FUNCTION_MINV_384BIT:
		return ccp_run_ecc_mm_cmd(cmd_q, cmd);

	case CCP_ECC_FUNCTION_PADD_384BIT:
	case CCP_ECC_FUNCTION_PMUL_384BIT:
	case CCP_ECC_FUNCTION_PDBL_384BIT:
		return ccp_run_ecc_pm_cmd(cmd_q, cmd);

	default:
		return -EINVAL;
	}
}

int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	int ret;

	cmd->engine_error = 0;
	cmd_q->cmd_error = 0;
	cmd_q->int_rcvd = 0;
	cmd_q->free_slots = cmd_q->ccp->vdata->perform->get_free_slots(cmd_q);

	switch (cmd->engine) {
	case CCP_ENGINE_AES:
		ret = ccp_run_aes_cmd(cmd_q, cmd);
		break;
	case CCP_ENGINE_XTS_AES_128:
		ret = ccp_run_xts_aes_cmd(cmd_q, cmd);
		break;
	case CCP_ENGINE_DES3:
		ret = ccp_run_des3_cmd(cmd_q, cmd);
		break;
	case CCP_ENGINE_SHA:
		ret = ccp_run_sha_cmd(cmd_q, cmd);
		break;
	case CCP_ENGINE_RSA:
		ret = ccp_run_rsa_cmd(cmd_q, cmd);
		break;
	case CCP_ENGINE_PASSTHRU:
		if (cmd->flags & CCP_CMD_PASSTHRU_NO_DMA_MAP)
			ret = ccp_run_passthru_nomap_cmd(cmd_q, cmd);
		else
			ret = ccp_run_passthru_cmd(cmd_q, cmd);
		break;
	case CCP_ENGINE_ECC:
		ret = ccp_run_ecc_cmd(cmd_q, cmd);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
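/* Usage sketch (illustrative, assumed caller): ccp_run_cmd() is the single
 * dispatch point for a command queue, so a queue worker that has dequeued a
 * struct ccp_cmd only needs:
 *
 *	ret = ccp_run_cmd(cmd_q, cmd);
 *
 * On failure, ret carries the engine handler's return code and
 * cmd->engine_error carries the queue's cmd_error value captured by that
 * handler; unrecognized engine values return -EINVAL without touching the
 * hardware.
 */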