/*
 * Freescale i.MX23/i.MX28 Data Co-Processor driver
 *
 * Copyright (C) 2013 Marek Vasut <marex@denx.de>
 *
 * The code contained herein is licensed under the GNU General Public
 * License. You may obtain a copy of the GNU General Public License
 * Version 2 or later at the following locations:
 *
 * http://www.opensource.org/licenses/gpl-license.html
 * http://www.gnu.org/copyleft/gpl.html
 */

#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/stmp_device.h>
#include <linux/clk.h>

#include <crypto/aes.h>
#include <crypto/sha.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>

#define DCP_MAX_CHANS	4
#define DCP_BUF_SZ	PAGE_SIZE
#define DCP_SHA_PAY_SZ	64

#define DCP_ALIGNMENT	64

/*
 * Null hashes to align with hardware behavior on imx6sl and imx6ull.
 * These are byte-flipped for consistency with the hardware output.
 */
static const uint8_t sha1_null_hash[] =
	"\x09\x07\xd8\xaf\x90\x18\x60\x95\xef\xbf"
	"\x55\x32\x0d\x4b\x6b\x5e\xee\xa3\x39\xda";

static const uint8_t sha256_null_hash[] =
	"\x55\xb8\x52\x78\x1b\x99\x95\xa4"
	"\x4c\x93\x9b\x64\xe4\x41\xae\x27"
	"\x24\xb9\x6f\x99\xc8\xf4\xfb\x9a"
	"\x14\x1c\xfc\x98\x42\xc4\xb0\xe3";
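/*
 * Illustrative note (not used by the driver): the two tables above are the
 * canonical empty-message digests, SHA1("") = da39a3ee...0709 and
 * SHA256("") = e3b0c442...b855, stored back to front. A hypothetical helper
 * deriving them from the canonical byte order would look like:
 *
 *	static void dcp_reverse_digest(uint8_t *dst, const uint8_t *src,
 *				       unsigned int len)
 *	{
 *		unsigned int i;
 *
 *		for (i = 0; i < len; i++)
 *			dst[i] = src[len - i - 1];
 *	}
 */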
/* DCP DMA descriptor. */
struct dcp_dma_desc {
	uint32_t	next_cmd_addr;
	uint32_t	control0;
	uint32_t	control1;
	uint32_t	source;
	uint32_t	destination;
	uint32_t	size;
	uint32_t	payload;
	uint32_t	status;
};

/* Coherent aligned block for bounce buffering. */
struct dcp_coherent_block {
	uint8_t			aes_in_buf[DCP_BUF_SZ];
	uint8_t			aes_out_buf[DCP_BUF_SZ];
	uint8_t			sha_in_buf[DCP_BUF_SZ];
	uint8_t			sha_out_buf[DCP_SHA_PAY_SZ];

	uint8_t			aes_key[2 * AES_KEYSIZE_128];

	struct dcp_dma_desc	desc[DCP_MAX_CHANS];
};

struct dcp {
	struct device			*dev;
	void __iomem			*base;

	uint32_t			caps;

	struct dcp_coherent_block	*coh;

	struct completion		completion[DCP_MAX_CHANS];
	spinlock_t			lock[DCP_MAX_CHANS];
	struct task_struct		*thread[DCP_MAX_CHANS];
	struct crypto_queue		queue[DCP_MAX_CHANS];
	struct clk			*dcp_clk;
};

enum dcp_chan {
	DCP_CHAN_HASH_SHA	= 0,
	DCP_CHAN_CRYPTO		= 2,
};

struct dcp_async_ctx {
	/* Common context */
	enum dcp_chan	chan;
	uint32_t	fill;

	/* SHA Hash-specific context */
	struct mutex			mutex;
	uint32_t			alg;
	unsigned int			hot:1;

	/* Crypto-specific context */
	struct crypto_sync_skcipher	*fallback;
	unsigned int			key_len;
	uint8_t				key[AES_KEYSIZE_128];
};

struct dcp_aes_req_ctx {
	unsigned int	enc:1;
	unsigned int	ecb:1;
};

struct dcp_sha_req_ctx {
	unsigned int	init:1;
	unsigned int	fini:1;
};

struct dcp_export_state {
	struct dcp_sha_req_ctx req_ctx;
	struct dcp_async_ctx async_ctx;
};

/*
 * There can be only one instance of the MXS DCP due to the design of
 * the Linux Crypto API.
 */
static struct dcp *global_sdcp;

/* DCP register layout. */
#define MXS_DCP_CTRL				0x00
#define MXS_DCP_CTRL_GATHER_RESIDUAL_WRITES	(1 << 23)
#define MXS_DCP_CTRL_ENABLE_CONTEXT_CACHING	(1 << 22)

#define MXS_DCP_STAT				0x10
#define MXS_DCP_STAT_CLR			0x18
#define MXS_DCP_STAT_IRQ_MASK			0xf

#define MXS_DCP_CHANNELCTRL			0x20
#define MXS_DCP_CHANNELCTRL_ENABLE_CHANNEL_MASK	0xff

#define MXS_DCP_CAPABILITY1			0x40
#define MXS_DCP_CAPABILITY1_SHA256		(4 << 16)
#define MXS_DCP_CAPABILITY1_SHA1		(1 << 16)
#define MXS_DCP_CAPABILITY1_AES128		(1 << 0)

#define MXS_DCP_CONTEXT				0x50

#define MXS_DCP_CH_N_CMDPTR(n)			(0x100 + ((n) * 0x40))

#define MXS_DCP_CH_N_SEMA(n)			(0x110 + ((n) * 0x40))

#define MXS_DCP_CH_N_STAT(n)			(0x120 + ((n) * 0x40))
#define MXS_DCP_CH_N_STAT_CLR(n)		(0x128 + ((n) * 0x40))

/* DMA descriptor bits. */
#define MXS_DCP_CONTROL0_HASH_TERM		(1 << 13)
#define MXS_DCP_CONTROL0_HASH_INIT		(1 << 12)
#define MXS_DCP_CONTROL0_PAYLOAD_KEY		(1 << 11)
#define MXS_DCP_CONTROL0_CIPHER_ENCRYPT		(1 << 8)
#define MXS_DCP_CONTROL0_CIPHER_INIT		(1 << 9)
#define MXS_DCP_CONTROL0_ENABLE_HASH		(1 << 6)
#define MXS_DCP_CONTROL0_ENABLE_CIPHER		(1 << 5)
#define MXS_DCP_CONTROL0_DECR_SEMAPHORE		(1 << 1)
#define MXS_DCP_CONTROL0_INTERRUPT		(1 << 0)

#define MXS_DCP_CONTROL1_HASH_SELECT_SHA256	(2 << 16)
#define MXS_DCP_CONTROL1_HASH_SELECT_SHA1	(0 << 16)
#define MXS_DCP_CONTROL1_CIPHER_MODE_CBC	(1 << 4)
#define MXS_DCP_CONTROL1_CIPHER_MODE_ECB	(0 << 4)
#define MXS_DCP_CONTROL1_CIPHER_SELECT_AES128	(0 << 0)

static int mxs_dcp_start_dma(struct dcp_async_ctx *actx)
{
	struct dcp *sdcp = global_sdcp;
	const int chan = actx->chan;
	uint32_t stat;
	unsigned long ret;
	int err = 0;
	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];

	dma_addr_t desc_phys = dma_map_single(sdcp->dev, desc, sizeof(*desc),
					      DMA_TO_DEVICE);

	reinit_completion(&sdcp->completion[chan]);

	/* Clear status register. */
	writel(0xffffffff, sdcp->base + MXS_DCP_CH_N_STAT_CLR(chan));

	/* Load the DMA descriptor. */
	writel(desc_phys, sdcp->base + MXS_DCP_CH_N_CMDPTR(chan));

	/* Increment the semaphore to start the DMA transfer. */
	writel(1, sdcp->base + MXS_DCP_CH_N_SEMA(chan));

	ret = wait_for_completion_timeout(&sdcp->completion[chan],
					  msecs_to_jiffies(1000));
	if (!ret) {
		dev_err(sdcp->dev, "Channel %i timeout (DCP_STAT=0x%08x)\n",
			chan, readl(sdcp->base + MXS_DCP_STAT));
		err = -ETIMEDOUT;
		goto out_unmap;
	}

	stat = readl(sdcp->base + MXS_DCP_CH_N_STAT(chan));
	if (stat & 0xff) {
		dev_err(sdcp->dev, "Channel %i error (CH_STAT=0x%08x)\n",
			chan, stat);
		err = -EINVAL;
	}

out_unmap:
	/* Unmap on all paths so the error returns do not leak the mapping. */
	dma_unmap_single(sdcp->dev, desc_phys, sizeof(*desc), DMA_TO_DEVICE);

	return err;
}
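/*
 * Chaining sketch (illustrative only; this driver always submits a single
 * descriptor per semaphore increment). Going by the next_cmd_addr field and
 * the assumed semaphore semantics (one decrement per descriptor carrying
 * MXS_DCP_CONTROL0_DECR_SEMAPHORE), linking two mapped descriptors d0/d1
 * with bus addresses d0_phys/d1_phys would look roughly like:
 *
 *	d0->next_cmd_addr = d1_phys;
 *	d0->control0 &= ~MXS_DCP_CONTROL0_INTERRUPT;	(interrupt only once)
 *	d1->next_cmd_addr = 0;
 *	writel(d0_phys, sdcp->base + MXS_DCP_CH_N_CMDPTR(chan));
 *	writel(2, sdcp->base + MXS_DCP_CH_N_SEMA(chan));
 */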
/*
 * Encryption (AES128)
 */
static int mxs_dcp_run_aes(struct dcp_async_ctx *actx,
			   struct ablkcipher_request *req, int init)
{
	struct dcp *sdcp = global_sdcp;
	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
	struct dcp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
	int ret;

	dma_addr_t key_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_key,
					     2 * AES_KEYSIZE_128,
					     DMA_TO_DEVICE);
	dma_addr_t src_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_in_buf,
					     DCP_BUF_SZ, DMA_TO_DEVICE);
	dma_addr_t dst_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_out_buf,
					     DCP_BUF_SZ, DMA_FROM_DEVICE);

	if (actx->fill % AES_BLOCK_SIZE) {
		dev_err(sdcp->dev, "Invalid block size!\n");
		ret = -EINVAL;
		goto aes_done_run;
	}

	/* Fill in the DMA descriptor. */
	desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE |
			 MXS_DCP_CONTROL0_INTERRUPT |
			 MXS_DCP_CONTROL0_ENABLE_CIPHER;

	/* Payload contains the key. */
	desc->control0 |= MXS_DCP_CONTROL0_PAYLOAD_KEY;

	if (rctx->enc)
		desc->control0 |= MXS_DCP_CONTROL0_CIPHER_ENCRYPT;
	if (init)
		desc->control0 |= MXS_DCP_CONTROL0_CIPHER_INIT;

	desc->control1 = MXS_DCP_CONTROL1_CIPHER_SELECT_AES128;

	if (rctx->ecb)
		desc->control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_ECB;
	else
		desc->control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_CBC;

	desc->next_cmd_addr = 0;
	desc->source = src_phys;
	desc->destination = dst_phys;
	desc->size = actx->fill;
	desc->payload = key_phys;
	desc->status = 0;

	ret = mxs_dcp_start_dma(actx);

aes_done_run:
	dma_unmap_single(sdcp->dev, key_phys, 2 * AES_KEYSIZE_128,
			 DMA_TO_DEVICE);
	dma_unmap_single(sdcp->dev, src_phys, DCP_BUF_SZ, DMA_TO_DEVICE);
	dma_unmap_single(sdcp->dev, dst_phys, DCP_BUF_SZ, DMA_FROM_DEVICE);

	return ret;
}
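/*
 * Payload layout sketch (as implied by mxs_dcp_run_aes() above and the key
 * handling in mxs_dcp_aes_block_crypt() below): with
 * MXS_DCP_CONTROL0_PAYLOAD_KEY set, the engine reads the 32-byte buffer at
 * desc->payload as:
 *
 *	bytes  0..15: AES-128 key
 *	bytes 16..31: CBC IV (zeroed for ECB)
 */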
static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
{
	struct dcp *sdcp = global_sdcp;

	struct ablkcipher_request *req = ablkcipher_request_cast(arq);
	struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm);
	struct dcp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);

	struct scatterlist *dst = req->dst;
	struct scatterlist *src = req->src;
	const int nents = sg_nents(req->src);

	const int out_off = DCP_BUF_SZ;
	uint8_t *in_buf = sdcp->coh->aes_in_buf;
	uint8_t *out_buf = sdcp->coh->aes_out_buf;

	uint8_t *out_tmp, *src_buf, *dst_buf = NULL;
	uint32_t dst_off = 0;
	uint32_t last_out_len = 0;

	uint8_t *key = sdcp->coh->aes_key;

	int ret = 0;
	int split = 0;
	unsigned int i, len, clen, rem = 0, tlen = 0;
	int init = 0;
	bool limit_hit = false;

	actx->fill = 0;

	/* Copy the key from the temporary location. */
	memcpy(key, actx->key, actx->key_len);

	if (!rctx->ecb) {
		/* Copy the CBC IV just past the key. */
		memcpy(key + AES_KEYSIZE_128, req->info, AES_KEYSIZE_128);
		/* CBC needs the INIT set. */
		init = 1;
	} else {
		memset(key + AES_KEYSIZE_128, 0, AES_KEYSIZE_128);
	}

	for_each_sg(req->src, src, nents, i) {
		src_buf = sg_virt(src);
		len = sg_dma_len(src);
		tlen += len;
		limit_hit = tlen > req->nbytes;

		if (limit_hit)
			len = req->nbytes - (tlen - len);

		do {
			if (actx->fill + len > out_off)
				clen = out_off - actx->fill;
			else
				clen = len;

			memcpy(in_buf + actx->fill, src_buf, clen);
			len -= clen;
			src_buf += clen;
			actx->fill += clen;

			/*
			 * If we filled the buffer or this is the last SG,
			 * submit the buffer.
			 */
			if (actx->fill == out_off || sg_is_last(src) ||
			    limit_hit) {
				ret = mxs_dcp_run_aes(actx, req, init);
				if (ret)
					return ret;
				init = 0;

				out_tmp = out_buf;
				last_out_len = actx->fill;
				while (dst && actx->fill) {
					if (!split) {
						dst_buf = sg_virt(dst);
						dst_off = 0;
					}
					rem = min(sg_dma_len(dst) - dst_off,
						  actx->fill);

					memcpy(dst_buf + dst_off, out_tmp, rem);
					out_tmp += rem;
					dst_off += rem;
					actx->fill -= rem;

					if (dst_off == sg_dma_len(dst)) {
						dst = sg_next(dst);
						split = 0;
					} else {
						split = 1;
					}
				}
			}
		} while (len);

		if (limit_hit)
			break;
	}

	/* Copy the IV for CBC for chaining. */
	if (!rctx->ecb) {
		if (rctx->enc)
			memcpy(req->info, out_buf+(last_out_len-AES_BLOCK_SIZE),
			       AES_BLOCK_SIZE);
		else
			memcpy(req->info, in_buf+(last_out_len-AES_BLOCK_SIZE),
			       AES_BLOCK_SIZE);
	}

	return ret;
}

static int dcp_chan_thread_aes(void *data)
{
	struct dcp *sdcp = global_sdcp;
	const int chan = DCP_CHAN_CRYPTO;

	struct crypto_async_request *backlog;
	struct crypto_async_request *arq;

	int ret;

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);

		spin_lock(&sdcp->lock[chan]);
		backlog = crypto_get_backlog(&sdcp->queue[chan]);
		arq = crypto_dequeue_request(&sdcp->queue[chan]);
		spin_unlock(&sdcp->lock[chan]);

		if (!backlog && !arq) {
			schedule();
			continue;
		}

		set_current_state(TASK_RUNNING);

		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);

		if (arq) {
			ret = mxs_dcp_aes_block_crypt(arq);
			arq->complete(arq, ret);
		}
	}

	return 0;
}

static int mxs_dcp_block_fallback(struct ablkcipher_request *req, int enc)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct dcp_async_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
	int ret;

	skcipher_request_set_sync_tfm(subreq, ctx->fallback);
	skcipher_request_set_callback(subreq, req->base.flags, NULL, NULL);
	skcipher_request_set_crypt(subreq, req->src, req->dst,
				   req->nbytes, req->info);

	if (enc)
		ret = crypto_skcipher_encrypt(subreq);
	else
		ret = crypto_skcipher_decrypt(subreq);

	skcipher_request_zero(subreq);

	return ret;
}
static int mxs_dcp_aes_enqueue(struct ablkcipher_request *req, int enc, int ecb)
{
	struct dcp *sdcp = global_sdcp;
	struct crypto_async_request *arq = &req->base;
	struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm);
	struct dcp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
	int ret;

	if (unlikely(actx->key_len != AES_KEYSIZE_128))
		return mxs_dcp_block_fallback(req, enc);

	rctx->enc = enc;
	rctx->ecb = ecb;
	actx->chan = DCP_CHAN_CRYPTO;

	spin_lock(&sdcp->lock[actx->chan]);
	ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
	spin_unlock(&sdcp->lock[actx->chan]);

	wake_up_process(sdcp->thread[actx->chan]);

	/* Propagate the queue status (-EINPROGRESS, -EBUSY or -ENOSPC). */
	return ret;
}

static int mxs_dcp_aes_ecb_decrypt(struct ablkcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 0, 1);
}

static int mxs_dcp_aes_ecb_encrypt(struct ablkcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 1, 1);
}

static int mxs_dcp_aes_cbc_decrypt(struct ablkcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 0, 0);
}

static int mxs_dcp_aes_cbc_encrypt(struct ablkcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 1, 0);
}

static int mxs_dcp_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			      unsigned int len)
{
	struct dcp_async_ctx *actx = crypto_ablkcipher_ctx(tfm);
	int ret;

	/*
	 * AES-128 is supported by the hardware; store the key into a
	 * temporary buffer and exit. We must use the temporary buffer here,
	 * since there can still be an operation in progress.
	 */
	actx->key_len = len;
	if (len == AES_KEYSIZE_128) {
		memcpy(actx->key, key, len);
		return 0;
	}

	/*
	 * If the requested AES key size is not supported by the hardware,
	 * but is supported by an in-kernel software implementation, we use
	 * the software fallback.
	 */
	crypto_sync_skcipher_clear_flags(actx->fallback, CRYPTO_TFM_REQ_MASK);
	crypto_sync_skcipher_set_flags(actx->fallback,
				       tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

	ret = crypto_sync_skcipher_setkey(actx->fallback, key, len);
	if (!ret)
		return 0;

	tfm->base.crt_flags &= ~CRYPTO_TFM_RES_MASK;
	tfm->base.crt_flags |= crypto_sync_skcipher_get_flags(actx->fallback) &
			       CRYPTO_TFM_RES_MASK;

	return ret;
}

static int mxs_dcp_aes_fallback_init(struct crypto_tfm *tfm)
{
	const char *name = crypto_tfm_alg_name(tfm);
	struct dcp_async_ctx *actx = crypto_tfm_ctx(tfm);
	struct crypto_sync_skcipher *blk;

	blk = crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(blk))
		return PTR_ERR(blk);

	actx->fallback = blk;
	tfm->crt_ablkcipher.reqsize = sizeof(struct dcp_aes_req_ctx);
	return 0;
}

static void mxs_dcp_aes_fallback_exit(struct crypto_tfm *tfm)
{
	struct dcp_async_ctx *actx = crypto_tfm_ctx(tfm);

	crypto_free_sync_skcipher(actx->fallback);
}
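/*
 * Usage sketch (illustrative, error handling elided; my_complete/my_ctx are
 * placeholders): a consumer reaches this driver through the generic crypto
 * API. With a 128-bit key the request is queued to the DCP channel thread;
 * any other key length takes the software fallback set up above:
 *
 *	struct crypto_skcipher *tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *	struct skcipher_request *req = skcipher_request_alloc(tfm, GFP_KERNEL);
 *
 *	crypto_skcipher_setkey(tfm, key, AES_KEYSIZE_128);
 *	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				      my_complete, my_ctx);
 *	skcipher_request_set_crypt(req, src_sg, dst_sg, nbytes, iv);
 *	crypto_skcipher_encrypt(req);	(completes asynchronously)
 */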
/*
 * Hashing (SHA1/SHA256)
 */
static int mxs_dcp_run_sha(struct ahash_request *req)
{
	struct dcp *sdcp = global_sdcp;
	int ret;

	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];

	dma_addr_t digest_phys = 0;
	dma_addr_t buf_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_in_buf,
					     DCP_BUF_SZ, DMA_TO_DEVICE);

	/* Fill in the DMA descriptor. */
	desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE |
			 MXS_DCP_CONTROL0_INTERRUPT |
			 MXS_DCP_CONTROL0_ENABLE_HASH;
	if (rctx->init)
		desc->control0 |= MXS_DCP_CONTROL0_HASH_INIT;

	desc->control1 = actx->alg;
	desc->next_cmd_addr = 0;
	desc->source = buf_phys;
	desc->destination = 0;
	desc->size = actx->fill;
	desc->payload = 0;
	desc->status = 0;

	/*
	 * Align the driver with hardware behavior when generating null
	 * hashes: skip the engine for an empty message and return the
	 * precomputed digest instead.
	 */
	if (rctx->init && rctx->fini && desc->size == 0) {
		struct hash_alg_common *halg = crypto_hash_alg_common(tfm);
		const uint8_t *sha_buf =
			(actx->alg == MXS_DCP_CONTROL1_HASH_SELECT_SHA1) ?
			sha1_null_hash : sha256_null_hash;
		memcpy(sdcp->coh->sha_out_buf, sha_buf, halg->digestsize);
		ret = 0;
		goto done_run;
	}

	/* Set HASH_TERM bit for last transfer block. */
	if (rctx->fini) {
		digest_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_out_buf,
					     DCP_SHA_PAY_SZ, DMA_FROM_DEVICE);
		desc->control0 |= MXS_DCP_CONTROL0_HASH_TERM;
		desc->payload = digest_phys;
	}

	ret = mxs_dcp_start_dma(actx);

	if (rctx->fini)
		dma_unmap_single(sdcp->dev, digest_phys, DCP_SHA_PAY_SZ,
				 DMA_FROM_DEVICE);

done_run:
	dma_unmap_single(sdcp->dev, buf_phys, DCP_BUF_SZ, DMA_TO_DEVICE);

	return ret;
}

static int dcp_sha_req_to_buf(struct crypto_async_request *arq)
{
	struct dcp *sdcp = global_sdcp;

	struct ahash_request *req = ahash_request_cast(arq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct hash_alg_common *halg = crypto_hash_alg_common(tfm);
	const int nents = sg_nents(req->src);

	uint8_t *in_buf = sdcp->coh->sha_in_buf;
	uint8_t *out_buf = sdcp->coh->sha_out_buf;

	uint8_t *src_buf;

	struct scatterlist *src;

	unsigned int i, len, clen;
	int ret;

	int fin = rctx->fini;

	if (fin)
		rctx->fini = 0;

	for_each_sg(req->src, src, nents, i) {
		src_buf = sg_virt(src);
		len = sg_dma_len(src);

		do {
			if (actx->fill + len > DCP_BUF_SZ)
				clen = DCP_BUF_SZ - actx->fill;
			else
				clen = len;

			memcpy(in_buf + actx->fill, src_buf, clen);
			len -= clen;
			src_buf += clen;
			actx->fill += clen;

			/*
			 * If we filled the buffer and still have some
			 * more data, submit the buffer.
			 */
			if (len && actx->fill == DCP_BUF_SZ) {
				ret = mxs_dcp_run_sha(req);
				if (ret)
					return ret;
				actx->fill = 0;
				rctx->init = 0;
			}
		} while (len);
	}

	if (fin) {
		rctx->fini = 1;

		/* Submit whatever is left. */
		if (!req->result)
			return -EINVAL;

		ret = mxs_dcp_run_sha(req);
		if (ret)
			return ret;

		actx->fill = 0;

		/*
		 * The hardware delivers the digest in reverse byte order,
		 * consistent with the flipped null-hash tables above.
		 */
		for (i = 0; i < halg->digestsize; i++)
			req->result[i] = out_buf[halg->digestsize - i - 1];
	}

	return 0;
}

static int dcp_chan_thread_sha(void *data)
{
	struct dcp *sdcp = global_sdcp;
	const int chan = DCP_CHAN_HASH_SHA;

	struct crypto_async_request *backlog;
	struct crypto_async_request *arq;

	int ret;

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);

		spin_lock(&sdcp->lock[chan]);
		backlog = crypto_get_backlog(&sdcp->queue[chan]);
		arq = crypto_dequeue_request(&sdcp->queue[chan]);
		spin_unlock(&sdcp->lock[chan]);

		if (!backlog && !arq) {
			schedule();
			continue;
		}

		set_current_state(TASK_RUNNING);

		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);

		if (arq) {
			ret = dcp_sha_req_to_buf(arq);
			arq->complete(arq, ret);
		}
	}

	return 0;
}

static int dcp_sha_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);

	struct hash_alg_common *halg = crypto_hash_alg_common(tfm);

	/*
	 * Start hashing session. The code below only inits the
	 * hashing session context, nothing more.
	 */
	memset(actx, 0, sizeof(*actx));

	if (strcmp(halg->base.cra_name, "sha1") == 0)
		actx->alg = MXS_DCP_CONTROL1_HASH_SELECT_SHA1;
	else
		actx->alg = MXS_DCP_CONTROL1_HASH_SELECT_SHA256;

	actx->fill = 0;
	actx->hot = 0;
	actx->chan = DCP_CHAN_HASH_SHA;

	mutex_init(&actx->mutex);

	return 0;
}
static int dcp_sha_update_fx(struct ahash_request *req, int fini)
{
	struct dcp *sdcp = global_sdcp;

	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);

	int ret;

	/*
	 * Ignore requests that have no data and are not the final
	 * request in the stream of requests.
	 */
	if (!req->nbytes && !fini)
		return 0;

	mutex_lock(&actx->mutex);

	rctx->fini = fini;

	if (!actx->hot) {
		actx->hot = 1;
		rctx->init = 1;
	}

	spin_lock(&sdcp->lock[actx->chan]);
	ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
	spin_unlock(&sdcp->lock[actx->chan]);

	wake_up_process(sdcp->thread[actx->chan]);
	mutex_unlock(&actx->mutex);

	/* Propagate the queue status (-EINPROGRESS, -EBUSY or -ENOSPC). */
	return ret;
}

static int dcp_sha_update(struct ahash_request *req)
{
	return dcp_sha_update_fx(req, 0);
}

static int dcp_sha_final(struct ahash_request *req)
{
	ahash_request_set_crypt(req, NULL, req->result, 0);
	req->nbytes = 0;
	return dcp_sha_update_fx(req, 1);
}

static int dcp_sha_finup(struct ahash_request *req)
{
	return dcp_sha_update_fx(req, 1);
}

static int dcp_sha_digest(struct ahash_request *req)
{
	int ret;

	ret = dcp_sha_init(req);
	if (ret)
		return ret;

	return dcp_sha_finup(req);
}

static int dcp_sha_import(struct ahash_request *req, const void *in)
{
	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
	const struct dcp_export_state *export = in;

	memset(rctx, 0, sizeof(struct dcp_sha_req_ctx));
	memset(actx, 0, sizeof(struct dcp_async_ctx));
	memcpy(rctx, &export->req_ctx, sizeof(struct dcp_sha_req_ctx));
	memcpy(actx, &export->async_ctx, sizeof(struct dcp_async_ctx));

	return 0;
}

static int dcp_sha_export(struct ahash_request *req, void *out)
{
	struct dcp_sha_req_ctx *rctx_state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx_state = crypto_ahash_ctx(tfm);
	struct dcp_export_state *export = out;

	memcpy(&export->req_ctx, rctx_state, sizeof(struct dcp_sha_req_ctx));
	memcpy(&export->async_ctx, actx_state, sizeof(struct dcp_async_ctx));

	return 0;
}

static int dcp_sha_cra_init(struct crypto_tfm *tfm)
{
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct dcp_sha_req_ctx));
	return 0;
}

static void dcp_sha_cra_exit(struct crypto_tfm *tfm)
{
}
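/*
 * Usage sketch (illustrative, error handling elided; my_complete/my_ctx are
 * placeholders): the hash algorithms registered below are exposed through
 * the ahash API, and the export/import pair above lets a caller snapshot
 * the stream state (statesize == sizeof(struct dcp_export_state)) between
 * updates:
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *	struct dcp_export_state state;
 *
 *	ahash_request_set_callback(req, 0, my_complete, my_ctx);
 *	ahash_request_set_crypt(req, sg, digest, nbytes);
 *	crypto_ahash_init(req);
 *	crypto_ahash_update(req);	(repeat per data chunk)
 *	crypto_ahash_export(req, &state);
 *	crypto_ahash_import(req, &state);
 *	crypto_ahash_final(req);	(digest delivered via the callback)
 */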
/* AES 128 ECB and AES 128 CBC */
static struct crypto_alg dcp_aes_algs[] = {
	{
		.cra_name		= "ecb(aes)",
		.cra_driver_name	= "ecb-aes-dcp",
		.cra_priority		= 400,
		.cra_alignmask		= 15,
		.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
					  CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_NEED_FALLBACK,
		.cra_init		= mxs_dcp_aes_fallback_init,
		.cra_exit		= mxs_dcp_aes_fallback_exit,
		.cra_blocksize		= AES_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct dcp_async_ctx),
		.cra_type		= &crypto_ablkcipher_type,
		.cra_module		= THIS_MODULE,
		.cra_u	= {
			.ablkcipher = {
				.min_keysize	= AES_MIN_KEY_SIZE,
				.max_keysize	= AES_MAX_KEY_SIZE,
				.setkey		= mxs_dcp_aes_setkey,
				.encrypt	= mxs_dcp_aes_ecb_encrypt,
				.decrypt	= mxs_dcp_aes_ecb_decrypt
			},
		},
	}, {
		.cra_name		= "cbc(aes)",
		.cra_driver_name	= "cbc-aes-dcp",
		.cra_priority		= 400,
		.cra_alignmask		= 15,
		.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
					  CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_NEED_FALLBACK,
		.cra_init		= mxs_dcp_aes_fallback_init,
		.cra_exit		= mxs_dcp_aes_fallback_exit,
		.cra_blocksize		= AES_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct dcp_async_ctx),
		.cra_type		= &crypto_ablkcipher_type,
		.cra_module		= THIS_MODULE,
		.cra_u	= {
			.ablkcipher = {
				.min_keysize	= AES_MIN_KEY_SIZE,
				.max_keysize	= AES_MAX_KEY_SIZE,
				.setkey		= mxs_dcp_aes_setkey,
				.encrypt	= mxs_dcp_aes_cbc_encrypt,
				.decrypt	= mxs_dcp_aes_cbc_decrypt,
				.ivsize		= AES_BLOCK_SIZE,
			},
		},
	},
};

/* SHA1 */
static struct ahash_alg dcp_sha1_alg = {
	.init	= dcp_sha_init,
	.update	= dcp_sha_update,
	.final	= dcp_sha_final,
	.finup	= dcp_sha_finup,
	.digest	= dcp_sha_digest,
	.import	= dcp_sha_import,
	.export	= dcp_sha_export,
	.halg	= {
		.digestsize	= SHA1_DIGEST_SIZE,
		.statesize	= sizeof(struct dcp_export_state),
		.base		= {
			.cra_name		= "sha1",
			.cra_driver_name	= "sha1-dcp",
			.cra_priority		= 400,
			.cra_alignmask		= 63,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA1_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct dcp_async_ctx),
			.cra_module		= THIS_MODULE,
			.cra_init		= dcp_sha_cra_init,
			.cra_exit		= dcp_sha_cra_exit,
		},
	},
};

/* SHA256 */
static struct ahash_alg dcp_sha256_alg = {
	.init	= dcp_sha_init,
	.update	= dcp_sha_update,
	.final	= dcp_sha_final,
	.finup	= dcp_sha_finup,
	.digest	= dcp_sha_digest,
	.import	= dcp_sha_import,
	.export	= dcp_sha_export,
	.halg	= {
		.digestsize	= SHA256_DIGEST_SIZE,
		.statesize	= sizeof(struct dcp_export_state),
		.base		= {
			.cra_name		= "sha256",
			.cra_driver_name	= "sha256-dcp",
			.cra_priority		= 400,
			.cra_alignmask		= 63,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA256_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct dcp_async_ctx),
			.cra_module		= THIS_MODULE,
			.cra_init		= dcp_sha_cra_init,
			.cra_exit		= dcp_sha_cra_exit,
		},
	},
};
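/*
 * Sketch (illustrative): cra_priority of 400 outranks the generic C
 * implementations, so a plain "sha256" allocation should land on this
 * driver once it is registered. One hypothetical way to verify:
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *
 *	if (!IS_ERR(tfm))
 *		pr_info("sha256 backed by %s\n",
 *			crypto_tfm_alg_driver_name(crypto_ahash_tfm(tfm)));
 */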
static irqreturn_t mxs_dcp_irq(int irq, void *context)
{
	struct dcp *sdcp = context;
	uint32_t stat;
	int i;

	stat = readl(sdcp->base + MXS_DCP_STAT);
	stat &= MXS_DCP_STAT_IRQ_MASK;
	if (!stat)
		return IRQ_NONE;

	/* Clear the interrupts. */
	writel(stat, sdcp->base + MXS_DCP_STAT_CLR);

	/* Complete the DMA requests that finished. */
	for (i = 0; i < DCP_MAX_CHANS; i++)
		if (stat & (1 << i))
			complete(&sdcp->completion[i]);

	return IRQ_HANDLED;
}

static int mxs_dcp_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct dcp *sdcp = NULL;
	int i, ret;

	struct resource *iores;
	int dcp_vmi_irq, dcp_irq;

	if (global_sdcp) {
		dev_err(dev, "Only one DCP instance allowed!\n");
		return -ENODEV;
	}

	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	dcp_vmi_irq = platform_get_irq(pdev, 0);
	if (dcp_vmi_irq < 0) {
		dev_err(dev, "Failed to get IRQ: (%d)!\n", dcp_vmi_irq);
		return dcp_vmi_irq;
	}

	dcp_irq = platform_get_irq(pdev, 1);
	if (dcp_irq < 0) {
		dev_err(dev, "Failed to get IRQ: (%d)!\n", dcp_irq);
		return dcp_irq;
	}

	sdcp = devm_kzalloc(dev, sizeof(*sdcp), GFP_KERNEL);
	if (!sdcp)
		return -ENOMEM;

	sdcp->dev = dev;
	sdcp->base = devm_ioremap_resource(dev, iores);
	if (IS_ERR(sdcp->base))
		return PTR_ERR(sdcp->base);

	ret = devm_request_irq(dev, dcp_vmi_irq, mxs_dcp_irq, 0,
			       "dcp-vmi-irq", sdcp);
	if (ret) {
		dev_err(dev, "Failed to claim DCP VMI IRQ!\n");
		return ret;
	}

	ret = devm_request_irq(dev, dcp_irq, mxs_dcp_irq, 0,
			       "dcp-irq", sdcp);
	if (ret) {
		dev_err(dev, "Failed to claim DCP IRQ!\n");
		return ret;
	}

	/* Allocate coherent helper block. */
	sdcp->coh = devm_kzalloc(dev, sizeof(*sdcp->coh) + DCP_ALIGNMENT,
				 GFP_KERNEL);
	if (!sdcp->coh)
		return -ENOMEM;

	/* Re-align the structure so it fits the DCP constraints. */
	sdcp->coh = PTR_ALIGN(sdcp->coh, DCP_ALIGNMENT);

	/* DCP clock is optional, only used on some SoCs. */
	sdcp->dcp_clk = devm_clk_get(dev, "dcp");
	if (IS_ERR(sdcp->dcp_clk)) {
		if (sdcp->dcp_clk != ERR_PTR(-ENOENT))
			return PTR_ERR(sdcp->dcp_clk);
		sdcp->dcp_clk = NULL;
	}
	ret = clk_prepare_enable(sdcp->dcp_clk);
	if (ret)
		return ret;

	/* Restart the DCP block. */
	ret = stmp_reset_block(sdcp->base);
	if (ret) {
		dev_err(dev, "Failed reset\n");
		goto err_disable_unprepare_clk;
	}

	/* Initialize control register. */
	writel(MXS_DCP_CTRL_GATHER_RESIDUAL_WRITES |
	       MXS_DCP_CTRL_ENABLE_CONTEXT_CACHING | 0xf,
	       sdcp->base + MXS_DCP_CTRL);

	/* Enable all DCP DMA channels. */
	writel(MXS_DCP_CHANNELCTRL_ENABLE_CHANNEL_MASK,
	       sdcp->base + MXS_DCP_CHANNELCTRL);

	/*
	 * We do not enable context switching. Give the context buffer a
	 * pointer to an illegal address so if context switching is
	 * inadvertently enabled, the DCP will return an error instead of
	 * trashing good memory. The DCP DMA cannot access ROM, so any ROM
	 * address will do.
	 */
	writel(0xffff0000, sdcp->base + MXS_DCP_CONTEXT);
	for (i = 0; i < DCP_MAX_CHANS; i++)
		writel(0xffffffff, sdcp->base + MXS_DCP_CH_N_STAT_CLR(i));
	writel(0xffffffff, sdcp->base + MXS_DCP_STAT_CLR);

	global_sdcp = sdcp;

	platform_set_drvdata(pdev, sdcp);

	for (i = 0; i < DCP_MAX_CHANS; i++) {
		spin_lock_init(&sdcp->lock[i]);
		init_completion(&sdcp->completion[i]);
		crypto_init_queue(&sdcp->queue[i], 50);
	}

	/* Create the SHA and AES handler threads. */
	sdcp->thread[DCP_CHAN_HASH_SHA] = kthread_run(dcp_chan_thread_sha,
						      NULL, "mxs_dcp_chan/sha");
	if (IS_ERR(sdcp->thread[DCP_CHAN_HASH_SHA])) {
		dev_err(dev, "Error starting SHA thread!\n");
		ret = PTR_ERR(sdcp->thread[DCP_CHAN_HASH_SHA]);
		goto err_disable_unprepare_clk;
	}

	sdcp->thread[DCP_CHAN_CRYPTO] = kthread_run(dcp_chan_thread_aes,
						    NULL, "mxs_dcp_chan/aes");
	if (IS_ERR(sdcp->thread[DCP_CHAN_CRYPTO])) {
		dev_err(dev, "Error starting AES thread!\n");
		ret = PTR_ERR(sdcp->thread[DCP_CHAN_CRYPTO]);
		goto err_destroy_sha_thread;
	}

	/* Register the various crypto algorithms. */
	sdcp->caps = readl(sdcp->base + MXS_DCP_CAPABILITY1);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128) {
		ret = crypto_register_algs(dcp_aes_algs,
					   ARRAY_SIZE(dcp_aes_algs));
		if (ret) {
			/* Failed to register algorithm. */
			dev_err(dev, "Failed to register AES crypto!\n");
			goto err_destroy_aes_thread;
		}
	}

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1) {
		ret = crypto_register_ahash(&dcp_sha1_alg);
		if (ret) {
			dev_err(dev, "Failed to register %s hash!\n",
				dcp_sha1_alg.halg.base.cra_name);
			goto err_unregister_aes;
		}
	}

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA256) {
		ret = crypto_register_ahash(&dcp_sha256_alg);
		if (ret) {
			dev_err(dev, "Failed to register %s hash!\n",
				dcp_sha256_alg.halg.base.cra_name);
			goto err_unregister_sha1;
		}
	}

	return 0;

err_unregister_sha1:
	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1)
		crypto_unregister_ahash(&dcp_sha1_alg);

err_unregister_aes:
	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128)
		crypto_unregister_algs(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));

err_destroy_aes_thread:
	kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);

err_destroy_sha_thread:
	kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);

err_disable_unprepare_clk:
	clk_disable_unprepare(sdcp->dcp_clk);

	return ret;
}

static int mxs_dcp_remove(struct platform_device *pdev)
{
	struct dcp *sdcp = platform_get_drvdata(pdev);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA256)
		crypto_unregister_ahash(&dcp_sha256_alg);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1)
		crypto_unregister_ahash(&dcp_sha1_alg);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128)
		crypto_unregister_algs(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));

	kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);
	kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);

	clk_disable_unprepare(sdcp->dcp_clk);

	platform_set_drvdata(pdev, NULL);

	global_sdcp = NULL;

	return 0;
}

static const struct of_device_id mxs_dcp_dt_ids[] = {
	{ .compatible = "fsl,imx23-dcp", .data = NULL, },
	{ .compatible = "fsl,imx28-dcp", .data = NULL, },
	{ /* sentinel */ }
};

MODULE_DEVICE_TABLE(of, mxs_dcp_dt_ids);

static struct platform_driver mxs_dcp_driver = {
	.probe	= mxs_dcp_probe,
	.remove	= mxs_dcp_remove,
	.driver	= {
		.name		= "mxs-dcp",
		.of_match_table	= mxs_dcp_dt_ids,
	},
};

module_platform_driver(mxs_dcp_driver);

MODULE_AUTHOR("Marek Vasut <marex@denx.de>");
MODULE_DESCRIPTION("Freescale MXS DCP Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:mxs-dcp");
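/*
 * Device tree sketch (illustrative; the authoritative node lives in the
 * SoC dtsi and the numbers below are assumptions shown only for shape):
 *
 *	dcp: crypto@80028000 {
 *		compatible = "fsl,imx28-dcp";
 *		reg = <0x80028000 0x2000>;
 *		interrupts = <52 53 54>;
 *	};
 */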