// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Freescale i.MX23/i.MX28 Data Co-Processor driver
 *
 * Copyright (C) 2013 Marek Vasut <marex@denx.de>
 */

#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/stmp_device.h>
#include <linux/clk.h>

#include <crypto/aes.h>
#include <crypto/sha.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>

#define DCP_MAX_CHANS	4
#define DCP_BUF_SZ	PAGE_SIZE
#define DCP_SHA_PAY_SZ	64

#define DCP_ALIGNMENT	64

/*
 * Null hashes to align with hw behavior on imx6sl and imx6ull;
 * these are flipped for consistency with hw output.
 */
static const uint8_t sha1_null_hash[] =
	"\x09\x07\xd8\xaf\x90\x18\x60\x95\xef\xbf"
	"\x55\x32\x0d\x4b\x6b\x5e\xee\xa3\x39\xda";

static const uint8_t sha256_null_hash[] =
	"\x55\xb8\x52\x78\x1b\x99\x95\xa4"
	"\x4c\x93\x9b\x64\xe4\x41\xae\x27"
	"\x24\xb9\x6f\x99\xc8\xf4\xfb\x9a"
	"\x14\x1c\xfc\x98\x42\xc4\xb0\xe3";

/* DCP DMA descriptor. */
struct dcp_dma_desc {
	uint32_t	next_cmd_addr;
	uint32_t	control0;
	uint32_t	control1;
	uint32_t	source;
	uint32_t	destination;
	uint32_t	size;
	uint32_t	payload;
	uint32_t	status;
};

/* Coherent aligned block for bounce buffering. */
struct dcp_coherent_block {
	uint8_t			aes_in_buf[DCP_BUF_SZ];
	uint8_t			aes_out_buf[DCP_BUF_SZ];
	uint8_t			sha_in_buf[DCP_BUF_SZ];
	uint8_t			sha_out_buf[DCP_SHA_PAY_SZ];

	uint8_t			aes_key[2 * AES_KEYSIZE_128];

	struct dcp_dma_desc	desc[DCP_MAX_CHANS];
};

struct dcp {
	struct device			*dev;
	void __iomem			*base;

	uint32_t			caps;

	struct dcp_coherent_block	*coh;

	struct completion		completion[DCP_MAX_CHANS];
	spinlock_t			lock[DCP_MAX_CHANS];
	struct task_struct		*thread[DCP_MAX_CHANS];
	struct crypto_queue		queue[DCP_MAX_CHANS];
	struct clk			*dcp_clk;
};

enum dcp_chan {
	DCP_CHAN_HASH_SHA	= 0,
	DCP_CHAN_CRYPTO		= 2,
};

struct dcp_async_ctx {
	/* Common context */
	enum dcp_chan	chan;
	uint32_t	fill;

	/* SHA Hash-specific context */
	struct mutex			mutex;
	uint32_t			alg;
	unsigned int			hot:1;

	/* Crypto-specific context */
	struct crypto_sync_skcipher	*fallback;
	unsigned int			key_len;
	uint8_t				key[AES_KEYSIZE_128];
};

struct dcp_aes_req_ctx {
	unsigned int	enc:1;
	unsigned int	ecb:1;
};

struct dcp_sha_req_ctx {
	unsigned int	init:1;
	unsigned int	fini:1;
};

struct dcp_export_state {
	struct dcp_sha_req_ctx req_ctx;
	struct dcp_async_ctx async_ctx;
};

/*
 * There can be only one instance of the MXS DCP, due to the
 * design of the Linux Crypto API.
 */
static struct dcp *global_sdcp;
/* DCP register layout. */
#define MXS_DCP_CTRL				0x00
#define MXS_DCP_CTRL_GATHER_RESIDUAL_WRITES	(1 << 23)
#define MXS_DCP_CTRL_ENABLE_CONTEXT_CACHING	(1 << 22)

#define MXS_DCP_STAT				0x10
#define MXS_DCP_STAT_CLR			0x18
#define MXS_DCP_STAT_IRQ_MASK			0xf

#define MXS_DCP_CHANNELCTRL			0x20
#define MXS_DCP_CHANNELCTRL_ENABLE_CHANNEL_MASK	0xff

#define MXS_DCP_CAPABILITY1			0x40
#define MXS_DCP_CAPABILITY1_SHA256		(4 << 16)
#define MXS_DCP_CAPABILITY1_SHA1		(1 << 16)
#define MXS_DCP_CAPABILITY1_AES128		(1 << 0)

#define MXS_DCP_CONTEXT				0x50

#define MXS_DCP_CH_N_CMDPTR(n)			(0x100 + ((n) * 0x40))

#define MXS_DCP_CH_N_SEMA(n)			(0x110 + ((n) * 0x40))

#define MXS_DCP_CH_N_STAT(n)			(0x120 + ((n) * 0x40))
#define MXS_DCP_CH_N_STAT_CLR(n)		(0x128 + ((n) * 0x40))

/* DMA descriptor bits. */
#define MXS_DCP_CONTROL0_HASH_TERM		(1 << 13)
#define MXS_DCP_CONTROL0_HASH_INIT		(1 << 12)
#define MXS_DCP_CONTROL0_PAYLOAD_KEY		(1 << 11)
#define MXS_DCP_CONTROL0_CIPHER_ENCRYPT		(1 << 8)
#define MXS_DCP_CONTROL0_CIPHER_INIT		(1 << 9)
#define MXS_DCP_CONTROL0_ENABLE_HASH		(1 << 6)
#define MXS_DCP_CONTROL0_ENABLE_CIPHER		(1 << 5)
#define MXS_DCP_CONTROL0_DECR_SEMAPHORE		(1 << 1)
#define MXS_DCP_CONTROL0_INTERRUPT		(1 << 0)

#define MXS_DCP_CONTROL1_HASH_SELECT_SHA256	(2 << 16)
#define MXS_DCP_CONTROL1_HASH_SELECT_SHA1	(0 << 16)
#define MXS_DCP_CONTROL1_CIPHER_MODE_CBC	(1 << 4)
#define MXS_DCP_CONTROL1_CIPHER_MODE_ECB	(0 << 4)
#define MXS_DCP_CONTROL1_CIPHER_SELECT_AES128	(0 << 0)
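/*
 * For orientation only, a rough sketch (not used by the code) of how
 * mxs_dcp_run_aes() below combines these bits for a single AES-128-CBC
 * encryption packet; the payload pointer covers the key immediately
 * followed by the IV in the coherent block:
 *
 *	desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE |
 *			 MXS_DCP_CONTROL0_INTERRUPT |
 *			 MXS_DCP_CONTROL0_ENABLE_CIPHER |
 *			 MXS_DCP_CONTROL0_PAYLOAD_KEY |
 *			 MXS_DCP_CONTROL0_CIPHER_ENCRYPT |
 *			 MXS_DCP_CONTROL0_CIPHER_INIT;
 *	desc->control1 = MXS_DCP_CONTROL1_CIPHER_SELECT_AES128 |
 *			 MXS_DCP_CONTROL1_CIPHER_MODE_CBC;
 *	desc->payload  = key_phys;	(key, then IV)
 */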
static int mxs_dcp_start_dma(struct dcp_async_ctx *actx)
{
	struct dcp *sdcp = global_sdcp;
	const int chan = actx->chan;
	uint32_t stat;
	unsigned long ret;
	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];

	dma_addr_t desc_phys = dma_map_single(sdcp->dev, desc, sizeof(*desc),
					      DMA_TO_DEVICE);

	reinit_completion(&sdcp->completion[chan]);

	/* Clear status register. */
	writel(0xffffffff, sdcp->base + MXS_DCP_CH_N_STAT_CLR(chan));

	/* Load the DMA descriptor. */
	writel(desc_phys, sdcp->base + MXS_DCP_CH_N_CMDPTR(chan));

	/* Increment the semaphore to start the DMA transfer. */
	writel(1, sdcp->base + MXS_DCP_CH_N_SEMA(chan));

	ret = wait_for_completion_timeout(&sdcp->completion[chan],
					  msecs_to_jiffies(1000));
	if (!ret) {
		dev_err(sdcp->dev, "Channel %i timeout (DCP_STAT=0x%08x)\n",
			chan, readl(sdcp->base + MXS_DCP_STAT));
		return -ETIMEDOUT;
	}

	stat = readl(sdcp->base + MXS_DCP_CH_N_STAT(chan));
	if (stat & 0xff) {
		dev_err(sdcp->dev, "Channel %i error (CH_STAT=0x%08x)\n",
			chan, stat);
		return -EINVAL;
	}

	dma_unmap_single(sdcp->dev, desc_phys, sizeof(*desc), DMA_TO_DEVICE);

	return 0;
}

/*
 * Encryption (AES128)
 */
static int mxs_dcp_run_aes(struct dcp_async_ctx *actx,
			   struct skcipher_request *req, int init)
{
	struct dcp *sdcp = global_sdcp;
	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
	struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
	int ret;

	dma_addr_t key_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_key,
					     2 * AES_KEYSIZE_128,
					     DMA_TO_DEVICE);
	dma_addr_t src_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_in_buf,
					     DCP_BUF_SZ, DMA_TO_DEVICE);
	dma_addr_t dst_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_out_buf,
					     DCP_BUF_SZ, DMA_FROM_DEVICE);

	if (actx->fill % AES_BLOCK_SIZE) {
		dev_err(sdcp->dev, "Invalid block size!\n");
		ret = -EINVAL;
		goto aes_done_run;
	}

	/* Fill in the DMA descriptor. */
	desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE |
			 MXS_DCP_CONTROL0_INTERRUPT |
			 MXS_DCP_CONTROL0_ENABLE_CIPHER;

	/* Payload contains the key. */
	desc->control0 |= MXS_DCP_CONTROL0_PAYLOAD_KEY;

	if (rctx->enc)
		desc->control0 |= MXS_DCP_CONTROL0_CIPHER_ENCRYPT;
	if (init)
		desc->control0 |= MXS_DCP_CONTROL0_CIPHER_INIT;

	desc->control1 = MXS_DCP_CONTROL1_CIPHER_SELECT_AES128;

	if (rctx->ecb)
		desc->control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_ECB;
	else
		desc->control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_CBC;

	desc->next_cmd_addr = 0;
	desc->source = src_phys;
	desc->destination = dst_phys;
	desc->size = actx->fill;
	desc->payload = key_phys;
	desc->status = 0;

	ret = mxs_dcp_start_dma(actx);

aes_done_run:
	dma_unmap_single(sdcp->dev, key_phys, 2 * AES_KEYSIZE_128,
			 DMA_TO_DEVICE);
	dma_unmap_single(sdcp->dev, src_phys, DCP_BUF_SZ, DMA_TO_DEVICE);
	dma_unmap_single(sdcp->dev, dst_phys, DCP_BUF_SZ, DMA_FROM_DEVICE);

	return ret;
}
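/*
 * Requests are bounced through the coherent aes_in_buf/aes_out_buf rather
 * than DMA'd straight from the scatterlists: source data is copied in up
 * to DCP_BUF_SZ at a time, a descriptor is submitted whenever the bounce
 * buffer fills (or the last/limit SG entry is reached), and the result is
 * then copied back out to the destination scatterlist.
 */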
static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
{
	struct dcp *sdcp = global_sdcp;

	struct skcipher_request *req = skcipher_request_cast(arq);
	struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm);
	struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);

	struct scatterlist *dst = req->dst;
	struct scatterlist *src = req->src;
	const int nents = sg_nents(req->src);

	const int out_off = DCP_BUF_SZ;
	uint8_t *in_buf = sdcp->coh->aes_in_buf;
	uint8_t *out_buf = sdcp->coh->aes_out_buf;

	uint8_t *out_tmp, *src_buf, *dst_buf = NULL;
	uint32_t dst_off = 0;
	uint32_t last_out_len = 0;

	uint8_t *key = sdcp->coh->aes_key;

	int ret = 0;
	int split = 0;
	unsigned int i, len, clen, rem = 0, tlen = 0;
	int init = 0;
	bool limit_hit = false;

	actx->fill = 0;

	/* Copy the key from the temporary location. */
	memcpy(key, actx->key, actx->key_len);

	if (!rctx->ecb) {
		/* Copy the CBC IV just past the key. */
		memcpy(key + AES_KEYSIZE_128, req->iv, AES_KEYSIZE_128);
		/* CBC needs the INIT set. */
		init = 1;
	} else {
		memset(key + AES_KEYSIZE_128, 0, AES_KEYSIZE_128);
	}

	for_each_sg(req->src, src, nents, i) {
		src_buf = sg_virt(src);
		len = sg_dma_len(src);
		tlen += len;
		limit_hit = tlen > req->cryptlen;

		if (limit_hit)
			len = req->cryptlen - (tlen - len);

		do {
			if (actx->fill + len > out_off)
				clen = out_off - actx->fill;
			else
				clen = len;

			memcpy(in_buf + actx->fill, src_buf, clen);
			len -= clen;
			src_buf += clen;
			actx->fill += clen;

			/*
			 * If we filled the buffer or this is the last SG,
			 * submit the buffer.
			 */
			if (actx->fill == out_off || sg_is_last(src) ||
			    limit_hit) {
				ret = mxs_dcp_run_aes(actx, req, init);
				if (ret)
					return ret;
				init = 0;

				out_tmp = out_buf;
				last_out_len = actx->fill;
				while (dst && actx->fill) {
					if (!split) {
						dst_buf = sg_virt(dst);
						dst_off = 0;
					}
					rem = min(sg_dma_len(dst) - dst_off,
						  actx->fill);

					memcpy(dst_buf + dst_off, out_tmp, rem);
					out_tmp += rem;
					dst_off += rem;
					actx->fill -= rem;

					if (dst_off == sg_dma_len(dst)) {
						dst = sg_next(dst);
						split = 0;
					} else {
						split = 1;
					}
				}
			}
		} while (len);

		if (limit_hit)
			break;
	}

	/* Copy the IV for CBC for chaining */
	if (!rctx->ecb) {
		if (rctx->enc)
			memcpy(req->iv, out_buf+(last_out_len-AES_BLOCK_SIZE),
			       AES_BLOCK_SIZE);
		else
			memcpy(req->iv, in_buf+(last_out_len-AES_BLOCK_SIZE),
			       AES_BLOCK_SIZE);
	}

	return ret;
}

static int dcp_chan_thread_aes(void *data)
{
	struct dcp *sdcp = global_sdcp;
	const int chan = DCP_CHAN_CRYPTO;

	struct crypto_async_request *backlog;
	struct crypto_async_request *arq;

	int ret;

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);

		spin_lock(&sdcp->lock[chan]);
		backlog = crypto_get_backlog(&sdcp->queue[chan]);
		arq = crypto_dequeue_request(&sdcp->queue[chan]);
		spin_unlock(&sdcp->lock[chan]);

		if (!backlog && !arq) {
			schedule();
			continue;
		}

		set_current_state(TASK_RUNNING);

		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);

		if (arq) {
			ret = mxs_dcp_aes_block_crypt(arq);
			arq->complete(arq, ret);
		}
	}

	return 0;
}

static int mxs_dcp_block_fallback(struct skcipher_request *req, int enc)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct dcp_async_ctx *ctx = crypto_skcipher_ctx(tfm);
	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
	int ret;

	skcipher_request_set_sync_tfm(subreq, ctx->fallback);
	skcipher_request_set_callback(subreq, req->base.flags, NULL, NULL);
	skcipher_request_set_crypt(subreq, req->src, req->dst,
				   req->cryptlen, req->iv);

	if (enc)
		ret = crypto_skcipher_encrypt(subreq);
	else
		ret = crypto_skcipher_decrypt(subreq);

	skcipher_request_zero(subreq);

	return ret;
}
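/*
 * The entry points below never touch the hardware directly: requests are
 * queued on the crypto channel and the dcp_chan_thread_aes() kthread is
 * woken to process them. Only AES-128 is handled by the DCP itself; any
 * other key length set via mxs_dcp_aes_setkey() is routed to the software
 * fallback above.
 */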
static int mxs_dcp_aes_enqueue(struct skcipher_request *req, int enc, int ecb)
{
	struct dcp *sdcp = global_sdcp;
	struct crypto_async_request *arq = &req->base;
	struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm);
	struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
	int ret;

	if (unlikely(actx->key_len != AES_KEYSIZE_128))
		return mxs_dcp_block_fallback(req, enc);

	rctx->enc = enc;
	rctx->ecb = ecb;
	actx->chan = DCP_CHAN_CRYPTO;

	spin_lock(&sdcp->lock[actx->chan]);
	ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
	spin_unlock(&sdcp->lock[actx->chan]);

	wake_up_process(sdcp->thread[actx->chan]);

	return ret;
}

static int mxs_dcp_aes_ecb_decrypt(struct skcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 0, 1);
}

static int mxs_dcp_aes_ecb_encrypt(struct skcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 1, 1);
}

static int mxs_dcp_aes_cbc_decrypt(struct skcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 0, 0);
}

static int mxs_dcp_aes_cbc_encrypt(struct skcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 1, 0);
}

static int mxs_dcp_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
			      unsigned int len)
{
	struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm);
	unsigned int ret;

	/*
	 * AES 128 is supported by the hardware, so store the key in a
	 * temporary buffer and exit. We must use the temporary buffer here,
	 * since there can still be an operation in progress.
	 */
	actx->key_len = len;
	if (len == AES_KEYSIZE_128) {
		memcpy(actx->key, key, len);
		return 0;
	}

	/*
	 * If the requested AES key size is not supported by the hardware,
	 * but is supported by in-kernel software implementation, we use
	 * software fallback.
	 */
	crypto_sync_skcipher_clear_flags(actx->fallback, CRYPTO_TFM_REQ_MASK);
	crypto_sync_skcipher_set_flags(actx->fallback,
				       tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

	ret = crypto_sync_skcipher_setkey(actx->fallback, key, len);
	if (!ret)
		return 0;

	tfm->base.crt_flags &= ~CRYPTO_TFM_RES_MASK;
	tfm->base.crt_flags |= crypto_sync_skcipher_get_flags(actx->fallback) &
			       CRYPTO_TFM_RES_MASK;

	return ret;
}

static int mxs_dcp_aes_fallback_init_tfm(struct crypto_skcipher *tfm)
{
	const char *name = crypto_tfm_alg_name(crypto_skcipher_tfm(tfm));
	struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm);
	struct crypto_sync_skcipher *blk;

	blk = crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(blk))
		return PTR_ERR(blk);

	actx->fallback = blk;
	crypto_skcipher_set_reqsize(tfm, sizeof(struct dcp_aes_req_ctx));
	return 0;
}

static void mxs_dcp_aes_fallback_exit_tfm(struct crypto_skcipher *tfm)
{
	struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm);

	crypto_free_sync_skcipher(actx->fallback);
}

/*
 * Hashing (SHA1/SHA256)
 */
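/*
 * The hash path mirrors the AES path: data is accumulated into the coherent
 * sha_in_buf in DCP_BUF_SZ chunks, the first descriptor of a session carries
 * HASH_INIT, the last one carries HASH_TERM, and the digest is written by
 * the hardware into sha_out_buf, from which it is copied out byte-reversed.
 */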
static int mxs_dcp_run_sha(struct ahash_request *req)
{
	struct dcp *sdcp = global_sdcp;
	int ret;

	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];

	dma_addr_t digest_phys = 0;
	dma_addr_t buf_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_in_buf,
					     DCP_BUF_SZ, DMA_TO_DEVICE);

	/* Fill in the DMA descriptor. */
	desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE |
			 MXS_DCP_CONTROL0_INTERRUPT |
			 MXS_DCP_CONTROL0_ENABLE_HASH;
	if (rctx->init)
		desc->control0 |= MXS_DCP_CONTROL0_HASH_INIT;

	desc->control1 = actx->alg;
	desc->next_cmd_addr = 0;
	desc->source = buf_phys;
	desc->destination = 0;
	desc->size = actx->fill;
	desc->payload = 0;
	desc->status = 0;

	/*
	 * Align driver with hw behavior when generating null hashes
	 */
	if (rctx->init && rctx->fini && desc->size == 0) {
		struct hash_alg_common *halg = crypto_hash_alg_common(tfm);
		const uint8_t *sha_buf =
			(actx->alg == MXS_DCP_CONTROL1_HASH_SELECT_SHA1) ?
			sha1_null_hash : sha256_null_hash;
		memcpy(sdcp->coh->sha_out_buf, sha_buf, halg->digestsize);
		ret = 0;
		goto done_run;
	}

	/* Set HASH_TERM bit for last transfer block. */
	if (rctx->fini) {
		digest_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_out_buf,
					     DCP_SHA_PAY_SZ, DMA_FROM_DEVICE);
		desc->control0 |= MXS_DCP_CONTROL0_HASH_TERM;
		desc->payload = digest_phys;
	}

	ret = mxs_dcp_start_dma(actx);

	if (rctx->fini)
		dma_unmap_single(sdcp->dev, digest_phys, DCP_SHA_PAY_SZ,
				 DMA_FROM_DEVICE);

done_run:
	dma_unmap_single(sdcp->dev, buf_phys, DCP_BUF_SZ, DMA_TO_DEVICE);

	return ret;
}

static int dcp_sha_req_to_buf(struct crypto_async_request *arq)
{
	struct dcp *sdcp = global_sdcp;

	struct ahash_request *req = ahash_request_cast(arq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct hash_alg_common *halg = crypto_hash_alg_common(tfm);
	const int nents = sg_nents(req->src);

	uint8_t *in_buf = sdcp->coh->sha_in_buf;
	uint8_t *out_buf = sdcp->coh->sha_out_buf;

	uint8_t *src_buf;

	struct scatterlist *src;

	unsigned int i, len, clen;
	int ret;

	int fin = rctx->fini;

	if (fin)
		rctx->fini = 0;

	for_each_sg(req->src, src, nents, i) {
		src_buf = sg_virt(src);
		len = sg_dma_len(src);

		do {
			if (actx->fill + len > DCP_BUF_SZ)
				clen = DCP_BUF_SZ - actx->fill;
			else
				clen = len;

			memcpy(in_buf + actx->fill, src_buf, clen);
			len -= clen;
			src_buf += clen;
			actx->fill += clen;

			/*
			 * If we filled the buffer and still have some
			 * more data, submit the buffer.
			 */
			if (len && actx->fill == DCP_BUF_SZ) {
				ret = mxs_dcp_run_sha(req);
				if (ret)
					return ret;
				actx->fill = 0;
				rctx->init = 0;
			}
		} while (len);
	}

	if (fin) {
		rctx->fini = 1;

		/* Submit whatever is left. */
		if (!req->result)
			return -EINVAL;

		ret = mxs_dcp_run_sha(req);
		if (ret)
			return ret;

		actx->fill = 0;

		/* For some reason the result is flipped */
		for (i = 0; i < halg->digestsize; i++)
			req->result[i] = out_buf[halg->digestsize - i - 1];
	}

	return 0;
}
static int dcp_chan_thread_sha(void *data)
{
	struct dcp *sdcp = global_sdcp;
	const int chan = DCP_CHAN_HASH_SHA;

	struct crypto_async_request *backlog;
	struct crypto_async_request *arq;
	int ret;

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);

		spin_lock(&sdcp->lock[chan]);
		backlog = crypto_get_backlog(&sdcp->queue[chan]);
		arq = crypto_dequeue_request(&sdcp->queue[chan]);
		spin_unlock(&sdcp->lock[chan]);

		if (!backlog && !arq) {
			schedule();
			continue;
		}

		set_current_state(TASK_RUNNING);

		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);

		if (arq) {
			ret = dcp_sha_req_to_buf(arq);
			arq->complete(arq, ret);
		}
	}

	return 0;
}

static int dcp_sha_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);

	struct hash_alg_common *halg = crypto_hash_alg_common(tfm);

	/*
	 * Start hashing session. The code below only inits the
	 * hashing session context, nothing more.
	 */
	memset(actx, 0, sizeof(*actx));

	if (strcmp(halg->base.cra_name, "sha1") == 0)
		actx->alg = MXS_DCP_CONTROL1_HASH_SELECT_SHA1;
	else
		actx->alg = MXS_DCP_CONTROL1_HASH_SELECT_SHA256;

	actx->fill = 0;
	actx->hot = 0;
	actx->chan = DCP_CHAN_HASH_SHA;

	mutex_init(&actx->mutex);

	return 0;
}
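/*
 * actx->hot marks a hashing session that has already been started: only the
 * first update of a session sets rctx->init, so HASH_INIT is emitted exactly
 * once per stream of update/final requests.
 */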
static int dcp_sha_update_fx(struct ahash_request *req, int fini)
{
	struct dcp *sdcp = global_sdcp;

	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);

	int ret;

	/*
	 * Ignore requests that have no data in them and are not
	 * the trailing requests in the stream of requests.
	 */
	if (!req->nbytes && !fini)
		return 0;

	mutex_lock(&actx->mutex);

	rctx->fini = fini;

	if (!actx->hot) {
		actx->hot = 1;
		rctx->init = 1;
	}

	spin_lock(&sdcp->lock[actx->chan]);
	ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
	spin_unlock(&sdcp->lock[actx->chan]);

	wake_up_process(sdcp->thread[actx->chan]);
	mutex_unlock(&actx->mutex);

	return ret;
}

static int dcp_sha_update(struct ahash_request *req)
{
	return dcp_sha_update_fx(req, 0);
}

static int dcp_sha_final(struct ahash_request *req)
{
	ahash_request_set_crypt(req, NULL, req->result, 0);
	req->nbytes = 0;
	return dcp_sha_update_fx(req, 1);
}

static int dcp_sha_finup(struct ahash_request *req)
{
	return dcp_sha_update_fx(req, 1);
}

static int dcp_sha_digest(struct ahash_request *req)
{
	int ret;

	ret = dcp_sha_init(req);
	if (ret)
		return ret;

	return dcp_sha_finup(req);
}

static int dcp_sha_import(struct ahash_request *req, const void *in)
{
	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
	const struct dcp_export_state *export = in;

	memset(rctx, 0, sizeof(struct dcp_sha_req_ctx));
	memset(actx, 0, sizeof(struct dcp_async_ctx));
	memcpy(rctx, &export->req_ctx, sizeof(struct dcp_sha_req_ctx));
	memcpy(actx, &export->async_ctx, sizeof(struct dcp_async_ctx));

	return 0;
}

static int dcp_sha_export(struct ahash_request *req, void *out)
{
	struct dcp_sha_req_ctx *rctx_state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx_state = crypto_ahash_ctx(tfm);
	struct dcp_export_state *export = out;

	memcpy(&export->req_ctx, rctx_state, sizeof(struct dcp_sha_req_ctx));
	memcpy(&export->async_ctx, actx_state, sizeof(struct dcp_async_ctx));

	return 0;
}

static int dcp_sha_cra_init(struct crypto_tfm *tfm)
{
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct dcp_sha_req_ctx));
	return 0;
}

static void dcp_sha_cra_exit(struct crypto_tfm *tfm)
{
}

/* AES 128 ECB and AES 128 CBC */
static struct skcipher_alg dcp_aes_algs[] = {
	{
		.base.cra_name		= "ecb(aes)",
		.base.cra_driver_name	= "ecb-aes-dcp",
		.base.cra_priority	= 400,
		.base.cra_alignmask	= 15,
		.base.cra_flags		= CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_NEED_FALLBACK,
		.base.cra_blocksize	= AES_BLOCK_SIZE,
		.base.cra_ctxsize	= sizeof(struct dcp_async_ctx),
		.base.cra_module	= THIS_MODULE,

		.min_keysize		= AES_MIN_KEY_SIZE,
		.max_keysize		= AES_MAX_KEY_SIZE,
		.setkey			= mxs_dcp_aes_setkey,
		.encrypt		= mxs_dcp_aes_ecb_encrypt,
		.decrypt		= mxs_dcp_aes_ecb_decrypt,
		.init			= mxs_dcp_aes_fallback_init_tfm,
		.exit			= mxs_dcp_aes_fallback_exit_tfm,
	}, {
		.base.cra_name		= "cbc(aes)",
		.base.cra_driver_name	= "cbc-aes-dcp",
		.base.cra_priority	= 400,
		.base.cra_alignmask	= 15,
		.base.cra_flags		= CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_NEED_FALLBACK,
		.base.cra_blocksize	= AES_BLOCK_SIZE,
		.base.cra_ctxsize	= sizeof(struct dcp_async_ctx),
		.base.cra_module	= THIS_MODULE,

		.min_keysize		= AES_MIN_KEY_SIZE,
		.max_keysize		= AES_MAX_KEY_SIZE,
		.setkey			= mxs_dcp_aes_setkey,
		.encrypt		= mxs_dcp_aes_cbc_encrypt,
		.decrypt		= mxs_dcp_aes_cbc_decrypt,
		.ivsize			= AES_BLOCK_SIZE,
		.init			= mxs_dcp_aes_fallback_init_tfm,
		.exit			= mxs_dcp_aes_fallback_exit_tfm,
	},
};
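/*
 * A minimal, illustrative consumer-side sketch (not part of this driver) of
 * driving the "cbc(aes)" algorithm above through the generic crypto API;
 * key, buf, len and iv are placeholders and error handling is elided. len
 * must be a multiple of AES_BLOCK_SIZE:
 *
 *	DECLARE_CRYPTO_WAIT(wait);
 *	struct crypto_skcipher *tfm;
 *	struct skcipher_request *req;
 *	struct scatterlist sg;
 *	int err;
 *
 *	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *	crypto_skcipher_setkey(tfm, key, AES_KEYSIZE_128);
 *	req = skcipher_request_alloc(tfm, GFP_KERNEL);
 *	sg_init_one(&sg, buf, len);
 *	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				      crypto_req_done, &wait);
 *	skcipher_request_set_crypt(req, &sg, &sg, len, iv);
 *	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
 *	skcipher_request_free(req);
 *	crypto_free_skcipher(tfm);
 */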
/* SHA1 */
static struct ahash_alg dcp_sha1_alg = {
	.init	= dcp_sha_init,
	.update	= dcp_sha_update,
	.final	= dcp_sha_final,
	.finup	= dcp_sha_finup,
	.digest	= dcp_sha_digest,
	.import	= dcp_sha_import,
	.export	= dcp_sha_export,
	.halg	= {
		.digestsize	= SHA1_DIGEST_SIZE,
		.statesize	= sizeof(struct dcp_export_state),
		.base		= {
			.cra_name		= "sha1",
			.cra_driver_name	= "sha1-dcp",
			.cra_priority		= 400,
			.cra_alignmask		= 63,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA1_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct dcp_async_ctx),
			.cra_module		= THIS_MODULE,
			.cra_init		= dcp_sha_cra_init,
			.cra_exit		= dcp_sha_cra_exit,
		},
	},
};

/* SHA256 */
static struct ahash_alg dcp_sha256_alg = {
	.init	= dcp_sha_init,
	.update	= dcp_sha_update,
	.final	= dcp_sha_final,
	.finup	= dcp_sha_finup,
	.digest	= dcp_sha_digest,
	.import	= dcp_sha_import,
	.export	= dcp_sha_export,
	.halg	= {
		.digestsize	= SHA256_DIGEST_SIZE,
		.statesize	= sizeof(struct dcp_export_state),
		.base		= {
			.cra_name		= "sha256",
			.cra_driver_name	= "sha256-dcp",
			.cra_priority		= 400,
			.cra_alignmask		= 63,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA256_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct dcp_async_ctx),
			.cra_module		= THIS_MODULE,
			.cra_init		= dcp_sha_cra_init,
			.cra_exit		= dcp_sha_cra_exit,
		},
	},
};

static irqreturn_t mxs_dcp_irq(int irq, void *context)
{
	struct dcp *sdcp = context;
	uint32_t stat;
	int i;

	stat = readl(sdcp->base + MXS_DCP_STAT);
	stat &= MXS_DCP_STAT_IRQ_MASK;
	if (!stat)
		return IRQ_NONE;

	/* Clear the interrupts. */
	writel(stat, sdcp->base + MXS_DCP_STAT_CLR);

	/* Complete the DMA requests that finished. */
	for (i = 0; i < DCP_MAX_CHANS; i++)
		if (stat & (1 << i))
			complete(&sdcp->completion[i]);

	return IRQ_HANDLED;
}
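/*
 * Probe order: map the MMIO region and claim both DCP IRQs, carve out the
 * 64-byte aligned coherent bounce block, enable the optional clock, reset
 * and configure the block, start the per-channel kthreads and finally
 * register only the algorithms advertised in the CAPABILITY1 register.
 */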
static int mxs_dcp_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct dcp *sdcp = NULL;
	int i, ret;
	int dcp_vmi_irq, dcp_irq;

	if (global_sdcp) {
		dev_err(dev, "Only one DCP instance allowed!\n");
		return -ENODEV;
	}

	dcp_vmi_irq = platform_get_irq(pdev, 0);
	if (dcp_vmi_irq < 0)
		return dcp_vmi_irq;

	dcp_irq = platform_get_irq(pdev, 1);
	if (dcp_irq < 0)
		return dcp_irq;

	sdcp = devm_kzalloc(dev, sizeof(*sdcp), GFP_KERNEL);
	if (!sdcp)
		return -ENOMEM;

	sdcp->dev = dev;
	sdcp->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(sdcp->base))
		return PTR_ERR(sdcp->base);

	ret = devm_request_irq(dev, dcp_vmi_irq, mxs_dcp_irq, 0,
			       "dcp-vmi-irq", sdcp);
	if (ret) {
		dev_err(dev, "Failed to claim DCP VMI IRQ!\n");
		return ret;
	}

	ret = devm_request_irq(dev, dcp_irq, mxs_dcp_irq, 0,
			       "dcp-irq", sdcp);
	if (ret) {
		dev_err(dev, "Failed to claim DCP IRQ!\n");
		return ret;
	}

	/* Allocate coherent helper block. */
	sdcp->coh = devm_kzalloc(dev, sizeof(*sdcp->coh) + DCP_ALIGNMENT,
				 GFP_KERNEL);
	if (!sdcp->coh)
		return -ENOMEM;

	/* Re-align the structure so it fits the DCP constraints. */
	sdcp->coh = PTR_ALIGN(sdcp->coh, DCP_ALIGNMENT);

	/* DCP clock is optional, only used on some SOCs */
	sdcp->dcp_clk = devm_clk_get(dev, "dcp");
	if (IS_ERR(sdcp->dcp_clk)) {
		if (sdcp->dcp_clk != ERR_PTR(-ENOENT))
			return PTR_ERR(sdcp->dcp_clk);
		sdcp->dcp_clk = NULL;
	}
	ret = clk_prepare_enable(sdcp->dcp_clk);
	if (ret)
		return ret;

	/* Restart the DCP block. */
	ret = stmp_reset_block(sdcp->base);
	if (ret) {
		dev_err(dev, "Failed reset\n");
		goto err_disable_unprepare_clk;
	}

	/* Initialize control register. */
	writel(MXS_DCP_CTRL_GATHER_RESIDUAL_WRITES |
	       MXS_DCP_CTRL_ENABLE_CONTEXT_CACHING | 0xf,
	       sdcp->base + MXS_DCP_CTRL);

	/* Enable all DCP DMA channels. */
	writel(MXS_DCP_CHANNELCTRL_ENABLE_CHANNEL_MASK,
	       sdcp->base + MXS_DCP_CHANNELCTRL);

	/*
	 * We do not enable context switching. Give the context buffer a
	 * pointer to an illegal address so if context switching is
	 * inadvertently enabled, the DCP will return an error instead of
	 * trashing good memory. The DCP DMA cannot access ROM, so any ROM
	 * address will do.
	 */
	writel(0xffff0000, sdcp->base + MXS_DCP_CONTEXT);
	for (i = 0; i < DCP_MAX_CHANS; i++)
		writel(0xffffffff, sdcp->base + MXS_DCP_CH_N_STAT_CLR(i));
	writel(0xffffffff, sdcp->base + MXS_DCP_STAT_CLR);

	global_sdcp = sdcp;

	platform_set_drvdata(pdev, sdcp);

	for (i = 0; i < DCP_MAX_CHANS; i++) {
		spin_lock_init(&sdcp->lock[i]);
		init_completion(&sdcp->completion[i]);
		crypto_init_queue(&sdcp->queue[i], 50);
	}

	/* Create the SHA and AES handler threads. */
	sdcp->thread[DCP_CHAN_HASH_SHA] = kthread_run(dcp_chan_thread_sha,
						      NULL, "mxs_dcp_chan/sha");
	if (IS_ERR(sdcp->thread[DCP_CHAN_HASH_SHA])) {
		dev_err(dev, "Error starting SHA thread!\n");
		ret = PTR_ERR(sdcp->thread[DCP_CHAN_HASH_SHA]);
		goto err_disable_unprepare_clk;
	}

	sdcp->thread[DCP_CHAN_CRYPTO] = kthread_run(dcp_chan_thread_aes,
						    NULL, "mxs_dcp_chan/aes");
	if (IS_ERR(sdcp->thread[DCP_CHAN_CRYPTO])) {
		dev_err(dev, "Error starting AES thread!\n");
		ret = PTR_ERR(sdcp->thread[DCP_CHAN_CRYPTO]);
		goto err_destroy_sha_thread;
	}

	/* Register the various crypto algorithms. */
	sdcp->caps = readl(sdcp->base + MXS_DCP_CAPABILITY1);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128) {
		ret = crypto_register_skciphers(dcp_aes_algs,
						ARRAY_SIZE(dcp_aes_algs));
		if (ret) {
			/* Failed to register algorithm. */
			dev_err(dev, "Failed to register AES crypto!\n");
			goto err_destroy_aes_thread;
		}
	}

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1) {
		ret = crypto_register_ahash(&dcp_sha1_alg);
		if (ret) {
			dev_err(dev, "Failed to register %s hash!\n",
				dcp_sha1_alg.halg.base.cra_name);
			goto err_unregister_aes;
		}
	}

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA256) {
		ret = crypto_register_ahash(&dcp_sha256_alg);
		if (ret) {
			dev_err(dev, "Failed to register %s hash!\n",
				dcp_sha256_alg.halg.base.cra_name);
			goto err_unregister_sha1;
		}
	}

	return 0;
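/*
 * Error unwinding mirrors the setup order above: drop the hashes first,
 * then the AES algorithms, then the kthreads, and finally the clock.
 */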
err_unregister_sha1:
	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1)
		crypto_unregister_ahash(&dcp_sha1_alg);

err_unregister_aes:
	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128)
		crypto_unregister_skciphers(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));

err_destroy_aes_thread:
	kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);

err_destroy_sha_thread:
	kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);

err_disable_unprepare_clk:
	clk_disable_unprepare(sdcp->dcp_clk);

	return ret;
}

static int mxs_dcp_remove(struct platform_device *pdev)
{
	struct dcp *sdcp = platform_get_drvdata(pdev);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA256)
		crypto_unregister_ahash(&dcp_sha256_alg);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1)
		crypto_unregister_ahash(&dcp_sha1_alg);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128)
		crypto_unregister_skciphers(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));

	kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);
	kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);

	clk_disable_unprepare(sdcp->dcp_clk);

	platform_set_drvdata(pdev, NULL);

	global_sdcp = NULL;

	return 0;
}

static const struct of_device_id mxs_dcp_dt_ids[] = {
	{ .compatible = "fsl,imx23-dcp", .data = NULL, },
	{ .compatible = "fsl,imx28-dcp", .data = NULL, },
	{ /* sentinel */ }
};

MODULE_DEVICE_TABLE(of, mxs_dcp_dt_ids);

static struct platform_driver mxs_dcp_driver = {
	.probe	= mxs_dcp_probe,
	.remove	= mxs_dcp_remove,
	.driver	= {
		.name		= "mxs-dcp",
		.of_match_table	= mxs_dcp_dt_ids,
	},
};

module_platform_driver(mxs_dcp_driver);

MODULE_AUTHOR("Marek Vasut <marex@denx.de>");
MODULE_DESCRIPTION("Freescale MXS DCP Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:mxs-dcp");