// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Freescale i.MX23/i.MX28 Data Co-Processor driver
 *
 * Copyright (C) 2013 Marek Vasut <marex@denx.de>
 */

#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/stmp_device.h>
#include <linux/clk.h>

#include <crypto/aes.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>

#define DCP_MAX_CHANS	4
#define DCP_BUF_SZ	PAGE_SIZE
#define DCP_SHA_PAY_SZ	64

#define DCP_ALIGNMENT	64

/*
 * Null hashes to align with hw behavior on imx6sl and ull;
 * these are flipped for consistency with hw output.
 */
static const uint8_t sha1_null_hash[] =
	"\x09\x07\xd8\xaf\x90\x18\x60\x95\xef\xbf"
	"\x55\x32\x0d\x4b\x6b\x5e\xee\xa3\x39\xda";

static const uint8_t sha256_null_hash[] =
	"\x55\xb8\x52\x78\x1b\x99\x95\xa4"
	"\x4c\x93\x9b\x64\xe4\x41\xae\x27"
	"\x24\xb9\x6f\x99\xc8\xf4\xfb\x9a"
	"\x14\x1c\xfc\x98\x42\xc4\xb0\xe3";

/* DCP DMA descriptor. */
struct dcp_dma_desc {
	uint32_t	next_cmd_addr;
	uint32_t	control0;
	uint32_t	control1;
	uint32_t	source;
	uint32_t	destination;
	uint32_t	size;
	uint32_t	payload;
	uint32_t	status;
};

/* Coherent aligned block for bounce buffering. */
struct dcp_coherent_block {
	uint8_t			aes_in_buf[DCP_BUF_SZ];
	uint8_t			aes_out_buf[DCP_BUF_SZ];
	uint8_t			sha_in_buf[DCP_BUF_SZ];
	uint8_t			sha_out_buf[DCP_SHA_PAY_SZ];

	uint8_t			aes_key[2 * AES_KEYSIZE_128];

	struct dcp_dma_desc	desc[DCP_MAX_CHANS];
};

struct dcp {
	struct device			*dev;
	void __iomem			*base;

	uint32_t			caps;

	struct dcp_coherent_block	*coh;

	struct completion		completion[DCP_MAX_CHANS];
	spinlock_t			lock[DCP_MAX_CHANS];
	struct task_struct		*thread[DCP_MAX_CHANS];
	struct crypto_queue		queue[DCP_MAX_CHANS];
	struct clk			*dcp_clk;
};

enum dcp_chan {
	DCP_CHAN_HASH_SHA	= 0,
	DCP_CHAN_CRYPTO		= 2,
};

struct dcp_async_ctx {
	/* Common context */
	enum dcp_chan	chan;
	uint32_t	fill;

	/* SHA Hash-specific context */
	struct mutex			mutex;
	uint32_t			alg;
	unsigned int			hot:1;

	/* Crypto-specific context */
	struct crypto_skcipher		*fallback;
	unsigned int			key_len;
	uint8_t				key[AES_KEYSIZE_128];
};

struct dcp_aes_req_ctx {
	unsigned int	enc:1;
	unsigned int	ecb:1;
	struct skcipher_request fallback_req;	// keep at the end
};

struct dcp_sha_req_ctx {
	unsigned int	init:1;
	unsigned int	fini:1;
};

struct dcp_export_state {
	struct dcp_sha_req_ctx req_ctx;
	struct dcp_async_ctx async_ctx;
};

/*
 * Only one instance of the MXS DCP can exist, due to the design of the
 * Linux Crypto API.
 */
static struct dcp *global_sdcp;
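
/*
 * The DCP exposes DCP_MAX_CHANS independent channels; this driver dedicates
 * channel 0 to hashing and channel 2 to AES (see enum dcp_chan). Each used
 * channel gets its own crypto_queue and kthread, and the shared IRQ handler
 * completes the per-channel completion when a descriptor finishes.
 */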

/* DCP register layout. */
#define MXS_DCP_CTRL				0x00
#define MXS_DCP_CTRL_GATHER_RESIDUAL_WRITES	(1 << 23)
#define MXS_DCP_CTRL_ENABLE_CONTEXT_CACHING	(1 << 22)

#define MXS_DCP_STAT				0x10
#define MXS_DCP_STAT_CLR			0x18
#define MXS_DCP_STAT_IRQ_MASK			0xf

#define MXS_DCP_CHANNELCTRL			0x20
#define MXS_DCP_CHANNELCTRL_ENABLE_CHANNEL_MASK	0xff

#define MXS_DCP_CAPABILITY1			0x40
#define MXS_DCP_CAPABILITY1_SHA256		(4 << 16)
#define MXS_DCP_CAPABILITY1_SHA1		(1 << 16)
#define MXS_DCP_CAPABILITY1_AES128		(1 << 0)

#define MXS_DCP_CONTEXT				0x50

#define MXS_DCP_CH_N_CMDPTR(n)			(0x100 + ((n) * 0x40))

#define MXS_DCP_CH_N_SEMA(n)			(0x110 + ((n) * 0x40))

#define MXS_DCP_CH_N_STAT(n)			(0x120 + ((n) * 0x40))
#define MXS_DCP_CH_N_STAT_CLR(n)		(0x128 + ((n) * 0x40))

/* DMA descriptor bits. */
#define MXS_DCP_CONTROL0_HASH_TERM		(1 << 13)
#define MXS_DCP_CONTROL0_HASH_INIT		(1 << 12)
#define MXS_DCP_CONTROL0_PAYLOAD_KEY		(1 << 11)
#define MXS_DCP_CONTROL0_CIPHER_ENCRYPT		(1 << 8)
#define MXS_DCP_CONTROL0_CIPHER_INIT		(1 << 9)
#define MXS_DCP_CONTROL0_ENABLE_HASH		(1 << 6)
#define MXS_DCP_CONTROL0_ENABLE_CIPHER		(1 << 5)
#define MXS_DCP_CONTROL0_DECR_SEMAPHORE		(1 << 1)
#define MXS_DCP_CONTROL0_INTERRUPT		(1 << 0)

#define MXS_DCP_CONTROL1_HASH_SELECT_SHA256	(2 << 16)
#define MXS_DCP_CONTROL1_HASH_SELECT_SHA1	(0 << 16)
#define MXS_DCP_CONTROL1_CIPHER_MODE_CBC	(1 << 4)
#define MXS_DCP_CONTROL1_CIPHER_MODE_ECB	(0 << 4)
#define MXS_DCP_CONTROL1_CIPHER_SELECT_AES128	(0 << 0)

static int mxs_dcp_start_dma(struct dcp_async_ctx *actx)
{
	struct dcp *sdcp = global_sdcp;
	const int chan = actx->chan;
	uint32_t stat;
	unsigned long ret;
	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];

	dma_addr_t desc_phys = dma_map_single(sdcp->dev, desc, sizeof(*desc),
					      DMA_TO_DEVICE);

	reinit_completion(&sdcp->completion[chan]);

	/* Clear status register. */
	writel(0xffffffff, sdcp->base + MXS_DCP_CH_N_STAT_CLR(chan));

	/* Load the DMA descriptor. */
	writel(desc_phys, sdcp->base + MXS_DCP_CH_N_CMDPTR(chan));

	/* Increment the semaphore to start the DMA transfer. */
	writel(1, sdcp->base + MXS_DCP_CH_N_SEMA(chan));

	ret = wait_for_completion_timeout(&sdcp->completion[chan],
					  msecs_to_jiffies(1000));
	if (!ret) {
		dev_err(sdcp->dev, "Channel %i timeout (DCP_STAT=0x%08x)\n",
			chan, readl(sdcp->base + MXS_DCP_STAT));
		return -ETIMEDOUT;
	}

	stat = readl(sdcp->base + MXS_DCP_CH_N_STAT(chan));
	if (stat & 0xff) {
		dev_err(sdcp->dev, "Channel %i error (CH_STAT=0x%08x)\n",
			chan, stat);
		return -EINVAL;
	}

	dma_unmap_single(sdcp->dev, desc_phys, sizeof(*desc), DMA_TO_DEVICE);

	return 0;
}

/*
 * Encryption (AES128)
 */
static int mxs_dcp_run_aes(struct dcp_async_ctx *actx,
			   struct skcipher_request *req, int init)
{
	struct dcp *sdcp = global_sdcp;
	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
	struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
	int ret;

	dma_addr_t key_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_key,
					     2 * AES_KEYSIZE_128,
					     DMA_TO_DEVICE);
	dma_addr_t src_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_in_buf,
					     DCP_BUF_SZ, DMA_TO_DEVICE);
	dma_addr_t dst_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_out_buf,
					     DCP_BUF_SZ, DMA_FROM_DEVICE);

	if (actx->fill % AES_BLOCK_SIZE) {
		dev_err(sdcp->dev, "Invalid block size!\n");
		ret = -EINVAL;
		goto aes_done_run;
	}

	/* Fill in the DMA descriptor. */
	desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE |
			 MXS_DCP_CONTROL0_INTERRUPT |
			 MXS_DCP_CONTROL0_ENABLE_CIPHER;

	/* Payload contains the key. */
	desc->control0 |= MXS_DCP_CONTROL0_PAYLOAD_KEY;

	if (rctx->enc)
		desc->control0 |= MXS_DCP_CONTROL0_CIPHER_ENCRYPT;
	if (init)
		desc->control0 |= MXS_DCP_CONTROL0_CIPHER_INIT;

	desc->control1 = MXS_DCP_CONTROL1_CIPHER_SELECT_AES128;

	if (rctx->ecb)
		desc->control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_ECB;
	else
		desc->control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_CBC;

	desc->next_cmd_addr = 0;
	desc->source = src_phys;
	desc->destination = dst_phys;
	desc->size = actx->fill;
	desc->payload = key_phys;
	desc->status = 0;

	ret = mxs_dcp_start_dma(actx);

aes_done_run:
	dma_unmap_single(sdcp->dev, key_phys, 2 * AES_KEYSIZE_128,
			 DMA_TO_DEVICE);
	dma_unmap_single(sdcp->dev, src_phys, DCP_BUF_SZ, DMA_TO_DEVICE);
	dma_unmap_single(sdcp->dev, dst_phys, DCP_BUF_SZ, DMA_FROM_DEVICE);

	return ret;
}

static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
{
	struct dcp *sdcp = global_sdcp;

	struct skcipher_request *req = skcipher_request_cast(arq);
	struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm);
	struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);

	struct scatterlist *dst = req->dst;
	struct scatterlist *src = req->src;
	const int nents = sg_nents(req->src);

	const int out_off = DCP_BUF_SZ;
	uint8_t *in_buf = sdcp->coh->aes_in_buf;
	uint8_t *out_buf = sdcp->coh->aes_out_buf;

	uint8_t *out_tmp, *src_buf, *dst_buf = NULL;
	uint32_t dst_off = 0;
	uint32_t last_out_len = 0;

	uint8_t *key = sdcp->coh->aes_key;

	int ret = 0;
	int split = 0;
	unsigned int i, len, clen, rem = 0, tlen = 0;
	int init = 0;
	bool limit_hit = false;

	actx->fill = 0;

	/* Copy the key from the temporary location. */
	memcpy(key, actx->key, actx->key_len);

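	/*
	 * The 32-byte key payload handed to the DCP holds the AES-128 key in
	 * its first half and the CBC IV (or zeroes for ECB) in its second
	 * half; mxs_dcp_run_aes() points desc->payload at this buffer.
	 */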
	if (!rctx->ecb) {
		/* Copy the CBC IV just past the key. */
		memcpy(key + AES_KEYSIZE_128, req->iv, AES_KEYSIZE_128);
		/* CBC needs the INIT set. */
		init = 1;
	} else {
		memset(key + AES_KEYSIZE_128, 0, AES_KEYSIZE_128);
	}

	for_each_sg(req->src, src, nents, i) {
		src_buf = sg_virt(src);
		len = sg_dma_len(src);
		tlen += len;
		limit_hit = tlen > req->cryptlen;

		if (limit_hit)
			len = req->cryptlen - (tlen - len);

		do {
			if (actx->fill + len > out_off)
				clen = out_off - actx->fill;
			else
				clen = len;

			memcpy(in_buf + actx->fill, src_buf, clen);
			len -= clen;
			src_buf += clen;
			actx->fill += clen;

			/*
			 * If we filled the buffer or this is the last SG,
			 * submit the buffer.
			 */
			if (actx->fill == out_off || sg_is_last(src) ||
			    limit_hit) {
				ret = mxs_dcp_run_aes(actx, req, init);
				if (ret)
					return ret;
				init = 0;

				out_tmp = out_buf;
				last_out_len = actx->fill;
				while (dst && actx->fill) {
					if (!split) {
						dst_buf = sg_virt(dst);
						dst_off = 0;
					}
					rem = min(sg_dma_len(dst) - dst_off,
						  actx->fill);

					memcpy(dst_buf + dst_off, out_tmp, rem);
					out_tmp += rem;
					dst_off += rem;
					actx->fill -= rem;

					if (dst_off == sg_dma_len(dst)) {
						dst = sg_next(dst);
						split = 0;
					} else {
						split = 1;
					}
				}
			}
		} while (len);

		if (limit_hit)
			break;
	}

	/* Copy the IV for CBC for chaining */
	if (!rctx->ecb) {
		if (rctx->enc)
			memcpy(req->iv, out_buf+(last_out_len-AES_BLOCK_SIZE),
			       AES_BLOCK_SIZE);
		else
			memcpy(req->iv, in_buf+(last_out_len-AES_BLOCK_SIZE),
			       AES_BLOCK_SIZE);
	}

	return ret;
}

static int dcp_chan_thread_aes(void *data)
{
	struct dcp *sdcp = global_sdcp;
	const int chan = DCP_CHAN_CRYPTO;

	struct crypto_async_request *backlog;
	struct crypto_async_request *arq;

	int ret;

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);

		spin_lock(&sdcp->lock[chan]);
		backlog = crypto_get_backlog(&sdcp->queue[chan]);
		arq = crypto_dequeue_request(&sdcp->queue[chan]);
		spin_unlock(&sdcp->lock[chan]);

		if (!backlog && !arq) {
			schedule();
			continue;
		}

		set_current_state(TASK_RUNNING);

		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);

		if (arq) {
			ret = mxs_dcp_aes_block_crypt(arq);
			arq->complete(arq, ret);
		}
	}

	return 0;
}

static int mxs_dcp_block_fallback(struct skcipher_request *req, int enc)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
	struct dcp_async_ctx *ctx = crypto_skcipher_ctx(tfm);
	int ret;

	skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
	skcipher_request_set_callback(&rctx->fallback_req, req->base.flags,
				      req->base.complete, req->base.data);
	skcipher_request_set_crypt(&rctx->fallback_req, req->src, req->dst,
				   req->cryptlen, req->iv);

	if (enc)
		ret = crypto_skcipher_encrypt(&rctx->fallback_req);
	else
		ret = crypto_skcipher_decrypt(&rctx->fallback_req);

	return ret;
}

static int mxs_dcp_aes_enqueue(struct skcipher_request *req, int enc, int ecb)
{
	struct dcp *sdcp = global_sdcp;
	struct crypto_async_request *arq = &req->base;
	struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm);
	struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
	int ret;

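	/*
	 * Only 128-bit keys are handled by the DCP itself; anything else set
	 * via setkey() goes through the software fallback transform.
	 */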
	if (unlikely(actx->key_len != AES_KEYSIZE_128))
		return mxs_dcp_block_fallback(req, enc);

	rctx->enc = enc;
	rctx->ecb = ecb;
	actx->chan = DCP_CHAN_CRYPTO;

	spin_lock(&sdcp->lock[actx->chan]);
	ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
	spin_unlock(&sdcp->lock[actx->chan]);

	wake_up_process(sdcp->thread[actx->chan]);

	return ret;
}

static int mxs_dcp_aes_ecb_decrypt(struct skcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 0, 1);
}

static int mxs_dcp_aes_ecb_encrypt(struct skcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 1, 1);
}

static int mxs_dcp_aes_cbc_decrypt(struct skcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 0, 0);
}

static int mxs_dcp_aes_cbc_encrypt(struct skcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 1, 0);
}

static int mxs_dcp_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
			      unsigned int len)
{
	struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm);

	/*
	 * AES-128 is supported by the hardware; store the key into the
	 * temporary buffer and exit. We must use the temporary buffer here,
	 * since there can still be an operation in progress.
	 */
	actx->key_len = len;
	if (len == AES_KEYSIZE_128) {
		memcpy(actx->key, key, len);
		return 0;
	}

	/*
	 * If the requested AES key size is not supported by the hardware,
	 * but is supported by in-kernel software implementation, we use
	 * software fallback.
	 */
	crypto_skcipher_clear_flags(actx->fallback, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(actx->fallback,
				  tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
	return crypto_skcipher_setkey(actx->fallback, key, len);
}

static int mxs_dcp_aes_fallback_init_tfm(struct crypto_skcipher *tfm)
{
	const char *name = crypto_tfm_alg_name(crypto_skcipher_tfm(tfm));
	struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm);
	struct crypto_skcipher *blk;

	blk = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(blk))
		return PTR_ERR(blk);

	actx->fallback = blk;
	crypto_skcipher_set_reqsize(tfm, sizeof(struct dcp_aes_req_ctx) +
					 crypto_skcipher_reqsize(blk));
	return 0;
}

static void mxs_dcp_aes_fallback_exit_tfm(struct crypto_skcipher *tfm)
{
	struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm);

	crypto_free_skcipher(actx->fallback);
}

/*
 * Hashing (SHA1/SHA256)
 */
static int mxs_dcp_run_sha(struct ahash_request *req)
{
	struct dcp *sdcp = global_sdcp;
	int ret;

	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];

	dma_addr_t digest_phys = 0;
	dma_addr_t buf_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_in_buf,
					     DCP_BUF_SZ, DMA_TO_DEVICE);

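	/*
	 * Only the input bounce buffer is mapped up front; the digest buffer
	 * is mapped further down only for the final block (rctx->fini), when
	 * the descriptor's payload field carries its address.
	 */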
	/* Fill in the DMA descriptor. */
	desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE |
			 MXS_DCP_CONTROL0_INTERRUPT |
			 MXS_DCP_CONTROL0_ENABLE_HASH;
	if (rctx->init)
		desc->control0 |= MXS_DCP_CONTROL0_HASH_INIT;

	desc->control1 = actx->alg;
	desc->next_cmd_addr = 0;
	desc->source = buf_phys;
	desc->destination = 0;
	desc->size = actx->fill;
	desc->payload = 0;
	desc->status = 0;

	/*
	 * Align driver with hw behavior when generating null hashes
	 */
	if (rctx->init && rctx->fini && desc->size == 0) {
		struct hash_alg_common *halg = crypto_hash_alg_common(tfm);
		const uint8_t *sha_buf =
			(actx->alg == MXS_DCP_CONTROL1_HASH_SELECT_SHA1) ?
			sha1_null_hash : sha256_null_hash;
		memcpy(sdcp->coh->sha_out_buf, sha_buf, halg->digestsize);
		ret = 0;
		goto done_run;
	}

	/* Set HASH_TERM bit for last transfer block. */
	if (rctx->fini) {
		digest_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_out_buf,
					     DCP_SHA_PAY_SZ, DMA_FROM_DEVICE);
		desc->control0 |= MXS_DCP_CONTROL0_HASH_TERM;
		desc->payload = digest_phys;
	}

	ret = mxs_dcp_start_dma(actx);

	if (rctx->fini)
		dma_unmap_single(sdcp->dev, digest_phys, DCP_SHA_PAY_SZ,
				 DMA_FROM_DEVICE);

done_run:
	dma_unmap_single(sdcp->dev, buf_phys, DCP_BUF_SZ, DMA_TO_DEVICE);

	return ret;
}

static int dcp_sha_req_to_buf(struct crypto_async_request *arq)
{
	struct dcp *sdcp = global_sdcp;

	struct ahash_request *req = ahash_request_cast(arq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct hash_alg_common *halg = crypto_hash_alg_common(tfm);

	uint8_t *in_buf = sdcp->coh->sha_in_buf;
	uint8_t *out_buf = sdcp->coh->sha_out_buf;

	struct scatterlist *src;

	unsigned int i, len, clen, oft = 0;
	int ret;

	int fin = rctx->fini;
	if (fin)
		rctx->fini = 0;

	src = req->src;
	len = req->nbytes;

	while (len) {
		if (actx->fill + len > DCP_BUF_SZ)
			clen = DCP_BUF_SZ - actx->fill;
		else
			clen = len;

		scatterwalk_map_and_copy(in_buf + actx->fill, src, oft, clen,
					 0);

		len -= clen;
		oft += clen;
		actx->fill += clen;

		/*
		 * If we filled the buffer and still have some
		 * more data, submit the buffer.
		 */
		if (len && actx->fill == DCP_BUF_SZ) {
			ret = mxs_dcp_run_sha(req);
			if (ret)
				return ret;
			actx->fill = 0;
			rctx->init = 0;
		}
	}

	if (fin) {
		rctx->fini = 1;

		/* Submit whatever is left. */
		if (!req->result)
			return -EINVAL;

		ret = mxs_dcp_run_sha(req);
		if (ret)
			return ret;

		actx->fill = 0;

		/* The hardware delivers the digest byte-reversed, so flip it. */
		for (i = 0; i < halg->digestsize; i++)
			req->result[i] = out_buf[halg->digestsize - i - 1];
	}

	return 0;
}

static int dcp_chan_thread_sha(void *data)
{
	struct dcp *sdcp = global_sdcp;
	const int chan = DCP_CHAN_HASH_SHA;

	struct crypto_async_request *backlog;
	struct crypto_async_request *arq;
	int ret;

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);

		spin_lock(&sdcp->lock[chan]);
		backlog = crypto_get_backlog(&sdcp->queue[chan]);
		arq = crypto_dequeue_request(&sdcp->queue[chan]);
		spin_unlock(&sdcp->lock[chan]);

		if (!backlog && !arq) {
			schedule();
			continue;
		}

		set_current_state(TASK_RUNNING);

		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);

		if (arq) {
			ret = dcp_sha_req_to_buf(arq);
			arq->complete(arq, ret);
		}
	}

	return 0;
}

static int dcp_sha_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);

	struct hash_alg_common *halg = crypto_hash_alg_common(tfm);

	/*
	 * Start hashing session. The code below only inits the
	 * hashing session context, nothing more.
	 */
	memset(actx, 0, sizeof(*actx));

	if (strcmp(halg->base.cra_name, "sha1") == 0)
		actx->alg = MXS_DCP_CONTROL1_HASH_SELECT_SHA1;
	else
		actx->alg = MXS_DCP_CONTROL1_HASH_SELECT_SHA256;

	actx->fill = 0;
	actx->hot = 0;
	actx->chan = DCP_CHAN_HASH_SHA;

	mutex_init(&actx->mutex);

	return 0;
}

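/*
 * Queue an update/final request on the SHA channel. The first update after
 * init marks the context "hot" so the first descriptor sets HASH_INIT;
 * requests with fini set terminate the hash with HASH_TERM on the last block.
 */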
static int dcp_sha_update_fx(struct ahash_request *req, int fini)
{
	struct dcp *sdcp = global_sdcp;

	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);

	int ret;

	/*
	 * Ignore requests that have no data in them and are not
	 * the trailing requests in the stream of requests.
	 */
	if (!req->nbytes && !fini)
		return 0;

	mutex_lock(&actx->mutex);

	rctx->fini = fini;

	if (!actx->hot) {
		actx->hot = 1;
		rctx->init = 1;
	}

	spin_lock(&sdcp->lock[actx->chan]);
	ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
	spin_unlock(&sdcp->lock[actx->chan]);

	wake_up_process(sdcp->thread[actx->chan]);
	mutex_unlock(&actx->mutex);

	return ret;
}

static int dcp_sha_update(struct ahash_request *req)
{
	return dcp_sha_update_fx(req, 0);
}

static int dcp_sha_final(struct ahash_request *req)
{
	ahash_request_set_crypt(req, NULL, req->result, 0);
	req->nbytes = 0;
	return dcp_sha_update_fx(req, 1);
}

static int dcp_sha_finup(struct ahash_request *req)
{
	return dcp_sha_update_fx(req, 1);
}

static int dcp_sha_digest(struct ahash_request *req)
{
	int ret;

	ret = dcp_sha_init(req);
	if (ret)
		return ret;

	return dcp_sha_finup(req);
}

static int dcp_sha_import(struct ahash_request *req, const void *in)
{
	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
	const struct dcp_export_state *export = in;

	memset(rctx, 0, sizeof(struct dcp_sha_req_ctx));
	memset(actx, 0, sizeof(struct dcp_async_ctx));
	memcpy(rctx, &export->req_ctx, sizeof(struct dcp_sha_req_ctx));
	memcpy(actx, &export->async_ctx, sizeof(struct dcp_async_ctx));

	return 0;
}

static int dcp_sha_export(struct ahash_request *req, void *out)
{
	struct dcp_sha_req_ctx *rctx_state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx_state = crypto_ahash_ctx(tfm);
	struct dcp_export_state *export = out;

	memcpy(&export->req_ctx, rctx_state, sizeof(struct dcp_sha_req_ctx));
	memcpy(&export->async_ctx, actx_state, sizeof(struct dcp_async_ctx));

	return 0;
}

static int dcp_sha_cra_init(struct crypto_tfm *tfm)
{
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct dcp_sha_req_ctx));
	return 0;
}

static void dcp_sha_cra_exit(struct crypto_tfm *tfm)
{
}

/* AES 128 ECB and AES 128 CBC */
static struct skcipher_alg dcp_aes_algs[] = {
	{
		.base.cra_name		= "ecb(aes)",
		.base.cra_driver_name	= "ecb-aes-dcp",
		.base.cra_priority	= 400,
		.base.cra_alignmask	= 15,
		.base.cra_flags		= CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_NEED_FALLBACK,
		.base.cra_blocksize	= AES_BLOCK_SIZE,
		.base.cra_ctxsize	= sizeof(struct dcp_async_ctx),
		.base.cra_module	= THIS_MODULE,

		.min_keysize		= AES_MIN_KEY_SIZE,
		.max_keysize		= AES_MAX_KEY_SIZE,
		.setkey			= mxs_dcp_aes_setkey,
		.encrypt		= mxs_dcp_aes_ecb_encrypt,
		.decrypt		= mxs_dcp_aes_ecb_decrypt,
		.init			= mxs_dcp_aes_fallback_init_tfm,
		.exit			= mxs_dcp_aes_fallback_exit_tfm,
	}, {
		.base.cra_name		= "cbc(aes)",
		.base.cra_driver_name	= "cbc-aes-dcp",
		.base.cra_priority	= 400,
		.base.cra_alignmask	= 15,
		.base.cra_flags		= CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_NEED_FALLBACK,
		.base.cra_blocksize	= AES_BLOCK_SIZE,
		.base.cra_ctxsize	= sizeof(struct dcp_async_ctx),
		.base.cra_module	= THIS_MODULE,

		.min_keysize		= AES_MIN_KEY_SIZE,
		.max_keysize		= AES_MAX_KEY_SIZE,
		.setkey			= mxs_dcp_aes_setkey,
		.encrypt		= mxs_dcp_aes_cbc_encrypt,
		.decrypt		= mxs_dcp_aes_cbc_decrypt,
		.ivsize			= AES_BLOCK_SIZE,
		.init			= mxs_dcp_aes_fallback_init_tfm,
		.exit			= mxs_dcp_aes_fallback_exit_tfm,
	},
};

/* SHA1 */
static struct ahash_alg dcp_sha1_alg = {
	.init	= dcp_sha_init,
	.update	= dcp_sha_update,
	.final	= dcp_sha_final,
	.finup	= dcp_sha_finup,
	.digest	= dcp_sha_digest,
	.import	= dcp_sha_import,
	.export	= dcp_sha_export,
	.halg	= {
		.digestsize	= SHA1_DIGEST_SIZE,
		.statesize	= sizeof(struct dcp_export_state),
		.base		= {
			.cra_name		= "sha1",
			.cra_driver_name	= "sha1-dcp",
			.cra_priority		= 400,
			.cra_alignmask		= 63,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA1_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct dcp_async_ctx),
			.cra_module		= THIS_MODULE,
			.cra_init		= dcp_sha_cra_init,
			.cra_exit		= dcp_sha_cra_exit,
		},
	},
};

/* SHA256 */
static struct ahash_alg dcp_sha256_alg = {
	.init	= dcp_sha_init,
	.update	= dcp_sha_update,
	.final	= dcp_sha_final,
	.finup	= dcp_sha_finup,
	.digest	= dcp_sha_digest,
	.import	= dcp_sha_import,
	.export	= dcp_sha_export,
	.halg	= {
		.digestsize	= SHA256_DIGEST_SIZE,
		.statesize	= sizeof(struct dcp_export_state),
		.base		= {
			.cra_name		= "sha256",
			.cra_driver_name	= "sha256-dcp",
			.cra_priority		= 400,
			.cra_alignmask		= 63,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA256_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct dcp_async_ctx),
			.cra_module		= THIS_MODULE,
			.cra_init		= dcp_sha_cra_init,
			.cra_exit		= dcp_sha_cra_exit,
		},
	},
};

static irqreturn_t mxs_dcp_irq(int irq, void *context)
{
	struct dcp *sdcp = context;
	uint32_t stat;
	int i;

	stat = readl(sdcp->base + MXS_DCP_STAT);
	stat &= MXS_DCP_STAT_IRQ_MASK;
	if (!stat)
		return IRQ_NONE;

	/* Clear the interrupts. */
	writel(stat, sdcp->base + MXS_DCP_STAT_CLR);

	/* Complete the DMA requests that finished. */
	for (i = 0; i < DCP_MAX_CHANS; i++)
		if (stat & (1 << i))
			complete(&sdcp->completion[i]);

	return IRQ_HANDLED;
}

static int mxs_dcp_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct dcp *sdcp = NULL;
	int i, ret;
	int dcp_vmi_irq, dcp_irq;

	if (global_sdcp) {
		dev_err(dev, "Only one DCP instance allowed!\n");
		return -ENODEV;
	}

	dcp_vmi_irq = platform_get_irq(pdev, 0);
	if (dcp_vmi_irq < 0)
		return dcp_vmi_irq;

	dcp_irq = platform_get_irq(pdev, 1);
	if (dcp_irq < 0)
		return dcp_irq;

	sdcp = devm_kzalloc(dev, sizeof(*sdcp), GFP_KERNEL);
	if (!sdcp)
		return -ENOMEM;

	sdcp->dev = dev;
	sdcp->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(sdcp->base))
		return PTR_ERR(sdcp->base);

	ret = devm_request_irq(dev, dcp_vmi_irq, mxs_dcp_irq, 0,
			       "dcp-vmi-irq", sdcp);
	if (ret) {
		dev_err(dev, "Failed to claim DCP VMI IRQ!\n");
		return ret;
	}

	ret = devm_request_irq(dev, dcp_irq, mxs_dcp_irq, 0,
			       "dcp-irq", sdcp);
	if (ret) {
		dev_err(dev, "Failed to claim DCP IRQ!\n");
		return ret;
	}

	/* Allocate coherent helper block. */
	sdcp->coh = devm_kzalloc(dev, sizeof(*sdcp->coh) + DCP_ALIGNMENT,
				 GFP_KERNEL);
	if (!sdcp->coh)
		return -ENOMEM;

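	/*
	 * The DCP_ALIGNMENT bytes of slack in the allocation above keep the
	 * PTR_ALIGN() below inside the devm-managed buffer.
	 */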
	/* Re-align the structure so it fits the DCP constraints. */
	sdcp->coh = PTR_ALIGN(sdcp->coh, DCP_ALIGNMENT);

	/* DCP clock is optional, only used on some SoCs. */
	sdcp->dcp_clk = devm_clk_get(dev, "dcp");
	if (IS_ERR(sdcp->dcp_clk)) {
		if (sdcp->dcp_clk != ERR_PTR(-ENOENT))
			return PTR_ERR(sdcp->dcp_clk);
		sdcp->dcp_clk = NULL;
	}
	ret = clk_prepare_enable(sdcp->dcp_clk);
	if (ret)
		return ret;

	/* Restart the DCP block. */
	ret = stmp_reset_block(sdcp->base);
	if (ret) {
		dev_err(dev, "Failed reset\n");
		goto err_disable_unprepare_clk;
	}

	/* Initialize control register. */
	writel(MXS_DCP_CTRL_GATHER_RESIDUAL_WRITES |
	       MXS_DCP_CTRL_ENABLE_CONTEXT_CACHING | 0xf,
	       sdcp->base + MXS_DCP_CTRL);

	/* Enable all DCP DMA channels. */
	writel(MXS_DCP_CHANNELCTRL_ENABLE_CHANNEL_MASK,
	       sdcp->base + MXS_DCP_CHANNELCTRL);

	/*
	 * We do not enable context switching. Give the context buffer a
	 * pointer to an illegal address so if context switching is
	 * inadvertently enabled, the DCP will return an error instead of
	 * trashing good memory. The DCP DMA cannot access ROM, so any ROM
	 * address will do.
	 */
	writel(0xffff0000, sdcp->base + MXS_DCP_CONTEXT);
	for (i = 0; i < DCP_MAX_CHANS; i++)
		writel(0xffffffff, sdcp->base + MXS_DCP_CH_N_STAT_CLR(i));
	writel(0xffffffff, sdcp->base + MXS_DCP_STAT_CLR);

	global_sdcp = sdcp;

	platform_set_drvdata(pdev, sdcp);

	for (i = 0; i < DCP_MAX_CHANS; i++) {
		spin_lock_init(&sdcp->lock[i]);
		init_completion(&sdcp->completion[i]);
		crypto_init_queue(&sdcp->queue[i], 50);
	}

	/* Create the SHA and AES handler threads. */
	sdcp->thread[DCP_CHAN_HASH_SHA] = kthread_run(dcp_chan_thread_sha,
						      NULL, "mxs_dcp_chan/sha");
	if (IS_ERR(sdcp->thread[DCP_CHAN_HASH_SHA])) {
		dev_err(dev, "Error starting SHA thread!\n");
		ret = PTR_ERR(sdcp->thread[DCP_CHAN_HASH_SHA]);
		goto err_disable_unprepare_clk;
	}

	sdcp->thread[DCP_CHAN_CRYPTO] = kthread_run(dcp_chan_thread_aes,
						    NULL, "mxs_dcp_chan/aes");
	if (IS_ERR(sdcp->thread[DCP_CHAN_CRYPTO])) {
		dev_err(dev, "Error starting AES thread!\n");
		ret = PTR_ERR(sdcp->thread[DCP_CHAN_CRYPTO]);
		goto err_destroy_sha_thread;
	}

	/* Register the various crypto algorithms. */
	sdcp->caps = readl(sdcp->base + MXS_DCP_CAPABILITY1);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128) {
		ret = crypto_register_skciphers(dcp_aes_algs,
						ARRAY_SIZE(dcp_aes_algs));
		if (ret) {
			/* Failed to register algorithm. */
			dev_err(dev, "Failed to register AES crypto!\n");
			goto err_destroy_aes_thread;
		}
	}

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1) {
		ret = crypto_register_ahash(&dcp_sha1_alg);
		if (ret) {
			dev_err(dev, "Failed to register %s hash!\n",
				dcp_sha1_alg.halg.base.cra_name);
			goto err_unregister_aes;
		}
	}

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA256) {
		ret = crypto_register_ahash(&dcp_sha256_alg);
		if (ret) {
			dev_err(dev, "Failed to register %s hash!\n",
				dcp_sha256_alg.halg.base.cra_name);
			goto err_unregister_sha1;
		}
	}

	return 0;

err_unregister_sha1:
	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1)
		crypto_unregister_ahash(&dcp_sha1_alg);

err_unregister_aes:
	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128)
		crypto_unregister_skciphers(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));

err_destroy_aes_thread:
	kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);

err_destroy_sha_thread:
	kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);

err_disable_unprepare_clk:
	clk_disable_unprepare(sdcp->dcp_clk);

	return ret;
}

static int mxs_dcp_remove(struct platform_device *pdev)
{
	struct dcp *sdcp = platform_get_drvdata(pdev);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA256)
		crypto_unregister_ahash(&dcp_sha256_alg);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1)
		crypto_unregister_ahash(&dcp_sha1_alg);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128)
		crypto_unregister_skciphers(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));

	kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);
	kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);

	clk_disable_unprepare(sdcp->dcp_clk);

	platform_set_drvdata(pdev, NULL);

	global_sdcp = NULL;

	return 0;
}

static const struct of_device_id mxs_dcp_dt_ids[] = {
	{ .compatible = "fsl,imx23-dcp", .data = NULL, },
	{ .compatible = "fsl,imx28-dcp", .data = NULL, },
	{ /* sentinel */ }
};

MODULE_DEVICE_TABLE(of, mxs_dcp_dt_ids);

static struct platform_driver mxs_dcp_driver = {
	.probe	= mxs_dcp_probe,
	.remove	= mxs_dcp_remove,
	.driver	= {
		.name		= "mxs-dcp",
		.of_match_table	= mxs_dcp_dt_ids,
	},
};

module_platform_driver(mxs_dcp_driver);

MODULE_AUTHOR("Marek Vasut <marex@denx.de>");
MODULE_DESCRIPTION("Freescale MXS DCP Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:mxs-dcp");