// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Freescale i.MX23/i.MX28 Data Co-Processor driver
 *
 * Copyright (C) 2013 Marek Vasut <marex@denx.de>
 */

#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/stmp_device.h>
#include <linux/clk.h>

#include <crypto/aes.h>
#include <crypto/sha.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>

#define DCP_MAX_CHANS	4
#define DCP_BUF_SZ	PAGE_SIZE
#define DCP_SHA_PAY_SZ	64

#define DCP_ALIGNMENT	64

/*
 * Null hashes to align with hw behavior on imx6sl and ull;
 * these are flipped for consistency with hw output.
 */
static const uint8_t sha1_null_hash[] =
	"\x09\x07\xd8\xaf\x90\x18\x60\x95\xef\xbf"
	"\x55\x32\x0d\x4b\x6b\x5e\xee\xa3\x39\xda";

static const uint8_t sha256_null_hash[] =
	"\x55\xb8\x52\x78\x1b\x99\x95\xa4"
	"\x4c\x93\x9b\x64\xe4\x41\xae\x27"
	"\x24\xb9\x6f\x99\xc8\xf4\xfb\x9a"
	"\x14\x1c\xfc\x98\x42\xc4\xb0\xe3";

/* DCP DMA descriptor. */
struct dcp_dma_desc {
	uint32_t	next_cmd_addr;
	uint32_t	control0;
	uint32_t	control1;
	uint32_t	source;
	uint32_t	destination;
	uint32_t	size;
	uint32_t	payload;
	uint32_t	status;
};

/* Coherent aligned block for bounce buffering. */
struct dcp_coherent_block {
	uint8_t			aes_in_buf[DCP_BUF_SZ];
	uint8_t			aes_out_buf[DCP_BUF_SZ];
	uint8_t			sha_in_buf[DCP_BUF_SZ];
	uint8_t			sha_out_buf[DCP_SHA_PAY_SZ];

	uint8_t			aes_key[2 * AES_KEYSIZE_128];

	struct dcp_dma_desc	desc[DCP_MAX_CHANS];
};

struct dcp {
	struct device			*dev;
	void __iomem			*base;

	uint32_t			caps;

	struct dcp_coherent_block	*coh;

	struct completion		completion[DCP_MAX_CHANS];
	spinlock_t			lock[DCP_MAX_CHANS];
	struct task_struct		*thread[DCP_MAX_CHANS];
	struct crypto_queue		queue[DCP_MAX_CHANS];
	struct clk			*dcp_clk;
};

enum dcp_chan {
	DCP_CHAN_HASH_SHA	= 0,
	DCP_CHAN_CRYPTO		= 2,
};

struct dcp_async_ctx {
	/* Common context */
	enum dcp_chan	chan;
	uint32_t	fill;

	/* SHA Hash-specific context */
	struct mutex			mutex;
	uint32_t			alg;
	unsigned int			hot:1;

	/* Crypto-specific context */
	struct crypto_sync_skcipher	*fallback;
	unsigned int			key_len;
	uint8_t				key[AES_KEYSIZE_128];
};

struct dcp_aes_req_ctx {
	unsigned int	enc:1;
	unsigned int	ecb:1;
};

struct dcp_sha_req_ctx {
	unsigned int	init:1;
	unsigned int	fini:1;
};

struct dcp_export_state {
	struct dcp_sha_req_ctx	req_ctx;
	struct dcp_async_ctx	async_ctx;
};

/*
 * Due to the design of the Linux Crypto API, there can only be one
 * instance of the MXS DCP.
 */
static struct dcp *global_sdcp;

/* DCP register layout. */
#define MXS_DCP_CTRL				0x00
#define MXS_DCP_CTRL_GATHER_RESIDUAL_WRITES	(1 << 23)
#define MXS_DCP_CTRL_ENABLE_CONTEXT_CACHING	(1 << 22)

#define MXS_DCP_STAT				0x10
#define MXS_DCP_STAT_CLR			0x18
#define MXS_DCP_STAT_IRQ_MASK			0xf

#define MXS_DCP_CHANNELCTRL			0x20
#define MXS_DCP_CHANNELCTRL_ENABLE_CHANNEL_MASK	0xff

#define MXS_DCP_CAPABILITY1			0x40
#define MXS_DCP_CAPABILITY1_SHA256		(4 << 16)
#define MXS_DCP_CAPABILITY1_SHA1		(1 << 16)
#define MXS_DCP_CAPABILITY1_AES128		(1 << 0)

#define MXS_DCP_CONTEXT				0x50

#define MXS_DCP_CH_N_CMDPTR(n)			(0x100 + ((n) * 0x40))

#define MXS_DCP_CH_N_SEMA(n)			(0x110 + ((n) * 0x40))

#define MXS_DCP_CH_N_STAT(n)			(0x120 + ((n) * 0x40))
#define MXS_DCP_CH_N_STAT_CLR(n)		(0x128 + ((n) * 0x40))

/* DMA descriptor bits. */
#define MXS_DCP_CONTROL0_HASH_TERM		(1 << 13)
#define MXS_DCP_CONTROL0_HASH_INIT		(1 << 12)
#define MXS_DCP_CONTROL0_PAYLOAD_KEY		(1 << 11)
#define MXS_DCP_CONTROL0_CIPHER_ENCRYPT		(1 << 8)
#define MXS_DCP_CONTROL0_CIPHER_INIT		(1 << 9)
#define MXS_DCP_CONTROL0_ENABLE_HASH		(1 << 6)
#define MXS_DCP_CONTROL0_ENABLE_CIPHER		(1 << 5)
#define MXS_DCP_CONTROL0_DECR_SEMAPHORE		(1 << 1)
#define MXS_DCP_CONTROL0_INTERRUPT		(1 << 0)

#define MXS_DCP_CONTROL1_HASH_SELECT_SHA256	(2 << 16)
#define MXS_DCP_CONTROL1_HASH_SELECT_SHA1	(0 << 16)
#define MXS_DCP_CONTROL1_CIPHER_MODE_CBC	(1 << 4)
#define MXS_DCP_CONTROL1_CIPHER_MODE_ECB	(0 << 4)
#define MXS_DCP_CONTROL1_CIPHER_SELECT_AES128	(0 << 0)

static int mxs_dcp_start_dma(struct dcp_async_ctx *actx)
{
	struct dcp *sdcp = global_sdcp;
	const int chan = actx->chan;
	uint32_t stat;
	unsigned long ret;
	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];

	dma_addr_t desc_phys = dma_map_single(sdcp->dev, desc, sizeof(*desc),
					      DMA_TO_DEVICE);

	reinit_completion(&sdcp->completion[chan]);

	/* Clear status register. */
	writel(0xffffffff, sdcp->base + MXS_DCP_CH_N_STAT_CLR(chan));

	/* Load the DMA descriptor. */
	writel(desc_phys, sdcp->base + MXS_DCP_CH_N_CMDPTR(chan));

	/* Increment the semaphore to start the DMA transfer. */
	writel(1, sdcp->base + MXS_DCP_CH_N_SEMA(chan));

	ret = wait_for_completion_timeout(&sdcp->completion[chan],
					  msecs_to_jiffies(1000));

	/*
	 * Unmap the descriptor before the status checks so the error
	 * returns below do not leak the mapping.
	 */
	dma_unmap_single(sdcp->dev, desc_phys, sizeof(*desc), DMA_TO_DEVICE);

	if (!ret) {
		dev_err(sdcp->dev, "Channel %i timeout (DCP_STAT=0x%08x)\n",
			chan, readl(sdcp->base + MXS_DCP_STAT));
		return -ETIMEDOUT;
	}

	stat = readl(sdcp->base + MXS_DCP_CH_N_STAT(chan));
	if (stat & 0xff) {
		dev_err(sdcp->dev, "Channel %i error (CH_STAT=0x%08x)\n",
			chan, stat);
		return -EINVAL;
	}

	return 0;
}

/*
 * Encryption (AES128)
 */
static int mxs_dcp_run_aes(struct dcp_async_ctx *actx,
			   struct skcipher_request *req, int init)
{
	struct dcp *sdcp = global_sdcp;
	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
	struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
	int ret;

	dma_addr_t key_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_key,
					     2 * AES_KEYSIZE_128,
					     DMA_TO_DEVICE);
	dma_addr_t src_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_in_buf,
					     DCP_BUF_SZ, DMA_TO_DEVICE);
	dma_addr_t dst_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_out_buf,
					     DCP_BUF_SZ, DMA_FROM_DEVICE);

	if (actx->fill % AES_BLOCK_SIZE) {
		dev_err(sdcp->dev, "Invalid block size!\n");
		ret = -EINVAL;
		goto aes_done_run;
	}

	/* Fill in the DMA descriptor. */
	desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE |
			 MXS_DCP_CONTROL0_INTERRUPT |
			 MXS_DCP_CONTROL0_ENABLE_CIPHER;

	/* Payload contains the key. */
	desc->control0 |= MXS_DCP_CONTROL0_PAYLOAD_KEY;

	if (rctx->enc)
		desc->control0 |= MXS_DCP_CONTROL0_CIPHER_ENCRYPT;
	if (init)
		desc->control0 |= MXS_DCP_CONTROL0_CIPHER_INIT;

	desc->control1 = MXS_DCP_CONTROL1_CIPHER_SELECT_AES128;

	if (rctx->ecb)
		desc->control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_ECB;
	else
		desc->control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_CBC;

	desc->next_cmd_addr = 0;
	desc->source = src_phys;
	desc->destination = dst_phys;
	desc->size = actx->fill;
	desc->payload = key_phys;
	desc->status = 0;

	ret = mxs_dcp_start_dma(actx);

aes_done_run:
	dma_unmap_single(sdcp->dev, key_phys, 2 * AES_KEYSIZE_128,
			 DMA_TO_DEVICE);
	dma_unmap_single(sdcp->dev, src_phys, DCP_BUF_SZ, DMA_TO_DEVICE);
	dma_unmap_single(sdcp->dev, dst_phys, DCP_BUF_SZ, DMA_FROM_DEVICE);

	return ret;
}

static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
{
	struct dcp *sdcp = global_sdcp;

	struct skcipher_request *req = skcipher_request_cast(arq);
	struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm);
	struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);

	struct scatterlist *dst = req->dst;
	struct scatterlist *src = req->src;
	const int nents = sg_nents(req->src);

	const int out_off = DCP_BUF_SZ;
	uint8_t *in_buf = sdcp->coh->aes_in_buf;
	uint8_t *out_buf = sdcp->coh->aes_out_buf;

	uint8_t *out_tmp, *src_buf, *dst_buf = NULL;
	uint32_t dst_off = 0;
	uint32_t last_out_len = 0;

	uint8_t *key = sdcp->coh->aes_key;

	int ret = 0;
	int split = 0;
	unsigned int i, len, clen, rem = 0, tlen = 0;
	int init = 0;
	bool limit_hit = false;

	actx->fill = 0;

	/* Copy the key from the temporary location. */
	memcpy(key, actx->key, actx->key_len);

	if (!rctx->ecb) {
		/* Copy the CBC IV just past the key. */
		memcpy(key + AES_KEYSIZE_128, req->iv, AES_KEYSIZE_128);
		/* CBC needs the INIT set. */
		init = 1;
	} else {
		memset(key + AES_KEYSIZE_128, 0, AES_KEYSIZE_128);
	}

	for_each_sg(req->src, src, nents, i) {
		src_buf = sg_virt(src);
		len = sg_dma_len(src);
		tlen += len;
		limit_hit = tlen > req->cryptlen;

		if (limit_hit)
			len = req->cryptlen - (tlen - len);

		do {
			if (actx->fill + len > out_off)
				clen = out_off - actx->fill;
			else
				clen = len;

			memcpy(in_buf + actx->fill, src_buf, clen);
			len -= clen;
			src_buf += clen;
			actx->fill += clen;

			/*
			 * If we filled the buffer or this is the last SG,
			 * submit the buffer.
			 */
			if (actx->fill == out_off || sg_is_last(src) ||
			    limit_hit) {
				ret = mxs_dcp_run_aes(actx, req, init);
				if (ret)
					return ret;
				init = 0;

				out_tmp = out_buf;
				last_out_len = actx->fill;
				while (dst && actx->fill) {
					if (!split) {
						dst_buf = sg_virt(dst);
						dst_off = 0;
					}
					rem = min(sg_dma_len(dst) - dst_off,
						  actx->fill);

					memcpy(dst_buf + dst_off, out_tmp, rem);
					out_tmp += rem;
					dst_off += rem;
					actx->fill -= rem;

					if (dst_off == sg_dma_len(dst)) {
						dst = sg_next(dst);
						split = 0;
					} else {
						split = 1;
					}
				}
			}
		} while (len);

		if (limit_hit)
			break;
	}

	/* Copy the IV for CBC for chaining */
	if (!rctx->ecb) {
		if (rctx->enc)
			memcpy(req->iv, out_buf + (last_out_len - AES_BLOCK_SIZE),
			       AES_BLOCK_SIZE);
		else
			memcpy(req->iv, in_buf + (last_out_len - AES_BLOCK_SIZE),
			       AES_BLOCK_SIZE);
	}

	return ret;
}

static int dcp_chan_thread_aes(void *data)
{
	struct dcp *sdcp = global_sdcp;
	const int chan = DCP_CHAN_CRYPTO;

	struct crypto_async_request *backlog;
	struct crypto_async_request *arq;

	int ret;

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);

		spin_lock(&sdcp->lock[chan]);
		backlog = crypto_get_backlog(&sdcp->queue[chan]);
		arq = crypto_dequeue_request(&sdcp->queue[chan]);
		spin_unlock(&sdcp->lock[chan]);

		if (!backlog && !arq) {
			schedule();
			continue;
		}

		set_current_state(TASK_RUNNING);

		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);

		if (arq) {
			ret = mxs_dcp_aes_block_crypt(arq);
			arq->complete(arq, ret);
		}
	}

	return 0;
}

static int mxs_dcp_block_fallback(struct skcipher_request *req, int enc)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct dcp_async_ctx *ctx = crypto_skcipher_ctx(tfm);
	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
	int ret;

	skcipher_request_set_sync_tfm(subreq, ctx->fallback);
	skcipher_request_set_callback(subreq, req->base.flags, NULL, NULL);
	skcipher_request_set_crypt(subreq, req->src, req->dst,
				   req->cryptlen, req->iv);

	if (enc)
		ret = crypto_skcipher_encrypt(subreq);
	else
		ret = crypto_skcipher_decrypt(subreq);

	skcipher_request_zero(subreq);

	return ret;
}

static int mxs_dcp_aes_enqueue(struct skcipher_request *req, int enc, int ecb)
{
	struct dcp *sdcp = global_sdcp;
	struct crypto_async_request *arq = &req->base;
	struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm);
	struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
	int ret;

	if (unlikely(actx->key_len != AES_KEYSIZE_128))
		return mxs_dcp_block_fallback(req, enc);
	rctx->enc = enc;
	rctx->ecb = ecb;
	actx->chan = DCP_CHAN_CRYPTO;

	spin_lock(&sdcp->lock[actx->chan]);
	ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
	spin_unlock(&sdcp->lock[actx->chan]);

	wake_up_process(sdcp->thread[actx->chan]);

	return ret;
}

static int mxs_dcp_aes_ecb_decrypt(struct skcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 0, 1);
}

static int mxs_dcp_aes_ecb_encrypt(struct skcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 1, 1);
}

static int mxs_dcp_aes_cbc_decrypt(struct skcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 0, 0);
}

static int mxs_dcp_aes_cbc_encrypt(struct skcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 1, 0);
}

static int mxs_dcp_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
			      unsigned int len)
{
	struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm);

	/*
	 * AES-128 is supported by the hardware, so store the key into the
	 * temporary buffer and exit. We must use the temporary buffer here,
	 * since there can still be an operation in progress.
	 */
	actx->key_len = len;
	if (len == AES_KEYSIZE_128) {
		memcpy(actx->key, key, len);
		return 0;
	}

	/*
	 * If the requested AES key size is not supported by the hardware,
	 * but is supported by in-kernel software implementation, we use
	 * software fallback.
	 */
	crypto_sync_skcipher_clear_flags(actx->fallback, CRYPTO_TFM_REQ_MASK);
	crypto_sync_skcipher_set_flags(actx->fallback,
				       tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
	return crypto_sync_skcipher_setkey(actx->fallback, key, len);
}

static int mxs_dcp_aes_fallback_init_tfm(struct crypto_skcipher *tfm)
{
	const char *name = crypto_tfm_alg_name(crypto_skcipher_tfm(tfm));
	struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm);
	struct crypto_sync_skcipher *blk;

	blk = crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(blk))
		return PTR_ERR(blk);

	actx->fallback = blk;
	crypto_skcipher_set_reqsize(tfm, sizeof(struct dcp_aes_req_ctx));
	return 0;
}

static void mxs_dcp_aes_fallback_exit_tfm(struct crypto_skcipher *tfm)
{
	struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm);

	crypto_free_sync_skcipher(actx->fallback);
}

/*
 * Hashing (SHA1/SHA256)
 */
static int mxs_dcp_run_sha(struct ahash_request *req)
{
	struct dcp *sdcp = global_sdcp;
	int ret;

	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];

	dma_addr_t digest_phys = 0;
	dma_addr_t buf_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_in_buf,
					     DCP_BUF_SZ, DMA_TO_DEVICE);

	/* Fill in the DMA descriptor. */
	desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE |
			 MXS_DCP_CONTROL0_INTERRUPT |
			 MXS_DCP_CONTROL0_ENABLE_HASH;
	if (rctx->init)
		desc->control0 |= MXS_DCP_CONTROL0_HASH_INIT;

	desc->control1 = actx->alg;
	desc->next_cmd_addr = 0;
	desc->source = buf_phys;
	desc->destination = 0;
	desc->size = actx->fill;
	desc->payload = 0;
	desc->status = 0;

	/*
	 * Align driver with hw behavior when generating null hashes
	 */
	if (rctx->init && rctx->fini && desc->size == 0) {
		struct hash_alg_common *halg = crypto_hash_alg_common(tfm);
		const uint8_t *sha_buf =
			(actx->alg == MXS_DCP_CONTROL1_HASH_SELECT_SHA1) ?
			sha1_null_hash : sha256_null_hash;
		memcpy(sdcp->coh->sha_out_buf, sha_buf, halg->digestsize);
		ret = 0;
		goto done_run;
	}

	/* Set HASH_TERM bit for last transfer block. */
	if (rctx->fini) {
		digest_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_out_buf,
					     DCP_SHA_PAY_SZ, DMA_FROM_DEVICE);
		desc->control0 |= MXS_DCP_CONTROL0_HASH_TERM;
		desc->payload = digest_phys;
	}

	ret = mxs_dcp_start_dma(actx);

	if (rctx->fini)
		dma_unmap_single(sdcp->dev, digest_phys, DCP_SHA_PAY_SZ,
				 DMA_FROM_DEVICE);

done_run:
	dma_unmap_single(sdcp->dev, buf_phys, DCP_BUF_SZ, DMA_TO_DEVICE);

	return ret;
}

static int dcp_sha_req_to_buf(struct crypto_async_request *arq)
{
	struct dcp *sdcp = global_sdcp;

	struct ahash_request *req = ahash_request_cast(arq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct hash_alg_common *halg = crypto_hash_alg_common(tfm);
	const int nents = sg_nents(req->src);

	uint8_t *in_buf = sdcp->coh->sha_in_buf;
	uint8_t *out_buf = sdcp->coh->sha_out_buf;

	uint8_t *src_buf;

	struct scatterlist *src;

	unsigned int i, len, clen;
	int ret;

	int fin = rctx->fini;
	if (fin)
		rctx->fini = 0;

	for_each_sg(req->src, src, nents, i) {
		src_buf = sg_virt(src);
		len = sg_dma_len(src);

		do {
			if (actx->fill + len > DCP_BUF_SZ)
				clen = DCP_BUF_SZ - actx->fill;
			else
				clen = len;

			memcpy(in_buf + actx->fill, src_buf, clen);
			len -= clen;
			src_buf += clen;
			actx->fill += clen;

			/*
			 * If we filled the buffer and still have some
			 * more data, submit the buffer.
			 */
			if (len && actx->fill == DCP_BUF_SZ) {
				ret = mxs_dcp_run_sha(req);
				if (ret)
					return ret;
				actx->fill = 0;
				rctx->init = 0;
			}
		} while (len);
	}

	if (fin) {
		rctx->fini = 1;

		/* Submit whatever is left. */
		if (!req->result)
			return -EINVAL;

		ret = mxs_dcp_run_sha(req);
		if (ret)
			return ret;

		actx->fill = 0;

		/* The hardware emits the digest byte-reversed, so flip it. */
		for (i = 0; i < halg->digestsize; i++)
			req->result[i] = out_buf[halg->digestsize - i - 1];
	}

	return 0;
}

static int dcp_chan_thread_sha(void *data)
{
	struct dcp *sdcp = global_sdcp;
	const int chan = DCP_CHAN_HASH_SHA;

	struct crypto_async_request *backlog;
	struct crypto_async_request *arq;
	int ret;

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);

		spin_lock(&sdcp->lock[chan]);
		backlog = crypto_get_backlog(&sdcp->queue[chan]);
		arq = crypto_dequeue_request(&sdcp->queue[chan]);
		spin_unlock(&sdcp->lock[chan]);

		if (!backlog && !arq) {
			schedule();
			continue;
		}

		set_current_state(TASK_RUNNING);

		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);

		if (arq) {
			ret = dcp_sha_req_to_buf(arq);
			arq->complete(arq, ret);
		}
	}

	return 0;
}

static int dcp_sha_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);

	struct hash_alg_common *halg = crypto_hash_alg_common(tfm);

	/*
	 * Start hashing session. The code below only inits the
	 * hashing session context, nothing more.
	 */
	memset(actx, 0, sizeof(*actx));

	if (strcmp(halg->base.cra_name, "sha1") == 0)
		actx->alg = MXS_DCP_CONTROL1_HASH_SELECT_SHA1;
	else
		actx->alg = MXS_DCP_CONTROL1_HASH_SELECT_SHA256;

	actx->fill = 0;
	actx->hot = 0;
	actx->chan = DCP_CHAN_HASH_SHA;

	mutex_init(&actx->mutex);

	return 0;
}

static int dcp_sha_update_fx(struct ahash_request *req, int fini)
{
	struct dcp *sdcp = global_sdcp;

	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);

	int ret;

	/*
	 * Ignore requests that have no data in them and are not
	 * the trailing requests in the stream of requests.
	 */
	if (!req->nbytes && !fini)
		return 0;

	mutex_lock(&actx->mutex);

	rctx->fini = fini;

	if (!actx->hot) {
		actx->hot = 1;
		rctx->init = 1;
	}

	spin_lock(&sdcp->lock[actx->chan]);
	ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
	spin_unlock(&sdcp->lock[actx->chan]);

	wake_up_process(sdcp->thread[actx->chan]);
	mutex_unlock(&actx->mutex);

	return ret;
}

static int dcp_sha_update(struct ahash_request *req)
{
	return dcp_sha_update_fx(req, 0);
}

static int dcp_sha_final(struct ahash_request *req)
{
	ahash_request_set_crypt(req, NULL, req->result, 0);
	req->nbytes = 0;
	return dcp_sha_update_fx(req, 1);
}

static int dcp_sha_finup(struct ahash_request *req)
{
	return dcp_sha_update_fx(req, 1);
}

static int dcp_sha_digest(struct ahash_request *req)
{
	int ret;

	ret = dcp_sha_init(req);
	if (ret)
		return ret;

	return dcp_sha_finup(req);
}

static int dcp_sha_import(struct ahash_request *req, const void *in)
{
	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
	const struct dcp_export_state *export = in;

	memset(rctx, 0, sizeof(struct dcp_sha_req_ctx));
	memset(actx, 0, sizeof(struct dcp_async_ctx));
	memcpy(rctx, &export->req_ctx, sizeof(struct dcp_sha_req_ctx));
	memcpy(actx, &export->async_ctx, sizeof(struct dcp_async_ctx));

	return 0;
}

static int dcp_sha_export(struct ahash_request *req, void *out)
{
	struct dcp_sha_req_ctx *rctx_state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx_state = crypto_ahash_ctx(tfm);
	struct dcp_export_state *export = out;

	memcpy(&export->req_ctx, rctx_state, sizeof(struct dcp_sha_req_ctx));
	memcpy(&export->async_ctx, actx_state, sizeof(struct dcp_async_ctx));

	return 0;
}

static int dcp_sha_cra_init(struct crypto_tfm *tfm)
{
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct dcp_sha_req_ctx));
	return 0;
}

static void dcp_sha_cra_exit(struct crypto_tfm *tfm)
{
}

/* AES 128 ECB and AES 128 CBC */
static struct skcipher_alg dcp_aes_algs[] = {
	{
		.base.cra_name		= "ecb(aes)",
		.base.cra_driver_name	= "ecb-aes-dcp",
		.base.cra_priority	= 400,
		.base.cra_alignmask	= 15,
		.base.cra_flags		= CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_NEED_FALLBACK,
		.base.cra_blocksize	= AES_BLOCK_SIZE,
		.base.cra_ctxsize	= sizeof(struct dcp_async_ctx),
		.base.cra_module	= THIS_MODULE,

		.min_keysize		= AES_MIN_KEY_SIZE,
		.max_keysize		= AES_MAX_KEY_SIZE,
		.setkey			= mxs_dcp_aes_setkey,
		.encrypt		= mxs_dcp_aes_ecb_encrypt,
		.decrypt		= mxs_dcp_aes_ecb_decrypt,
		.init			= mxs_dcp_aes_fallback_init_tfm,
		.exit			= mxs_dcp_aes_fallback_exit_tfm,
	}, {
		.base.cra_name		= "cbc(aes)",
		.base.cra_driver_name	= "cbc-aes-dcp",
		.base.cra_priority	= 400,
		.base.cra_alignmask	= 15,
		.base.cra_flags		= CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_NEED_FALLBACK,
		.base.cra_blocksize	= AES_BLOCK_SIZE,
		.base.cra_ctxsize	= sizeof(struct dcp_async_ctx),
		.base.cra_module	= THIS_MODULE,

		.min_keysize		= AES_MIN_KEY_SIZE,
		.max_keysize		= AES_MAX_KEY_SIZE,
		.setkey			= mxs_dcp_aes_setkey,
		.encrypt		= mxs_dcp_aes_cbc_encrypt,
		.decrypt		= mxs_dcp_aes_cbc_decrypt,
		.ivsize			= AES_BLOCK_SIZE,
		.init			= mxs_dcp_aes_fallback_init_tfm,
		.exit			= mxs_dcp_aes_fallback_exit_tfm,
	},
};

/* SHA1 */
static struct ahash_alg dcp_sha1_alg = {
	.init	= dcp_sha_init,
	.update	= dcp_sha_update,
	.final	= dcp_sha_final,
	.finup	= dcp_sha_finup,
	.digest	= dcp_sha_digest,
	.import	= dcp_sha_import,
	.export	= dcp_sha_export,
	.halg	= {
		.digestsize	= SHA1_DIGEST_SIZE,
		.statesize	= sizeof(struct dcp_export_state),
		.base		= {
			.cra_name		= "sha1",
			.cra_driver_name	= "sha1-dcp",
			.cra_priority		= 400,
			.cra_alignmask		= 63,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA1_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct dcp_async_ctx),
			.cra_module		= THIS_MODULE,
			.cra_init		= dcp_sha_cra_init,
			.cra_exit		= dcp_sha_cra_exit,
		},
	},
};

/* SHA256 */
static struct ahash_alg dcp_sha256_alg = {
	.init	= dcp_sha_init,
	.update	= dcp_sha_update,
	.final	= dcp_sha_final,
	.finup	= dcp_sha_finup,
	.digest	= dcp_sha_digest,
	.import	= dcp_sha_import,
	.export	= dcp_sha_export,
	.halg	= {
		.digestsize	= SHA256_DIGEST_SIZE,
		.statesize	= sizeof(struct dcp_export_state),
		.base		= {
			.cra_name		= "sha256",
			.cra_driver_name	= "sha256-dcp",
			.cra_priority		= 400,
			.cra_alignmask		= 63,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA256_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct dcp_async_ctx),
			.cra_module		= THIS_MODULE,
			.cra_init		= dcp_sha_cra_init,
			.cra_exit		= dcp_sha_cra_exit,
		},
	},
};

static irqreturn_t mxs_dcp_irq(int irq, void *context)
{
	struct dcp *sdcp = context;
	uint32_t stat;
	int i;

	stat = readl(sdcp->base + MXS_DCP_STAT);
	stat &= MXS_DCP_STAT_IRQ_MASK;
	if (!stat)
		return IRQ_NONE;

	/* Clear the interrupts. */
	writel(stat, sdcp->base + MXS_DCP_STAT_CLR);

	/* Complete the DMA requests that finished. */
	for (i = 0; i < DCP_MAX_CHANS; i++)
		if (stat & (1 << i))
			complete(&sdcp->completion[i]);

	return IRQ_HANDLED;
}

static int mxs_dcp_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct dcp *sdcp = NULL;
	int i, ret;
	int dcp_vmi_irq, dcp_irq;

	if (global_sdcp) {
		dev_err(dev, "Only one DCP instance allowed!\n");
		return -ENODEV;
	}

	dcp_vmi_irq = platform_get_irq(pdev, 0);
	if (dcp_vmi_irq < 0)
		return dcp_vmi_irq;

	dcp_irq = platform_get_irq(pdev, 1);
	if (dcp_irq < 0)
		return dcp_irq;

	sdcp = devm_kzalloc(dev, sizeof(*sdcp), GFP_KERNEL);
	if (!sdcp)
		return -ENOMEM;

	sdcp->dev = dev;
	sdcp->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(sdcp->base))
		return PTR_ERR(sdcp->base);

	ret = devm_request_irq(dev, dcp_vmi_irq, mxs_dcp_irq, 0,
			       "dcp-vmi-irq", sdcp);
	if (ret) {
		dev_err(dev, "Failed to claim DCP VMI IRQ!\n");
		return ret;
	}

	ret = devm_request_irq(dev, dcp_irq, mxs_dcp_irq, 0,
			       "dcp-irq", sdcp);
	if (ret) {
		dev_err(dev, "Failed to claim DCP IRQ!\n");
		return ret;
	}

	/* Allocate coherent helper block. */
	sdcp->coh = devm_kzalloc(dev, sizeof(*sdcp->coh) + DCP_ALIGNMENT,
				 GFP_KERNEL);
	if (!sdcp->coh)
		return -ENOMEM;

	/* Re-align the structure so it fits the DCP constraints. */
	sdcp->coh = PTR_ALIGN(sdcp->coh, DCP_ALIGNMENT);

	/* DCP clock is optional, only used on some SOCs */
	sdcp->dcp_clk = devm_clk_get(dev, "dcp");
	if (IS_ERR(sdcp->dcp_clk)) {
		if (sdcp->dcp_clk != ERR_PTR(-ENOENT))
			return PTR_ERR(sdcp->dcp_clk);
		sdcp->dcp_clk = NULL;
	}
	ret = clk_prepare_enable(sdcp->dcp_clk);
	if (ret)
		return ret;

	/* Restart the DCP block. */
	ret = stmp_reset_block(sdcp->base);
	if (ret) {
		dev_err(dev, "Failed reset\n");
		goto err_disable_unprepare_clk;
	}

	/* Initialize control register. */
	writel(MXS_DCP_CTRL_GATHER_RESIDUAL_WRITES |
	       MXS_DCP_CTRL_ENABLE_CONTEXT_CACHING | 0xf,
	       sdcp->base + MXS_DCP_CTRL);

	/* Enable all DCP DMA channels. */
	writel(MXS_DCP_CHANNELCTRL_ENABLE_CHANNEL_MASK,
	       sdcp->base + MXS_DCP_CHANNELCTRL);

	/*
	 * We do not enable context switching. Give the context buffer a
	 * pointer to an illegal address so if context switching is
	 * inadvertently enabled, the DCP will return an error instead of
	 * trashing good memory. The DCP DMA cannot access ROM, so any ROM
	 * address will do.
	 */
	writel(0xffff0000, sdcp->base + MXS_DCP_CONTEXT);
	for (i = 0; i < DCP_MAX_CHANS; i++)
		writel(0xffffffff, sdcp->base + MXS_DCP_CH_N_STAT_CLR(i));
	writel(0xffffffff, sdcp->base + MXS_DCP_STAT_CLR);

	global_sdcp = sdcp;

	platform_set_drvdata(pdev, sdcp);

	for (i = 0; i < DCP_MAX_CHANS; i++) {
		spin_lock_init(&sdcp->lock[i]);
		init_completion(&sdcp->completion[i]);
		crypto_init_queue(&sdcp->queue[i], 50);
	}

	/* Create the SHA and AES handler threads. */
	sdcp->thread[DCP_CHAN_HASH_SHA] = kthread_run(dcp_chan_thread_sha,
						      NULL, "mxs_dcp_chan/sha");
	if (IS_ERR(sdcp->thread[DCP_CHAN_HASH_SHA])) {
		dev_err(dev, "Error starting SHA thread!\n");
		ret = PTR_ERR(sdcp->thread[DCP_CHAN_HASH_SHA]);
		goto err_disable_unprepare_clk;
	}

	sdcp->thread[DCP_CHAN_CRYPTO] = kthread_run(dcp_chan_thread_aes,
						    NULL, "mxs_dcp_chan/aes");
	if (IS_ERR(sdcp->thread[DCP_CHAN_CRYPTO])) {
		dev_err(dev, "Error starting AES thread!\n");
		ret = PTR_ERR(sdcp->thread[DCP_CHAN_CRYPTO]);
		goto err_destroy_sha_thread;
	}

	/* Register the various crypto algorithms. */
	sdcp->caps = readl(sdcp->base + MXS_DCP_CAPABILITY1);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128) {
		ret = crypto_register_skciphers(dcp_aes_algs,
						ARRAY_SIZE(dcp_aes_algs));
		if (ret) {
			/* Failed to register algorithm. */
			dev_err(dev, "Failed to register AES crypto!\n");
			goto err_destroy_aes_thread;
		}
	}

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1) {
		ret = crypto_register_ahash(&dcp_sha1_alg);
		if (ret) {
			dev_err(dev, "Failed to register %s hash!\n",
				dcp_sha1_alg.halg.base.cra_name);
			goto err_unregister_aes;
		}
	}

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA256) {
		ret = crypto_register_ahash(&dcp_sha256_alg);
		if (ret) {
			dev_err(dev, "Failed to register %s hash!\n",
				dcp_sha256_alg.halg.base.cra_name);
			goto err_unregister_sha1;
		}
	}

	return 0;

err_unregister_sha1:
	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1)
		crypto_unregister_ahash(&dcp_sha1_alg);

err_unregister_aes:
	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128)
		crypto_unregister_skciphers(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));

err_destroy_aes_thread:
	kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);

err_destroy_sha_thread:
	kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);

err_disable_unprepare_clk:
	clk_disable_unprepare(sdcp->dcp_clk);

	return ret;
}

static int mxs_dcp_remove(struct platform_device *pdev)
{
	struct dcp *sdcp = platform_get_drvdata(pdev);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA256)
		crypto_unregister_ahash(&dcp_sha256_alg);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1)
		crypto_unregister_ahash(&dcp_sha1_alg);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128)
		crypto_unregister_skciphers(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));

	kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);
	kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);

	clk_disable_unprepare(sdcp->dcp_clk);

	platform_set_drvdata(pdev, NULL);

	global_sdcp = NULL;

	return 0;
}

static const struct of_device_id mxs_dcp_dt_ids[] = {
	{ .compatible = "fsl,imx23-dcp", .data = NULL, },
	{ .compatible = "fsl,imx28-dcp", .data = NULL, },
	{ /* sentinel */ }
};

MODULE_DEVICE_TABLE(of, mxs_dcp_dt_ids);

static struct platform_driver mxs_dcp_driver = {
	.probe	= mxs_dcp_probe,
	.remove	= mxs_dcp_remove,
	.driver	= {
		.name		= "mxs-dcp",
		.of_match_table	= mxs_dcp_dt_ids,
	},
};

module_platform_driver(mxs_dcp_driver);

MODULE_AUTHOR("Marek Vasut <marex@denx.de>");
MODULE_DESCRIPTION("Freescale MXS DCP Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:mxs-dcp");