1fcaf2036SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-or-later
215b59e7cSMarek Vasut /*
315b59e7cSMarek Vasut * Freescale i.MX23/i.MX28 Data Co-Processor driver
415b59e7cSMarek Vasut *
515b59e7cSMarek Vasut * Copyright (C) 2013 Marek Vasut <marex@denx.de>
615b59e7cSMarek Vasut */
715b59e7cSMarek Vasut
815b59e7cSMarek Vasut #include <linux/dma-mapping.h>
915b59e7cSMarek Vasut #include <linux/interrupt.h>
1015b59e7cSMarek Vasut #include <linux/io.h>
1115b59e7cSMarek Vasut #include <linux/kernel.h>
1215b59e7cSMarek Vasut #include <linux/kthread.h>
1315b59e7cSMarek Vasut #include <linux/module.h>
1415b59e7cSMarek Vasut #include <linux/of.h>
1515b59e7cSMarek Vasut #include <linux/platform_device.h>
1615b59e7cSMarek Vasut #include <linux/stmp_device.h>
1757f00289SLeonard Crestez #include <linux/clk.h>
1815b59e7cSMarek Vasut
1915b59e7cSMarek Vasut #include <crypto/aes.h>
20a24d22b2SEric Biggers #include <crypto/sha1.h>
21a24d22b2SEric Biggers #include <crypto/sha2.h>
2215b59e7cSMarek Vasut #include <crypto/internal/hash.h>
2329406bb9SHerbert Xu #include <crypto/internal/skcipher.h>
24fa03481bSRosioru Dragos #include <crypto/scatterwalk.h>
2515b59e7cSMarek Vasut
2615b59e7cSMarek Vasut #define DCP_MAX_CHANS 4
2715b59e7cSMarek Vasut #define DCP_BUF_SZ PAGE_SIZE
28c709eebaSRadu Solea #define DCP_SHA_PAY_SZ 64
2915b59e7cSMarek Vasut
301a7c6856SMarek Vasut #define DCP_ALIGNMENT 64
311a7c6856SMarek Vasut
/*
 * Precomputed hashes of the empty message, to match the hardware's
 * behavior on i.MX6SL and i.MX6ULL. They are stored byte-reversed here
 * for consistency with the (byte-reversed) hardware digest output.
 */
36ce4e4584SWei Yongjun static const uint8_t sha1_null_hash[] =
37c709eebaSRadu Solea "\x09\x07\xd8\xaf\x90\x18\x60\x95\xef\xbf"
38c709eebaSRadu Solea "\x55\x32\x0d\x4b\x6b\x5e\xee\xa3\x39\xda";
39c709eebaSRadu Solea
40ce4e4584SWei Yongjun static const uint8_t sha256_null_hash[] =
41c709eebaSRadu Solea "\x55\xb8\x52\x78\x1b\x99\x95\xa4"
42c709eebaSRadu Solea "\x4c\x93\x9b\x64\xe4\x41\xae\x27"
43c709eebaSRadu Solea "\x24\xb9\x6f\x99\xc8\xf4\xfb\x9a"
44c709eebaSRadu Solea "\x14\x1c\xfc\x98\x42\xc4\xb0\xe3";
45c709eebaSRadu Solea
4615b59e7cSMarek Vasut /* DCP DMA descriptor. */
/* DCP DMA descriptor, read by the engine from coherent memory. */
struct dcp_dma_desc {
	uint32_t next_cmd_addr;	/* bus address of next descriptor; this driver always chains none (0) */
	uint32_t control0;	/* MXS_DCP_CONTROL0_* operation flags */
	uint32_t control1;	/* MXS_DCP_CONTROL1_* cipher/hash selection */
	uint32_t source;	/* bus address of the input buffer */
	uint32_t destination;	/* bus address of the output buffer */
	uint32_t size;		/* number of bytes to process */
	uint32_t payload;	/* bus address of key (AES) or digest (SHA final) */
	uint32_t status;	/* status written back by the hardware */
};
5715b59e7cSMarek Vasut
5815b59e7cSMarek Vasut /* Coherent aligned block for bounce buffering. */
/* Coherent aligned block for bounce buffering. */
struct dcp_coherent_block {
	uint8_t aes_in_buf[DCP_BUF_SZ];		/* staging buffer for AES input */
	uint8_t aes_out_buf[DCP_BUF_SZ];	/* AES output, scattered back to req->dst */
	uint8_t sha_in_buf[DCP_BUF_SZ];		/* staging buffer for SHA input */
	uint8_t sha_out_buf[DCP_SHA_PAY_SZ];	/* raw (byte-reversed) SHA digest */

	/* AES-128 key, with the CBC IV stored immediately after it. */
	uint8_t aes_key[2 * AES_KEYSIZE_128];

	struct dcp_dma_desc desc[DCP_MAX_CHANS];	/* one descriptor per channel */
};
6915b59e7cSMarek Vasut
/* Per-device driver state; a single instance exists (see global_sdcp). */
struct dcp {
	struct device *dev;
	void __iomem *base;		/* mapped DCP register window */

	uint32_t caps;			/* capability bits, cf. MXS_DCP_CAPABILITY1 */

	struct dcp_coherent_block *coh;	/* DMA bounce buffers and descriptors */

	struct completion completion[DCP_MAX_CHANS];	/* per-channel done signal */
	spinlock_t lock[DCP_MAX_CHANS];			/* protects queue[chan] */
	struct task_struct *thread[DCP_MAX_CHANS];	/* per-channel worker thread */
	struct crypto_queue queue[DCP_MAX_CHANS];	/* pending requests per channel */
	struct clk *dcp_clk;
};
8415b59e7cSMarek Vasut
/* Hardware channel assignment: hashing on channel 0, AES on channel 2. */
enum dcp_chan {
	DCP_CHAN_HASH_SHA = 0,
	DCP_CHAN_CRYPTO = 2,
};
8915b59e7cSMarek Vasut
/* Per-transform (tfm) context, shared by the AES and SHA algorithms. */
struct dcp_async_ctx {
	/* Common context */
	enum dcp_chan chan;	/* channel this tfm submits work to */
	uint32_t fill;		/* bytes currently staged in the bounce buffer */

	/* SHA Hash-specific context */
	struct mutex mutex;
	uint32_t alg;		/* MXS_DCP_CONTROL1_HASH_SELECT_* value */
	unsigned int hot:1;

	/* Crypto-specific context */
	struct crypto_skcipher *fallback;	/* sw cipher for keys != 128 bit */
	unsigned int key_len;
	uint8_t key[AES_KEYSIZE_128];		/* staged here; copied to coh at run time */
};
10515b59e7cSMarek Vasut
/* Per-request AES context, stored in the skcipher request's ctx area. */
struct dcp_aes_req_ctx {
	unsigned int enc:1;	/* 1 = encrypt, 0 = decrypt */
	unsigned int ecb:1;	/* 1 = ECB mode, 0 = CBC mode */
	/*
	 * Must stay the last member: the fallback request is variable-size
	 * (its reqsize is added in mxs_dcp_aes_fallback_init_tfm()).
	 */
	struct skcipher_request fallback_req; // keep at the end
};
1112021abaaSMarek Vasut
/* Per-request SHA context: marks the first and last block of a hash. */
struct dcp_sha_req_ctx {
	unsigned int init:1;	/* set HASH_INIT on the next engine run */
	unsigned int fini:1;	/* set HASH_TERM and fetch the digest */
};
11615b59e7cSMarek Vasut
/*
 * Snapshot of hash state; presumably used by the ahash export/import
 * handlers (not visible in this chunk) — verify against those callers.
 */
struct dcp_export_state {
	struct dcp_sha_req_ctx req_ctx;
	struct dcp_async_ctx async_ctx;
};
121ea9e7568SDan Douglass
/*
 * Due to the design of the Linux Crypto API there can be only one
 * instance of the MXS DCP driver state, kept in this file-scope pointer.
 */
12615b59e7cSMarek Vasut static struct dcp *global_sdcp;
12715b59e7cSMarek Vasut
12815b59e7cSMarek Vasut /* DCP register layout. */
12915b59e7cSMarek Vasut #define MXS_DCP_CTRL 0x00
13015b59e7cSMarek Vasut #define MXS_DCP_CTRL_GATHER_RESIDUAL_WRITES (1 << 23)
13115b59e7cSMarek Vasut #define MXS_DCP_CTRL_ENABLE_CONTEXT_CACHING (1 << 22)
13215b59e7cSMarek Vasut
13315b59e7cSMarek Vasut #define MXS_DCP_STAT 0x10
13415b59e7cSMarek Vasut #define MXS_DCP_STAT_CLR 0x18
13515b59e7cSMarek Vasut #define MXS_DCP_STAT_IRQ_MASK 0xf
13615b59e7cSMarek Vasut
13715b59e7cSMarek Vasut #define MXS_DCP_CHANNELCTRL 0x20
13815b59e7cSMarek Vasut #define MXS_DCP_CHANNELCTRL_ENABLE_CHANNEL_MASK 0xff
13915b59e7cSMarek Vasut
14015b59e7cSMarek Vasut #define MXS_DCP_CAPABILITY1 0x40
14115b59e7cSMarek Vasut #define MXS_DCP_CAPABILITY1_SHA256 (4 << 16)
14215b59e7cSMarek Vasut #define MXS_DCP_CAPABILITY1_SHA1 (1 << 16)
14315b59e7cSMarek Vasut #define MXS_DCP_CAPABILITY1_AES128 (1 << 0)
14415b59e7cSMarek Vasut
14515b59e7cSMarek Vasut #define MXS_DCP_CONTEXT 0x50
14615b59e7cSMarek Vasut
14715b59e7cSMarek Vasut #define MXS_DCP_CH_N_CMDPTR(n) (0x100 + ((n) * 0x40))
14815b59e7cSMarek Vasut
14915b59e7cSMarek Vasut #define MXS_DCP_CH_N_SEMA(n) (0x110 + ((n) * 0x40))
15015b59e7cSMarek Vasut
15115b59e7cSMarek Vasut #define MXS_DCP_CH_N_STAT(n) (0x120 + ((n) * 0x40))
15215b59e7cSMarek Vasut #define MXS_DCP_CH_N_STAT_CLR(n) (0x128 + ((n) * 0x40))
15315b59e7cSMarek Vasut
15415b59e7cSMarek Vasut /* DMA descriptor bits. */
15515b59e7cSMarek Vasut #define MXS_DCP_CONTROL0_HASH_TERM (1 << 13)
15615b59e7cSMarek Vasut #define MXS_DCP_CONTROL0_HASH_INIT (1 << 12)
15715b59e7cSMarek Vasut #define MXS_DCP_CONTROL0_PAYLOAD_KEY (1 << 11)
15815b59e7cSMarek Vasut #define MXS_DCP_CONTROL0_CIPHER_ENCRYPT (1 << 8)
15915b59e7cSMarek Vasut #define MXS_DCP_CONTROL0_CIPHER_INIT (1 << 9)
16015b59e7cSMarek Vasut #define MXS_DCP_CONTROL0_ENABLE_HASH (1 << 6)
16115b59e7cSMarek Vasut #define MXS_DCP_CONTROL0_ENABLE_CIPHER (1 << 5)
16215b59e7cSMarek Vasut #define MXS_DCP_CONTROL0_DECR_SEMAPHORE (1 << 1)
16315b59e7cSMarek Vasut #define MXS_DCP_CONTROL0_INTERRUPT (1 << 0)
16415b59e7cSMarek Vasut
16515b59e7cSMarek Vasut #define MXS_DCP_CONTROL1_HASH_SELECT_SHA256 (2 << 16)
16615b59e7cSMarek Vasut #define MXS_DCP_CONTROL1_HASH_SELECT_SHA1 (0 << 16)
16715b59e7cSMarek Vasut #define MXS_DCP_CONTROL1_CIPHER_MODE_CBC (1 << 4)
16815b59e7cSMarek Vasut #define MXS_DCP_CONTROL1_CIPHER_MODE_ECB (0 << 4)
16915b59e7cSMarek Vasut #define MXS_DCP_CONTROL1_CIPHER_SELECT_AES128 (0 << 0)
17015b59e7cSMarek Vasut
mxs_dcp_start_dma(struct dcp_async_ctx * actx)17115b59e7cSMarek Vasut static int mxs_dcp_start_dma(struct dcp_async_ctx *actx)
17215b59e7cSMarek Vasut {
173df6313d7SSean Anderson int dma_err;
17415b59e7cSMarek Vasut struct dcp *sdcp = global_sdcp;
17515b59e7cSMarek Vasut const int chan = actx->chan;
17615b59e7cSMarek Vasut uint32_t stat;
177dd0fff8dSNicholas Mc Guire unsigned long ret;
17815b59e7cSMarek Vasut struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
17915b59e7cSMarek Vasut dma_addr_t desc_phys = dma_map_single(sdcp->dev, desc, sizeof(*desc),
18015b59e7cSMarek Vasut DMA_TO_DEVICE);
18115b59e7cSMarek Vasut
182df6313d7SSean Anderson dma_err = dma_mapping_error(sdcp->dev, desc_phys);
183df6313d7SSean Anderson if (dma_err)
184df6313d7SSean Anderson return dma_err;
185df6313d7SSean Anderson
18615b59e7cSMarek Vasut reinit_completion(&sdcp->completion[chan]);
18715b59e7cSMarek Vasut
18815b59e7cSMarek Vasut /* Clear status register. */
18915b59e7cSMarek Vasut writel(0xffffffff, sdcp->base + MXS_DCP_CH_N_STAT_CLR(chan));
19015b59e7cSMarek Vasut
19115b59e7cSMarek Vasut /* Load the DMA descriptor. */
19215b59e7cSMarek Vasut writel(desc_phys, sdcp->base + MXS_DCP_CH_N_CMDPTR(chan));
19315b59e7cSMarek Vasut
19415b59e7cSMarek Vasut /* Increment the semaphore to start the DMA transfer. */
19515b59e7cSMarek Vasut writel(1, sdcp->base + MXS_DCP_CH_N_SEMA(chan));
19615b59e7cSMarek Vasut
19715b59e7cSMarek Vasut ret = wait_for_completion_timeout(&sdcp->completion[chan],
19815b59e7cSMarek Vasut msecs_to_jiffies(1000));
19915b59e7cSMarek Vasut if (!ret) {
20015b59e7cSMarek Vasut dev_err(sdcp->dev, "Channel %i timeout (DCP_STAT=0x%08x)\n",
20115b59e7cSMarek Vasut chan, readl(sdcp->base + MXS_DCP_STAT));
20215b59e7cSMarek Vasut return -ETIMEDOUT;
20315b59e7cSMarek Vasut }
20415b59e7cSMarek Vasut
20515b59e7cSMarek Vasut stat = readl(sdcp->base + MXS_DCP_CH_N_STAT(chan));
20615b59e7cSMarek Vasut if (stat & 0xff) {
20715b59e7cSMarek Vasut dev_err(sdcp->dev, "Channel %i error (CH_STAT=0x%08x)\n",
20815b59e7cSMarek Vasut chan, stat);
20915b59e7cSMarek Vasut return -EINVAL;
21015b59e7cSMarek Vasut }
21115b59e7cSMarek Vasut
21215b59e7cSMarek Vasut dma_unmap_single(sdcp->dev, desc_phys, sizeof(*desc), DMA_TO_DEVICE);
21315b59e7cSMarek Vasut
21415b59e7cSMarek Vasut return 0;
21515b59e7cSMarek Vasut }
21615b59e7cSMarek Vasut
21715b59e7cSMarek Vasut /*
21815b59e7cSMarek Vasut * Encryption (AES128)
21915b59e7cSMarek Vasut */
/*
 * Run one AES-128 pass over the actx->fill bytes staged in aes_in_buf,
 * writing the result to aes_out_buf.  The key (and, for CBC, the IV just
 * past it) must already have been copied into sdcp->coh->aes_key by the
 * caller.
 *
 * @init: sets CIPHER_INIT so the engine loads the IV from the payload;
 *        used for the first chunk of a CBC operation.
 *
 * Returns 0 on success or a negative errno (DMA mapping failure, input
 * not a multiple of the AES block size, or an engine error from
 * mxs_dcp_start_dma()).
 */
static int mxs_dcp_run_aes(struct dcp_async_ctx *actx,
			   struct skcipher_request *req, int init)
{
	dma_addr_t key_phys, src_phys, dst_phys;
	struct dcp *sdcp = global_sdcp;
	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
	struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
	int ret;

	/* Map key+IV, input and output bounce buffers for the engine. */
	key_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_key,
				  2 * AES_KEYSIZE_128, DMA_TO_DEVICE);
	ret = dma_mapping_error(sdcp->dev, key_phys);
	if (ret)
		return ret;

	src_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_in_buf,
				  DCP_BUF_SZ, DMA_TO_DEVICE);
	ret = dma_mapping_error(sdcp->dev, src_phys);
	if (ret)
		goto err_src;

	dst_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_out_buf,
				  DCP_BUF_SZ, DMA_FROM_DEVICE);
	ret = dma_mapping_error(sdcp->dev, dst_phys);
	if (ret)
		goto err_dst;

	/* The engine only processes whole AES blocks. */
	if (actx->fill % AES_BLOCK_SIZE) {
		dev_err(sdcp->dev, "Invalid block size!\n");
		ret = -EINVAL;
		goto aes_done_run;
	}

	/* Fill in the DMA descriptor. */
	desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE |
			 MXS_DCP_CONTROL0_INTERRUPT |
			 MXS_DCP_CONTROL0_ENABLE_CIPHER;

	/* Payload contains the key. */
	desc->control0 |= MXS_DCP_CONTROL0_PAYLOAD_KEY;

	if (rctx->enc)
		desc->control0 |= MXS_DCP_CONTROL0_CIPHER_ENCRYPT;
	if (init)
		desc->control0 |= MXS_DCP_CONTROL0_CIPHER_INIT;

	desc->control1 = MXS_DCP_CONTROL1_CIPHER_SELECT_AES128;

	if (rctx->ecb)
		desc->control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_ECB;
	else
		desc->control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_CBC;

	desc->next_cmd_addr = 0;
	desc->source = src_phys;
	desc->destination = dst_phys;
	desc->size = actx->fill;
	desc->payload = key_phys;
	desc->status = 0;

	ret = mxs_dcp_start_dma(actx);

	/* Unwind the mappings in reverse order of creation. */
aes_done_run:
	dma_unmap_single(sdcp->dev, dst_phys, DCP_BUF_SZ, DMA_FROM_DEVICE);
err_dst:
	dma_unmap_single(sdcp->dev, src_phys, DCP_BUF_SZ, DMA_TO_DEVICE);
err_src:
	dma_unmap_single(sdcp->dev, key_phys, 2 * AES_KEYSIZE_128,
			 DMA_TO_DEVICE);

	return ret;
}
29215b59e7cSMarek Vasut
/*
 * Process one queued AES request: walk req->src, copy data into the
 * bounce buffer in chunks of at most DCP_BUF_SZ, run the engine on each
 * full (or final) chunk, and scatter the output back into req->dst.
 * For CBC, the last ciphertext block is copied back into req->iv so that
 * a subsequent request can continue the chain.
 */
static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
{
	struct dcp *sdcp = global_sdcp;

	struct skcipher_request *req = skcipher_request_cast(arq);
	struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm);
	struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);

	struct scatterlist *dst = req->dst;
	struct scatterlist *src = req->src;
	int dst_nents = sg_nents(dst);

	const int out_off = DCP_BUF_SZ;
	uint8_t *in_buf = sdcp->coh->aes_in_buf;
	uint8_t *out_buf = sdcp->coh->aes_out_buf;

	uint32_t dst_off = 0;		/* write offset into req->dst */
	uint8_t *src_buf = NULL;
	uint32_t last_out_len = 0;	/* size of the last submitted chunk */

	uint8_t *key = sdcp->coh->aes_key;

	int ret = 0;
	unsigned int i, len, clen, tlen = 0;
	int init = 0;
	bool limit_hit = false;		/* true once cryptlen bytes consumed */

	actx->fill = 0;

	/* Copy the key from the temporary location. */
	memcpy(key, actx->key, actx->key_len);

	if (!rctx->ecb) {
		/* Copy the CBC IV just past the key. */
		memcpy(key + AES_KEYSIZE_128, req->iv, AES_KEYSIZE_128);
		/* CBC needs the INIT set. */
		init = 1;
	} else {
		memset(key + AES_KEYSIZE_128, 0, AES_KEYSIZE_128);
	}

	for_each_sg(req->src, src, sg_nents(req->src), i) {
		src_buf = sg_virt(src);
		len = sg_dma_len(src);
		tlen += len;
		/* Never consume more than req->cryptlen bytes in total. */
		limit_hit = tlen > req->cryptlen;

		if (limit_hit)
			len = req->cryptlen - (tlen - len);

		do {
			/* Clamp the copy to the remaining buffer space. */
			if (actx->fill + len > out_off)
				clen = out_off - actx->fill;
			else
				clen = len;

			memcpy(in_buf + actx->fill, src_buf, clen);
			len -= clen;
			src_buf += clen;
			actx->fill += clen;

			/*
			 * If we filled the buffer or this is the last SG,
			 * submit the buffer.
			 */
			if (actx->fill == out_off || sg_is_last(src) ||
			    limit_hit) {
				ret = mxs_dcp_run_aes(actx, req, init);
				if (ret)
					return ret;
				/* Only the first CBC chunk carries INIT. */
				init = 0;

				sg_pcopy_from_buffer(dst, dst_nents, out_buf,
						     actx->fill, dst_off);
				dst_off += actx->fill;
				last_out_len = actx->fill;
				actx->fill = 0;
			}
		} while (len);

		if (limit_hit)
			break;
	}

	/*
	 * Copy the IV for CBC for chaining: the last ciphertext block is
	 * the engine output when encrypting, or the last input block when
	 * decrypting (both still sit in the bounce buffers).
	 */
	if (!rctx->ecb) {
		if (rctx->enc)
			memcpy(req->iv, out_buf+(last_out_len-AES_BLOCK_SIZE),
			       AES_BLOCK_SIZE);
		else
			memcpy(req->iv, in_buf+(last_out_len-AES_BLOCK_SIZE),
			       AES_BLOCK_SIZE);
	}

	return ret;
}
38915b59e7cSMarek Vasut
/*
 * Worker thread for the CRYPTO channel: dequeues AES requests and
 * processes them synchronously, sleeping when the queue is empty.
 *
 * NOTE: the ordering is deliberate — the task state is set to
 * TASK_INTERRUPTIBLE *before* the queue is checked, so that a
 * wake_up_process() from mxs_dcp_aes_enqueue() racing with the check
 * cannot be lost (it would just make the later schedule() return
 * immediately).
 */
static int dcp_chan_thread_aes(void *data)
{
	struct dcp *sdcp = global_sdcp;
	const int chan = DCP_CHAN_CRYPTO;

	struct crypto_async_request *backlog;
	struct crypto_async_request *arq;

	int ret;

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);

		spin_lock(&sdcp->lock[chan]);
		backlog = crypto_get_backlog(&sdcp->queue[chan]);
		arq = crypto_dequeue_request(&sdcp->queue[chan]);
		spin_unlock(&sdcp->lock[chan]);

		if (!backlog && !arq) {
			/* Nothing queued — sleep until an enqueue wakes us. */
			schedule();
			continue;
		}

		set_current_state(TASK_RUNNING);

		/* Notify a backlogged submitter that its request is in flight. */
		if (backlog)
			crypto_request_complete(backlog, -EINPROGRESS);

		if (arq) {
			ret = mxs_dcp_aes_block_crypt(arq);
			crypto_request_complete(arq, ret);
		}
	}

	return 0;
}
42615b59e7cSMarek Vasut
mxs_dcp_block_fallback(struct skcipher_request * req,int enc)4279acb3247SArd Biesheuvel static int mxs_dcp_block_fallback(struct skcipher_request *req, int enc)
42815b59e7cSMarek Vasut {
4299acb3247SArd Biesheuvel struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
430c9598d4eSArd Biesheuvel struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
4319acb3247SArd Biesheuvel struct dcp_async_ctx *ctx = crypto_skcipher_ctx(tfm);
43215b59e7cSMarek Vasut int ret;
43315b59e7cSMarek Vasut
434c9598d4eSArd Biesheuvel skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
435c9598d4eSArd Biesheuvel skcipher_request_set_callback(&rctx->fallback_req, req->base.flags,
436c9598d4eSArd Biesheuvel req->base.complete, req->base.data);
437c9598d4eSArd Biesheuvel skcipher_request_set_crypt(&rctx->fallback_req, req->src, req->dst,
4389acb3247SArd Biesheuvel req->cryptlen, req->iv);
43915b59e7cSMarek Vasut
44015b59e7cSMarek Vasut if (enc)
441c9598d4eSArd Biesheuvel ret = crypto_skcipher_encrypt(&rctx->fallback_req);
44215b59e7cSMarek Vasut else
443c9598d4eSArd Biesheuvel ret = crypto_skcipher_decrypt(&rctx->fallback_req);
44415b59e7cSMarek Vasut
44515b59e7cSMarek Vasut return ret;
44615b59e7cSMarek Vasut }
44715b59e7cSMarek Vasut
mxs_dcp_aes_enqueue(struct skcipher_request * req,int enc,int ecb)4489acb3247SArd Biesheuvel static int mxs_dcp_aes_enqueue(struct skcipher_request *req, int enc, int ecb)
44915b59e7cSMarek Vasut {
45015b59e7cSMarek Vasut struct dcp *sdcp = global_sdcp;
45115b59e7cSMarek Vasut struct crypto_async_request *arq = &req->base;
45215b59e7cSMarek Vasut struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm);
4539acb3247SArd Biesheuvel struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
45415b59e7cSMarek Vasut int ret;
45515b59e7cSMarek Vasut
45615b59e7cSMarek Vasut if (unlikely(actx->key_len != AES_KEYSIZE_128))
45715b59e7cSMarek Vasut return mxs_dcp_block_fallback(req, enc);
45815b59e7cSMarek Vasut
4592021abaaSMarek Vasut rctx->enc = enc;
4602021abaaSMarek Vasut rctx->ecb = ecb;
46115b59e7cSMarek Vasut actx->chan = DCP_CHAN_CRYPTO;
46215b59e7cSMarek Vasut
463d80771c0SLeonard Crestez spin_lock(&sdcp->lock[actx->chan]);
46415b59e7cSMarek Vasut ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
465d80771c0SLeonard Crestez spin_unlock(&sdcp->lock[actx->chan]);
46615b59e7cSMarek Vasut
46715b59e7cSMarek Vasut wake_up_process(sdcp->thread[actx->chan]);
46815b59e7cSMarek Vasut
469dbbaffefSYueHaibing return ret;
47015b59e7cSMarek Vasut }
47115b59e7cSMarek Vasut
/* AES-ECB decrypt entry point (enc = 0, ecb = 1). */
static int mxs_dcp_aes_ecb_decrypt(struct skcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 0, 1);
}
47615b59e7cSMarek Vasut
/* AES-ECB encrypt entry point (enc = 1, ecb = 1). */
static int mxs_dcp_aes_ecb_encrypt(struct skcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 1, 1);
}
48115b59e7cSMarek Vasut
/* AES-CBC decrypt entry point (enc = 0, ecb = 0). */
static int mxs_dcp_aes_cbc_decrypt(struct skcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 0, 0);
}
48615b59e7cSMarek Vasut
/* AES-CBC encrypt entry point (enc = 1, ecb = 0). */
static int mxs_dcp_aes_cbc_encrypt(struct skcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 1, 0);
}
49115b59e7cSMarek Vasut
/*
 * Set the AES key for this tfm.  128-bit keys are stored in a per-tfm
 * buffer for the hardware; any other length is delegated to the
 * software fallback.
 */
static int mxs_dcp_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
			      unsigned int len)
{
	struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm);

	/*
	 * AES 128 is supported by the hardware, store key into temporary
	 * buffer and exit. We must use the temporary buffer here, since
	 * there can still be an operation in progress.
	 */
	actx->key_len = len;
	if (len == AES_KEYSIZE_128) {
		memcpy(actx->key, key, len);
		return 0;
	}

	/*
	 * If the requested AES key size is not supported by the hardware,
	 * but is supported by in-kernel software implementation, we use
	 * software fallback.
	 */
	crypto_skcipher_clear_flags(actx->fallback, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(actx->fallback,
				  tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
	return crypto_skcipher_setkey(actx->fallback, key, len);
}
51815b59e7cSMarek Vasut
mxs_dcp_aes_fallback_init_tfm(struct crypto_skcipher * tfm)5199acb3247SArd Biesheuvel static int mxs_dcp_aes_fallback_init_tfm(struct crypto_skcipher *tfm)
52015b59e7cSMarek Vasut {
5219acb3247SArd Biesheuvel const char *name = crypto_tfm_alg_name(crypto_skcipher_tfm(tfm));
5229acb3247SArd Biesheuvel struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm);
523c9598d4eSArd Biesheuvel struct crypto_skcipher *blk;
52415b59e7cSMarek Vasut
525c9598d4eSArd Biesheuvel blk = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
52615b59e7cSMarek Vasut if (IS_ERR(blk))
52715b59e7cSMarek Vasut return PTR_ERR(blk);
52815b59e7cSMarek Vasut
52915b59e7cSMarek Vasut actx->fallback = blk;
530c9598d4eSArd Biesheuvel crypto_skcipher_set_reqsize(tfm, sizeof(struct dcp_aes_req_ctx) +
531c9598d4eSArd Biesheuvel crypto_skcipher_reqsize(blk));
53215b59e7cSMarek Vasut return 0;
53315b59e7cSMarek Vasut }
53415b59e7cSMarek Vasut
/* tfm exit: free the fallback allocated in mxs_dcp_aes_fallback_init_tfm(). */
static void mxs_dcp_aes_fallback_exit_tfm(struct crypto_skcipher *tfm)
{
	struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm);

	crypto_free_skcipher(actx->fallback);
}
54115b59e7cSMarek Vasut
54215b59e7cSMarek Vasut /*
54315b59e7cSMarek Vasut * Hashing (SHA1/SHA256)
54415b59e7cSMarek Vasut */
/*
 * Hashing (SHA1/SHA256)
 */
/*
 * Run one SHA1/SHA256 engine pass over the actx->fill bytes staged in
 * sha_in_buf.  On the final block (rctx->fini) the raw digest is written
 * by the hardware into sha_out_buf.  An empty init+fini request is
 * special-cased: a precomputed null hash is copied instead, matching the
 * hardware behavior on i.MX6SL/ULL.
 *
 * Returns 0 on success or a negative errno (DMA mapping failure or an
 * engine error from mxs_dcp_start_dma()).
 */
static int mxs_dcp_run_sha(struct ahash_request *req)
{
	struct dcp *sdcp = global_sdcp;
	int ret;

	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];

	dma_addr_t digest_phys = 0;
	dma_addr_t buf_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_in_buf,
					     DCP_BUF_SZ, DMA_TO_DEVICE);

	ret = dma_mapping_error(sdcp->dev, buf_phys);
	if (ret)
		return ret;

	/* Fill in the DMA descriptor. */
	desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE |
			 MXS_DCP_CONTROL0_INTERRUPT |
			 MXS_DCP_CONTROL0_ENABLE_HASH;
	if (rctx->init)
		desc->control0 |= MXS_DCP_CONTROL0_HASH_INIT;

	desc->control1 = actx->alg;	/* SHA1 or SHA256 select bits */
	desc->next_cmd_addr = 0;
	desc->source = buf_phys;
	desc->destination = 0;
	desc->size = actx->fill;
	desc->payload = 0;
	desc->status = 0;

	/*
	 * Align driver with hw behavior when generating null hashes
	 */
	if (rctx->init && rctx->fini && desc->size == 0) {
		struct hash_alg_common *halg = crypto_hash_alg_common(tfm);
		const uint8_t *sha_buf =
			(actx->alg == MXS_DCP_CONTROL1_HASH_SELECT_SHA1) ?
			sha1_null_hash : sha256_null_hash;
		memcpy(sdcp->coh->sha_out_buf, sha_buf, halg->digestsize);
		ret = 0;
		goto done_run;
	}

	/* Set HASH_TERM bit for last transfer block. */
	if (rctx->fini) {
		/* The digest payload is only mapped for the final block. */
		digest_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_out_buf,
					     DCP_SHA_PAY_SZ, DMA_FROM_DEVICE);
		ret = dma_mapping_error(sdcp->dev, digest_phys);
		if (ret)
			goto done_run;

		desc->control0 |= MXS_DCP_CONTROL0_HASH_TERM;
		desc->payload = digest_phys;
	}

	ret = mxs_dcp_start_dma(actx);

	if (rctx->fini)
		dma_unmap_single(sdcp->dev, digest_phys, DCP_SHA_PAY_SZ,
				 DMA_FROM_DEVICE);

done_run:
	dma_unmap_single(sdcp->dev, buf_phys, DCP_BUF_SZ, DMA_TO_DEVICE);

	return ret;
}
61415b59e7cSMarek Vasut
/*
 * Copy the request's scatterlist into the SHA bounce buffer, running the
 * engine each time the buffer fills.  If this is a final update
 * (rctx->fini), hash the remaining bytes and copy the byte-reversed
 * hardware digest into req->result.
 */
static int dcp_sha_req_to_buf(struct crypto_async_request *arq)
{
	struct dcp *sdcp = global_sdcp;

	struct ahash_request *req = ahash_request_cast(arq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct hash_alg_common *halg = crypto_hash_alg_common(tfm);

	uint8_t *in_buf = sdcp->coh->sha_in_buf;
	uint8_t *out_buf = sdcp->coh->sha_out_buf;

	struct scatterlist *src;

	unsigned int i, len, clen, oft = 0;
	int ret;

	/*
	 * Temporarily clear the fini flag so intermediate full-buffer runs
	 * below do not terminate the hash; restored before the last run.
	 */
	int fin = rctx->fini;
	if (fin)
		rctx->fini = 0;

	src = req->src;
	len = req->nbytes;

	while (len) {
		/* Clamp the copy to the remaining bounce-buffer space. */
		if (actx->fill + len > DCP_BUF_SZ)
			clen = DCP_BUF_SZ - actx->fill;
		else
			clen = len;

		scatterwalk_map_and_copy(in_buf + actx->fill, src, oft, clen,
					 0);

		len -= clen;
		oft += clen;
		actx->fill += clen;

		/*
		 * If we filled the buffer and still have some
		 * more data, submit the buffer.
		 */
		if (len && actx->fill == DCP_BUF_SZ) {
			ret = mxs_dcp_run_sha(req);
			if (ret)
				return ret;
			actx->fill = 0;
			rctx->init = 0;	/* HASH_INIT only on the first run */
		}
	}

	if (fin) {
		rctx->fini = 1;

		/* Submit whatever is left. */
		if (!req->result)
			return -EINVAL;

		ret = mxs_dcp_run_sha(req);
		if (ret)
			return ret;

		actx->fill = 0;

		/* For some reason the result is flipped */
		for (i = 0; i < halg->digestsize; i++)
			req->result[i] = out_buf[halg->digestsize - i - 1];
	}

	return 0;
}
68615b59e7cSMarek Vasut
/*
 * Worker kthread for the SHA channel: dequeues crypto requests and runs
 * them through dcp_sha_req_to_buf(), completing each request with its
 * result code.  Sleeps when the queue is empty; woken by
 * dcp_sha_update_fx() and stopped by mxs_dcp_remove().
 */
static int dcp_chan_thread_sha(void *data)
{
	struct dcp *sdcp = global_sdcp;
	const int chan = DCP_CHAN_HASH_SHA;

	struct crypto_async_request *backlog;
	struct crypto_async_request *arq;
	int ret;

	while (!kthread_should_stop()) {
		/*
		 * Mark ourselves sleepable *before* checking the queue so a
		 * concurrent wake_up_process() after the check is not lost.
		 */
		set_current_state(TASK_INTERRUPTIBLE);

		spin_lock(&sdcp->lock[chan]);
		backlog = crypto_get_backlog(&sdcp->queue[chan]);
		arq = crypto_dequeue_request(&sdcp->queue[chan]);
		spin_unlock(&sdcp->lock[chan]);

		if (!backlog && !arq) {
			schedule();
			continue;
		}

		set_current_state(TASK_RUNNING);

		/* Tell backlogged submitters their request is now queued. */
		if (backlog)
			crypto_request_complete(backlog, -EINPROGRESS);

		if (arq) {
			ret = dcp_sha_req_to_buf(arq);
			crypto_request_complete(arq, ret);
		}
	}

	return 0;
}
72215b59e7cSMarek Vasut
dcp_sha_init(struct ahash_request * req)72315b59e7cSMarek Vasut static int dcp_sha_init(struct ahash_request *req)
72415b59e7cSMarek Vasut {
72515b59e7cSMarek Vasut struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
72615b59e7cSMarek Vasut struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
72715b59e7cSMarek Vasut
72815b59e7cSMarek Vasut struct hash_alg_common *halg = crypto_hash_alg_common(tfm);
72915b59e7cSMarek Vasut
73015b59e7cSMarek Vasut /*
73115b59e7cSMarek Vasut * Start hashing session. The code below only inits the
73215b59e7cSMarek Vasut * hashing session context, nothing more.
73315b59e7cSMarek Vasut */
73415b59e7cSMarek Vasut memset(actx, 0, sizeof(*actx));
73515b59e7cSMarek Vasut
73615b59e7cSMarek Vasut if (strcmp(halg->base.cra_name, "sha1") == 0)
73715b59e7cSMarek Vasut actx->alg = MXS_DCP_CONTROL1_HASH_SELECT_SHA1;
73815b59e7cSMarek Vasut else
73915b59e7cSMarek Vasut actx->alg = MXS_DCP_CONTROL1_HASH_SELECT_SHA256;
74015b59e7cSMarek Vasut
74115b59e7cSMarek Vasut actx->fill = 0;
74215b59e7cSMarek Vasut actx->hot = 0;
74315b59e7cSMarek Vasut actx->chan = DCP_CHAN_HASH_SHA;
74415b59e7cSMarek Vasut
74515b59e7cSMarek Vasut mutex_init(&actx->mutex);
74615b59e7cSMarek Vasut
74715b59e7cSMarek Vasut return 0;
74815b59e7cSMarek Vasut }
74915b59e7cSMarek Vasut
/*
 * Common worker for update/final/finup/digest: enqueue @req on the SHA
 * channel queue and wake the channel kthread.
 *
 * @fini: non-zero when this is the trailing request of the stream, i.e.
 *        the digest must be finalized after this data.
 *
 * Returns the crypto_enqueue_request() status (typically -EINPROGRESS,
 * or -EBUSY for a backlogged request).
 */
static int dcp_sha_update_fx(struct ahash_request *req, int fini)
{
	struct dcp *sdcp = global_sdcp;

	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);

	int ret;

	/*
	 * Ignore requests that have no data in them and are not
	 * the trailing requests in the stream of requests.
	 */
	if (!req->nbytes && !fini)
		return 0;

	/* Serialize per-tfm submissions; the queue itself is spinlocked. */
	mutex_lock(&actx->mutex);

	rctx->fini = fini;

	/* First submission of the session: request hardware hash init. */
	if (!actx->hot) {
		actx->hot = 1;
		rctx->init = 1;
	}

	spin_lock(&sdcp->lock[actx->chan]);
	ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
	spin_unlock(&sdcp->lock[actx->chan]);

	wake_up_process(sdcp->thread[actx->chan]);
	mutex_unlock(&actx->mutex);

	return ret;
}
78515b59e7cSMarek Vasut
/* ahash .update: queue more data without finalizing the digest. */
static int dcp_sha_update(struct ahash_request *req)
{
	return dcp_sha_update_fx(req, 0);
}
79015b59e7cSMarek Vasut
/* ahash .final: no new data, just finalize and emit the digest. */
static int dcp_sha_final(struct ahash_request *req)
{
	ahash_request_set_crypt(req, NULL, req->result, 0);
	/*
	 * NOTE(review): ahash_request_set_crypt() above already stores
	 * nbytes = 0, so this assignment looks redundant — confirm before
	 * removing.
	 */
	req->nbytes = 0;
	return dcp_sha_update_fx(req, 1);
}
79715b59e7cSMarek Vasut
/* ahash .finup: hash the remaining data and finalize in one call. */
static int dcp_sha_finup(struct ahash_request *req)
{
	return dcp_sha_update_fx(req, 1);
}
80215b59e7cSMarek Vasut
/* ahash .digest: one-shot hash = init followed by finup. */
static int dcp_sha_digest(struct ahash_request *req)
{
	int ret = dcp_sha_init(req);

	return ret ? ret : dcp_sha_finup(req);
}
81315b59e7cSMarek Vasut
dcp_sha_import(struct ahash_request * req,const void * in)814ea9e7568SDan Douglass static int dcp_sha_import(struct ahash_request *req, const void *in)
8159190b6fdSKamil Konieczny {
816ea9e7568SDan Douglass struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
817ea9e7568SDan Douglass struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
818ea9e7568SDan Douglass struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
819ea9e7568SDan Douglass const struct dcp_export_state *export = in;
820ea9e7568SDan Douglass
821ea9e7568SDan Douglass memset(rctx, 0, sizeof(struct dcp_sha_req_ctx));
822ea9e7568SDan Douglass memset(actx, 0, sizeof(struct dcp_async_ctx));
823ea9e7568SDan Douglass memcpy(rctx, &export->req_ctx, sizeof(struct dcp_sha_req_ctx));
824ea9e7568SDan Douglass memcpy(actx, &export->async_ctx, sizeof(struct dcp_async_ctx));
825ea9e7568SDan Douglass
826ea9e7568SDan Douglass return 0;
8279190b6fdSKamil Konieczny }
8289190b6fdSKamil Konieczny
dcp_sha_export(struct ahash_request * req,void * out)829ea9e7568SDan Douglass static int dcp_sha_export(struct ahash_request *req, void *out)
8309190b6fdSKamil Konieczny {
831ea9e7568SDan Douglass struct dcp_sha_req_ctx *rctx_state = ahash_request_ctx(req);
832ea9e7568SDan Douglass struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
833ea9e7568SDan Douglass struct dcp_async_ctx *actx_state = crypto_ahash_ctx(tfm);
834ea9e7568SDan Douglass struct dcp_export_state *export = out;
835ea9e7568SDan Douglass
836ea9e7568SDan Douglass memcpy(&export->req_ctx, rctx_state, sizeof(struct dcp_sha_req_ctx));
837ea9e7568SDan Douglass memcpy(&export->async_ctx, actx_state, sizeof(struct dcp_async_ctx));
838ea9e7568SDan Douglass
839ea9e7568SDan Douglass return 0;
8409190b6fdSKamil Konieczny }
8419190b6fdSKamil Konieczny
dcp_sha_cra_init(struct crypto_tfm * tfm)84215b59e7cSMarek Vasut static int dcp_sha_cra_init(struct crypto_tfm *tfm)
84315b59e7cSMarek Vasut {
84415b59e7cSMarek Vasut crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
84515b59e7cSMarek Vasut sizeof(struct dcp_sha_req_ctx));
84615b59e7cSMarek Vasut return 0;
84715b59e7cSMarek Vasut }
84815b59e7cSMarek Vasut
/* tfm .cra_exit: nothing to tear down — no per-tfm resources are held. */
static void dcp_sha_cra_exit(struct crypto_tfm *tfm)
{
}
85215b59e7cSMarek Vasut
/* AES 128 ECB and AES 128 CBC */
static struct skcipher_alg dcp_aes_algs[] = {
	{
		/* AES-ECB backed by the DCP, with software fallback. */
		.base.cra_name		= "ecb(aes)",
		.base.cra_driver_name	= "ecb-aes-dcp",
		.base.cra_priority	= 400,
		/* DCP DMA needs 16-byte aligned buffers. */
		.base.cra_alignmask	= 15,
		.base.cra_flags		= CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_NEED_FALLBACK,
		.base.cra_blocksize	= AES_BLOCK_SIZE,
		.base.cra_ctxsize	= sizeof(struct dcp_async_ctx),
		.base.cra_module	= THIS_MODULE,

		.min_keysize		= AES_MIN_KEY_SIZE,
		.max_keysize		= AES_MAX_KEY_SIZE,
		.setkey			= mxs_dcp_aes_setkey,
		.encrypt		= mxs_dcp_aes_ecb_encrypt,
		.decrypt		= mxs_dcp_aes_ecb_decrypt,
		.init			= mxs_dcp_aes_fallback_init_tfm,
		.exit			= mxs_dcp_aes_fallback_exit_tfm,
	}, {
		/* AES-CBC backed by the DCP, with software fallback. */
		.base.cra_name		= "cbc(aes)",
		.base.cra_driver_name	= "cbc-aes-dcp",
		.base.cra_priority	= 400,
		.base.cra_alignmask	= 15,
		.base.cra_flags		= CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_NEED_FALLBACK,
		.base.cra_blocksize	= AES_BLOCK_SIZE,
		.base.cra_ctxsize	= sizeof(struct dcp_async_ctx),
		.base.cra_module	= THIS_MODULE,

		.min_keysize		= AES_MIN_KEY_SIZE,
		.max_keysize		= AES_MAX_KEY_SIZE,
		.setkey			= mxs_dcp_aes_setkey,
		.encrypt		= mxs_dcp_aes_cbc_encrypt,
		.decrypt		= mxs_dcp_aes_cbc_decrypt,
		.ivsize			= AES_BLOCK_SIZE,
		.init			= mxs_dcp_aes_fallback_init_tfm,
		.exit			= mxs_dcp_aes_fallback_exit_tfm,
	},
};
89415b59e7cSMarek Vasut
/* SHA1 */
static struct ahash_alg dcp_sha1_alg = {
	.init	= dcp_sha_init,
	.update	= dcp_sha_update,
	.final	= dcp_sha_final,
	.finup	= dcp_sha_finup,
	.digest	= dcp_sha_digest,
	.import = dcp_sha_import,
	.export = dcp_sha_export,
	.halg	= {
		.digestsize	= SHA1_DIGEST_SIZE,
		.statesize	= sizeof(struct dcp_export_state),
		.base		= {
			.cra_name		= "sha1",
			.cra_driver_name	= "sha1-dcp",
			.cra_priority		= 400,
			/* DCP_ALIGNMENT (64-byte) aligned buffers. */
			.cra_alignmask		= 63,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA1_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct dcp_async_ctx),
			.cra_module		= THIS_MODULE,
			.cra_init		= dcp_sha_cra_init,
			.cra_exit		= dcp_sha_cra_exit,
		},
	},
};
92115b59e7cSMarek Vasut
/* SHA256 */
static struct ahash_alg dcp_sha256_alg = {
	.init	= dcp_sha_init,
	.update	= dcp_sha_update,
	.final	= dcp_sha_final,
	.finup	= dcp_sha_finup,
	.digest	= dcp_sha_digest,
	.import = dcp_sha_import,
	.export = dcp_sha_export,
	.halg	= {
		.digestsize	= SHA256_DIGEST_SIZE,
		.statesize	= sizeof(struct dcp_export_state),
		.base		= {
			.cra_name		= "sha256",
			.cra_driver_name	= "sha256-dcp",
			.cra_priority		= 400,
			/* DCP_ALIGNMENT (64-byte) aligned buffers. */
			.cra_alignmask		= 63,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA256_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct dcp_async_ctx),
			.cra_module		= THIS_MODULE,
			.cra_init		= dcp_sha_cra_init,
			.cra_exit		= dcp_sha_cra_exit,
		},
	},
};
94815b59e7cSMarek Vasut
mxs_dcp_irq(int irq,void * context)94915b59e7cSMarek Vasut static irqreturn_t mxs_dcp_irq(int irq, void *context)
95015b59e7cSMarek Vasut {
95115b59e7cSMarek Vasut struct dcp *sdcp = context;
95215b59e7cSMarek Vasut uint32_t stat;
95315b59e7cSMarek Vasut int i;
95415b59e7cSMarek Vasut
95515b59e7cSMarek Vasut stat = readl(sdcp->base + MXS_DCP_STAT);
95615b59e7cSMarek Vasut stat &= MXS_DCP_STAT_IRQ_MASK;
95715b59e7cSMarek Vasut if (!stat)
95815b59e7cSMarek Vasut return IRQ_NONE;
95915b59e7cSMarek Vasut
96015b59e7cSMarek Vasut /* Clear the interrupts. */
96115b59e7cSMarek Vasut writel(stat, sdcp->base + MXS_DCP_STAT_CLR);
96215b59e7cSMarek Vasut
96315b59e7cSMarek Vasut /* Complete the DMA requests that finished. */
96415b59e7cSMarek Vasut for (i = 0; i < DCP_MAX_CHANS; i++)
96515b59e7cSMarek Vasut if (stat & (1 << i))
96615b59e7cSMarek Vasut complete(&sdcp->completion[i]);
96715b59e7cSMarek Vasut
96815b59e7cSMarek Vasut return IRQ_HANDLED;
96915b59e7cSMarek Vasut }
97015b59e7cSMarek Vasut
mxs_dcp_probe(struct platform_device * pdev)97115b59e7cSMarek Vasut static int mxs_dcp_probe(struct platform_device *pdev)
97215b59e7cSMarek Vasut {
97315b59e7cSMarek Vasut struct device *dev = &pdev->dev;
97415b59e7cSMarek Vasut struct dcp *sdcp = NULL;
97515b59e7cSMarek Vasut int i, ret;
97615b59e7cSMarek Vasut int dcp_vmi_irq, dcp_irq;
97715b59e7cSMarek Vasut
97815b59e7cSMarek Vasut if (global_sdcp) {
97915b59e7cSMarek Vasut dev_err(dev, "Only one DCP instance allowed!\n");
9805fc8005bSFabio Estevam return -ENODEV;
98115b59e7cSMarek Vasut }
98215b59e7cSMarek Vasut
98315b59e7cSMarek Vasut dcp_vmi_irq = platform_get_irq(pdev, 0);
984514838e9SStephen Boyd if (dcp_vmi_irq < 0)
9855fc8005bSFabio Estevam return dcp_vmi_irq;
986d9588f87SFabio Estevam
98715b59e7cSMarek Vasut dcp_irq = platform_get_irq(pdev, 1);
988514838e9SStephen Boyd if (dcp_irq < 0)
9895fc8005bSFabio Estevam return dcp_irq;
99015b59e7cSMarek Vasut
99115b59e7cSMarek Vasut sdcp = devm_kzalloc(dev, sizeof(*sdcp), GFP_KERNEL);
9925fc8005bSFabio Estevam if (!sdcp)
9935fc8005bSFabio Estevam return -ENOMEM;
99415b59e7cSMarek Vasut
99515b59e7cSMarek Vasut sdcp->dev = dev;
996cec1caafSFabio Estevam sdcp->base = devm_platform_ioremap_resource(pdev, 0);
9975fc8005bSFabio Estevam if (IS_ERR(sdcp->base))
9985fc8005bSFabio Estevam return PTR_ERR(sdcp->base);
9995fc8005bSFabio Estevam
100015b59e7cSMarek Vasut
100115b59e7cSMarek Vasut ret = devm_request_irq(dev, dcp_vmi_irq, mxs_dcp_irq, 0,
100215b59e7cSMarek Vasut "dcp-vmi-irq", sdcp);
100315b59e7cSMarek Vasut if (ret) {
100415b59e7cSMarek Vasut dev_err(dev, "Failed to claim DCP VMI IRQ!\n");
10055fc8005bSFabio Estevam return ret;
100615b59e7cSMarek Vasut }
100715b59e7cSMarek Vasut
100815b59e7cSMarek Vasut ret = devm_request_irq(dev, dcp_irq, mxs_dcp_irq, 0,
100915b59e7cSMarek Vasut "dcp-irq", sdcp);
101015b59e7cSMarek Vasut if (ret) {
101115b59e7cSMarek Vasut dev_err(dev, "Failed to claim DCP IRQ!\n");
10125fc8005bSFabio Estevam return ret;
101315b59e7cSMarek Vasut }
101415b59e7cSMarek Vasut
101515b59e7cSMarek Vasut /* Allocate coherent helper block. */
10161a7c6856SMarek Vasut sdcp->coh = devm_kzalloc(dev, sizeof(*sdcp->coh) + DCP_ALIGNMENT,
10171a7c6856SMarek Vasut GFP_KERNEL);
10185fc8005bSFabio Estevam if (!sdcp->coh)
10195fc8005bSFabio Estevam return -ENOMEM;
102015b59e7cSMarek Vasut
10211a7c6856SMarek Vasut /* Re-align the structure so it fits the DCP constraints. */
10221a7c6856SMarek Vasut sdcp->coh = PTR_ALIGN(sdcp->coh, DCP_ALIGNMENT);
10231a7c6856SMarek Vasut
102457f00289SLeonard Crestez /* DCP clock is optional, only used on some SOCs */
1025*d6cb9ab4SChristophe JAILLET sdcp->dcp_clk = devm_clk_get_optional_enabled(dev, "dcp");
1026*d6cb9ab4SChristophe JAILLET if (IS_ERR(sdcp->dcp_clk))
102757f00289SLeonard Crestez return PTR_ERR(sdcp->dcp_clk);
102815b59e7cSMarek Vasut
102957f00289SLeonard Crestez /* Restart the DCP block. */
103057f00289SLeonard Crestez ret = stmp_reset_block(sdcp->base);
103157f00289SLeonard Crestez if (ret) {
103257f00289SLeonard Crestez dev_err(dev, "Failed reset\n");
1033*d6cb9ab4SChristophe JAILLET return ret;
103457f00289SLeonard Crestez }
103557f00289SLeonard Crestez
103615b59e7cSMarek Vasut /* Initialize control register. */
103715b59e7cSMarek Vasut writel(MXS_DCP_CTRL_GATHER_RESIDUAL_WRITES |
103815b59e7cSMarek Vasut MXS_DCP_CTRL_ENABLE_CONTEXT_CACHING | 0xf,
103915b59e7cSMarek Vasut sdcp->base + MXS_DCP_CTRL);
104015b59e7cSMarek Vasut
104115b59e7cSMarek Vasut /* Enable all DCP DMA channels. */
104215b59e7cSMarek Vasut writel(MXS_DCP_CHANNELCTRL_ENABLE_CHANNEL_MASK,
104315b59e7cSMarek Vasut sdcp->base + MXS_DCP_CHANNELCTRL);
104415b59e7cSMarek Vasut
104515b59e7cSMarek Vasut /*
104615b59e7cSMarek Vasut * We do not enable context switching. Give the context buffer a
104715b59e7cSMarek Vasut * pointer to an illegal address so if context switching is
104815b59e7cSMarek Vasut * inadvertantly enabled, the DCP will return an error instead of
104915b59e7cSMarek Vasut * trashing good memory. The DCP DMA cannot access ROM, so any ROM
105015b59e7cSMarek Vasut * address will do.
105115b59e7cSMarek Vasut */
105215b59e7cSMarek Vasut writel(0xffff0000, sdcp->base + MXS_DCP_CONTEXT);
105315b59e7cSMarek Vasut for (i = 0; i < DCP_MAX_CHANS; i++)
105415b59e7cSMarek Vasut writel(0xffffffff, sdcp->base + MXS_DCP_CH_N_STAT_CLR(i));
105515b59e7cSMarek Vasut writel(0xffffffff, sdcp->base + MXS_DCP_STAT_CLR);
105615b59e7cSMarek Vasut
105715b59e7cSMarek Vasut global_sdcp = sdcp;
105815b59e7cSMarek Vasut
105915b59e7cSMarek Vasut platform_set_drvdata(pdev, sdcp);
106015b59e7cSMarek Vasut
106115b59e7cSMarek Vasut for (i = 0; i < DCP_MAX_CHANS; i++) {
1062d80771c0SLeonard Crestez spin_lock_init(&sdcp->lock[i]);
106315b59e7cSMarek Vasut init_completion(&sdcp->completion[i]);
106415b59e7cSMarek Vasut crypto_init_queue(&sdcp->queue[i], 50);
106515b59e7cSMarek Vasut }
106615b59e7cSMarek Vasut
106715b59e7cSMarek Vasut /* Create the SHA and AES handler threads. */
106815b59e7cSMarek Vasut sdcp->thread[DCP_CHAN_HASH_SHA] = kthread_run(dcp_chan_thread_sha,
106915b59e7cSMarek Vasut NULL, "mxs_dcp_chan/sha");
107015b59e7cSMarek Vasut if (IS_ERR(sdcp->thread[DCP_CHAN_HASH_SHA])) {
107115b59e7cSMarek Vasut dev_err(dev, "Error starting SHA thread!\n");
107257f00289SLeonard Crestez ret = PTR_ERR(sdcp->thread[DCP_CHAN_HASH_SHA]);
1073*d6cb9ab4SChristophe JAILLET return ret;
107415b59e7cSMarek Vasut }
107515b59e7cSMarek Vasut
107615b59e7cSMarek Vasut sdcp->thread[DCP_CHAN_CRYPTO] = kthread_run(dcp_chan_thread_aes,
107715b59e7cSMarek Vasut NULL, "mxs_dcp_chan/aes");
107815b59e7cSMarek Vasut if (IS_ERR(sdcp->thread[DCP_CHAN_CRYPTO])) {
107915b59e7cSMarek Vasut dev_err(dev, "Error starting SHA thread!\n");
108015b59e7cSMarek Vasut ret = PTR_ERR(sdcp->thread[DCP_CHAN_CRYPTO]);
108115b59e7cSMarek Vasut goto err_destroy_sha_thread;
108215b59e7cSMarek Vasut }
108315b59e7cSMarek Vasut
108415b59e7cSMarek Vasut /* Register the various crypto algorithms. */
108515b59e7cSMarek Vasut sdcp->caps = readl(sdcp->base + MXS_DCP_CAPABILITY1);
108615b59e7cSMarek Vasut
108715b59e7cSMarek Vasut if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128) {
10889acb3247SArd Biesheuvel ret = crypto_register_skciphers(dcp_aes_algs,
108915b59e7cSMarek Vasut ARRAY_SIZE(dcp_aes_algs));
109015b59e7cSMarek Vasut if (ret) {
109115b59e7cSMarek Vasut /* Failed to register algorithm. */
109215b59e7cSMarek Vasut dev_err(dev, "Failed to register AES crypto!\n");
109315b59e7cSMarek Vasut goto err_destroy_aes_thread;
109415b59e7cSMarek Vasut }
109515b59e7cSMarek Vasut }
109615b59e7cSMarek Vasut
109715b59e7cSMarek Vasut if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1) {
109815b59e7cSMarek Vasut ret = crypto_register_ahash(&dcp_sha1_alg);
109915b59e7cSMarek Vasut if (ret) {
110015b59e7cSMarek Vasut dev_err(dev, "Failed to register %s hash!\n",
110115b59e7cSMarek Vasut dcp_sha1_alg.halg.base.cra_name);
110215b59e7cSMarek Vasut goto err_unregister_aes;
110315b59e7cSMarek Vasut }
110415b59e7cSMarek Vasut }
110515b59e7cSMarek Vasut
110615b59e7cSMarek Vasut if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA256) {
110715b59e7cSMarek Vasut ret = crypto_register_ahash(&dcp_sha256_alg);
110815b59e7cSMarek Vasut if (ret) {
110915b59e7cSMarek Vasut dev_err(dev, "Failed to register %s hash!\n",
111015b59e7cSMarek Vasut dcp_sha256_alg.halg.base.cra_name);
111115b59e7cSMarek Vasut goto err_unregister_sha1;
111215b59e7cSMarek Vasut }
111315b59e7cSMarek Vasut }
111415b59e7cSMarek Vasut
111515b59e7cSMarek Vasut return 0;
111615b59e7cSMarek Vasut
111715b59e7cSMarek Vasut err_unregister_sha1:
111815b59e7cSMarek Vasut if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1)
111915b59e7cSMarek Vasut crypto_unregister_ahash(&dcp_sha1_alg);
112015b59e7cSMarek Vasut
112115b59e7cSMarek Vasut err_unregister_aes:
112215b59e7cSMarek Vasut if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128)
11239acb3247SArd Biesheuvel crypto_unregister_skciphers(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));
112415b59e7cSMarek Vasut
112515b59e7cSMarek Vasut err_destroy_aes_thread:
112615b59e7cSMarek Vasut kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);
112715b59e7cSMarek Vasut
112815b59e7cSMarek Vasut err_destroy_sha_thread:
112915b59e7cSMarek Vasut kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);
113057f00289SLeonard Crestez
113115b59e7cSMarek Vasut return ret;
113215b59e7cSMarek Vasut }
113315b59e7cSMarek Vasut
/*
 * Undo mxs_dcp_probe(): unregister whatever algorithms were registered
 * (capability-gated, mirroring probe) *before* stopping the worker
 * kthreads, so no new requests arrive while the threads wind down.
 * devm-managed resources (IRQs, MMIO, clock, memory) are released
 * automatically afterwards.
 */
static int mxs_dcp_remove(struct platform_device *pdev)
{
	struct dcp *sdcp = platform_get_drvdata(pdev);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA256)
		crypto_unregister_ahash(&dcp_sha256_alg);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1)
		crypto_unregister_ahash(&dcp_sha1_alg);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128)
		crypto_unregister_skciphers(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));

	kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);
	kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);

	platform_set_drvdata(pdev, NULL);

	/* Allow a future probe to bind again (see the singleton check there). */
	global_sdcp = NULL;

	return 0;
}
115615b59e7cSMarek Vasut
/* Devicetree match table: DCP as found on i.MX23 and i.MX28. */
static const struct of_device_id mxs_dcp_dt_ids[] = {
	{ .compatible = "fsl,imx23-dcp", .data = NULL, },
	{ .compatible = "fsl,imx28-dcp", .data = NULL, },
	{ /* sentinel */ }
};

MODULE_DEVICE_TABLE(of, mxs_dcp_dt_ids);
116415b59e7cSMarek Vasut
/* Platform driver glue and module metadata. */
static struct platform_driver mxs_dcp_driver = {
	.probe	= mxs_dcp_probe,
	.remove	= mxs_dcp_remove,
	.driver	= {
		.name		= "mxs-dcp",
		.of_match_table	= mxs_dcp_dt_ids,
	},
};

module_platform_driver(mxs_dcp_driver);

MODULE_AUTHOR("Marek Vasut <marex@denx.de>");
MODULE_DESCRIPTION("Freescale MXS DCP Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:mxs-dcp");
1180