// SPDX-License-Identifier: GPL-2.0
/*
 * sun8i-ss-prng.c - hardware cryptographic offloader for
 * Allwinner A80/A83T SoC
 *
 * Copyright (C) 2015-2020 Corentin Labbe <clabbe@baylibre.com>
 *
 * This file handles the PRNG found in the SS
 *
 * You can find a link to the datasheet in Documentation/arm/sunxi.rst
 */
#include "sun8i-ss.h"
#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>
#include <crypto/internal/rng.h>

int sun8i_ss_prng_seed(struct crypto_rng *tfm, const u8 *seed,
		       unsigned int slen)
{
	struct sun8i_ss_rng_tfm_ctx *ctx = crypto_rng_ctx(tfm);

	/* If the seed length changed, drop the old buffer and reallocate */
	if (ctx->seed && ctx->slen != slen) {
		kfree_sensitive(ctx->seed);
		ctx->slen = 0;
		ctx->seed = NULL;
	}
	if (!ctx->seed)
		ctx->seed = kmalloc(slen, GFP_KERNEL | GFP_DMA);
	if (!ctx->seed)
		return -ENOMEM;

	memcpy(ctx->seed, seed, slen);
	ctx->slen = slen;

	return 0;
}

int sun8i_ss_prng_init(struct crypto_tfm *tfm)
{
	struct sun8i_ss_rng_tfm_ctx *ctx = crypto_tfm_ctx(tfm);

	memset(ctx, 0, sizeof(struct sun8i_ss_rng_tfm_ctx));
	return 0;
}

void sun8i_ss_prng_exit(struct crypto_tfm *tfm)
{
	struct sun8i_ss_rng_tfm_ctx *ctx = crypto_tfm_ctx(tfm);

	kfree_sensitive(ctx->seed);
	ctx->seed = NULL;
	ctx->slen = 0;
}

int sun8i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src,
			   unsigned int slen, u8 *dst, unsigned int dlen)
{
	struct sun8i_ss_rng_tfm_ctx *ctx = crypto_rng_ctx(tfm);
	struct rng_alg *alg = crypto_rng_alg(tfm);
	struct sun8i_ss_alg_template *algt;
	struct sun8i_ss_dev *ss;
	dma_addr_t dma_iv, dma_dst;
	unsigned int todo;
	int err = 0;
	int flow;
	void *d;
	u32 v;

	algt = container_of(alg, struct sun8i_ss_alg_template, alg.rng);
	ss = algt->ss;

	if (ctx->slen == 0) {
		dev_err(ss->dev, "The PRNG is not seeded\n");
		return -EINVAL;
	}

	/* The SS does not give back an updated seed, so we need to generate
	 * a new one: ask the hardware for PRNG_SEED_SIZE extra bytes.
	 * We want dlen + PRNG_SEED_SIZE rounded up to a multiple of
	 * PRNG_DATA_SIZE.
	 */
	todo = dlen + PRNG_SEED_SIZE + PRNG_DATA_SIZE;
	todo -= todo % PRNG_DATA_SIZE;
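	/* Worked example of the rounding above, using purely illustrative
	 * sizes (not necessarily the values defined in sun8i-ss.h): with
	 * dlen = 32, PRNG_SEED_SIZE = 24 and PRNG_DATA_SIZE = 20, we get
	 * todo = 32 + 24 + 20 = 76, then 76 - (76 % 20) = 60, the smallest
	 * multiple of PRNG_DATA_SIZE that covers the 56 bytes of requested
	 * data plus replacement seed. When dlen + PRNG_SEED_SIZE is already
	 * aligned, one extra PRNG_DATA_SIZE block is requested.
	 */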

	d = kzalloc(todo, GFP_KERNEL | GFP_DMA);
	if (!d)
		return -ENOMEM;

	flow = sun8i_ss_get_engine_number(ss);

#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
	algt->stat_req++;
	algt->stat_bytes += todo;
#endif

	/* Build the task control word: run the PRNG on the selected flow */
	v = SS_ALG_PRNG | SS_PRNG_CONTINUE | SS_START;
	if (flow)
		v |= SS_FLOW1;
	else
		v |= SS_FLOW0;

	dma_iv = dma_map_single(ss->dev, ctx->seed, ctx->slen, DMA_TO_DEVICE);
	if (dma_mapping_error(ss->dev, dma_iv)) {
		dev_err(ss->dev, "Cannot DMA MAP IV\n");
		err = -EFAULT;
		goto err_free;
	}

	dma_dst = dma_map_single(ss->dev, d, todo, DMA_FROM_DEVICE);
	if (dma_mapping_error(ss->dev, dma_dst)) {
		dev_err(ss->dev, "Cannot DMA MAP DST\n");
		err = -EFAULT;
		goto err_iv;
	}

	/* Make sure the hardware is powered on before touching registers */
	err = pm_runtime_get_sync(ss->dev);
	if (err < 0) {
		pm_runtime_put_noidle(ss->dev);
		goto err_pm;
	}
	err = 0;

	mutex_lock(&ss->mlock);
	writel(dma_iv, ss->base + SS_IV_ADR_REG);
	/* the PRNG acts badly (fails rngtest) without SS_KEY_ADR_REG set */
	writel(dma_iv, ss->base + SS_KEY_ADR_REG);
	writel(dma_dst, ss->base + SS_DST_ADR_REG);
	/* the length register is expressed in 32-bit words */
	writel(todo / 4, ss->base + SS_LEN_ADR_REG);

	reinit_completion(&ss->flows[flow].complete);
	ss->flows[flow].status = 0;
	/* Be sure all data is written before enabling the task */
	wmb();

	writel(v, ss->base + SS_CTL_REG);

	wait_for_completion_interruptible_timeout(&ss->flows[flow].complete,
						  msecs_to_jiffies(todo));
	if (ss->flows[flow].status == 0) {
		dev_err(ss->dev, "DMA timeout for PRNG (size=%u)\n", todo);
		err = -EFAULT;
	}
	/* Since cipher and hash go through the kernel crypto engine, and we
	 * have one crypto engine per flow, we are sure they will issue only
	 * one request per flow at a time.
	 * Since the crypto engine waits for completion before submitting a
	 * new request, the mlock could be released right after the final
	 * writel.
	 * But the crypto engine cannot handle crypto_rng, so we need to be
	 * sure nothing else will use our flow.
	 * The easiest way is to hold mlock until the hardware has finished
	 * our request.
	 * We could have used a per-flow lock, but this would increase
	 * complexity.
	 * The drawback is that no request can be handled on the other flow
	 * in the meantime.
	 */
	mutex_unlock(&ss->mlock);

	pm_runtime_put(ss->dev);

err_pm:
	dma_unmap_single(ss->dev, dma_dst, todo, DMA_FROM_DEVICE);
err_iv:
	dma_unmap_single(ss->dev, dma_iv, ctx->slen, DMA_TO_DEVICE);

	if (!err) {
		memcpy(dst, d, dlen);
		/* Update seed */
		memcpy(ctx->seed, d + dlen, ctx->slen);
	}
err_free:
	kfree_sensitive(d);

	return err;
}
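
/*
 * Illustrative usage sketch, not part of this driver: how a kernel caller
 * could exercise the seed/generate hooks above through the crypto_rng API
 * from <crypto/rng.h>. The "sun8i-ss-prng" name is assumed to match the
 * cra_driver_name registered for alg.rng in sun8i-ss-core.c.
 *
 *	struct crypto_rng *rng;
 *	u8 seed[PRNG_SEED_SIZE];
 *	u8 out[64];
 *	int ret;
 *
 *	rng = crypto_alloc_rng("sun8i-ss-prng", 0, 0);
 *	if (IS_ERR(rng))
 *		return PTR_ERR(rng);
 *
 *	get_random_bytes(seed, sizeof(seed));
 *	// crypto_rng_reset() ends up in sun8i_ss_prng_seed()
 *	ret = crypto_rng_reset(rng, seed, sizeof(seed));
 *	if (!ret)
 *		// crypto_rng_get_bytes() ends up in sun8i_ss_prng_generate()
 *		ret = crypto_rng_get_bytes(rng, out, sizeof(out));
 *
 *	crypto_free_rng(rng);
 *	return ret;
 */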