xref: /openbmc/linux/drivers/crypto/caam/caamprng.c (revision 4e3b8650)
// SPDX-License-Identifier: GPL-2.0+
/*
 * Driver to expose SEC4 PRNG via crypto RNG API
 *
 * Copyright 2022 NXP
 *
 */

#include <linux/completion.h>
#include <crypto/internal/rng.h>
#include "compat.h"
#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"

/*
 * Length of used descriptors, see caam_init_prng_desc() and
 * caam_init_reseed_desc()
 */
#define CAAM_PRNG_MAX_DESC_LEN (CAAM_CMD_SZ +				\
			    CAAM_CMD_SZ +				\
			    CAAM_CMD_SZ + CAAM_PTR_SZ_MAX)

/* prng per-device context */
struct caam_prng_ctx {
	int err;
	struct completion done;
};

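/*
 * Algorithm wrapper: bundles the rng_alg with a flag recording whether
 * crypto_register_rng() succeeded, so unregistration only happens when
 * the algorithm was actually registered.
 */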
struct caam_prng_alg {
	struct rng_alg rng;
	bool registered;
};

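/*
 * Job ring completion callback: translate the CAAM job status into an
 * errno via caam_jr_strstatus() and wake up the submitter waiting on
 * the completion.
 */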
static void caam_prng_done(struct device *jrdev, u32 *desc, u32 err,
			  void *context)
{
	struct caam_prng_ctx *jctx = context;

	jctx->err = err ? caam_jr_strstatus(jrdev, err) : 0;

	complete(&jctx->done);
}

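/*
 * Build the job descriptor used by the seed() callback: a single RNG
 * operation command that asks the hardware to reseed itself (no
 * caller-supplied seed material is involved).
 */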
static u32 *caam_init_reseed_desc(u32 *desc)
{
	init_job_desc(desc, 0);	/* + 1 cmd_sz */
	/* Reseed the RNG: + 1 cmd_sz */
	append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
			OP_ALG_AS_FINALIZE);

	print_hex_dump_debug("prng reseed desc@: ", DUMP_PREFIX_ADDRESS,
			     16, 4, desc, desc_bytes(desc), 1);

	return desc;
}

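/*
 * Build the job descriptor used by the generate() callback: run the RNG
 * and FIFO-store 'len' freshly generated bytes to the DMA address
 * 'dst_dma'.
 */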
static u32 *caam_init_prng_desc(u32 *desc, dma_addr_t dst_dma, u32 len)
{
	init_job_desc(desc, 0);	/* + 1 cmd_sz */
	/* Generate random bytes: + 1 cmd_sz */
	append_operation(desc, OP_ALG_ALGSEL_RNG | OP_TYPE_CLASS1_ALG);
	/* Store bytes: + 1 cmd_sz + caam_ptr_sz */
	append_fifo_store(desc, dst_dma,
			  len, FIFOST_TYPE_RNGSTORE);

	print_hex_dump_debug("prng job desc@: ", DUMP_PREFIX_ADDRESS,
			     16, 4, desc, desc_bytes(desc), 1);

	return desc;
}

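/*
 * generate() callback: allocate a DMA-able bounce buffer, acquire a job
 * ring, enqueue a descriptor that stores 'dlen' random bytes into the
 * buffer, wait synchronously for completion and copy the result to
 * 'dst' on success. 'src'/'slen' (additional input) are ignored.
 */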
static int caam_prng_generate(struct crypto_rng *tfm,
			     const u8 *src, unsigned int slen,
			     u8 *dst, unsigned int dlen)
{
	struct caam_prng_ctx ctx;
	struct device *jrdev;
	dma_addr_t dst_dma;
	u32 *desc;
	u8 *buf;
	int ret;

	buf = kzalloc(dlen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	jrdev = caam_jr_alloc();
	ret = PTR_ERR_OR_ZERO(jrdev);
	if (ret) {
		pr_err("Job Ring Device allocation failed\n");
		kfree(buf);
		return ret;
	}

	desc = kzalloc(CAAM_PRNG_MAX_DESC_LEN, GFP_KERNEL | GFP_DMA);
	if (!desc) {
		ret = -ENOMEM;
		goto out1;
	}

	dst_dma = dma_map_single(jrdev, buf, dlen, DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, dst_dma)) {
		dev_err(jrdev, "Failed to map destination buffer memory\n");
		ret = -ENOMEM;
		goto out;
	}

	init_completion(&ctx.done);
	ret = caam_jr_enqueue(jrdev,
			      caam_init_prng_desc(desc, dst_dma, dlen),
			      caam_prng_done, &ctx);

	if (ret == -EINPROGRESS) {
		wait_for_completion(&ctx.done);
		ret = ctx.err;
	}

	dma_unmap_single(jrdev, dst_dma, dlen, DMA_FROM_DEVICE);

	if (!ret)
		memcpy(dst, buf, dlen);
out:
	kfree(desc);
out1:
	caam_jr_free(jrdev);
	kfree(buf);
	return ret;
}

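/* No per-transform state is needed, so init/exit are empty stubs. */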
static void caam_prng_exit(struct crypto_tfm *tfm) {}

static int caam_prng_init(struct crypto_tfm *tfm)
{
	return 0;
}

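/*
 * seed() callback: no seed material is accepted (slen must be 0);
 * instead a reseed descriptor is enqueued on a temporarily acquired job
 * ring and the result of that job is returned.
 */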
static int caam_prng_seed(struct crypto_rng *tfm,
			 const u8 *seed, unsigned int slen)
{
	struct caam_prng_ctx ctx;
	struct device *jrdev;
	u32 *desc;
	int ret;

	if (slen) {
		pr_err("Seed length should be zero\n");
		return -EINVAL;
	}

	jrdev = caam_jr_alloc();
	ret = PTR_ERR_OR_ZERO(jrdev);
	if (ret) {
		pr_err("Job Ring Device allocation failed\n");
		return ret;
	}

	desc = kzalloc(CAAM_PRNG_MAX_DESC_LEN, GFP_KERNEL | GFP_DMA);
	if (!desc) {
		caam_jr_free(jrdev);
		return -ENOMEM;
	}

	init_completion(&ctx.done);
	ret = caam_jr_enqueue(jrdev,
			      caam_init_reseed_desc(desc),
			      caam_prng_done, &ctx);

	if (ret == -EINPROGRESS) {
		wait_for_completion(&ctx.done);
		ret = ctx.err;
	}

	kfree(desc);
	caam_jr_free(jrdev);
	return ret;
}

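/*
 * The PRNG is exposed through the generic "stdrng" interface of the
 * crypto RNG API with priority 500 and seedsize 0.
 *
 * A minimal consumer sketch (illustrative only; assumes this driver
 * backs "stdrng" on the running system and omits error handling):
 *
 *	struct crypto_rng *rng = crypto_alloc_rng("stdrng", 0, 0);
 *	u8 buf[32];
 *
 *	crypto_rng_reset(rng, NULL, 0);              // calls caam_prng_seed()
 *	crypto_rng_get_bytes(rng, buf, sizeof(buf)); // calls caam_prng_generate()
 *	crypto_free_rng(rng);
 */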
static struct caam_prng_alg caam_prng_alg = {
	.rng = {
		.generate = caam_prng_generate,
		.seed = caam_prng_seed,
		.seedsize = 0,
		.base = {
			.cra_name = "stdrng",
			.cra_driver_name = "prng-caam",
			.cra_priority = 500,
			.cra_ctxsize = sizeof(struct caam_prng_ctx),
			.cra_module = THIS_MODULE,
			.cra_init = caam_prng_init,
			.cra_exit = caam_prng_exit,
		},
	}
};

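/*
 * Unregister the RNG algorithm if it was successfully registered;
 * 'data' is unused and only present to fit the callback signature
 * expected by the caller.
 */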
void caam_prng_unregister(void *data)
{
	if (caam_prng_alg.registered)
		crypto_unregister_rng(&caam_prng_alg.rng);
}

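/*
 * Register the "stdrng" algorithm with the crypto API, but only if the
 * controller reports at least one instantiated RNG block: the instance
 * count is read from the era-dependent CHA capability registers and
 * registration is skipped quietly (returning 0) when it is zero.
 */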
int caam_prng_register(struct device *ctrldev)
{
	struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
	u32 rng_inst;
	int ret = 0;

	/* Check for available RNG blocks before registration */
	if (priv->era < 10)
		rng_inst = (rd_reg32(&priv->jr[0]->perfmon.cha_num_ls) &
			    CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT;
	else
		rng_inst = rd_reg32(&priv->jr[0]->vreg.rng) & CHA_VER_NUM_MASK;

	if (!rng_inst) {
		dev_dbg(ctrldev, "RNG block is not available; skipping algorithm registration\n");
		return ret;
	}

	ret = crypto_register_rng(&caam_prng_alg.rng);
	if (ret) {
		dev_err(ctrldev,
			"couldn't register rng crypto alg: %d\n",
			ret);
		return ret;
	}

	caam_prng_alg.registered = true;

	dev_info(ctrldev,
		 "rng crypto API alg registered %s\n", caam_prng_alg.rng.base.cra_driver_name);

	return 0;
}