xref: /openbmc/linux/drivers/crypto/caam/caamrng.c (revision 4bb1eb3c)
// SPDX-License-Identifier: GPL-2.0+
/*
 * caam - Freescale FSL CAAM support for hw_random
 *
 * Copyright 2011 Freescale Semiconductor, Inc.
 * Copyright 2018-2019 NXP
 *
 * Based on caamalg.c crypto API driver.
 *
 */

#include <linux/hw_random.h>
#include <linux/completion.h>
#include <linux/atomic.h>
#include <linux/kfifo.h>

#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"

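/*
 * Upper bound on the number of random bytes produced by a single job
 * descriptor. Every FIFO STORE built by caam_init_desc() requests exactly
 * this many bytes; presumably this matches what the RNG block can deliver
 * reliably per job.
 */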
#define CAAM_RNG_MAX_FIFO_STORE_SIZE	16

/*
 * Length of used descriptors, see caam_init_desc():
 * one HEADER, one OPERATION and one FIFO STORE command,
 * plus the store's destination pointer.
 */
#define CAAM_RNG_DESC_LEN (CAAM_CMD_SZ +				\
			   CAAM_CMD_SZ +				\
			   CAAM_CMD_SZ + CAAM_PTR_SZ_MAX)

/* rng per-device context */
struct caam_rng_ctx {
	struct hwrng rng;		/* hw_random framework handle */
	struct device *jrdev;		/* job ring used to run descriptors */
	struct device *ctrldev;		/* controller dev, owns devres group */
	void *desc_async;		/* descriptor for worker refill jobs */
	void *desc_sync;		/* descriptor for synchronous reads */
	struct work_struct worker;	/* refills the fifo when it drains */
	struct kfifo fifo;		/* buffered bytes for !wait reads */
};

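/* per-job context handed to the job ring completion callback */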
struct caam_rng_job_ctx {
	struct completion *done;
	int *err;
};

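/* recover the driver context stashed in rng->priv by caam_rng_init() */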
static struct caam_rng_ctx *to_caam_rng_ctx(struct hwrng *r)
{
	return (struct caam_rng_ctx *)r->priv;
}

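/*
 * Job ring completion callback: decode and record the job status (if any)
 * through the caller's error pointer, then wake up the waiter.
 */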
static void caam_rng_done(struct device *jrdev, u32 *desc, u32 err,
			  void *context)
{
	struct caam_rng_job_ctx *jctx = context;

	if (err)
		*jctx->err = caam_jr_strstatus(jrdev, err);

	complete(jctx->done);
}

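/*
 * Build the three-command job descriptor accounted for in
 * CAAM_RNG_DESC_LEN:
 *
 *	HEADER
 *	OPERATION	(RNG, class 1, prediction resistance on)
 *	FIFO STORE	(16 bytes of RNG output to dst_dma)
 */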
static u32 *caam_init_desc(u32 *desc, dma_addr_t dst_dma)
{
	init_job_desc(desc, 0);	/* + 1 cmd_sz */
	/* Generate random bytes: + 1 cmd_sz */
	append_operation(desc, OP_ALG_ALGSEL_RNG | OP_TYPE_CLASS1_ALG |
			 OP_ALG_PR_ON);
	/* Store bytes: + 1 cmd_sz + caam_ptr_sz */
	append_fifo_store(desc, dst_dma,
			  CAAM_RNG_MAX_FIFO_STORE_SIZE, FIFOST_TYPE_RNGSTORE);

	print_hex_dump_debug("rng job desc@: ", DUMP_PREFIX_ADDRESS,
			     16, 4, desc, desc_bytes(desc), 1);

	return desc;
}

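/*
 * Run one RNG job synchronously: map @dst for DMA, enqueue the descriptor
 * and sleep until caam_rng_done() fires. The request length is forced to
 * CAAM_RNG_MAX_FIFO_STORE_SIZE, so @dst must have room for at least that
 * many bytes regardless of @len. Returns the number of bytes generated or
 * a negative error code.
 */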
static int caam_rng_read_one(struct device *jrdev,
			     void *dst, int len,
			     void *desc,
			     struct completion *done)
{
	dma_addr_t dst_dma;
	int err, ret = 0;
	struct caam_rng_job_ctx jctx = {
		.done = done,
		.err  = &ret,
	};

	len = CAAM_RNG_MAX_FIFO_STORE_SIZE;

	dst_dma = dma_map_single(jrdev, dst, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, dst_dma)) {
		dev_err(jrdev, "unable to map destination memory\n");
		return -ENOMEM;
	}

	init_completion(done);
	err = caam_jr_enqueue(jrdev,
			      caam_init_desc(desc, dst_dma),
			      caam_rng_done, &jctx);
	if (err == -EINPROGRESS) {
		wait_for_completion(done);
		err = 0;
	}

	dma_unmap_single(jrdev, dst_dma, len, DMA_FROM_DEVICE);

	return err ?: (ret ?: len);
}

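/*
 * Refill the kfifo with up to 16 fresh bytes. The job's FIFO STORE writes
 * straight into the kfifo's linear buffer via the DMA scatterlist, so no
 * bounce buffer is needed; on any failure the fifo is simply left as-is
 * and a later read will reschedule the worker.
 */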
static void caam_rng_fill_async(struct caam_rng_ctx *ctx)
{
	struct scatterlist sg[1];
	struct completion done;
	int len, nents;

	sg_init_table(sg, ARRAY_SIZE(sg));
	nents = kfifo_dma_in_prepare(&ctx->fifo, sg, ARRAY_SIZE(sg),
				     CAAM_RNG_MAX_FIFO_STORE_SIZE);
	if (!nents)
		return;

	len = caam_rng_read_one(ctx->jrdev, sg_virt(&sg[0]),
				sg[0].length,
				ctx->desc_async,
				&done);
	if (len < 0)
		return;

	kfifo_dma_in_finish(&ctx->fifo, len);
}

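/* workqueue wrapper: refill the fifo without blocking the !wait read path */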
static void caam_rng_worker(struct work_struct *work)
{
	struct caam_rng_ctx *ctx = container_of(work, struct caam_rng_ctx,
						worker);
	caam_rng_fill_async(ctx);
}

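/*
 * hw_random ->read() hook. If the caller may sleep, run a job synchronously
 * and write into @dst directly. Otherwise hand out whatever the kfifo holds
 * and, once it is drained, kick the worker to refill it in the background.
 */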
static int caam_read(struct hwrng *rng, void *dst, size_t max, bool wait)
{
	struct caam_rng_ctx *ctx = to_caam_rng_ctx(rng);
	int out;

	if (wait) {
		struct completion done;

		return caam_rng_read_one(ctx->jrdev, dst, max,
					 ctx->desc_sync, &done);
	}

	out = kfifo_out(&ctx->fifo, dst, max);
	if (kfifo_is_empty(&ctx->fifo))
		schedule_work(&ctx->worker);

	return out;
}

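/*
 * hw_random ->cleanup() hook: make sure no refill job is still in flight
 * before releasing the job ring and the fifo backing storage.
 */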
static void caam_cleanup(struct hwrng *rng)
{
	struct caam_rng_ctx *ctx = to_caam_rng_ctx(rng);

	flush_work(&ctx->worker);
	caam_jr_free(ctx->jrdev);
	kfifo_free(&ctx->fifo);
}

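/*
 * hw_random ->init() hook: allocate DMA-able memory for the two job
 * descriptors and the fifo, take ownership of a job ring, and prime the
 * fifo so the first !wait read already finds data.
 */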
static int caam_init(struct hwrng *rng)
{
	struct caam_rng_ctx *ctx = to_caam_rng_ctx(rng);
	int err;

	ctx->desc_sync = devm_kzalloc(ctx->ctrldev, CAAM_RNG_DESC_LEN,
				      GFP_DMA | GFP_KERNEL);
	if (!ctx->desc_sync)
		return -ENOMEM;

	ctx->desc_async = devm_kzalloc(ctx->ctrldev, CAAM_RNG_DESC_LEN,
				       GFP_DMA | GFP_KERNEL);
	if (!ctx->desc_async)
		return -ENOMEM;

	if (kfifo_alloc(&ctx->fifo, CAAM_RNG_MAX_FIFO_STORE_SIZE,
			GFP_DMA | GFP_KERNEL))
		return -ENOMEM;

	INIT_WORK(&ctx->worker, caam_rng_worker);

	ctx->jrdev = caam_jr_alloc();
	err = PTR_ERR_OR_ZERO(ctx->jrdev);
	if (err) {
		kfifo_free(&ctx->fifo);
		pr_err("Job Ring Device allocation for transform failed\n");
		return err;
	}

	/*
	 * Fill the async buffer now so hw_random has randomness available
	 * early on.
	 */
	caam_rng_fill_async(ctx);

	return 0;
}

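/*
 * caam_rng_init() doubles as the devres group id; the forward declaration
 * lets caam_rng_exit() reference it before its definition below. Releasing
 * the group unwinds every devm_* allocation and the hwrng registration.
 */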
int caam_rng_init(struct device *ctrldev);

void caam_rng_exit(struct device *ctrldev)
{
	devres_release_group(ctrldev, caam_rng_init);
}

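/*
 * Register with hw_random if, and only if, the controller reports an
 * instantiated RNG. Era 10+ parts expose the RNG instance count in the
 * version registers; older ones encode it in the CHA number register.
 */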
int caam_rng_init(struct device *ctrldev)
{
	struct caam_rng_ctx *ctx;
	u32 rng_inst;
	struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
	int ret;

	/* Check for an instantiated RNG before registration */
	if (priv->era < 10)
		rng_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
			    CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT;
	else
		rng_inst = rd_reg32(&priv->ctrl->vreg.rng) & CHA_VER_NUM_MASK;

	if (!rng_inst)
		return 0;

	if (!devres_open_group(ctrldev, caam_rng_init, GFP_KERNEL))
		return -ENOMEM;

	ctx = devm_kzalloc(ctrldev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->ctrldev = ctrldev;

	ctx->rng.name    = "rng-caam";
	ctx->rng.init    = caam_init;
	ctx->rng.cleanup = caam_cleanup;
	ctx->rng.read    = caam_read;
	ctx->rng.priv    = (unsigned long)ctx;
	ctx->rng.quality = 1024;

	dev_info(ctrldev, "registering rng-caam\n");

	ret = devm_hwrng_register(ctrldev, &ctx->rng);
	if (ret) {
		caam_rng_exit(ctrldev);
		return ret;
	}

	devres_close_group(ctrldev, caam_rng_init);
	return 0;
}
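/*
 * Expected usage from the rest of the CAAM driver (a sketch only; the
 * exact call sites vary by tree, so treat the placement as an assumption):
 *
 *	// during controller/job-ring bring-up, ctrldev = CAAM controller
 *	err = caam_rng_init(ctrldev);	// no-op if no RNG is instantiated
 *	...
 *	// during teardown
 *	caam_rng_exit(ctrldev);		// releases the devres group
 */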