/*
 * caam - Freescale FSL CAAM support for hw_random
 *
 * Copyright 2011 Freescale Semiconductor, Inc.
 *
 * Based on caamalg.c crypto API driver.
 *
 * Relationship of job descriptors to the shared descriptor:
 *
 * ---------------                     --------------
 * | JobDesc #0  |-------------------->| ShareDesc  |
 * | *(buffer 0) |      |------------->| (generate) |
 * ---------------      |              | (move)     |
 *                      |              | (store)    |
 * ---------------      |              --------------
 * | JobDesc #1  |------|
 * | *(buffer 1) |
 * ---------------
 *
 * A job descriptor looks like this:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * ---------------------
 *
 * The ShareDesc never changes; each job descriptor points to one of two
 * per-device buffers, from which the data is copied into the requested
 * destination.
 */
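
/*
 * Usage sketch (illustrative only, not part of the driver): once
 * hwrng_register() succeeds, the hw_random core exposes this source to
 * userspace, typically as /dev/hwrng, so e.g.
 *
 *	dd if=/dev/hwrng bs=64 count=1 | xxd
 *
 * ends up pulling bytes through caam_read() below. Device naming and
 * source selection are handled by the hw_random core (see rng_current
 * in sysfs), not by this driver.
 */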

#include <linux/hw_random.h>
#include <linux/completion.h>
#include <linux/atomic.h>

#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"

/*
 * Maximum buffer size: maximum number of random, cache-aligned bytes that
 * will be generated and moved to seq out ptr (extlen not allowed)
 */
#define RN_BUF_SIZE			(0xffff / L1_CACHE_BYTES * \
					 L1_CACHE_BYTES)

/* length of descriptors */
#define DESC_JOB_O_LEN			(CAAM_CMD_SZ * 2 + CAAM_PTR_SZ * 2)
#define DESC_RNG_LEN			(10 * CAAM_CMD_SZ)
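
/*
 * Note on sizing: DESC_JOB_O_LEN matches the job descriptor layout shown
 * above -- two commands (header, SEQ OUT PTR) plus two pointers (shared
 * descriptor address, output buffer address). DESC_RNG_LEN is simply an
 * upper bound for the shared descriptor built in rng_create_sh_desc().
 */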

/* Buffer, its dma address and state flag */
struct buf_data {
	u8 buf[RN_BUF_SIZE];
	dma_addr_t addr;
	struct completion filled;
	u32 hw_desc[DESC_JOB_O_LEN];
#define BUF_NOT_EMPTY 0
#define BUF_EMPTY 1
#define BUF_PENDING 2  /* Empty, but with job pending -- don't submit another */
	atomic_t empty;
};
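
/*
 * Buffer life cycle, as implemented below: a buffer starts out BUF_EMPTY,
 * becomes BUF_PENDING when submit_job() enqueues its descriptor, and
 * BUF_NOT_EMPTY once rng_done() runs. caam_read() drains it and resets it
 * to BUF_EMPTY before kicking off a refill.
 */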

/* rng per-device context */
struct caam_rng_ctx {
	struct device *jrdev;
	dma_addr_t sh_desc_dma;
	u32 sh_desc[DESC_RNG_LEN];
	unsigned int cur_buf_idx;
	int current_buf;
	struct buf_data bufs[2];
};

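/*
 * A single, static context: the driver grabs one job ring at module init
 * and serves every hwrng read from it, so no per-instance state is
 * needed.
 */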
static struct caam_rng_ctx *rng_ctx;

static inline void rng_unmap_buf(struct device *jrdev, struct buf_data *bd)
{
	if (bd->addr)
		dma_unmap_single(jrdev, bd->addr, RN_BUF_SIZE,
				 DMA_FROM_DEVICE);
}

static inline void rng_unmap_ctx(struct caam_rng_ctx *ctx)
{
	struct device *jrdev = ctx->jrdev;

	/* unmap with the same size the descriptor was mapped with */
	if (ctx->sh_desc_dma)
		dma_unmap_single(jrdev, ctx->sh_desc_dma,
				 desc_bytes(ctx->sh_desc), DMA_TO_DEVICE);
	rng_unmap_buf(jrdev, &ctx->bufs[0]);
	rng_unmap_buf(jrdev, &ctx->bufs[1]);
}

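/*
 * Job completion callback, invoked by the job ring driver once the RNG
 * job has written RN_BUF_SIZE bytes: recover the buf_data from the
 * descriptor address, mark the buffer full and wake any reader blocked
 * in caam_read().
 */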
static void rng_done(struct device *jrdev, u32 *desc, u32 err, void *context)
{
	struct buf_data *bd;

	bd = container_of(desc, struct buf_data, hw_desc[0]);

	if (err)
		caam_jr_strstatus(jrdev, err);

	atomic_set(&bd->empty, BUF_NOT_EMPTY);
	complete(&bd->filled);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "rng refreshed buf@: ",
		       DUMP_PREFIX_ADDRESS, 16, 4, bd->buf, RN_BUF_SIZE, 1);
#endif
}

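/*
 * Enqueue the job descriptor for one of the two buffers. The index
 * expression !(to_current ^ ctx->current_buf) picks the current buffer
 * when to_current is 1 and the other buffer when it is 0. On success the
 * buffer moves from BUF_EMPTY to BUF_PENDING; on failure the completion
 * fires immediately so nobody blocks on a job that was never queued.
 */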
static inline int submit_job(struct caam_rng_ctx *ctx, int to_current)
{
	struct buf_data *bd = &ctx->bufs[!(to_current ^ ctx->current_buf)];
	struct device *jrdev = ctx->jrdev;
	u32 *desc = bd->hw_desc;
	int err;

	dev_dbg(jrdev, "submitting job %d\n", !(to_current ^ ctx->current_buf));
	init_completion(&bd->filled);
	err = caam_jr_enqueue(jrdev, desc, rng_done, ctx);
	if (err)
		complete(&bd->filled); /* don't wait on failed job */
	else
		atomic_inc(&bd->empty); /* BUF_EMPTY -> BUF_PENDING */

	return err;
}

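/*
 * hwrng .read callback. Serve bytes from the current buffer; once it runs
 * dry, return what has been copied so far, mark the buffer empty, submit
 * a refill job, flip to the other buffer and recurse once without
 * waiting. May return fewer than max bytes.
 */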
static int caam_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
	struct caam_rng_ctx *ctx = rng_ctx;
	struct buf_data *bd = &ctx->bufs[ctx->current_buf];
	int next_buf_idx, copied_idx;
	int err;

	if (atomic_read(&bd->empty)) {
		/* try to submit a job if there wasn't one */
		if (atomic_read(&bd->empty) == BUF_EMPTY) {
			err = submit_job(ctx, 1);
			/* if we can't submit a job, we can't even wait */
			if (err)
				return 0;
		}
		/* no immediate data, so exit if not waiting */
		if (!wait)
			return 0;

		/* wait for the pending job */
		if (atomic_read(&bd->empty))
			wait_for_completion(&bd->filled);
	}

	next_buf_idx = ctx->cur_buf_idx + max;
	dev_dbg(ctx->jrdev, "%s: start reading at buffer %d, idx %d\n",
		__func__, ctx->current_buf, ctx->cur_buf_idx);

	/* if there is enough data in the current buffer */
	if (next_buf_idx < RN_BUF_SIZE) {
		memcpy(data, bd->buf + ctx->cur_buf_idx, max);
		ctx->cur_buf_idx = next_buf_idx;
		return max;
	}

	/* else, copy what's left... */
	copied_idx = RN_BUF_SIZE - ctx->cur_buf_idx;
	memcpy(data, bd->buf + ctx->cur_buf_idx, copied_idx);
	ctx->cur_buf_idx = 0;
	atomic_set(&bd->empty, BUF_EMPTY);

	/* ...refill... */
	submit_job(ctx, 1);

	/* ...and use the next buffer */
	ctx->current_buf = !ctx->current_buf;
	dev_dbg(ctx->jrdev, "switched to buffer %d\n", ctx->current_buf);

	/* since some data has already been read, don't wait */
	return copied_idx + caam_read(rng, data + copied_idx,
				      max - copied_idx, false);
}

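/*
 * Build the shared descriptor once per context: header, an error-handling
 * LOAD, the RNG OPERATION and a SEQ FIFO STORE of RN_BUF_SIZE bytes --
 * the generate/store sequence sketched at the top of this file. The
 * finished descriptor is DMA-mapped so both job descriptors can point
 * at it.
 */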
static inline void rng_create_sh_desc(struct caam_rng_ctx *ctx)
{
	struct device *jrdev = ctx->jrdev;
	u32 *desc = ctx->sh_desc;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Propagate errors from shared to job descriptor */
	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);

	/* Generate random bytes */
	append_operation(desc, OP_ALG_ALGSEL_RNG | OP_TYPE_CLASS1_ALG);

	/* Store bytes */
	append_seq_fifo_store(desc, RN_BUF_SIZE, FIFOST_TYPE_RNGSTORE);

	ctx->sh_desc_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
					  DMA_TO_DEVICE);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "rng shdesc@: ", DUMP_PREFIX_ADDRESS, 16, 4,
		       desc, desc_bytes(desc), 1);
#endif
}

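/*
 * Build the per-buffer job descriptor. HDR_SHARE_DEFER defers loading the
 * shared descriptor, and HDR_REVERSE reverses the execution order so the
 * job descriptor runs first; that way the SEQ OUT PTR set up below is in
 * place before the shared descriptor's store writes into bd->buf.
 */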
static inline void rng_create_job_desc(struct caam_rng_ctx *ctx, int buf_id)
{
	struct device *jrdev = ctx->jrdev;
	struct buf_data *bd = &ctx->bufs[buf_id];
	u32 *desc = bd->hw_desc;
	int sh_len = desc_len(ctx->sh_desc);

	init_job_desc_shared(desc, ctx->sh_desc_dma, sh_len, HDR_SHARE_DEFER |
			     HDR_REVERSE);

	bd->addr = dma_map_single(jrdev, bd->buf, RN_BUF_SIZE, DMA_FROM_DEVICE);

	append_seq_out_ptr_intlen(desc, bd->addr, RN_BUF_SIZE, 0);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "rng job desc@: ", DUMP_PREFIX_ADDRESS, 16, 4,
		       desc, desc_bytes(desc), 1);
#endif
}

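/*
 * hwrng .cleanup callback: wait for any in-flight job so the hardware is
 * done writing into the buffers, then tear down the DMA mappings.
 */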
static void caam_cleanup(struct hwrng *rng)
{
	int i;
	struct buf_data *bd;

	for (i = 0; i < 2; i++) {
		bd = &rng_ctx->bufs[i];
		if (atomic_read(&bd->empty) == BUF_PENDING)
			wait_for_completion(&bd->filled);
	}

	rng_unmap_ctx(rng_ctx);
}

static void caam_init_buf(struct caam_rng_ctx *ctx, int buf_id)
{
	struct buf_data *bd = &ctx->bufs[buf_id];

	rng_create_job_desc(ctx, buf_id);
	atomic_set(&bd->empty, BUF_EMPTY);
	submit_job(ctx, buf_id == ctx->current_buf);
	wait_for_completion(&bd->filled);
}

static void caam_init_rng(struct caam_rng_ctx *ctx, struct device *jrdev)
{
	ctx->jrdev = jrdev;
	rng_create_sh_desc(ctx);
	ctx->current_buf = 0;
	ctx->cur_buf_idx = 0;
	caam_init_buf(ctx, 0);
	caam_init_buf(ctx, 1);
}

static struct hwrng caam_rng = {
	.name		= "rng-caam",
	.cleanup	= caam_cleanup,
	.read		= caam_read,
};
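
/*
 * No .init callback is needed: caam_init_rng() primes both buffers at
 * module init (blocking until they are filled), before hwrng_register()
 * makes the device visible.
 */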

static void __exit caam_rng_exit(void)
{
	/* unregister first: caam_cleanup() still uses the job ring device */
	hwrng_unregister(&caam_rng);
	caam_jr_free(rng_ctx->jrdev);
	kfree(rng_ctx);
}

static int __init caam_rng_init(void)
{
	struct device *dev;

	dev = caam_jr_alloc();
	if (IS_ERR(dev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(dev);
	}
	rng_ctx = kmalloc(sizeof(*rng_ctx), GFP_DMA | GFP_KERNEL);
	if (!rng_ctx) {
		/* don't leak the job ring on the error path */
		caam_jr_free(dev);
		return -ENOMEM;
	}
	caam_init_rng(rng_ctx, dev);

	dev_info(dev, "registering rng-caam\n");
	return hwrng_register(&caam_rng);
}

module_init(caam_rng_init);
module_exit(caam_rng_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM support for hw_random API");
MODULE_AUTHOR("Freescale Semiconductor - NMG");