xref: /openbmc/linux/drivers/char/hw_random/cctrng.c (revision 82df5b73)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (C) 2019-2020 ARM Limited or its affiliates. */
3 
4 #include <linux/kernel.h>
5 #include <linux/module.h>
6 #include <linux/clk.h>
7 #include <linux/hw_random.h>
8 #include <linux/io.h>
9 #include <linux/platform_device.h>
10 #include <linux/pm_runtime.h>
11 #include <linux/interrupt.h>
12 #include <linux/irqreturn.h>
13 #include <linux/workqueue.h>
14 #include <linux/circ_buf.h>
15 #include <linux/completion.h>
16 #include <linux/of.h>
17 #include <linux/bitfield.h>
18 #include <linux/fips.h>
19 
20 #include "cctrng.h"
21 
/* Helpers to extract a named bit-field from a register value, built from the
 * <reg>_<field>_BIT_SHIFT / _BIT_SIZE constants declared in cctrng.h.
 */
#define CC_REG_LOW(name)  (name ## _BIT_SHIFT)
#define CC_REG_HIGH(name) (CC_REG_LOW(name) + name ## _BIT_SIZE - 1)
#define CC_GENMASK(name)  GENMASK(CC_REG_HIGH(name), CC_REG_LOW(name))

#define CC_REG_FLD_GET(reg_name, fld_name, reg_val)     \
	(FIELD_GET(CC_GENMASK(CC_ ## reg_name ## _ ## fld_name), reg_val))

/* max polls of NVM_IS_IDLE in cctrng_wait_for_reset_completion() */
#define CC_HW_RESET_LOOP_COUNT 10
/* runtime-PM autosuspend delay (milliseconds) */
#define CC_TRNG_SUSPEND_TIMEOUT 3000

/* data circular buffer in words must be:
 *  - of a power-of-2 size (limitation of circ_buf.h macros)
 *  - at least 6, the size generated in the EHR according to HW implementation
 */
#define CCTRNG_DATA_BUF_WORDS 32

/* The timeout for the TRNG operation should be calculated with the formula:
 * Timeout = EHR_NUM * VN_COEFF * EHR_LENGTH * SAMPLE_CNT * SCALE_VALUE
 * while:
 *  - SAMPLE_CNT is input value from the characterisation process
 *  - all the rest are constants
 */
#define EHR_NUM 1
#define VN_COEFF 4
#define EHR_LENGTH CC_TRNG_EHR_IN_BITS
#define SCALE_VALUE 2
#define CCTRNG_TIMEOUT(smpl_cnt) \
	(EHR_NUM * VN_COEFF * EHR_LENGTH * smpl_cnt * SCALE_VALUE)
50 
/* Per-device driver state, allocated in cctrng_probe(). */
struct cctrng_drvdata {
	struct platform_device *pdev;	/* owning platform device */
	void __iomem *cc_base;		/* mapped CryptoCell register space */
	struct clk *clk;		/* optional device clock (may be NULL) */
	struct hwrng rng;		/* hw_random framework handle */
	u32 active_rosc;		/* index of currently used ring oscillator */
	/* Sampling interval for each ring oscillator:
	 * count of ring oscillator cycles between consecutive bits sampling.
	 * Value of 0 indicates non-valid rosc
	 */
	u32 smpl_ratio[CC_TRNG_NUM_OF_ROSCS];

	u32 data_buf[CCTRNG_DATA_BUF_WORDS];	/* harvested entropy words */
	struct circ_buf circ;		/* circular view over data_buf */
	struct work_struct compwork;	/* deferred "EHR data ready" handler */
	struct work_struct startwork;	/* deferred HW (re)trigger handler */

	/* pending_hw - 1 when HW is pending, 0 when it is idle */
	atomic_t pending_hw;

	/* protects against multiple concurrent consumers of data_buf */
	spinlock_t read_lock;
};
74 
75 
76 /* functions for write/read CC registers */
77 static inline void cc_iowrite(struct cctrng_drvdata *drvdata, u32 reg, u32 val)
78 {
79 	iowrite32(val, (drvdata->cc_base + reg));
80 }
81 static inline u32 cc_ioread(struct cctrng_drvdata *drvdata, u32 reg)
82 {
83 	return ioread32(drvdata->cc_base + reg);
84 }
85 
86 
/* Resume the device and take a runtime-PM usage reference.
 * Returns 0 on success, negative error code otherwise.
 */
static int cc_trng_pm_get(struct device *dev)
{
	int rc = pm_runtime_get_sync(dev);

	/* pm_runtime_get_sync() can return 1 as a valid return code */
	if (rc == 1)
		rc = 0;

	return rc;
}
96 
/* Drop a runtime-PM usage reference and (re)arm the autosuspend timer. */
static void cc_trng_pm_put_suspend(struct device *dev)
{
	int rc;

	pm_runtime_mark_last_busy(dev);

	rc = pm_runtime_put_autosuspend(dev);
	if (rc)
		dev_err(dev, "pm_runtime_put_autosuspend returned %x\n", rc);
}
106 
/* Configure runtime PM: autosuspend with a CC_TRNG_SUSPEND_TIMEOUT delay,
 * and mark the device as active. Runtime PM stays disabled until
 * cc_trng_pm_go() is called at the end of probe.
 */
static int cc_trng_pm_init(struct cctrng_drvdata *drvdata)
{
	struct device *dev = &(drvdata->pdev->dev);

	/* must be before the enabling to avoid redundant suspending */
	pm_runtime_set_autosuspend_delay(dev, CC_TRNG_SUSPEND_TIMEOUT);
	pm_runtime_use_autosuspend(dev);
	/* set us as active - note we won't do PM ops until cc_trng_pm_go()! */
	return pm_runtime_set_active(dev);
}
117 
/* Enable runtime PM for the device. Probe calls this last, after the HW
 * has been triggered, so autosuspend cannot kick in earlier.
 */
static void cc_trng_pm_go(struct cctrng_drvdata *drvdata)
{
	struct device *dev = &(drvdata->pdev->dev);

	/* enable the PM module*/
	pm_runtime_enable(dev);
}
125 
/* Disable runtime PM; counterpart of cc_trng_pm_go(). */
static void cc_trng_pm_fini(struct cctrng_drvdata *drvdata)
{
	struct device *dev = &(drvdata->pdev->dev);

	pm_runtime_disable(dev);
}
132 
133 
134 static inline int cc_trng_parse_sampling_ratio(struct cctrng_drvdata *drvdata)
135 {
136 	struct device *dev = &(drvdata->pdev->dev);
137 	struct device_node *np = drvdata->pdev->dev.of_node;
138 	int rc;
139 	int i;
140 	/* ret will be set to 0 if at least one rosc has (sampling ratio > 0) */
141 	int ret = -EINVAL;
142 
143 	rc = of_property_read_u32_array(np, "arm,rosc-ratio",
144 					drvdata->smpl_ratio,
145 					CC_TRNG_NUM_OF_ROSCS);
146 	if (rc) {
147 		/* arm,rosc-ratio was not found in device tree */
148 		return rc;
149 	}
150 
151 	/* verify that at least one rosc has (sampling ratio > 0) */
152 	for (i = 0; i < CC_TRNG_NUM_OF_ROSCS; ++i) {
153 		dev_dbg(dev, "rosc %d sampling ratio %u",
154 			i, drvdata->smpl_ratio[i]);
155 
156 		if (drvdata->smpl_ratio[i] > 0)
157 			ret = 0;
158 	}
159 
160 	return ret;
161 }
162 
163 static int cc_trng_change_rosc(struct cctrng_drvdata *drvdata)
164 {
165 	struct device *dev = &(drvdata->pdev->dev);
166 
167 	dev_dbg(dev, "cctrng change rosc (was %d)\n", drvdata->active_rosc);
168 	drvdata->active_rosc += 1;
169 
170 	while (drvdata->active_rosc < CC_TRNG_NUM_OF_ROSCS) {
171 		if (drvdata->smpl_ratio[drvdata->active_rosc] > 0)
172 			return 0;
173 
174 		drvdata->active_rosc += 1;
175 	}
176 	return -EINVAL;
177 }
178 
179 
180 static void cc_trng_enable_rnd_source(struct cctrng_drvdata *drvdata)
181 {
182 	u32 max_cycles;
183 
184 	/* Set watchdog threshold to maximal allowed time (in CPU cycles) */
185 	max_cycles = CCTRNG_TIMEOUT(drvdata->smpl_ratio[drvdata->active_rosc]);
186 	cc_iowrite(drvdata, CC_RNG_WATCHDOG_VAL_REG_OFFSET, max_cycles);
187 
188 	/* enable the RND source */
189 	cc_iowrite(drvdata, CC_RND_SOURCE_ENABLE_REG_OFFSET, 0x1);
190 
191 	/* unmask RNG interrupts */
192 	cc_iowrite(drvdata, CC_RNG_IMR_REG_OFFSET, (u32)~CC_RNG_INT_MASK);
193 }
194 
195 
196 /* increase circular data buffer index (head/tail) */
197 static inline void circ_idx_inc(int *idx, int bytes)
198 {
199 	*idx += (bytes + 3) >> 2;
200 	*idx &= (CCTRNG_DATA_BUF_WORDS - 1);
201 }
202 
203 static inline size_t circ_buf_space(struct cctrng_drvdata *drvdata)
204 {
205 	return CIRC_SPACE(drvdata->circ.head,
206 			  drvdata->circ.tail, CCTRNG_DATA_BUF_WORDS);
207 
208 }
209 
210 static int cctrng_read(struct hwrng *rng, void *data, size_t max, bool wait)
211 {
212 	/* current implementation ignores "wait" */
213 
214 	struct cctrng_drvdata *drvdata = (struct cctrng_drvdata *)rng->priv;
215 	struct device *dev = &(drvdata->pdev->dev);
216 	u32 *buf = (u32 *)drvdata->circ.buf;
217 	size_t copied = 0;
218 	size_t cnt_w;
219 	size_t size;
220 	size_t left;
221 
222 	if (!spin_trylock(&drvdata->read_lock)) {
223 		/* concurrent consumers from data_buf cannot be served */
224 		dev_dbg_ratelimited(dev, "unable to hold lock\n");
225 		return 0;
226 	}
227 
228 	/* copy till end of data buffer (without wrap back) */
229 	cnt_w = CIRC_CNT_TO_END(drvdata->circ.head,
230 				drvdata->circ.tail, CCTRNG_DATA_BUF_WORDS);
231 	size = min((cnt_w<<2), max);
232 	memcpy(data, &(buf[drvdata->circ.tail]), size);
233 	copied = size;
234 	circ_idx_inc(&drvdata->circ.tail, size);
235 	/* copy rest of data in data buffer */
236 	left = max - copied;
237 	if (left > 0) {
238 		cnt_w = CIRC_CNT(drvdata->circ.head,
239 				 drvdata->circ.tail, CCTRNG_DATA_BUF_WORDS);
240 		size = min((cnt_w<<2), left);
241 		memcpy(data, &(buf[drvdata->circ.tail]), size);
242 		copied += size;
243 		circ_idx_inc(&drvdata->circ.tail, size);
244 	}
245 
246 	spin_unlock(&drvdata->read_lock);
247 
248 	if (circ_buf_space(drvdata) >= CC_TRNG_EHR_IN_WORDS) {
249 		if (atomic_cmpxchg(&drvdata->pending_hw, 0, 1) == 0) {
250 			/* re-check space in buffer to avoid potential race */
251 			if (circ_buf_space(drvdata) >= CC_TRNG_EHR_IN_WORDS) {
252 				/* increment device's usage counter */
253 				int rc = cc_trng_pm_get(dev);
254 
255 				if (rc) {
256 					dev_err(dev,
257 						"cc_trng_pm_get returned %x\n",
258 						rc);
259 					return rc;
260 				}
261 
262 				/* schedule execution of deferred work handler
263 				 * for filling of data buffer
264 				 */
265 				schedule_work(&drvdata->startwork);
266 			} else {
267 				atomic_set(&drvdata->pending_hw, 0);
268 			}
269 		}
270 	}
271 
272 	return copied;
273 }
274 
/*
 * cc_trng_hw_trigger() - reset and program the TRNG HW, then start entropy
 * collection on the currently active ring oscillator.
 *
 * The register sequence (clock enable, SW reset, sample-count verify,
 * source disable, config, source enable) is order-dependent; do not
 * rearrange.
 */
static void cc_trng_hw_trigger(struct cctrng_drvdata *drvdata)
{
	u32 tmp_smpl_cnt = 0;
	struct device *dev = &(drvdata->pdev->dev);

	dev_dbg(dev, "cctrng hw trigger.\n");

	/* enable the HW RND clock */
	cc_iowrite(drvdata, CC_RNG_CLK_ENABLE_REG_OFFSET, 0x1);

	/* do software reset */
	cc_iowrite(drvdata, CC_RNG_SW_RESET_REG_OFFSET, 0x1);
	/* in order to verify that the reset has completed,
	 * the sample count need to be verified
	 */
	do {
		/* enable the HW RND clock   */
		cc_iowrite(drvdata, CC_RNG_CLK_ENABLE_REG_OFFSET, 0x1);

		/* set sampling ratio (rng_clocks) between consecutive bits */
		cc_iowrite(drvdata, CC_SAMPLE_CNT1_REG_OFFSET,
			   drvdata->smpl_ratio[drvdata->active_rosc]);

		/* read the sampling ratio  */
		tmp_smpl_cnt = cc_ioread(drvdata, CC_SAMPLE_CNT1_REG_OFFSET);

		/* presumably the write only takes effect once the SW reset
		 * has finished, so a matching read-back signals completion
		 * - TODO confirm against CryptoCell TRM
		 */
	} while (tmp_smpl_cnt != drvdata->smpl_ratio[drvdata->active_rosc]);

	/* disable the RND source for setting new parameters in HW */
	cc_iowrite(drvdata, CC_RND_SOURCE_ENABLE_REG_OFFSET, 0);

	/* clear all pending RNG interrupts */
	cc_iowrite(drvdata, CC_RNG_ICR_REG_OFFSET, 0xFFFFFFFF);

	/* select the active ring oscillator */
	cc_iowrite(drvdata, CC_TRNG_CONFIG_REG_OFFSET, drvdata->active_rosc);

	/* Debug Control register: set to 0 - no bypasses */
	cc_iowrite(drvdata, CC_TRNG_DEBUG_CONTROL_REG_OFFSET, 0);

	cc_trng_enable_rnd_source(drvdata);
}
315 
/*
 * cc_trng_compwork_handler() - deferred handler for the RNG interrupt:
 * harvest the EHR words into the circular buffer, then either re-arm the
 * HW (buffer still has room), retry with the next ring oscillator (on
 * autocorr/watchdog error or an all-zero EHR word), or go idle and drop
 * the runtime-PM reference.
 */
static void cc_trng_compwork_handler(struct work_struct *w)
{
	u32 isr = 0;
	u32 ehr_valid = 0;
	struct cctrng_drvdata *drvdata =
			container_of(w, struct cctrng_drvdata, compwork);
	struct device *dev = &(drvdata->pdev->dev);
	int i;

	/* stop DMA and the RNG source */
	cc_iowrite(drvdata, CC_RNG_DMA_ENABLE_REG_OFFSET, 0);
	cc_iowrite(drvdata, CC_RND_SOURCE_ENABLE_REG_OFFSET, 0);

	/* read RNG_ISR and check for errors */
	isr = cc_ioread(drvdata, CC_RNG_ISR_REG_OFFSET);
	ehr_valid = CC_REG_FLD_GET(RNG_ISR, EHR_VALID, isr);
	dev_dbg(dev, "Got RNG_ISR=0x%08X (EHR_VALID=%u)\n", isr, ehr_valid);

	if (fips_enabled && CC_REG_FLD_GET(RNG_ISR, CRNGT_ERR, isr)) {
		fips_fail_notify();
		/* FIPS error is fatal */
		panic("Got HW CRNGT error while fips is enabled!\n");
	}

	/* Clear all pending RNG interrupts */
	cc_iowrite(drvdata, CC_RNG_ICR_REG_OFFSET, isr);


	if (!ehr_valid) {
		/* in case of AUTOCORR/TIMEOUT error, try the next ROSC */
		if (CC_REG_FLD_GET(RNG_ISR, AUTOCORR_ERR, isr) ||
				CC_REG_FLD_GET(RNG_ISR, WATCHDOG, isr)) {
			dev_dbg(dev, "cctrng autocorr/timeout error.\n");
			goto next_rosc;
		}

		/* in case of VN error, ignore it */
	}

	/* read EHR data from registers */
	for (i = 0; i < CC_TRNG_EHR_IN_WORDS; i++) {
		/* calc word ptr in data_buf */
		u32 *buf = (u32 *)drvdata->circ.buf;

		buf[drvdata->circ.head] = cc_ioread(drvdata,
				CC_EHR_DATA_0_REG_OFFSET + (i*sizeof(u32)));

		/* EHR_DATA registers are cleared on read. In case 0 value was
		 * returned, restart the entropy collection.
		 */
		if (buf[drvdata->circ.head] == 0) {
			dev_dbg(dev, "Got 0 value in EHR. active_rosc %u\n",
				drvdata->active_rosc);
			goto next_rosc;
		}

		/* advance head by one word (4 bytes) */
		circ_idx_inc(&drvdata->circ.head, 1<<2);
	}

	/* mark HW idle before re-checking space: a concurrent cctrng_read()
	 * may now win the cmpxchg below and trigger the HW itself
	 */
	atomic_set(&drvdata->pending_hw, 0);

	/* continue to fill data buffer if needed */
	if (circ_buf_space(drvdata) >= CC_TRNG_EHR_IN_WORDS) {
		if (atomic_cmpxchg(&drvdata->pending_hw, 0, 1) == 0) {
			/* Re-enable rnd source */
			cc_trng_enable_rnd_source(drvdata);
			return;
		}
	}

	/* buffer full (or another trigger pending): release the runtime-PM
	 * reference taken when this collection round was started
	 */
	cc_trng_pm_put_suspend(dev);

	dev_dbg(dev, "compwork handler done\n");
	return;

next_rosc:
	if ((circ_buf_space(drvdata) >= CC_TRNG_EHR_IN_WORDS) &&
			(cc_trng_change_rosc(drvdata) == 0)) {
		/* trigger trng hw with next rosc */
		cc_trng_hw_trigger(drvdata);
	} else {
		/* no valid rosc left or no buffer space: go idle */
		atomic_set(&drvdata->pending_hw, 0);
		cc_trng_pm_put_suspend(dev);
	}
}
401 
/*
 * cc_isr() - top-half interrupt handler (the IRQ line may be shared).
 * Acknowledges host-level interrupts, masks RNG interrupts, and defers
 * the actual EHR processing to compwork. The ack/mask ordering below is
 * deliberate; do not rearrange.
 */
static irqreturn_t cc_isr(int irq, void *dev_id)
{
	struct cctrng_drvdata *drvdata = (struct cctrng_drvdata *)dev_id;
	struct device *dev = &(drvdata->pdev->dev);
	u32 irr;

	/* if driver suspended return, probably shared interrupt */
	if (pm_runtime_suspended(dev))
		return IRQ_NONE;

	/* read the interrupt status */
	irr = cc_ioread(drvdata, CC_HOST_RGF_IRR_REG_OFFSET);
	dev_dbg(dev, "Got IRR=0x%08X\n", irr);

	if (irr == 0) /* Probably shared interrupt line */
		return IRQ_NONE;

	/* clear interrupt - must be before processing events */
	cc_iowrite(drvdata, CC_HOST_RGF_ICR_REG_OFFSET, irr);

	/* RNG interrupt - most probable */
	if (irr & CC_HOST_RNG_IRQ_MASK) {
		/* Mask RNG interrupts - will be unmasked in deferred work */
		cc_iowrite(drvdata, CC_RNG_IMR_REG_OFFSET, 0xFFFFFFFF);

		/* We clear RNG interrupt here,
		 * to avoid it from firing as we'll unmask RNG interrupts.
		 */
		cc_iowrite(drvdata, CC_HOST_RGF_ICR_REG_OFFSET,
			   CC_HOST_RNG_IRQ_MASK);

		irr &= ~CC_HOST_RNG_IRQ_MASK;

		/* schedule execution of deferred work handler */
		schedule_work(&drvdata->compwork);
	}

	if (irr) {
		dev_dbg_ratelimited(dev,
				"IRR includes unknown cause bits (0x%08X)\n",
				irr);
		/* Just warning */
	}

	return IRQ_HANDLED;
}
448 
/* Deferred work: start a fresh entropy-collection round, always beginning
 * from the first ring oscillator.
 */
static void cc_trng_startwork_handler(struct work_struct *w)
{
	struct cctrng_drvdata *drvdata =
			container_of(w, struct cctrng_drvdata, startwork);

	drvdata->active_rosc = 0;
	cc_trng_hw_trigger(drvdata);
}
457 
458 
459 static int cc_trng_clk_init(struct cctrng_drvdata *drvdata)
460 {
461 	struct clk *clk;
462 	struct device *dev = &(drvdata->pdev->dev);
463 	int rc = 0;
464 
465 	clk = devm_clk_get_optional(dev, NULL);
466 	if (IS_ERR(clk)) {
467 		if (PTR_ERR(clk) != -EPROBE_DEFER)
468 			dev_err(dev, "Error getting clock: %pe\n", clk);
469 		return PTR_ERR(clk);
470 	}
471 	drvdata->clk = clk;
472 
473 	rc = clk_prepare_enable(drvdata->clk);
474 	if (rc) {
475 		dev_err(dev, "Failed to enable clock\n");
476 		return rc;
477 	}
478 
479 	return 0;
480 }
481 
/* Disable and unprepare the clock acquired in cc_trng_clk_init().
 * A NULL clk (no clock in DT) is a no-op for the clk API.
 */
static void cc_trng_clk_fini(struct cctrng_drvdata *drvdata)
{
	clk_disable_unprepare(drvdata->clk);
}
486 
487 
/*
 * cctrng_probe() - map registers, parse DT, set up IRQ/clock/PM, register
 * with the hw_random framework and start the first collection round.
 *
 * Initialization order matters: interrupts are requested and unmasked
 * before PM init, and runtime PM is only enabled (cc_trng_pm_go) after
 * the HW has been triggered.
 */
static int cctrng_probe(struct platform_device *pdev)
{
	struct resource *req_mem_cc_regs = NULL;
	struct cctrng_drvdata *drvdata;
	struct device *dev = &pdev->dev;
	int rc = 0;
	u32 val;
	int irq;

	drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
	if (!drvdata)
		return -ENOMEM;

	drvdata->rng.name = devm_kstrdup(dev, dev_name(dev), GFP_KERNEL);
	if (!drvdata->rng.name)
		return -ENOMEM;

	drvdata->rng.read = cctrng_read;
	drvdata->rng.priv = (unsigned long)drvdata;
	drvdata->rng.quality = CC_TRNG_QUALITY;

	platform_set_drvdata(pdev, drvdata);
	drvdata->pdev = pdev;

	/* circ is a byte-oriented circular view over the word buffer */
	drvdata->circ.buf = (char *)drvdata->data_buf;

	/* Get device resources */
	/* First CC registers space */
	req_mem_cc_regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	/* Map registers space */
	drvdata->cc_base = devm_ioremap_resource(dev, req_mem_cc_regs);
	if (IS_ERR(drvdata->cc_base)) {
		dev_err(dev, "Failed to ioremap registers");
		return PTR_ERR(drvdata->cc_base);
	}

	dev_dbg(dev, "Got MEM resource (%s): %pR\n", req_mem_cc_regs->name,
		req_mem_cc_regs);
	dev_dbg(dev, "CC registers mapped from %pa to 0x%p\n",
		&req_mem_cc_regs->start, drvdata->cc_base);

	/* Then IRQ */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(dev, "Failed getting IRQ resource\n");
		return irq;
	}

	/* parse sampling rate from device tree */
	rc = cc_trng_parse_sampling_ratio(drvdata);
	if (rc) {
		dev_err(dev, "Failed to get legal sampling ratio for rosc\n");
		return rc;
	}

	rc = cc_trng_clk_init(drvdata);
	if (rc) {
		dev_err(dev, "cc_trng_clk_init failed\n");
		return rc;
	}

	INIT_WORK(&drvdata->compwork, cc_trng_compwork_handler);
	INIT_WORK(&drvdata->startwork, cc_trng_startwork_handler);
	spin_lock_init(&drvdata->read_lock);

	/* register the driver isr function */
	rc = devm_request_irq(dev, irq, cc_isr, IRQF_SHARED, "cctrng", drvdata);
	if (rc) {
		dev_err(dev, "Could not register to interrupt %d\n", irq);
		goto post_clk_err;
	}
	dev_dbg(dev, "Registered to IRQ: %d\n", irq);

	/* Clear all pending interrupts */
	val = cc_ioread(drvdata, CC_HOST_RGF_IRR_REG_OFFSET);
	dev_dbg(dev, "IRR=0x%08X\n", val);
	cc_iowrite(drvdata, CC_HOST_RGF_ICR_REG_OFFSET, val);

	/* unmask HOST RNG interrupt */
	cc_iowrite(drvdata, CC_HOST_RGF_IMR_REG_OFFSET,
		   cc_ioread(drvdata, CC_HOST_RGF_IMR_REG_OFFSET) &
		   ~CC_HOST_RNG_IRQ_MASK);

	/* init PM */
	rc = cc_trng_pm_init(drvdata);
	if (rc) {
		dev_err(dev, "cc_trng_pm_init failed\n");
		goto post_clk_err;
	}

	/* increment device's usage counter */
	rc = cc_trng_pm_get(dev);
	if (rc) {
		dev_err(dev, "cc_trng_pm_get returned %x\n", rc);
		goto post_pm_err;
	}

	/* set pending_hw to verify that HW won't be triggered from read */
	atomic_set(&drvdata->pending_hw, 1);

	/* registration of the hwrng device */
	rc = hwrng_register(&drvdata->rng);
	if (rc) {
		dev_err(dev, "Could not register hwrng device.\n");
		/* NOTE(review): the usage count taken by cc_trng_pm_get()
		 * above is not dropped on this path - verify intended
		 */
		goto post_pm_err;
	}

	/* trigger HW to start generate data */
	drvdata->active_rosc = 0;
	cc_trng_hw_trigger(drvdata);

	/* All set, we can allow auto-suspend */
	cc_trng_pm_go(drvdata);

	dev_info(dev, "ARM cctrng device initialized\n");

	return 0;

post_pm_err:
	cc_trng_pm_fini(drvdata);

post_clk_err:
	cc_trng_clk_fini(drvdata);

	return rc;
}
614 
615 static int cctrng_remove(struct platform_device *pdev)
616 {
617 	struct cctrng_drvdata *drvdata = platform_get_drvdata(pdev);
618 	struct device *dev = &pdev->dev;
619 
620 	dev_dbg(dev, "Releasing cctrng resources...\n");
621 
622 	hwrng_unregister(&drvdata->rng);
623 
624 	cc_trng_pm_fini(drvdata);
625 
626 	cc_trng_clk_fini(drvdata);
627 
628 	dev_info(dev, "ARM cctrng device terminated\n");
629 
630 	return 0;
631 }
632 
633 static int __maybe_unused cctrng_suspend(struct device *dev)
634 {
635 	struct cctrng_drvdata *drvdata = dev_get_drvdata(dev);
636 
637 	dev_dbg(dev, "set HOST_POWER_DOWN_EN\n");
638 	cc_iowrite(drvdata, CC_HOST_POWER_DOWN_EN_REG_OFFSET,
639 			POWER_DOWN_ENABLE);
640 
641 	clk_disable_unprepare(drvdata->clk);
642 
643 	return 0;
644 }
645 
646 static bool cctrng_wait_for_reset_completion(struct cctrng_drvdata *drvdata)
647 {
648 	unsigned int val;
649 	unsigned int i;
650 
651 	for (i = 0; i < CC_HW_RESET_LOOP_COUNT; i++) {
652 		/* in cc7x3 NVM_IS_IDLE indicates that CC reset is
653 		 *  completed and device is fully functional
654 		 */
655 		val = cc_ioread(drvdata, CC_NVM_IS_IDLE_REG_OFFSET);
656 		if (val & BIT(CC_NVM_IS_IDLE_VALUE_BIT_SHIFT)) {
657 			/* hw indicate reset completed */
658 			return true;
659 		}
660 		/* allow scheduling other process on the processor */
661 		schedule();
662 	}
663 	/* reset not completed */
664 	return false;
665 }
666 
/*
 * cctrng_resume() - PM resume callback: re-enable the clock, wait for the
 * CryptoCell reset to finish, then restore interrupt masking and clear
 * the power-down enable. The clock must be running before the reset poll
 * and the register writes; keep this order.
 */
static int __maybe_unused cctrng_resume(struct device *dev)
{
	struct cctrng_drvdata *drvdata = dev_get_drvdata(dev);
	int rc;

	dev_dbg(dev, "unset HOST_POWER_DOWN_EN\n");
	/* Enables the device source clk */
	rc = clk_prepare_enable(drvdata->clk);
	if (rc) {
		dev_err(dev, "failed getting clock back on. We're toast.\n");
		return rc;
	}

	/* wait for Cryptocell reset completion */
	if (!cctrng_wait_for_reset_completion(drvdata)) {
		dev_err(dev, "Cryptocell reset not completed");
		return -EBUSY;
	}

	/* unmask HOST RNG interrupt */
	cc_iowrite(drvdata, CC_HOST_RGF_IMR_REG_OFFSET,
		   cc_ioread(drvdata, CC_HOST_RGF_IMR_REG_OFFSET) &
		   ~CC_HOST_RNG_IRQ_MASK);

	cc_iowrite(drvdata, CC_HOST_POWER_DOWN_EN_REG_OFFSET,
		   POWER_DOWN_DISABLE);

	return 0;
}
696 
/* Same suspend/resume callbacks for both system sleep and runtime PM */
static UNIVERSAL_DEV_PM_OPS(cctrng_pm, cctrng_suspend, cctrng_resume, NULL);

/* Supported CryptoCell TRNG instances (cc713 and cc703) */
static const struct of_device_id arm_cctrng_dt_match[] = {
	{ .compatible = "arm,cryptocell-713-trng", },
	{ .compatible = "arm,cryptocell-703-trng", },
	{},
};
MODULE_DEVICE_TABLE(of, arm_cctrng_dt_match);

static struct platform_driver cctrng_driver = {
	.driver = {
		.name = "cctrng",
		.of_match_table = arm_cctrng_dt_match,
		.pm = &cctrng_pm,
	},
	.probe = cctrng_probe,
	.remove = cctrng_remove,
};
715 
/* Module entry point: validate buffer-size invariants at compile time
 * (at least one EHR worth of words; power-of-2 as required by circ_buf.h)
 * and register the platform driver.
 */
static int __init cctrng_mod_init(void)
{
	/* Compile time assertion checks */
	BUILD_BUG_ON(CCTRNG_DATA_BUF_WORDS < 6);
	BUILD_BUG_ON((CCTRNG_DATA_BUF_WORDS & (CCTRNG_DATA_BUF_WORDS-1)) != 0);

	return platform_driver_register(&cctrng_driver);
}
module_init(cctrng_mod_init);
725 
/* Module exit point: unregister the platform driver. */
static void __exit cctrng_mod_exit(void)
{
	platform_driver_unregister(&cctrng_driver);
}
module_exit(cctrng_mod_exit);
731 
732 /* Module description */
733 MODULE_DESCRIPTION("ARM CryptoCell TRNG Driver");
734 MODULE_AUTHOR("ARM");
735 MODULE_LICENSE("GPL v2");
736