xref: /openbmc/linux/drivers/crypto/caam/ctrl.c (revision 7cffcade)
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
 * CAAM control-plane driver backend
3  * Controller-level driver, kernel property detection, initialization
4  *
5  * Copyright 2008-2012 Freescale Semiconductor, Inc.
6  * Copyright 2018-2019 NXP
7  */
8 
9 #include <linux/device.h>
10 #include <linux/of_address.h>
11 #include <linux/of_irq.h>
12 #include <linux/sys_soc.h>
13 #include <linux/fsl/mc.h>
14 
15 #include "compat.h"
16 #include "debugfs.h"
17 #include "regs.h"
18 #include "intern.h"
19 #include "jr.h"
20 #include "desc_constr.h"
21 #include "ctrl.h"
22 
23 bool caam_dpaa2;
24 EXPORT_SYMBOL(caam_dpaa2);
25 
26 #ifdef CONFIG_CAAM_QI
27 #include "qi.h"
28 #endif
29 
30 /*
31  * Descriptor to instantiate RNG State Handle 0 in normal mode and
32  * load the JDKEK, TDKEK and TDSK registers
33  */
34 static void build_instantiation_desc(u32 *desc, int handle, int do_sk)
35 {
36 	u32 *jump_cmd, op_flags;
37 
38 	init_job_desc(desc, 0);
39 
40 	op_flags = OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
41 			(handle << OP_ALG_AAI_SHIFT) | OP_ALG_AS_INIT |
42 			OP_ALG_PR_ON;
43 
44 	/* INIT RNG in non-test mode */
45 	append_operation(desc, op_flags);
46 
47 	if (!handle && do_sk) {
48 		/*
49 		 * For SH0, Secure Keys must be generated as well
50 		 */
51 
52 		/* wait for done */
53 		jump_cmd = append_jump(desc, JUMP_CLASS_CLASS1);
54 		set_jump_tgt_here(desc, jump_cmd);
55 
56 		/*
57 		 * load 1 to clear written reg:
58 		 * resets the done interrupt and returns the RNG to idle.
59 		 */
60 		append_load_imm_u32(desc, 1, LDST_SRCDST_WORD_CLRW);
61 
62 		/* Initialize State Handle  */
63 		append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
64 				 OP_ALG_AAI_RNG4_SK);
65 	}
66 
67 	append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TYPE_HALT);
68 }
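/*
 * Illustrative sketch (not part of the upstream driver): one way a caller
 * could build this descriptor for state handle 0 and dump it for
 * inspection. The helper name is hypothetical; the buffer size mirrors the
 * CAAM_CMD_SZ * 7 allocation used by instantiate_rng() further below.
 */
static void __maybe_unused example_dump_rng_inst_desc(void)
{
	u32 *desc = kmalloc(CAAM_CMD_SZ * 7, GFP_KERNEL | GFP_DMA);

	if (!desc)
		return;

	/* state handle 0, with secure key (JDKEK/TDKEK/TDSK) generation */
	build_instantiation_desc(desc, 0, 1);

	/* desc_len() reports the descriptor length in 32-bit words */
	print_hex_dump_debug("rng inst desc@: ", DUMP_PREFIX_OFFSET, 16, 4,
			     desc, desc_len(desc) * CAAM_CMD_SZ, 1);

	kfree(desc);
}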
69 
70 /* Descriptor for deinstantiation of State Handle 0 of the RNG block. */
71 static void build_deinstantiation_desc(u32 *desc, int handle)
72 {
73 	init_job_desc(desc, 0);
74 
75 	/* Uninstantiate State Handle 0 */
76 	append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
77 			 (handle << OP_ALG_AAI_SHIFT) | OP_ALG_AS_INITFINAL);
78 
79 	append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TYPE_HALT);
80 }
81 
82 /*
83  * run_descriptor_deco0 - runs a descriptor on DECO0, under direct control of
84  *			  the software (no JR/QI used).
85  * @ctrldev - pointer to device
86  * @status - descriptor status, after being run
87  *
88  * Return: - 0 if no error occurred
89  *	   - -ENODEV if the DECO couldn't be acquired
90  *	   - -EAGAIN if an error occurred while executing the descriptor
91  */
92 static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc,
93 					u32 *status)
94 {
95 	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
96 	struct caam_ctrl __iomem *ctrl = ctrlpriv->ctrl;
97 	struct caam_deco __iomem *deco = ctrlpriv->deco;
98 	unsigned int timeout = 100000;
99 	u32 deco_dbg_reg, deco_state, flags;
100 	int i;
101 
102 
103 	if (ctrlpriv->virt_en == 1 ||
104 	    /*
105 	     * Apparently on i.MX8M{Q,M,N,P} it doesn't matter if virt_en == 1
106 	     * and the following steps should be performed regardless
107 	     */
108 	    of_machine_is_compatible("fsl,imx8mq") ||
109 	    of_machine_is_compatible("fsl,imx8mm") ||
110 	    of_machine_is_compatible("fsl,imx8mn") ||
111 	    of_machine_is_compatible("fsl,imx8mp")) {
112 		clrsetbits_32(&ctrl->deco_rsr, 0, DECORSR_JR0);
113 
114 		while (!(rd_reg32(&ctrl->deco_rsr) & DECORSR_VALID) &&
115 		       --timeout)
116 			cpu_relax();
117 
118 		timeout = 100000;
119 	}
120 
121 	clrsetbits_32(&ctrl->deco_rq, 0, DECORR_RQD0ENABLE);
122 
123 	while (!(rd_reg32(&ctrl->deco_rq) & DECORR_DEN0) &&
124 								 --timeout)
125 		cpu_relax();
126 
127 	if (!timeout) {
128 		dev_err(ctrldev, "failed to acquire DECO 0\n");
129 		clrsetbits_32(&ctrl->deco_rq, DECORR_RQD0ENABLE, 0);
130 		return -ENODEV;
131 	}
132 
133 	for (i = 0; i < desc_len(desc); i++)
134 		wr_reg32(&deco->descbuf[i], caam32_to_cpu(*(desc + i)));
135 
136 	flags = DECO_JQCR_WHL;
137 	/*
138 	 * If the descriptor is four or more words long, then the
139 	 * FOUR bit in the JRCTRL register must be set.
140 	 */
141 	if (desc_len(desc) >= 4)
142 		flags |= DECO_JQCR_FOUR;
143 
144 	/* Instruct the DECO to execute it */
145 	clrsetbits_32(&deco->jr_ctl_hi, 0, flags);
146 
147 	timeout = 10000000;
148 	do {
149 		deco_dbg_reg = rd_reg32(&deco->desc_dbg);
150 
151 		if (ctrlpriv->era < 10)
152 			deco_state = (deco_dbg_reg & DESC_DBG_DECO_STAT_MASK) >>
153 				     DESC_DBG_DECO_STAT_SHIFT;
154 		else
155 			deco_state = (rd_reg32(&deco->dbg_exec) &
156 				      DESC_DER_DECO_STAT_MASK) >>
157 				     DESC_DER_DECO_STAT_SHIFT;
158 
159 		/*
160 		 * If an error occurred in the descriptor, then
161 		 * the DECO status field will be set to 0x0D
162 		 */
163 		if (deco_state == DECO_STAT_HOST_ERR)
164 			break;
165 
166 		cpu_relax();
167 	} while ((deco_dbg_reg & DESC_DBG_DECO_STAT_VALID) && --timeout);
168 
169 	*status = rd_reg32(&deco->op_status_hi) &
170 		  DECO_OP_STATUS_HI_ERR_MASK;
171 
172 	if (ctrlpriv->virt_en == 1)
173 		clrsetbits_32(&ctrl->deco_rsr, DECORSR_JR0, 0);
174 
175 	/* Mark the DECO as free */
176 	clrsetbits_32(&ctrl->deco_rq, DECORR_RQD0ENABLE, 0);
177 
178 	if (!timeout)
179 		return -EAGAIN;
180 
181 	return 0;
182 }
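/*
 * Illustrative sketch (not part of the upstream driver) of the calling
 * convention the RNG helpers below rely on: run_descriptor_deco0()
 * returning 0, with a resulting status of either 0 or
 * JRSTA_SSRC_JUMP_HALT_CC (the descriptor's terminating HALT), is treated
 * as success. The helper name and the -EIO mapping are assumptions.
 */
static int __maybe_unused example_run_on_deco0(struct device *ctrldev,
					       u32 *desc)
{
	u32 status;
	int ret;

	ret = run_descriptor_deco0(ctrldev, desc, &status);
	if (ret)	/* DECO 0 not acquired, or execution timed out */
		return ret;

	return (status && status != JRSTA_SSRC_JUMP_HALT_CC) ? -EIO : 0;
}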
183 
184 /*
185  * deinstantiate_rng - builds and executes a descriptor on DECO0,
186  *		       which deinitializes the RNG block.
187  * @ctrldev - pointer to device
188  * @state_handle_mask - bitmask containing the instantiation status
189  *			for the RNG4 state handles which exist in
190  *			the RNG4 block: 1 if it's been instantiated
191  *
192  * Return: - 0 if no error occurred
193  *	   - -ENOMEM if there isn't enough memory to allocate the descriptor
194  *	   - -ENODEV if DECO0 couldn't be acquired
195  *	   - -EAGAIN if an error occurred when executing the descriptor
196  */
197 static int deinstantiate_rng(struct device *ctrldev, int state_handle_mask)
198 {
199 	u32 *desc, status;
200 	int sh_idx, ret = 0;
201 
202 	desc = kmalloc(CAAM_CMD_SZ * 3, GFP_KERNEL | GFP_DMA);
203 	if (!desc)
204 		return -ENOMEM;
205 
206 	for (sh_idx = 0; sh_idx < RNG4_MAX_HANDLES; sh_idx++) {
207 		/*
208 		 * If the corresponding bit is set, then it means the state
209 		 * handle was initialized by us, and thus it needs to be
210 		 * deinitialized as well
211 		 */
212 		if ((1 << sh_idx) & state_handle_mask) {
213 			/*
214 			 * Create the descriptor for deinstantiating this state
215 			 * handle
216 			 */
217 			build_deinstantiation_desc(desc, sh_idx);
218 
219 			/* Try to run it through DECO0 */
220 			ret = run_descriptor_deco0(ctrldev, desc, &status);
221 
222 			if (ret ||
223 			    (status && status != JRSTA_SSRC_JUMP_HALT_CC)) {
224 				dev_err(ctrldev,
225 					"Failed to deinstantiate RNG4 SH%d\n",
226 					sh_idx);
227 				break;
228 			}
229 			dev_info(ctrldev, "Deinstantiated RNG4 SH%d\n", sh_idx);
230 		}
231 	}
232 
233 	kfree(desc);
234 
235 	return ret;
236 }
237 
238 static void devm_deinstantiate_rng(void *data)
239 {
240 	struct device *ctrldev = data;
241 	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
242 
243 	/*
244 	 * De-initialize RNG state handles initialized by this driver.
245 	 * In case of SoCs with Management Complex, RNG is managed by MC f/w.
246 	 */
247 	if (ctrlpriv->rng4_sh_init)
248 		deinstantiate_rng(ctrldev, ctrlpriv->rng4_sh_init);
249 }
250 
251 /*
252  * instantiate_rng - builds and executes a descriptor on DECO0,
253  *		     which initializes the RNG block.
254  * @ctrldev - pointer to device
255  * @state_handle_mask - bitmask containing the instantiation status
256  *			for the RNG4 state handles which exist in
257  *			the RNG4 block: 1 if it's been instantiated
258  *			by an external entity, 0 otherwise.
259  * @gen_sk  - generate data to be loaded into the JDKEK, TDKEK and TDSK;
260  *	      Caution: this can be done only once; if the keys need to be
261  *	      regenerated, a POR is required
262  *
263  * Return: - 0 if no error occurred
264  *	   - -ENOMEM if there isn't enough memory to allocate the descriptor
265  *	   - -ENODEV if DECO0 couldn't be acquired
266  *	   - -EAGAIN if an error occurred when executing the descriptor
267  *	      e.g. an RNG hardware error occurred because "good enough"
268  *	      entropy could not be acquired.
269  */
270 static int instantiate_rng(struct device *ctrldev, int state_handle_mask,
271 			   int gen_sk)
272 {
273 	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
274 	struct caam_ctrl __iomem *ctrl;
275 	u32 *desc, status = 0, rdsta_val;
276 	int ret = 0, sh_idx;
277 
278 	ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl;
279 	desc = kmalloc(CAAM_CMD_SZ * 7, GFP_KERNEL | GFP_DMA);
280 	if (!desc)
281 		return -ENOMEM;
282 
283 	for (sh_idx = 0; sh_idx < RNG4_MAX_HANDLES; sh_idx++) {
284 		const u32 rdsta_if = RDSTA_IF0 << sh_idx;
285 		const u32 rdsta_pr = RDSTA_PR0 << sh_idx;
286 		const u32 rdsta_mask = rdsta_if | rdsta_pr;
287 		/*
288 		 * If the corresponding bit is set, this state handle
289 		 * was initialized by somebody else, so it's left alone.
290 		 */
291 		if (rdsta_if & state_handle_mask) {
292 			if (rdsta_pr & state_handle_mask)
293 				continue;
294 
295 			dev_info(ctrldev,
296 				 "RNG4 SH%d was previously instantiated without prediction resistance. Tearing it down\n",
297 				 sh_idx);
298 
299 			ret = deinstantiate_rng(ctrldev, rdsta_if);
300 			if (ret)
301 				break;
302 		}
303 
304 		/* Create the descriptor for instantiating RNG State Handle */
305 		build_instantiation_desc(desc, sh_idx, gen_sk);
306 
307 		/* Try to run it through DECO0 */
308 		ret = run_descriptor_deco0(ctrldev, desc, &status);
309 
310 		/*
311 		 * If ret is not 0, or descriptor status is not 0, then
312 		 * something went wrong. No need to try the next state
313 		 * handle (if available), bail out here.
314 		 * Also, if for some reason, the State Handle didn't get
315 		 * instantiated although the descriptor has finished
316 		 * without any error (HW optimizations for later
317 		 * CAAM eras), then try again.
318 		 */
319 		if (ret)
320 			break;
321 
322 		rdsta_val = rd_reg32(&ctrl->r4tst[0].rdsta) & RDSTA_MASK;
323 		if ((status && status != JRSTA_SSRC_JUMP_HALT_CC) ||
324 		    (rdsta_val & rdsta_mask) != rdsta_mask) {
325 			ret = -EAGAIN;
326 			break;
327 		}
328 
329 		dev_info(ctrldev, "Instantiated RNG4 SH%d\n", sh_idx);
330 		/* Clear the contents before recreating the descriptor */
331 		memset(desc, 0x00, CAAM_CMD_SZ * 7);
332 	}
333 
334 	kfree(desc);
335 
336 	if (ret)
337 		return ret;
338 
339 	return devm_add_action_or_reset(ctrldev, devm_deinstantiate_rng, ctrldev);
340 }
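/*
 * Illustrative sketch (not part of the upstream driver): decoding the RDSTA
 * instantiation (IF) and prediction resistance (PR) bits that
 * instantiate_rng() checks, one state handle at a time. The helper name is
 * hypothetical; the masks are the ones used above.
 */
static void __maybe_unused example_report_rdsta(struct device *ctrldev,
						u32 rdsta)
{
	int sh_idx;

	for (sh_idx = 0; sh_idx < RNG4_MAX_HANDLES; sh_idx++) {
		bool inst = rdsta & (RDSTA_IF0 << sh_idx);
		bool pr = rdsta & (RDSTA_PR0 << sh_idx);

		dev_dbg(ctrldev, "RNG4 SH%d: %s%s\n", sh_idx,
			inst ? "instantiated" : "not instantiated",
			inst && pr ? " (with prediction resistance)" : "");
	}
}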
341 
342 /*
343  * kick_trng - sets the various parameters for enabling the initialization
344  *	       of the RNG4 block in CAAM
345  * @pdev - pointer to the platform device
346  * @ent_delay - Defines the length (in system clocks) of each entropy sample.
347  */
348 static void kick_trng(struct platform_device *pdev, int ent_delay)
349 {
350 	struct device *ctrldev = &pdev->dev;
351 	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
352 	struct caam_ctrl __iomem *ctrl;
353 	struct rng4tst __iomem *r4tst;
354 	u32 val;
355 
356 	ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl;
357 	r4tst = &ctrl->r4tst[0];
358 
359 	/*
360 	 * Setting both RTMCTL:PRGM and RTMCTL:TRNG_ACC causes TRNG to
361 	 * properly invalidate the entropy in the entropy register and
362 	 * force re-generation.
363 	 */
364 	clrsetbits_32(&r4tst->rtmctl, 0, RTMCTL_PRGM | RTMCTL_ACC);
365 
366 	/*
367 	 * Performance-wise, it does not make sense to
368 	 * set the delay to a value that is lower
369 	 * than the last one that worked (i.e. the state handles
370 	 * were instantiated properly). Thus, instead of wasting
371 	 * time trying to set the values controlling the sample
372 	 * frequency, the function simply returns.
373 	 */
374 	val = (rd_reg32(&r4tst->rtsdctl) & RTSDCTL_ENT_DLY_MASK)
375 	      >> RTSDCTL_ENT_DLY_SHIFT;
376 	if (ent_delay <= val)
377 		goto start_rng;
378 
379 	val = rd_reg32(&r4tst->rtsdctl);
380 	val = (val & ~RTSDCTL_ENT_DLY_MASK) |
381 	      (ent_delay << RTSDCTL_ENT_DLY_SHIFT);
382 	wr_reg32(&r4tst->rtsdctl, val);
383 	/* min. freq. count, equal to 1/4 of the entropy sample length */
384 	wr_reg32(&r4tst->rtfrqmin, ent_delay >> 2);
385 	/* disable maximum frequency count */
386 	wr_reg32(&r4tst->rtfrqmax, RTFRQMAX_DISABLE);
387 	/* read the control register */
388 	val = rd_reg32(&r4tst->rtmctl);
389 start_rng:
390 	/*
391 	 * select raw sampling in both the entropy shifter
392 	 * and the statistical checker; put RNG4 into run mode
393 	 */
394 	clrsetbits_32(&r4tst->rtmctl, RTMCTL_PRGM | RTMCTL_ACC,
395 		      RTMCTL_SAMP_MODE_RAW_ES_SC);
396 }
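/*
 * Illustrative sketch (not part of the upstream driver): a simplified
 * version of the retry pattern caam_probe() below applies around
 * kick_trng() and instantiate_rng(). On -EAGAIN the entropy delay is
 * increased by 400 system clocks and the TRNG reprogrammed, until either
 * instantiation succeeds or RTSDCTL_ENT_DLY_MAX is reached. The helper
 * name is hypothetical.
 */
static int __maybe_unused example_rng_init_retry(struct platform_device *pdev,
						 int inst_handles, int gen_sk)
{
	int ent_delay = RTSDCTL_ENT_DLY_MIN;
	int ret;

	do {
		kick_trng(pdev, ent_delay);
		ret = instantiate_rng(&pdev->dev, inst_handles, gen_sk);
		ent_delay += 400;
	} while (ret == -EAGAIN && ent_delay < RTSDCTL_ENT_DLY_MAX);

	return ret;
}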
397 
398 static int caam_get_era_from_hw(struct caam_ctrl __iomem *ctrl)
399 {
400 	static const struct {
401 		u16 ip_id;
402 		u8 maj_rev;
403 		u8 era;
404 	} id[] = {
405 		{0x0A10, 1, 1},
406 		{0x0A10, 2, 2},
407 		{0x0A12, 1, 3},
408 		{0x0A14, 1, 3},
409 		{0x0A14, 2, 4},
410 		{0x0A16, 1, 4},
411 		{0x0A10, 3, 4},
412 		{0x0A11, 1, 4},
413 		{0x0A18, 1, 4},
414 		{0x0A11, 2, 5},
415 		{0x0A12, 2, 5},
416 		{0x0A13, 1, 5},
417 		{0x0A1C, 1, 5}
418 	};
419 	u32 ccbvid, id_ms;
420 	u8 maj_rev, era;
421 	u16 ip_id;
422 	int i;
423 
424 	ccbvid = rd_reg32(&ctrl->perfmon.ccb_id);
425 	era = (ccbvid & CCBVID_ERA_MASK) >> CCBVID_ERA_SHIFT;
426 	if (era)	/* This is '0' prior to CAAM ERA-6 */
427 		return era;
428 
429 	id_ms = rd_reg32(&ctrl->perfmon.caam_id_ms);
430 	ip_id = (id_ms & SECVID_MS_IPID_MASK) >> SECVID_MS_IPID_SHIFT;
431 	maj_rev = (id_ms & SECVID_MS_MAJ_REV_MASK) >> SECVID_MS_MAJ_REV_SHIFT;
432 
433 	for (i = 0; i < ARRAY_SIZE(id); i++)
434 		if (id[i].ip_id == ip_id && id[i].maj_rev == maj_rev)
435 			return id[i].era;
436 
437 	return -ENOTSUPP;
438 }
439 
440 /**
441  * caam_get_era() - Return the ERA of the SEC on the SoC, based on the
442  * optional "fsl,sec-era" property in the DTS. This property is updated
443  * by u-boot.
444  * If the property is not present, an attempt is made to retrieve the
445  * CAAM era via register reads.
446  *
447  * @ctrl:	controller region
448  */
449 static int caam_get_era(struct caam_ctrl __iomem *ctrl)
450 {
451 	struct device_node *caam_node;
452 	int ret;
453 	u32 prop;
454 
455 	caam_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
456 	ret = of_property_read_u32(caam_node, "fsl,sec-era", &prop);
457 	of_node_put(caam_node);
458 
459 	if (!ret)
460 		return prop;
461 	else
462 		return caam_get_era_from_hw(ctrl);
463 }
464 
465 /*
466  * ERRATA: imx6 devices (imx6D, imx6Q, imx6DL, imx6S, imx6DP and imx6QP)
467  * have an issue wherein AXI bus transactions may not occur in the correct
468  * order. This isn't a problem running single descriptors, but can be if
469  * running multiple concurrent descriptors. Reworking the driver to throttle
470  * to single requests is impractical, thus the workaround is to limit the AXI
471  * pipeline to a depth of 1 (from its default of 4) to preclude this situation
472  * from occurring.
473  */
474 static void handle_imx6_err005766(u32 __iomem *mcr)
475 {
476 	if (of_machine_is_compatible("fsl,imx6q") ||
477 	    of_machine_is_compatible("fsl,imx6dl") ||
478 	    of_machine_is_compatible("fsl,imx6qp"))
479 		clrsetbits_32(mcr, MCFGR_AXIPIPE_MASK,
480 			      1 << MCFGR_AXIPIPE_SHIFT);
481 }
482 
483 static const struct of_device_id caam_match[] = {
484 	{
485 		.compatible = "fsl,sec-v4.0",
486 	},
487 	{
488 		.compatible = "fsl,sec4.0",
489 	},
490 	{},
491 };
492 MODULE_DEVICE_TABLE(of, caam_match);
493 
494 struct caam_imx_data {
495 	const struct clk_bulk_data *clks;
496 	int num_clks;
497 };
498 
499 static const struct clk_bulk_data caam_imx6_clks[] = {
500 	{ .id = "ipg" },
501 	{ .id = "mem" },
502 	{ .id = "aclk" },
503 	{ .id = "emi_slow" },
504 };
505 
506 static const struct caam_imx_data caam_imx6_data = {
507 	.clks = caam_imx6_clks,
508 	.num_clks = ARRAY_SIZE(caam_imx6_clks),
509 };
510 
511 static const struct clk_bulk_data caam_imx7_clks[] = {
512 	{ .id = "ipg" },
513 	{ .id = "aclk" },
514 };
515 
516 static const struct caam_imx_data caam_imx7_data = {
517 	.clks = caam_imx7_clks,
518 	.num_clks = ARRAY_SIZE(caam_imx7_clks),
519 };
520 
521 static const struct clk_bulk_data caam_imx6ul_clks[] = {
522 	{ .id = "ipg" },
523 	{ .id = "mem" },
524 	{ .id = "aclk" },
525 };
526 
527 static const struct caam_imx_data caam_imx6ul_data = {
528 	.clks = caam_imx6ul_clks,
529 	.num_clks = ARRAY_SIZE(caam_imx6ul_clks),
530 };
531 
532 static const struct clk_bulk_data caam_vf610_clks[] = {
533 	{ .id = "ipg" },
534 };
535 
536 static const struct caam_imx_data caam_vf610_data = {
537 	.clks = caam_vf610_clks,
538 	.num_clks = ARRAY_SIZE(caam_vf610_clks),
539 };
540 
541 static const struct soc_device_attribute caam_imx_soc_table[] = {
542 	{ .soc_id = "i.MX6UL", .data = &caam_imx6ul_data },
543 	{ .soc_id = "i.MX6*",  .data = &caam_imx6_data },
544 	{ .soc_id = "i.MX7*",  .data = &caam_imx7_data },
545 	{ .soc_id = "i.MX8M*", .data = &caam_imx7_data },
546 	{ .soc_id = "VF*",     .data = &caam_vf610_data },
547 	{ .family = "Freescale i.MX" },
548 	{ /* sentinel */ }
549 };
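/*
 * Illustrative sketch (not part of the upstream driver): supporting an
 * additional i.MX-style SoC would follow the same pattern as the entries
 * above -- a clk_bulk_data array, a caam_imx_data wrapper referencing it,
 * and a row in caam_imx_soc_table pointing at that wrapper. The names and
 * clock ids below are placeholders, not a real SoC's clock tree.
 */
static const struct clk_bulk_data caam_example_clks[] __maybe_unused = {
	{ .id = "ipg" },
	{ .id = "aclk" },
};

static const struct caam_imx_data caam_example_data __maybe_unused = {
	.clks = caam_example_clks,
	.num_clks = ARRAY_SIZE(caam_example_clks),
};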
550 
551 static void disable_clocks(void *data)
552 {
553 	struct caam_drv_private *ctrlpriv = data;
554 
555 	clk_bulk_disable_unprepare(ctrlpriv->num_clks, ctrlpriv->clks);
556 }
557 
558 static int init_clocks(struct device *dev, const struct caam_imx_data *data)
559 {
560 	struct caam_drv_private *ctrlpriv = dev_get_drvdata(dev);
561 	int ret;
562 
563 	ctrlpriv->num_clks = data->num_clks;
564 	ctrlpriv->clks = devm_kmemdup(dev, data->clks,
565 				      data->num_clks * sizeof(data->clks[0]),
566 				      GFP_KERNEL);
567 	if (!ctrlpriv->clks)
568 		return -ENOMEM;
569 
570 	ret = devm_clk_bulk_get(dev, ctrlpriv->num_clks, ctrlpriv->clks);
571 	if (ret) {
572 		dev_err(dev,
573 			"Failed to request all necessary clocks\n");
574 		return ret;
575 	}
576 
577 	ret = clk_bulk_prepare_enable(ctrlpriv->num_clks, ctrlpriv->clks);
578 	if (ret) {
579 		dev_err(dev,
580 			"Failed to prepare/enable all necessary clocks\n");
581 		return ret;
582 	}
583 
584 	return devm_add_action_or_reset(dev, disable_clocks, ctrlpriv);
585 }
586 
587 static void caam_remove_debugfs(void *root)
588 {
589 	debugfs_remove_recursive(root);
590 }
591 
592 #ifdef CONFIG_FSL_MC_BUS
593 static bool check_version(struct fsl_mc_version *mc_version, u32 major,
594 			  u32 minor, u32 revision)
595 {
596 	if (mc_version->major > major)
597 		return true;
598 
599 	if (mc_version->major == major) {
600 		if (mc_version->minor > minor)
601 			return true;
602 
603 		if (mc_version->minor == minor &&
604 		    mc_version->revision > revision)
605 			return true;
606 	}
607 
608 	return false;
609 }
610 #endif
611 
612 static bool needs_entropy_delay_adjustment(void)
613 {
614 	if (of_machine_is_compatible("fsl,imx6sx"))
615 		return true;
616 	return false;
617 }
618 
619 /* Probe routine for CAAM top (controller) level */
620 static int caam_probe(struct platform_device *pdev)
621 {
622 	int ret, ring, gen_sk, ent_delay = RTSDCTL_ENT_DLY_MIN;
623 	u64 caam_id;
624 	const struct soc_device_attribute *imx_soc_match;
625 	struct device *dev;
626 	struct device_node *nprop, *np;
627 	struct caam_ctrl __iomem *ctrl;
628 	struct caam_drv_private *ctrlpriv;
629 	struct dentry *dfs_root;
630 	u32 scfgr, comp_params;
631 	u8 rng_vid;
632 	int pg_size;
633 	int BLOCK_OFFSET = 0;
634 	bool pr_support = false;
635 
636 	ctrlpriv = devm_kzalloc(&pdev->dev, sizeof(*ctrlpriv), GFP_KERNEL);
637 	if (!ctrlpriv)
638 		return -ENOMEM;
639 
640 	dev = &pdev->dev;
641 	dev_set_drvdata(dev, ctrlpriv);
642 	nprop = pdev->dev.of_node;
643 
644 	imx_soc_match = soc_device_match(caam_imx_soc_table);
645 	caam_imx = (bool)imx_soc_match;
646 
647 	if (imx_soc_match) {
648 		if (!imx_soc_match->data) {
649 			dev_err(dev, "No clock data provided for i.MX SoC");
650 			return -EINVAL;
651 		}
652 
653 		ret = init_clocks(dev, imx_soc_match->data);
654 		if (ret)
655 			return ret;
656 	}
657 
658 
659 	/* Get configuration properties from device tree */
660 	/* First, get register page */
661 	ctrl = devm_of_iomap(dev, nprop, 0, NULL);
662 	ret = PTR_ERR_OR_ZERO(ctrl);
663 	if (ret) {
664 		dev_err(dev, "caam: of_iomap() failed\n");
665 		return ret;
666 	}
667 
668 	caam_little_end = !(bool)(rd_reg32(&ctrl->perfmon.status) &
669 				  (CSTA_PLEND | CSTA_ALT_PLEND));
670 	comp_params = rd_reg32(&ctrl->perfmon.comp_parms_ms);
671 	if (comp_params & CTPR_MS_PS && rd_reg32(&ctrl->mcr) & MCFGR_LONG_PTR)
672 		caam_ptr_sz = sizeof(u64);
673 	else
674 		caam_ptr_sz = sizeof(u32);
675 	caam_dpaa2 = !!(comp_params & CTPR_MS_DPAA2);
676 	ctrlpriv->qi_present = !!(comp_params & CTPR_MS_QI_MASK);
677 
678 #ifdef CONFIG_CAAM_QI
679 	/* If (DPAA 1.x) QI present, check whether dependencies are available */
680 	if (ctrlpriv->qi_present && !caam_dpaa2) {
681 		ret = qman_is_probed();
682 		if (!ret) {
683 			return -EPROBE_DEFER;
684 		} else if (ret < 0) {
685 			dev_err(dev, "failing probe due to qman probe error\n");
686 			return -ENODEV;
687 		}
688 
689 		ret = qman_portals_probed();
690 		if (!ret) {
691 			return -EPROBE_DEFER;
692 		} else if (ret < 0) {
693 			dev_err(dev, "failing probe due to qman portals probe error\n");
694 			return -ENODEV;
695 		}
696 	}
697 #endif
698 
699 	/* Select BLOCK_OFFSET based on the register page size supported
700 	 * by the platform
701 	 */
702 	pg_size = (comp_params & CTPR_MS_PG_SZ_MASK) >> CTPR_MS_PG_SZ_SHIFT;
703 	if (pg_size == 0)
704 		BLOCK_OFFSET = PG_SIZE_4K;
705 	else
706 		BLOCK_OFFSET = PG_SIZE_64K;
707 
708 	ctrlpriv->ctrl = (struct caam_ctrl __iomem __force *)ctrl;
709 	ctrlpriv->assure = (struct caam_assurance __iomem __force *)
710 			   ((__force uint8_t *)ctrl +
711 			    BLOCK_OFFSET * ASSURE_BLOCK_NUMBER
712 			   );
713 	ctrlpriv->deco = (struct caam_deco __iomem __force *)
714 			 ((__force uint8_t *)ctrl +
715 			 BLOCK_OFFSET * DECO_BLOCK_NUMBER
716 			 );
717 
718 	/* Get the IRQ of the controller (for security violations only) */
719 	ctrlpriv->secvio_irq = irq_of_parse_and_map(nprop, 0);
720 	np = of_find_compatible_node(NULL, NULL, "fsl,qoriq-mc");
721 	ctrlpriv->mc_en = !!np;
722 	of_node_put(np);
723 
724 #ifdef CONFIG_FSL_MC_BUS
725 	if (ctrlpriv->mc_en) {
726 		struct fsl_mc_version *mc_version;
727 
728 		mc_version = fsl_mc_get_version();
729 		if (mc_version)
730 			pr_support = check_version(mc_version, 10, 20, 0);
731 		else
732 			return -EPROBE_DEFER;
733 	}
734 #endif
735 
736 	/*
737 	 * Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel,
738 	 * long pointers in master configuration register.
739 	 * In case of SoCs with Management Complex, MC f/w performs
740 	 * the configuration.
741 	 */
742 	if (!ctrlpriv->mc_en)
743 		clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK,
744 			      MCFGR_AWCACHE_CACH | MCFGR_AWCACHE_BUFF |
745 			      MCFGR_WDENABLE | MCFGR_LARGE_BURST);
746 
747 	handle_imx6_err005766(&ctrl->mcr);
748 
749 	/*
750 	 * Read the Compile Time parameters and SCFGR to determine
751 	 * if virtualization is enabled for this platform
752 	 */
753 	scfgr = rd_reg32(&ctrl->scfgr);
754 
755 	ctrlpriv->virt_en = 0;
756 	if (comp_params & CTPR_MS_VIRT_EN_INCL) {
757 		/* VIRT_EN_INCL = 1 & VIRT_EN_POR = 1 or
758 		 * VIRT_EN_INCL = 1 & VIRT_EN_POR = 0 & SCFGR_VIRT_EN = 1
759 		 */
760 		if ((comp_params & CTPR_MS_VIRT_EN_POR) ||
761 		    (!(comp_params & CTPR_MS_VIRT_EN_POR) &&
762 		       (scfgr & SCFGR_VIRT_EN)))
763 				ctrlpriv->virt_en = 1;
764 	} else {
765 		/* VIRT_EN_INCL = 0 && VIRT_EN_POR_VALUE = 1 */
766 		if (comp_params & CTPR_MS_VIRT_EN_POR)
767 				ctrlpriv->virt_en = 1;
768 	}
769 
770 	if (ctrlpriv->virt_en == 1)
771 		clrsetbits_32(&ctrl->jrstart, 0, JRSTART_JR0_START |
772 			      JRSTART_JR1_START | JRSTART_JR2_START |
773 			      JRSTART_JR3_START);
774 
775 	ret = dma_set_mask_and_coherent(dev, caam_get_dma_mask(dev));
776 	if (ret) {
777 		dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n", ret);
778 		return ret;
779 	}
780 
781 	ctrlpriv->era = caam_get_era(ctrl);
782 	ctrlpriv->domain = iommu_get_domain_for_dev(dev);
783 
784 	dfs_root = debugfs_create_dir(dev_name(dev), NULL);
785 	if (IS_ENABLED(CONFIG_DEBUG_FS)) {
786 		ret = devm_add_action_or_reset(dev, caam_remove_debugfs,
787 					       dfs_root);
788 		if (ret)
789 			return ret;
790 	}
791 
792 	caam_debugfs_init(ctrlpriv, dfs_root);
793 
794 	/* Check to see if (DPAA 1.x) QI present. If so, enable */
795 	if (ctrlpriv->qi_present && !caam_dpaa2) {
796 		ctrlpriv->qi = (struct caam_queue_if __iomem __force *)
797 			       ((__force uint8_t *)ctrl +
798 				 BLOCK_OFFSET * QI_BLOCK_NUMBER
799 			       );
800 		/* This is all that's required to physically enable QI */
801 		wr_reg32(&ctrlpriv->qi->qi_control_lo, QICTL_DQEN);
802 
803 		/* If QMAN driver is present, init CAAM-QI backend */
804 #ifdef CONFIG_CAAM_QI
805 		ret = caam_qi_init(pdev);
806 		if (ret)
807 			dev_err(dev, "caam qi i/f init failed: %d\n", ret);
808 #endif
809 	}
810 
811 	ring = 0;
812 	for_each_available_child_of_node(nprop, np)
813 		if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
814 		    of_device_is_compatible(np, "fsl,sec4.0-job-ring")) {
815 			ctrlpriv->jr[ring] = (struct caam_job_ring __iomem __force *)
816 					     ((__force uint8_t *)ctrl +
817 					     (ring + JR_BLOCK_NUMBER) *
818 					      BLOCK_OFFSET
819 					     );
820 			ctrlpriv->total_jobrs++;
821 			ring++;
822 		}
823 
824 	/* If no QI and no rings specified, quit and go home */
825 	if ((!ctrlpriv->qi_present) && (!ctrlpriv->total_jobrs)) {
826 		dev_err(dev, "no queues configured, terminating\n");
827 		return -ENOMEM;
828 	}
829 
830 	comp_params = rd_reg32(&ctrl->perfmon.comp_parms_ls);
831 	ctrlpriv->blob_present = !!(comp_params & CTPR_LS_BLOB);
832 
833 	/*
834 	 * Some SoCs like the LS1028A (non-E) indicate CTPR_LS_BLOB support,
835 	 * but fail when actually using it due to missing AES support, so
836 	 * check both here.
837 	 */
838 	if (ctrlpriv->era < 10) {
839 		rng_vid = (rd_reg32(&ctrl->perfmon.cha_id_ls) &
840 			   CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT;
841 		ctrlpriv->blob_present = ctrlpriv->blob_present &&
842 			(rd_reg32(&ctrl->perfmon.cha_num_ls) & CHA_ID_LS_AES_MASK);
843 	} else {
844 		rng_vid = (rd_reg32(&ctrl->vreg.rng) & CHA_VER_VID_MASK) >>
845 			   CHA_VER_VID_SHIFT;
846 		ctrlpriv->blob_present = ctrlpriv->blob_present &&
847 			(rd_reg32(&ctrl->vreg.aesa) & CHA_VER_MISC_AES_NUM_MASK);
848 	}
849 
850 	/*
851 	 * If SEC has RNG version >= 4 and the RNG state handle has not
852 	 * already been instantiated, do RNG instantiation.
853 	 * In case of SoCs with Management Complex, RNG is managed by MC f/w.
854 	 */
855 	if (!(ctrlpriv->mc_en && pr_support) && rng_vid >= 4) {
856 		ctrlpriv->rng4_sh_init =
857 			rd_reg32(&ctrl->r4tst[0].rdsta);
858 		/*
859 		 * If the secure keys (TDKEK, JDKEK, TDSK), were already
860 		 * generated, signal this to the function that is instantiating
861 		 * the state handles. An error would occur if RNG4 attempts
862 		 * to regenerate these keys before the next POR.
863 		 */
864 		gen_sk = ctrlpriv->rng4_sh_init & RDSTA_SKVN ? 0 : 1;
865 		ctrlpriv->rng4_sh_init &= RDSTA_MASK;
866 		do {
867 			int inst_handles =
868 				rd_reg32(&ctrl->r4tst[0].rdsta) &
869 								RDSTA_MASK;
870 			/*
871 			 * If either SH was instantiated by somebody else
872 			 * (e.g. u-boot) then it is assumed that the entropy
873 			 * parameters are properly set and thus the function
874 			 * setting these (kick_trng(...)) is skipped.
875 			 * Also, if a handle was instantiated, do not change
876 			 * the TRNG parameters.
877 			 */
878 			if (needs_entropy_delay_adjustment())
879 				ent_delay = 12000;
880 			if (!(ctrlpriv->rng4_sh_init || inst_handles)) {
881 				dev_info(dev,
882 					 "Entropy delay = %u\n",
883 					 ent_delay);
884 				kick_trng(pdev, ent_delay);
885 				ent_delay += 400;
886 			}
887 			/*
888 			 * if instantiate_rng(...) fails, the loop will rerun
889 			 * and the kick_trng(...) function will modify the
890 			 * upper and lower limits of the entropy sampling
891 			 * interval, leading to a successful initialization of
892 			 * the RNG.
893 			 */
894 			ret = instantiate_rng(dev, inst_handles,
895 					      gen_sk);
896 			/*
897 			 * Entropy delay is determined via TRNG characterization.
898 			 * TRNG characterization is run across different voltages
899 			 * and temperatures.
900 			 * If worst case value for ent_dly is identified,
901 			 * the loop can be skipped for that platform.
902 			 */
903 			if (needs_entropy_delay_adjustment())
904 				break;
905 			if (ret == -EAGAIN)
906 				/*
907 				 * if here, the loop will rerun,
908 				 * so don't hog the CPU
909 				 */
910 				cpu_relax();
911 		} while ((ret == -EAGAIN) && (ent_delay < RTSDCTL_ENT_DLY_MAX));
912 		if (ret) {
913 			dev_err(dev, "failed to instantiate RNG");
914 			return ret;
915 		}
916 		/*
917 		 * Set handles initialized by this module as the complement of
918 		 * the already initialized ones
919 		 */
920 		ctrlpriv->rng4_sh_init = ~ctrlpriv->rng4_sh_init & RDSTA_MASK;
921 
922 		/* Enable RDB bit so that RNG works faster */
923 		clrsetbits_32(&ctrl->scfgr, 0, SCFGR_RDBENABLE);
924 	}
925 
926 	/* NOTE: RTIC detection ought to go here, around Si time */
927 
928 	caam_id = (u64)rd_reg32(&ctrl->perfmon.caam_id_ms) << 32 |
929 		  (u64)rd_reg32(&ctrl->perfmon.caam_id_ls);
930 
931 	/* Report "alive" for developer to see */
932 	dev_info(dev, "device ID = 0x%016llx (Era %d)\n", caam_id,
933 		 ctrlpriv->era);
934 	dev_info(dev, "job rings = %d, qi = %d\n",
935 		 ctrlpriv->total_jobrs, ctrlpriv->qi_present);
936 
937 	ret = devm_of_platform_populate(dev);
938 	if (ret)
939 		dev_err(dev, "JR platform devices creation error\n");
940 
941 	return ret;
942 }
943 
944 static struct platform_driver caam_driver = {
945 	.driver = {
946 		.name = "caam",
947 		.of_match_table = caam_match,
948 	},
949 	.probe       = caam_probe,
950 };
951 
952 module_platform_driver(caam_driver);
953 
954 MODULE_LICENSE("GPL");
955 MODULE_DESCRIPTION("FSL CAAM request backend");
956 MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");
957