/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_ARCHRANDOM_H
#define _ASM_ARCHRANDOM_H

#include <linux/arm-smccc.h>
#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/irqflags.h>
#include <asm/cpufeature.h>

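/* The firmware TRNG service is usable from SMCCC TRNG ABI version 1.0 on. */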
#define ARM_SMCCC_TRNG_MIN_VERSION	0x10000UL

extern bool smccc_trng_available;

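/*
 * Probe the SMCCC TRNG service: a negative value in a0 means the service
 * is not available; otherwise a0 holds the ABI version, which must be at
 * least 1.0 for us to use it.
 */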
static inline bool __init smccc_probe_trng(void)
{
	struct arm_smccc_res res;

	arm_smccc_1_1_invoke(ARM_SMCCC_TRNG_VERSION, &res);
	if ((s32)res.a0 < 0)
		return false;

	return res.a0 >= ARM_SMCCC_TRNG_MIN_VERSION;
}

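/*
 * Read a 64-bit random number via the RNDR register (FEAT_RNG), returning
 * true on success with the value stored at v.
 */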
static inline bool __arm64_rndr(unsigned long *v)
{
	bool ok;

	/*
	 * Reads of RNDR set PSTATE.NZCV to 0b0000 on success,
	 * and set PSTATE.NZCV to 0b0100 otherwise.
	 */
	asm volatile(
		__mrs_s("%0", SYS_RNDR_EL0) "\n"
	"	cset %w1, ne\n"
	: "=r" (*v), "=r" (ok)
	:
	: "cc");

	return ok;
}

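/*
 * Like __arm64_rndr(), but RNDRRS reseeds the underlying DRBG before the
 * random number is generated.
 */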
static inline bool __arm64_rndrrs(unsigned long *v)
{
	bool ok;

	/*
	 * Reads of RNDRRS set PSTATE.NZCV to 0b0000 on success,
	 * and set PSTATE.NZCV to 0b0100 otherwise.
	 */
	asm volatile(
		__mrs_s("%0", SYS_RNDRRS_EL0) "\n"
	"	cset %w1, ne\n"
	: "=r" (*v), "=r" (ok)
	:
	: "cc");

	return ok;
}

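/*
 * Before the system-wide capabilities are finalized, check the current
 * CPU's support directly; that answer is only meaningful while we cannot
 * be migrated to another CPU, hence the !preemptible() condition.
 */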
static __always_inline bool __cpu_has_rng(void)
{
	if (unlikely(!system_capabilities_finalized() && !preemptible()))
		return this_cpu_has_cap(ARM64_HAS_RNG);
	return cpus_have_const_cap(ARM64_HAS_RNG);
}

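/*
 * Fill at most max_longs longs at v with random numbers and return the
 * number of longs written; this implementation produces at most one long
 * per call.
 */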
static inline size_t __must_check arch_get_random_longs(unsigned long *v, size_t max_longs)
{
	/*
	 * Only support the generic interface after we have detected
	 * the system wide capability, avoiding complexity with the
	 * cpufeature code and with potential scheduling between CPUs
	 * with and without the feature.
	 */
	if (max_longs && __cpu_has_rng() && __arm64_rndr(v))
		return 1;
	return 0;
}

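/*
 * Fill at most max_longs longs at v with seed-grade entropy and return
 * the number of longs written.
 */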
static inline size_t __must_check arch_get_random_seed_longs(unsigned long *v, size_t max_longs)
{
	if (!max_longs)
		return 0;

	/*
	 * We prefer the SMCCC call, since its semantics (returning actual
	 * hardware-backed entropy) are closer to the idea behind this
	 * function than what even the RNDRRS register provides (the
	 * output of a pseudo RNG freshly seeded by a TRNG).
	 */
	if (smccc_trng_available) {
		struct arm_smccc_res res;

		max_longs = min_t(size_t, 3, max_longs);
		arm_smccc_1_1_invoke(ARM_SMCCC_TRNG_RND64, max_longs * 64, &res);
		if ((int)res.a0 >= 0) {
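			/*
			 * The SMCCC TRNG call returns up to 192 bits of
			 * entropy in a1 (most significant) through a3
			 * (least significant), so fall through to copy
			 * exactly max_longs longs.
			 */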
			switch (max_longs) {
			case 3:
				*v++ = res.a1;
				fallthrough;
			case 2:
				*v++ = res.a2;
				fallthrough;
			case 1:
				*v++ = res.a3;
				break;
			}
			return max_longs;
		}
	}

	/*
	 * RNDRRS is not backed by an entropy source but by a DRBG that is
	 * reseeded after each invocation. This is not a 100% fit but good
	 * enough to implement this API if no other entropy source exists.
	 */
	if (__cpu_has_rng() && __arm64_rndrrs(v))
		return 1;

	return 0;
}

static inline bool __init __early_cpu_has_rndr(void)
{
	/* Open code as we run prior to the first call to cpufeature. */
	unsigned long ftr = read_sysreg_s(SYS_ID_AA64ISAR0_EL1);
	return (ftr >> ID_AA64ISAR0_EL1_RNDR_SHIFT) & 0xf;
}

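/* Early KASLR seed derivation, implemented in the position-independent boot code. */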
u64 kaslr_early_init(void *fdt);

#endif /* _ASM_ARCHRANDOM_H */