/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_ARCHRANDOM_H
#define _ASM_ARCHRANDOM_H

#ifdef CONFIG_ARCH_RANDOM

#include <linux/random.h>
#include <asm/cpufeature.h>

static inline bool __arm64_rndr(unsigned long *v)
{
	bool ok;

	/*
	 * Reads of RNDR set PSTATE.NZCV to 0b0000 on success,
	 * and set PSTATE.NZCV to 0b0100 otherwise.
	 */
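	/* CSET below writes 1 to "ok" when the Z flag is clear, i.e. on success. */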
	asm volatile(
		__mrs_s("%0", SYS_RNDR_EL0) "\n"
	"	cset %w1, ne\n"
	: "=r" (*v), "=r" (ok)
	:
	: "cc");

	return ok;
}

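/*
 * The plain arch_get_random_{long,int} hooks return false here; only the
 * seed variants below are backed by the RNDR register.
 */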
static inline bool __must_check arch_get_random_long(unsigned long *v)
{
	return false;
}

static inline bool __must_check arch_get_random_int(unsigned int *v)
{
	return false;
}

static inline bool __must_check arch_get_random_seed_long(unsigned long *v)
{
	/*
	 * Only support the generic interface after we have detected
	 * the system wide capability, avoiding complexity with the
	 * cpufeature code and with potential scheduling between CPUs
	 * with and without the feature.
	 */
	if (!cpus_have_const_cap(ARM64_HAS_RNG))
		return false;

	return __arm64_rndr(v);
}

static inline bool __must_check arch_get_random_seed_int(unsigned int *v)
{
	unsigned long val;
	bool ok = arch_get_random_seed_long(&val);

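	/* Truncate the 64-bit seed down to the caller's 32-bit value. */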
	*v = val;
	return ok;
}

static inline bool __init __early_cpu_has_rndr(void)
{
	/* Open code as we run prior to the first call to cpufeature. */
	unsigned long ftr = read_sysreg_s(SYS_ID_AA64ISAR0_EL1);
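	/*
	 * The RNDR field lives in bits [63:60] of ID_AA64ISAR0_EL1;
	 * a non-zero value means RNDR (and RNDRRS) are implemented.
	 */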
	return (ftr >> ID_AA64ISAR0_RNDR_SHIFT) & 0xf;
}

#else

static inline bool __arm64_rndr(unsigned long *v) { return false; }
static inline bool __init __early_cpu_has_rndr(void) { return false; }

#endif /* CONFIG_ARCH_RANDOM */
#endif /* _ASM_ARCHRANDOM_H */