xref: /openbmc/linux/arch/arm64/kernel/cpuinfo.c (revision 33ac9dba)
/*
 * Record and handle CPU attributes.
 *
 * Copyright (C) 2014 ARM Ltd.
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <asm/arch_timer.h>
#include <asm/cachetype.h>
#include <asm/cpu.h>
#include <asm/cputype.h>

#include <linux/bitops.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/smp.h>

/*
 * In case the boot CPU is hotpluggable, we record its initial state and
 * current state separately. Certain system registers may contain different
 * values depending on configuration at or after reset.
 */
DEFINE_PER_CPU(struct cpuinfo_arm64, cpu_data);
static struct cpuinfo_arm64 boot_cpu_data;

static char *icache_policy_str[] = {
	[ICACHE_POLICY_RESERVED] = "RESERVED/UNKNOWN",
	[ICACHE_POLICY_AIVIVT] = "AIVIVT",
	[ICACHE_POLICY_VIPT] = "VIPT",
	[ICACHE_POLICY_PIPT] = "PIPT",
};

unsigned long __icache_flags;

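/*
 * Record the L1 I-cache policy for this CPU. Any policy other than PIPT
 * means the I-cache may alias, which is noted in __icache_flags.
 */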
static void cpuinfo_detect_icache_policy(struct cpuinfo_arm64 *info)
{
	unsigned int cpu = smp_processor_id();
	u32 l1ip = CTR_L1IP(info->reg_ctr);

	if (l1ip != ICACHE_POLICY_PIPT)
		set_bit(ICACHEF_ALIASING, &__icache_flags);
	if (l1ip == ICACHE_POLICY_AIVIVT)
		set_bit(ICACHEF_AIVIVT, &__icache_flags);

	pr_info("Detected %s I-cache on CPU%d\n", icache_policy_str[l1ip], cpu);
}

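/*
 * Compare the masked boot-CPU and current-CPU values of a register and
 * warn on any mismatch. Returns 1 if the values differ, 0 otherwise.
 */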
static int check_reg_mask(char *name, u64 mask, u64 boot, u64 cur, int cpu)
{
	if ((boot & mask) == (cur & mask))
		return 0;

	pr_warn("SANITY CHECK: Unexpected variation in %s. Boot CPU: %#016lx, CPU%d: %#016lx\n",
		name, (unsigned long)boot, cpu, (unsigned long)cur);

	return 1;
}

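/*
 * CHECK_MASK() compares the reg_<field> members of two cpuinfo records
 * under a mask; CHECK() compares all 64 bits of the field.
 */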
#define CHECK_MASK(field, mask, boot, cur, cpu) \
	check_reg_mask(#field, mask, (boot)->reg_ ## field, (cur)->reg_ ## field, cpu)

#define CHECK(field, boot, cur, cpu) \
	CHECK_MASK(field, ~0ULL, boot, cur, cpu)

/*
 * Verify that CPUs don't have unexpected differences that will cause problems.
 */
static void cpuinfo_sanity_check(struct cpuinfo_arm64 *cur)
{
	unsigned int cpu = smp_processor_id();
	struct cpuinfo_arm64 *boot = &boot_cpu_data;
	unsigned int diff = 0;

	/*
	 * The kernel can handle differing I-cache policies, but otherwise
	 * caches should look identical. Userspace JITs will make use of
	 * *minLine.
	 */
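	/* The 0xffff3fff mask excludes CTR_EL0.L1Ip (bits [15:14]). */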
	diff |= CHECK_MASK(ctr, 0xffff3fff, boot, cur, cpu);

	/*
	 * Userspace may perform DC ZVA instructions. Mismatched block sizes
	 * could result in too much or too little memory being zeroed if a
	 * process is preempted and migrated between CPUs.
	 */
	diff |= CHECK(dczid, boot, cur, cpu);

	/* If different, timekeeping will be broken (especially with KVM) */
	diff |= CHECK(cntfrq, boot, cur, cpu);

	/*
	 * Even in big.LITTLE, processors should be identical instruction-set
	 * wise.
	 */
	diff |= CHECK(id_aa64isar0, boot, cur, cpu);
	diff |= CHECK(id_aa64isar1, boot, cur, cpu);

	/*
	 * Differing PARange support is fine as long as all peripherals and
	 * memory are mapped within the minimum PARange of all CPUs.
	 * Linux should not care about secure memory.
	 * ID_AA64MMFR1 is currently RES0.
	 */
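	/* The mask excludes PARange (bits [3:0]) and SNSMem (bits [15:12]). */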
	diff |= CHECK_MASK(id_aa64mmfr0, 0xffffffffffff0ff0, boot, cur, cpu);
	diff |= CHECK(id_aa64mmfr1, boot, cur, cpu);

	/*
	 * EL3 is not our concern.
	 * ID_AA64PFR1 is currently RES0.
	 */
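	/* The mask excludes the EL3 field (bits [15:12]) of ID_AA64PFR0_EL1. */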
	diff |= CHECK_MASK(id_aa64pfr0, 0xffffffffffff0fff, boot, cur, cpu);
	diff |= CHECK(id_aa64pfr1, boot, cur, cpu);

	/*
	 * If we have AArch32, we care about 32-bit features for compat. These
	 * registers should be RES0 otherwise.
	 */
	diff |= CHECK(id_isar0, boot, cur, cpu);
	diff |= CHECK(id_isar1, boot, cur, cpu);
	diff |= CHECK(id_isar2, boot, cur, cpu);
	diff |= CHECK(id_isar3, boot, cur, cpu);
	diff |= CHECK(id_isar4, boot, cur, cpu);
	diff |= CHECK(id_isar5, boot, cur, cpu);
	diff |= CHECK(id_mmfr0, boot, cur, cpu);
	diff |= CHECK(id_mmfr1, boot, cur, cpu);
	diff |= CHECK(id_mmfr2, boot, cur, cpu);
	diff |= CHECK(id_mmfr3, boot, cur, cpu);
	diff |= CHECK(id_pfr0, boot, cur, cpu);
	diff |= CHECK(id_pfr1, boot, cur, cpu);

	/*
	 * Mismatched CPU features are a recipe for disaster. Don't even
	 * pretend to support them.
	 */
	WARN_TAINT_ONCE(diff, TAINT_CPU_OUT_OF_SPEC,
			"Unsupported CPU feature variation.");
}

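/*
 * Read this CPU's identification and feature registers into *info.
 */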
static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
{
	info->reg_cntfrq = arch_timer_get_cntfrq();
	info->reg_ctr = read_cpuid_cachetype();
	info->reg_dczid = read_cpuid(DCZID_EL0);
	info->reg_midr = read_cpuid_id();

	info->reg_id_aa64isar0 = read_cpuid(ID_AA64ISAR0_EL1);
	info->reg_id_aa64isar1 = read_cpuid(ID_AA64ISAR1_EL1);
	info->reg_id_aa64mmfr0 = read_cpuid(ID_AA64MMFR0_EL1);
	info->reg_id_aa64mmfr1 = read_cpuid(ID_AA64MMFR1_EL1);
	info->reg_id_aa64pfr0 = read_cpuid(ID_AA64PFR0_EL1);
	info->reg_id_aa64pfr1 = read_cpuid(ID_AA64PFR1_EL1);

	info->reg_id_isar0 = read_cpuid(ID_ISAR0_EL1);
	info->reg_id_isar1 = read_cpuid(ID_ISAR1_EL1);
	info->reg_id_isar2 = read_cpuid(ID_ISAR2_EL1);
	info->reg_id_isar3 = read_cpuid(ID_ISAR3_EL1);
	info->reg_id_isar4 = read_cpuid(ID_ISAR4_EL1);
	info->reg_id_isar5 = read_cpuid(ID_ISAR5_EL1);
	info->reg_id_mmfr0 = read_cpuid(ID_MMFR0_EL1);
	info->reg_id_mmfr1 = read_cpuid(ID_MMFR1_EL1);
	info->reg_id_mmfr2 = read_cpuid(ID_MMFR2_EL1);
	info->reg_id_mmfr3 = read_cpuid(ID_MMFR3_EL1);
	info->reg_id_pfr0 = read_cpuid(ID_PFR0_EL1);
	info->reg_id_pfr1 = read_cpuid(ID_PFR1_EL1);

	cpuinfo_detect_icache_policy(info);
}

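/*
 * Record the calling CPU's register state and sanity-check it against
 * the boot CPU.
 */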
void cpuinfo_store_cpu(void)
{
	struct cpuinfo_arm64 *info = this_cpu_ptr(&cpu_data);
	__cpuinfo_store_cpu(info);
	cpuinfo_sanity_check(info);
}

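/*
 * Record the boot CPU's register state and keep a copy in boot_cpu_data
 * for later comparison with secondary CPUs.
 */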
void __init cpuinfo_store_boot_cpu(void)
{
	struct cpuinfo_arm64 *info = &per_cpu(cpu_data, 0);
	__cpuinfo_store_cpu(info);

	boot_cpu_data = *info;
}
193