xref: /openbmc/linux/arch/arm64/kernel/cpuinfo.c (revision add48ba4)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Record and handle CPU attributes.
 *
 * Copyright (C) 2014 ARM Ltd.
 */
#include <asm/arch_timer.h>
#include <asm/cache.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/cpufeature.h>
#include <asm/fpsimd.h>

#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/compat.h>
#include <linux/elf.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/personality.h>
#include <linux/preempt.h>
#include <linux/printk.h>
#include <linux/seq_file.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/delay.h>

/*
 * In case the boot CPU is hotpluggable, we record its initial state and
 * current state separately. Certain system registers may contain different
 * values depending on configuration at or after reset.
 */
DEFINE_PER_CPU(struct cpuinfo_arm64, cpu_data);
static struct cpuinfo_arm64 boot_cpu_data;

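/*
 * Human-readable names for the CTR_EL0.L1Ip instruction cache policy
 * values, reported by cpuinfo_detect_icache_policy() below when each
 * CPU's state is recorded.
 */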
static const char *icache_policy_str[] = {
	[0 ... ICACHE_POLICY_PIPT]	= "RESERVED/UNKNOWN",
	[ICACHE_POLICY_VIPT]		= "VIPT",
	[ICACHE_POLICY_PIPT]		= "PIPT",
	[ICACHE_POLICY_VPIPT]		= "VPIPT",
};

unsigned long __icache_flags;

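/*
 * Names for the userspace hwcaps printed in /proc/cpuinfo. The order must
 * match the hwcap bit numbering in <asm/hwcap.h>, since c_show() below
 * prints entry j whenever cpu_have_feature(j) is set.
 */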
static const char *const hwcap_str[] = {
	"fp",
	"asimd",
	"evtstrm",
	"aes",
	"pmull",
	"sha1",
	"sha2",
	"crc32",
	"atomics",
	"fphp",
	"asimdhp",
	"cpuid",
	"asimdrdm",
	"jscvt",
	"fcma",
	"lrcpc",
	"dcpop",
	"sha3",
	"sm3",
	"sm4",
	"asimddp",
	"sha512",
	"sve",
	"asimdfhm",
	"dit",
	"uscat",
	"ilrcpc",
	"flagm",
	"ssbs",
	"sb",
	"paca",
	"pacg",
	"dcpodp",
	"sve2",
	"sveaes",
	"svepmull",
	"svebitperm",
	"svesha3",
	"svesm4",
	"flagm2",
	"frint",
	"svei8mm",
	"svef32mm",
	"svef64mm",
	"svebf16",
	"i8mm",
	"bf16",
	"dgh",
	"rng",
	"bti",
	NULL
};

#ifdef CONFIG_COMPAT
static const char *const compat_hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	"thumbee",
	"neon",
	"vfpv3",
	"vfpv3d16",
	"tls",
	"vfpv4",
	"idiva",
	"idivt",
	"vfpd32",
	"lpae",
	"evtstrm",
	NULL
};

static const char *const compat_hwcap2_str[] = {
	"aes",
	"pmull",
	"sha1",
	"sha2",
	"crc32",
	NULL
};
#endif /* CONFIG_COMPAT */

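/*
 * Back end for /proc/cpuinfo (wired up via cpuinfo_op below): emit one
 * block per online CPU, using the compat (AArch32) hwcap names when the
 * reading task has a PER_LINUX32 personality.
 */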
static int c_show(struct seq_file *m, void *v)
{
	int i, j;
	bool compat = personality(current->personality) == PER_LINUX32;

	for_each_online_cpu(i) {
		struct cpuinfo_arm64 *cpuinfo = &per_cpu(cpu_data, i);
		u32 midr = cpuinfo->reg_midr;

		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor".  Give glibc what it expects.
		 */
		seq_printf(m, "processor\t: %d\n", i);
		if (compat)
			seq_printf(m, "model name\t: ARMv8 Processor rev %d (%s)\n",
				   MIDR_REVISION(midr), COMPAT_ELF_PLATFORM);

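		/*
		 * BogoMIPS == loops_per_jiffy * HZ / 500000: the first
		 * value printed below is the integer part and the second
		 * supplies the two fractional digits.
		 */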
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
			   loops_per_jiffy / (500000UL/HZ),
			   loops_per_jiffy / (5000UL/HZ) % 100);

		/*
		 * Dump out the common processor features in a single line.
		 * Userspace should read the hwcaps with getauxval(AT_HWCAP)
		 * rather than attempting to parse this, but there's a body of
		 * software which does already (at least for 32-bit).
		 */
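		/*
		 * A minimal userspace sketch of that preferred approach
		 * (illustrative only; HWCAP_ATOMICS is just an example bit
		 * and the program is not part of this file):
		 *
		 *	#include <stdio.h>
		 *	#include <sys/auxv.h>
		 *	#include <asm/hwcap.h>
		 *
		 *	int main(void)
		 *	{
		 *		if (getauxval(AT_HWCAP) & HWCAP_ATOMICS)
		 *			printf("LSE atomics supported\n");
		 *		return 0;
		 *	}
		 */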
		seq_puts(m, "Features\t:");
		if (compat) {
#ifdef CONFIG_COMPAT
			for (j = 0; compat_hwcap_str[j]; j++)
				if (compat_elf_hwcap & (1 << j))
					seq_printf(m, " %s", compat_hwcap_str[j]);

			for (j = 0; compat_hwcap2_str[j]; j++)
				if (compat_elf_hwcap2 & (1 << j))
					seq_printf(m, " %s", compat_hwcap2_str[j]);
#endif /* CONFIG_COMPAT */
		} else {
			for (j = 0; hwcap_str[j]; j++)
				if (cpu_have_feature(j))
					seq_printf(m, " %s", hwcap_str[j]);
		}
		seq_puts(m, "\n");

		seq_printf(m, "CPU implementer\t: 0x%02x\n",
			   MIDR_IMPLEMENTOR(midr));
		seq_printf(m, "CPU architecture: 8\n");
		seq_printf(m, "CPU variant\t: 0x%x\n", MIDR_VARIANT(midr));
		seq_printf(m, "CPU part\t: 0x%03x\n", MIDR_PARTNUM(midr));
		seq_printf(m, "CPU revision\t: %d\n\n", MIDR_REVISION(midr));
	}

	return 0;
}

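/*
 * c_show() iterates over all online CPUs itself, so the seq_file
 * iterator only has to produce a single non-NULL token: c_start()
 * returns it for position 0 and c_next() always ends the sequence.
 */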
static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};


static struct kobj_type cpuregs_kobj_type = {
	.sysfs_ops = &kobj_sysfs_ops,
};

/*
 * The ARM ARM uses the phrase "32-bit register" to describe a register
 * whose upper 32 bits are RES0 (per C5.1.1, ARM DDI 0487A.i), however
 * no statement is made as to whether the upper 32 bits will or will not
 * be made use of in future, and between ARM DDI 0487A.c and ARM DDI
 * 0487A.d CLIDR_EL1 was expanded from 32-bit to 64-bit.
 *
 * Thus, while both MIDR_EL1 and REVIDR_EL1 are described as 32-bit
 * registers, we expose them both as 64 bit values to cater for possible
 * future expansion without an ABI break.
 */
#define kobj_to_cpuinfo(kobj)	container_of(kobj, struct cpuinfo_arm64, kobj)
#define CPUREGS_ATTR_RO(_name, _field)						\
	static ssize_t _name##_show(struct kobject *kobj,			\
			struct kobj_attribute *attr, char *buf)			\
	{									\
		struct cpuinfo_arm64 *info = kobj_to_cpuinfo(kobj);		\
										\
		if (info->reg_midr)						\
			return sprintf(buf, "0x%016x\n", info->reg_##_field);	\
		else								\
			return 0;						\
	}									\
	static struct kobj_attribute cpuregs_attr_##_name = __ATTR_RO(_name)

CPUREGS_ATTR_RO(midr_el1, midr);
CPUREGS_ATTR_RO(revidr_el1, revidr);

static struct attribute *cpuregs_id_attrs[] = {
	&cpuregs_attr_midr_el1.attr,
	&cpuregs_attr_revidr_el1.attr,
	NULL
};

static const struct attribute_group cpuregs_attr_group = {
	.attrs = cpuregs_id_attrs,
	.name = "identification"
};

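/*
 * With the kobject added as "regs" under the CPU device and the group
 * named "identification", the values above are expected to surface as
 * /sys/devices/system/cpu/cpuN/regs/identification/{midr_el1,revidr_el1}
 * once the hotplug callbacks below have run for CPU N.
 */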
static int cpuid_cpu_online(unsigned int cpu)
{
	int rc;
	struct device *dev;
	struct cpuinfo_arm64 *info = &per_cpu(cpu_data, cpu);

	dev = get_cpu_device(cpu);
	if (!dev) {
		rc = -ENODEV;
		goto out;
	}
	rc = kobject_add(&info->kobj, &dev->kobj, "regs");
	if (rc)
		goto out;
	rc = sysfs_create_group(&info->kobj, &cpuregs_attr_group);
	if (rc)
		kobject_del(&info->kobj);
out:
	return rc;
}

static int cpuid_cpu_offline(unsigned int cpu)
{
	struct device *dev;
	struct cpuinfo_arm64 *info = &per_cpu(cpu_data, cpu);

	dev = get_cpu_device(cpu);
	if (!dev)
		return -ENODEV;
	if (info->kobj.parent) {
		sysfs_remove_group(&info->kobj, &cpuregs_attr_group);
		kobject_del(&info->kobj);
	}

	return 0;
}

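/*
 * Initialise a kobject for every possible CPU up front, then register a
 * dynamic hotplug state so the "regs" directory is created when a CPU
 * comes online and torn down again when it goes offline.
 */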
static int __init cpuinfo_regs_init(void)
{
	int cpu, ret;

	for_each_possible_cpu(cpu) {
		struct cpuinfo_arm64 *info = &per_cpu(cpu_data, cpu);

		kobject_init(&info->kobj, &cpuregs_kobj_type);
	}

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "arm64/cpuinfo:online",
				cpuid_cpu_online, cpuid_cpu_offline);
	if (ret < 0) {
		pr_err("cpuinfo: failed to register hotplug callbacks.\n");
		return ret;
	}
	return 0;
}
device_initcall(cpuinfo_regs_init);

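/*
 * Decode CTR_EL0.L1Ip for the calling CPU and record the result in
 * __icache_flags: VPIPT caches are flagged as such, while VIPT and
 * reserved/unknown policies are conservatively treated as aliasing.
 */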
static void cpuinfo_detect_icache_policy(struct cpuinfo_arm64 *info)
{
	unsigned int cpu = smp_processor_id();
	u32 l1ip = CTR_L1IP(info->reg_ctr);

	switch (l1ip) {
	case ICACHE_POLICY_PIPT:
		break;
	case ICACHE_POLICY_VPIPT:
		set_bit(ICACHEF_VPIPT, &__icache_flags);
		break;
	default:
		/* Fallthrough */
	case ICACHE_POLICY_VIPT:
		/* Assume aliasing */
		set_bit(ICACHEF_ALIASING, &__icache_flags);
	}

	pr_info("Detected %s I-cache on CPU%d\n", icache_policy_str[l1ip], cpu);
}

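/*
 * Snapshot the ID registers of the calling CPU into @info. The AArch32
 * ID registers are only read when EL0 AArch32 is implemented, and ZCR
 * only when SVE is configured and present.
 */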
static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
{
	info->reg_cntfrq = arch_timer_get_cntfrq();
	/*
	 * Use the effective value of CTR_EL0 rather than the raw value
	 * exposed by the CPU. The CTR_EL0.IDC field must be interpreted
	 * together with the CLIDR_EL1 fields to avoid triggering false
	 * warnings when there is a mismatch across CPUs. Keep track of
	 * the effective value of CTR_EL0 in our internal records for
	 * accurate sanity checks and feature enablement.
	 */
	info->reg_ctr = read_cpuid_effective_cachetype();
	info->reg_dczid = read_cpuid(DCZID_EL0);
	info->reg_midr = read_cpuid_id();
	info->reg_revidr = read_cpuid(REVIDR_EL1);

	info->reg_id_aa64dfr0 = read_cpuid(ID_AA64DFR0_EL1);
	info->reg_id_aa64dfr1 = read_cpuid(ID_AA64DFR1_EL1);
	info->reg_id_aa64isar0 = read_cpuid(ID_AA64ISAR0_EL1);
	info->reg_id_aa64isar1 = read_cpuid(ID_AA64ISAR1_EL1);
	info->reg_id_aa64mmfr0 = read_cpuid(ID_AA64MMFR0_EL1);
	info->reg_id_aa64mmfr1 = read_cpuid(ID_AA64MMFR1_EL1);
	info->reg_id_aa64mmfr2 = read_cpuid(ID_AA64MMFR2_EL1);
	info->reg_id_aa64pfr0 = read_cpuid(ID_AA64PFR0_EL1);
	info->reg_id_aa64pfr1 = read_cpuid(ID_AA64PFR1_EL1);
	info->reg_id_aa64zfr0 = read_cpuid(ID_AA64ZFR0_EL1);

	/* Update the 32bit ID registers only if AArch32 is implemented */
	if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) {
		info->reg_id_dfr0 = read_cpuid(ID_DFR0_EL1);
		info->reg_id_dfr1 = read_cpuid(ID_DFR1_EL1);
		info->reg_id_isar0 = read_cpuid(ID_ISAR0_EL1);
		info->reg_id_isar1 = read_cpuid(ID_ISAR1_EL1);
		info->reg_id_isar2 = read_cpuid(ID_ISAR2_EL1);
		info->reg_id_isar3 = read_cpuid(ID_ISAR3_EL1);
		info->reg_id_isar4 = read_cpuid(ID_ISAR4_EL1);
		info->reg_id_isar5 = read_cpuid(ID_ISAR5_EL1);
		info->reg_id_isar6 = read_cpuid(ID_ISAR6_EL1);
		info->reg_id_mmfr0 = read_cpuid(ID_MMFR0_EL1);
		info->reg_id_mmfr1 = read_cpuid(ID_MMFR1_EL1);
		info->reg_id_mmfr2 = read_cpuid(ID_MMFR2_EL1);
		info->reg_id_mmfr3 = read_cpuid(ID_MMFR3_EL1);
		info->reg_id_mmfr4 = read_cpuid(ID_MMFR4_EL1);
		info->reg_id_mmfr5 = read_cpuid(ID_MMFR5_EL1);
		info->reg_id_pfr0 = read_cpuid(ID_PFR0_EL1);
		info->reg_id_pfr1 = read_cpuid(ID_PFR1_EL1);
		info->reg_id_pfr2 = read_cpuid(ID_PFR2_EL1);

		info->reg_mvfr0 = read_cpuid(MVFR0_EL1);
		info->reg_mvfr1 = read_cpuid(MVFR1_EL1);
		info->reg_mvfr2 = read_cpuid(MVFR2_EL1);
	}

	if (IS_ENABLED(CONFIG_ARM64_SVE) &&
	    id_aa64pfr0_sve(info->reg_id_aa64pfr0))
		info->reg_zcr = read_zcr_features();

	cpuinfo_detect_icache_policy(info);
}

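/*
 * Record the calling CPU's register state and pass it to
 * update_cpu_features(), which checks it against the boot CPU snapshot
 * and updates the system-wide feature state.
 */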
void cpuinfo_store_cpu(void)
{
	struct cpuinfo_arm64 *info = this_cpu_ptr(&cpu_data);
	__cpuinfo_store_cpu(info);
	update_cpu_features(smp_processor_id(), info, &boot_cpu_data);
}

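/*
 * Record the boot CPU's register state and use it to seed the cpufeature
 * framework via init_cpu_features(); later CPUs are compared against this
 * baseline by cpuinfo_store_cpu() above.
 */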
void __init cpuinfo_store_boot_cpu(void)
{
	struct cpuinfo_arm64 *info = &per_cpu(cpu_data, 0);
	__cpuinfo_store_cpu(info);

	boot_cpu_data = *info;
	init_cpu_features(&boot_cpu_data);
}