xref: /openbmc/linux/arch/arm64/kernel/cpuinfo.c (revision 81de3bf3)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Record and handle CPU attributes.
 *
 * Copyright (C) 2014 ARM Ltd.
 */
#include <asm/arch_timer.h>
#include <asm/cache.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/cpufeature.h>
#include <asm/fpsimd.h>

#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/compat.h>
#include <linux/elf.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/personality.h>
#include <linux/preempt.h>
#include <linux/printk.h>
#include <linux/seq_file.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/delay.h>

/*
 * In case the boot CPU is hotpluggable, we record its initial state and
 * current state separately. Certain system registers may contain different
 * values depending on configuration at or after reset.
 */
DEFINE_PER_CPU(struct cpuinfo_arm64, cpu_data);
static struct cpuinfo_arm64 boot_cpu_data;

static const char *icache_policy_str[] = {
        [0 ... ICACHE_POLICY_PIPT]      = "RESERVED/UNKNOWN",
        [ICACHE_POLICY_VIPT]            = "VIPT",
        [ICACHE_POLICY_PIPT]            = "PIPT",
        [ICACHE_POLICY_VPIPT]           = "VPIPT",
};

unsigned long __icache_flags;

static const char *const hwcap_str[] = {
        "fp",
        "asimd",
        "evtstrm",
        "aes",
        "pmull",
        "sha1",
        "sha2",
        "crc32",
        "atomics",
        "fphp",
        "asimdhp",
        "cpuid",
        "asimdrdm",
        "jscvt",
        "fcma",
        "lrcpc",
        "dcpop",
        "sha3",
        "sm3",
        "sm4",
        "asimddp",
        "sha512",
        "sve",
        "asimdfhm",
        "dit",
        "uscat",
        "ilrcpc",
        "flagm",
        "ssbs",
        "sb",
        "paca",
        "pacg",
        "dcpodp",
        "sve2",
        "sveaes",
        "svepmull",
        "svebitperm",
        "svesha3",
        "svesm4",
        "flagm2",
        "frint",
        NULL
};
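
/*
 * The strings above are indexed by HWCAP bit number: hwcap_str[i] names the
 * capability reported in bit i of the AT_HWCAP auxiliary vector entry (e.g.
 * "fp" is HWCAP_FP, bit 0). A minimal userspace sketch of how these bits are
 * meant to be consumed, instead of parsing /proc/cpuinfo, might look like
 * this (illustrative only, not part of this file):
 *
 *      #include <stdio.h>
 *      #include <sys/auxv.h>
 *      #include <asm/hwcap.h>
 *
 *      int main(void)
 *      {
 *              unsigned long hwcaps = getauxval(AT_HWCAP);
 *
 *              if (hwcaps & HWCAP_ASIMD)
 *                      printf("Advanced SIMD is available\n");
 *              return 0;
 *      }
 */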

#ifdef CONFIG_COMPAT
static const char *const compat_hwcap_str[] = {
        "swp",
        "half",
        "thumb",
        "26bit",
        "fastmult",
        "fpa",
        "vfp",
        "edsp",
        "java",
        "iwmmxt",
        "crunch",
        "thumbee",
        "neon",
        "vfpv3",
        "vfpv3d16",
        "tls",
        "vfpv4",
        "idiva",
        "idivt",
        "vfpd32",
        "lpae",
        "evtstrm",
        NULL
};

static const char *const compat_hwcap2_str[] = {
        "aes",
        "pmull",
        "sha1",
        "sha2",
        "crc32",
        NULL
};
#endif /* CONFIG_COMPAT */
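
/*
 * The two compat tables mirror the 32-bit Arm HWCAP/HWCAP2 bit layout:
 * compat_elf_hwcap and compat_elf_hwcap2 are populated using the same bit
 * positions a native 32-bit kernel would report, so index i above names
 * bit i of those masks when c_show() prints the Features line for a
 * PER_LINUX32 task.
 */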

static int c_show(struct seq_file *m, void *v)
{
        int i, j;
        bool compat = personality(current->personality) == PER_LINUX32;

        for_each_online_cpu(i) {
                struct cpuinfo_arm64 *cpuinfo = &per_cpu(cpu_data, i);
                u32 midr = cpuinfo->reg_midr;

                /*
                 * glibc reads /proc/cpuinfo to determine the number of
                 * online processors, looking for lines beginning with
                 * "processor".  Give glibc what it expects.
                 */
                seq_printf(m, "processor\t: %d\n", i);
                if (compat)
                        seq_printf(m, "model name\t: ARMv8 Processor rev %d (%s)\n",
                                   MIDR_REVISION(midr), COMPAT_ELF_PLATFORM);

                seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
                           loops_per_jiffy / (500000UL/HZ),
                           loops_per_jiffy / (5000UL/HZ) % 100);

                /*
                 * Dump out the common processor features in a single line.
                 * Userspace should read the hwcaps with getauxval(AT_HWCAP)
                 * rather than attempting to parse this, but there's a body of
                 * software which does already (at least for 32-bit).
                 */
                seq_puts(m, "Features\t:");
                if (compat) {
#ifdef CONFIG_COMPAT
                        for (j = 0; compat_hwcap_str[j]; j++)
                                if (compat_elf_hwcap & (1 << j))
                                        seq_printf(m, " %s", compat_hwcap_str[j]);

                        for (j = 0; compat_hwcap2_str[j]; j++)
                                if (compat_elf_hwcap2 & (1 << j))
                                        seq_printf(m, " %s", compat_hwcap2_str[j]);
#endif /* CONFIG_COMPAT */
                } else {
                        for (j = 0; hwcap_str[j]; j++)
                                if (cpu_have_feature(j))
                                        seq_printf(m, " %s", hwcap_str[j]);
                }
                seq_puts(m, "\n");

                seq_printf(m, "CPU implementer\t: 0x%02x\n",
                           MIDR_IMPLEMENTOR(midr));
                seq_printf(m, "CPU architecture: 8\n");
                seq_printf(m, "CPU variant\t: 0x%x\n", MIDR_VARIANT(midr));
                seq_printf(m, "CPU part\t: 0x%03x\n", MIDR_PARTNUM(midr));
                seq_printf(m, "CPU revision\t: %d\n\n", MIDR_REVISION(midr));
        }

        return 0;
}
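
/*
 * For reference, on a 64-bit task the loop above emits one block per online
 * CPU along the lines of the (purely illustrative) sample below; the actual
 * values depend on the hardware and on loops_per_jiffy:
 *
 *      processor       : 0
 *      BogoMIPS        : 48.00
 *      Features        : fp asimd evtstrm aes pmull sha1 sha2 crc32 cpuid
 *      CPU implementer : 0x41
 *      CPU architecture: 8
 *      CPU variant     : 0x0
 *      CPU part        : 0xd03
 *      CPU revision    : 4
 */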

static void *c_start(struct seq_file *m, loff_t *pos)
{
        return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
        ++*pos;
        return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}
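
/*
 * The iterator below is deliberately trivial: c_start() hands out a single
 * dummy token for position 0 and c_next() always ends the walk, so c_show()
 * is invoked just once per read pass of /proc/cpuinfo and does its own
 * for_each_online_cpu() loop rather than emitting one record per seq_file
 * iteration step.
 */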

const struct seq_operations cpuinfo_op = {
        .start  = c_start,
        .next   = c_next,
        .stop   = c_stop,
        .show   = c_show
};

static struct kobj_type cpuregs_kobj_type = {
        .sysfs_ops = &kobj_sysfs_ops,
};

/*
 * The ARM ARM uses the phrase "32-bit register" to describe a register
 * whose upper 32 bits are RES0 (per C5.1.1, ARM DDI 0487A.i); however,
 * no statement is made as to whether the upper 32 bits will or will not
 * be made use of in future, and between ARM DDI 0487A.c and ARM DDI
 * 0487A.d CLIDR_EL1 was expanded from 32-bit to 64-bit.
 *
 * Thus, while both MIDR_EL1 and REVIDR_EL1 are described as 32-bit
 * registers, we expose them both as 64-bit values to cater for possible
 * future expansion without an ABI break.
 */
#define kobj_to_cpuinfo(kobj)   container_of(kobj, struct cpuinfo_arm64, kobj)
#define CPUREGS_ATTR_RO(_name, _field)                                          \
        static ssize_t _name##_show(struct kobject *kobj,                       \
                        struct kobj_attribute *attr, char *buf)                 \
        {                                                                       \
                struct cpuinfo_arm64 *info = kobj_to_cpuinfo(kobj);             \
                                                                                \
                if (info->reg_midr)                                             \
                        return sprintf(buf, "0x%016x\n", info->reg_##_field);   \
                else                                                            \
                        return 0;                                               \
        }                                                                       \
        static struct kobj_attribute cpuregs_attr_##_name = __ATTR_RO(_name)

CPUREGS_ATTR_RO(midr_el1, midr);
CPUREGS_ATTR_RO(revidr_el1, revidr);

static struct attribute *cpuregs_id_attrs[] = {
        &cpuregs_attr_midr_el1.attr,
        &cpuregs_attr_revidr_el1.attr,
        NULL
};

static const struct attribute_group cpuregs_attr_group = {
        .attrs = cpuregs_id_attrs,
        .name = "identification"
};
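
/*
 * Together with cpuid_cpu_online() below, the attributes above surface the
 * raw identification registers of each online CPU under sysfs, e.g.
 * /sys/devices/system/cpu/cpu0/regs/identification/midr_el1. A quick
 * (illustrative) read from the shell:
 *
 *      cat /sys/devices/system/cpu/cpu0/regs/identification/midr_el1
 *      0x00000000410fd034
 *
 * The value shown is only an example; it is whatever MIDR_EL1 reported on
 * that CPU, printed as a zero-padded 64-bit quantity by the "0x%016x"
 * format above.
 */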

static int cpuid_cpu_online(unsigned int cpu)
{
        int rc;
        struct device *dev;
        struct cpuinfo_arm64 *info = &per_cpu(cpu_data, cpu);

        dev = get_cpu_device(cpu);
        if (!dev) {
                rc = -ENODEV;
                goto out;
        }
        rc = kobject_add(&info->kobj, &dev->kobj, "regs");
        if (rc)
                goto out;
        rc = sysfs_create_group(&info->kobj, &cpuregs_attr_group);
        if (rc)
                kobject_del(&info->kobj);
out:
        return rc;
}

static int cpuid_cpu_offline(unsigned int cpu)
{
        struct device *dev;
        struct cpuinfo_arm64 *info = &per_cpu(cpu_data, cpu);

        dev = get_cpu_device(cpu);
        if (!dev)
                return -ENODEV;
        if (info->kobj.parent) {
                sysfs_remove_group(&info->kobj, &cpuregs_attr_group);
                kobject_del(&info->kobj);
        }

        return 0;
}

static int __init cpuinfo_regs_init(void)
{
        int cpu, ret;

        for_each_possible_cpu(cpu) {
                struct cpuinfo_arm64 *info = &per_cpu(cpu_data, cpu);

                kobject_init(&info->kobj, &cpuregs_kobj_type);
        }

        ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "arm64/cpuinfo:online",
                                cpuid_cpu_online, cpuid_cpu_offline);
        if (ret < 0) {
                pr_err("cpuinfo: failed to register hotplug callbacks.\n");
                return ret;
        }
        return 0;
}
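
/*
 * The CPUHP_AP_ONLINE_DYN state registered above gives the "regs" kobject a
 * hotplug-aware lifetime: cpuid_cpu_online() is invoked for every CPU that
 * is already online when cpuinfo_regs_init() runs and again each time a CPU
 * comes online later, while cpuid_cpu_offline() tears the sysfs group down
 * when the CPU goes away.
 */
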
static void cpuinfo_detect_icache_policy(struct cpuinfo_arm64 *info)
{
        unsigned int cpu = smp_processor_id();
        u32 l1ip = CTR_L1IP(info->reg_ctr);

        switch (l1ip) {
        case ICACHE_POLICY_PIPT:
                break;
        case ICACHE_POLICY_VPIPT:
                set_bit(ICACHEF_VPIPT, &__icache_flags);
                break;
        default:
                /* Fallthrough */
        case ICACHE_POLICY_VIPT:
                /* Assume aliasing */
                set_bit(ICACHEF_ALIASING, &__icache_flags);
        }

        pr_info("Detected %s I-cache on CPU%d\n", icache_policy_str[l1ip], cpu);
}
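
/*
 * The pr_info() above is what produces boot-log lines such as (example only)
 * "Detected VIPT I-cache on CPU0"; reserved or unknown CTR_EL0.L1Ip
 * encodings are reported as "RESERVED/UNKNOWN" and, via the default case,
 * conservatively treated as aliasing just like VIPT.
 */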

static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
{
        info->reg_cntfrq = arch_timer_get_cntfrq();
        /*
         * Use the effective value of CTR_EL0 rather than the raw value
         * exposed by the CPU. The CTR_EL0.IDC field must be interpreted
         * together with the CLIDR_EL1 fields to avoid triggering false
         * warnings when there is a mismatch across the CPUs. Keep track of
         * the effective value of CTR_EL0 in our internal records for
         * accurate sanity checks and feature enablement.
         */
        info->reg_ctr = read_cpuid_effective_cachetype();
        info->reg_dczid = read_cpuid(DCZID_EL0);
        info->reg_midr = read_cpuid_id();
        info->reg_revidr = read_cpuid(REVIDR_EL1);

        info->reg_id_aa64dfr0 = read_cpuid(ID_AA64DFR0_EL1);
        info->reg_id_aa64dfr1 = read_cpuid(ID_AA64DFR1_EL1);
        info->reg_id_aa64isar0 = read_cpuid(ID_AA64ISAR0_EL1);
        info->reg_id_aa64isar1 = read_cpuid(ID_AA64ISAR1_EL1);
        info->reg_id_aa64mmfr0 = read_cpuid(ID_AA64MMFR0_EL1);
        info->reg_id_aa64mmfr1 = read_cpuid(ID_AA64MMFR1_EL1);
        info->reg_id_aa64mmfr2 = read_cpuid(ID_AA64MMFR2_EL1);
        info->reg_id_aa64pfr0 = read_cpuid(ID_AA64PFR0_EL1);
        info->reg_id_aa64pfr1 = read_cpuid(ID_AA64PFR1_EL1);
        info->reg_id_aa64zfr0 = read_cpuid(ID_AA64ZFR0_EL1);

        /* Update the 32-bit ID registers only if AArch32 is implemented */
        if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) {
                info->reg_id_dfr0 = read_cpuid(ID_DFR0_EL1);
                info->reg_id_isar0 = read_cpuid(ID_ISAR0_EL1);
                info->reg_id_isar1 = read_cpuid(ID_ISAR1_EL1);
                info->reg_id_isar2 = read_cpuid(ID_ISAR2_EL1);
                info->reg_id_isar3 = read_cpuid(ID_ISAR3_EL1);
                info->reg_id_isar4 = read_cpuid(ID_ISAR4_EL1);
                info->reg_id_isar5 = read_cpuid(ID_ISAR5_EL1);
                info->reg_id_mmfr0 = read_cpuid(ID_MMFR0_EL1);
                info->reg_id_mmfr1 = read_cpuid(ID_MMFR1_EL1);
                info->reg_id_mmfr2 = read_cpuid(ID_MMFR2_EL1);
                info->reg_id_mmfr3 = read_cpuid(ID_MMFR3_EL1);
                info->reg_id_pfr0 = read_cpuid(ID_PFR0_EL1);
                info->reg_id_pfr1 = read_cpuid(ID_PFR1_EL1);

                info->reg_mvfr0 = read_cpuid(MVFR0_EL1);
                info->reg_mvfr1 = read_cpuid(MVFR1_EL1);
                info->reg_mvfr2 = read_cpuid(MVFR2_EL1);
        }

        if (IS_ENABLED(CONFIG_ARM64_SVE) &&
            id_aa64pfr0_sve(info->reg_id_aa64pfr0))
                info->reg_zcr = read_zcr_features();

        cpuinfo_detect_icache_policy(info);
}
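
/*
 * cpuinfo_store_boot_cpu() snapshots the boot CPU and seeds the system-wide
 * feature state via init_cpu_features(); cpuinfo_store_cpu() then runs on
 * each secondary as it comes up, and update_cpu_features() checks that
 * secondary's ID registers against the boot CPU's values so that mismatched
 * (heterogeneous) features can be sanitised or flagged.
 */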

void cpuinfo_store_cpu(void)
{
        struct cpuinfo_arm64 *info = this_cpu_ptr(&cpu_data);
        __cpuinfo_store_cpu(info);
        update_cpu_features(smp_processor_id(), info, &boot_cpu_data);
}

void __init cpuinfo_store_boot_cpu(void)
{
        struct cpuinfo_arm64 *info = &per_cpu(cpu_data, 0);
        __cpuinfo_store_cpu(info);

        boot_cpu_data = *info;
        init_cpu_features(&boot_cpu_data);
}

device_initcall(cpuinfo_regs_init);
399