/*
 * Record and handle CPU attributes.
 *
 * Copyright (C) 2014 ARM Ltd.
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#include <asm/arch_timer.h>
#include <asm/cache.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/cpufeature.h>
#include <asm/fpsimd.h>

#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/compat.h>
#include <linux/elf.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/personality.h>
#include <linux/preempt.h>
#include <linux/printk.h>
#include <linux/seq_file.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/delay.h>

/*
 * In case the boot CPU is hotpluggable, we record its initial state and
 * current state separately. Certain system registers may contain different
 * values depending on configuration at or after reset.
 */
DEFINE_PER_CPU(struct cpuinfo_arm64, cpu_data);
static struct cpuinfo_arm64 boot_cpu_data;

static char *icache_policy_str[] = {
	[0 ... ICACHE_POLICY_PIPT]	= "RESERVED/UNKNOWN",
	[ICACHE_POLICY_VIPT]		= "VIPT",
	[ICACHE_POLICY_PIPT]		= "PIPT",
	[ICACHE_POLICY_VPIPT]		= "VPIPT",
};

unsigned long __icache_flags;

static const char *const hwcap_str[] = {
	"fp",
	"asimd",
	"evtstrm",
	"aes",
	"pmull",
	"sha1",
	"sha2",
	"crc32",
	"atomics",
	"fphp",
	"asimdhp",
	"cpuid",
	"asimdrdm",
	"jscvt",
	"fcma",
	"lrcpc",
	"dcpop",
	"sha3",
	"sm3",
	"sm4",
	"asimddp",
	"sha512",
	"sve",
	"asimdfhm",
	"dit",
	"uscat",
	"ilrcpc",
	"flagm",
	"ssbs",
	"sb",
	"paca",
	"pacg",
	"dcpodp",
	"sve2",
	"sveaes",
	"svepmull",
	"svebitperm",
	"svesha3",
	"svesm4",
	NULL
};

#ifdef CONFIG_COMPAT
static const char *const compat_hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	"thumbee",
	"neon",
	"vfpv3",
	"vfpv3d16",
	"tls",
	"vfpv4",
	"idiva",
	"idivt",
	"vfpd32",
	"lpae",
	"evtstrm",
	NULL
};

static const char *const compat_hwcap2_str[] = {
	"aes",
	"pmull",
	"sha1",
	"sha2",
	"crc32",
	NULL
};
#endif /* CONFIG_COMPAT */
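/*
 * Illustrative only, not part of the original file: userspace is expected
 * to discover these capabilities through the ELF auxiliary vector rather
 * than by parsing /proc/cpuinfo (see the note in c_show() below). A minimal
 * userspace sketch, assuming glibc's <sys/auxv.h> and the uapi HWCAP_ and
 * HWCAP2_ bit definitions from <asm/hwcap.h>; has_asimd() and use_sve2()
 * are hypothetical caller names:
 *
 *	#include <sys/auxv.h>
 *	#include <asm/hwcap.h>
 *
 *	unsigned long hwcap  = getauxval(AT_HWCAP);
 *	unsigned long hwcap2 = getauxval(AT_HWCAP2);
 *
 *	if (hwcap & HWCAP_ASIMD)
 *		has_asimd();
 *	if (hwcap2 & HWCAP2_SVE2)
 *		use_sve2();
 */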
static int c_show(struct seq_file *m, void *v)
{
	int i, j;
	bool compat = personality(current->personality) == PER_LINUX32;

	for_each_online_cpu(i) {
		struct cpuinfo_arm64 *cpuinfo = &per_cpu(cpu_data, i);
		u32 midr = cpuinfo->reg_midr;

		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor". Give glibc what it expects.
		 */
		seq_printf(m, "processor\t: %d\n", i);
		if (compat)
			seq_printf(m, "model name\t: ARMv8 Processor rev %d (%s)\n",
				   MIDR_REVISION(midr), COMPAT_ELF_PLATFORM);

		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
			   loops_per_jiffy / (500000UL/HZ),
			   loops_per_jiffy / (5000UL/HZ) % 100);

		/*
		 * Dump out the common processor features in a single line.
		 * Userspace should read the hwcaps with getauxval(AT_HWCAP)
		 * rather than attempting to parse this, but there's a body of
		 * software which does already (at least for 32-bit).
		 */
		seq_puts(m, "Features\t:");
		if (compat) {
#ifdef CONFIG_COMPAT
			for (j = 0; compat_hwcap_str[j]; j++)
				if (compat_elf_hwcap & (1 << j))
					seq_printf(m, " %s", compat_hwcap_str[j]);

			for (j = 0; compat_hwcap2_str[j]; j++)
				if (compat_elf_hwcap2 & (1 << j))
					seq_printf(m, " %s", compat_hwcap2_str[j]);
#endif /* CONFIG_COMPAT */
		} else {
			for (j = 0; hwcap_str[j]; j++)
				if (cpu_have_feature(j))
					seq_printf(m, " %s", hwcap_str[j]);
		}
		seq_puts(m, "\n");

		seq_printf(m, "CPU implementer\t: 0x%02x\n",
			   MIDR_IMPLEMENTOR(midr));
		seq_printf(m, "CPU architecture: 8\n");
		seq_printf(m, "CPU variant\t: 0x%x\n", MIDR_VARIANT(midr));
		seq_printf(m, "CPU part\t: 0x%03x\n", MIDR_PARTNUM(midr));
		seq_printf(m, "CPU revision\t: %d\n\n", MIDR_REVISION(midr));
	}

	return 0;
}

static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};
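/*
 * Illustrative only, not part of the original file: a worked example of the
 * MIDR decode performed by c_show() above. For a hypothetical MIDR_EL1 value
 * of 0x410fd034 (which happens to match a Cortex-A53 r0p4), the field macros
 * yield MIDR_IMPLEMENTOR = 0x41, MIDR_VARIANT = 0x0, MIDR_PARTNUM = 0xd03 and
 * MIDR_REVISION = 4, so the per-CPU record would end with:
 *
 *	CPU implementer	: 0x41
 *	CPU architecture: 8
 *	CPU variant	: 0x0
 *	CPU part	: 0xd03
 *	CPU revision	: 4
 */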
static struct kobj_type cpuregs_kobj_type = {
	.sysfs_ops = &kobj_sysfs_ops,
};

/*
 * The ARM ARM uses the phrase "32-bit register" to describe a register
 * whose upper 32 bits are RES0 (per C5.1.1, ARM DDI 0487A.i), however
 * no statement is made as to whether the upper 32 bits will or will not
 * be made use of in future, and between ARM DDI 0487A.c and ARM DDI
 * 0487A.d CLIDR_EL1 was expanded from 32-bit to 64-bit.
 *
 * Thus, while both MIDR_EL1 and REVIDR_EL1 are described as 32-bit
 * registers, we expose them both as 64-bit values to cater for possible
 * future expansion without an ABI break.
 */
#define kobj_to_cpuinfo(kobj)	container_of(kobj, struct cpuinfo_arm64, kobj)
#define CPUREGS_ATTR_RO(_name, _field)						\
	static ssize_t _name##_show(struct kobject *kobj,			\
			struct kobj_attribute *attr, char *buf)			\
	{									\
		struct cpuinfo_arm64 *info = kobj_to_cpuinfo(kobj);		\
										\
		if (info->reg_midr)						\
			return sprintf(buf, "0x%016x\n", info->reg_##_field);	\
		else								\
			return 0;						\
	}									\
	static struct kobj_attribute cpuregs_attr_##_name = __ATTR_RO(_name)

CPUREGS_ATTR_RO(midr_el1, midr);
CPUREGS_ATTR_RO(revidr_el1, revidr);

static struct attribute *cpuregs_id_attrs[] = {
	&cpuregs_attr_midr_el1.attr,
	&cpuregs_attr_revidr_el1.attr,
	NULL
};

static const struct attribute_group cpuregs_attr_group = {
	.attrs = cpuregs_id_attrs,
	.name = "identification"
};

static int cpuid_cpu_online(unsigned int cpu)
{
	int rc;
	struct device *dev;
	struct cpuinfo_arm64 *info = &per_cpu(cpu_data, cpu);

	dev = get_cpu_device(cpu);
	if (!dev) {
		rc = -ENODEV;
		goto out;
	}
	rc = kobject_add(&info->kobj, &dev->kobj, "regs");
	if (rc)
		goto out;
	rc = sysfs_create_group(&info->kobj, &cpuregs_attr_group);
	if (rc)
		kobject_del(&info->kobj);
out:
	return rc;
}

static int cpuid_cpu_offline(unsigned int cpu)
{
	struct device *dev;
	struct cpuinfo_arm64 *info = &per_cpu(cpu_data, cpu);

	dev = get_cpu_device(cpu);
	if (!dev)
		return -ENODEV;
	if (info->kobj.parent) {
		sysfs_remove_group(&info->kobj, &cpuregs_attr_group);
		kobject_del(&info->kobj);
	}

	return 0;
}

static int __init cpuinfo_regs_init(void)
{
	int cpu, ret;

	for_each_possible_cpu(cpu) {
		struct cpuinfo_arm64 *info = &per_cpu(cpu_data, cpu);

		kobject_init(&info->kobj, &cpuregs_kobj_type);
	}

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "arm64/cpuinfo:online",
				cpuid_cpu_online, cpuid_cpu_offline);
	if (ret < 0) {
		pr_err("cpuinfo: failed to register hotplug callbacks.\n");
		return ret;
	}
	return 0;
}

static void cpuinfo_detect_icache_policy(struct cpuinfo_arm64 *info)
{
	unsigned int cpu = smp_processor_id();
	u32 l1ip = CTR_L1IP(info->reg_ctr);

	switch (l1ip) {
	case ICACHE_POLICY_PIPT:
		break;
	case ICACHE_POLICY_VPIPT:
		set_bit(ICACHEF_VPIPT, &__icache_flags);
		break;
	default:
		/* Fallthrough */
	case ICACHE_POLICY_VIPT:
		/* Assume aliasing */
		set_bit(ICACHEF_ALIASING, &__icache_flags);
	}

	pr_info("Detected %s I-cache on CPU%d\n", icache_policy_str[l1ip], cpu);
}
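/*
 * Illustrative only, not part of the original file: the bits recorded in
 * __icache_flags above are consumed elsewhere via the helpers in
 * <asm/cache.h>. A minimal sketch of a hypothetical consumer, where
 * invalidate_entire_icache() and invalidate_icache_range() are made-up
 * names standing in for the real maintenance routines:
 *
 *	#include <asm/cache.h>
 *
 *	if (icache_is_aliasing())
 *		invalidate_entire_icache();	// VA-based maintenance is not enough
 *	else
 *		invalidate_icache_range(start, end);
 */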
static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
{
	info->reg_cntfrq = arch_timer_get_cntfrq();
	/*
	 * Use the effective value of CTR_EL0 rather than the raw value
	 * exposed by the CPU. The CTR_EL0.IDC field must be interpreted
	 * together with the CLIDR_EL1 fields to avoid triggering false
	 * warnings when there is a mismatch across the CPUs. Keep track
	 * of the effective value of CTR_EL0 in our internal records for
	 * accurate sanity checks and feature enablement.
	 */
	info->reg_ctr = read_cpuid_effective_cachetype();
	info->reg_dczid = read_cpuid(DCZID_EL0);
	info->reg_midr = read_cpuid_id();
	info->reg_revidr = read_cpuid(REVIDR_EL1);

	info->reg_id_aa64dfr0 = read_cpuid(ID_AA64DFR0_EL1);
	info->reg_id_aa64dfr1 = read_cpuid(ID_AA64DFR1_EL1);
	info->reg_id_aa64isar0 = read_cpuid(ID_AA64ISAR0_EL1);
	info->reg_id_aa64isar1 = read_cpuid(ID_AA64ISAR1_EL1);
	info->reg_id_aa64mmfr0 = read_cpuid(ID_AA64MMFR0_EL1);
	info->reg_id_aa64mmfr1 = read_cpuid(ID_AA64MMFR1_EL1);
	info->reg_id_aa64mmfr2 = read_cpuid(ID_AA64MMFR2_EL1);
	info->reg_id_aa64pfr0 = read_cpuid(ID_AA64PFR0_EL1);
	info->reg_id_aa64pfr1 = read_cpuid(ID_AA64PFR1_EL1);
	info->reg_id_aa64zfr0 = read_cpuid(ID_AA64ZFR0_EL1);

	/* Update the 32bit ID registers only if AArch32 is implemented */
	if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) {
		info->reg_id_dfr0 = read_cpuid(ID_DFR0_EL1);
		info->reg_id_isar0 = read_cpuid(ID_ISAR0_EL1);
		info->reg_id_isar1 = read_cpuid(ID_ISAR1_EL1);
		info->reg_id_isar2 = read_cpuid(ID_ISAR2_EL1);
		info->reg_id_isar3 = read_cpuid(ID_ISAR3_EL1);
		info->reg_id_isar4 = read_cpuid(ID_ISAR4_EL1);
		info->reg_id_isar5 = read_cpuid(ID_ISAR5_EL1);
		info->reg_id_mmfr0 = read_cpuid(ID_MMFR0_EL1);
		info->reg_id_mmfr1 = read_cpuid(ID_MMFR1_EL1);
		info->reg_id_mmfr2 = read_cpuid(ID_MMFR2_EL1);
		info->reg_id_mmfr3 = read_cpuid(ID_MMFR3_EL1);
		info->reg_id_pfr0 = read_cpuid(ID_PFR0_EL1);
		info->reg_id_pfr1 = read_cpuid(ID_PFR1_EL1);

		info->reg_mvfr0 = read_cpuid(MVFR0_EL1);
		info->reg_mvfr1 = read_cpuid(MVFR1_EL1);
		info->reg_mvfr2 = read_cpuid(MVFR2_EL1);
	}

	if (IS_ENABLED(CONFIG_ARM64_SVE) &&
	    id_aa64pfr0_sve(info->reg_id_aa64pfr0))
		info->reg_zcr = read_zcr_features();

	cpuinfo_detect_icache_policy(info);
}

void cpuinfo_store_cpu(void)
{
	struct cpuinfo_arm64 *info = this_cpu_ptr(&cpu_data);

	__cpuinfo_store_cpu(info);
	update_cpu_features(smp_processor_id(), info, &boot_cpu_data);
}

void __init cpuinfo_store_boot_cpu(void)
{
	struct cpuinfo_arm64 *info = &per_cpu(cpu_data, 0);

	__cpuinfo_store_cpu(info);

	boot_cpu_data = *info;
	init_cpu_features(&boot_cpu_data);
}

device_initcall(cpuinfo_regs_init);
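/*
 * Illustrative only, not part of the original file: once the hotplug
 * callbacks registered by cpuinfo_regs_init() have run for a CPU, its ID
 * registers are exposed under sysfs at the paths built by kobject_add()
 * and sysfs_create_group() above, e.g.:
 *
 *	$ cat /sys/devices/system/cpu/cpu0/regs/identification/midr_el1
 *	$ cat /sys/devices/system/cpu/cpu0/regs/identification/revidr_el1
 *
 * Each read returns the register value as a zero-padded hexadecimal string
 * in the "0x%016x" format used by CPUREGS_ATTR_RO().
 */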