/*
 * Record and handle CPU attributes.
 *
 * Copyright (C) 2014 ARM Ltd.
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#include <asm/arch_timer.h>
#include <asm/cache.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/cpufeature.h>
#include <asm/fpsimd.h>

#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/compat.h>
#include <linux/elf.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/personality.h>
#include <linux/preempt.h>
#include <linux/printk.h>
#include <linux/seq_file.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/delay.h>

/*
 * In case the boot CPU is hotpluggable, we record its initial state and
 * current state separately. Certain system registers may contain different
 * values depending on configuration at or after reset.
 */
DEFINE_PER_CPU(struct cpuinfo_arm64, cpu_data);
static struct cpuinfo_arm64 boot_cpu_data;

static char *icache_policy_str[] = {
	[0 ... ICACHE_POLICY_PIPT]	= "RESERVED/UNKNOWN",
	[ICACHE_POLICY_VIPT]		= "VIPT",
	[ICACHE_POLICY_PIPT]		= "PIPT",
	[ICACHE_POLICY_VPIPT]		= "VPIPT",
};

unsigned long __icache_flags;

static const char *const hwcap_str[] = {
	"fp",
	"asimd",
	"evtstrm",
	"aes",
	"pmull",
	"sha1",
	"sha2",
	"crc32",
	"atomics",
	"fphp",
	"asimdhp",
	"cpuid",
	"asimdrdm",
	"jscvt",
	"fcma",
	"lrcpc",
	"dcpop",
	"sha3",
	"sm3",
	"sm4",
	"asimddp",
	"sha512",
	"sve",
	"asimdfhm",
	"dit",
	"uscat",
	"ilrcpc",
	"flagm",
	"ssbs",
	"sb",
	"paca",
	"pacg",
	NULL
};

#ifdef CONFIG_COMPAT
static const char *const compat_hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	"thumbee",
	"neon",
	"vfpv3",
	"vfpv3d16",
	"tls",
	"vfpv4",
	"idiva",
	"idivt",
	"vfpd32",
	"lpae",
	"evtstrm",
	NULL
};

static const char *const compat_hwcap2_str[] = {
	"aes",
	"pmull",
	"sha1",
	"sha2",
	"crc32",
	NULL
};
#endif /* CONFIG_COMPAT */

static int c_show(struct seq_file *m, void *v)
{
	int i, j;
	bool compat = personality(current->personality) == PER_LINUX32;

	for_each_online_cpu(i) {
		struct cpuinfo_arm64 *cpuinfo = &per_cpu(cpu_data, i);
		u32 midr = cpuinfo->reg_midr;

		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor". Give glibc what it expects.
		 */
		seq_printf(m, "processor\t: %d\n", i);
		if (compat)
			seq_printf(m, "model name\t: ARMv8 Processor rev %d (%s)\n",
				   MIDR_REVISION(midr), COMPAT_ELF_PLATFORM);

		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
			   loops_per_jiffy / (500000UL/HZ),
			   loops_per_jiffy / (5000UL/HZ) % 100);

		/*
		 * Dump out the common processor features in a single line.
		 * Userspace should read the hwcaps with getauxval(AT_HWCAP)
		 * rather than attempting to parse this, but there's a body of
		 * software which does already (at least for 32-bit).
		 */
		seq_puts(m, "Features\t:");
		if (compat) {
#ifdef CONFIG_COMPAT
			for (j = 0; compat_hwcap_str[j]; j++)
				if (compat_elf_hwcap & (1 << j))
					seq_printf(m, " %s", compat_hwcap_str[j]);

			for (j = 0; compat_hwcap2_str[j]; j++)
				if (compat_elf_hwcap2 & (1 << j))
					seq_printf(m, " %s", compat_hwcap2_str[j]);
#endif /* CONFIG_COMPAT */
		} else {
			for (j = 0; hwcap_str[j]; j++)
				if (elf_hwcap & (1 << j))
					seq_printf(m, " %s", hwcap_str[j]);
		}
		seq_puts(m, "\n");

		seq_printf(m, "CPU implementer\t: 0x%02x\n",
			   MIDR_IMPLEMENTOR(midr));
		seq_printf(m, "CPU architecture: 8\n");
		seq_printf(m, "CPU variant\t: 0x%x\n", MIDR_VARIANT(midr));
		seq_printf(m, "CPU part\t: 0x%03x\n", MIDR_PARTNUM(midr));
		seq_printf(m, "CPU revision\t: %d\n\n", MIDR_REVISION(midr));
	}

	return 0;
}

static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};

static struct kobj_type cpuregs_kobj_type = {
	.sysfs_ops = &kobj_sysfs_ops,
};

/*
 * The ARM ARM uses the phrase "32-bit register" to describe a register
 * whose upper 32 bits are RES0 (per C5.1.1, ARM DDI 0487A.i), however
 * no statement is made as to whether the upper 32 bits will or will not
 * be made use of in future, and between ARM DDI 0487A.c and ARM DDI
 * 0487A.d CLIDR_EL1 was expanded from 32-bit to 64-bit.
 *
 * Thus, while both MIDR_EL1 and REVIDR_EL1 are described as 32-bit
 * registers, we expose them both as 64 bit values to cater for possible
 * future expansion without an ABI break.
 */
#define kobj_to_cpuinfo(kobj)	container_of(kobj, struct cpuinfo_arm64, kobj)
#define CPUREGS_ATTR_RO(_name, _field)						\
	static ssize_t _name##_show(struct kobject *kobj,			\
			struct kobj_attribute *attr, char *buf)			\
	{									\
		struct cpuinfo_arm64 *info = kobj_to_cpuinfo(kobj);		\
										\
		if (info->reg_midr)						\
			return sprintf(buf, "0x%016x\n", info->reg_##_field);	\
		else								\
			return 0;						\
	}									\
	static struct kobj_attribute cpuregs_attr_##_name = __ATTR_RO(_name)

CPUREGS_ATTR_RO(midr_el1, midr);
CPUREGS_ATTR_RO(revidr_el1, revidr);

static struct attribute *cpuregs_id_attrs[] = {
	&cpuregs_attr_midr_el1.attr,
	&cpuregs_attr_revidr_el1.attr,
	NULL
};

static const struct attribute_group cpuregs_attr_group = {
	.attrs = cpuregs_id_attrs,
	.name = "identification"
};

static int cpuid_cpu_online(unsigned int cpu)
{
	int rc;
	struct device *dev;
	struct cpuinfo_arm64 *info = &per_cpu(cpu_data, cpu);

	dev = get_cpu_device(cpu);
	if (!dev) {
		rc = -ENODEV;
		goto out;
	}
	rc = kobject_add(&info->kobj, &dev->kobj, "regs");
	if (rc)
		goto out;
	rc = sysfs_create_group(&info->kobj, &cpuregs_attr_group);
	if (rc)
		kobject_del(&info->kobj);
out:
	return rc;
}

static int cpuid_cpu_offline(unsigned int cpu)
{
	struct device *dev;
	struct cpuinfo_arm64 *info = &per_cpu(cpu_data, cpu);

	dev = get_cpu_device(cpu);
	if (!dev)
		return -ENODEV;
	if (info->kobj.parent) {
		sysfs_remove_group(&info->kobj, &cpuregs_attr_group);
		kobject_del(&info->kobj);
	}

	return 0;
}

static int __init cpuinfo_regs_init(void)
{
	int cpu, ret;

	for_each_possible_cpu(cpu) {
		struct cpuinfo_arm64 *info = &per_cpu(cpu_data, cpu);

		kobject_init(&info->kobj, &cpuregs_kobj_type);
	}

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "arm64/cpuinfo:online",
				cpuid_cpu_online, cpuid_cpu_offline);
	if (ret < 0) {
		pr_err("cpuinfo: failed to register hotplug callbacks.\n");
		return ret;
	}
	return 0;
}

static void cpuinfo_detect_icache_policy(struct cpuinfo_arm64 *info)
{
	unsigned int cpu = smp_processor_id();
	u32 l1ip = CTR_L1IP(info->reg_ctr);

	switch (l1ip) {
	case ICACHE_POLICY_PIPT:
		break;
	case ICACHE_POLICY_VPIPT:
		set_bit(ICACHEF_VPIPT, &__icache_flags);
		break;
	default:
		/* Fallthrough */
	case ICACHE_POLICY_VIPT:
		/* Assume aliasing */
		set_bit(ICACHEF_ALIASING, &__icache_flags);
	}

	pr_info("Detected %s I-cache on CPU%d\n", icache_policy_str[l1ip], cpu);
}

static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
{
	info->reg_cntfrq = arch_timer_get_cntfrq();
	/*
	 * Use the effective value of CTR_EL0 rather than the raw value
	 * exposed by the CPU. The CTR_EL0.IDC field must be interpreted
	 * together with the CLIDR_EL1 fields to avoid triggering false
	 * warnings when there is a mismatch across the CPUs. Keep track
	 * of the effective value of CTR_EL0 in our internal records for
	 * accurate sanity checks and feature enablement.
	 */
	info->reg_ctr = read_cpuid_effective_cachetype();
	info->reg_dczid = read_cpuid(DCZID_EL0);
	info->reg_midr = read_cpuid_id();
	info->reg_revidr = read_cpuid(REVIDR_EL1);

	info->reg_id_aa64dfr0 = read_cpuid(ID_AA64DFR0_EL1);
	info->reg_id_aa64dfr1 = read_cpuid(ID_AA64DFR1_EL1);
	info->reg_id_aa64isar0 = read_cpuid(ID_AA64ISAR0_EL1);
	info->reg_id_aa64isar1 = read_cpuid(ID_AA64ISAR1_EL1);
	info->reg_id_aa64mmfr0 = read_cpuid(ID_AA64MMFR0_EL1);
	info->reg_id_aa64mmfr1 = read_cpuid(ID_AA64MMFR1_EL1);
	info->reg_id_aa64mmfr2 = read_cpuid(ID_AA64MMFR2_EL1);
	info->reg_id_aa64pfr0 = read_cpuid(ID_AA64PFR0_EL1);
	info->reg_id_aa64pfr1 = read_cpuid(ID_AA64PFR1_EL1);
	info->reg_id_aa64zfr0 = read_cpuid(ID_AA64ZFR0_EL1);

	/* Update the 32bit ID registers only if AArch32 is implemented */
	if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) {
		info->reg_id_dfr0 = read_cpuid(ID_DFR0_EL1);
		info->reg_id_isar0 = read_cpuid(ID_ISAR0_EL1);
		info->reg_id_isar1 = read_cpuid(ID_ISAR1_EL1);
		info->reg_id_isar2 = read_cpuid(ID_ISAR2_EL1);
		info->reg_id_isar3 = read_cpuid(ID_ISAR3_EL1);
		info->reg_id_isar4 = read_cpuid(ID_ISAR4_EL1);
		info->reg_id_isar5 = read_cpuid(ID_ISAR5_EL1);
		info->reg_id_mmfr0 = read_cpuid(ID_MMFR0_EL1);
		info->reg_id_mmfr1 = read_cpuid(ID_MMFR1_EL1);
		info->reg_id_mmfr2 = read_cpuid(ID_MMFR2_EL1);
		info->reg_id_mmfr3 = read_cpuid(ID_MMFR3_EL1);
		info->reg_id_pfr0 = read_cpuid(ID_PFR0_EL1);
		info->reg_id_pfr1 = read_cpuid(ID_PFR1_EL1);

		info->reg_mvfr0 = read_cpuid(MVFR0_EL1);
		info->reg_mvfr1 = read_cpuid(MVFR1_EL1);
		info->reg_mvfr2 = read_cpuid(MVFR2_EL1);
	}

	if (IS_ENABLED(CONFIG_ARM64_SVE) &&
	    id_aa64pfr0_sve(info->reg_id_aa64pfr0))
		info->reg_zcr = read_zcr_features();

	cpuinfo_detect_icache_policy(info);
}

void cpuinfo_store_cpu(void)
{
	struct cpuinfo_arm64 *info = this_cpu_ptr(&cpu_data);
	__cpuinfo_store_cpu(info);
	update_cpu_features(smp_processor_id(), info, &boot_cpu_data);
}

void __init cpuinfo_store_boot_cpu(void)
{
	struct cpuinfo_arm64 *info = &per_cpu(cpu_data, 0);
	__cpuinfo_store_cpu(info);

	boot_cpu_data = *info;
	init_cpu_features(&boot_cpu_data);
}

device_initcall(cpuinfo_regs_init);
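
/*
 * For reference, a minimal userspace sketch (kept inside a comment so this
 * file still compiles as kernel code) of how the interfaces above are meant
 * to be consumed: hwcaps via getauxval(AT_HWCAP), as the comment in c_show()
 * recommends, and MIDR_EL1 via the "regs/identification" sysfs group
 * registered in cpuid_cpu_online(). This is illustrative only; it assumes a
 * system with the arm64 uapi <asm/hwcap.h> header available (for HWCAP_AES)
 * and the standard /sys/devices/system/cpu/cpuN device layout.
 *
 *	#include <stdio.h>
 *	#include <sys/auxv.h>
 *	#include <asm/hwcap.h>
 *
 *	int main(void)
 *	{
 *		unsigned long hwcap = getauxval(AT_HWCAP);
 *		char buf[32];
 *		FILE *f;
 *
 *		// Feature test: preferred over parsing /proc/cpuinfo.
 *		printf("aes: %s\n", (hwcap & HWCAP_AES) ? "yes" : "no");
 *
 *		// Per-CPU MIDR_EL1 as exposed by cpuregs_attr_group above.
 *		f = fopen("/sys/devices/system/cpu/cpu0/regs/identification/midr_el1", "r");
 *		if (f) {
 *			if (fgets(buf, sizeof(buf), f))
 *				printf("midr_el1: %s", buf);
 *			fclose(f);
 *		}
 *		return 0;
 *	}
 */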