/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2014 Linaro Ltd. <ard.biesheuvel@linaro.org>
 */

#ifndef __ASM_CPUFEATURE_H
#define __ASM_CPUFEATURE_H

#include <asm/cpucaps.h>
#include <asm/cputype.h>
#include <asm/hwcap.h>
#include <asm/sysreg.h>

#define MAX_CPU_FEATURES	128
#define cpu_feature(x)		KERNEL_HWCAP_ ## x

#ifndef __ASSEMBLY__

#include <linux/bug.h>
#include <linux/jump_label.h>
#include <linux/kernel.h>

/*
 * CPU feature register tracking
 *
 * The safe value of a CPUID feature field is dependent on the implications
 * of the values assigned to it by the architecture. Based on the relationship
 * between the values, the features are classified into 3 types - LOWER_SAFE,
 * HIGHER_SAFE and EXACT.
 *
 * The lowest value of all the CPUs is chosen for LOWER_SAFE and the highest
 * for HIGHER_SAFE. It is expected that all CPUs have the same value for
 * a field when EXACT is specified, failing which the safe value specified
 * in the table is chosen.
 */

enum ftr_type {
	FTR_EXACT,			/* Use a predefined safe value */
	FTR_LOWER_SAFE,			/* Smaller value is safe */
	FTR_HIGHER_SAFE,		/* Bigger value is safe */
	FTR_HIGHER_OR_ZERO_SAFE,	/* Bigger value is safe, but 0 is biggest */
};

#define FTR_STRICT	true	/* SANITY check strict matching required */
#define FTR_NONSTRICT	false	/* SANITY check ignored */

#define FTR_SIGNED	true	/* Value should be treated as signed */
#define FTR_UNSIGNED	false	/* Value should be treated as unsigned */

#define FTR_VISIBLE	true	/* Feature visible to the user space */
#define FTR_HIDDEN	false	/* Feature is hidden from the user */

#define FTR_VISIBLE_IF_IS_ENABLED(config)		\
	(IS_ENABLED(config) ? FTR_VISIBLE : FTR_HIDDEN)

struct arm64_ftr_bits {
	bool		sign;	/* Value is signed ? */
	bool		visible;
	bool		strict;	/* CPU Sanity check: strict matching required ? */
	enum ftr_type	type;
	u8		shift;
	u8		width;
	s64		safe_val; /* safe value for FTR_EXACT features */
};

/*
 * Describe the early feature override to the core override code:
 *
 * @val			Values that are to be merged into the final
 *			sanitised value of the register. Only the bitfields
 *			set to 1 in @mask are valid
 * @mask		Mask of the features that are overridden by @val
 *
 * A @mask field set to full-1 indicates that the corresponding field
 * in @val is a valid override.
 *
 * A @mask field set to full-0 with the corresponding @val field set
 * to full-0 denotes that this field has no override.
 *
 * A @mask field set to full-0 with the corresponding @val field set
 * to full-1 denotes that this field has an invalid override.
 */
struct arm64_ftr_override {
	u64		val;
	u64		mask;
};

/*
 * @arm64_ftr_reg - Feature register
 * @strict_mask		Bits which should match across all CPUs for sanity.
 * @sys_val		Safe value across the CPUs (system view)
 */
struct arm64_ftr_reg {
	const char			*name;
	u64				strict_mask;
	u64				user_mask;
	u64				sys_val;
	u64				user_val;
	struct arm64_ftr_override	*override;
	const struct arm64_ftr_bits	*ftr_bits;
};

extern struct arm64_ftr_reg arm64_ftr_reg_ctrel0;
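
/*
 * Illustrative sketch, not an entry from the kernel's real tables (those
 * live in arch/arm64/kernel/cpufeature.c): a hypothetical unsigned,
 * non-strict, LOWER_SAFE 4-bit field at bit 8 could be described as:
 *
 *	static const struct arm64_ftr_bits example_ftr_bits = {
 *		.sign		= FTR_UNSIGNED,
 *		.visible	= FTR_HIDDEN,
 *		.strict		= FTR_NONSTRICT,
 *		.type		= FTR_LOWER_SAFE,
 *		.shift		= 8,
 *		.width		= 4,
 *		.safe_val	= 0,	// only consulted for FTR_EXACT
 *	};
 *
 * With FTR_LOWER_SAFE, a system where one CPU reports 2 and another
 * reports 1 in this field sanitises it to 1.
 */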

/*
 * CPU capabilities:
 *
 * We use arm64_cpu_capabilities to represent system features, errata
 * workarounds (both used internally by the kernel and tracked in
 * cpu_hwcaps) and ELF HWCAPs (which are exposed to user space).
 *
 * To support systems with heterogeneous CPUs, we need to make sure that we
 * detect the capabilities correctly on the system and take appropriate
 * measures to ensure there are no incompatibilities.
 *
 * This comment tries to explain how we treat the capabilities.
 * Each capability has the following list of attributes:
 *
 * 1) Scope of Detection : The system detects a given capability by
 *    performing some checks at runtime. This could be, e.g, checking the
 *    value of a field in a CPU ID feature register or checking the CPU
 *    model. The capability provides a callback (@matches()) to
 *    perform the check. Scope defines how the checks should be performed.
 *    There are three cases:
 *
 *    a) SCOPE_LOCAL_CPU: check all the CPUs and "detect" if at least one
 *       matches. This implies we have to run the check on all the
 *       booting CPUs, until the system decides that the state of the
 *       capability is finalised. (See section 2 below)
 *		Or
 *    b) SCOPE_SYSTEM: check all the CPUs and "detect" if all the CPUs
 *       match. This implies we run the check only once, when the
 *       system decides to finalise the state of the capability. If the
 *       capability relies on a field in one of the CPU ID feature
 *       registers, we use the sanitised value of the register from the
 *       CPU feature infrastructure to make the decision.
 *		Or
 *    c) SCOPE_BOOT_CPU: Check only on the primary boot CPU to detect the
 *       feature. This category is for features that are "finalised"
 *       (or used) by the kernel very early, even before the SMP CPUs
 *       are brought up.
 *
 *    The process of detection is usually denoted by "update" capability
 *    state in the code.
 *
 * 2) Finalise the state : The kernel should finalise the state of a
 *    capability at some point during its execution and take necessary
 *    actions if any. Usually, this is done after all the boot-time
 *    enabled CPUs are brought up by the kernel, so that it can make
 *    better decisions based on the available set of CPUs. However, there
 *    are some special cases, where the action is taken during early
 *    boot by the primary boot CPU (e.g, running the kernel at EL2 with
 *    Virtualisation Host Extensions). The kernel usually disallows any
 *    changes to the state of a capability once it finalises the capability
 *    and takes any action, as it may be impossible to execute the actions
 *    safely. A CPU brought up after a capability is "finalised" is
 *    referred to as a "late CPU" w.r.t. the capability. e.g, all secondary
 *    CPUs are treated as "late CPUs" for capabilities determined by the
 *    boot CPU.
 *
 *    At the moment there are two passes of finalising the capabilities.
 *      a) Boot CPU scope capabilities - Finalised by the primary boot CPU
 *         via setup_boot_cpu_capabilities().
 *      b) Everything except (a) - Run via setup_system_capabilities().
 *
 * 3) Verification: When a CPU is brought online (e.g, by the user or by
 *    the kernel), the kernel should make sure that it is safe to use the
 *    CPU, by verifying that the CPU is compliant with the state of the
 *    capabilities finalised already. This happens via:
 *
 *	secondary_start_kernel()-> check_local_cpu_capabilities()
 *
 *    As explained in (2) above, capabilities could be finalised at
 *    different points in the execution. Each newly booted CPU is verified
 *    against the capabilities that have been finalised by the time it
 *    boots.
 *
 *	a) SCOPE_BOOT_CPU : All CPUs are verified against the capability
 *	except for the primary boot CPU.
 *
 *	b) SCOPE_LOCAL_CPU, SCOPE_SYSTEM: All CPUs hotplugged on by the
 *	user after the kernel boot are verified against the capability.
 *
 *    If there is a conflict, the kernel takes an action, based on the
 *    severity (e.g, a CPU could be prevented from booting or cause a
 *    kernel panic). The CPU is allowed to "affect" the state of the
 *    capability, if it has not been finalised already. See section 5
 *    for more details on conflicts.
 *
 * 4) Action: As mentioned in (2), the kernel can take an action for each
 *    detected capability, on all CPUs on the system. Appropriate actions
 *    include turning on an architectural feature, modifying the control
 *    registers (e.g, SCTLR, TCR etc.) or patching the kernel via
 *    alternatives. The kernel patching is batched and performed at a
 *    later point. The actions are always initiated only after the
 *    capability is finalised. This is usually denoted by "enabling" the
 *    capability. The actions are initiated as follows:
 *	a) Action is triggered on all online CPUs, after the capability is
 *	finalised, invoked within the stop_machine() context from
 *	enable_cpu_capabilities().
 *
 *	b) On any late CPU brought up after (a), the action is triggered via:
 *
 *		check_local_cpu_capabilities() -> verify_local_cpu_capabilities()
 *
 * 5) Conflicts: Based on the state of the capability on a late CPU vs.
 *    the system state, we could have the following combinations:
 *
 *		x-----------------------------x
 *		| Type  | System   | Late CPU |
 *		|-----------------------------|
 *		|  a    |   y      |    n     |
 *		|-----------------------------|
 *		|  b    |   n      |    y     |
 *		x-----------------------------x
 *
 *     Two separate flag bits are defined to indicate whether each kind of
 *     conflict can be allowed:
 *		ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU - Case(a) is allowed
 *		ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU - Case(b) is allowed
 *
 *     Case (a) is not permitted for a capability that the system requires
 *     all CPUs to have in order for the capability to be enabled. This is
 *     typical for capabilities that represent enhanced functionality.
 *
 *     Case (b) is not permitted for a capability that must be enabled
 *     during boot if any CPU in the system requires it in order to run
 *     safely. This is typical for erratum workarounds that cannot be
 *     enabled after the corresponding capability is finalised.
 *
 *     In some non-typical cases, either both (a) and (b), or neither,
 *     should be permitted. This can be described by including neither
 *     or both flags in the capability's type field.
 *
 *     In case of a conflict, the CPU is prevented from booting. If the
 *     ARM64_CPUCAP_PANIC_ON_CONFLICT flag is specified for the capability,
 *     then a kernel panic is triggered.
 */
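
/*
 * Illustrative sketch of how the flags above combine (mirroring the
 * conflict table; not a real kernel capability): a type composed as
 *
 *	ARM64_CPUCAP_SCOPE_LOCAL_CPU | ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU
 *
 * tolerates case (a) (system: y, late CPU: n) but treats case (b)
 * (system: n, late CPU: y) as a conflict, so such a late CPU is
 * prevented from booting. This is exactly the
 * ARM64_CPUCAP_LOCAL_CPU_ERRATUM combination defined below.
 */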

/*
 * Decide how the capability is detected.
 * On any local CPU vs System wide vs the primary boot CPU
 */
#define ARM64_CPUCAP_SCOPE_LOCAL_CPU		((u16)BIT(0))
#define ARM64_CPUCAP_SCOPE_SYSTEM		((u16)BIT(1))
/*
 * The capability is detected on the Boot CPU and is used by the kernel
 * during early boot. i.e, the capability should be "detected" and
 * "enabled" as early as possible on all booting CPUs.
 */
#define ARM64_CPUCAP_SCOPE_BOOT_CPU		((u16)BIT(2))
#define ARM64_CPUCAP_SCOPE_MASK			\
	(ARM64_CPUCAP_SCOPE_SYSTEM	|	\
	 ARM64_CPUCAP_SCOPE_LOCAL_CPU	|	\
	 ARM64_CPUCAP_SCOPE_BOOT_CPU)

#define SCOPE_SYSTEM				ARM64_CPUCAP_SCOPE_SYSTEM
#define SCOPE_LOCAL_CPU				ARM64_CPUCAP_SCOPE_LOCAL_CPU
#define SCOPE_BOOT_CPU				ARM64_CPUCAP_SCOPE_BOOT_CPU
#define SCOPE_ALL				ARM64_CPUCAP_SCOPE_MASK

/*
 * Is it permitted for a late CPU to have this capability when the
 * system hasn't already enabled it?
 */
#define ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU	((u16)BIT(4))
/* Is it safe for a late CPU to miss this capability when the system has it? */
#define ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU	((u16)BIT(5))
/* Panic when a conflict is detected */
#define ARM64_CPUCAP_PANIC_ON_CONFLICT		((u16)BIT(6))

/*
 * CPU errata workarounds that need to be enabled at boot time if one or
 * more CPUs in the system require it. When one of these capabilities
 * has been enabled, it is safe to allow any CPU to boot that doesn't
 * require the workaround. However, it is not safe if a "late" CPU
 * requires a workaround and the system hasn't enabled it already.
 */
#define ARM64_CPUCAP_LOCAL_CPU_ERRATUM		\
	(ARM64_CPUCAP_SCOPE_LOCAL_CPU | ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU)
/*
 * CPU feature detected at boot time based on the system-wide value of a
 * feature. It is safe for a late CPU to have this feature even though
 * the system hasn't enabled it, although the feature will not be used
 * by Linux in this case. If the system has enabled this feature already,
 * then every late CPU must have it.
 */
#define ARM64_CPUCAP_SYSTEM_FEATURE	\
	(ARM64_CPUCAP_SCOPE_SYSTEM | ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU)
/*
 * CPU feature detected at boot time based on a feature of one or more CPUs.
 * All possible conflicts for a late CPU are ignored.
 * NOTE: this means that a late CPU with the feature will *not* cause the
 * capability to be advertised by cpus_have_*cap()!
 */
#define ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE		\
	(ARM64_CPUCAP_SCOPE_LOCAL_CPU	|	\
	 ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU	|	\
	 ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU)

/*
 * CPU feature detected at boot time, on one or more CPUs. A late CPU
 * is not allowed to have the capability when the system doesn't have it.
 * It is OK for a late CPU to miss the feature.
 */
#define ARM64_CPUCAP_BOOT_RESTRICTED_CPU_LOCAL_FEATURE	\
	(ARM64_CPUCAP_SCOPE_LOCAL_CPU	|	\
	 ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU)

/*
 * CPU feature used early in the boot based on the boot CPU. All secondary
 * CPUs must match the state of the capability as detected by the boot CPU. In
 * case of a conflict, a kernel panic is triggered.
 */
#define ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE	\
	(ARM64_CPUCAP_SCOPE_BOOT_CPU | ARM64_CPUCAP_PANIC_ON_CONFLICT)

/*
 * CPU feature used early in the boot based on the boot CPU. It is safe for a
 * late CPU to have this feature even though the boot CPU hasn't enabled it,
 * although the feature will not be used by Linux in this case. If the boot CPU
 * has enabled this feature already, then every late CPU must have it.
 */
#define ARM64_CPUCAP_BOOT_CPU_FEATURE	\
	(ARM64_CPUCAP_SCOPE_BOOT_CPU | ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU)

struct arm64_cpu_capabilities {
	const char *desc;
	u16 capability;
	u16 type;
	bool (*matches)(const struct arm64_cpu_capabilities *caps, int scope);
	/*
	 * Take the appropriate actions to configure this capability
	 * for this CPU. If the capability is detected by the kernel,
	 * this will be called on all the CPUs in the system,
	 * including the hotplugged CPUs, regardless of whether the
	 * capability is available on that specific CPU. This is
	 * useful for some capabilities (e.g, working around CPU
	 * errata), where all the CPUs must take some action (e.g,
	 * changing system control/configuration). Thus, if an action
	 * is required only if the CPU has the capability, then the
	 * routine must check it before taking any action.
	 */
	void (*cpu_enable)(const struct arm64_cpu_capabilities *cap);
	union {
		struct {	/* To be used for erratum handling only */
			struct midr_range midr_range;
			const struct arm64_midr_revidr {
				u32 midr_rv;		/* revision/variant */
				u32 revidr_mask;
			} * const fixed_revs;
		};

		const struct midr_range *midr_range_list;
		struct {	/* Feature register checking */
			u32 sys_reg;
			u8 field_pos;
			u8 field_width;
			u8 min_field_value;
			u8 hwcap_type;
			bool sign;
			unsigned long hwcap;
		};
	};

	/*
	 * An optional list of "matches/cpu_enable" pairs for the same
	 * "capability" of the same "type" as described by the parent.
	 * Only matches(), cpu_enable() and fields relevant to these
	 * methods are significant in the list. The cpu_enable() is
	 * invoked only if the corresponding entry "matches()".
	 * However, if a cpu_enable() method is associated
	 * with multiple matches(), care should be taken that either
	 * the match criteria are mutually exclusive, or that the
	 * method is robust against being called multiple times.
	 */
	const struct arm64_cpu_capabilities *match_list;
};

static inline int cpucap_default_scope(const struct arm64_cpu_capabilities *cap)
{
	return cap->type & ARM64_CPUCAP_SCOPE_MASK;
}
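
/*
 * Illustrative sketch, not an entry from the kernel's tables (real ones
 * live in arch/arm64/kernel/cpufeature.c and cpu_errata.c); the
 * capability number and the matches() callback named here are
 * assumptions for the example:
 *
 *	static const struct arm64_cpu_capabilities example_erratum = {
 *		.desc		= "Example erratum workaround",
 *		.capability	= ARM64_WORKAROUND_EXAMPLE,
 *		.type		= ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
 *		.matches	= example_matches_midr,
 *		.midr_range	= MIDR_RANGE(MIDR_CORTEX_A57, 0, 0, 1, 2),
 *	};
 *
 * SCOPE_LOCAL_CPU in the type means example_matches_midr() runs on each
 * booting CPU until the capability is finalised.
 */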

/*
 * Generic helper for handling capabilities with multiple (match,enable) pairs
 * of callbacks, sharing the same capability bit.
 * Iterate over each entry to see if at least one matches.
 */
static inline bool
cpucap_multi_entry_cap_matches(const struct arm64_cpu_capabilities *entry,
			       int scope)
{
	const struct arm64_cpu_capabilities *caps;

	for (caps = entry->match_list; caps->matches; caps++)
		if (caps->matches(caps, scope))
			return true;

	return false;
}

static __always_inline bool is_vhe_hyp_code(void)
{
	/* Only defined for code run in VHE hyp context */
	return __is_defined(__KVM_VHE_HYPERVISOR__);
}

static __always_inline bool is_nvhe_hyp_code(void)
{
	/* Only defined for code run in NVHE hyp context */
	return __is_defined(__KVM_NVHE_HYPERVISOR__);
}

static __always_inline bool is_hyp_code(void)
{
	return is_vhe_hyp_code() || is_nvhe_hyp_code();
}

extern DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
extern struct static_key_false cpu_hwcap_keys[ARM64_NCAPS];
extern struct static_key_false arm64_const_caps_ready;

/* ARM64 CAPS + alternative_cb */
#define ARM64_NPATCHABLE (ARM64_NCAPS + 1)
extern DECLARE_BITMAP(boot_capabilities, ARM64_NPATCHABLE);

#define for_each_available_cap(cap)		\
	for_each_set_bit(cap, cpu_hwcaps, ARM64_NCAPS)

bool this_cpu_has_cap(unsigned int cap);
void cpu_set_feature(unsigned int num);
bool cpu_have_feature(unsigned int num);
unsigned long cpu_get_elf_hwcap(void);
unsigned long cpu_get_elf_hwcap2(void);

#define cpu_set_named_feature(name) cpu_set_feature(cpu_feature(name))
#define cpu_have_named_feature(name) cpu_have_feature(cpu_feature(name))

static __always_inline bool system_capabilities_finalized(void)
{
	return static_branch_likely(&arm64_const_caps_ready);
}

/*
 * Test for a capability with a runtime check.
 *
 * Before the capability is detected, this returns false.
 */
static inline bool cpus_have_cap(unsigned int num)
{
	if (num >= ARM64_NCAPS)
		return false;
	return test_bit(num, cpu_hwcaps);
}

/*
 * Test for a capability without a runtime check.
 *
 * Before capabilities are finalized, this returns false.
 * After capabilities are finalized, this is patched to avoid a runtime check.
 *
 * @num must be a compile-time constant.
 */
static __always_inline bool __cpus_have_const_cap(int num)
{
	if (num >= ARM64_NCAPS)
		return false;
	return static_branch_unlikely(&cpu_hwcap_keys[num]);
}

/*
 * Test for a capability without a runtime check.
 *
 * Before capabilities are finalized, this will BUG().
 * After capabilities are finalized, this is patched to avoid a runtime check.
 *
 * @num must be a compile-time constant.
 */
static __always_inline bool cpus_have_final_cap(int num)
{
	if (system_capabilities_finalized())
		return __cpus_have_const_cap(num);
	else
		BUG();
}
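
/*
 * Illustrative usage sketch: code that can only run once capabilities
 * are finalised may use the patched, check-free form, e.g.
 *
 *	if (cpus_have_final_cap(ARM64_HAS_PAN))
 *		...
 *
 * whereas code that may also run before finalisation should prefer
 * cpus_have_const_cap() (below) or cpus_have_cap().
 */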

/*
 * Test for a capability, possibly with a runtime check for non-hyp code.
 *
 * For hyp code, this behaves the same as cpus_have_final_cap().
 *
 * For non-hyp code:
 * Before capabilities are finalized, this behaves as cpus_have_cap().
 * After capabilities are finalized, this is patched to avoid a runtime check.
 *
 * @num must be a compile-time constant.
 */
static __always_inline bool cpus_have_const_cap(int num)
{
	if (is_hyp_code())
		return cpus_have_final_cap(num);
	else if (system_capabilities_finalized())
		return __cpus_have_const_cap(num);
	else
		return cpus_have_cap(num);
}

static inline void cpus_set_cap(unsigned int num)
{
	if (num >= ARM64_NCAPS) {
		pr_warn("Attempt to set an illegal CPU capability (%d >= %d)\n",
			num, ARM64_NCAPS);
	} else {
		__set_bit(num, cpu_hwcaps);
	}
}

static inline int __attribute_const__
cpuid_feature_extract_signed_field_width(u64 features, int field, int width)
{
	return (s64)(features << (64 - width - field)) >> (64 - width);
}

static inline int __attribute_const__
cpuid_feature_extract_signed_field(u64 features, int field)
{
	return cpuid_feature_extract_signed_field_width(features, field, 4);
}

static __always_inline unsigned int __attribute_const__
cpuid_feature_extract_unsigned_field_width(u64 features, int field, int width)
{
	return (u64)(features << (64 - width - field)) >> (64 - width);
}

static __always_inline unsigned int __attribute_const__
cpuid_feature_extract_unsigned_field(u64 features, int field)
{
	return cpuid_feature_extract_unsigned_field_width(features, field, 4);
}
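
/*
 * Worked example (illustrative): extracting a signed 4-bit field at
 * bit 28. The left shift by (64 - 4 - 28) = 32 moves the field to
 * bits [63:60]; the arithmetic right shift by (64 - 4) = 60 then
 * sign-extends it back down. A raw field value of 0xf thus reads back
 * as -1, matching the ID-scheme convention that 0xf in a signed field
 * means "not implemented":
 *
 *	cpuid_feature_extract_signed_field_width(0xf0000000UL, 28, 4) == -1
 *	cpuid_feature_extract_unsigned_field_width(0xf0000000UL, 28, 4) == 15
 */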

/*
 * Fields that identify the version of the Performance Monitors Extension do
 * not follow the standard ID scheme. See ARM DDI 0487E.a page D13-2825,
 * "Alternative ID scheme used for the Performance Monitors Extension version".
 */
static inline u64 __attribute_const__
cpuid_feature_cap_perfmon_field(u64 features, int field, u64 cap)
{
	u64 val = cpuid_feature_extract_unsigned_field(features, field);
	u64 mask = GENMASK_ULL(field + 3, field);

	/* Treat IMPLEMENTATION DEFINED functionality as unimplemented */
	if (val == ID_AA64DFR0_PMUVER_IMP_DEF)
		val = 0;

	if (val > cap) {
		features &= ~mask;
		features |= (cap << field) & mask;
	}

	return features;
}

static inline u64 arm64_ftr_mask(const struct arm64_ftr_bits *ftrp)
{
	return (u64)GENMASK(ftrp->shift + ftrp->width - 1, ftrp->shift);
}

static inline u64 arm64_ftr_reg_user_value(const struct arm64_ftr_reg *reg)
{
	return (reg->user_val | (reg->sys_val & reg->user_mask));
}

static inline int __attribute_const__
cpuid_feature_extract_field_width(u64 features, int field, int width, bool sign)
{
	if (WARN_ON_ONCE(!width))
		width = 4;
	return (sign) ?
		cpuid_feature_extract_signed_field_width(features, field, width) :
		cpuid_feature_extract_unsigned_field_width(features, field, width);
}

static inline int __attribute_const__
cpuid_feature_extract_field(u64 features, int field, bool sign)
{
	return cpuid_feature_extract_field_width(features, field, 4, sign);
}

static inline s64 arm64_ftr_value(const struct arm64_ftr_bits *ftrp, u64 val)
{
	return (s64)cpuid_feature_extract_field_width(val, ftrp->shift, ftrp->width, ftrp->sign);
}

static inline bool id_aa64mmfr0_mixed_endian_el0(u64 mmfr0)
{
	return cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_BIGENDEL_SHIFT) == 0x1 ||
		cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_BIGENDEL0_SHIFT) == 0x1;
}

static inline bool id_aa64pfr0_32bit_el1(u64 pfr0)
{
	u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL1_SHIFT);

	return val == ID_AA64PFR0_ELx_32BIT_64BIT;
}

static inline bool id_aa64pfr0_32bit_el0(u64 pfr0)
{
	u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL0_SHIFT);

	return val == ID_AA64PFR0_ELx_32BIT_64BIT;
}

static inline bool id_aa64pfr0_sve(u64 pfr0)
{
	u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_SVE_SHIFT);

	return val > 0;
}

static inline bool id_aa64pfr1_sme(u64 pfr1)
{
	u32 val = cpuid_feature_extract_unsigned_field(pfr1, ID_AA64PFR1_SME_SHIFT);

	return val > 0;
}

static inline bool id_aa64pfr1_mte(u64 pfr1)
{
	u32 val = cpuid_feature_extract_unsigned_field(pfr1, ID_AA64PFR1_MTE_SHIFT);

	return val >= ID_AA64PFR1_MTE;
}

void __init setup_cpu_features(void);
void check_local_cpu_capabilities(void);

u64 read_sanitised_ftr_reg(u32 id);
u64 __read_sysreg_by_encoding(u32 sys_id);

static inline bool cpu_supports_mixed_endian_el0(void)
{
	return id_aa64mmfr0_mixed_endian_el0(read_cpuid(ID_AA64MMFR0_EL1));
}

static inline bool supports_csv2p3(int scope)
{
	u64 pfr0;
	u8 csv2_val;

	if (scope == SCOPE_LOCAL_CPU)
		pfr0 = read_sysreg_s(SYS_ID_AA64PFR0_EL1);
	else
		pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);

	csv2_val = cpuid_feature_extract_unsigned_field(pfr0,
							ID_AA64PFR0_CSV2_SHIFT);
	return csv2_val == 3;
}

static inline bool supports_clearbhb(int scope)
{
	u64 isar2;

	if (scope == SCOPE_LOCAL_CPU)
		isar2 = read_sysreg_s(SYS_ID_AA64ISAR2_EL1);
	else
		isar2 = read_sanitised_ftr_reg(SYS_ID_AA64ISAR2_EL1);

	return cpuid_feature_extract_unsigned_field(isar2,
						    ID_AA64ISAR2_EL1_BC_SHIFT);
}

const struct cpumask *system_32bit_el0_cpumask(void);
DECLARE_STATIC_KEY_FALSE(arm64_mismatched_32bit_el0);

static inline bool system_supports_32bit_el0(void)
{
	u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);

	return static_branch_unlikely(&arm64_mismatched_32bit_el0) ||
	       id_aa64pfr0_32bit_el0(pfr0);
}

static inline bool system_supports_4kb_granule(void)
{
	u64 mmfr0;
	u32 val;

	mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
	val = cpuid_feature_extract_unsigned_field(mmfr0,
						ID_AA64MMFR0_TGRAN4_SHIFT);

	return (val >= ID_AA64MMFR0_TGRAN4_SUPPORTED_MIN) &&
	       (val <= ID_AA64MMFR0_TGRAN4_SUPPORTED_MAX);
}
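
/*
 * Worked example (illustrative): ID_AA64MMFR0_EL1.TGran4 is 0b0000
 * when 4KB granules are implemented, 0b1111 when they are not, and
 * later revisions of the architecture add 0b0001 for 4KB granules
 * with 52-bit addresses. Checking the sanitised field against the
 * architected [SUPPORTED_MIN, SUPPORTED_MAX] window, as above, keeps
 * these granule helpers correct for such newer "supported" encodings.
 */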

static inline bool system_supports_64kb_granule(void)
{
	u64 mmfr0;
	u32 val;

	mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
	val = cpuid_feature_extract_unsigned_field(mmfr0,
						ID_AA64MMFR0_TGRAN64_SHIFT);

	return (val >= ID_AA64MMFR0_TGRAN64_SUPPORTED_MIN) &&
	       (val <= ID_AA64MMFR0_TGRAN64_SUPPORTED_MAX);
}

static inline bool system_supports_16kb_granule(void)
{
	u64 mmfr0;
	u32 val;

	mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
	val = cpuid_feature_extract_unsigned_field(mmfr0,
						ID_AA64MMFR0_TGRAN16_SHIFT);

	return (val >= ID_AA64MMFR0_TGRAN16_SUPPORTED_MIN) &&
	       (val <= ID_AA64MMFR0_TGRAN16_SUPPORTED_MAX);
}

static inline bool system_supports_mixed_endian_el0(void)
{
	return id_aa64mmfr0_mixed_endian_el0(read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1));
}

static inline bool system_supports_mixed_endian(void)
{
	u64 mmfr0;
	u32 val;

	mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
	val = cpuid_feature_extract_unsigned_field(mmfr0,
						ID_AA64MMFR0_BIGENDEL_SHIFT);

	return val == 0x1;
}

static __always_inline bool system_supports_fpsimd(void)
{
	return !cpus_have_const_cap(ARM64_HAS_NO_FPSIMD);
}

static inline bool system_uses_hw_pan(void)
{
	return IS_ENABLED(CONFIG_ARM64_PAN) &&
		cpus_have_const_cap(ARM64_HAS_PAN);
}

static inline bool system_uses_ttbr0_pan(void)
{
	return IS_ENABLED(CONFIG_ARM64_SW_TTBR0_PAN) &&
		!system_uses_hw_pan();
}

static __always_inline bool system_supports_sve(void)
{
	return IS_ENABLED(CONFIG_ARM64_SVE) &&
		cpus_have_const_cap(ARM64_SVE);
}

static __always_inline bool system_supports_sme(void)
{
	return IS_ENABLED(CONFIG_ARM64_SME) &&
		cpus_have_const_cap(ARM64_SME);
}

static __always_inline bool system_supports_fa64(void)
{
	return IS_ENABLED(CONFIG_ARM64_SME) &&
		cpus_have_const_cap(ARM64_SME_FA64);
}

static __always_inline bool system_supports_tpidr2(void)
{
	return system_supports_sme();
}

static __always_inline bool system_supports_cnp(void)
{
	return IS_ENABLED(CONFIG_ARM64_CNP) &&
		cpus_have_const_cap(ARM64_HAS_CNP);
}

static inline bool system_supports_address_auth(void)
{
	return IS_ENABLED(CONFIG_ARM64_PTR_AUTH) &&
		cpus_have_const_cap(ARM64_HAS_ADDRESS_AUTH);
}

static inline bool system_supports_generic_auth(void)
{
	return IS_ENABLED(CONFIG_ARM64_PTR_AUTH) &&
		cpus_have_const_cap(ARM64_HAS_GENERIC_AUTH);
}

static inline bool system_has_full_ptr_auth(void)
{
	return system_supports_address_auth() && system_supports_generic_auth();
}

static __always_inline bool system_uses_irq_prio_masking(void)
{
	return IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) &&
	       cpus_have_const_cap(ARM64_HAS_IRQ_PRIO_MASKING);
}

static inline bool system_supports_mte(void)
{
	return IS_ENABLED(CONFIG_ARM64_MTE) &&
		cpus_have_const_cap(ARM64_MTE);
}

static inline bool system_has_prio_mask_debugging(void)
{
	return IS_ENABLED(CONFIG_ARM64_DEBUG_PRIORITY_MASKING) &&
	       system_uses_irq_prio_masking();
}

static inline bool system_supports_bti(void)
{
	return IS_ENABLED(CONFIG_ARM64_BTI) && cpus_have_const_cap(ARM64_BTI);
}

static inline bool system_supports_tlb_range(void)
{
	return IS_ENABLED(CONFIG_ARM64_TLB_RANGE) &&
		cpus_have_const_cap(ARM64_HAS_TLB_RANGE);
}

extern int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt);

static inline u32 id_aa64mmfr0_parange_to_phys_shift(int parange)
{
	switch (parange) {
	case ID_AA64MMFR0_PARANGE_32: return 32;
	case ID_AA64MMFR0_PARANGE_36: return 36;
	case ID_AA64MMFR0_PARANGE_40: return 40;
	case ID_AA64MMFR0_PARANGE_42: return 42;
	case ID_AA64MMFR0_PARANGE_44: return 44;
	case ID_AA64MMFR0_PARANGE_48: return 48;
	case ID_AA64MMFR0_PARANGE_52: return 52;
	/*
	 * A future PE could use a value unknown to the kernel.
	 * However, by the "D10.1.4 Principles of the ID scheme
	 * for fields in ID registers", ARM DDI 0487C.a, any new
	 * value is guaranteed to be higher than what we know already.
	 * As a safe limit, we return the limit supported by the kernel.
	 */
	default: return CONFIG_ARM64_PA_BITS;
	}
}
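
/*
 * Illustrative usage sketch, assuming the ID_AA64MMFR0_PARANGE_SHIFT
 * definition from <asm/sysreg.h>: deriving the physical address size
 * from the system-wide sanitised view of ID_AA64MMFR0_EL1:
 *
 *	u64 mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
 *	int parange = cpuid_feature_extract_unsigned_field(mmfr0,
 *						ID_AA64MMFR0_PARANGE_SHIFT);
 *	u32 pa_bits = id_aa64mmfr0_parange_to_phys_shift(parange);
 *
 * e.g. parange == ID_AA64MMFR0_PARANGE_48 (0b0101) yields 48 bits.
 */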

/* Check whether hardware update of the Access flag is supported */
static inline bool cpu_has_hw_af(void)
{
	u64 mmfr1;

	if (!IS_ENABLED(CONFIG_ARM64_HW_AFDBM))
		return false;

	mmfr1 = read_cpuid(ID_AA64MMFR1_EL1);
	return cpuid_feature_extract_unsigned_field(mmfr1,
						ID_AA64MMFR1_HADBS_SHIFT);
}

static inline bool cpu_has_pan(void)
{
	u64 mmfr1 = read_cpuid(ID_AA64MMFR1_EL1);
	return cpuid_feature_extract_unsigned_field(mmfr1,
						    ID_AA64MMFR1_PAN_SHIFT);
}

#ifdef CONFIG_ARM64_AMU_EXTN
/* Check whether the cpu supports the Activity Monitors Unit (AMU) */
extern bool cpu_has_amu_feat(int cpu);
#else
static inline bool cpu_has_amu_feat(int cpu)
{
	return false;
}
#endif

/* Get a cpu that supports the Activity Monitors Unit (AMU) */
extern int get_cpu_with_amu_feat(void);

static inline unsigned int get_vmid_bits(u64 mmfr1)
{
	int vmid_bits;

	vmid_bits = cpuid_feature_extract_unsigned_field(mmfr1,
						ID_AA64MMFR1_VMIDBITS_SHIFT);
	if (vmid_bits == ID_AA64MMFR1_VMIDBITS_16)
		return 16;

	/*
	 * Return the default here even if any reserved
	 * value is fetched from the system register.
	 */
	return 8;
}

extern struct arm64_ftr_override id_aa64mmfr1_override;
extern struct arm64_ftr_override id_aa64pfr0_override;
extern struct arm64_ftr_override id_aa64pfr1_override;
extern struct arm64_ftr_override id_aa64zfr0_override;
extern struct arm64_ftr_override id_aa64smfr0_override;
extern struct arm64_ftr_override id_aa64isar1_override;
extern struct arm64_ftr_override id_aa64isar2_override;

u32 get_kvm_ipa_limit(void);
void dump_cpu_features(void);

#endif /* __ASSEMBLY__ */

#endif