Lines Matching +full:0 +full:x8000000a

202 [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
203 [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
204 [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
205 [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
206 [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
207 [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
209 [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
210 [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
211 [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
212 [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
219 [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
221 [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
223 [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
225 [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
227 [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
233 [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
235 [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
237 [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
239 [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
240 [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
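
For readers decoding the descriptor table above: the first argument of GDT_ENTRY_INIT() is a 16-bit flags word whose low byte is the segment access byte and whose top nibble carries the AVL/L/D/G bits (the limit's top nibble lives in a separate field). Below is a minimal user-space sketch of that decoding, assuming the standard x86 descriptor bit layout; the helper is hypothetical and not part of common.c.

	#include <stdio.h>

	/* Decode the 16-bit "flags" argument of GDT_ENTRY_INIT():
	 * bits 0-7   -> descriptor access byte (type, S, DPL, P)
	 * bits 12-15 -> AVL, L, D/B, G
	 */
	static void decode_gdt_flags(unsigned int flags)
	{
		unsigned int access = flags & 0xff;

		printf("type=%#x S=%u DPL=%u P=%u AVL=%u L=%u D/B=%u G=%u\n",
		       access & 0xf,        /* segment type            */
		       (access >> 4) & 1,   /* S: code/data descriptor */
		       (access >> 5) & 3,   /* DPL                     */
		       (access >> 7) & 1,   /* present                 */
		       (flags >> 12) & 1,   /* AVL                     */
		       (flags >> 13) & 1,   /* L: 64-bit code          */
		       (flags >> 14) & 1,   /* D/B: default op size    */
		       (flags >> 15) & 1);  /* G: 4 KiB granularity    */
	}

	int main(void)
	{
		decode_gdt_flags(0xa09b);  /* KERNEL_CS: 64-bit ring-0 code  */
		decode_gdt_flags(0xc093);  /* KERNEL_DS: ring-0 data         */
		decode_gdt_flags(0xc0fb);  /* USER32_CS: ring-3 32-bit code  */
		return 0;
	}

Running this shows, for example, that 0xa09b differs from 0xc09b only in the L and D/B bits, i.e. 64-bit versus 32-bit kernel code.
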
254 return 0; in x86_nopcid_setup()
258 return 0; in x86_nopcid_setup()
271 return 0; in x86_noinvpcid_setup()
275 return 0; in x86_noinvpcid_setup()
304 "popl %0 \n\t" in flag_is_changeable_p()
305 "movl %0, %1 \n\t" in flag_is_changeable_p()
306 "xorl %2, %0 \n\t" in flag_is_changeable_p()
307 "pushl %0 \n\t" in flag_is_changeable_p()
310 "popl %0 \n\t" in flag_is_changeable_p()
316 return ((f1^f2) & flag) != 0; in flag_is_changeable_p()
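
The pushfl/popl fragments above come from the classic EFLAGS.ID toggle that detects whether CPUID exists on 32-bit CPUs: if bit 21 of EFLAGS can be flipped, the CPUID instruction is available. A self-contained 32-bit sketch mirroring the kernel's flag_is_changeable_p() follows; the function name here is hypothetical and the asm is GCC-style.

	/* 32-bit x86 only: returns non-zero if EFLAGS.ID (bit 21) can be
	 * toggled, which means the CPUID instruction is available. */
	static int eflags_id_is_changeable(void)
	{
		unsigned long f1, f2;
		const unsigned long flag = 1UL << 21;	/* X86_EFLAGS_ID */

		asm volatile("pushfl		\n\t"	/* save original EFLAGS   */
			     "pushfl		\n\t"
			     "popl %0		\n\t"	/* f1 = EFLAGS            */
			     "movl %0, %1	\n\t"	/* f2 = f1                */
			     "xorl %2, %0	\n\t"	/* toggle the ID bit      */
			     "pushl %0		\n\t"
			     "popfl		\n\t"	/* write modified EFLAGS  */
			     "pushfl		\n\t"
			     "popl %0		\n\t"	/* re-read EFLAGS         */
			     "popfl		\n\t"	/* restore original value */
			     : "=&r" (f1), "=&r" (f2)
			     : "ir" (flag));

		return ((f1 ^ f2) & flag) != 0;
	}

On 64-bit kernels the test is unnecessary, since every x86-64 CPU implements CPUID.
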
335 lo |= 0x200000; in squash_the_stupid_serial_number()
342 c->cpuid_level = cpuid_eax(0); in squash_the_stupid_serial_number()
347 disable_x86_serial_nr = 0; in x86_serial_nr_setup()
411 unsigned long bits_missing = 0; in native_write_cr0()
414 asm volatile("mov %0,%%cr0": "+r" (val) : : "memory"); in native_write_cr0()
430 unsigned long bits_changed = 0; in native_write_cr4()
433 asm volatile("mov %0,%%cr4": "+r" (val) : : "memory"); in native_write_cr4()
442 WARN_ONCE(bits_changed, "pinned CR4 bits changed: 0x%lx!?\n", in native_write_cr4()
501 return 0; in x86_nofsgsbase_setup()
563 u64 msr = 0; in ibt_save()
608 wrmsrl(MSR_IA32_S_CET, 0); in setup_cet()
614 wrmsrl(MSR_IA32_S_CET, 0); in setup_cet()
625 wrmsrl(MSR_IA32_S_CET, 0); in cet_disable()
626 wrmsrl(MSR_IA32_U_CET, 0); in cet_disable()
641 { X86_FEATURE_MWAIT, 0x00000005 },
642 { X86_FEATURE_DCA, 0x00000009 },
643 { X86_FEATURE_XSAVE, 0x0000000d },
644 { 0, 0 }
657 * extended_cpuid_level is set to 0 if unavailable in filter_cpuid_features()
662 if (!((s32)df->level < 0 ? in filter_cpuid_features()
671 pr_warn("CPU: CPU feature " X86_CAP_FMT " disabled, no CPUID level 0x%x\n", in filter_cpuid_features()
679 * in particular, if CPUID levels 0x80000002..4 are supported, this
763 * per CPU stack canary is 0 in both per CPU areas. in switch_gdt_and_percpu_base()
785 if (c->extended_cpuid_level < 0x80000004) in get_model_name()
789 cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]); in get_model_name()
790 cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]); in get_model_name()
791 cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]); in get_model_name()
792 c->x86_model_id[48] = 0; in get_model_name()
795 p = q = s = &c->x86_model_id[0]; in get_model_name()
808 *(s + 1) = '\0'; in get_model_name()
819 cpuid_count(4, 0, &eax, &ebx, &ecx, &edx); in detect_num_cpu_cores()
820 if (eax & 0x1f) in detect_num_cpu_cores()
830 if (n >= 0x80000005) { in cpu_detect_cache_sizes()
831 cpuid(0x80000005, &dummy, &ebx, &ecx, &edx); in cpu_detect_cache_sizes()
835 c->x86_tlbsize = 0; in cpu_detect_cache_sizes()
839 if (n < 0x80000006) /* Some chips just have a large L1. */ in cpu_detect_cache_sizes()
842 cpuid(0x80000006, &dummy, &ebx, &ecx, &edx); in cpu_detect_cache_sizes()
846 c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff); in cpu_detect_cache_sizes()
856 if (l2size == 0) in cpu_detect_cache_sizes()
901 smp_num_siblings = (ebx & 0xff0000) >> 16; in detect_ht_early()
905 return 0; in detect_ht_early()
913 if (detect_ht_early(c) < 0) in detect_ht()
935 for (i = 0; i < X86_VENDOR_NUM; i++) { in get_cpu_vendor()
939 if (!strcmp(v, cpu_devs[i]->c_ident[0]) || in get_cpu_vendor()
959 cpuid(0x00000000, (unsigned int *)&c->cpuid_level, in cpu_detect()
960 (unsigned int *)&c->x86_vendor_id[0], in cpu_detect()
965 /* Intel-defined flags: level 0x00000001 */ in cpu_detect()
966 if (c->cpuid_level >= 0x00000001) { in cpu_detect()
969 cpuid(0x00000001, &tfms, &misc, &junk, &cap0); in cpu_detect()
975 c->x86_clflush_size = ((misc >> 8) & 0xff) * 8; in cpu_detect()
985 for (i = 0; i < NCAPINTS + NBUGINTS; i++) { in apply_forced_caps()
1036 /* Intel-defined flags: level 0x00000001 */ in get_cpu_cap()
1037 if (c->cpuid_level >= 0x00000001) { in get_cpu_cap()
1038 cpuid(0x00000001, &eax, &ebx, &ecx, &edx); in get_cpu_cap()
1044 /* Thermal and Power Management Leaf: level 0x00000006 (eax) */ in get_cpu_cap()
1045 if (c->cpuid_level >= 0x00000006) in get_cpu_cap()
1046 c->x86_capability[CPUID_6_EAX] = cpuid_eax(0x00000006); in get_cpu_cap()
1048 /* Additional Intel-defined flags: level 0x00000007 */ in get_cpu_cap()
1049 if (c->cpuid_level >= 0x00000007) { in get_cpu_cap()
1050 cpuid_count(0x00000007, 0, &eax, &ebx, &ecx, &edx); in get_cpu_cap()
1057 cpuid_count(0x00000007, 1, &eax, &ebx, &ecx, &edx); in get_cpu_cap()
1062 /* Extended state features: level 0x0000000d */ in get_cpu_cap()
1063 if (c->cpuid_level >= 0x0000000d) { in get_cpu_cap()
1064 cpuid_count(0x0000000d, 1, &eax, &ebx, &ecx, &edx); in get_cpu_cap()
1069 /* AMD-defined flags: level 0x80000001 */ in get_cpu_cap()
1070 eax = cpuid_eax(0x80000000); in get_cpu_cap()
1073 if ((eax & 0xffff0000) == 0x80000000) { in get_cpu_cap()
1074 if (eax >= 0x80000001) { in get_cpu_cap()
1075 cpuid(0x80000001, &eax, &ebx, &ecx, &edx); in get_cpu_cap()
1082 if (c->extended_cpuid_level >= 0x80000007) { in get_cpu_cap()
1083 cpuid(0x80000007, &eax, &ebx, &ecx, &edx); in get_cpu_cap()
1089 if (c->extended_cpuid_level >= 0x80000008) { in get_cpu_cap()
1090 cpuid(0x80000008, &eax, &ebx, &ecx, &edx); in get_cpu_cap()
1094 if (c->extended_cpuid_level >= 0x8000000a) in get_cpu_cap()
1095 c->x86_capability[CPUID_8000_000A_EDX] = cpuid_edx(0x8000000a); in get_cpu_cap()
1097 if (c->extended_cpuid_level >= 0x8000001f) in get_cpu_cap()
1098 c->x86_capability[CPUID_8000_001F_EAX] = cpuid_eax(0x8000001f); in get_cpu_cap()
1100 if (c->extended_cpuid_level >= 0x80000021) in get_cpu_cap()
1101 c->x86_capability[CPUID_8000_0021_EAX] = cpuid_eax(0x80000021); in get_cpu_cap()
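
The 0x8000000a hits above are where get_cpu_cap() captures the AMD SVM feature leaf: EDX of CPUID 0x8000000A holds the SVM sub-feature flags, guarded by the maximum extended level reported at leaf 0x80000000. A minimal user-space sketch of the same probe using GCC/clang's <cpuid.h> (nothing here is kernel API):

	#include <stdio.h>
	#include <cpuid.h>

	int main(void)
	{
		unsigned int eax, ebx, ecx, edx;
		/* Highest supported extended leaf, 0 if none. */
		unsigned int max_ext = __get_cpuid_max(0x80000000, NULL);

		/* Mirror the kernel's sanity check: some CPUs return junk
		 * for unsupported extended leaves. */
		if ((max_ext & 0xffff0000) != 0x80000000 || max_ext < 0x8000000a) {
			puts("CPUID leaf 0x8000000A not available");
			return 0;
		}

		/* EAX = SVM revision, EBX = number of ASIDs, EDX = feature flags. */
		__cpuid(0x8000000a, eax, ebx, ecx, edx);
		printf("SVM feature flags (EDX): 0x%08x\n", edx);
		return 0;
	}
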
1118 if (c->extended_cpuid_level >= 0x80000008) { in get_cpu_address_sizes()
1119 cpuid(0x80000008, &eax, &ebx, &ecx, &edx); in get_cpu_address_sizes()
1121 c->x86_virt_bits = (eax >> 8) & 0xff; in get_cpu_address_sizes()
1122 c->x86_phys_bits = eax & 0xff; in get_cpu_address_sizes()
1145 for (i = 0; i < X86_VENDOR_NUM; i++) in identify_cpu_without_cpuid()
1147 c->x86_vendor_id[0] = 0; in identify_cpu_without_cpuid()
1149 if (c->x86_vendor_id[0]) { in identify_cpu_without_cpuid()
1157 #define NO_SPECULATION BIT(0)
1230 /* AMD Family 0xf - 0x12 */
1231 …VULNWL_AMD(0x0f, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO …
1232 …VULNWL_AMD(0x10, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO …
1233 …VULNWL_AMD(0x11, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO …
1234 …VULNWL_AMD(0x12, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO …
1236 /* FAMILY_ANY must be last, otherwise 0x0f - 0x12 matches won't work */
1260 #define SRBDS BIT(0)
1296 VULNBL_INTEL_STEPPINGS(COMETLAKE_L, X86_STEPPINGS(0x0, 0x0), MMIO | RETBLEED),
1315 VULNBL_AMD(0x15, RETBLEED),
1316 VULNBL_AMD(0x16, RETBLEED),
1317 VULNBL_AMD(0x17, RETBLEED | SMT_RSB | SRSO),
1318 VULNBL_HYGON(0x18, RETBLEED | SMT_RSB | SRSO),
1319 VULNBL_AMD(0x19, SRSO),
1332 u64 x86_arch_cap_msr = 0; in x86_read_arch_cap_msr()
1409 * When the CPU is not mitigated for TAA (TAA_NO=0) set TAA bug when: in cpu_set_bug_bits()
1530 int arglen, taint = 0; in cpu_parse_early_param()
1557 if (arglen <= 0) in cpu_parse_early_param()
1591 for (bit = 0; bit < 32 * NCAPINTS; bit++) { in cpu_parse_early_param()
1636 memset(&c->x86_capability, 0, sizeof(c->x86_capability)); in early_identify_cpu()
1637 c->extended_cpuid_level = 0; in early_identify_cpu()
1654 c->cpu_index = 0; in early_identify_cpu()
1698 int count = 0; in init_cpu_devs()
1721 for (i = 0; i < X86_VENDOR_NUM && cpu_devs[i]; i++) { in early_cpu_init()
1722 for (j = 0; j < 2; j++) { in early_cpu_init()
1754 loadsegment(fs, 0); in detect_null_seg_behavior()
1757 return tmp == 0; in detect_null_seg_behavior()
1783 * 0x18 is the respective family for Hygon. in check_null_seg_clears_base()
1785 if ((c->x86 == 0x17 || c->x86 == 0x18) && in check_null_seg_clears_base()
1795 c->extended_cpuid_level = 0; in generic_identify()
1812 if (c->cpuid_level >= 0x00000001) { in generic_identify()
1813 c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xFF; in generic_identify()
1816 c->apicid = apic->phys_pkg_id(c->initial_apicid, 0); in generic_identify()
1828 * systems that run Linux at CPL > 0 may or may not have the in generic_identify()
1862 c->logical_proc_id = 0; in validate_apic_and_package_id()
1874 c->x86_cache_size = 0; in identify_cpu()
1876 c->x86_model = c->x86_stepping = 0; /* So far unknown... */ in identify_cpu()
1877 c->x86_vendor_id[0] = '\0'; /* Unset */ in identify_cpu()
1878 c->x86_model_id[0] = '\0'; /* Unset */ in identify_cpu()
1880 c->x86_coreid_bits = 0; in identify_cpu()
1881 c->cu_id = 0xff; in identify_cpu()
1893 memset(&c->x86_capability, 0, sizeof(c->x86_capability)); in identify_cpu()
1895 memset(&c->vmx_capability, 0, sizeof(c->vmx_capability)); in identify_cpu()
1907 c->apicid = apic->phys_pkg_id(c->initial_apicid, 0); in identify_cpu()
1953 if (!c->x86_model_id[0]) { in identify_cpu()
1986 for (i = 0; i < NCAPINTS; i++) in identify_cpu()
2028 wrmsr(MSR_IA32_SYSENTER_CS, tss->x86_tss.ss1, 0); in enable_sep_cpu()
2029 wrmsr(MSR_IA32_SYSENTER_ESP, (unsigned long)(cpu_entry_stack(cpu) + 1), 0); in enable_sep_cpu()
2030 wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long)entry_SYSENTER_32, 0); in enable_sep_cpu()
2074 if (c->cpuid_level >= 0) in print_cpu_info()
2081 if (c->x86_model_id[0]) in print_cpu_info()
2086 pr_cont(" (family: 0x%x, model: 0x%x", c->x86, c->x86_model); in print_cpu_info()
2088 if (c->x86_stepping || c->cpuid_level >= 0) in print_cpu_info()
2089 pr_cont(", stepping: 0x%x)\n", c->x86_stepping); in print_cpu_info()
2130 wrmsr(MSR_STAR, 0, (__USER32_CS << 16) | __KERNEL_CS); in syscall_init()
2148 wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL); in syscall_init()
2149 wrmsrl_safe(MSR_IA32_SYSENTER_EIP, 0ULL); in syscall_init()
2182 for (i = 0; i < 8; i++) { in clear_all_debug_regs()
2187 set_debugreg(0, i); in clear_all_debug_regs()
2211 wrmsr(MSR_TSC_AUX, cpudata, 0); in setup_getcpu()
2246 tss->io_bitmap.prev_max = 0; in tss_setup_io_bitmap()
2247 tss->io_bitmap.prev_sequence = 0; in tss_setup_io_bitmap()
2248 memset(tss->io_bitmap.bitmap, 0xff, sizeof(tss->io_bitmap.bitmap)); in tss_setup_io_bitmap()
2253 tss->io_bitmap.mapall[IO_BITMAP_LONGS] = ~0UL; in tss_setup_io_bitmap()
2295 if (this_cpu_read(numa_node) == 0 && in cpu_init()
2306 loadsegment(fs, 0); in cpu_init()
2307 memset(cur->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8); in cpu_init()
2310 wrmsrl(MSR_FS_BASE, 0); in cpu_init()
2311 wrmsrl(MSR_KERNEL_GS_BASE, 0); in cpu_init()
2352 curr_info->cpuid_level = cpuid_eax(0); in store_cpu_caps()
2429 '0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86); in arch_cpu_finalize_init()
2451 set_memory_4k((unsigned long)__va(0), 1); in arch_cpu_finalize_init()