#include <linux/init.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/smp.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/bootmem.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/msr.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/mtrr.h>
#include <asm/mce.h>
#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/mpspec.h>
#include <asm/apic.h>
#include <mach_apic.h>
#endif

#include "cpu.h"

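/*
 * Each GDT entry below is given as its two raw 32-bit descriptor words
 * { low, high }: low = limit[15:0] | base[15:0] << 16, and high packs
 * base[23:16], the access byte, limit[19:16], the flags nibble and
 * base[31:24].  For example { 0x0000ffff, 0x00cf9a00 } decodes to
 * base 0, limit 0xfffff with 4K granularity (the full 4GB), 32-bit,
 * DPL 0, read/execute code; the default user segments differ only in
 * DPL 3 (access bytes 0xfa/0xf2 instead of 0x9a/0x92).
 */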
DEFINE_PER_CPU(struct gdt_page, gdt_page) = { .gdt = {
	[GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00cf9a00 } } },
	[GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9200 } } },
	[GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00cffa00 } } },
	[GDT_ENTRY_DEFAULT_USER_DS] = { { { 0x0000ffff, 0x00cff200 } } },
	/*
	 * Segments used for calling PnP BIOS have byte granularity.
	 * Their code and data segments have fixed 64k limits; the
	 * transfer segment sizes are set at run time.
	 */
	/* 32-bit code */
	[GDT_ENTRY_PNPBIOS_CS32] = { { { 0x0000ffff, 0x00409a00 } } },
	/* 16-bit code */
	[GDT_ENTRY_PNPBIOS_CS16] = { { { 0x0000ffff, 0x00009a00 } } },
	/* 16-bit data */
	[GDT_ENTRY_PNPBIOS_DS] = { { { 0x0000ffff, 0x00009200 } } },
	/* 16-bit data */
	[GDT_ENTRY_PNPBIOS_TS1] = { { { 0x00000000, 0x00009200 } } },
	/* 16-bit data */
	[GDT_ENTRY_PNPBIOS_TS2] = { { { 0x00000000, 0x00009200 } } },
	/*
	 * The APM segments have byte granularity and their bases
	 * are set at run time.  All have 64k limits.
	 */
	/* 32-bit code */
	[GDT_ENTRY_APMBIOS_BASE] = { { { 0x0000ffff, 0x00409a00 } } },
	/* 16-bit code */
	[GDT_ENTRY_APMBIOS_BASE+1] = { { { 0x0000ffff, 0x00009a00 } } },
	/* data */
	[GDT_ENTRY_APMBIOS_BASE+2] = { { { 0x0000ffff, 0x00409200 } } },

	[GDT_ENTRY_ESPFIX_SS] = { { { 0x00000000, 0x00c09200 } } },
	[GDT_ENTRY_PERCPU] = { { { 0x00000000, 0x00000000 } } },
} };
EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);

/* Capability bits cleared on the command line; masked off in identify_cpu() */
__u32 cleared_cpu_caps[NCAPINTS] __cpuinitdata;

static int cachesize_override __cpuinitdata = -1;
static int disable_x86_serial_nr __cpuinitdata = 1;

struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};

static void __cpuinit default_init(struct cpuinfo_x86 *c)
{
	/* Not much we can do here... */
	/* Check if at least it has cpuid */
	if (c->cpuid_level == -1) {
		/* No cpuid.  It must be an ancient CPU */
		if (c->x86 == 4)
			strcpy(c->x86_model_id, "486");
		else if (c->x86 == 3)
			strcpy(c->x86_model_id, "386");
	}
}

static struct cpu_dev __cpuinitdata default_cpu = {
	.c_init = default_init,
	.c_vendor = "Unknown",
};
static struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;

static int __init cachesize_setup(char *str)
{
	get_option(&str, &cachesize_override);
	return 1;
}
__setup("cachesize=", cachesize_setup);

/*
 * Fill c->x86_model_id from the 48-byte processor brand string returned
 * by CPUID leaves 0x80000002..0x80000004 (16 bytes per leaf in
 * EAX..EDX), if the extended levels go that far.
 */
int __cpuinit get_model_name(struct cpuinfo_x86 *c)
{
	unsigned int *v;
	char *p, *q;

	if (cpuid_eax(0x80000000) < 0x80000004)
		return 0;

	v = (unsigned int *) c->x86_model_id;
	cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
	cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
	cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
	c->x86_model_id[48] = 0;

	/* Intel chips right-justify this string for some dumb reason;
	   undo that brain damage */
	p = q = &c->x86_model_id[0];
	while (*p == ' ')
		p++;
	if (p != q) {
		while (*p)
			*q++ = *p++;
		while (q <= &c->x86_model_id[48])
			*q++ = '\0';	/* Zero-pad the rest */
	}

	return 1;
}

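/*
 * display_cacheinfo() reports the L1/L2 geometry from the extended
 * CPUID leaves: per the extended-leaf documentation, 0x80000005
 * returns the L1 D-cache descriptor in ECX and the L1 I-cache
 * descriptor in EDX (size in KB in bits 31-24, line size in bits 7-0),
 * and 0x80000006 returns the L2 size in KB in ECX[31:16], again with
 * the line size in bits 7-0.
 */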
void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
{
	unsigned int n, dummy, ecx, edx, l2size;

	n = cpuid_eax(0x80000000);

	if (n >= 0x80000005) {
		cpuid(0x80000005, &dummy, &dummy, &ecx, &edx);
		printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
			edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
		c->x86_cache_size = (ecx>>24)+(edx>>24);
	}

	if (n < 0x80000006)	/* Some chips just have a large L1. */
		return;

	ecx = cpuid_ecx(0x80000006);
	l2size = ecx >> 16;

	/* do processor-specific cache resizing */
	if (this_cpu->c_size_cache)
		l2size = this_cpu->c_size_cache(c, l2size);

	/* Allow user to override all this if necessary. */
	if (cachesize_override != -1)
		l2size = cachesize_override;

	if (l2size == 0)
		return;		/* Again, no L2 cache is possible */

	c->x86_cache_size = l2size;

	printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
	       l2size, ecx & 0xFF);
}

/*
 * Naming convention should be: <Name> [(<Codename>)]
 * This table is only used if init_<vendor>() below doesn't set the model
 * name itself; in particular, it isn't used when CPUID levels
 * 0x80000002..0x80000004 are supported.
 */

/* Look up CPU names by table lookup. */
static char __cpuinit *table_lookup_model(struct cpuinfo_x86 *c)
{
	struct cpu_model_info *info;

	if (c->x86_model >= 16)
		return NULL;	/* Range check */

	if (!this_cpu)
		return NULL;

	info = this_cpu->c_models;

	while (info && info->family) {
		if (info->family == c->x86)
			return info->model_names[c->x86_model];
		info++;
	}
	return NULL;		/* Not found */
}

static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c, int early)
{
	char *v = c->x86_vendor_id;
	int i;
	static int printed;

	for (i = 0; i < X86_VENDOR_NUM; i++) {
		if (cpu_devs[i]) {
			if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
			    (cpu_devs[i]->c_ident[1] &&
			     !strcmp(v, cpu_devs[i]->c_ident[1]))) {
				c->x86_vendor = i;
				if (!early)
					this_cpu = cpu_devs[i];
				return;
			}
		}
	}
	if (!printed) {
		printed++;
		printk(KERN_ERR "CPU: Vendor unknown, using generic init.\n");
		printk(KERN_ERR "CPU: Your system may be unstable.\n");
	}
	c->x86_vendor = X86_VENDOR_UNKNOWN;
	this_cpu = &default_cpu;
}

static int __init x86_fxsr_setup(char *s)
{
	setup_clear_cpu_cap(X86_FEATURE_FXSR);
	setup_clear_cpu_cap(X86_FEATURE_XMM);
	return 1;
}
__setup("nofxsr", x86_fxsr_setup);

static int __init x86_sep_setup(char *s)
{
	setup_clear_cpu_cap(X86_FEATURE_SEP);
	return 1;
}
__setup("nosep", x86_sep_setup);

/* Standard routine to see if a specific flag in EFLAGS is changeable */
static inline int flag_is_changeable_p(u32 flag)
{
	u32 f1, f2;

	asm("pushfl\n\t"
	    "pushfl\n\t"
	    "popl %0\n\t"
	    "movl %0,%1\n\t"
	    "xorl %2,%0\n\t"
	    "pushl %0\n\t"
	    "popfl\n\t"
	    "pushfl\n\t"
	    "popl %0\n\t"
	    "popfl\n\t"
	    : "=&r" (f1), "=&r" (f2)
	    : "ir" (flag));

	return ((f1^f2) & flag) != 0;
}

/* Probe for the CPUID instruction: present iff EFLAGS.ID can be toggled */
static int __cpuinit have_cpuid_p(void)
{
	return flag_is_changeable_p(X86_EFLAGS_ID);
}

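/*
 * CPUID leaf 1 layout, as decoded below: EAX holds the processor
 * signature (stepping in bits 3-0, model in 7-4, family in 11-8,
 * extended model in 19-16, extended family in 27-20; the extended
 * fields only kick in for family 0xf and family >= 6 as coded here),
 * EBX[15:8] is the CLFLUSH line size in 8-byte units (valid when the
 * CLFSH feature bit, EDX bit 19, is set), and EDX/ECX carry the
 * standard feature flags.
 */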
void __init cpu_detect(struct cpuinfo_x86 *c)
{
	/* Get vendor name */
	cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
	      (unsigned int *)&c->x86_vendor_id[0],
	      (unsigned int *)&c->x86_vendor_id[8],
	      (unsigned int *)&c->x86_vendor_id[4]);

	c->x86 = 4;
	if (c->cpuid_level >= 0x00000001) {
		u32 junk, tfms, cap0, misc;
		cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
		c->x86 = (tfms >> 8) & 15;
		c->x86_model = (tfms >> 4) & 15;
		if (c->x86 == 0xf)
			c->x86 += (tfms >> 20) & 0xff;
		if (c->x86 >= 0x6)
			c->x86_model += ((tfms >> 16) & 0xF) << 4;
		c->x86_mask = tfms & 15;
		if (cap0 & (1<<19)) {
			c->x86_cache_alignment = ((misc >> 8) & 0xff) * 8;
			c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
		}
	}
}

static void __cpuinit early_get_cap(struct cpuinfo_x86 *c)
{
	u32 tfms, xlvl;
	unsigned int ebx;

	memset(&c->x86_capability, 0, sizeof c->x86_capability);
	if (have_cpuid_p()) {
		/* Intel-defined flags: level 0x00000001 */
		if (c->cpuid_level >= 0x00000001) {
			u32 capability, excap;
			cpuid(0x00000001, &tfms, &ebx, &excap, &capability);
			c->x86_capability[0] = capability;
			c->x86_capability[4] = excap;
		}

		/* AMD-defined flags: level 0x80000001 */
		xlvl = cpuid_eax(0x80000000);
		if ((xlvl & 0xffff0000) == 0x80000000) {
			if (xlvl >= 0x80000001) {
				c->x86_capability[1] = cpuid_edx(0x80000001);
				c->x86_capability[6] = cpuid_ecx(0x80000001);
			}
		}
	}

	clear_cpu_cap(c, X86_FEATURE_PAT);

	switch (c->x86_vendor) {
	case X86_VENDOR_AMD:
		if (c->x86 >= 0xf && c->x86 <= 0x11)
			set_cpu_cap(c, X86_FEATURE_PAT);
		break;
	case X86_VENDOR_INTEL:
		if (c->x86 == 0xF || (c->x86 == 6 && c->x86_model >= 15))
			set_cpu_cap(c, X86_FEATURE_PAT);
		break;
	}
}

/*
 * Do minimum CPU detection early.
 * Fields really needed: vendor, cpuid_level, family, model, mask,
 * cache alignment.
 * The others are not touched to avoid unwanted side effects.
 *
 * WARNING: this function is only called on the BP.  Don't add code here
 * that is supposed to run on all CPUs.
 */
static void __init early_cpu_detect(void)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;

	c->x86_cache_alignment = 32;
	c->x86_clflush_size = 32;

	if (!have_cpuid_p())
		return;

	cpu_detect(c);

	get_cpu_vendor(c, 1);

	if (c->x86_vendor != X86_VENDOR_UNKNOWN &&
	    cpu_devs[c->x86_vendor]->c_early_init)
		cpu_devs[c->x86_vendor]->c_early_init(c);

	early_get_cap(c);
}

static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
{
	u32 tfms, xlvl;
	unsigned int ebx;

	if (have_cpuid_p()) {
		/* Get vendor name */
		cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
		      (unsigned int *)&c->x86_vendor_id[0],
		      (unsigned int *)&c->x86_vendor_id[8],
		      (unsigned int *)&c->x86_vendor_id[4]);

		get_cpu_vendor(c, 0);
		/* Initialize the standard set of capabilities */
		/* Note that the vendor-specific code below might override */
		/* Intel-defined flags: level 0x00000001 */
		if (c->cpuid_level >= 0x00000001) {
			u32 capability, excap;
			cpuid(0x00000001, &tfms, &ebx, &excap, &capability);
			c->x86_capability[0] = capability;
			c->x86_capability[4] = excap;
			c->x86 = (tfms >> 8) & 15;
			c->x86_model = (tfms >> 4) & 15;
			if (c->x86 == 0xf)
				c->x86 += (tfms >> 20) & 0xff;
			if (c->x86 >= 0x6)
				c->x86_model += ((tfms >> 16) & 0xF) << 4;
			c->x86_mask = tfms & 15;
			c->initial_apicid = (ebx >> 24) & 0xFF;
#ifdef CONFIG_X86_HT
			c->apicid = phys_pkg_id(c->initial_apicid, 0);
			c->phys_proc_id = c->initial_apicid;
#else
			c->apicid = c->initial_apicid;
#endif
			if (test_cpu_cap(c, X86_FEATURE_CLFLSH))
				c->x86_clflush_size = ((ebx >> 8) & 0xff) * 8;
		} else {
			/* Have CPUID level 0 only - unheard of */
			c->x86 = 4;
		}

		/* AMD-defined flags: level 0x80000001 */
		xlvl = cpuid_eax(0x80000000);
		if ((xlvl & 0xffff0000) == 0x80000000) {
			if (xlvl >= 0x80000001) {
				c->x86_capability[1] = cpuid_edx(0x80000001);
				c->x86_capability[6] = cpuid_ecx(0x80000001);
			}
			if (xlvl >= 0x80000004)
				get_model_name(c); /* Default name */
		}

		init_scattered_cpuid_features(c);
	}

	clear_cpu_cap(c, X86_FEATURE_PAT);

	switch (c->x86_vendor) {
	case X86_VENDOR_AMD:
		if (c->x86 >= 0xf && c->x86 <= 0x11)
			set_cpu_cap(c, X86_FEATURE_PAT);
		break;
	case X86_VENDOR_INTEL:
		if (c->x86 == 0xF || (c->x86 == 6 && c->x86_model >= 15))
			set_cpu_cap(c, X86_FEATURE_PAT);
		break;
	}
}

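/*
 * The processor serial number (PSN, X86_FEATURE_PN) was introduced with
 * the Pentium III.  The write below sets bit 21 of MSR_IA32_BBL_CR_CTL,
 * which turns the feature off until the next reset; the cpuid level is
 * then re-read because disabling the PSN can lower the maximum
 * supported CPUID leaf.
 */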
static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
{
	if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr) {
		/* Disable processor serial number */
		unsigned long lo, hi;
		rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
		lo |= 0x200000;
		wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
		printk(KERN_NOTICE "CPU serial number disabled.\n");
		clear_cpu_cap(c, X86_FEATURE_PN);

		/* Disabling the serial number may affect the cpuid level */
		c->cpuid_level = cpuid_eax(0);
	}
}

static int __init x86_serial_nr_setup(char *s)
{
	disable_x86_serial_nr = 0;
	return 1;
}
__setup("serialnumber", x86_serial_nr_setup);

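/*
 * identify_cpu() runs the full identification sequence: generic CPUID
 * probing (generic_identify), the vendor's c_identify/c_init hooks,
 * serial-number disabling, a table-lookup fallback for the model name,
 * and finally it ANDs the result into boot_cpu_data's feature set on
 * secondary CPUs and masks off any command-line capability overrides.
 */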
/*
 * This does the hard work of actually picking apart the CPU stuff...
 */
void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
{
	int i;

	c->loops_per_jiffy = loops_per_jiffy;
	c->x86_cache_size = -1;
	c->x86_vendor = X86_VENDOR_UNKNOWN;
	c->cpuid_level = -1;	/* CPUID not detected */
	c->x86_model = c->x86_mask = 0;	/* So far unknown... */
	c->x86_vendor_id[0] = '\0'; /* Unset */
	c->x86_model_id[0] = '\0';  /* Unset */
	c->x86_max_cores = 1;
	c->x86_clflush_size = 32;
	memset(&c->x86_capability, 0, sizeof c->x86_capability);

	if (!have_cpuid_p()) {
		/*
		 * First of all, decide if this is a 486 or higher.
		 * It's a 486 if we can modify the AC flag.
		 */
		if (flag_is_changeable_p(X86_EFLAGS_AC))
			c->x86 = 4;
		else
			c->x86 = 3;
	}

	generic_identify(c);

	if (this_cpu->c_identify)
		this_cpu->c_identify(c);

	/*
	 * Vendor-specific initialization.  In this section we
	 * canonicalize the feature flags, meaning if there are
	 * features a certain CPU supports which CPUID doesn't
	 * tell us, CPUID claiming incorrect flags, or other bugs,
	 * we handle them here.
	 *
	 * At the end of this section, c->x86_capability better
	 * indicate the features this CPU genuinely supports!
	 */
	if (this_cpu->c_init)
		this_cpu->c_init(c);

	/* Disable the PN if appropriate */
	squash_the_stupid_serial_number(c);

	/*
	 * The vendor-specific functions might have changed features.  Now
	 * we do "generic changes."
	 */

	/* If the model name is still unset, do table lookup. */
	if (!c->x86_model_id[0]) {
		char *p;
		p = table_lookup_model(c);
		if (p)
			strcpy(c->x86_model_id, p);
		else
			/* Last resort... */
			sprintf(c->x86_model_id, "%02x/%02x",
				c->x86, c->x86_model);
	}

	/*
	 * On SMP, boot_cpu_data holds the common feature set between
	 * all CPUs; so make sure that we indicate which features are
	 * common between the CPUs.  The first time this routine gets
	 * executed, c == &boot_cpu_data.
	 */
	if (c != &boot_cpu_data) {
		/* AND the already accumulated flags with these */
		for (i = 0; i < NCAPINTS; i++)
			boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
	}

	/* Clear all flags overridden by options */
	for (i = 0; i < NCAPINTS; i++)
		c->x86_capability[i] &= ~cleared_cpu_caps[i];

	/* Init Machine Check Exception if available. */
	mcheck_init(c);

	select_idle_routine(c);
}

void __init identify_boot_cpu(void)
{
	identify_cpu(&boot_cpu_data);
	sysenter_setup();
	enable_sep_cpu();
}

void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c)
{
	BUG_ON(c == &boot_cpu_data);
	identify_cpu(c);
	enable_sep_cpu();
	mtrr_ap_init();
}

#ifdef CONFIG_X86_HT
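/*
 * detect_ht() splits the initial APIC ID into package and core fields.
 * CPUID.1 EBX[23:16] gives the number of logical processors per
 * package; its order (log2, rounded up) is how many low APIC-ID bits
 * address threads and cores within a package, and the order of
 * x86_max_cores says how many of those bits are the core number.  As a
 * rough example with hypothetical IDs: with 2 cores of 2 threads each,
 * APIC ID 5 (0b101) would be thread 1, core 0, package 1.
 */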
void __cpuinit detect_ht(struct cpuinfo_x86 *c)
{
	u32 eax, ebx, ecx, edx;
	int index_msb, core_bits;

	cpuid(1, &eax, &ebx, &ecx, &edx);

	if (!cpu_has(c, X86_FEATURE_HT) || cpu_has(c, X86_FEATURE_CMP_LEGACY))
		return;

	smp_num_siblings = (ebx & 0xff0000) >> 16;

	if (smp_num_siblings == 1) {
		printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
	} else if (smp_num_siblings > 1) {

		if (smp_num_siblings > NR_CPUS) {
			printk(KERN_WARNING "CPU: Unsupported number of "
					"siblings %d", smp_num_siblings);
			smp_num_siblings = 1;
			return;
		}

		index_msb = get_count_order(smp_num_siblings);
		c->phys_proc_id = phys_pkg_id(c->initial_apicid, index_msb);

		printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
		       c->phys_proc_id);

		smp_num_siblings = smp_num_siblings / c->x86_max_cores;

		index_msb = get_count_order(smp_num_siblings);

		core_bits = get_count_order(c->x86_max_cores);

		c->cpu_core_id = phys_pkg_id(c->initial_apicid, index_msb) &
					     ((1 << core_bits) - 1);

		if (c->x86_max_cores > 1)
			printk(KERN_INFO "CPU: Processor Core ID: %d\n",
			       c->cpu_core_id);
	}
}
#endif

static __init int setup_noclflush(char *arg)
{
	setup_clear_cpu_cap(X86_FEATURE_CLFLSH);
	return 1;
}
__setup("noclflush", setup_noclflush);

void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
{
	char *vendor = NULL;

	if (c->x86_vendor < X86_VENDOR_NUM)
		vendor = this_cpu->c_vendor;
	else if (c->cpuid_level >= 0)
		vendor = c->x86_vendor_id;

	if (vendor && strncmp(c->x86_model_id, vendor, strlen(vendor)))
		printk("%s ", vendor);

	if (!c->x86_model_id[0])
		printk("%d86", c->x86);
	else
		printk("%s", c->x86_model_id);

	if (c->x86_mask || c->cpuid_level >= 0)
		printk(" stepping %02x\n", c->x86_mask);
	else
		printk("\n");
}

static __init int setup_disablecpuid(char *arg)
{
	int bit;
	if (get_option(&arg, &bit) && bit < NCAPINTS*32)
		setup_clear_cpu_cap(bit);
	else
		return 0;
	return 1;
}
__setup("clearcpuid=", setup_disablecpuid);

cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE;

void __init early_cpu_init(void)
{
	struct cpu_vendor_dev *cvdev;

	for (cvdev = __x86cpuvendor_start;
	     cvdev < __x86cpuvendor_end;
	     cvdev++)
		cpu_devs[cvdev->vendor] = cvdev->cpu_dev;

	early_cpu_detect();
}

/* Make sure %fs is initialized properly in idle threads */
struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
{
	memset(regs, 0, sizeof(struct pt_regs));
	regs->fs = __KERNEL_PERCPU;
	return regs;
}

/* Current gdt points %fs at the "master" per-cpu area: after this,
 * it's on the real one. */
void switch_to_new_gdt(void)
{
	struct desc_ptr gdt_descr;

	gdt_descr.address = (long)get_cpu_gdt_table(smp_processor_id());
	gdt_descr.size = GDT_SIZE - 1;
	load_gdt(&gdt_descr);
	asm("mov %0, %%fs" : : "r" (__KERNEL_PERCPU) : "memory");
}

/*
 * cpu_init() initializes state that is per-CPU.  Some data is already
 * initialized (naturally) in the bootstrap process, such as the GDT
 * and IDT.  We reload them nevertheless; this function acts as a
 * 'CPU state barrier': nothing should get across.
 */
void __cpuinit cpu_init(void)
{
	int cpu = smp_processor_id();
	struct task_struct *curr = current;
	struct tss_struct *t = &per_cpu(init_tss, cpu);
	struct thread_struct *thread = &curr->thread;

	if (cpu_test_and_set(cpu, cpu_initialized)) {
		printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
		for (;;) local_irq_enable();
	}

	printk(KERN_INFO "Initializing CPU#%d\n", cpu);

	if (cpu_has_vme || cpu_has_tsc || cpu_has_de)
		clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);

	load_idt(&idt_descr);
	switch_to_new_gdt();

	/*
	 * Set up and load the per-CPU TSS and LDT
	 */
	atomic_inc(&init_mm.mm_count);
	curr->active_mm = &init_mm;
	if (curr->mm)
		BUG();
	enter_lazy_tlb(&init_mm, curr);

	load_sp0(t, thread);
	set_tss_desc(cpu, t);
	load_TR_desc();
	load_LDT(&init_mm.context);

#ifdef CONFIG_DOUBLEFAULT
	/* Set up doublefault TSS pointer in the GDT */
	__set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss);
#endif

	/* Clear %gs. */
	asm volatile ("mov %0, %%gs" : : "r" (0));

	/* Clear all 6 debug registers: */
	set_debugreg(0, 0);
	set_debugreg(0, 1);
	set_debugreg(0, 2);
	set_debugreg(0, 3);
	set_debugreg(0, 6);
	set_debugreg(0, 7);

	/*
	 * Force FPU initialization:
	 */
	current_thread_info()->status = 0;
	clear_used_math();
	mxcsr_feature_mask_init();
}

#ifdef CONFIG_HOTPLUG_CPU
void __cpuinit cpu_uninit(void)
{
	int cpu = raw_smp_processor_id();
	cpu_clear(cpu, cpu_initialized);

	/* lazy TLB state */
	per_cpu(cpu_tlbstate, cpu).state = 0;
	per_cpu(cpu_tlbstate, cpu).active_mm = &init_mm;
}
#endif