/*
 * Architecture-specific setup.
 *
 * Copyright (C) 1998-2001, 2003-2004 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *	Stephane Eranian <eranian@hpl.hp.com>
 * Copyright (C) 2000, 2004 Intel Corp
 *	Rohit Seth <rohit.seth@intel.com>
 *	Suresh Siddha <suresh.b.siddha@intel.com>
 *	Gordon Jin <gordon.jin@intel.com>
 * Copyright (C) 1999 VA Linux Systems
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 *
 * 12/26/04 S.Siddha, G.Jin, R.Seth
 *			Add multi-threading and multi-core detection
 * 11/12/01 D.Mosberger	Convert get_cpuinfo() to seq_file based show_cpuinfo().
 * 04/04/00 D.Mosberger	renamed cpu_initialized to cpu_online_map
 * 03/31/00 R.Seth	cpu_initialized and current->processor fixes
 * 02/04/00 D.Mosberger	some more get_cpuinfo fixes...
 * 02/01/00 R.Seth	fixed get_cpuinfo for SMP
 * 01/07/99 S.Eranian	added the support for command line argument
 * 06/24/99 W.Drummond	added boot_cpu_data.
 * 05/28/05 Z. Menyhart	Dynamic stride size for "flush_icache_range()"
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/init.h>

#include <linux/acpi.h>
#include <linux/bootmem.h>
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/reboot.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/threads.h>
#include <linux/tty.h>
#include <linux/dmi.h>
#include <linux/serial.h>
#include <linux/serial_core.h>
#include <linux/efi.h>
#include <linux/initrd.h>
#include <linux/pm.h>
#include <linux/cpufreq.h>

#include <asm/ia32.h>
#include <asm/machvec.h>
#include <asm/mca.h>
#include <asm/meminit.h>
#include <asm/page.h>
#include <asm/patch.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/sal.h>
#include <asm/sections.h>
#include <asm/serial.h>
#include <asm/setup.h>
#include <asm/smp.h>
#include <asm/system.h>
#include <asm/unistd.h>

#if defined(CONFIG_SMP) && (IA64_CPU_SIZE > PAGE_SIZE)
# error "struct cpuinfo_ia64 too big!"
#endif

#ifdef CONFIG_SMP
unsigned long __per_cpu_offset[NR_CPUS];
EXPORT_SYMBOL(__per_cpu_offset);
#endif

extern void ia64_setup_printk_clock(void);

DEFINE_PER_CPU(struct cpuinfo_ia64, cpu_info);
DEFINE_PER_CPU(unsigned long, local_per_cpu_offset);
DEFINE_PER_CPU(unsigned long, ia64_phys_stacked_size_p8);
unsigned long ia64_cycles_per_usec;
struct ia64_boot_param *ia64_boot_param;
struct screen_info screen_info;
unsigned long vga_console_iobase;
unsigned long vga_console_membase;

static struct resource data_resource = {
	.name	= "Kernel data",
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
};

static struct resource code_resource = {
	.name	= "Kernel code",
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
};
extern void efi_initialize_iomem_resources(struct resource *,
					   struct resource *);
extern char _text[], _end[], _etext[];

unsigned long ia64_max_cacheline_size;

int dma_get_cache_alignment(void)
{
	return ia64_max_cacheline_size;
}
EXPORT_SYMBOL(dma_get_cache_alignment);

unsigned long ia64_iobase;	/* virtual address for I/O accesses */
EXPORT_SYMBOL(ia64_iobase);
struct io_space io_space[MAX_IO_SPACES];
EXPORT_SYMBOL(io_space);
unsigned int num_io_spaces;

/*
 * "flush_icache_range()" needs to know what processor-dependent stride size to
 * use when it makes i-cache(s) coherent with d-caches.
 */
#define	I_CACHE_STRIDE_SHIFT	5	/* Safest way to go: 32 bytes by 32 bytes */
unsigned long ia64_i_cache_stride_shift = ~0;

/*
 * The merge_mask variable needs to be set to (max(iommu_page_size(iommu)) - 1).
 * This mask specifies a mask of address bits that must be 0 in order for two
 * buffers to be mergeable by the I/O MMU (i.e., the end address of the first
 * buffer and the start address of the second buffer must be aligned to
 * (merge_mask+1) in order to be mergeable).  By default, we assume there is no
 * I/O MMU which can merge physically discontiguous buffers, so we set the
 * merge_mask to ~0UL, which corresponds to an IOMMU page-size of 2^64.
 */
unsigned long ia64_max_iommu_merge_mask = ~0UL;
EXPORT_SYMBOL(ia64_max_iommu_merge_mask);
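
/*
 * Illustration of the merge_mask semantics described above (a sketch, not
 * code the kernel actually uses): two buffers can be merged only if the
 * boundary between them is aligned to (merge_mask + 1).  With the default
 * mask of ~0UL the test below never succeeds, i.e. nothing is mergeable.
 */
#if 0	/* example only -- buffers_mergeable() is a hypothetical helper */
static inline int buffers_mergeable(unsigned long end_of_first,
				    unsigned long start_of_second)
{
	/* all masked address bits must be zero at the buffer boundary */
	return !((end_of_first | start_of_second) & ia64_max_iommu_merge_mask);
}
#endif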

/*
 * We use a special marker for the end of memory and it uses the extra (+1) slot
 */
struct rsvd_region rsvd_region[IA64_MAX_RSVD_REGIONS + 1] __initdata;
int num_rsvd_regions __initdata;

/*
 * Filter incoming memory segments based on the primitive map created from the
 * boot parameters.  Segments contained in the map are removed from the memory
 * ranges.  A caller-specified function is called with the memory ranges that
 * remain after filtering.  This routine does not assume the incoming segments
 * are sorted.
 */
int __init
filter_rsvd_memory (unsigned long start, unsigned long end, void *arg)
{
	unsigned long range_start, range_end, prev_start;
	void (*func)(unsigned long, unsigned long, int);
	int i;

#if IGNORE_PFN0
	if (start == PAGE_OFFSET) {
		printk(KERN_WARNING "warning: skipping physical page 0\n");
		start += PAGE_SIZE;
		if (start >= end)
			return 0;
	}
#endif
	/*
	 * lowest possible address (the walker uses virtual addresses)
	 */
	prev_start = PAGE_OFFSET;
	func = arg;

	for (i = 0; i < num_rsvd_regions; ++i) {
		range_start = max(start, prev_start);
		range_end   = min(end, rsvd_region[i].start);

		if (range_start < range_end)
			call_pernode_memory(__pa(range_start), range_end - range_start, func);

		/* nothing more available in this segment */
		if (range_end == end)
			return 0;

		prev_start = rsvd_region[i].end;
	}
	/* end of memory marker allows full processing inside loop body */
	return 0;
}
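
/*
 * Worked example of the walk above (addresses illustrative): with
 * rsvd_region[] = { [8M, 9M), [~0UL, ~0UL) } and an incoming segment
 * [7M, 10M), the first iteration hands [7M, 8M) to the callback and
 * advances prev_start to 9M; the second iteration, against the ~0UL
 * end-of-memory marker, hands over the remaining [9M, 10M) and returns.
 * The marker is what lets the tail of a segment be processed inside the
 * loop body itself.  Note this only works if rsvd_region[] is sorted,
 * which sort_regions() below guarantees.
 */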

static void __init
sort_regions (struct rsvd_region *rsvd_region, int max)
{
	int j;

	/* simple bubble sorting */
	while (max--) {
		for (j = 0; j < max; ++j) {
			if (rsvd_region[j].start > rsvd_region[j+1].start) {
				struct rsvd_region tmp;
				tmp = rsvd_region[j];
				rsvd_region[j] = rsvd_region[j + 1];
				rsvd_region[j + 1] = tmp;
			}
		}
	}
}

/*
 * Request address space for all standard resources
 */
static int __init register_memory(void)
{
	code_resource.start = ia64_tpa(_text);
	code_resource.end   = ia64_tpa(_etext) - 1;
	data_resource.start = ia64_tpa(_etext);
	data_resource.end   = ia64_tpa(_end) - 1;
	efi_initialize_iomem_resources(&code_resource, &data_resource);

	return 0;
}

__initcall(register_memory);

/**
 * reserve_memory - setup reserved memory areas
 *
 * Setup the reserved memory areas set aside for the boot parameters,
 * initrd, etc.  There are currently %IA64_MAX_RSVD_REGIONS defined,
 * see include/asm-ia64/meminit.h if you need to define more.
 */
void __init
reserve_memory (void)
{
	int n = 0;

	/*
	 * none of the entries in this table overlap
	 */
	rsvd_region[n].start = (unsigned long) ia64_boot_param;
	rsvd_region[n].end   = rsvd_region[n].start + sizeof(*ia64_boot_param);
	n++;

	rsvd_region[n].start = (unsigned long) __va(ia64_boot_param->efi_memmap);
	rsvd_region[n].end   = rsvd_region[n].start + ia64_boot_param->efi_memmap_size;
	n++;

	rsvd_region[n].start = (unsigned long) __va(ia64_boot_param->command_line);
	rsvd_region[n].end   = (rsvd_region[n].start
				+ strlen(__va(ia64_boot_param->command_line)) + 1);
	n++;

	rsvd_region[n].start = (unsigned long) ia64_imva((void *)KERNEL_START);
	rsvd_region[n].end   = (unsigned long) ia64_imva(_end);
	n++;

#ifdef CONFIG_BLK_DEV_INITRD
	if (ia64_boot_param->initrd_start) {
		rsvd_region[n].start = (unsigned long)__va(ia64_boot_param->initrd_start);
		rsvd_region[n].end   = rsvd_region[n].start + ia64_boot_param->initrd_size;
		n++;
	}
#endif

	efi_memmap_init(&rsvd_region[n].start, &rsvd_region[n].end);
	n++;

	/* end of memory marker */
	rsvd_region[n].start = ~0UL;
	rsvd_region[n].end   = ~0UL;
	n++;

	num_rsvd_regions = n;

	sort_regions(rsvd_region, num_rsvd_regions);
}

/**
 * find_initrd - get initrd parameters from the boot parameter structure
 *
 * Grab the initrd start and end from the boot parameter struct given us by
 * the boot loader.
 */
void __init
find_initrd (void)
{
#ifdef CONFIG_BLK_DEV_INITRD
	if (ia64_boot_param->initrd_start) {
		initrd_start = (unsigned long)__va(ia64_boot_param->initrd_start);
		initrd_end   = initrd_start + ia64_boot_param->initrd_size;

		printk(KERN_INFO "Initial ramdisk at: 0x%lx (%lu bytes)\n",
		       initrd_start, ia64_boot_param->initrd_size);
	}
#endif
}

static void __init
io_port_init (void)
{
	unsigned long phys_iobase;

	/*
	 * Set `iobase' based on the EFI memory map or, failing that, the
	 * value firmware left in ar.k0.
	 *
	 * Note that in ia32 mode, IN/OUT instructions use ar.k0 to compute
	 * the port's virtual address, so ia32_load_state() loads it with a
	 * user virtual address.  But in ia64 mode, glibc uses the
	 * *physical* address in ar.k0 to mmap the appropriate area from
	 * /dev/mem, and the inX()/outX() interfaces use MMIO.  In both
	 * cases, user-mode can only use the legacy 0-64K I/O port space.
	 *
	 * ar.k0 is not involved in kernel I/O port accesses, which can use
	 * any of the I/O port spaces and are done via MMIO using the
	 * virtual mmio_base from the appropriate io_space[].
	 */
	phys_iobase = efi_get_iobase();
	if (!phys_iobase) {
		phys_iobase = ia64_get_kr(IA64_KR_IO_BASE);
		printk(KERN_INFO "No I/O port range found in EFI memory map, "
		       "falling back to AR.KR0 (0x%lx)\n", phys_iobase);
	}
	ia64_iobase = (unsigned long) ioremap(phys_iobase, 0);
	ia64_set_kr(IA64_KR_IO_BASE, __pa(ia64_iobase));

	/* setup legacy IO port space */
	io_space[0].mmio_base = ia64_iobase;
	io_space[0].sparse = 1;
	num_io_spaces = 1;
}
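
/*
 * Since io_space[0] is registered as sparse above, port numbers in the
 * legacy space are not simple offsets from mmio_base; they go through a
 * sparse encoding when the port accessors compute an MMIO address.  A
 * sketch of that translation (see __ia64_mk_io_addr() and the IO_SPACE_*
 * macros in include/asm-ia64/io.h for the authoritative version; the
 * helper name here is hypothetical):
 */
#if 0	/* example only */
static inline void *legacy_port_to_mmio(unsigned long port)
{
	struct io_space *space = &io_space[0];
	unsigned long offset;

	/* sparse spaces spread the port number out; dense spaces don't */
	if (space->sparse)
		offset = IO_SPACE_SPARSE_ENCODING(port);
	else
		offset = port;
	return (void *) (space->mmio_base | offset);
}
#endif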

/**
 * early_console_setup - setup debugging console
 *
 * Consoles started here require little enough setup that we can start using
 * them very early in the boot process, either right after the machine
 * vector initialization, or even earlier if the drivers can detect their
 * hardware.
 *
 * Returns non-zero if a console couldn't be set up.
 */
static inline int __init
early_console_setup (char *cmdline)
{
	int earlycons = 0;

#ifdef CONFIG_SERIAL_SGI_L1_CONSOLE
	{
		extern int sn_serial_console_early_setup(void);
		if (!sn_serial_console_early_setup())
			earlycons++;
	}
#endif
#ifdef CONFIG_EFI_PCDP
	if (!efi_setup_pcdp_console(cmdline))
		earlycons++;
#endif
#ifdef CONFIG_SERIAL_8250_CONSOLE
	if (!early_serial_console_init(cmdline))
		earlycons++;
#endif

	return (earlycons) ? 0 : -1;
}

static inline void
mark_bsp_online (void)
{
#ifdef CONFIG_SMP
	/* If we register an early console, allow CPU 0 to printk */
	cpu_set(smp_processor_id(), cpu_online_map);
#endif
}

#ifdef CONFIG_SMP
static void __init
check_for_logical_procs (void)
{
	pal_logical_to_physical_t info;
	s64 status;

	status = ia64_pal_logical_to_phys(0, &info);
	if (status == -1) {
		printk(KERN_INFO "No logical to physical processor mapping "
		       "available\n");
		return;
	}
	if (status) {
		printk(KERN_ERR "ia64_pal_logical_to_phys failed with %ld\n",
		       status);
		return;
	}
	/*
	 * Total number of siblings that BSP has, though not all of them
	 * may have booted successfully.  The correct number of siblings
	 * booted is in info.overview_num_log.
	 */
	smp_num_siblings = info.overview_tpc;
	smp_num_cpucores = info.overview_cpp;
}
#endif

static __initdata int nomca;
static __init int setup_nomca(char *s)
{
	nomca = 1;
	return 0;
}
early_param("nomca", setup_nomca);

void __init
setup_arch (char **cmdline_p)
{
	unw_init();

	ia64_patch_vtop((u64) __start___vtop_patchlist, (u64) __end___vtop_patchlist);

	*cmdline_p = __va(ia64_boot_param->command_line);
	strlcpy(saved_command_line, *cmdline_p, COMMAND_LINE_SIZE);

	efi_init();
	io_port_init();

	parse_early_param();

#ifdef CONFIG_IA64_GENERIC
	machvec_init(NULL);
#endif

	if (early_console_setup(*cmdline_p) == 0)
		mark_bsp_online();

#ifdef CONFIG_ACPI
	/* Initialize the ACPI boot-time table parser */
	acpi_table_init();
# ifdef CONFIG_ACPI_NUMA
	acpi_numa_init();
# endif
#else
# ifdef CONFIG_SMP
	smp_build_cpu_map();	/* happens, e.g., with the Ski simulator */
# endif
#endif /* CONFIG_ACPI */

	find_memory();

	/* process SAL system table: */
	ia64_sal_init(__va(efi.sal_systab));

	ia64_setup_printk_clock();

#ifdef CONFIG_SMP
	cpu_physical_id(0) = hard_smp_processor_id();

	cpu_set(0, cpu_sibling_map[0]);
	cpu_set(0, cpu_core_map[0]);

	check_for_logical_procs();
	if (smp_num_cpucores > 1)
		printk(KERN_INFO
		       "cpu package is Multi-Core capable: number of cores=%d\n",
		       smp_num_cpucores);
	if (smp_num_siblings > 1)
		printk(KERN_INFO
		       "cpu package is Multi-Threading capable: number of siblings=%d\n",
		       smp_num_siblings);
#endif

	cpu_init();		/* initialize the bootstrap CPU */
	mmu_context_init();	/* initialize context_id bitmap */

#ifdef CONFIG_ACPI
	acpi_boot_init();
#endif

#ifdef CONFIG_VT
	if (!conswitchp) {
# if defined(CONFIG_DUMMY_CONSOLE)
		conswitchp = &dummy_con;
# endif
# if defined(CONFIG_VGA_CONSOLE)
		/*
		 * Non-legacy systems may route legacy VGA MMIO range to system
		 * memory.  vga_con probes the MMIO hole, so memory looks like
		 * a VGA device to it.  The EFI memory map can tell us if it's
		 * memory so we can avoid this problem.
		 */
		if (efi_mem_type(0xA0000) != EFI_CONVENTIONAL_MEMORY)
			conswitchp = &vga_con;
# endif
	}
#endif

	/* enable IA-64 Machine Check Abort Handling unless disabled */
	if (!nomca)
		ia64_mca_init();

	platform_setup(cmdline_p);
	paging_init();
}
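
/*
 * For reference, the format string in show_cpuinfo() below produces one
 * /proc/cpuinfo stanza per online CPU along these lines (the values here
 * are made up for illustration):
 *
 *	processor  : 0
 *	vendor     : GenuineIntel
 *	arch       : IA-64
 *	family     : 31
 *	model      : 1
 *	model name : Madison
 *	revision   : 5
 *	archrev    : 0
 *	features   : branchlong
 *	cpu number : 0
 *	cpu regs   : 4
 *	cpu MHz    : 1500.000000
 *	itc MHz    : 1500.000000
 *	BogoMIPS   : 2244.54
 */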
/*
 * Display CPU info for all CPUs.
 */
static int
show_cpuinfo (struct seq_file *m, void *v)
{
#ifdef CONFIG_SMP
#	define lpj	c->loops_per_jiffy
#	define cpunum	c->cpu
#else
#	define lpj	loops_per_jiffy
#	define cpunum	0
#endif
	static struct {
		unsigned long mask;
		const char *feature_name;
	} feature_bits[] = {
		{ 1UL << 0, "branchlong" },
		{ 1UL << 1, "spontaneous deferral"},
		{ 1UL << 2, "16-byte atomic ops" }
	};
	char features[128], *cp, sep;
	struct cpuinfo_ia64 *c = v;
	unsigned long mask;
	unsigned long proc_freq;
	int i;

	mask = c->features;

	/* build the feature string: */
	memcpy(features, " standard", 10);
	cp = features;
	sep = 0;
	for (i = 0; i < (int) ARRAY_SIZE(feature_bits); ++i) {
		if (mask & feature_bits[i].mask) {
			if (sep)
				*cp++ = sep;
			sep = ',';
			*cp++ = ' ';
			strcpy(cp, feature_bits[i].feature_name);
			cp += strlen(feature_bits[i].feature_name);
			mask &= ~feature_bits[i].mask;
		}
	}
	if (mask) {
		/* print unknown features as a hex value: */
		if (sep)
			*cp++ = sep;
		sprintf(cp, " 0x%lx", mask);
	}

	proc_freq = cpufreq_quick_get(cpunum);
	if (!proc_freq)
		proc_freq = c->proc_freq / 1000;

	seq_printf(m,
		   "processor  : %d\n"
		   "vendor     : %s\n"
		   "arch       : IA-64\n"
		   "family     : %u\n"
		   "model      : %u\n"
		   "model name : %s\n"
		   "revision   : %u\n"
		   "archrev    : %u\n"
		   "features   :%s\n"	/* don't change this---it _is_ right! */
		   "cpu number : %lu\n"
		   "cpu regs   : %u\n"
		   "cpu MHz    : %lu.%06lu\n"
		   "itc MHz    : %lu.%06lu\n"
		   "BogoMIPS   : %lu.%02lu\n",
		   cpunum, c->vendor, c->family, c->model,
		   c->model_name, c->revision, c->archrev,
		   features, c->ppn, c->number,
		   proc_freq / 1000, proc_freq % 1000,
		   c->itc_freq / 1000000, c->itc_freq % 1000000,
		   lpj*HZ/500000, (lpj*HZ/5000) % 100);
#ifdef CONFIG_SMP
	seq_printf(m, "siblings   : %u\n", cpus_weight(cpu_core_map[cpunum]));
	if (c->threads_per_core > 1 || c->cores_per_socket > 1)
		seq_printf(m,
			   "physical id: %u\n"
			   "core id    : %u\n"
			   "thread id  : %u\n",
			   c->socket_id, c->core_id, c->thread_id);
#endif
	seq_printf(m, "\n");

	return 0;
}
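
/*
 * The iterator below follows the standard seq_file protocol: c_start()
 * positions the cursor on the first online CPU at or after *pos (or
 * returns NULL at the end), c_next() advances it, c_stop() would release
 * any held resources (none are held here), and show_cpuinfo() above
 * formats the record the cursor points at.
 */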
static void *
c_start (struct seq_file *m, loff_t *pos)
{
#ifdef CONFIG_SMP
	while (*pos < NR_CPUS && !cpu_isset(*pos, cpu_online_map))
		++*pos;
#endif
	return *pos < NR_CPUS ? cpu_data(*pos) : NULL;
}

static void *
c_next (struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return c_start(m, pos);
}

static void
c_stop (struct seq_file *m, void *v)
{
}

struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= show_cpuinfo
};

static char brandname[128];

static char * __cpuinit
get_model_name(__u8 family, __u8 model)
{
	char brand[128];

	if (ia64_pal_get_brand_info(brand)) {
		if (family == 0x7)
			memcpy(brand, "Merced", 7);
		else if (family == 0x1f) switch (model) {
			case 0: memcpy(brand, "McKinley", 9); break;
			case 1: memcpy(brand, "Madison", 8); break;
			case 2: memcpy(brand, "Madison up to 9M cache", 23); break;
		} else
			memcpy(brand, "Unknown", 8);
	}
	if (brandname[0] == '\0')
		return strcpy(brandname, brand);
	else if (strcmp(brandname, brand) == 0)
		return brandname;
	else
		return kstrdup(brand, GFP_KERNEL);
}

static void __cpuinit
identify_cpu (struct cpuinfo_ia64 *c)
{
	union {
		unsigned long bits[5];
		struct {
			/* id 0 & 1: */
			char vendor[16];

			/* id 2 */
			u64 ppn;		/* processor serial number */

			/* id 3: */
			unsigned number		:  8;
			unsigned revision	:  8;
			unsigned model		:  8;
			unsigned family		:  8;
			unsigned archrev	:  8;
			unsigned reserved	: 24;

			/* id 4: */
			u64 features;
		} field;
	} cpuid;
	pal_vm_info_1_u_t vm1;
	pal_vm_info_2_u_t vm2;
	pal_status_t status;
	unsigned long impl_va_msb = 50, phys_addr_size = 44;	/* Itanium defaults */
	int i;

	for (i = 0; i < 5; ++i)
		cpuid.bits[i] = ia64_get_cpuid(i);

	memcpy(c->vendor, cpuid.field.vendor, 16);
#ifdef CONFIG_SMP
	c->cpu = smp_processor_id();

	/* below default values will be overwritten by identify_siblings()
	 * for Multi-Threading/Multi-Core capable CPUs
	 */
	c->threads_per_core = c->cores_per_socket = c->num_log = 1;
	c->socket_id = -1;

	identify_siblings(c);
#endif
	c->ppn = cpuid.field.ppn;
	c->number = cpuid.field.number;
	c->revision = cpuid.field.revision;
	c->model = cpuid.field.model;
	c->family = cpuid.field.family;
	c->archrev = cpuid.field.archrev;
	c->features = cpuid.field.features;
	c->model_name = get_model_name(c->family, c->model);

	status = ia64_pal_vm_summary(&vm1, &vm2);
	if (status == PAL_STATUS_SUCCESS) {
		impl_va_msb = vm2.pal_vm_info_2_s.impl_va_msb;
		phys_addr_size = vm1.pal_vm_info_1_s.phys_add_size;
	}
	c->unimpl_va_mask = ~((7L<<61) | ((1L << (impl_va_msb + 1)) - 1));
	c->unimpl_pa_mask = ~((1L<<63) | ((1L << phys_addr_size) - 1));
}
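
/*
 * Worked example for the two masks computed at the end of identify_cpu(),
 * using the Itanium defaults: with impl_va_msb = 50,
 * ~((7L<<61) | ((1L<<51) - 1)) clears the region bits 63..61 and the
 * implemented bits 50..0, leaving unimpl_va_mask with bits 60..51 set;
 * with phys_addr_size = 44, ~((1L<<63) | ((1L<<44) - 1)) similarly leaves
 * unimpl_pa_mask with bits 62..44 set (bit 63 being the uncacheable
 * attribute bit in physical addressing).  An address that intersects its
 * mask uses bits the CPU does not implement.
 */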

void
setup_per_cpu_areas (void)
{
	/* start_kernel() requires this... */
#ifdef CONFIG_ACPI_HOTPLUG_CPU
	prefill_possible_map();
#endif
}

/*
 * Calculate the max. cache line size.
 *
 * In addition, the minimum of the i-cache stride sizes is calculated for
 * "flush_icache_range()".
 */
static void __cpuinit
get_max_cacheline_size (void)
{
	unsigned long line_size, max = 1;
	unsigned int cache_size = 0;
	u64 l, levels, unique_caches;
	pal_cache_config_info_t cci;
	s64 status;

	status = ia64_pal_cache_summary(&levels, &unique_caches);
	if (status != 0) {
		printk(KERN_ERR "%s: ia64_pal_cache_summary() failed (status=%ld)\n",
		       __FUNCTION__, status);
		max = SMP_CACHE_BYTES;
		/* Safest setup for "flush_icache_range()" */
		ia64_i_cache_stride_shift = I_CACHE_STRIDE_SHIFT;
		goto out;
	}

	for (l = 0; l < levels; ++l) {
		status = ia64_pal_cache_config_info(l, /* cache_type (data_or_unified)= */ 2,
						    &cci);
		if (status != 0) {
			printk(KERN_ERR
			       "%s: ia64_pal_cache_config_info(l=%lu, 2) failed (status=%ld)\n",
			       __FUNCTION__, l, status);
			max = SMP_CACHE_BYTES;
			/* The safest setup for "flush_icache_range()" */
			cci.pcci_stride = I_CACHE_STRIDE_SHIFT;
			cci.pcci_unified = 1;
		}
		line_size = 1 << cci.pcci_line_size;
		if (line_size > max)
			max = line_size;
		if (cache_size < cci.pcci_cache_size)
			cache_size = cci.pcci_cache_size;
		if (!cci.pcci_unified) {
			status = ia64_pal_cache_config_info(l,
						    /* cache_type (instruction)= */ 1,
						    &cci);
			if (status != 0) {
				printk(KERN_ERR
				       "%s: ia64_pal_cache_config_info(l=%lu, 1) failed (status=%ld)\n",
				       __FUNCTION__, l, status);
				/* The safest setup for "flush_icache_range()" */
				cci.pcci_stride = I_CACHE_STRIDE_SHIFT;
			}
		}
		if (cci.pcci_stride < ia64_i_cache_stride_shift)
			ia64_i_cache_stride_shift = cci.pcci_stride;
	}
  out:
#ifdef CONFIG_SMP
	max_cache_size = max(max_cache_size, cache_size);
#endif
	if (max > ia64_max_cacheline_size)
		ia64_max_cacheline_size = max;
}
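
/*
 * How the stride computed above gets used: flush_icache_range() (an
 * assembly routine in arch/ia64/lib/flush.S) steps through the range
 * issuing one "fc" (flush cache) instruction per stride.  Roughly, in C
 * (a sketch only; the real loop is hand-scheduled assembly):
 */
#if 0	/* example only */
static void flush_icache_range_sketch(unsigned long start, unsigned long end)
{
	unsigned long stride = 1UL << ia64_i_cache_stride_shift;
	unsigned long addr;

	/* one flush-cache per i-cache stride covers the whole range */
	for (addr = start & ~(stride - 1); addr < end; addr += stride)
		ia64_fc((void *) addr);
	ia64_sync_i();	/* wait for the flushes to complete */
	ia64_srlz_i();	/* serialize the instruction stream */
}
#endif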

/*
 * cpu_init() initializes state that is per-CPU.  This function acts
 * as a 'CPU state barrier', nothing should get across.
 */
void __cpuinit
cpu_init (void)
{
	extern void __cpuinit ia64_mmu_init (void *);
	unsigned long num_phys_stacked;
	pal_vm_info_2_u_t vmi;
	unsigned int max_ctx;
	struct cpuinfo_ia64 *cpu_info;
	void *cpu_data;

	cpu_data = per_cpu_init();

	/*
	 * We set ar.k3 so that assembly code in MCA handler can compute
	 * physical addresses of per cpu variables with a simple:
	 *	phys = ar.k3 + &per_cpu_var
	 */
	ia64_set_kr(IA64_KR_PER_CPU_DATA,
		    ia64_tpa(cpu_data) - (long) __per_cpu_start);

	get_max_cacheline_size();

	/*
	 * We can't pass "local_cpu_data" to identify_cpu() because we haven't called
	 * ia64_mmu_init() yet.  And we can't call ia64_mmu_init() first because it
	 * depends on the data returned by identify_cpu().  We break the dependency by
	 * accessing cpu_data() through the canonical per-CPU address.
	 */
	cpu_info = cpu_data + ((char *) &__ia64_per_cpu_var(cpu_info) - __per_cpu_start);
	identify_cpu(cpu_info);

#ifdef CONFIG_MCKINLEY
	{
#	define FEATURE_SET 16
		struct ia64_pal_retval iprv;

		if (cpu_info->family == 0x1f) {
			PAL_CALL_PHYS(iprv, PAL_PROC_GET_FEATURES, 0, FEATURE_SET, 0);
			if ((iprv.status == 0) && (iprv.v0 & 0x80) && (iprv.v2 & 0x80))
				PAL_CALL_PHYS(iprv, PAL_PROC_SET_FEATURES,
					      (iprv.v1 | 0x80), FEATURE_SET, 0);
		}
	}
#endif

	/* Clear the stack memory reserved for pt_regs: */
	memset(task_pt_regs(current), 0, sizeof(struct pt_regs));

	ia64_set_kr(IA64_KR_FPU_OWNER, 0);

	/*
	 * Initialize the page-table base register to a global
	 * directory with all zeroes.  This ensures that we can handle
	 * TLB-misses to user address-space even before we create the
	 * first user address-space.  This may happen, e.g., due to
	 * aggressive use of lfetch.fault.
	 */
	ia64_set_kr(IA64_KR_PT_BASE, __pa(ia64_imva(empty_zero_page)));

	/*
	 * Initialize default control register to defer speculative faults except
	 * for those arising from TLB misses, which are not deferred.  The
	 * kernel MUST NOT depend on a particular setting of these bits (in other words,
	 * the kernel must have recovery code for all speculative accesses).  Turn on
	 * dcr.lc as per recommendation by the architecture team.  Most IA-32 apps
	 * shouldn't be affected by this (moral: keep your ia32 locks aligned and you'll
	 * be fine).
	 */
	ia64_setreg(_IA64_REG_CR_DCR,  (  IA64_DCR_DP | IA64_DCR_DK | IA64_DCR_DX | IA64_DCR_DR
					| IA64_DCR_DA | IA64_DCR_DD | IA64_DCR_LC));
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
	if (current->mm)
		BUG();

	ia64_mmu_init(ia64_imva(cpu_data));
	ia64_mca_cpu_init(ia64_imva(cpu_data));

#ifdef CONFIG_IA32_SUPPORT
	ia32_cpu_init();
#endif

	/* Clear ITC to eliminate sched_clock() overflows in human time.  */
	ia64_set_itc(0);

	/* disable all local interrupt sources: */
	ia64_set_itv(1 << 16);
	ia64_set_lrr0(1 << 16);
	ia64_set_lrr1(1 << 16);
	ia64_setreg(_IA64_REG_CR_PMV, 1 << 16);
	ia64_setreg(_IA64_REG_CR_CMCV, 1 << 16);

	/* clear TPR & XTP to enable all interrupt classes: */
	ia64_setreg(_IA64_REG_CR_TPR, 0);
#ifdef CONFIG_SMP
	normal_xtp();
#endif

	/* set ia64_ctx.max_rid to the maximum RID that is supported by all CPUs: */
	if (ia64_pal_vm_summary(NULL, &vmi) == 0)
		max_ctx = (1U << (vmi.pal_vm_info_2_s.rid_size - 3)) - 1;
	else {
		printk(KERN_WARNING "cpu_init: PAL VM summary failed, assuming 18 RID bits\n");
		max_ctx = (1U << 15) - 1;	/* use architected minimum */
	}
	/* lower the global limit if this CPU supports fewer RID bits: */
	while (max_ctx < ia64_ctx.max_ctx) {
		unsigned int old = ia64_ctx.max_ctx;
		if (cmpxchg(&ia64_ctx.max_ctx, old, max_ctx) == old)
			break;
	}

	if (ia64_pal_rse_info(&num_phys_stacked, NULL) != 0) {
		printk(KERN_WARNING "cpu_init: PAL RSE info failed; assuming 96 physical "
		       "stacked regs\n");
		num_phys_stacked = 96;
	}
	/* size of physical stacked register partition plus 8 bytes: */
	__get_cpu_var(ia64_phys_stacked_size_p8) = num_phys_stacked*8 + 8;
	platform_cpu_init();
	pm_idle = default_idle;
}

/*
 * On SMP systems, when the scheduler does migration-cost autodetection,
 * it needs a way to flush as much of the CPU's caches as possible.
 */
void sched_cacheflush(void)
{
	ia64_sal_cache_flush(3);
}

void __init
check_bugs (void)
{
	ia64_patch_mckinley_e9((unsigned long) __start___mckinley_e9_bundles,
			       (unsigned long) __end___mckinley_e9_bundles);
}

static int __init run_dmi_scan(void)
{
	dmi_scan_machine();
	return 0;
}
core_initcall(run_dmi_scan);