/*
 * Architecture-specific setup.
 *
 * Copyright (C) 1998-2001, 2003-2004 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *	Stephane Eranian <eranian@hpl.hp.com>
 * Copyright (C) 2000, 2004 Intel Corp
 *	Rohit Seth <rohit.seth@intel.com>
 *	Suresh Siddha <suresh.b.siddha@intel.com>
 *	Gordon Jin <gordon.jin@intel.com>
 * Copyright (C) 1999 VA Linux Systems
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 *
 * 12/26/04 S.Siddha, G.Jin, R.Seth
 *			Add multi-threading and multi-core detection
 * 11/12/01 D.Mosberger	Convert get_cpuinfo() to seq_file based show_cpuinfo().
 * 04/04/00 D.Mosberger	renamed cpu_initialized to cpu_online_map
 * 03/31/00 R.Seth	cpu_initialized and current->processor fixes
 * 02/04/00 D.Mosberger	some more get_cpuinfo fixes...
 * 02/01/00 R.Seth	fixed get_cpuinfo for SMP
 * 01/07/99 S.Eranian	added the support for command line argument
 * 06/24/99 W.Drummond	added boot_cpu_data.
 * 05/28/05 Z. Menyhart	Dynamic stride size for "flush_icache_range()"
 */
#include <linux/module.h>
#include <linux/init.h>

#include <linux/acpi.h>
#include <linux/bootmem.h>
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/reboot.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/threads.h>
#include <linux/screen_info.h>
#include <linux/dmi.h>
#include <linux/serial.h>
#include <linux/serial_core.h>
#include <linux/efi.h>
#include <linux/initrd.h>
#include <linux/pm.h>
#include <linux/cpufreq.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>

#include <asm/ia32.h>
#include <asm/machvec.h>
#include <asm/mca.h>
#include <asm/meminit.h>
#include <asm/page.h>
#include <asm/patch.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/sal.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp.h>
#include <asm/system.h>
#include <asm/unistd.h>

#if defined(CONFIG_SMP) && (IA64_CPU_SIZE > PAGE_SIZE)
# error "struct cpuinfo_ia64 too big!"
#endif

#ifdef CONFIG_SMP
unsigned long __per_cpu_offset[NR_CPUS];
EXPORT_SYMBOL(__per_cpu_offset);
#endif

extern void ia64_setup_printk_clock(void);

DEFINE_PER_CPU(struct cpuinfo_ia64, cpu_info);
DEFINE_PER_CPU(unsigned long, local_per_cpu_offset);
unsigned long ia64_cycles_per_usec;
struct ia64_boot_param *ia64_boot_param;
struct screen_info screen_info;
unsigned long vga_console_iobase;
unsigned long vga_console_membase;

static struct resource data_resource = {
	.name	= "Kernel data",
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
};

static struct resource code_resource = {
	.name	= "Kernel code",
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
};
extern void efi_initialize_iomem_resources(struct resource *,
		struct resource *);
extern char _text[], _end[], _etext[];

unsigned long ia64_max_cacheline_size;

int dma_get_cache_alignment(void)
{
	return ia64_max_cacheline_size;
}
EXPORT_SYMBOL(dma_get_cache_alignment);

unsigned long ia64_iobase;	/* virtual address for I/O accesses */
EXPORT_SYMBOL(ia64_iobase);
struct io_space io_space[MAX_IO_SPACES];
EXPORT_SYMBOL(io_space);
unsigned int num_io_spaces;

/*
 * "flush_icache_range()" needs to know what processor-dependent stride size to use
 * when it makes i-cache(s) coherent with d-caches.
 */
#define	I_CACHE_STRIDE_SHIFT	5	/* Safest way to go: 32 bytes by 32 bytes */
unsigned long ia64_i_cache_stride_shift = ~0;

/*
 * The merge_mask variable needs to be set to (max(iommu_page_size(iommu)) - 1).  This
 * mask specifies a mask of address bits that must be 0 in order for two buffers to be
 * mergeable by the I/O MMU (i.e., the end address of the first buffer and the start
 * address of the second buffer must be aligned to (merge_mask+1) in order to be
 * mergeable).  By default, we assume there is no I/O MMU which can merge physically
 * discontiguous buffers, so we set the merge_mask to ~0UL, which corresponds to an
 * I/O MMU page-size of 2^64.
 */
unsigned long ia64_max_iommu_merge_mask = ~0UL;
EXPORT_SYMBOL(ia64_max_iommu_merge_mask);
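
/*
 * Worked example (hypothetical hardware): an I/O MMU with a 4KB page size
 * would set ia64_max_iommu_merge_mask to 0xfff; two buffers would then be
 * mergeable only if the first ends and the second starts on a 4KB boundary.
 */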

/*
 * We use a special marker for the end of memory and it uses the extra (+1) slot
 */
struct rsvd_region rsvd_region[IA64_MAX_RSVD_REGIONS + 1] __initdata;
int num_rsvd_regions __initdata;


/*
 * Filter incoming memory segments based on the primitive map created from the boot
 * parameters.  Segments contained in the map are removed from the memory ranges.  A
 * caller-specified function is called with the memory ranges that remain after filtering.
 * This routine does not assume the incoming segments are sorted.
 */
int __init
filter_rsvd_memory (unsigned long start, unsigned long end, void *arg)
{
	unsigned long range_start, range_end, prev_start;
	void (*func)(unsigned long, unsigned long, int);
	int i;

#if IGNORE_PFN0
	if (start == PAGE_OFFSET) {
		printk(KERN_WARNING "warning: skipping physical page 0\n");
		start += PAGE_SIZE;
		if (start >= end) return 0;
	}
#endif
	/*
	 * lowest possible address (walker uses virtual)
	 */
	prev_start = PAGE_OFFSET;
	func = arg;

	for (i = 0; i < num_rsvd_regions; ++i) {
		range_start = max(start, prev_start);
		range_end   = min(end, rsvd_region[i].start);

		if (range_start < range_end)
			call_pernode_memory(__pa(range_start), range_end - range_start, func);

		/* nothing more available in this segment */
		if (range_end == end) return 0;

		prev_start = rsvd_region[i].end;
	}
	/* end of memory marker allows full processing inside loop body */
	return 0;
}

static void __init
sort_regions (struct rsvd_region *rsvd_region, int max)
{
	int j;

	/* simple bubble sorting */
	while (max--) {
		for (j = 0; j < max; ++j) {
			if (rsvd_region[j].start > rsvd_region[j+1].start) {
				struct rsvd_region tmp;
				tmp = rsvd_region[j];
				rsvd_region[j] = rsvd_region[j + 1];
				rsvd_region[j + 1] = tmp;
			}
		}
	}
}

/*
 * Request address space for all standard resources
 */
static int __init register_memory(void)
{
	code_resource.start = ia64_tpa(_text);
	code_resource.end   = ia64_tpa(_etext) - 1;
	data_resource.start = ia64_tpa(_etext);
	data_resource.end   = ia64_tpa(_end) - 1;
	efi_initialize_iomem_resources(&code_resource, &data_resource);

	return 0;
}

__initcall(register_memory);

/**
 * reserve_memory - setup reserved memory areas
 *
 * Setup the reserved memory areas set aside for the boot parameters,
 * initrd, etc.  There are currently %IA64_MAX_RSVD_REGIONS defined,
 * see include/asm-ia64/meminit.h if you need to define more.
 */
void __init
reserve_memory (void)
{
	int n = 0;

	/*
	 * none of the entries in this table overlap
	 */
	rsvd_region[n].start = (unsigned long) ia64_boot_param;
	rsvd_region[n].end   = rsvd_region[n].start + sizeof(*ia64_boot_param);
	n++;

	rsvd_region[n].start = (unsigned long) __va(ia64_boot_param->efi_memmap);
	rsvd_region[n].end   = rsvd_region[n].start + ia64_boot_param->efi_memmap_size;
	n++;

	rsvd_region[n].start = (unsigned long) __va(ia64_boot_param->command_line);
	rsvd_region[n].end   = (rsvd_region[n].start
				+ strlen(__va(ia64_boot_param->command_line)) + 1);
	n++;

	rsvd_region[n].start = (unsigned long) ia64_imva((void *)KERNEL_START);
	rsvd_region[n].end   = (unsigned long) ia64_imva(_end);
	n++;

#ifdef CONFIG_BLK_DEV_INITRD
	if (ia64_boot_param->initrd_start) {
		rsvd_region[n].start = (unsigned long)__va(ia64_boot_param->initrd_start);
		rsvd_region[n].end   = rsvd_region[n].start + ia64_boot_param->initrd_size;
		n++;
	}
#endif

	efi_memmap_init(&rsvd_region[n].start, &rsvd_region[n].end);
	n++;
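
	/*
	 * Example with hypothetical values: "crashkernel=128M@256M" reserves
	 * 128MB at physical address 256MB; "crashkernel=128M" (no offset)
	 * lets kdump_find_rsvd_region() below pick a suitable base.
	 */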

#ifdef CONFIG_KEXEC
	/* crashkernel=size@offset specifies the size to reserve for a crash
	 * kernel.  If offset is 0, then it is determined automatically.
	 * By reserving this memory we guarantee that Linux never sets it
	 * up as a DMA target.  Useful for holding code to do something
	 * appropriate after a kernel panic.
	 */
	{
		char *from = strstr(saved_command_line, "crashkernel=");
		unsigned long base, size;
		if (from) {
			size = memparse(from + 12, &from);
			if (*from == '@')
				base = memparse(from+1, &from);
			else
				base = 0;
			if (size) {
				if (!base) {
					sort_regions(rsvd_region, n);
					base = kdump_find_rsvd_region(size,
							rsvd_region, n);
				}
				if (base != ~0UL) {
					rsvd_region[n].start =
						(unsigned long)__va(base);
					rsvd_region[n].end =
						(unsigned long)__va(base + size);
					n++;
					crashk_res.start = base;
					crashk_res.end = base + size - 1;
				}
			}
		}
		efi_memmap_res.start = ia64_boot_param->efi_memmap;
		efi_memmap_res.end = efi_memmap_res.start +
			ia64_boot_param->efi_memmap_size;
		boot_param_res.start = __pa(ia64_boot_param);
		boot_param_res.end = boot_param_res.start +
			sizeof(*ia64_boot_param);
	}
#endif
	/* end of memory marker */
	rsvd_region[n].start = ~0UL;
	rsvd_region[n].end   = ~0UL;
	n++;

	num_rsvd_regions = n;
	BUG_ON(IA64_MAX_RSVD_REGIONS + 1 < n);

	sort_regions(rsvd_region, num_rsvd_regions);
}


/**
 * find_initrd - get initrd parameters from the boot parameter structure
 *
 * Grab the initrd start and end from the boot parameter struct given us by
 * the boot loader.
 */
void __init
find_initrd (void)
{
#ifdef CONFIG_BLK_DEV_INITRD
	if (ia64_boot_param->initrd_start) {
		initrd_start = (unsigned long)__va(ia64_boot_param->initrd_start);
		initrd_end   = initrd_start+ia64_boot_param->initrd_size;

		printk(KERN_INFO "Initial ramdisk at: 0x%lx (%lu bytes)\n",
		       initrd_start, ia64_boot_param->initrd_size);
	}
#endif
}

static void __init
io_port_init (void)
{
	unsigned long phys_iobase;

	/*
	 * Set `iobase' based on the EFI memory map or, failing that, the
	 * value firmware left in ar.k0.
	 *
	 * Note that in ia32 mode, IN/OUT instructions use ar.k0 to compute
	 * the port's virtual address, so ia32_load_state() loads it with a
	 * user virtual address.  But in ia64 mode, glibc uses the
	 * *physical* address in ar.k0 to mmap the appropriate area from
	 * /dev/mem, and the inX()/outX() interfaces use MMIO.  In both
	 * cases, user-mode can only use the legacy 0-64K I/O port space.
	 *
	 * ar.k0 is not involved in kernel I/O port accesses, which can use
	 * any of the I/O port spaces and are done via MMIO using the
	 * virtual mmio_base from the appropriate io_space[].
	 */
	phys_iobase = efi_get_iobase();
	if (!phys_iobase) {
		phys_iobase = ia64_get_kr(IA64_KR_IO_BASE);
		printk(KERN_INFO "No I/O port range found in EFI memory map, "
			"falling back to AR.KR0 (0x%lx)\n", phys_iobase);
	}
	ia64_iobase = (unsigned long) ioremap(phys_iobase, 0);
	ia64_set_kr(IA64_KR_IO_BASE, __pa(ia64_iobase));

	/* setup legacy IO port space */
	io_space[0].mmio_base = ia64_iobase;
	io_space[0].sparse = 1;
	num_io_spaces = 1;
}
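
/*
 * For sparse port spaces, asm/io.h turns a legacy port p into the MMIO
 * address mmio_base | IO_SPACE_SPARSE_ENCODING(p), i.e.
 * mmio_base | ((p >> 2) << 12) | (p & 0xfff); port 0x1f0, for example,
 * lands at offset 0x7c1f0 from mmio_base.
 */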

/**
 * early_console_setup - setup debugging console
 *
 * Consoles started here require little enough setup that we can start using
 * them very early in the boot process, either right after the machine
 * vector initialization, or even before if the drivers can detect their hw.
 *
 * Returns non-zero if a console couldn't be set up.
 */
static inline int __init
early_console_setup (char *cmdline)
{
	int earlycons = 0;

#ifdef CONFIG_SERIAL_SGI_L1_CONSOLE
	{
		extern int sn_serial_console_early_setup(void);
		if (!sn_serial_console_early_setup())
			earlycons++;
	}
#endif
#ifdef CONFIG_EFI_PCDP
	if (!efi_setup_pcdp_console(cmdline))
		earlycons++;
#endif
#ifdef CONFIG_SERIAL_8250_CONSOLE
	if (!early_serial_console_init(cmdline))
		earlycons++;
#endif

	return (earlycons) ? 0 : -1;
}

static inline void
mark_bsp_online (void)
{
#ifdef CONFIG_SMP
	/* If we register an early console, allow CPU 0 to printk */
	cpu_set(smp_processor_id(), cpu_online_map);
#endif
}

#ifdef CONFIG_SMP
static void __init
check_for_logical_procs (void)
{
	pal_logical_to_physical_t info;
	s64 status;

	status = ia64_pal_logical_to_phys(0, &info);
	if (status == -1) {
		printk(KERN_INFO "No logical to physical processor mapping "
		       "available\n");
		return;
	}
	if (status) {
		printk(KERN_ERR "ia64_pal_logical_to_phys failed with %ld\n",
		       status);
		return;
	}
	/*
	 * Total number of siblings that the BSP has, though not all of them
	 * may have booted successfully.  The correct number of siblings
	 * booted is in info.overview_num_log.
	 */
	smp_num_siblings = info.overview_tpc;
	smp_num_cpucores = info.overview_cpp;
}
#endif

static __initdata int nomca;
static __init int setup_nomca(char *s)
{
	nomca = 1;
	return 0;
}
early_param("nomca", setup_nomca);
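
/*
 * Note: the "elfcorehdr=" argument handled below is normally appended to
 * the kdump kernel's command line by kexec-tools and points at the ELF
 * core header that the crashed kernel left in memory.
 */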

#ifdef CONFIG_PROC_VMCORE
/* elfcorehdr= specifies the location of the ELF core header
 * stored by the crashed kernel.
 */
static int __init parse_elfcorehdr(char *arg)
{
	if (!arg)
		return -EINVAL;

	elfcorehdr_addr = memparse(arg, &arg);
	return 0;
}
early_param("elfcorehdr", parse_elfcorehdr);
#endif /* CONFIG_PROC_VMCORE */

void __init
setup_arch (char **cmdline_p)
{
	unw_init();

	ia64_patch_vtop((u64) __start___vtop_patchlist, (u64) __end___vtop_patchlist);

	*cmdline_p = __va(ia64_boot_param->command_line);
	strlcpy(saved_command_line, *cmdline_p, COMMAND_LINE_SIZE);

	efi_init();
	io_port_init();

	parse_early_param();

#ifdef CONFIG_IA64_GENERIC
	machvec_init(NULL);
#endif

	if (early_console_setup(*cmdline_p) == 0)
		mark_bsp_online();

#ifdef CONFIG_ACPI
	/* Initialize the ACPI boot-time table parser */
	acpi_table_init();
# ifdef CONFIG_ACPI_NUMA
	acpi_numa_init();
# endif
#else
# ifdef CONFIG_SMP
	smp_build_cpu_map();	/* happens, e.g., with the Ski simulator */
# endif
#endif /* CONFIG_ACPI */

	find_memory();

	/* process SAL system table: */
	ia64_sal_init(__va(efi.sal_systab));

	ia64_setup_printk_clock();

#ifdef CONFIG_SMP
	cpu_physical_id(0) = hard_smp_processor_id();

	cpu_set(0, cpu_sibling_map[0]);
	cpu_set(0, cpu_core_map[0]);

	check_for_logical_procs();
	if (smp_num_cpucores > 1)
		printk(KERN_INFO
		       "cpu package is Multi-Core capable: number of cores=%d\n",
		       smp_num_cpucores);
	if (smp_num_siblings > 1)
		printk(KERN_INFO
		       "cpu package is Multi-Threading capable: number of siblings=%d\n",
		       smp_num_siblings);
#endif

	cpu_init();	/* initialize the bootstrap CPU */
	mmu_context_init();	/* initialize context_id bitmap */

	check_sal_cache_flush();

#ifdef CONFIG_ACPI
	acpi_boot_init();
#endif

#ifdef CONFIG_VT
	if (!conswitchp) {
# if defined(CONFIG_DUMMY_CONSOLE)
		conswitchp = &dummy_con;
# endif
# if defined(CONFIG_VGA_CONSOLE)
		/*
		 * Non-legacy systems may route legacy VGA MMIO range to system
		 * memory.  vga_con probes the MMIO hole, so memory looks like
		 * a VGA device to it.  The EFI memory map can tell us if it's
		 * memory so we can avoid this problem.
		 */
		if (efi_mem_type(0xA0000) != EFI_CONVENTIONAL_MEMORY)
			conswitchp = &vga_con;
# endif
	}
#endif

	/* enable IA-64 Machine Check Abort Handling unless disabled */
	if (!nomca)
		ia64_mca_init();

	platform_setup(cmdline_p);
	paging_init();
}
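
/*
 * The seq_file callbacks below back /proc/cpuinfo: show_cpuinfo() formats
 * one CPU's data, while c_start()/c_next()/c_stop() iterate over the
 * online CPUs; the cpuinfo_op table is wired up by the generic proc code.
 */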

/*
 * Display CPU info for all CPUs.
 */
static int
show_cpuinfo (struct seq_file *m, void *v)
{
#ifdef CONFIG_SMP
#	define lpj	c->loops_per_jiffy
#	define cpunum	c->cpu
#else
#	define lpj	loops_per_jiffy
#	define cpunum	0
#endif
	static struct {
		unsigned long mask;
		const char *feature_name;
	} feature_bits[] = {
		{ 1UL << 0, "branchlong" },
		{ 1UL << 1, "spontaneous deferral"},
		{ 1UL << 2, "16-byte atomic ops" }
	};
	char features[128], *cp, sep;
	struct cpuinfo_ia64 *c = v;
	unsigned long mask;
	unsigned long proc_freq;
	int i;

	mask = c->features;

	/* build the feature string: */
	memcpy(features, " standard", 10);
	cp = features;
	sep = 0;
	for (i = 0; i < (int) ARRAY_SIZE(feature_bits); ++i) {
		if (mask & feature_bits[i].mask) {
			if (sep)
				*cp++ = sep;
			sep = ',';
			*cp++ = ' ';
			strcpy(cp, feature_bits[i].feature_name);
			cp += strlen(feature_bits[i].feature_name);
			mask &= ~feature_bits[i].mask;
		}
	}
	if (mask) {
		/* print unknown features as a hex value: */
		if (sep)
			*cp++ = sep;
		sprintf(cp, " 0x%lx", mask);
	}

	proc_freq = cpufreq_quick_get(cpunum);
	if (!proc_freq)
		proc_freq = c->proc_freq / 1000;

	seq_printf(m,
		   "processor  : %d\n"
		   "vendor     : %s\n"
		   "arch       : IA-64\n"
		   "family     : %u\n"
		   "model      : %u\n"
		   "model name : %s\n"
		   "revision   : %u\n"
		   "archrev    : %u\n"
		   "features   :%s\n"	/* don't change this---it _is_ right! */
		   "cpu number : %lu\n"
		   "cpu regs   : %u\n"
		   "cpu MHz    : %lu.%06lu\n"
		   "itc MHz    : %lu.%06lu\n"
		   "BogoMIPS   : %lu.%02lu\n",
		   cpunum, c->vendor, c->family, c->model,
		   c->model_name, c->revision, c->archrev,
		   features, c->ppn, c->number,
		   proc_freq / 1000, proc_freq % 1000,
		   c->itc_freq / 1000000, c->itc_freq % 1000000,
		   lpj*HZ/500000, (lpj*HZ/5000) % 100);
#ifdef CONFIG_SMP
	seq_printf(m, "siblings   : %u\n", cpus_weight(cpu_core_map[cpunum]));
	if (c->threads_per_core > 1 || c->cores_per_socket > 1)
		seq_printf(m,
			   "physical id: %u\n"
			   "core id    : %u\n"
			   "thread id  : %u\n",
			   c->socket_id, c->core_id, c->thread_id);
#endif
	seq_printf(m, "\n");

	return 0;
}
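
/*
 * Iterator: *pos is the candidate CPU number; c_start() skips offline
 * CPUs under SMP and returns NULL once *pos reaches NR_CPUS, which
 * terminates the seq_file walk.
 */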

static void *
c_start (struct seq_file *m, loff_t *pos)
{
#ifdef CONFIG_SMP
	while (*pos < NR_CPUS && !cpu_isset(*pos, cpu_online_map))
		++*pos;
#endif
	return *pos < NR_CPUS ? cpu_data(*pos) : NULL;
}

static void *
c_next (struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return c_start(m, pos);
}

static void
c_stop (struct seq_file *m, void *v)
{
}

struct seq_operations cpuinfo_op = {
	.start =	c_start,
	.next =		c_next,
	.stop =		c_stop,
	.show =		show_cpuinfo
};

static char brandname[128];

static char * __cpuinit
get_model_name(__u8 family, __u8 model)
{
	char brand[128];

	memcpy(brand, "Unknown", 8);
	if (ia64_pal_get_brand_info(brand)) {
		if (family == 0x7)
			memcpy(brand, "Merced", 7);
		else if (family == 0x1f) switch (model) {
			case 0: memcpy(brand, "McKinley", 9); break;
			case 1: memcpy(brand, "Madison", 8); break;
			case 2: memcpy(brand, "Madison up to 9M cache", 23); break;
		}
	}
	if (brandname[0] == '\0')
		return strcpy(brandname, brand);
	else if (strcmp(brandname, brand) == 0)
		return brandname;
	else
		return kstrdup(brand, GFP_KERNEL);
}

static void __cpuinit
identify_cpu (struct cpuinfo_ia64 *c)
{
	union {
		unsigned long bits[5];
		struct {
			/* id 0 & 1: */
			char vendor[16];

			/* id 2 */
			u64 ppn;		/* processor serial number */

			/* id 3: */
			unsigned number		:  8;
			unsigned revision	:  8;
			unsigned model		:  8;
			unsigned family		:  8;
			unsigned archrev	:  8;
			unsigned reserved	: 24;

			/* id 4: */
			u64 features;
		} field;
	} cpuid;
	pal_vm_info_1_u_t vm1;
	pal_vm_info_2_u_t vm2;
	pal_status_t status;
	unsigned long impl_va_msb = 50, phys_addr_size = 44;	/* Itanium defaults */
	int i;

	for (i = 0; i < 5; ++i)
		cpuid.bits[i] = ia64_get_cpuid(i);

	memcpy(c->vendor, cpuid.field.vendor, 16);
#ifdef CONFIG_SMP
	c->cpu = smp_processor_id();

	/* The default values below will be overwritten by identify_siblings()
	 * for Multi-Threading/Multi-Core capable CPUs
	 */
	c->threads_per_core = c->cores_per_socket = c->num_log = 1;
	c->socket_id = -1;

	identify_siblings(c);
#endif
	c->ppn = cpuid.field.ppn;
	c->number = cpuid.field.number;
	c->revision = cpuid.field.revision;
	c->model = cpuid.field.model;
	c->family = cpuid.field.family;
	c->archrev = cpuid.field.archrev;
	c->features = cpuid.field.features;
	c->model_name = get_model_name(c->family, c->model);

	status = ia64_pal_vm_summary(&vm1, &vm2);
	if (status == PAL_STATUS_SUCCESS) {
		impl_va_msb = vm2.pal_vm_info_2_s.impl_va_msb;
		phys_addr_size = vm1.pal_vm_info_1_s.phys_add_size;
	}
	c->unimpl_va_mask = ~((7L<<61) | ((1L << (impl_va_msb + 1)) - 1));
	c->unimpl_pa_mask = ~((1L<<63) | ((1L << phys_addr_size) - 1));
}
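
/*
 * Worked example for the masks above: with the Itanium defaults
 * (impl_va_msb = 50, phys_addr_size = 44), unimpl_va_mask covers virtual
 * address bits 51-60 (bits 61-63 select the region and are always
 * implemented), and unimpl_pa_mask covers physical address bits 44-62
 * (bit 63 is the uncached attribute bit).
 */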

void
setup_per_cpu_areas (void)
{
	/* start_kernel() requires this... */
#ifdef CONFIG_ACPI_HOTPLUG_CPU
	prefill_possible_map();
#endif
}

/*
 * Calculate the max. cache line size.
 *
 * In addition, the minimum of the i-cache stride sizes is calculated for
 * "flush_icache_range()".
 */
static void __cpuinit
get_max_cacheline_size (void)
{
	unsigned long line_size, max = 1;
	unsigned int cache_size = 0;
	u64 l, levels, unique_caches;
	pal_cache_config_info_t cci;
	s64 status;

	status = ia64_pal_cache_summary(&levels, &unique_caches);
	if (status != 0) {
		printk(KERN_ERR "%s: ia64_pal_cache_summary() failed (status=%ld)\n",
		       __FUNCTION__, status);
		max = SMP_CACHE_BYTES;
		/* Safest setup for "flush_icache_range()" */
		ia64_i_cache_stride_shift = I_CACHE_STRIDE_SHIFT;
		goto out;
	}

	for (l = 0; l < levels; ++l) {
		status = ia64_pal_cache_config_info(l, /* cache_type (data_or_unified)= */ 2,
						    &cci);
		if (status != 0) {
			printk(KERN_ERR
			       "%s: ia64_pal_cache_config_info(l=%lu, 2) failed (status=%ld)\n",
			       __FUNCTION__, l, status);
			max = SMP_CACHE_BYTES;
			/* The safest setup for "flush_icache_range()" */
			cci.pcci_stride = I_CACHE_STRIDE_SHIFT;
			cci.pcci_unified = 1;
		}
		line_size = 1 << cci.pcci_line_size;
		if (line_size > max)
			max = line_size;
		if (cache_size < cci.pcci_cache_size)
			cache_size = cci.pcci_cache_size;
		if (!cci.pcci_unified) {
			status = ia64_pal_cache_config_info(l,
						    /* cache_type (instruction)= */ 1,
						    &cci);
			if (status != 0) {
				printk(KERN_ERR
				       "%s: ia64_pal_cache_config_info(l=%lu, 1) failed (status=%ld)\n",
				       __FUNCTION__, l, status);
				/* The safest setup for "flush_icache_range()" */
				cci.pcci_stride = I_CACHE_STRIDE_SHIFT;
			}
		}
		if (cci.pcci_stride < ia64_i_cache_stride_shift)
			ia64_i_cache_stride_shift = cci.pcci_stride;
	}
  out:
#ifdef CONFIG_SMP
	max_cache_size = max(max_cache_size, cache_size);
#endif
	if (max > ia64_max_cacheline_size)
		ia64_max_cacheline_size = max;
}
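
/*
 * flush_icache_range() steps through its range in strides of
 * (1 << ia64_i_cache_stride_shift) bytes, so taking the minimum stride
 * over all cache levels above keeps it correct everywhere without
 * flushing more lines than necessary.
 */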

/*
 * cpu_init() initializes state that is per-CPU.  This function acts
 * as a 'CPU state barrier', nothing should get across.
 */
void __cpuinit
cpu_init (void)
{
	extern void __cpuinit ia64_mmu_init (void *);
	static unsigned long max_num_phys_stacked = IA64_NUM_PHYS_STACK_REG;
	unsigned long num_phys_stacked;
	pal_vm_info_2_u_t vmi;
	unsigned int max_ctx;
	struct cpuinfo_ia64 *cpu_info;
	void *cpu_data;

	cpu_data = per_cpu_init();

	/*
	 * We set ar.k3 so that assembly code in MCA handler can compute
	 * physical addresses of per cpu variables with a simple:
	 *	phys = ar.k3 + &per_cpu_var
	 */
	ia64_set_kr(IA64_KR_PER_CPU_DATA,
		    ia64_tpa(cpu_data) - (long) __per_cpu_start);

	get_max_cacheline_size();

	/*
	 * We can't pass "local_cpu_data" to identify_cpu() because we haven't called
	 * ia64_mmu_init() yet.  And we can't call ia64_mmu_init() first because it
	 * depends on the data returned by identify_cpu().  We break the dependency by
	 * accessing cpu_data() through the canonical per-CPU address.
	 */
	cpu_info = cpu_data + ((char *) &__ia64_per_cpu_var(cpu_info) - __per_cpu_start);
	identify_cpu(cpu_info);

#ifdef CONFIG_MCKINLEY
	{
# define FEATURE_SET 16
		struct ia64_pal_retval iprv;

		if (cpu_info->family == 0x1f) {
			PAL_CALL_PHYS(iprv, PAL_PROC_GET_FEATURES, 0, FEATURE_SET, 0);
			if ((iprv.status == 0) && (iprv.v0 & 0x80) && (iprv.v2 & 0x80))
				PAL_CALL_PHYS(iprv, PAL_PROC_SET_FEATURES,
					      (iprv.v1 | 0x80), FEATURE_SET, 0);
		}
	}
#endif

	/* Clear the stack memory reserved for pt_regs: */
	memset(task_pt_regs(current), 0, sizeof(struct pt_regs));

	ia64_set_kr(IA64_KR_FPU_OWNER, 0);

	/*
	 * Initialize the page-table base register to a global
	 * directory with all zeroes.  This ensures that we can handle
	 * TLB-misses to user address-space even before we create the
	 * first user address-space.  This may happen, e.g., due to
	 * aggressive use of lfetch.fault.
	 */
	ia64_set_kr(IA64_KR_PT_BASE, __pa(ia64_imva(empty_zero_page)));

	/*
	 * Initialize default control register to defer speculative faults except
	 * for those arising from TLB misses, which are not deferred.  The
	 * kernel MUST NOT depend on a particular setting of these bits (in other words,
	 * the kernel must have recovery code for all speculative accesses).  Turn on
	 * dcr.lc as per recommendation by the architecture team.  Most IA-32 apps
	 * shouldn't be affected by this (moral: keep your ia32 locks aligned and you'll
	 * be fine).
	 */
	ia64_setreg(_IA64_REG_CR_DCR,  (  IA64_DCR_DP | IA64_DCR_DK | IA64_DCR_DX | IA64_DCR_DR
					| IA64_DCR_DA | IA64_DCR_DD | IA64_DCR_LC));
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
	if (current->mm)
		BUG();

	ia64_mmu_init(ia64_imva(cpu_data));
	ia64_mca_cpu_init(ia64_imva(cpu_data));

#ifdef CONFIG_IA32_SUPPORT
	ia32_cpu_init();
#endif

	/* Clear ITC to eliminate sched_clock() overflows in human time.  */
	ia64_set_itc(0);
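
	/*
	 * Bit 16 in cr.itv, the LRRs, cr.pmv and cr.cmcv is the mask (m)
	 * bit; writing (1 << 16) below masks each source while leaving
	 * its vector field zero.
	 */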
	/* disable all local interrupt sources: */
	ia64_set_itv(1 << 16);
	ia64_set_lrr0(1 << 16);
	ia64_set_lrr1(1 << 16);
	ia64_setreg(_IA64_REG_CR_PMV, 1 << 16);
	ia64_setreg(_IA64_REG_CR_CMCV, 1 << 16);

	/* clear TPR & XTP to enable all interrupt classes: */
	ia64_setreg(_IA64_REG_CR_TPR, 0);
#ifdef CONFIG_SMP
	normal_xtp();
#endif

	/* set ia64_ctx.max_rid to the maximum RID that is supported by all CPUs: */
	if (ia64_pal_vm_summary(NULL, &vmi) == 0)
		max_ctx = (1U << (vmi.pal_vm_info_2_s.rid_size - 3)) - 1;
	else {
		printk(KERN_WARNING "cpu_init: PAL VM summary failed, assuming 18 RID bits\n");
		max_ctx = (1U << 15) - 1;	/* architected minimum: 18 RID bits - 3 region bits */
	}
	while (max_ctx < ia64_ctx.max_ctx) {
		unsigned int old = ia64_ctx.max_ctx;
		if (cmpxchg(&ia64_ctx.max_ctx, old, max_ctx) == old)
			break;
	}

	if (ia64_pal_rse_info(&num_phys_stacked, NULL) != 0) {
		printk(KERN_WARNING "cpu_init: PAL RSE info failed; assuming 96 physical "
		       "stacked regs\n");
		num_phys_stacked = 96;
	}
	/* size of physical stacked register partition plus 8 bytes: */
	if (num_phys_stacked > max_num_phys_stacked) {
		ia64_patch_phys_stack_reg(num_phys_stacked*8 + 8);
		max_num_phys_stacked = num_phys_stacked;
	}
	platform_cpu_init();
	pm_idle = default_idle;
}

/*
 * On SMP systems, when the scheduler does migration-cost autodetection,
 * it needs a way to flush as much of the CPU's caches as possible.
 */
void sched_cacheflush(void)
{
	ia64_sal_cache_flush(3);
}

void __init
check_bugs (void)
{
	ia64_patch_mckinley_e9((unsigned long) __start___mckinley_e9_bundles,
			       (unsigned long) __end___mckinley_e9_bundles);
}

static int __init run_dmi_scan(void)
{
	dmi_scan_machine();
	return 0;
}
core_initcall(run_dmi_scan);