/*
 * Architecture-specific setup.
 *
 * Copyright (C) 1998-2001, 2003-2004 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *	Stephane Eranian <eranian@hpl.hp.com>
 * Copyright (C) 2000, 2004 Intel Corp
 *	Rohit Seth <rohit.seth@intel.com>
 *	Suresh Siddha <suresh.b.siddha@intel.com>
 *	Gordon Jin <gordon.jin@intel.com>
 * Copyright (C) 1999 VA Linux Systems
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 *
 * 12/26/04 S.Siddha, G.Jin, R.Seth
 *			Add multi-threading and multi-core detection
 * 11/12/01 D.Mosberger	Convert get_cpuinfo() to seq_file based show_cpuinfo().
 * 04/04/00 D.Mosberger	renamed cpu_initialized to cpu_online_map
 * 03/31/00 R.Seth	cpu_initialized and current->processor fixes
 * 02/04/00 D.Mosberger	some more get_cpuinfo fixes...
 * 02/01/00 R.Seth	fixed get_cpuinfo for SMP
 * 01/07/99 S.Eranian	added the support for command line argument
 * 06/24/99 W.Drummond	added boot_cpu_data.
 * 05/28/05 Z. Menyhart	Dynamic stride size for "flush_icache_range()"
 */
#include <linux/module.h>
#include <linux/init.h>

#include <linux/acpi.h>
#include <linux/bootmem.h>
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/reboot.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/threads.h>
#include <linux/screen_info.h>
#include <linux/dmi.h>
#include <linux/serial.h>
#include <linux/serial_core.h>
#include <linux/efi.h>
#include <linux/initrd.h>
#include <linux/pm.h>
#include <linux/cpufreq.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>

#include <asm/ia32.h>
#include <asm/machvec.h>
#include <asm/mca.h>
#include <asm/meminit.h>
#include <asm/page.h>
#include <asm/paravirt.h>
#include <asm/patch.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/sal.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp.h>
#include <asm/system.h>
#include <asm/tlbflush.h>
#include <asm/unistd.h>
#include <asm/hpsim.h>

#if defined(CONFIG_SMP) && (IA64_CPU_SIZE > PAGE_SIZE)
# error "struct cpuinfo_ia64 too big!"
#endif

#ifdef CONFIG_SMP
unsigned long __per_cpu_offset[NR_CPUS];
EXPORT_SYMBOL(__per_cpu_offset);
#endif

DEFINE_PER_CPU(struct cpuinfo_ia64, cpu_info);
DEFINE_PER_CPU(unsigned long, local_per_cpu_offset);
unsigned long ia64_cycles_per_usec;
struct ia64_boot_param *ia64_boot_param;
struct screen_info screen_info;
unsigned long vga_console_iobase;
unsigned long vga_console_membase;

static struct resource data_resource = {
	.name	= "Kernel data",
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
};

static struct resource code_resource = {
	.name	= "Kernel code",
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
};

static struct resource bss_resource = {
	.name	= "Kernel bss",
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
};

unsigned long ia64_max_cacheline_size;

int dma_get_cache_alignment(void)
{
	return ia64_max_cacheline_size;
}
EXPORT_SYMBOL(dma_get_cache_alignment);

unsigned long ia64_iobase;	/* virtual address for I/O accesses */
EXPORT_SYMBOL(ia64_iobase);
struct io_space io_space[MAX_IO_SPACES];
EXPORT_SYMBOL(io_space);
unsigned int num_io_spaces;

/*
 * "flush_icache_range()" needs to know what processor dependent stride size to use
 * when it makes i-cache(s) coherent with d-caches.
 */
#define	I_CACHE_STRIDE_SHIFT	5	/* Safest way to go: 32 bytes by 32 bytes */
unsigned long ia64_i_cache_stride_shift = ~0;
/*
 * "clflush_cache_range()" needs to know what processor dependent stride size to
 * use when it flushes cache lines including both d-cache and i-cache.
 */
/* Safest way to go: 32 bytes by 32 bytes */
#define	CACHE_STRIDE_SHIFT	5
unsigned long ia64_cache_stride_shift = ~0;

/*
 * The merge_mask variable needs to be set to (max(iommu_page_size(iommu)) - 1).  This
 * mask specifies a mask of address bits that must be 0 in order for two buffers to be
 * mergeable by the I/O MMU (i.e., the end address of the first buffer and the start
 * address of the second buffer must be aligned to (merge_mask+1) in order to be
 * mergeable).  By default, we assume there is no I/O MMU which can merge physically
 * discontiguous buffers, so we set the merge_mask to ~0UL, which corresponds to an iommu
 * page-size of 2^64.
 */
unsigned long ia64_max_iommu_merge_mask = ~0UL;
EXPORT_SYMBOL(ia64_max_iommu_merge_mask);

/*
 * We use a special marker for the end of memory and it uses the extra (+1) slot
 */
struct rsvd_region rsvd_region[IA64_MAX_RSVD_REGIONS + 1] __initdata;
int num_rsvd_regions __initdata;


/*
 * Filter incoming memory segments based on the primitive map created from the boot
 * parameters. Segments contained in the map are removed from the memory ranges. A
 * caller-specified function is called with the memory ranges that remain after filtering.
 * This routine does not assume the incoming segments are sorted.
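 *
 * Illustrative only (a sketch of typical usage, not a prescribed API): the
 * opaque "arg" is expected to be a callback with the signature used below,
 *
 *	void func(unsigned long paddr, unsigned long len, int node);
 *
 * and a typical caller hands this routine to the EFI memory-map walker,
 * e.g. efi_memmap_walk(filter_rsvd_memory, func), so each surviving range
 * is delivered to func() as a physical base/length pair through
 * call_pernode_memory().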
 */
int __init
filter_rsvd_memory (unsigned long start, unsigned long end, void *arg)
{
	unsigned long range_start, range_end, prev_start;
	void (*func)(unsigned long, unsigned long, int);
	int i;

#if IGNORE_PFN0
	if (start == PAGE_OFFSET) {
		printk(KERN_WARNING "warning: skipping physical page 0\n");
		start += PAGE_SIZE;
		if (start >= end) return 0;
	}
#endif
	/*
	 * lowest possible address (walker uses virtual)
	 */
	prev_start = PAGE_OFFSET;
	func = arg;

	for (i = 0; i < num_rsvd_regions; ++i) {
		range_start = max(start, prev_start);
		range_end   = min(end, rsvd_region[i].start);

		if (range_start < range_end)
			call_pernode_memory(__pa(range_start), range_end - range_start, func);

		/* nothing more available in this segment */
		if (range_end == end) return 0;

		prev_start = rsvd_region[i].end;
	}
	/* end of memory marker allows full processing inside loop body */
	return 0;
}

/*
 * Similar to "filter_rsvd_memory()", but the reserved memory ranges
 * are not filtered out.
 */
int __init
filter_memory(unsigned long start, unsigned long end, void *arg)
{
	void (*func)(unsigned long, unsigned long, int);

#if IGNORE_PFN0
	if (start == PAGE_OFFSET) {
		printk(KERN_WARNING "warning: skipping physical page 0\n");
		start += PAGE_SIZE;
		if (start >= end)
			return 0;
	}
#endif
	func = arg;
	if (start < end)
		call_pernode_memory(__pa(start), end - start, func);
	return 0;
}

static void __init
sort_regions (struct rsvd_region *rsvd_region, int max)
{
	int j;

	/* simple bubble sorting */
	while (max--) {
		for (j = 0; j < max; ++j) {
			if (rsvd_region[j].start > rsvd_region[j+1].start) {
				struct rsvd_region tmp;
				tmp = rsvd_region[j];
				rsvd_region[j] = rsvd_region[j + 1];
				rsvd_region[j + 1] = tmp;
			}
		}
	}
}

/*
 * Request address space for all standard resources
 */
static int __init register_memory(void)
{
	code_resource.start = ia64_tpa(_text);
	code_resource.end   = ia64_tpa(_etext) - 1;
	data_resource.start = ia64_tpa(_etext);
	data_resource.end   = ia64_tpa(_edata) - 1;
	bss_resource.start  = ia64_tpa(__bss_start);
	bss_resource.end    = ia64_tpa(_end) - 1;
	efi_initialize_iomem_resources(&code_resource, &data_resource,
				       &bss_resource);

	return 0;
}

__initcall(register_memory);


#ifdef CONFIG_KEXEC

/*
 * This function checks if the reserved crashkernel is allowed on the specific
 * IA64 machine flavour. Machines without an IO TLB use swiotlb and require
 * some memory below 4 GB (i.e. in 32 bit area), see the implementation of
 * lib/swiotlb.c. The hpzx1 architecture has an IO TLB but cannot use that
 * in the kdump case. See the comment in sba_init() in sba_iommu.c.
 *
 * So, the only machvec that really supports loading the kdump kernel
 * over 4 GB is "sn2".
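 *
 * For illustration, assuming the usual crashkernel= syntax: booting with
 * "crashkernel=256M" gives setup_crashkernel() size = 256M and base = 0,
 * so it picks a base itself via kdump_find_rsvd_region(); booting with
 * "crashkernel=256M@1024M" pins the reservation at the 1 GB physical base,
 * which check_crashkernel_memory() accepts on any machvec because it lies
 * below 4 GB.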
 */
static int __init check_crashkernel_memory(unsigned long pbase, size_t size)
{
	if (ia64_platform_is("sn2") || ia64_platform_is("uv"))
		return 1;
	else
		return pbase < (1UL << 32);
}

static void __init setup_crashkernel(unsigned long total, int *n)
{
	unsigned long long base = 0, size = 0;
	int ret;

	ret = parse_crashkernel(boot_command_line, total,
			&size, &base);
	if (ret == 0 && size > 0) {
		if (!base) {
			sort_regions(rsvd_region, *n);
			base = kdump_find_rsvd_region(size,
					rsvd_region, *n);
		}

		if (!check_crashkernel_memory(base, size)) {
			pr_warning("crashkernel: There would be kdump memory "
				"at %ld GB but this is unusable because it "
				"must\nbe below 4 GB. Change the memory "
				"configuration of the machine.\n",
				(unsigned long)(base >> 30));
			return;
		}

		if (base != ~0UL) {
			printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
					"for crashkernel (System RAM: %ldMB)\n",
					(unsigned long)(size >> 20),
					(unsigned long)(base >> 20),
					(unsigned long)(total >> 20));
			rsvd_region[*n].start =
				(unsigned long)__va(base);
			rsvd_region[*n].end =
				(unsigned long)__va(base + size);
			(*n)++;
			crashk_res.start = base;
			crashk_res.end = base + size - 1;
		}
	}
	efi_memmap_res.start = ia64_boot_param->efi_memmap;
	efi_memmap_res.end = efi_memmap_res.start +
		ia64_boot_param->efi_memmap_size;
	boot_param_res.start = __pa(ia64_boot_param);
	boot_param_res.end = boot_param_res.start +
		sizeof(*ia64_boot_param);
}
#else
static inline void __init setup_crashkernel(unsigned long total, int *n)
{}
#endif

/**
 * reserve_memory - setup reserved memory areas
 *
 * Setup the reserved memory areas set aside for the boot parameters,
 * initrd, etc. There are currently %IA64_MAX_RSVD_REGIONS defined,
 * see arch/ia64/include/asm/meminit.h if you need to define more.
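 *
 * Rough sketch of the table built below (entry order before sort_regions(),
 * some entries only when configured):
 *
 *	rsvd_region[0]	ia64_boot_param structure
 *	rsvd_region[1]	EFI memory map
 *	rsvd_region[2]	boot command line
 *	rsvd_region[3]	kernel image (KERNEL_START .. _end)
 *	   ...		paravirt, initrd, ELF core header,
 *			efi_memmap_init() region, crash kernel
 *	rsvd_region[n]	~0UL end-of-memory marker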
 */
void __init
reserve_memory (void)
{
	int n = 0;
	unsigned long total_memory;

	/*
	 * none of the entries in this table overlap
	 */
	rsvd_region[n].start = (unsigned long) ia64_boot_param;
	rsvd_region[n].end   = rsvd_region[n].start + sizeof(*ia64_boot_param);
	n++;

	rsvd_region[n].start = (unsigned long) __va(ia64_boot_param->efi_memmap);
	rsvd_region[n].end   = rsvd_region[n].start + ia64_boot_param->efi_memmap_size;
	n++;

	rsvd_region[n].start = (unsigned long) __va(ia64_boot_param->command_line);
	rsvd_region[n].end   = (rsvd_region[n].start
				+ strlen(__va(ia64_boot_param->command_line)) + 1);
	n++;

	rsvd_region[n].start = (unsigned long) ia64_imva((void *)KERNEL_START);
	rsvd_region[n].end   = (unsigned long) ia64_imva(_end);
	n++;

	n += paravirt_reserve_memory(&rsvd_region[n]);

#ifdef CONFIG_BLK_DEV_INITRD
	if (ia64_boot_param->initrd_start) {
		rsvd_region[n].start = (unsigned long)__va(ia64_boot_param->initrd_start);
		rsvd_region[n].end   = rsvd_region[n].start + ia64_boot_param->initrd_size;
		n++;
	}
#endif

#ifdef CONFIG_CRASH_DUMP
	if (reserve_elfcorehdr(&rsvd_region[n].start,
			       &rsvd_region[n].end) == 0)
		n++;
#endif

	total_memory = efi_memmap_init(&rsvd_region[n].start, &rsvd_region[n].end);
	n++;

	setup_crashkernel(total_memory, &n);

	/* end of memory marker */
	rsvd_region[n].start = ~0UL;
	rsvd_region[n].end   = ~0UL;
	n++;

	num_rsvd_regions = n;
	BUG_ON(IA64_MAX_RSVD_REGIONS + 1 < n);

	sort_regions(rsvd_region, num_rsvd_regions);
}


/**
 * find_initrd - get initrd parameters from the boot parameter structure
 *
 * Grab the initrd start and end from the boot parameter struct given to us
 * by the boot loader.
 */
void __init
find_initrd (void)
{
#ifdef CONFIG_BLK_DEV_INITRD
	if (ia64_boot_param->initrd_start) {
		initrd_start = (unsigned long)__va(ia64_boot_param->initrd_start);
		initrd_end   = initrd_start + ia64_boot_param->initrd_size;

		printk(KERN_INFO "Initial ramdisk at: 0x%lx (%lu bytes)\n",
		       initrd_start, ia64_boot_param->initrd_size);
	}
#endif
}

static void __init
io_port_init (void)
{
	unsigned long phys_iobase;

	/*
	 * Set `iobase' based on the EFI memory map or, failing that, the
	 * value firmware left in ar.k0.
	 *
	 * Note that in ia32 mode, IN/OUT instructions use ar.k0 to compute
	 * the port's virtual address, so ia32_load_state() loads it with a
	 * user virtual address.  But in ia64 mode, glibc uses the
	 * *physical* address in ar.k0 to mmap the appropriate area from
	 * /dev/mem, and the inX()/outX() interfaces use MMIO.  In both
	 * cases, user-mode can only use the legacy 0-64K I/O port space.
	 *
	 * ar.k0 is not involved in kernel I/O port accesses, which can use
	 * any of the I/O port spaces and are done via MMIO using the
	 * virtual mmio_base from the appropriate io_space[].
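	 *
	 * For reference only (the real encoding lives in asm/io.h; this is
	 * just a sketch): with io_space[0].sparse = 1 below, a legacy port
	 * number is turned into an uncacheable address roughly as
	 *
	 *	addr = io_space[0].mmio_base | ((port >> 2) << 12) | (port & 0xfff);
	 *
	 * before the actual load/store is issued.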
	 */
	phys_iobase = efi_get_iobase();
	if (!phys_iobase) {
		phys_iobase = ia64_get_kr(IA64_KR_IO_BASE);
		printk(KERN_INFO "No I/O port range found in EFI memory map, "
			"falling back to AR.KR0 (0x%lx)\n", phys_iobase);
	}
	ia64_iobase = (unsigned long) ioremap(phys_iobase, 0);
	ia64_set_kr(IA64_KR_IO_BASE, __pa(ia64_iobase));

	/* setup legacy IO port space */
	io_space[0].mmio_base = ia64_iobase;
	io_space[0].sparse = 1;
	num_io_spaces = 1;
}

/**
 * early_console_setup - setup debugging console
 *
 * Consoles started here require little enough setup that we can start using
 * them very early in the boot process, either right after the machine
 * vector initialization, or even before if the drivers can detect their hw.
 *
 * Returns non-zero if a console couldn't be set up.
 */
static inline int __init
early_console_setup (char *cmdline)
{
	int earlycons = 0;

#ifdef CONFIG_SERIAL_SGI_L1_CONSOLE
	{
		extern int sn_serial_console_early_setup(void);
		if (!sn_serial_console_early_setup())
			earlycons++;
	}
#endif
#ifdef CONFIG_EFI_PCDP
	if (!efi_setup_pcdp_console(cmdline))
		earlycons++;
#endif
	if (!simcons_register())
		earlycons++;

	return (earlycons) ? 0 : -1;
}

static inline void
mark_bsp_online (void)
{
#ifdef CONFIG_SMP
	/* If we register an early console, allow CPU 0 to printk */
	cpu_set(smp_processor_id(), cpu_online_map);
#endif
}

static __initdata int nomca;
static __init int setup_nomca(char *s)
{
	nomca = 1;
	return 0;
}
early_param("nomca", setup_nomca);

/*
 * Note: elfcorehdr_addr is not just limited to vmcore. It is also used by
 * is_kdump_kernel() to determine if we are booting after a panic. Hence
 * ifdef it under CONFIG_CRASH_DUMP and not CONFIG_PROC_VMCORE.
 */
#ifdef CONFIG_CRASH_DUMP
/* elfcorehdr= specifies the location of elf core header
 * stored by the crashed kernel.
 */
static int __init parse_elfcorehdr(char *arg)
{
	if (!arg)
		return -EINVAL;

	elfcorehdr_addr = memparse(arg, &arg);
	return 0;
}
early_param("elfcorehdr", parse_elfcorehdr);

int __init reserve_elfcorehdr(unsigned long *start, unsigned long *end)
{
	unsigned long length;

	/* We get the address using the kernel command line,
	 * but the size is extracted from the EFI tables.
	 * Both address and size are required for reservation
	 * to work properly.
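	 *
	 * E.g. (address purely illustrative): a kdump kernel started with
	 * "elfcorehdr=0x20000000" gets elfcorehdr_addr from the early_param
	 * handler above, while the header's length is obtained from
	 * vmcore_find_descriptor_size() below.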
	 */

	if (!is_vmcore_usable())
		return -EINVAL;

	if ((length = vmcore_find_descriptor_size(elfcorehdr_addr)) == 0) {
		vmcore_unusable();
		return -EINVAL;
	}

	*start = (unsigned long)__va(elfcorehdr_addr);
	*end = *start + length;
	return 0;
}

#endif /* CONFIG_CRASH_DUMP */

void __init
setup_arch (char **cmdline_p)
{
	unw_init();

	paravirt_arch_setup_early();

	ia64_patch_vtop((u64) __start___vtop_patchlist, (u64) __end___vtop_patchlist);

	*cmdline_p = __va(ia64_boot_param->command_line);
	strlcpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE);

	efi_init();
	io_port_init();

#ifdef CONFIG_IA64_GENERIC
	/* machvec needs to be parsed from the command line
	 * before parse_early_param() is called to ensure
	 * that ia64_mv is initialised before any command line
	 * settings may cause console setup to occur
	 */
	machvec_init_from_cmdline(*cmdline_p);
#endif

	parse_early_param();

	if (early_console_setup(*cmdline_p) == 0)
		mark_bsp_online();

#ifdef CONFIG_ACPI
	/* Initialize the ACPI boot-time table parser */
	acpi_table_init();
	early_acpi_boot_init();
# ifdef CONFIG_ACPI_NUMA
	acpi_numa_init();
#  ifdef CONFIG_ACPI_HOTPLUG_CPU
	prefill_possible_map();
#  endif
	per_cpu_scan_finalize((cpus_weight(early_cpu_possible_map) == 0 ?
		32 : cpus_weight(early_cpu_possible_map)),
		additional_cpus > 0 ? additional_cpus : 0);
# endif
#else
# ifdef CONFIG_SMP
	smp_build_cpu_map();	/* happens, e.g., with the Ski simulator */
# endif
#endif /* CONFIG_ACPI */

	find_memory();

	/* process SAL system table: */
	ia64_sal_init(__va(efi.sal_systab));

#ifdef CONFIG_ITANIUM
	ia64_patch_rse((u64) __start___rse_patchlist, (u64) __end___rse_patchlist);
#else
	{
		u64 num_phys_stacked;

		if (ia64_pal_rse_info(&num_phys_stacked, 0) == 0 && num_phys_stacked > 96)
			ia64_patch_rse((u64) __start___rse_patchlist, (u64) __end___rse_patchlist);
	}
#endif

#ifdef CONFIG_SMP
	cpu_physical_id(0) = hard_smp_processor_id();
#endif

	cpu_init();	/* initialize the bootstrap CPU */
	mmu_context_init();	/* initialize context_id bitmap */

#ifdef CONFIG_ACPI
	acpi_boot_init();
#endif

	paravirt_banner();
	paravirt_arch_setup_console(cmdline_p);

#ifdef CONFIG_VT
	if (!conswitchp) {
# if defined(CONFIG_DUMMY_CONSOLE)
		conswitchp = &dummy_con;
# endif
# if defined(CONFIG_VGA_CONSOLE)
		/*
		 * Non-legacy systems may route legacy VGA MMIO range to system
		 * memory.  vga_con probes the MMIO hole, so memory looks like
		 * a VGA device to it.  The EFI memory map can tell us if it's
		 * memory so we can avoid this problem.
		 */
		if (efi_mem_type(0xA0000) != EFI_CONVENTIONAL_MEMORY)
			conswitchp = &vga_con;
# endif
	}
#endif

	/* enable IA-64 Machine Check Abort Handling unless disabled */
	if (paravirt_arch_setup_nomca())
		nomca = 1;
	if (!nomca)
		ia64_mca_init();

	platform_setup(cmdline_p);
#ifndef CONFIG_IA64_HP_SIM
	check_sal_cache_flush();
#endif
	paging_init();
}

/*
 * Display cpu info for all CPUs.
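 *
 * Each online CPU contributes one /proc/cpuinfo stanza built from the
 * format string in show_cpuinfo() below; the values here are illustrative
 * only:
 *
 *	processor : 0
 *	vendor : GenuineIntel
 *	arch : IA-64
 *	family : 31
 *	model name : Madison
 *	features : branchlong, 16-byte atomic ops
 *	cpu MHz : 1500.000
 *	BogoMIPS : 2245.59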
 */
static int
show_cpuinfo (struct seq_file *m, void *v)
{
#ifdef CONFIG_SMP
# define lpj	c->loops_per_jiffy
# define cpunum	c->cpu
#else
# define lpj	loops_per_jiffy
# define cpunum	0
#endif
	static struct {
		unsigned long mask;
		const char *feature_name;
	} feature_bits[] = {
		{ 1UL << 0, "branchlong" },
		{ 1UL << 1, "spontaneous deferral"},
		{ 1UL << 2, "16-byte atomic ops" }
	};
	char features[128], *cp, *sep;
	struct cpuinfo_ia64 *c = v;
	unsigned long mask;
	unsigned long proc_freq;
	int i, size;

	mask = c->features;

	/* build the feature string: */
	memcpy(features, "standard", 9);
	cp = features;
	size = sizeof(features);
	sep = "";
	for (i = 0; i < ARRAY_SIZE(feature_bits) && size > 1; ++i) {
		if (mask & feature_bits[i].mask) {
			cp += snprintf(cp, size, "%s%s", sep,
				       feature_bits[i].feature_name),
			sep = ", ";
			mask &= ~feature_bits[i].mask;
			size = sizeof(features) - (cp - features);
		}
	}
	if (mask && size > 1) {
		/* print unknown features as a hex value */
		snprintf(cp, size, "%s0x%lx", sep, mask);
	}

	proc_freq = cpufreq_quick_get(cpunum);
	if (!proc_freq)
		proc_freq = c->proc_freq / 1000;

	seq_printf(m,
		   "processor : %d\n"
		   "vendor : %s\n"
		   "arch : IA-64\n"
		   "family : %u\n"
		   "model : %u\n"
		   "model name : %s\n"
		   "revision : %u\n"
		   "archrev : %u\n"
		   "features : %s\n"
		   "cpu number : %lu\n"
		   "cpu regs : %u\n"
		   "cpu MHz : %lu.%03lu\n"
		   "itc MHz : %lu.%06lu\n"
		   "BogoMIPS : %lu.%02lu\n",
		   cpunum, c->vendor, c->family, c->model,
		   c->model_name, c->revision, c->archrev,
		   features, c->ppn, c->number,
		   proc_freq / 1000, proc_freq % 1000,
		   c->itc_freq / 1000000, c->itc_freq % 1000000,
		   lpj*HZ/500000, (lpj*HZ/5000) % 100);
#ifdef CONFIG_SMP
	seq_printf(m, "siblings : %u\n", cpus_weight(cpu_core_map[cpunum]));
	if (c->socket_id != -1)
		seq_printf(m, "physical id: %u\n", c->socket_id);
	if (c->threads_per_core > 1 || c->cores_per_socket > 1)
		seq_printf(m,
			   "core id : %u\n"
			   "thread id : %u\n",
			   c->core_id, c->thread_id);
#endif
	seq_printf(m, "\n");

	return 0;
}

static void *
c_start (struct seq_file *m, loff_t *pos)
{
#ifdef CONFIG_SMP
	while (*pos < nr_cpu_ids && !cpu_online(*pos))
		++*pos;
#endif
	return *pos < nr_cpu_ids ?
		cpu_data(*pos) : NULL;
}

static void *
c_next (struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return c_start(m, pos);
}

static void
c_stop (struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= show_cpuinfo
};

#define MAX_BRANDS	8
static char brandname[MAX_BRANDS][128];

static char * __cpuinit
get_model_name(__u8 family, __u8 model)
{
	static int overflow;
	char brand[128];
	int i;

	memcpy(brand, "Unknown", 8);
	if (ia64_pal_get_brand_info(brand)) {
		if (family == 0x7)
			memcpy(brand, "Merced", 7);
		else if (family == 0x1f) switch (model) {
			case 0: memcpy(brand, "McKinley", 9); break;
			case 1: memcpy(brand, "Madison", 8); break;
			case 2: memcpy(brand, "Madison up to 9M cache", 23); break;
		}
	}
	for (i = 0; i < MAX_BRANDS; i++)
		if (strcmp(brandname[i], brand) == 0)
			return brandname[i];
	for (i = 0; i < MAX_BRANDS; i++)
		if (brandname[i][0] == '\0')
			return strcpy(brandname[i], brand);
	if (overflow++ == 0)
		printk(KERN_ERR
		       "%s: Table overflow. Some processor model information will be missing\n",
		       __func__);
	return "Unknown";
}

static void __cpuinit
identify_cpu (struct cpuinfo_ia64 *c)
{
	union {
		unsigned long bits[5];
		struct {
			/* id 0 & 1: */
			char vendor[16];

			/* id 2 */
			u64 ppn;		/* processor serial number */

			/* id 3: */
			unsigned number		:  8;
			unsigned revision	:  8;
			unsigned model		:  8;
			unsigned family		:  8;
			unsigned archrev	:  8;
			unsigned reserved	: 24;

			/* id 4: */
			u64 features;
		} field;
	} cpuid;
	pal_vm_info_1_u_t vm1;
	pal_vm_info_2_u_t vm2;
	pal_status_t status;
	unsigned long impl_va_msb = 50, phys_addr_size = 44;	/* Itanium defaults */
	int i;

	for (i = 0; i < 5; ++i)
		cpuid.bits[i] = ia64_get_cpuid(i);

	memcpy(c->vendor, cpuid.field.vendor, 16);
#ifdef CONFIG_SMP
	c->cpu = smp_processor_id();

	/* below default values will be overwritten by identify_siblings()
	 * for Multi-Threading/Multi-Core capable CPUs
	 */
	c->threads_per_core = c->cores_per_socket = c->num_log = 1;
	c->socket_id = -1;

	identify_siblings(c);

	if (c->threads_per_core > smp_num_siblings)
		smp_num_siblings = c->threads_per_core;
#endif
	c->ppn = cpuid.field.ppn;
	c->number = cpuid.field.number;
	c->revision = cpuid.field.revision;
	c->model = cpuid.field.model;
	c->family = cpuid.field.family;
	c->archrev = cpuid.field.archrev;
	c->features = cpuid.field.features;
	c->model_name = get_model_name(c->family, c->model);

	status = ia64_pal_vm_summary(&vm1, &vm2);
	if (status == PAL_STATUS_SUCCESS) {
		impl_va_msb = vm2.pal_vm_info_2_s.impl_va_msb;
		phys_addr_size = vm1.pal_vm_info_1_s.phys_add_size;
	}
	c->unimpl_va_mask = ~((7L<<61) | ((1L << (impl_va_msb + 1)) - 1));
	c->unimpl_pa_mask = ~((1L<<63) | ((1L << phys_addr_size) - 1));
}

void __init
setup_per_cpu_areas (void)
{
	/* start_kernel() requires this... */
}

/*
 * Do the following calculations:
 *
 * 1. the max. cache line size.
 * 2. the minimum of the i-cache stride sizes for "flush_icache_range()".
 * 3. the minimum of the cache stride sizes for "clflush_cache_range()".
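 *
 * Worked example (numbers illustrative): a unified level reporting
 * pcci_line_size = 7 and pcci_stride = 7 contributes a 1 << 7 = 128 byte
 * line to ia64_max_cacheline_size and pulls both stride shifts down to 7,
 * so flush_icache_range()/clflush_cache_range() step 128 bytes at a time.
 * The 32-byte (shift 5) values are only the fallbacks used when the PAL
 * calls fail.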
 */
static void __cpuinit
get_cache_info(void)
{
	unsigned long line_size, max = 1;
	u64 l, levels, unique_caches;
	pal_cache_config_info_t cci;
	s64 status;

	status = ia64_pal_cache_summary(&levels, &unique_caches);
	if (status != 0) {
		printk(KERN_ERR "%s: ia64_pal_cache_summary() failed (status=%ld)\n",
		       __func__, status);
		max = SMP_CACHE_BYTES;
		/* Safest setup for "flush_icache_range()" */
		ia64_i_cache_stride_shift = I_CACHE_STRIDE_SHIFT;
		/* Safest setup for "clflush_cache_range()" */
		ia64_cache_stride_shift = CACHE_STRIDE_SHIFT;
		goto out;
	}

	for (l = 0; l < levels; ++l) {
		/* cache_type (data_or_unified)=2 */
		status = ia64_pal_cache_config_info(l, 2, &cci);
		if (status != 0) {
			printk(KERN_ERR
			       "%s: ia64_pal_cache_config_info(l=%lu, 2) failed (status=%ld)\n",
			       __func__, l, status);
			max = SMP_CACHE_BYTES;
			/* The safest setup for "flush_icache_range()" */
			cci.pcci_stride = I_CACHE_STRIDE_SHIFT;
			/* The safest setup for "clflush_cache_range()" */
			ia64_cache_stride_shift = CACHE_STRIDE_SHIFT;
			cci.pcci_unified = 1;
		} else {
			if (cci.pcci_stride < ia64_cache_stride_shift)
				ia64_cache_stride_shift = cci.pcci_stride;

			line_size = 1 << cci.pcci_line_size;
			if (line_size > max)
				max = line_size;
		}

		if (!cci.pcci_unified) {
			/* cache_type (instruction)=1 */
			status = ia64_pal_cache_config_info(l, 1, &cci);
			if (status != 0) {
				printk(KERN_ERR
				       "%s: ia64_pal_cache_config_info(l=%lu, 1) failed (status=%ld)\n",
				       __func__, l, status);
				/* The safest setup for "flush_icache_range()" */
				cci.pcci_stride = I_CACHE_STRIDE_SHIFT;
			}
		}
		if (cci.pcci_stride < ia64_i_cache_stride_shift)
			ia64_i_cache_stride_shift = cci.pcci_stride;
	}
  out:
	if (max > ia64_max_cacheline_size)
		ia64_max_cacheline_size = max;
}

/*
 * cpu_init() initializes state that is per-CPU.  This function acts
 * as a 'CPU state barrier', nothing should get across.
 */
void __cpuinit
cpu_init (void)
{
	extern void __cpuinit ia64_mmu_init (void *);
	static unsigned long max_num_phys_stacked = IA64_NUM_PHYS_STACK_REG;
	unsigned long num_phys_stacked;
	pal_vm_info_2_u_t vmi;
	unsigned int max_ctx;
	struct cpuinfo_ia64 *cpu_info;
	void *cpu_data;

	cpu_data = per_cpu_init();
#ifdef CONFIG_SMP
	/*
	 * insert boot cpu into sibling and core maps
	 * (must be done after per_cpu area is set up)
	 */
	if (smp_processor_id() == 0) {
		cpu_set(0, per_cpu(cpu_sibling_map, 0));
		cpu_set(0, cpu_core_map[0]);
	} else {
		/*
		 * Set ar.k3 so that assembly code in MCA handler can compute
		 * physical addresses of per cpu variables with a simple:
		 *	phys = ar.k3 + &per_cpu_var
		 * and the alt-dtlb-miss handler can set per-cpu mapping into
		 * the TLB when needed. head.S already did this for cpu0.
		 */
		ia64_set_kr(IA64_KR_PER_CPU_DATA,
			    ia64_tpa(cpu_data) - (long) __per_cpu_start);
	}
#endif

	get_cache_info();

	/*
	 * We can't pass "local_cpu_data" to identify_cpu() because we haven't called
	 * ia64_mmu_init() yet.  And we can't call ia64_mmu_init() first because it
	 * depends on the data returned by identify_cpu().  We break the dependency by
	 * accessing cpu_data() through the canonical per-CPU address.
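	 *
	 * In other words, the assignment below amounts to (sketch only):
	 *
	 *	cpu_info = cpu_data (this CPU's per-cpu image)
	 *		 + offset of cpu_info within the per-cpu section
	 *
	 * i.e. the variable is reached through its address in the per-cpu
	 * image rather than through the per-CPU virtual mapping, which
	 * ia64_mmu_init() has not installed yet.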
	 */
	cpu_info = cpu_data + ((char *) &__ia64_per_cpu_var(cpu_info) - __per_cpu_start);
	identify_cpu(cpu_info);

#ifdef CONFIG_MCKINLEY
	{
# define FEATURE_SET 16
		struct ia64_pal_retval iprv;

		if (cpu_info->family == 0x1f) {
			PAL_CALL_PHYS(iprv, PAL_PROC_GET_FEATURES, 0, FEATURE_SET, 0);
			if ((iprv.status == 0) && (iprv.v0 & 0x80) && (iprv.v2 & 0x80))
				PAL_CALL_PHYS(iprv, PAL_PROC_SET_FEATURES,
				              (iprv.v1 | 0x80), FEATURE_SET, 0);
		}
	}
#endif

	/* Clear the stack memory reserved for pt_regs: */
	memset(task_pt_regs(current), 0, sizeof(struct pt_regs));

	ia64_set_kr(IA64_KR_FPU_OWNER, 0);

	/*
	 * Initialize the page-table base register to a global
	 * directory with all zeroes.  This ensures that we can handle
	 * TLB-misses to user address-space even before we created the
	 * first user address-space.  This may happen, e.g., due to
	 * aggressive use of lfetch.fault.
	 */
	ia64_set_kr(IA64_KR_PT_BASE, __pa(ia64_imva(empty_zero_page)));

	/*
	 * Initialize default control register to defer speculative faults except
	 * for those arising from TLB misses, which are not deferred.  The
	 * kernel MUST NOT depend on a particular setting of these bits (in other words,
	 * the kernel must have recovery code for all speculative accesses).  Turn on
	 * dcr.lc as per recommendation by the architecture team.  Most IA-32 apps
	 * shouldn't be affected by this (moral: keep your ia32 locks aligned and you'll
	 * be fine).
	 */
	ia64_setreg(_IA64_REG_CR_DCR,  (  IA64_DCR_DP | IA64_DCR_DK | IA64_DCR_DX | IA64_DCR_DR
					| IA64_DCR_DA | IA64_DCR_DD | IA64_DCR_LC));
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
	if (current->mm)
		BUG();

	ia64_mmu_init(ia64_imva(cpu_data));
	ia64_mca_cpu_init(ia64_imva(cpu_data));

#ifdef CONFIG_IA32_SUPPORT
	ia32_cpu_init();
#endif

	/* Clear ITC to eliminate sched_clock() overflows in human time.
	 */
	ia64_set_itc(0);

	/* disable all local interrupt sources: */
	ia64_set_itv(1 << 16);
	ia64_set_lrr0(1 << 16);
	ia64_set_lrr1(1 << 16);
	ia64_setreg(_IA64_REG_CR_PMV, 1 << 16);
	ia64_setreg(_IA64_REG_CR_CMCV, 1 << 16);

	/* clear TPR & XTP to enable all interrupt classes: */
	ia64_setreg(_IA64_REG_CR_TPR, 0);

	/* Clear any pending interrupts left by SAL/EFI */
	while (ia64_get_ivr() != IA64_SPURIOUS_INT_VECTOR)
		ia64_eoi();

#ifdef CONFIG_SMP
	normal_xtp();
#endif

	/* set ia64_ctx.max_rid to the maximum RID that is supported by all CPUs: */
	if (ia64_pal_vm_summary(NULL, &vmi) == 0) {
		max_ctx = (1U << (vmi.pal_vm_info_2_s.rid_size - 3)) - 1;
		setup_ptcg_sem(vmi.pal_vm_info_2_s.max_purges, NPTCG_FROM_PAL);
	} else {
		printk(KERN_WARNING "cpu_init: PAL VM summary failed, assuming 18 RID bits\n");
		max_ctx = (1U << 15) - 1;	/* use architected minimum */
	}
	while (max_ctx < ia64_ctx.max_ctx) {
		unsigned int old = ia64_ctx.max_ctx;
		if (cmpxchg(&ia64_ctx.max_ctx, old, max_ctx) == old)
			break;
	}

	if (ia64_pal_rse_info(&num_phys_stacked, NULL) != 0) {
		printk(KERN_WARNING "cpu_init: PAL RSE info failed; assuming 96 physical "
		       "stacked regs\n");
		num_phys_stacked = 96;
	}
	/* size of physical stacked register partition plus 8 bytes: */
	if (num_phys_stacked > max_num_phys_stacked) {
		ia64_patch_phys_stack_reg(num_phys_stacked*8 + 8);
		max_num_phys_stacked = num_phys_stacked;
	}
	platform_cpu_init();
	pm_idle = default_idle;
}

void __init
check_bugs (void)
{
	ia64_patch_mckinley_e9((unsigned long) __start___mckinley_e9_bundles,
			       (unsigned long) __end___mckinley_e9_bundles);
}

static int __init run_dmi_scan(void)
{
	dmi_scan_machine();
	return 0;
}
core_initcall(run_dmi_scan);