1 /* 2 * Architecture-specific setup. 3 * 4 * Copyright (C) 1998-2001, 2003-2004 Hewlett-Packard Co 5 * David Mosberger-Tang <davidm@hpl.hp.com> 6 * Stephane Eranian <eranian@hpl.hp.com> 7 * Copyright (C) 2000, 2004 Intel Corp 8 * Rohit Seth <rohit.seth@intel.com> 9 * Suresh Siddha <suresh.b.siddha@intel.com> 10 * Gordon Jin <gordon.jin@intel.com> 11 * Copyright (C) 1999 VA Linux Systems 12 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com> 13 * 14 * 12/26/04 S.Siddha, G.Jin, R.Seth 15 * Add multi-threading and multi-core detection 16 * 11/12/01 D.Mosberger Convert get_cpuinfo() to seq_file based show_cpuinfo(). 17 * 04/04/00 D.Mosberger renamed cpu_initialized to cpu_online_map 18 * 03/31/00 R.Seth cpu_initialized and current->processor fixes 19 * 02/04/00 D.Mosberger some more get_cpuinfo fixes... 20 * 02/01/00 R.Seth fixed get_cpuinfo for SMP 21 * 01/07/99 S.Eranian added the support for command line argument 22 * 06/24/99 W.Drummond added boot_cpu_data. 23 * 05/28/05 Z. Menyhart Dynamic stride size for "flush_icache_range()" 24 */ 25 #include <linux/module.h> 26 #include <linux/init.h> 27 28 #include <linux/acpi.h> 29 #include <linux/bootmem.h> 30 #include <linux/console.h> 31 #include <linux/delay.h> 32 #include <linux/kernel.h> 33 #include <linux/reboot.h> 34 #include <linux/sched.h> 35 #include <linux/sched/clock.h> 36 #include <linux/sched/task_stack.h> 37 #include <linux/seq_file.h> 38 #include <linux/string.h> 39 #include <linux/threads.h> 40 #include <linux/screen_info.h> 41 #include <linux/dmi.h> 42 #include <linux/serial.h> 43 #include <linux/serial_core.h> 44 #include <linux/efi.h> 45 #include <linux/initrd.h> 46 #include <linux/pm.h> 47 #include <linux/cpufreq.h> 48 #include <linux/kexec.h> 49 #include <linux/crash_dump.h> 50 51 #include <asm/machvec.h> 52 #include <asm/mca.h> 53 #include <asm/meminit.h> 54 #include <asm/page.h> 55 #include <asm/patch.h> 56 #include <asm/pgtable.h> 57 #include <asm/processor.h> 58 #include <asm/sal.h> 59 #include <asm/sections.h> 60 #include <asm/setup.h> 61 #include <asm/smp.h> 62 #include <asm/tlbflush.h> 63 #include <asm/unistd.h> 64 #include <asm/hpsim.h> 65 66 #if defined(CONFIG_SMP) && (IA64_CPU_SIZE > PAGE_SIZE) 67 # error "struct cpuinfo_ia64 too big!" 68 #endif 69 70 #ifdef CONFIG_SMP 71 unsigned long __per_cpu_offset[NR_CPUS]; 72 EXPORT_SYMBOL(__per_cpu_offset); 73 #endif 74 75 DEFINE_PER_CPU(struct cpuinfo_ia64, ia64_cpu_info); 76 EXPORT_SYMBOL(ia64_cpu_info); 77 DEFINE_PER_CPU(unsigned long, local_per_cpu_offset); 78 #ifdef CONFIG_SMP 79 EXPORT_SYMBOL(local_per_cpu_offset); 80 #endif 81 unsigned long ia64_cycles_per_usec; 82 struct ia64_boot_param *ia64_boot_param; 83 struct screen_info screen_info; 84 unsigned long vga_console_iobase; 85 unsigned long vga_console_membase; 86 87 static struct resource data_resource = { 88 .name = "Kernel data", 89 .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM 90 }; 91 92 static struct resource code_resource = { 93 .name = "Kernel code", 94 .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM 95 }; 96 97 static struct resource bss_resource = { 98 .name = "Kernel bss", 99 .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM 100 }; 101 102 unsigned long ia64_max_cacheline_size; 103 104 unsigned long ia64_iobase; /* virtual address for I/O accesses */ 105 EXPORT_SYMBOL(ia64_iobase); 106 struct io_space io_space[MAX_IO_SPACES]; 107 EXPORT_SYMBOL(io_space); 108 unsigned int num_io_spaces; 109 110 /* 111 * "flush_icache_range()" needs to know what processor dependent stride size to use 112 * when it makes i-cache(s) coherent with d-caches. 113 */ 114 #define I_CACHE_STRIDE_SHIFT 5 /* Safest way to go: 32 bytes by 32 bytes */ 115 unsigned long ia64_i_cache_stride_shift = ~0; 116 /* 117 * "clflush_cache_range()" needs to know what processor dependent stride size to 118 * use when it flushes cache lines including both d-cache and i-cache. 119 */ 120 /* Safest way to go: 32 bytes by 32 bytes */ 121 #define CACHE_STRIDE_SHIFT 5 122 unsigned long ia64_cache_stride_shift = ~0; 123 124 /* 125 * The merge_mask variable needs to be set to (max(iommu_page_size(iommu)) - 1). This 126 * mask specifies a mask of address bits that must be 0 in order for two buffers to be 127 * mergeable by the I/O MMU (i.e., the end address of the first buffer and the start 128 * address of the second buffer must be aligned to (merge_mask+1) in order to be 129 * mergeable). By default, we assume there is no I/O MMU which can merge physically 130 * discontiguous buffers, so we set the merge_mask to ~0UL, which corresponds to a iommu 131 * page-size of 2^64. 132 */ 133 unsigned long ia64_max_iommu_merge_mask = ~0UL; 134 EXPORT_SYMBOL(ia64_max_iommu_merge_mask); 135 136 /* 137 * We use a special marker for the end of memory and it uses the extra (+1) slot 138 */ 139 struct rsvd_region rsvd_region[IA64_MAX_RSVD_REGIONS + 1] __initdata; 140 int num_rsvd_regions __initdata; 141 142 143 /* 144 * Filter incoming memory segments based on the primitive map created from the boot 145 * parameters. Segments contained in the map are removed from the memory ranges. A 146 * caller-specified function is called with the memory ranges that remain after filtering. 147 * This routine does not assume the incoming segments are sorted. 148 */ 149 int __init 150 filter_rsvd_memory (u64 start, u64 end, void *arg) 151 { 152 u64 range_start, range_end, prev_start; 153 void (*func)(unsigned long, unsigned long, int); 154 int i; 155 156 #if IGNORE_PFN0 157 if (start == PAGE_OFFSET) { 158 printk(KERN_WARNING "warning: skipping physical page 0\n"); 159 start += PAGE_SIZE; 160 if (start >= end) return 0; 161 } 162 #endif 163 /* 164 * lowest possible address(walker uses virtual) 165 */ 166 prev_start = PAGE_OFFSET; 167 func = arg; 168 169 for (i = 0; i < num_rsvd_regions; ++i) { 170 range_start = max(start, prev_start); 171 range_end = min(end, rsvd_region[i].start); 172 173 if (range_start < range_end) 174 call_pernode_memory(__pa(range_start), range_end - range_start, func); 175 176 /* nothing more available in this segment */ 177 if (range_end == end) return 0; 178 179 prev_start = rsvd_region[i].end; 180 } 181 /* end of memory marker allows full processing inside loop body */ 182 return 0; 183 } 184 185 /* 186 * Similar to "filter_rsvd_memory()", but the reserved memory ranges 187 * are not filtered out. 188 */ 189 int __init 190 filter_memory(u64 start, u64 end, void *arg) 191 { 192 void (*func)(unsigned long, unsigned long, int); 193 194 #if IGNORE_PFN0 195 if (start == PAGE_OFFSET) { 196 printk(KERN_WARNING "warning: skipping physical page 0\n"); 197 start += PAGE_SIZE; 198 if (start >= end) 199 return 0; 200 } 201 #endif 202 func = arg; 203 if (start < end) 204 call_pernode_memory(__pa(start), end - start, func); 205 return 0; 206 } 207 208 static void __init 209 sort_regions (struct rsvd_region *rsvd_region, int max) 210 { 211 int j; 212 213 /* simple bubble sorting */ 214 while (max--) { 215 for (j = 0; j < max; ++j) { 216 if (rsvd_region[j].start > rsvd_region[j+1].start) { 217 struct rsvd_region tmp; 218 tmp = rsvd_region[j]; 219 rsvd_region[j] = rsvd_region[j + 1]; 220 rsvd_region[j + 1] = tmp; 221 } 222 } 223 } 224 } 225 226 /* merge overlaps */ 227 static int __init 228 merge_regions (struct rsvd_region *rsvd_region, int max) 229 { 230 int i; 231 for (i = 1; i < max; ++i) { 232 if (rsvd_region[i].start >= rsvd_region[i-1].end) 233 continue; 234 if (rsvd_region[i].end > rsvd_region[i-1].end) 235 rsvd_region[i-1].end = rsvd_region[i].end; 236 --max; 237 memmove(&rsvd_region[i], &rsvd_region[i+1], 238 (max - i) * sizeof(struct rsvd_region)); 239 } 240 return max; 241 } 242 243 /* 244 * Request address space for all standard resources 245 */ 246 static int __init register_memory(void) 247 { 248 code_resource.start = ia64_tpa(_text); 249 code_resource.end = ia64_tpa(_etext) - 1; 250 data_resource.start = ia64_tpa(_etext); 251 data_resource.end = ia64_tpa(_edata) - 1; 252 bss_resource.start = ia64_tpa(__bss_start); 253 bss_resource.end = ia64_tpa(_end) - 1; 254 efi_initialize_iomem_resources(&code_resource, &data_resource, 255 &bss_resource); 256 257 return 0; 258 } 259 260 __initcall(register_memory); 261 262 263 #ifdef CONFIG_KEXEC 264 265 /* 266 * This function checks if the reserved crashkernel is allowed on the specific 267 * IA64 machine flavour. Machines without an IO TLB use swiotlb and require 268 * some memory below 4 GB (i.e. in 32 bit area), see the implementation of 269 * lib/swiotlb.c. The hpzx1 architecture has an IO TLB but cannot use that 270 * in kdump case. See the comment in sba_init() in sba_iommu.c. 271 * 272 * So, the only machvec that really supports loading the kdump kernel 273 * over 4 GB is "sn2". 274 */ 275 static int __init check_crashkernel_memory(unsigned long pbase, size_t size) 276 { 277 if (ia64_platform_is("sn2") || ia64_platform_is("uv")) 278 return 1; 279 else 280 return pbase < (1UL << 32); 281 } 282 283 static void __init setup_crashkernel(unsigned long total, int *n) 284 { 285 unsigned long long base = 0, size = 0; 286 int ret; 287 288 ret = parse_crashkernel(boot_command_line, total, 289 &size, &base); 290 if (ret == 0 && size > 0) { 291 if (!base) { 292 sort_regions(rsvd_region, *n); 293 *n = merge_regions(rsvd_region, *n); 294 base = kdump_find_rsvd_region(size, 295 rsvd_region, *n); 296 } 297 298 if (!check_crashkernel_memory(base, size)) { 299 pr_warning("crashkernel: There would be kdump memory " 300 "at %ld GB but this is unusable because it " 301 "must\nbe below 4 GB. Change the memory " 302 "configuration of the machine.\n", 303 (unsigned long)(base >> 30)); 304 return; 305 } 306 307 if (base != ~0UL) { 308 printk(KERN_INFO "Reserving %ldMB of memory at %ldMB " 309 "for crashkernel (System RAM: %ldMB)\n", 310 (unsigned long)(size >> 20), 311 (unsigned long)(base >> 20), 312 (unsigned long)(total >> 20)); 313 rsvd_region[*n].start = 314 (unsigned long)__va(base); 315 rsvd_region[*n].end = 316 (unsigned long)__va(base + size); 317 (*n)++; 318 crashk_res.start = base; 319 crashk_res.end = base + size - 1; 320 } 321 } 322 efi_memmap_res.start = ia64_boot_param->efi_memmap; 323 efi_memmap_res.end = efi_memmap_res.start + 324 ia64_boot_param->efi_memmap_size; 325 boot_param_res.start = __pa(ia64_boot_param); 326 boot_param_res.end = boot_param_res.start + 327 sizeof(*ia64_boot_param); 328 } 329 #else 330 static inline void __init setup_crashkernel(unsigned long total, int *n) 331 {} 332 #endif 333 334 /** 335 * reserve_memory - setup reserved memory areas 336 * 337 * Setup the reserved memory areas set aside for the boot parameters, 338 * initrd, etc. There are currently %IA64_MAX_RSVD_REGIONS defined, 339 * see arch/ia64/include/asm/meminit.h if you need to define more. 340 */ 341 void __init 342 reserve_memory (void) 343 { 344 int n = 0; 345 unsigned long total_memory; 346 347 /* 348 * none of the entries in this table overlap 349 */ 350 rsvd_region[n].start = (unsigned long) ia64_boot_param; 351 rsvd_region[n].end = rsvd_region[n].start + sizeof(*ia64_boot_param); 352 n++; 353 354 rsvd_region[n].start = (unsigned long) __va(ia64_boot_param->efi_memmap); 355 rsvd_region[n].end = rsvd_region[n].start + ia64_boot_param->efi_memmap_size; 356 n++; 357 358 rsvd_region[n].start = (unsigned long) __va(ia64_boot_param->command_line); 359 rsvd_region[n].end = (rsvd_region[n].start 360 + strlen(__va(ia64_boot_param->command_line)) + 1); 361 n++; 362 363 rsvd_region[n].start = (unsigned long) ia64_imva((void *)KERNEL_START); 364 rsvd_region[n].end = (unsigned long) ia64_imva(_end); 365 n++; 366 367 #ifdef CONFIG_BLK_DEV_INITRD 368 if (ia64_boot_param->initrd_start) { 369 rsvd_region[n].start = (unsigned long)__va(ia64_boot_param->initrd_start); 370 rsvd_region[n].end = rsvd_region[n].start + ia64_boot_param->initrd_size; 371 n++; 372 } 373 #endif 374 375 #ifdef CONFIG_CRASH_DUMP 376 if (reserve_elfcorehdr(&rsvd_region[n].start, 377 &rsvd_region[n].end) == 0) 378 n++; 379 #endif 380 381 total_memory = efi_memmap_init(&rsvd_region[n].start, &rsvd_region[n].end); 382 n++; 383 384 setup_crashkernel(total_memory, &n); 385 386 /* end of memory marker */ 387 rsvd_region[n].start = ~0UL; 388 rsvd_region[n].end = ~0UL; 389 n++; 390 391 num_rsvd_regions = n; 392 BUG_ON(IA64_MAX_RSVD_REGIONS + 1 < n); 393 394 sort_regions(rsvd_region, num_rsvd_regions); 395 num_rsvd_regions = merge_regions(rsvd_region, num_rsvd_regions); 396 } 397 398 399 /** 400 * find_initrd - get initrd parameters from the boot parameter structure 401 * 402 * Grab the initrd start and end from the boot parameter struct given us by 403 * the boot loader. 404 */ 405 void __init 406 find_initrd (void) 407 { 408 #ifdef CONFIG_BLK_DEV_INITRD 409 if (ia64_boot_param->initrd_start) { 410 initrd_start = (unsigned long)__va(ia64_boot_param->initrd_start); 411 initrd_end = initrd_start+ia64_boot_param->initrd_size; 412 413 printk(KERN_INFO "Initial ramdisk at: 0x%lx (%llu bytes)\n", 414 initrd_start, ia64_boot_param->initrd_size); 415 } 416 #endif 417 } 418 419 static void __init 420 io_port_init (void) 421 { 422 unsigned long phys_iobase; 423 424 /* 425 * Set `iobase' based on the EFI memory map or, failing that, the 426 * value firmware left in ar.k0. 427 * 428 * Note that in ia32 mode, IN/OUT instructions use ar.k0 to compute 429 * the port's virtual address, so ia32_load_state() loads it with a 430 * user virtual address. But in ia64 mode, glibc uses the 431 * *physical* address in ar.k0 to mmap the appropriate area from 432 * /dev/mem, and the inX()/outX() interfaces use MMIO. In both 433 * cases, user-mode can only use the legacy 0-64K I/O port space. 434 * 435 * ar.k0 is not involved in kernel I/O port accesses, which can use 436 * any of the I/O port spaces and are done via MMIO using the 437 * virtual mmio_base from the appropriate io_space[]. 438 */ 439 phys_iobase = efi_get_iobase(); 440 if (!phys_iobase) { 441 phys_iobase = ia64_get_kr(IA64_KR_IO_BASE); 442 printk(KERN_INFO "No I/O port range found in EFI memory map, " 443 "falling back to AR.KR0 (0x%lx)\n", phys_iobase); 444 } 445 ia64_iobase = (unsigned long) ioremap(phys_iobase, 0); 446 ia64_set_kr(IA64_KR_IO_BASE, __pa(ia64_iobase)); 447 448 /* setup legacy IO port space */ 449 io_space[0].mmio_base = ia64_iobase; 450 io_space[0].sparse = 1; 451 num_io_spaces = 1; 452 } 453 454 /** 455 * early_console_setup - setup debugging console 456 * 457 * Consoles started here require little enough setup that we can start using 458 * them very early in the boot process, either right after the machine 459 * vector initialization, or even before if the drivers can detect their hw. 460 * 461 * Returns non-zero if a console couldn't be setup. 462 */ 463 static inline int __init 464 early_console_setup (char *cmdline) 465 { 466 int earlycons = 0; 467 468 #ifdef CONFIG_SERIAL_SGI_L1_CONSOLE 469 { 470 extern int sn_serial_console_early_setup(void); 471 if (!sn_serial_console_early_setup()) 472 earlycons++; 473 } 474 #endif 475 #ifdef CONFIG_EFI_PCDP 476 if (!efi_setup_pcdp_console(cmdline)) 477 earlycons++; 478 #endif 479 if (!simcons_register()) 480 earlycons++; 481 482 return (earlycons) ? 0 : -1; 483 } 484 485 static inline void 486 mark_bsp_online (void) 487 { 488 #ifdef CONFIG_SMP 489 /* If we register an early console, allow CPU 0 to printk */ 490 set_cpu_online(smp_processor_id(), true); 491 #endif 492 } 493 494 static __initdata int nomca; 495 static __init int setup_nomca(char *s) 496 { 497 nomca = 1; 498 return 0; 499 } 500 early_param("nomca", setup_nomca); 501 502 #ifdef CONFIG_CRASH_DUMP 503 int __init reserve_elfcorehdr(u64 *start, u64 *end) 504 { 505 u64 length; 506 507 /* We get the address using the kernel command line, 508 * but the size is extracted from the EFI tables. 509 * Both address and size are required for reservation 510 * to work properly. 511 */ 512 513 if (!is_vmcore_usable()) 514 return -EINVAL; 515 516 if ((length = vmcore_find_descriptor_size(elfcorehdr_addr)) == 0) { 517 vmcore_unusable(); 518 return -EINVAL; 519 } 520 521 *start = (unsigned long)__va(elfcorehdr_addr); 522 *end = *start + length; 523 return 0; 524 } 525 526 #endif /* CONFIG_PROC_VMCORE */ 527 528 void __init 529 setup_arch (char **cmdline_p) 530 { 531 unw_init(); 532 533 ia64_patch_vtop((u64) __start___vtop_patchlist, (u64) __end___vtop_patchlist); 534 535 *cmdline_p = __va(ia64_boot_param->command_line); 536 strlcpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE); 537 538 efi_init(); 539 io_port_init(); 540 541 #ifdef CONFIG_IA64_GENERIC 542 /* machvec needs to be parsed from the command line 543 * before parse_early_param() is called to ensure 544 * that ia64_mv is initialised before any command line 545 * settings may cause console setup to occur 546 */ 547 machvec_init_from_cmdline(*cmdline_p); 548 #endif 549 550 parse_early_param(); 551 552 if (early_console_setup(*cmdline_p) == 0) 553 mark_bsp_online(); 554 555 #ifdef CONFIG_ACPI 556 /* Initialize the ACPI boot-time table parser */ 557 acpi_table_init(); 558 early_acpi_boot_init(); 559 # ifdef CONFIG_ACPI_NUMA 560 acpi_numa_init(); 561 acpi_numa_fixup(); 562 # ifdef CONFIG_ACPI_HOTPLUG_CPU 563 prefill_possible_map(); 564 # endif 565 per_cpu_scan_finalize((cpumask_weight(&early_cpu_possible_map) == 0 ? 566 32 : cpumask_weight(&early_cpu_possible_map)), 567 additional_cpus > 0 ? additional_cpus : 0); 568 # endif 569 #endif /* CONFIG_APCI_BOOT */ 570 571 #ifdef CONFIG_SMP 572 smp_build_cpu_map(); 573 #endif 574 find_memory(); 575 576 /* process SAL system table: */ 577 ia64_sal_init(__va(efi.sal_systab)); 578 579 #ifdef CONFIG_ITANIUM 580 ia64_patch_rse((u64) __start___rse_patchlist, (u64) __end___rse_patchlist); 581 #else 582 { 583 unsigned long num_phys_stacked; 584 585 if (ia64_pal_rse_info(&num_phys_stacked, 0) == 0 && num_phys_stacked > 96) 586 ia64_patch_rse((u64) __start___rse_patchlist, (u64) __end___rse_patchlist); 587 } 588 #endif 589 590 #ifdef CONFIG_SMP 591 cpu_physical_id(0) = hard_smp_processor_id(); 592 #endif 593 594 cpu_init(); /* initialize the bootstrap CPU */ 595 mmu_context_init(); /* initialize context_id bitmap */ 596 597 #ifdef CONFIG_VT 598 if (!conswitchp) { 599 # if defined(CONFIG_DUMMY_CONSOLE) 600 conswitchp = &dummy_con; 601 # endif 602 # if defined(CONFIG_VGA_CONSOLE) 603 /* 604 * Non-legacy systems may route legacy VGA MMIO range to system 605 * memory. vga_con probes the MMIO hole, so memory looks like 606 * a VGA device to it. The EFI memory map can tell us if it's 607 * memory so we can avoid this problem. 608 */ 609 if (efi_mem_type(0xA0000) != EFI_CONVENTIONAL_MEMORY) 610 conswitchp = &vga_con; 611 # endif 612 } 613 #endif 614 615 /* enable IA-64 Machine Check Abort Handling unless disabled */ 616 if (!nomca) 617 ia64_mca_init(); 618 619 platform_setup(cmdline_p); 620 #ifndef CONFIG_IA64_HP_SIM 621 check_sal_cache_flush(); 622 #endif 623 paging_init(); 624 625 clear_sched_clock_stable(); 626 } 627 628 /* 629 * Display cpu info for all CPUs. 630 */ 631 static int 632 show_cpuinfo (struct seq_file *m, void *v) 633 { 634 #ifdef CONFIG_SMP 635 # define lpj c->loops_per_jiffy 636 # define cpunum c->cpu 637 #else 638 # define lpj loops_per_jiffy 639 # define cpunum 0 640 #endif 641 static struct { 642 unsigned long mask; 643 const char *feature_name; 644 } feature_bits[] = { 645 { 1UL << 0, "branchlong" }, 646 { 1UL << 1, "spontaneous deferral"}, 647 { 1UL << 2, "16-byte atomic ops" } 648 }; 649 char features[128], *cp, *sep; 650 struct cpuinfo_ia64 *c = v; 651 unsigned long mask; 652 unsigned long proc_freq; 653 int i, size; 654 655 mask = c->features; 656 657 /* build the feature string: */ 658 memcpy(features, "standard", 9); 659 cp = features; 660 size = sizeof(features); 661 sep = ""; 662 for (i = 0; i < ARRAY_SIZE(feature_bits) && size > 1; ++i) { 663 if (mask & feature_bits[i].mask) { 664 cp += snprintf(cp, size, "%s%s", sep, 665 feature_bits[i].feature_name), 666 sep = ", "; 667 mask &= ~feature_bits[i].mask; 668 size = sizeof(features) - (cp - features); 669 } 670 } 671 if (mask && size > 1) { 672 /* print unknown features as a hex value */ 673 snprintf(cp, size, "%s0x%lx", sep, mask); 674 } 675 676 proc_freq = cpufreq_quick_get(cpunum); 677 if (!proc_freq) 678 proc_freq = c->proc_freq / 1000; 679 680 seq_printf(m, 681 "processor : %d\n" 682 "vendor : %s\n" 683 "arch : IA-64\n" 684 "family : %u\n" 685 "model : %u\n" 686 "model name : %s\n" 687 "revision : %u\n" 688 "archrev : %u\n" 689 "features : %s\n" 690 "cpu number : %lu\n" 691 "cpu regs : %u\n" 692 "cpu MHz : %lu.%03lu\n" 693 "itc MHz : %lu.%06lu\n" 694 "BogoMIPS : %lu.%02lu\n", 695 cpunum, c->vendor, c->family, c->model, 696 c->model_name, c->revision, c->archrev, 697 features, c->ppn, c->number, 698 proc_freq / 1000, proc_freq % 1000, 699 c->itc_freq / 1000000, c->itc_freq % 1000000, 700 lpj*HZ/500000, (lpj*HZ/5000) % 100); 701 #ifdef CONFIG_SMP 702 seq_printf(m, "siblings : %u\n", 703 cpumask_weight(&cpu_core_map[cpunum])); 704 if (c->socket_id != -1) 705 seq_printf(m, "physical id: %u\n", c->socket_id); 706 if (c->threads_per_core > 1 || c->cores_per_socket > 1) 707 seq_printf(m, 708 "core id : %u\n" 709 "thread id : %u\n", 710 c->core_id, c->thread_id); 711 #endif 712 seq_printf(m,"\n"); 713 714 return 0; 715 } 716 717 static void * 718 c_start (struct seq_file *m, loff_t *pos) 719 { 720 #ifdef CONFIG_SMP 721 while (*pos < nr_cpu_ids && !cpu_online(*pos)) 722 ++*pos; 723 #endif 724 return *pos < nr_cpu_ids ? cpu_data(*pos) : NULL; 725 } 726 727 static void * 728 c_next (struct seq_file *m, void *v, loff_t *pos) 729 { 730 ++*pos; 731 return c_start(m, pos); 732 } 733 734 static void 735 c_stop (struct seq_file *m, void *v) 736 { 737 } 738 739 const struct seq_operations cpuinfo_op = { 740 .start = c_start, 741 .next = c_next, 742 .stop = c_stop, 743 .show = show_cpuinfo 744 }; 745 746 #define MAX_BRANDS 8 747 static char brandname[MAX_BRANDS][128]; 748 749 static char * 750 get_model_name(__u8 family, __u8 model) 751 { 752 static int overflow; 753 char brand[128]; 754 int i; 755 756 memcpy(brand, "Unknown", 8); 757 if (ia64_pal_get_brand_info(brand)) { 758 if (family == 0x7) 759 memcpy(brand, "Merced", 7); 760 else if (family == 0x1f) switch (model) { 761 case 0: memcpy(brand, "McKinley", 9); break; 762 case 1: memcpy(brand, "Madison", 8); break; 763 case 2: memcpy(brand, "Madison up to 9M cache", 23); break; 764 } 765 } 766 for (i = 0; i < MAX_BRANDS; i++) 767 if (strcmp(brandname[i], brand) == 0) 768 return brandname[i]; 769 for (i = 0; i < MAX_BRANDS; i++) 770 if (brandname[i][0] == '\0') 771 return strcpy(brandname[i], brand); 772 if (overflow++ == 0) 773 printk(KERN_ERR 774 "%s: Table overflow. Some processor model information will be missing\n", 775 __func__); 776 return "Unknown"; 777 } 778 779 static void 780 identify_cpu (struct cpuinfo_ia64 *c) 781 { 782 union { 783 unsigned long bits[5]; 784 struct { 785 /* id 0 & 1: */ 786 char vendor[16]; 787 788 /* id 2 */ 789 u64 ppn; /* processor serial number */ 790 791 /* id 3: */ 792 unsigned number : 8; 793 unsigned revision : 8; 794 unsigned model : 8; 795 unsigned family : 8; 796 unsigned archrev : 8; 797 unsigned reserved : 24; 798 799 /* id 4: */ 800 u64 features; 801 } field; 802 } cpuid; 803 pal_vm_info_1_u_t vm1; 804 pal_vm_info_2_u_t vm2; 805 pal_status_t status; 806 unsigned long impl_va_msb = 50, phys_addr_size = 44; /* Itanium defaults */ 807 int i; 808 for (i = 0; i < 5; ++i) 809 cpuid.bits[i] = ia64_get_cpuid(i); 810 811 memcpy(c->vendor, cpuid.field.vendor, 16); 812 #ifdef CONFIG_SMP 813 c->cpu = smp_processor_id(); 814 815 /* below default values will be overwritten by identify_siblings() 816 * for Multi-Threading/Multi-Core capable CPUs 817 */ 818 c->threads_per_core = c->cores_per_socket = c->num_log = 1; 819 c->socket_id = -1; 820 821 identify_siblings(c); 822 823 if (c->threads_per_core > smp_num_siblings) 824 smp_num_siblings = c->threads_per_core; 825 #endif 826 c->ppn = cpuid.field.ppn; 827 c->number = cpuid.field.number; 828 c->revision = cpuid.field.revision; 829 c->model = cpuid.field.model; 830 c->family = cpuid.field.family; 831 c->archrev = cpuid.field.archrev; 832 c->features = cpuid.field.features; 833 c->model_name = get_model_name(c->family, c->model); 834 835 status = ia64_pal_vm_summary(&vm1, &vm2); 836 if (status == PAL_STATUS_SUCCESS) { 837 impl_va_msb = vm2.pal_vm_info_2_s.impl_va_msb; 838 phys_addr_size = vm1.pal_vm_info_1_s.phys_add_size; 839 } 840 c->unimpl_va_mask = ~((7L<<61) | ((1L << (impl_va_msb + 1)) - 1)); 841 c->unimpl_pa_mask = ~((1L<<63) | ((1L << phys_addr_size) - 1)); 842 } 843 844 /* 845 * Do the following calculations: 846 * 847 * 1. the max. cache line size. 848 * 2. the minimum of the i-cache stride sizes for "flush_icache_range()". 849 * 3. the minimum of the cache stride sizes for "clflush_cache_range()". 850 */ 851 static void 852 get_cache_info(void) 853 { 854 unsigned long line_size, max = 1; 855 unsigned long l, levels, unique_caches; 856 pal_cache_config_info_t cci; 857 long status; 858 859 status = ia64_pal_cache_summary(&levels, &unique_caches); 860 if (status != 0) { 861 printk(KERN_ERR "%s: ia64_pal_cache_summary() failed (status=%ld)\n", 862 __func__, status); 863 max = SMP_CACHE_BYTES; 864 /* Safest setup for "flush_icache_range()" */ 865 ia64_i_cache_stride_shift = I_CACHE_STRIDE_SHIFT; 866 /* Safest setup for "clflush_cache_range()" */ 867 ia64_cache_stride_shift = CACHE_STRIDE_SHIFT; 868 goto out; 869 } 870 871 for (l = 0; l < levels; ++l) { 872 /* cache_type (data_or_unified)=2 */ 873 status = ia64_pal_cache_config_info(l, 2, &cci); 874 if (status != 0) { 875 printk(KERN_ERR "%s: ia64_pal_cache_config_info" 876 "(l=%lu, 2) failed (status=%ld)\n", 877 __func__, l, status); 878 max = SMP_CACHE_BYTES; 879 /* The safest setup for "flush_icache_range()" */ 880 cci.pcci_stride = I_CACHE_STRIDE_SHIFT; 881 /* The safest setup for "clflush_cache_range()" */ 882 ia64_cache_stride_shift = CACHE_STRIDE_SHIFT; 883 cci.pcci_unified = 1; 884 } else { 885 if (cci.pcci_stride < ia64_cache_stride_shift) 886 ia64_cache_stride_shift = cci.pcci_stride; 887 888 line_size = 1 << cci.pcci_line_size; 889 if (line_size > max) 890 max = line_size; 891 } 892 893 if (!cci.pcci_unified) { 894 /* cache_type (instruction)=1*/ 895 status = ia64_pal_cache_config_info(l, 1, &cci); 896 if (status != 0) { 897 printk(KERN_ERR "%s: ia64_pal_cache_config_info" 898 "(l=%lu, 1) failed (status=%ld)\n", 899 __func__, l, status); 900 /* The safest setup for flush_icache_range() */ 901 cci.pcci_stride = I_CACHE_STRIDE_SHIFT; 902 } 903 } 904 if (cci.pcci_stride < ia64_i_cache_stride_shift) 905 ia64_i_cache_stride_shift = cci.pcci_stride; 906 } 907 out: 908 if (max > ia64_max_cacheline_size) 909 ia64_max_cacheline_size = max; 910 } 911 912 /* 913 * cpu_init() initializes state that is per-CPU. This function acts 914 * as a 'CPU state barrier', nothing should get across. 915 */ 916 void 917 cpu_init (void) 918 { 919 extern void ia64_mmu_init(void *); 920 static unsigned long max_num_phys_stacked = IA64_NUM_PHYS_STACK_REG; 921 unsigned long num_phys_stacked; 922 pal_vm_info_2_u_t vmi; 923 unsigned int max_ctx; 924 struct cpuinfo_ia64 *cpu_info; 925 void *cpu_data; 926 927 cpu_data = per_cpu_init(); 928 #ifdef CONFIG_SMP 929 /* 930 * insert boot cpu into sibling and core mapes 931 * (must be done after per_cpu area is setup) 932 */ 933 if (smp_processor_id() == 0) { 934 cpumask_set_cpu(0, &per_cpu(cpu_sibling_map, 0)); 935 cpumask_set_cpu(0, &cpu_core_map[0]); 936 } else { 937 /* 938 * Set ar.k3 so that assembly code in MCA handler can compute 939 * physical addresses of per cpu variables with a simple: 940 * phys = ar.k3 + &per_cpu_var 941 * and the alt-dtlb-miss handler can set per-cpu mapping into 942 * the TLB when needed. head.S already did this for cpu0. 943 */ 944 ia64_set_kr(IA64_KR_PER_CPU_DATA, 945 ia64_tpa(cpu_data) - (long) __per_cpu_start); 946 } 947 #endif 948 949 get_cache_info(); 950 951 /* 952 * We can't pass "local_cpu_data" to identify_cpu() because we haven't called 953 * ia64_mmu_init() yet. And we can't call ia64_mmu_init() first because it 954 * depends on the data returned by identify_cpu(). We break the dependency by 955 * accessing cpu_data() through the canonical per-CPU address. 956 */ 957 cpu_info = cpu_data + ((char *) &__ia64_per_cpu_var(ia64_cpu_info) - __per_cpu_start); 958 identify_cpu(cpu_info); 959 960 #ifdef CONFIG_MCKINLEY 961 { 962 # define FEATURE_SET 16 963 struct ia64_pal_retval iprv; 964 965 if (cpu_info->family == 0x1f) { 966 PAL_CALL_PHYS(iprv, PAL_PROC_GET_FEATURES, 0, FEATURE_SET, 0); 967 if ((iprv.status == 0) && (iprv.v0 & 0x80) && (iprv.v2 & 0x80)) 968 PAL_CALL_PHYS(iprv, PAL_PROC_SET_FEATURES, 969 (iprv.v1 | 0x80), FEATURE_SET, 0); 970 } 971 } 972 #endif 973 974 /* Clear the stack memory reserved for pt_regs: */ 975 memset(task_pt_regs(current), 0, sizeof(struct pt_regs)); 976 977 ia64_set_kr(IA64_KR_FPU_OWNER, 0); 978 979 /* 980 * Initialize the page-table base register to a global 981 * directory with all zeroes. This ensure that we can handle 982 * TLB-misses to user address-space even before we created the 983 * first user address-space. This may happen, e.g., due to 984 * aggressive use of lfetch.fault. 985 */ 986 ia64_set_kr(IA64_KR_PT_BASE, __pa(ia64_imva(empty_zero_page))); 987 988 /* 989 * Initialize default control register to defer speculative faults except 990 * for those arising from TLB misses, which are not deferred. The 991 * kernel MUST NOT depend on a particular setting of these bits (in other words, 992 * the kernel must have recovery code for all speculative accesses). Turn on 993 * dcr.lc as per recommendation by the architecture team. Most IA-32 apps 994 * shouldn't be affected by this (moral: keep your ia32 locks aligned and you'll 995 * be fine). 996 */ 997 ia64_setreg(_IA64_REG_CR_DCR, ( IA64_DCR_DP | IA64_DCR_DK | IA64_DCR_DX | IA64_DCR_DR 998 | IA64_DCR_DA | IA64_DCR_DD | IA64_DCR_LC)); 999 mmgrab(&init_mm); 1000 current->active_mm = &init_mm; 1001 BUG_ON(current->mm); 1002 1003 ia64_mmu_init(ia64_imva(cpu_data)); 1004 ia64_mca_cpu_init(ia64_imva(cpu_data)); 1005 1006 /* Clear ITC to eliminate sched_clock() overflows in human time. */ 1007 ia64_set_itc(0); 1008 1009 /* disable all local interrupt sources: */ 1010 ia64_set_itv(1 << 16); 1011 ia64_set_lrr0(1 << 16); 1012 ia64_set_lrr1(1 << 16); 1013 ia64_setreg(_IA64_REG_CR_PMV, 1 << 16); 1014 ia64_setreg(_IA64_REG_CR_CMCV, 1 << 16); 1015 1016 /* clear TPR & XTP to enable all interrupt classes: */ 1017 ia64_setreg(_IA64_REG_CR_TPR, 0); 1018 1019 /* Clear any pending interrupts left by SAL/EFI */ 1020 while (ia64_get_ivr() != IA64_SPURIOUS_INT_VECTOR) 1021 ia64_eoi(); 1022 1023 #ifdef CONFIG_SMP 1024 normal_xtp(); 1025 #endif 1026 1027 /* set ia64_ctx.max_rid to the maximum RID that is supported by all CPUs: */ 1028 if (ia64_pal_vm_summary(NULL, &vmi) == 0) { 1029 max_ctx = (1U << (vmi.pal_vm_info_2_s.rid_size - 3)) - 1; 1030 setup_ptcg_sem(vmi.pal_vm_info_2_s.max_purges, NPTCG_FROM_PAL); 1031 } else { 1032 printk(KERN_WARNING "cpu_init: PAL VM summary failed, assuming 18 RID bits\n"); 1033 max_ctx = (1U << 15) - 1; /* use architected minimum */ 1034 } 1035 while (max_ctx < ia64_ctx.max_ctx) { 1036 unsigned int old = ia64_ctx.max_ctx; 1037 if (cmpxchg(&ia64_ctx.max_ctx, old, max_ctx) == old) 1038 break; 1039 } 1040 1041 if (ia64_pal_rse_info(&num_phys_stacked, NULL) != 0) { 1042 printk(KERN_WARNING "cpu_init: PAL RSE info failed; assuming 96 physical " 1043 "stacked regs\n"); 1044 num_phys_stacked = 96; 1045 } 1046 /* size of physical stacked register partition plus 8 bytes: */ 1047 if (num_phys_stacked > max_num_phys_stacked) { 1048 ia64_patch_phys_stack_reg(num_phys_stacked*8 + 8); 1049 max_num_phys_stacked = num_phys_stacked; 1050 } 1051 platform_cpu_init(); 1052 } 1053 1054 void __init 1055 check_bugs (void) 1056 { 1057 ia64_patch_mckinley_e9((unsigned long) __start___mckinley_e9_bundles, 1058 (unsigned long) __end___mckinley_e9_bundles); 1059 } 1060 1061 static int __init run_dmi_scan(void) 1062 { 1063 dmi_scan_machine(); 1064 dmi_memdev_walk(); 1065 dmi_set_dump_stack_arch_desc(); 1066 return 0; 1067 } 1068 core_initcall(run_dmi_scan); 1069