/*
 * Architecture-specific setup.
 *
 * Copyright (C) 1998-2001, 2003-2004 Hewlett-Packard Co
 *      David Mosberger-Tang <davidm@hpl.hp.com>
 *      Stephane Eranian <eranian@hpl.hp.com>
 * Copyright (C) 2000, 2004 Intel Corp
 *      Rohit Seth <rohit.seth@intel.com>
 *      Suresh Siddha <suresh.b.siddha@intel.com>
 *      Gordon Jin <gordon.jin@intel.com>
 * Copyright (C) 1999 VA Linux Systems
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 *
 * 12/26/04 S.Siddha, G.Jin, R.Seth
 *                      Add multi-threading and multi-core detection
 * 11/12/01 D.Mosberger Convert get_cpuinfo() to seq_file based show_cpuinfo().
 * 04/04/00 D.Mosberger renamed cpu_initialized to cpu_online_map
 * 03/31/00 R.Seth      cpu_initialized and current->processor fixes
 * 02/04/00 D.Mosberger some more get_cpuinfo fixes...
 * 02/01/00 R.Seth      fixed get_cpuinfo for SMP
 * 01/07/99 S.Eranian   added the support for command line argument
 * 06/24/99 W.Drummond  added boot_cpu_data.
 * 05/28/05 Z. Menyhart Dynamic stride size for "flush_icache_range()"
 */
#include <linux/module.h>
#include <linux/init.h>

#include <linux/acpi.h>
#include <linux/bootmem.h>
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/reboot.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/threads.h>
#include <linux/screen_info.h>
#include <linux/dmi.h>
#include <linux/serial.h>
#include <linux/serial_core.h>
#include <linux/efi.h>
#include <linux/initrd.h>
#include <linux/pm.h>
#include <linux/cpufreq.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>

#include <asm/ia32.h>
#include <asm/machvec.h>
#include <asm/mca.h>
#include <asm/meminit.h>
#include <asm/page.h>
#include <asm/patch.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/sal.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp.h>
#include <asm/system.h>
#include <asm/unistd.h>
#include <asm/hpsim.h>

#if defined(CONFIG_SMP) && (IA64_CPU_SIZE > PAGE_SIZE)
# error "struct cpuinfo_ia64 too big!"
#endif

#ifdef CONFIG_SMP
unsigned long __per_cpu_offset[NR_CPUS];
EXPORT_SYMBOL(__per_cpu_offset);
#endif

DEFINE_PER_CPU(struct cpuinfo_ia64, cpu_info);
DEFINE_PER_CPU(unsigned long, local_per_cpu_offset);
unsigned long ia64_cycles_per_usec;
struct ia64_boot_param *ia64_boot_param;
struct screen_info screen_info;
unsigned long vga_console_iobase;
unsigned long vga_console_membase;

static struct resource data_resource = {
        .name   = "Kernel data",
        .flags  = IORESOURCE_BUSY | IORESOURCE_MEM
};

static struct resource code_resource = {
        .name   = "Kernel code",
        .flags  = IORESOURCE_BUSY | IORESOURCE_MEM
};

static struct resource bss_resource = {
        .name   = "Kernel bss",
        .flags  = IORESOURCE_BUSY | IORESOURCE_MEM
};

unsigned long ia64_max_cacheline_size;

int dma_get_cache_alignment(void)
{
        return ia64_max_cacheline_size;
}
EXPORT_SYMBOL(dma_get_cache_alignment);

unsigned long ia64_iobase;      /* virtual address for I/O accesses */
EXPORT_SYMBOL(ia64_iobase);
struct io_space io_space[MAX_IO_SPACES];
EXPORT_SYMBOL(io_space);
unsigned int num_io_spaces;

/*
 * "flush_icache_range()" needs to know what processor dependent stride size to use
 * when it makes i-cache(s) coherent with d-caches.
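 *
 * The stride shift is initialized to all ones; get_max_cacheline_size()
 * then lowers it to the minimum stride reported by PAL for any cache level.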
 */
#define I_CACHE_STRIDE_SHIFT    5       /* Safest way to go: 32 bytes by 32 bytes */
unsigned long ia64_i_cache_stride_shift = ~0;

/*
 * The merge_mask variable needs to be set to (max(iommu_page_size(iommu)) - 1).  This
 * mask specifies a mask of address bits that must be 0 in order for two buffers to be
 * mergeable by the I/O MMU (i.e., the end address of the first buffer and the start
 * address of the second buffer must be aligned to (merge_mask+1) in order to be
 * mergeable).  By default, we assume there is no I/O MMU which can merge physically
 * discontiguous buffers, so we set the merge_mask to ~0UL, which corresponds to an
 * iommu page-size of 2^64.
 */
unsigned long ia64_max_iommu_merge_mask = ~0UL;
EXPORT_SYMBOL(ia64_max_iommu_merge_mask);

/*
 * We use a special marker for the end of memory and it uses the extra (+1) slot
 */
struct rsvd_region rsvd_region[IA64_MAX_RSVD_REGIONS + 1] __initdata;
int num_rsvd_regions __initdata;


/*
 * Filter incoming memory segments based on the primitive map created from the boot
 * parameters.  Segments contained in the map are removed from the memory ranges.  A
 * caller-specified function is called with the memory ranges that remain after filtering.
 * This routine does not assume the incoming segments are sorted.
 */
int __init
filter_rsvd_memory (unsigned long start, unsigned long end, void *arg)
{
        unsigned long range_start, range_end, prev_start;
        void (*func)(unsigned long, unsigned long, int);
        int i;

#if IGNORE_PFN0
        if (start == PAGE_OFFSET) {
                printk(KERN_WARNING "warning: skipping physical page 0\n");
                start += PAGE_SIZE;
                if (start >= end) return 0;
        }
#endif
        /*
         * lowest possible address (walker uses virtual)
         */
        prev_start = PAGE_OFFSET;
        func = arg;

        for (i = 0; i < num_rsvd_regions; ++i) {
                range_start = max(start, prev_start);
                range_end   = min(end, rsvd_region[i].start);

                if (range_start < range_end)
                        call_pernode_memory(__pa(range_start), range_end - range_start, func);

                /* nothing more available in this segment */
                if (range_end == end) return 0;

                prev_start = rsvd_region[i].end;
        }
        /* end of memory marker allows full processing inside loop body */
        return 0;
}

/*
 * Similar to "filter_rsvd_memory()", but the reserved memory ranges
 * are not filtered out.
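 * Apart from the optional skip of physical page 0, every segment is passed
 * to the callback unmodified.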
 */
int __init
filter_memory(unsigned long start, unsigned long end, void *arg)
{
        void (*func)(unsigned long, unsigned long, int);

#if IGNORE_PFN0
        if (start == PAGE_OFFSET) {
                printk(KERN_WARNING "warning: skipping physical page 0\n");
                start += PAGE_SIZE;
                if (start >= end)
                        return 0;
        }
#endif
        func = arg;
        if (start < end)
                call_pernode_memory(__pa(start), end - start, func);
        return 0;
}

static void __init
sort_regions (struct rsvd_region *rsvd_region, int max)
{
        int j;

        /* simple bubble sorting */
        while (max--) {
                for (j = 0; j < max; ++j) {
                        if (rsvd_region[j].start > rsvd_region[j+1].start) {
                                struct rsvd_region tmp;
                                tmp = rsvd_region[j];
                                rsvd_region[j] = rsvd_region[j + 1];
                                rsvd_region[j + 1] = tmp;
                        }
                }
        }
}

/*
 * Request address space for all standard resources
 */
static int __init register_memory(void)
{
        code_resource.start = ia64_tpa(_text);
        code_resource.end   = ia64_tpa(_etext) - 1;
        data_resource.start = ia64_tpa(_etext);
        data_resource.end   = ia64_tpa(_edata) - 1;
        bss_resource.start  = ia64_tpa(__bss_start);
        bss_resource.end    = ia64_tpa(_end) - 1;
        efi_initialize_iomem_resources(&code_resource, &data_resource,
                        &bss_resource);

        return 0;
}

__initcall(register_memory);


#ifdef CONFIG_KEXEC
static void __init setup_crashkernel(unsigned long total, int *n)
{
        unsigned long long base = 0, size = 0;
        int ret;

        ret = parse_crashkernel(boot_command_line, total,
                        &size, &base);
        if (ret == 0 && size > 0) {
                if (!base) {
                        sort_regions(rsvd_region, *n);
                        base = kdump_find_rsvd_region(size,
                                        rsvd_region, *n);
                }
                if (base != ~0UL) {
                        printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
                                        "for crashkernel (System RAM: %ldMB)\n",
                                        (unsigned long)(size >> 20),
                                        (unsigned long)(base >> 20),
                                        (unsigned long)(total >> 20));
                        rsvd_region[*n].start =
                                (unsigned long)__va(base);
                        rsvd_region[*n].end =
                                (unsigned long)__va(base + size);
                        (*n)++;
                        crashk_res.start = base;
                        crashk_res.end = base + size - 1;
                }
        }
        efi_memmap_res.start = ia64_boot_param->efi_memmap;
        efi_memmap_res.end = efi_memmap_res.start +
                ia64_boot_param->efi_memmap_size;
        boot_param_res.start = __pa(ia64_boot_param);
        boot_param_res.end = boot_param_res.start +
                sizeof(*ia64_boot_param);
}
#else
static inline void __init setup_crashkernel(unsigned long total, int *n)
{}
#endif

/**
 * reserve_memory - setup reserved memory areas
 *
 * Set up the reserved memory areas set aside for the boot parameters,
 * initrd, etc.  There are currently %IA64_MAX_RSVD_REGIONS defined,
 * see include/asm-ia64/meminit.h if you need to define more.
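 *
 * The table is sorted by start address before use and is terminated by an
 * end-of-memory marker stored in the extra (+1) slot.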
 */
void __init
reserve_memory (void)
{
        int n = 0;
        unsigned long total_memory;

        /*
         * none of the entries in this table overlap
         */
        rsvd_region[n].start = (unsigned long) ia64_boot_param;
        rsvd_region[n].end   = rsvd_region[n].start + sizeof(*ia64_boot_param);
        n++;

        rsvd_region[n].start = (unsigned long) __va(ia64_boot_param->efi_memmap);
        rsvd_region[n].end   = rsvd_region[n].start + ia64_boot_param->efi_memmap_size;
        n++;

        rsvd_region[n].start = (unsigned long) __va(ia64_boot_param->command_line);
        rsvd_region[n].end   = (rsvd_region[n].start
                                + strlen(__va(ia64_boot_param->command_line)) + 1);
        n++;

        rsvd_region[n].start = (unsigned long) ia64_imva((void *)KERNEL_START);
        rsvd_region[n].end   = (unsigned long) ia64_imva(_end);
        n++;

#ifdef CONFIG_BLK_DEV_INITRD
        if (ia64_boot_param->initrd_start) {
                rsvd_region[n].start = (unsigned long)__va(ia64_boot_param->initrd_start);
                rsvd_region[n].end   = rsvd_region[n].start + ia64_boot_param->initrd_size;
                n++;
        }
#endif

#ifdef CONFIG_PROC_VMCORE
        if (reserve_elfcorehdr(&rsvd_region[n].start,
                               &rsvd_region[n].end) == 0)
                n++;
#endif

        total_memory = efi_memmap_init(&rsvd_region[n].start, &rsvd_region[n].end);
        n++;

        setup_crashkernel(total_memory, &n);

        /* end of memory marker */
        rsvd_region[n].start = ~0UL;
        rsvd_region[n].end   = ~0UL;
        n++;

        num_rsvd_regions = n;
        BUG_ON(IA64_MAX_RSVD_REGIONS + 1 < n);

        sort_regions(rsvd_region, num_rsvd_regions);
}


/**
 * find_initrd - get initrd parameters from the boot parameter structure
 *
 * Grab the initrd start and end from the boot parameter struct given to us
 * by the boot loader.
 */
void __init
find_initrd (void)
{
#ifdef CONFIG_BLK_DEV_INITRD
        if (ia64_boot_param->initrd_start) {
                initrd_start = (unsigned long)__va(ia64_boot_param->initrd_start);
                initrd_end   = initrd_start+ia64_boot_param->initrd_size;

                printk(KERN_INFO "Initial ramdisk at: 0x%lx (%lu bytes)\n",
                       initrd_start, ia64_boot_param->initrd_size);
        }
#endif
}

static void __init
io_port_init (void)
{
        unsigned long phys_iobase;

        /*
         * Set `iobase' based on the EFI memory map or, failing that, the
         * value firmware left in ar.k0.
         *
         * Note that in ia32 mode, IN/OUT instructions use ar.k0 to compute
         * the port's virtual address, so ia32_load_state() loads it with a
         * user virtual address.  But in ia64 mode, glibc uses the
         * *physical* address in ar.k0 to mmap the appropriate area from
         * /dev/mem, and the inX()/outX() interfaces use MMIO.  In both
         * cases, user-mode can only use the legacy 0-64K I/O port space.
         *
         * ar.k0 is not involved in kernel I/O port accesses, which can use
         * any of the I/O port spaces and are done via MMIO using the
         * virtual mmio_base from the appropriate io_space[].
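         *
         * io_space[0] is set up below for the legacy space only; any
         * additional I/O port spaces are registered later (e.g. by the
         * PCI code).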
         */
        phys_iobase = efi_get_iobase();
        if (!phys_iobase) {
                phys_iobase = ia64_get_kr(IA64_KR_IO_BASE);
                printk(KERN_INFO "No I/O port range found in EFI memory map, "
                        "falling back to AR.KR0 (0x%lx)\n", phys_iobase);
        }
        ia64_iobase = (unsigned long) ioremap(phys_iobase, 0);
        ia64_set_kr(IA64_KR_IO_BASE, __pa(ia64_iobase));

        /* set up legacy IO port space */
        io_space[0].mmio_base = ia64_iobase;
        io_space[0].sparse = 1;
        num_io_spaces = 1;
}

/**
 * early_console_setup - setup debugging console
 *
 * Consoles started here require little enough setup that we can start using
 * them very early in the boot process, either right after the machine
 * vector initialization, or even before if the drivers can detect their hw.
 *
 * Returns non-zero if a console couldn't be set up.
 */
static inline int __init
early_console_setup (char *cmdline)
{
        int earlycons = 0;

#ifdef CONFIG_SERIAL_SGI_L1_CONSOLE
        {
                extern int sn_serial_console_early_setup(void);
                if (!sn_serial_console_early_setup())
                        earlycons++;
        }
#endif
#ifdef CONFIG_EFI_PCDP
        if (!efi_setup_pcdp_console(cmdline))
                earlycons++;
#endif
        if (!simcons_register())
                earlycons++;

        return (earlycons) ? 0 : -1;
}

static inline void
mark_bsp_online (void)
{
#ifdef CONFIG_SMP
        /* If we register an early console, allow CPU 0 to printk */
        cpu_set(smp_processor_id(), cpu_online_map);
#endif
}

static __initdata int nomca;
static __init int setup_nomca(char *s)
{
        nomca = 1;
        return 0;
}
early_param("nomca", setup_nomca);

#ifdef CONFIG_PROC_VMCORE
/* elfcorehdr= specifies the location of the ELF core header
 * stored by the crashed kernel.
 */
static int __init parse_elfcorehdr(char *arg)
{
        if (!arg)
                return -EINVAL;

        elfcorehdr_addr = memparse(arg, &arg);
        return 0;
}
early_param("elfcorehdr", parse_elfcorehdr);

int __init reserve_elfcorehdr(unsigned long *start, unsigned long *end)
{
        unsigned long length;

        /* We get the address using the kernel command line,
         * but the size is extracted from the EFI tables.
         * Both address and size are required for the reservation
         * to work properly.
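         * If the size cannot be determined, elfcorehdr_addr is reset to
         * ELFCORE_ADDR_MAX below so the vmcore is treated as unavailable.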
         */

        if (elfcorehdr_addr >= ELFCORE_ADDR_MAX)
                return -EINVAL;

        if ((length = vmcore_find_descriptor_size(elfcorehdr_addr)) == 0) {
                elfcorehdr_addr = ELFCORE_ADDR_MAX;
                return -EINVAL;
        }

        *start = (unsigned long)__va(elfcorehdr_addr);
        *end = *start + length;
        return 0;
}

#endif /* CONFIG_PROC_VMCORE */

void __init
setup_arch (char **cmdline_p)
{
        unw_init();

        ia64_patch_vtop((u64) __start___vtop_patchlist, (u64) __end___vtop_patchlist);

        *cmdline_p = __va(ia64_boot_param->command_line);
        strlcpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE);

        efi_init();
        io_port_init();

#ifdef CONFIG_IA64_GENERIC
        /* The machvec must be parsed from the command line before
         * parse_early_param() is called, so that ia64_mv is initialised
         * before any command-line setting can cause console setup to occur.
         */
        machvec_init_from_cmdline(*cmdline_p);
#endif

        parse_early_param();

        if (early_console_setup(*cmdline_p) == 0)
                mark_bsp_online();

#ifdef CONFIG_ACPI
        /* Initialize the ACPI boot-time table parser */
        acpi_table_init();
# ifdef CONFIG_ACPI_NUMA
        acpi_numa_init();
# endif
#else
# ifdef CONFIG_SMP
        smp_build_cpu_map();    /* happens, e.g., with the Ski simulator */
# endif
#endif /* CONFIG_ACPI */

        find_memory();

        /* process SAL system table: */
        ia64_sal_init(__va(efi.sal_systab));

#ifdef CONFIG_SMP
        cpu_physical_id(0) = hard_smp_processor_id();
#endif

        cpu_init();     /* initialize the bootstrap CPU */
        mmu_context_init();     /* initialize context_id bitmap */

        check_sal_cache_flush();

#ifdef CONFIG_ACPI
        acpi_boot_init();
#endif

#ifdef CONFIG_VT
        if (!conswitchp) {
# if defined(CONFIG_DUMMY_CONSOLE)
                conswitchp = &dummy_con;
# endif
# if defined(CONFIG_VGA_CONSOLE)
                /*
                 * Non-legacy systems may route the legacy VGA MMIO range to
                 * system memory.  vga_con probes the MMIO hole, so memory
                 * looks like a VGA device to it.  The EFI memory map can
                 * tell us if it's memory so we can avoid this problem.
                 */
                if (efi_mem_type(0xA0000) != EFI_CONVENTIONAL_MEMORY)
                        conswitchp = &vga_con;
# endif
        }
#endif

        /* enable IA-64 Machine Check Abort Handling unless disabled */
        if (!nomca)
                ia64_mca_init();

        platform_setup(cmdline_p);
        paging_init();
}

/*
 * Display cpu info for all CPUs.
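 * This backs the /proc/cpuinfo seq_file; on SMP, c_start()/c_next() below
 * skip CPUs that are not online.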
 */
static int
show_cpuinfo (struct seq_file *m, void *v)
{
#ifdef CONFIG_SMP
#       define lpj      c->loops_per_jiffy
#       define cpunum   c->cpu
#else
#       define lpj      loops_per_jiffy
#       define cpunum   0
#endif
        static struct {
                unsigned long mask;
                const char *feature_name;
        } feature_bits[] = {
                { 1UL << 0, "branchlong" },
                { 1UL << 1, "spontaneous deferral" },
                { 1UL << 2, "16-byte atomic ops" }
        };
        char features[128], *cp, *sep;
        struct cpuinfo_ia64 *c = v;
        unsigned long mask;
        unsigned long proc_freq;
        int i, size;

        mask = c->features;

        /* build the feature string: */
        memcpy(features, "standard", 9);
        cp = features;
        size = sizeof(features);
        sep = "";
        for (i = 0; i < ARRAY_SIZE(feature_bits) && size > 1; ++i) {
                if (mask & feature_bits[i].mask) {
                        cp += snprintf(cp, size, "%s%s", sep,
                                       feature_bits[i].feature_name);
                        sep = ", ";
                        mask &= ~feature_bits[i].mask;
                        size = sizeof(features) - (cp - features);
                }
        }
        if (mask && size > 1) {
                /* print unknown features as a hex value */
                snprintf(cp, size, "%s0x%lx", sep, mask);
        }

        proc_freq = cpufreq_quick_get(cpunum);
        if (!proc_freq)
                proc_freq = c->proc_freq / 1000;

        seq_printf(m,
                   "processor  : %d\n"
                   "vendor     : %s\n"
                   "arch       : IA-64\n"
                   "family     : %u\n"
                   "model      : %u\n"
                   "model name : %s\n"
                   "revision   : %u\n"
                   "archrev    : %u\n"
                   "features   : %s\n"
                   "cpu number : %lu\n"
                   "cpu regs   : %u\n"
                   "cpu MHz    : %lu.%03lu\n"
                   "itc MHz    : %lu.%06lu\n"
                   "BogoMIPS   : %lu.%02lu\n",
                   cpunum, c->vendor, c->family, c->model,
                   c->model_name, c->revision, c->archrev,
                   features, c->ppn, c->number,
                   proc_freq / 1000, proc_freq % 1000,
                   c->itc_freq / 1000000, c->itc_freq % 1000000,
                   lpj*HZ/500000, (lpj*HZ/5000) % 100);
#ifdef CONFIG_SMP
        seq_printf(m, "siblings   : %u\n", cpus_weight(cpu_core_map[cpunum]));
        if (c->socket_id != -1)
                seq_printf(m, "physical id: %u\n", c->socket_id);
        if (c->threads_per_core > 1 || c->cores_per_socket > 1)
                seq_printf(m,
                           "core id    : %u\n"
                           "thread id  : %u\n",
                           c->core_id, c->thread_id);
#endif
        seq_printf(m, "\n");

        return 0;
}

static void *
c_start (struct seq_file *m, loff_t *pos)
{
#ifdef CONFIG_SMP
        while (*pos < NR_CPUS && !cpu_isset(*pos, cpu_online_map))
                ++*pos;
#endif
        return *pos < NR_CPUS ? cpu_data(*pos) : NULL;
}

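/* Advance to the next slot; c_start() re-applies the online-CPU check. */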
static void *
c_next (struct seq_file *m, void *v, loff_t *pos)
{
        ++*pos;
        return c_start(m, pos);
}

static void
c_stop (struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
        .start = c_start,
        .next  = c_next,
        .stop  = c_stop,
        .show  = show_cpuinfo
};

#define MAX_BRANDS      8
static char brandname[MAX_BRANDS][128];

static char * __cpuinit
get_model_name(__u8 family, __u8 model)
{
        static int overflow;
        char brand[128];
        int i;

        memcpy(brand, "Unknown", 8);
        if (ia64_pal_get_brand_info(brand)) {
                if (family == 0x7)
                        memcpy(brand, "Merced", 7);
                else if (family == 0x1f) switch (model) {
                        case 0: memcpy(brand, "McKinley", 9); break;
                        case 1: memcpy(brand, "Madison", 8); break;
                        case 2: memcpy(brand, "Madison up to 9M cache", 23); break;
                }
        }
        for (i = 0; i < MAX_BRANDS; i++)
                if (strcmp(brandname[i], brand) == 0)
                        return brandname[i];
        for (i = 0; i < MAX_BRANDS; i++)
                if (brandname[i][0] == '\0')
                        return strcpy(brandname[i], brand);
        if (overflow++ == 0)
                printk(KERN_ERR
                       "%s: Table overflow. Some processor model information will be missing\n",
                       __func__);
        return "Unknown";
}

static void __cpuinit
identify_cpu (struct cpuinfo_ia64 *c)
{
        union {
                unsigned long bits[5];
                struct {
                        /* id 0 & 1: */
                        char vendor[16];

                        /* id 2 */
                        u64 ppn;                /* processor serial number */

                        /* id 3: */
                        unsigned number         :  8;
                        unsigned revision       :  8;
                        unsigned model          :  8;
                        unsigned family         :  8;
                        unsigned archrev        :  8;
                        unsigned reserved       : 24;

                        /* id 4: */
                        u64 features;
                } field;
        } cpuid;
        pal_vm_info_1_u_t vm1;
        pal_vm_info_2_u_t vm2;
        pal_status_t status;
        unsigned long impl_va_msb = 50, phys_addr_size = 44;    /* Itanium defaults */
        int i;

        for (i = 0; i < 5; ++i)
                cpuid.bits[i] = ia64_get_cpuid(i);

        memcpy(c->vendor, cpuid.field.vendor, 16);
#ifdef CONFIG_SMP
        c->cpu = smp_processor_id();

        /* the default values below will be overwritten by identify_siblings()
         * for Multi-Threading/Multi-Core capable CPUs
         */
        c->threads_per_core = c->cores_per_socket = c->num_log = 1;
        c->socket_id = -1;

        identify_siblings(c);

        if (c->threads_per_core > smp_num_siblings)
                smp_num_siblings = c->threads_per_core;
#endif
        c->ppn = cpuid.field.ppn;
        c->number = cpuid.field.number;
        c->revision = cpuid.field.revision;
        c->model = cpuid.field.model;
        c->family = cpuid.field.family;
        c->archrev = cpuid.field.archrev;
        c->features = cpuid.field.features;
        c->model_name = get_model_name(c->family, c->model);

        status = ia64_pal_vm_summary(&vm1, &vm2);
        if (status == PAL_STATUS_SUCCESS) {
                impl_va_msb = vm2.pal_vm_info_2_s.impl_va_msb;
                phys_addr_size = vm1.pal_vm_info_1_s.phys_add_size;
        }
        c->unimpl_va_mask = ~((7L<<61) | ((1L << (impl_va_msb + 1)) - 1));
        c->unimpl_pa_mask = ~((1L<<63) | ((1L << phys_addr_size) - 1));
}

void __init
setup_per_cpu_areas (void)
{
        /* start_kernel() requires this... */
#ifdef CONFIG_ACPI_HOTPLUG_CPU
        prefill_possible_map();
#endif
}

/*
 * Calculate the max. cache line size.
 *
 * In addition, the minimum of the i-cache stride sizes is calculated for
 * "flush_icache_range()".
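 *
 * This runs on every CPU (from cpu_init()), so the globals end up holding
 * the maximum line size and the minimum i-cache stride across all CPUs.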
 */
static void __cpuinit
get_max_cacheline_size (void)
{
        unsigned long line_size, max = 1;
        u64 l, levels, unique_caches;
        pal_cache_config_info_t cci;
        s64 status;

        status = ia64_pal_cache_summary(&levels, &unique_caches);
        if (status != 0) {
                printk(KERN_ERR "%s: ia64_pal_cache_summary() failed (status=%ld)\n",
                       __func__, status);
                max = SMP_CACHE_BYTES;
                /* Safest setup for "flush_icache_range()" */
                ia64_i_cache_stride_shift = I_CACHE_STRIDE_SHIFT;
                goto out;
        }

        for (l = 0; l < levels; ++l) {
                status = ia64_pal_cache_config_info(l, /* cache_type (data_or_unified)= */ 2,
                                                    &cci);
                if (status != 0) {
                        printk(KERN_ERR
                               "%s: ia64_pal_cache_config_info(l=%lu, 2) failed (status=%ld)\n",
                               __func__, l, status);
                        max = SMP_CACHE_BYTES;
                        /* The safest setup for "flush_icache_range()" */
                        cci.pcci_stride = I_CACHE_STRIDE_SHIFT;
                        cci.pcci_unified = 1;
                }
                line_size = 1 << cci.pcci_line_size;
                if (line_size > max)
                        max = line_size;
                if (!cci.pcci_unified) {
                        status = ia64_pal_cache_config_info(l,
                                                    /* cache_type (instruction)= */ 1,
                                                    &cci);
                        if (status != 0) {
                                printk(KERN_ERR
                                       "%s: ia64_pal_cache_config_info(l=%lu, 1) failed (status=%ld)\n",
                                       __func__, l, status);
                                /* The safest setup for "flush_icache_range()" */
                                cci.pcci_stride = I_CACHE_STRIDE_SHIFT;
                        }
                }
                if (cci.pcci_stride < ia64_i_cache_stride_shift)
                        ia64_i_cache_stride_shift = cci.pcci_stride;
        }
  out:
        if (max > ia64_max_cacheline_size)
                ia64_max_cacheline_size = max;
}

/*
 * cpu_init() initializes state that is per-CPU.  This function acts
 * as a 'CPU state barrier', nothing should get across.
 */
void __cpuinit
cpu_init (void)
{
        extern void __cpuinit ia64_mmu_init (void *);
        static unsigned long max_num_phys_stacked = IA64_NUM_PHYS_STACK_REG;
        unsigned long num_phys_stacked;
        pal_vm_info_2_u_t vmi;
        unsigned int max_ctx;
        struct cpuinfo_ia64 *cpu_info;
        void *cpu_data;

        cpu_data = per_cpu_init();
#ifdef CONFIG_SMP
        /*
         * insert boot cpu into sibling and core maps
         * (must be done after per_cpu area is set up)
         */
        if (smp_processor_id() == 0) {
                cpu_set(0, per_cpu(cpu_sibling_map, 0));
                cpu_set(0, cpu_core_map[0]);
        }
#endif

        /*
         * We set ar.k3 so that assembly code in MCA handler can compute
         * physical addresses of per cpu variables with a simple:
         *      phys = ar.k3 + &per_cpu_var
         */
        ia64_set_kr(IA64_KR_PER_CPU_DATA,
                    ia64_tpa(cpu_data) - (long) __per_cpu_start);

        get_max_cacheline_size();

        /*
         * We can't pass "local_cpu_data" to identify_cpu() because we haven't called
         * ia64_mmu_init() yet.  And we can't call ia64_mmu_init() first because it
         * depends on the data returned by identify_cpu().  We break the dependency by
         * accessing cpu_data() through the canonical per-CPU address.
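         *
         * The offset of cpu_info within the per-CPU area is the same on
         * every CPU, so it can be computed from the canonical per-CPU copy
         * and added to this CPU's cpu_data base.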
         */
        cpu_info = cpu_data + ((char *) &__ia64_per_cpu_var(cpu_info) - __per_cpu_start);
        identify_cpu(cpu_info);

#ifdef CONFIG_MCKINLEY
        {
#               define FEATURE_SET 16
                struct ia64_pal_retval iprv;

                if (cpu_info->family == 0x1f) {
                        PAL_CALL_PHYS(iprv, PAL_PROC_GET_FEATURES, 0, FEATURE_SET, 0);
                        if ((iprv.status == 0) && (iprv.v0 & 0x80) && (iprv.v2 & 0x80))
                                PAL_CALL_PHYS(iprv, PAL_PROC_SET_FEATURES,
                                              (iprv.v1 | 0x80), FEATURE_SET, 0);
                }
        }
#endif

        /* Clear the stack memory reserved for pt_regs: */
        memset(task_pt_regs(current), 0, sizeof(struct pt_regs));

        ia64_set_kr(IA64_KR_FPU_OWNER, 0);

        /*
         * Initialize the page-table base register to a global
         * directory with all zeroes.  This ensures that we can handle
         * TLB-misses to user address-space even before we have created
         * the first user address-space.  This may happen, e.g., due to
         * aggressive use of lfetch.fault.
         */
        ia64_set_kr(IA64_KR_PT_BASE, __pa(ia64_imva(empty_zero_page)));

        /*
         * Initialize default control register to defer speculative faults except
         * for those arising from TLB misses, which are not deferred.  The
         * kernel MUST NOT depend on a particular setting of these bits (in other words,
         * the kernel must have recovery code for all speculative accesses).  Turn on
         * dcr.lc as per recommendation by the architecture team.  Most IA-32 apps
         * shouldn't be affected by this (moral: keep your ia32 locks aligned and you'll
         * be fine).
         */
        ia64_setreg(_IA64_REG_CR_DCR,  (  IA64_DCR_DP | IA64_DCR_DK | IA64_DCR_DX | IA64_DCR_DR
                                        | IA64_DCR_DA | IA64_DCR_DD | IA64_DCR_LC));
        atomic_inc(&init_mm.mm_count);
        current->active_mm = &init_mm;
        if (current->mm)
                BUG();

        ia64_mmu_init(ia64_imva(cpu_data));
        ia64_mca_cpu_init(ia64_imva(cpu_data));

#ifdef CONFIG_IA32_SUPPORT
        ia32_cpu_init();
#endif

        /* Clear ITC to eliminate sched_clock() overflows in human time.
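         * The ITC is a 64-bit cycle counter; starting it from zero pushes
         * any wraparound far beyond realistic uptimes.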
         */
        ia64_set_itc(0);

        /* disable all local interrupt sources: */
        ia64_set_itv(1 << 16);
        ia64_set_lrr0(1 << 16);
        ia64_set_lrr1(1 << 16);
        ia64_setreg(_IA64_REG_CR_PMV, 1 << 16);
        ia64_setreg(_IA64_REG_CR_CMCV, 1 << 16);

        /* clear TPR & XTP to enable all interrupt classes: */
        ia64_setreg(_IA64_REG_CR_TPR, 0);

        /* Clear any pending interrupts left by SAL/EFI */
        while (ia64_get_ivr() != IA64_SPURIOUS_INT_VECTOR)
                ia64_eoi();

#ifdef CONFIG_SMP
        normal_xtp();
#endif

        /* set ia64_ctx.max_ctx to the maximum context ID supported by all CPUs: */
        if (ia64_pal_vm_summary(NULL, &vmi) == 0)
                max_ctx = (1U << (vmi.pal_vm_info_2_s.rid_size - 3)) - 1;
        else {
                printk(KERN_WARNING "cpu_init: PAL VM summary failed, assuming 18 RID bits\n");
                max_ctx = (1U << 15) - 1;       /* use architected minimum */
        }
        while (max_ctx < ia64_ctx.max_ctx) {
                unsigned int old = ia64_ctx.max_ctx;
                if (cmpxchg(&ia64_ctx.max_ctx, old, max_ctx) == old)
                        break;
        }

        if (ia64_pal_rse_info(&num_phys_stacked, NULL) != 0) {
                printk(KERN_WARNING "cpu_init: PAL RSE info failed; assuming 96 physical "
                       "stacked regs\n");
                num_phys_stacked = 96;
        }
        /* size of physical stacked register partition plus 8 bytes: */
        if (num_phys_stacked > max_num_phys_stacked) {
                ia64_patch_phys_stack_reg(num_phys_stacked*8 + 8);
                max_num_phys_stacked = num_phys_stacked;
        }
        platform_cpu_init();
        pm_idle = default_idle;
}

void __init
check_bugs (void)
{
        ia64_patch_mckinley_e9((unsigned long) __start___mckinley_e9_bundles,
                               (unsigned long) __end___mckinley_e9_bundles);
}

static int __init run_dmi_scan(void)
{
        dmi_scan_machine();
        return 0;
}
core_initcall(run_dmi_scan);