/*
 * Architecture-specific setup.
 *
 * Copyright (C) 1998-2001, 2003-2004 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *	Stephane Eranian <eranian@hpl.hp.com>
 * Copyright (C) 2000, 2004 Intel Corp
 *	Rohit Seth <rohit.seth@intel.com>
 *	Suresh Siddha <suresh.b.siddha@intel.com>
 *	Gordon Jin <gordon.jin@intel.com>
 * Copyright (C) 1999 VA Linux Systems
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 *
 * 12/26/04 S.Siddha, G.Jin, R.Seth
 *			Add multi-threading and multi-core detection
 * 11/12/01 D.Mosberger	Convert get_cpuinfo() to seq_file based show_cpuinfo().
 * 04/04/00 D.Mosberger	renamed cpu_initialized to cpu_online_map
 * 03/31/00 R.Seth	cpu_initialized and current->processor fixes
 * 02/04/00 D.Mosberger	some more get_cpuinfo fixes...
 * 02/01/00 R.Seth	fixed get_cpuinfo for SMP
 * 01/07/99 S.Eranian	added the support for command line argument
 * 06/24/99 W.Drummond	added boot_cpu_data.
 * 05/28/05 Z. Menyhart	Dynamic stride size for "flush_icache_range()"
 */
#include <linux/module.h>
#include <linux/init.h>

#include <linux/acpi.h>
#include <linux/bootmem.h>
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/reboot.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/threads.h>
#include <linux/screen_info.h>
#include <linux/dmi.h>
#include <linux/serial.h>
#include <linux/serial_core.h>
#include <linux/efi.h>
#include <linux/initrd.h>
#include <linux/pm.h>
#include <linux/cpufreq.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>

#include <asm/ia32.h>
#include <asm/machvec.h>
#include <asm/mca.h>
#include <asm/meminit.h>
#include <asm/page.h>
#include <asm/patch.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/sal.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp.h>
#include <asm/system.h>
#include <asm/unistd.h>
#if defined(CONFIG_SMP) && (IA64_CPU_SIZE > PAGE_SIZE)
# error "struct cpuinfo_ia64 too big!"
#endif

#ifdef CONFIG_SMP
unsigned long __per_cpu_offset[NR_CPUS];
EXPORT_SYMBOL(__per_cpu_offset);
#endif

extern void ia64_setup_printk_clock(void);

DEFINE_PER_CPU(struct cpuinfo_ia64, cpu_info);
DEFINE_PER_CPU(unsigned long, local_per_cpu_offset);
DEFINE_PER_CPU(unsigned long, ia64_phys_stacked_size_p8);
unsigned long ia64_cycles_per_usec;
struct ia64_boot_param *ia64_boot_param;
struct screen_info screen_info;
unsigned long vga_console_iobase;
unsigned long vga_console_membase;

static struct resource data_resource = {
	.name	= "Kernel data",
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
};

static struct resource code_resource = {
	.name	= "Kernel code",
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
};
extern char _text[], _end[], _etext[];

unsigned long ia64_max_cacheline_size;

int dma_get_cache_alignment(void)
{
	return ia64_max_cacheline_size;
}
EXPORT_SYMBOL(dma_get_cache_alignment);

unsigned long ia64_iobase;	/* virtual address for I/O accesses */
EXPORT_SYMBOL(ia64_iobase);
struct io_space io_space[MAX_IO_SPACES];
EXPORT_SYMBOL(io_space);
unsigned int num_io_spaces;

/*
 * "flush_icache_range()" needs to know what processor dependent stride size to use
 * when it makes i-cache(s) coherent with d-caches.
 */
#define	I_CACHE_STRIDE_SHIFT	5	/* Safest way to go: 32 bytes by 32 bytes */
unsigned long ia64_i_cache_stride_shift = ~0;

/*
 * The merge_mask variable needs to be set to (max(iommu_page_size(iommu)) - 1).  This
 * mask specifies a mask of address bits that must be 0 in order for two buffers to be
 * mergeable by the I/O MMU (i.e., the end address of the first buffer and the start
 * address of the second buffer must be aligned to (merge_mask+1) in order to be
 * mergeable).  By default, we assume there is no I/O MMU which can merge physically
 * discontiguous buffers, so we set the merge_mask to ~0UL, which corresponds to an
 * iommu page-size of 2^64.
 */
unsigned long ia64_max_iommu_merge_mask = ~0UL;
EXPORT_SYMBOL(ia64_max_iommu_merge_mask);
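/*
 * Illustrative sketch (not part of the original code): a platform whose
 * I/O MMU maps in 4 KiB pages would set the mask to 4096 - 1 = 0xfff,
 * and two scatterlist buffers could then be merged only when the
 * boundary between them is 4 KiB aligned, e.g.:
 *
 *	ia64_max_iommu_merge_mask = 0xfff;
 *	can_merge = ((buf1_end & ia64_max_iommu_merge_mask) == 0 &&
 *		     (buf2_start & ia64_max_iommu_merge_mask) == 0);
 *
 * With the default of ~0UL, can_merge is false for any non-zero
 * address, so no merging is ever attempted.
 */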
/*
 * We use a special marker for the end of memory and it uses the extra (+1) slot
 */
struct rsvd_region rsvd_region[IA64_MAX_RSVD_REGIONS + 1] __initdata;
int num_rsvd_regions __initdata;


/*
 * Filter incoming memory segments based on the primitive map created from the boot
 * parameters.  Segments contained in the map are removed from the memory ranges.  A
 * caller-specified function is called with the memory ranges that remain after filtering.
 * This routine does not assume the incoming segments are sorted.
 */
int __init
filter_rsvd_memory (unsigned long start, unsigned long end, void *arg)
{
	unsigned long range_start, range_end, prev_start;
	void (*func)(unsigned long, unsigned long, int);
	int i;

#if IGNORE_PFN0
	if (start == PAGE_OFFSET) {
		printk(KERN_WARNING "warning: skipping physical page 0\n");
		start += PAGE_SIZE;
		if (start >= end) return 0;
	}
#endif
	/*
	 * lowest possible address (walker uses virtual)
	 */
	prev_start = PAGE_OFFSET;
	func = arg;

	for (i = 0; i < num_rsvd_regions; ++i) {
		range_start = max(start, prev_start);
		range_end   = min(end, rsvd_region[i].start);

		if (range_start < range_end)
			call_pernode_memory(__pa(range_start), range_end - range_start, func);

		/* nothing more available in this segment */
		if (range_end == end) return 0;

		prev_start = rsvd_region[i].end;
	}
	/* end of memory marker allows full processing inside loop body */
	return 0;
}
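/*
 * Worked example (illustrative): with one real reserved region [r0, r1)
 * lying inside an incoming segment [s0, s1), the loop above emits
 * [s0, r0) on the first iteration, skips ahead to prev_start = r1, and
 * emits [r1, s1) on the iteration that hits the ~0UL end-of-memory
 * marker, since min(s1, ~0UL) == s1 then terminates the walk.
 */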
static void __init
sort_regions (struct rsvd_region *rsvd_region, int max)
{
	int j;

	/* simple bubble sorting */
	while (max--) {
		for (j = 0; j < max; ++j) {
			if (rsvd_region[j].start > rsvd_region[j+1].start) {
				struct rsvd_region tmp;
				tmp = rsvd_region[j];
				rsvd_region[j] = rsvd_region[j + 1];
				rsvd_region[j + 1] = tmp;
			}
		}
	}
}

/*
 * Request address space for all standard resources
 */
static int __init register_memory(void)
{
	code_resource.start = ia64_tpa(_text);
	code_resource.end   = ia64_tpa(_etext) - 1;
	data_resource.start = ia64_tpa(_etext);
	data_resource.end   = ia64_tpa(_end) - 1;
	efi_initialize_iomem_resources(&code_resource, &data_resource);

	return 0;
}

__initcall(register_memory);

/**
 * reserve_memory - setup reserved memory areas
 *
 * Setup the reserved memory areas set aside for the boot parameters,
 * initrd, etc.  There are currently %IA64_MAX_RSVD_REGIONS defined,
 * see include/asm-ia64/meminit.h if you need to define more.
 */
void __init
reserve_memory (void)
{
	int n = 0;

	/*
	 * none of the entries in this table overlap
	 */
	rsvd_region[n].start = (unsigned long) ia64_boot_param;
	rsvd_region[n].end   = rsvd_region[n].start + sizeof(*ia64_boot_param);
	n++;

	rsvd_region[n].start = (unsigned long) __va(ia64_boot_param->efi_memmap);
	rsvd_region[n].end   = rsvd_region[n].start + ia64_boot_param->efi_memmap_size;
	n++;

	rsvd_region[n].start = (unsigned long) __va(ia64_boot_param->command_line);
	rsvd_region[n].end   = (rsvd_region[n].start
				+ strlen(__va(ia64_boot_param->command_line)) + 1);
	n++;

	rsvd_region[n].start = (unsigned long) ia64_imva((void *)KERNEL_START);
	rsvd_region[n].end   = (unsigned long) ia64_imva(_end);
	n++;

#ifdef CONFIG_BLK_DEV_INITRD
	if (ia64_boot_param->initrd_start) {
		rsvd_region[n].start = (unsigned long)__va(ia64_boot_param->initrd_start);
		rsvd_region[n].end   = rsvd_region[n].start + ia64_boot_param->initrd_size;
		n++;
	}
#endif

#ifdef CONFIG_PROC_VMCORE
	if (reserve_elfcorehdr(&rsvd_region[n].start,
			       &rsvd_region[n].end) == 0)
		n++;
#endif

	efi_memmap_init(&rsvd_region[n].start, &rsvd_region[n].end);
	n++;

#ifdef CONFIG_KEXEC
	/* crashkernel=size@offset specifies the size to reserve for a crash
	 * kernel.  If offset is 0, then it is determined automatically.
	 * By reserving this memory we guarantee that linux never sets it
	 * up as a DMA target.  Useful for holding code to do something
	 * appropriate after a kernel panic.
	 */
	{
		char *from = strstr(boot_command_line, "crashkernel=");
		unsigned long base, size;
		if (from) {
			size = memparse(from + 12, &from);
			if (*from == '@')
				base = memparse(from+1, &from);
			else
				base = 0;
			if (size) {
				if (!base) {
					sort_regions(rsvd_region, n);
					base = kdump_find_rsvd_region(size,
							rsvd_region, n);
				}
				if (base != ~0UL) {
					rsvd_region[n].start =
						(unsigned long)__va(base);
					rsvd_region[n].end =
						(unsigned long)__va(base + size);
					n++;
					crashk_res.start = base;
					crashk_res.end = base + size - 1;
				}
			}
		}
		efi_memmap_res.start = ia64_boot_param->efi_memmap;
		efi_memmap_res.end = efi_memmap_res.start +
			ia64_boot_param->efi_memmap_size;
		boot_param_res.start = __pa(ia64_boot_param);
		boot_param_res.end = boot_param_res.start +
			sizeof(*ia64_boot_param);
	}
#endif
	/* end of memory marker */
	rsvd_region[n].start = ~0UL;
	rsvd_region[n].end   = ~0UL;
	n++;

	num_rsvd_regions = n;
	BUG_ON(IA64_MAX_RSVD_REGIONS + 1 < n);

	sort_regions(rsvd_region, num_rsvd_regions);
}
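/*
 * Parsing example (illustrative): booting with "crashkernel=256M@64M"
 * leaves memparse() with size = 0x10000000 and base = 0x4000000, so
 * crashk_res covers [0x4000000, 0x13ffffff].  With plain
 * "crashkernel=256M" the base stays 0 and kdump_find_rsvd_region()
 * instead picks a large-enough hole between the (sorted) reserved
 * regions, or returns ~0UL if none exists, in which case nothing is
 * reserved.
 */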
/**
 * find_initrd - get initrd parameters from the boot parameter structure
 *
 * Grab the initrd start and end from the boot parameter struct given us by
 * the boot loader.
 */
void __init
find_initrd (void)
{
#ifdef CONFIG_BLK_DEV_INITRD
	if (ia64_boot_param->initrd_start) {
		initrd_start = (unsigned long)__va(ia64_boot_param->initrd_start);
		initrd_end   = initrd_start+ia64_boot_param->initrd_size;

		printk(KERN_INFO "Initial ramdisk at: 0x%lx (%lu bytes)\n",
		       initrd_start, ia64_boot_param->initrd_size);
	}
#endif
}

static void __init
io_port_init (void)
{
	unsigned long phys_iobase;

	/*
	 * Set `iobase' based on the EFI memory map or, failing that, the
	 * value firmware left in ar.k0.
	 *
	 * Note that in ia32 mode, IN/OUT instructions use ar.k0 to compute
	 * the port's virtual address, so ia32_load_state() loads it with a
	 * user virtual address.  But in ia64 mode, glibc uses the
	 * *physical* address in ar.k0 to mmap the appropriate area from
	 * /dev/mem, and the inX()/outX() interfaces use MMIO.  In both
	 * cases, user-mode can only use the legacy 0-64K I/O port space.
	 *
	 * ar.k0 is not involved in kernel I/O port accesses, which can use
	 * any of the I/O port spaces and are done via MMIO using the
	 * virtual mmio_base from the appropriate io_space[].
	 */
	phys_iobase = efi_get_iobase();
	if (!phys_iobase) {
		phys_iobase = ia64_get_kr(IA64_KR_IO_BASE);
		printk(KERN_INFO "No I/O port range found in EFI memory map, "
			"falling back to AR.KR0 (0x%lx)\n", phys_iobase);
	}
	ia64_iobase = (unsigned long) ioremap(phys_iobase, 0);
	ia64_set_kr(IA64_KR_IO_BASE, __pa(ia64_iobase));

	/* setup legacy IO port space */
	io_space[0].mmio_base = ia64_iobase;
	io_space[0].sparse = 1;
	num_io_spaces = 1;
}
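/*
 * Sketch of how the sparse space set up above is consumed (illustrative;
 * the actual translation lives in include/asm-ia64/io.h): a sparse port
 * number is spread out so that each 4-byte port group lands on its own
 * 4 KiB page, roughly
 *
 *	offset = ((port >> 2) << 12) | (port & 0xfff);
 *	addr   = io_space[0].mmio_base | offset;
 *
 * e.g. legacy COM1 at port 0x3f8 would become offset 0xfe3f8.
 */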
/**
 * early_console_setup - setup debugging console
 *
 * Consoles started here require little enough setup that we can start using
 * them very early in the boot process, either right after the machine
 * vector initialization, or even before if the drivers can detect their hw.
 *
 * Returns non-zero if a console couldn't be set up.
 */
static inline int __init
early_console_setup (char *cmdline)
{
	int earlycons = 0;

#ifdef CONFIG_SERIAL_SGI_L1_CONSOLE
	{
		extern int sn_serial_console_early_setup(void);
		if (!sn_serial_console_early_setup())
			earlycons++;
	}
#endif
#ifdef CONFIG_EFI_PCDP
	if (!efi_setup_pcdp_console(cmdline))
		earlycons++;
#endif
#ifdef CONFIG_SERIAL_8250_CONSOLE
	if (!early_serial_console_init(cmdline))
		earlycons++;
#endif

	return (earlycons) ? 0 : -1;
}

static inline void
mark_bsp_online (void)
{
#ifdef CONFIG_SMP
	/* If we register an early console, allow CPU 0 to printk */
	cpu_set(smp_processor_id(), cpu_online_map);
#endif
}

#ifdef CONFIG_SMP
static void __init
check_for_logical_procs (void)
{
	pal_logical_to_physical_t info;
	s64 status;

	status = ia64_pal_logical_to_phys(0, &info);
	if (status == -1) {
		printk(KERN_INFO "No logical to physical processor mapping "
		       "available\n");
		return;
	}
	if (status) {
		printk(KERN_ERR "ia64_pal_logical_to_phys failed with %ld\n",
		       status);
		return;
	}
	/*
	 * Total number of siblings that BSP has, though not all of them
	 * may have booted successfully.  The correct number of booted
	 * siblings is in info.overview_num_log.
	 */
	smp_num_siblings = info.overview_tpc;
	smp_num_cpucores = info.overview_cpp;
}
#endif

static __initdata int nomca;
static __init int setup_nomca(char *s)
{
	nomca = 1;
	return 0;
}
early_param("nomca", setup_nomca);

#ifdef CONFIG_PROC_VMCORE
/* elfcorehdr= specifies the location of elf core header
 * stored by the crashed kernel.
 */
static int __init parse_elfcorehdr(char *arg)
{
	if (!arg)
		return -EINVAL;

	elfcorehdr_addr = memparse(arg, &arg);
	return 0;
}
early_param("elfcorehdr", parse_elfcorehdr);

int __init reserve_elfcorehdr(unsigned long *start, unsigned long *end)
{
	unsigned long length;

	/* We get the address using the kernel command line,
	 * but the size is extracted from the EFI tables.
	 * Both address and size are required for reservation
	 * to work properly.
	 */

	if (elfcorehdr_addr >= ELFCORE_ADDR_MAX)
		return -EINVAL;

	if ((length = vmcore_find_descriptor_size(elfcorehdr_addr)) == 0) {
		elfcorehdr_addr = ELFCORE_ADDR_MAX;
		return -EINVAL;
	}

	*start = (unsigned long)__va(elfcorehdr_addr);
	*end   = *start + length;
	return 0;
}

#endif /* CONFIG_PROC_VMCORE */
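/*
 * Usage sketch (illustrative): a capture kernel started by kexec is
 * typically handed something like "elfcorehdr=0x204000" on its command
 * line by kexec-tools.  parse_elfcorehdr() records that physical
 * address, and reserve_elfcorehdr() then turns it into one more
 * rsvd_region entry above, so the ELF header describing the crashed
 * kernel's memory isn't recycled as free memory before /proc/vmcore
 * can read it.
 */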
void __init
setup_arch (char **cmdline_p)
{
	unw_init();

	ia64_patch_vtop((u64) __start___vtop_patchlist, (u64) __end___vtop_patchlist);

	*cmdline_p = __va(ia64_boot_param->command_line);
	strlcpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE);

	efi_init();
	io_port_init();

	parse_early_param();

#ifdef CONFIG_IA64_GENERIC
	machvec_init(NULL);
#endif

	if (early_console_setup(*cmdline_p) == 0)
		mark_bsp_online();

#ifdef CONFIG_ACPI
	/* Initialize the ACPI boot-time table parser */
	acpi_table_init();
# ifdef CONFIG_ACPI_NUMA
	acpi_numa_init();
# endif
#else
# ifdef CONFIG_SMP
	smp_build_cpu_map();	/* happens, e.g., with the Ski simulator */
# endif
#endif /* CONFIG_ACPI */

	find_memory();

	/* process SAL system table: */
	ia64_sal_init(__va(efi.sal_systab));

	ia64_setup_printk_clock();

#ifdef CONFIG_SMP
	cpu_physical_id(0) = hard_smp_processor_id();

	cpu_set(0, cpu_sibling_map[0]);
	cpu_set(0, cpu_core_map[0]);

	check_for_logical_procs();
	if (smp_num_cpucores > 1)
		printk(KERN_INFO
		       "cpu package is Multi-Core capable: number of cores=%d\n",
		       smp_num_cpucores);
	if (smp_num_siblings > 1)
		printk(KERN_INFO
		       "cpu package is Multi-Threading capable: number of siblings=%d\n",
		       smp_num_siblings);
#endif

	cpu_init();	/* initialize the bootstrap CPU */
	mmu_context_init();	/* initialize context_id bitmap */

	check_sal_cache_flush();

#ifdef CONFIG_ACPI
	acpi_boot_init();
#endif

#ifdef CONFIG_VT
	if (!conswitchp) {
# if defined(CONFIG_DUMMY_CONSOLE)
		conswitchp = &dummy_con;
# endif
# if defined(CONFIG_VGA_CONSOLE)
		/*
		 * Non-legacy systems may route legacy VGA MMIO range to system
		 * memory.  vga_con probes the MMIO hole, so memory looks like
		 * a VGA device to it.  The EFI memory map can tell us if it's
		 * memory so we can avoid this problem.
		 */
		if (efi_mem_type(0xA0000) != EFI_CONVENTIONAL_MEMORY)
			conswitchp = &vga_con;
# endif
	}
#endif

	/* enable IA-64 Machine Check Abort Handling unless disabled */
	if (!nomca)
		ia64_mca_init();

	platform_setup(cmdline_p);
	paging_init();
}
/*
 * Display cpu info for all CPUs.
 */
static int
show_cpuinfo (struct seq_file *m, void *v)
{
#ifdef CONFIG_SMP
#	define lpj	c->loops_per_jiffy
#	define cpunum	c->cpu
#else
#	define lpj	loops_per_jiffy
#	define cpunum	0
#endif
	static struct {
		unsigned long mask;
		const char *feature_name;
	} feature_bits[] = {
		{ 1UL << 0, "branchlong" },
		{ 1UL << 1, "spontaneous deferral"},
		{ 1UL << 2, "16-byte atomic ops" }
	};
	char features[128], *cp, *sep;
	struct cpuinfo_ia64 *c = v;
	unsigned long mask;
	unsigned long proc_freq;
	int i, size;

	mask = c->features;

	/* build the feature string: */
	memcpy(features, "standard", 9);
	cp = features;
	size = sizeof(features);
	sep = "";
	for (i = 0; i < ARRAY_SIZE(feature_bits) && size > 1; ++i) {
		if (mask & feature_bits[i].mask) {
			cp += snprintf(cp, size, "%s%s", sep,
				       feature_bits[i].feature_name);
			sep = ", ";
			mask &= ~feature_bits[i].mask;
			size = sizeof(features) - (cp - features);
		}
	}
	if (mask && size > 1) {
		/* print unknown features as a hex value */
		snprintf(cp, size, "%s0x%lx", sep, mask);
	}

	proc_freq = cpufreq_quick_get(cpunum);
	if (!proc_freq)
		proc_freq = c->proc_freq / 1000;

	seq_printf(m,
		   "processor  : %d\n"
		   "vendor     : %s\n"
		   "arch       : IA-64\n"
		   "family     : %u\n"
		   "model      : %u\n"
		   "model name : %s\n"
		   "revision   : %u\n"
		   "archrev    : %u\n"
		   "features   : %s\n"
		   "cpu number : %lu\n"
		   "cpu regs   : %u\n"
		   "cpu MHz    : %lu.%06lu\n"
		   "itc MHz    : %lu.%06lu\n"
		   "BogoMIPS   : %lu.%02lu\n",
		   cpunum, c->vendor, c->family, c->model,
		   c->model_name, c->revision, c->archrev,
		   features, c->ppn, c->number,
		   proc_freq / 1000, proc_freq % 1000,
		   c->itc_freq / 1000000, c->itc_freq % 1000000,
		   lpj*HZ/500000, (lpj*HZ/5000) % 100);
#ifdef CONFIG_SMP
	seq_printf(m, "siblings   : %u\n", cpus_weight(cpu_core_map[cpunum]));
	if (c->threads_per_core > 1 || c->cores_per_socket > 1)
		seq_printf(m,
			   "physical id: %u\n"
			   "core id    : %u\n"
			   "thread id  : %u\n",
			   c->socket_id, c->core_id, c->thread_id);
#endif
	seq_printf(m, "\n");

	return 0;
}
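/*
 * Sample of what the above emits for one CPU (abridged, with
 * illustrative values; BogoMIPS is loops_per_jiffy scaled by HZ/500000
 * because each delay-loop iteration counts as roughly two
 * bogo-instructions):
 *
 *	processor  : 0
 *	vendor     : GenuineIntel
 *	arch       : IA-64
 *	family     : 31
 *	model      : 1
 *	model name : Madison
 *	features   : branchlong
 *	cpu MHz    : 1500.000000
 *	BogoMIPS   : 2244.60
 */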
static void *
c_start (struct seq_file *m, loff_t *pos)
{
#ifdef CONFIG_SMP
	while (*pos < NR_CPUS && !cpu_isset(*pos, cpu_online_map))
		++*pos;
#endif
	return *pos < NR_CPUS ? cpu_data(*pos) : NULL;
}

static void *
c_next (struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return c_start(m, pos);
}

static void
c_stop (struct seq_file *m, void *v)
{
}

struct seq_operations cpuinfo_op = {
	.start =	c_start,
	.next =		c_next,
	.stop =		c_stop,
	.show =		show_cpuinfo
};

static char brandname[128];

static char * __cpuinit
get_model_name(__u8 family, __u8 model)
{
	char brand[128];

	memcpy(brand, "Unknown", 8);
	if (ia64_pal_get_brand_info(brand)) {
		if (family == 0x7)
			memcpy(brand, "Merced", 7);
		else if (family == 0x1f) switch (model) {
			case 0: memcpy(brand, "McKinley", 9); break;
			case 1: memcpy(brand, "Madison", 8); break;
			case 2: memcpy(brand, "Madison up to 9M cache", 23); break;
		}
	}
	if (brandname[0] == '\0')
		return strcpy(brandname, brand);
	else if (strcmp(brandname, brand) == 0)
		return brandname;
	else
		return kstrdup(brand, GFP_KERNEL);
}

static void __cpuinit
identify_cpu (struct cpuinfo_ia64 *c)
{
	union {
		unsigned long bits[5];
		struct {
			/* id 0 & 1: */
			char vendor[16];

			/* id 2 */
			u64 ppn;		/* processor serial number */

			/* id 3: */
			unsigned number		:  8;
			unsigned revision	:  8;
			unsigned model		:  8;
			unsigned family		:  8;
			unsigned archrev	:  8;
			unsigned reserved	: 24;

			/* id 4: */
			u64 features;
		} field;
	} cpuid;
	pal_vm_info_1_u_t vm1;
	pal_vm_info_2_u_t vm2;
	pal_status_t status;
	unsigned long impl_va_msb = 50, phys_addr_size = 44;	/* Itanium defaults */
	int i;

	for (i = 0; i < 5; ++i)
		cpuid.bits[i] = ia64_get_cpuid(i);

	memcpy(c->vendor, cpuid.field.vendor, 16);
#ifdef CONFIG_SMP
	c->cpu = smp_processor_id();

	/* below default values will be overwritten by identify_siblings()
	 * for Multi-Threading/Multi-Core capable CPUs
	 */
	c->threads_per_core = c->cores_per_socket = c->num_log = 1;
	c->socket_id = -1;

	identify_siblings(c);
#endif
	c->ppn = cpuid.field.ppn;
	c->number = cpuid.field.number;
	c->revision = cpuid.field.revision;
	c->model = cpuid.field.model;
	c->family = cpuid.field.family;
	c->archrev = cpuid.field.archrev;
	c->features = cpuid.field.features;
	c->model_name = get_model_name(c->family, c->model);

	status = ia64_pal_vm_summary(&vm1, &vm2);
	if (status == PAL_STATUS_SUCCESS) {
		impl_va_msb = vm2.pal_vm_info_2_s.impl_va_msb;
		phys_addr_size = vm1.pal_vm_info_1_s.phys_add_size;
	}
	c->unimpl_va_mask = ~((7L<<61) | ((1L << (impl_va_msb + 1)) - 1));
	c->unimpl_pa_mask = ~((1L<<63) | ((1L << phys_addr_size) - 1));
}
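/*
 * Worked example for the masks above (Itanium defaults): with
 * impl_va_msb = 50, bits 0-50 of a virtual address are implemented and
 * bits 61-63 select the region, so
 *
 *	unimpl_va_mask = ~((7L << 61) | ((1L << 51) - 1))
 *	               = 0x1ff8000000000000
 *
 * i.e. bits 51-60 are the unimplemented ones that must match the
 * sign-extension of bit 50.  Likewise, phys_addr_size = 44 leaves bits
 * 44-62 unimplemented in physical addresses (bit 63, the uncacheable
 * attribute, is masked out separately).
 */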
void
setup_per_cpu_areas (void)
{
	/* start_kernel() requires this... */
#ifdef CONFIG_ACPI_HOTPLUG_CPU
	prefill_possible_map();
#endif
}

/*
 * Calculate the max. cache line size.
 *
 * In addition, the minimum of the i-cache stride sizes is calculated for
 * "flush_icache_range()".
 */
static void __cpuinit
get_max_cacheline_size (void)
{
	unsigned long line_size, max = 1;
	unsigned int cache_size = 0;
	u64 l, levels, unique_caches;
	pal_cache_config_info_t cci;
	s64 status;

	status = ia64_pal_cache_summary(&levels, &unique_caches);
	if (status != 0) {
		printk(KERN_ERR "%s: ia64_pal_cache_summary() failed (status=%ld)\n",
		       __FUNCTION__, status);
		max = SMP_CACHE_BYTES;
		/* Safest setup for "flush_icache_range()" */
		ia64_i_cache_stride_shift = I_CACHE_STRIDE_SHIFT;
		goto out;
	}

	for (l = 0; l < levels; ++l) {
		status = ia64_pal_cache_config_info(l, /* cache_type (data_or_unified)= */ 2,
						    &cci);
		if (status != 0) {
			printk(KERN_ERR
			       "%s: ia64_pal_cache_config_info(l=%lu, 2) failed (status=%ld)\n",
			       __FUNCTION__, l, status);
			max = SMP_CACHE_BYTES;
			/* The safest setup for "flush_icache_range()" */
			cci.pcci_stride = I_CACHE_STRIDE_SHIFT;
			cci.pcci_unified = 1;
		}
		line_size = 1 << cci.pcci_line_size;
		if (line_size > max)
			max = line_size;
		if (cache_size < cci.pcci_cache_size)
			cache_size = cci.pcci_cache_size;
		if (!cci.pcci_unified) {
			status = ia64_pal_cache_config_info(l,
						    /* cache_type (instruction)= */ 1,
						    &cci);
			if (status != 0) {
				printk(KERN_ERR
				       "%s: ia64_pal_cache_config_info(l=%lu, 1) failed (status=%ld)\n",
				       __FUNCTION__, l, status);
				/* The safest setup for "flush_icache_range()" */
				cci.pcci_stride = I_CACHE_STRIDE_SHIFT;
			}
		}
		if (cci.pcci_stride < ia64_i_cache_stride_shift)
			ia64_i_cache_stride_shift = cci.pcci_stride;
	}
  out:
#ifdef CONFIG_SMP
	max_cache_size = max(max_cache_size, cache_size);
#endif
	if (max > ia64_max_cacheline_size)
		ia64_max_cacheline_size = max;
}
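/*
 * Sketch of how the stride computed above gets used (illustrative C for
 * what the flush code in arch/ia64/lib/flush.S does in assembly): the
 * flush walks the range once, issuing one flush-cache operation per
 * stride, which is why taking the minimum stride across all i-caches
 * guarantees no line is skipped:
 *
 *	stride = 1UL << ia64_i_cache_stride_shift;
 *	for (addr = start & ~(stride - 1); addr < end; addr += stride)
 *		ia64_fc((void *) addr);		(per-line fc)
 *	ia64_sync_i();
 *	ia64_srlz_i();
 */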
/*
 * cpu_init() initializes state that is per-CPU.  This function acts
 * as a 'CPU state barrier', nothing should get across.
 */
void __cpuinit
cpu_init (void)
{
	extern void __cpuinit ia64_mmu_init (void *);
	unsigned long num_phys_stacked;
	pal_vm_info_2_u_t vmi;
	unsigned int max_ctx;
	struct cpuinfo_ia64 *cpu_info;
	void *cpu_data;

	cpu_data = per_cpu_init();

	/*
	 * We set ar.k3 so that assembly code in MCA handler can compute
	 * physical addresses of per cpu variables with a simple:
	 *	phys = ar.k3 + &per_cpu_var
	 */
	ia64_set_kr(IA64_KR_PER_CPU_DATA,
		    ia64_tpa(cpu_data) - (long) __per_cpu_start);

	get_max_cacheline_size();

	/*
	 * We can't pass "local_cpu_data" to identify_cpu() because we haven't called
	 * ia64_mmu_init() yet.  And we can't call ia64_mmu_init() first because it
	 * depends on the data returned by identify_cpu().  We break the dependency by
	 * accessing cpu_data() through the canonical per-CPU address.
	 */
	cpu_info = cpu_data + ((char *) &__ia64_per_cpu_var(cpu_info) - __per_cpu_start);
	identify_cpu(cpu_info);

#ifdef CONFIG_MCKINLEY
	{
#	define FEATURE_SET 16
		struct ia64_pal_retval iprv;

		if (cpu_info->family == 0x1f) {
			PAL_CALL_PHYS(iprv, PAL_PROC_GET_FEATURES, 0, FEATURE_SET, 0);
			if ((iprv.status == 0) && (iprv.v0 & 0x80) && (iprv.v2 & 0x80))
				PAL_CALL_PHYS(iprv, PAL_PROC_SET_FEATURES,
					      (iprv.v1 | 0x80), FEATURE_SET, 0);
		}
	}
#endif

	/* Clear the stack memory reserved for pt_regs: */
	memset(task_pt_regs(current), 0, sizeof(struct pt_regs));

	ia64_set_kr(IA64_KR_FPU_OWNER, 0);

	/*
	 * Initialize the page-table base register to a global
	 * directory with all zeroes.  This ensures that we can handle
	 * TLB-misses to user address-space even before we have created
	 * the first user address-space.  This may happen, e.g., due to
	 * aggressive use of lfetch.fault.
	 */
	ia64_set_kr(IA64_KR_PT_BASE, __pa(ia64_imva(empty_zero_page)));

	/*
	 * Initialize default control register to defer speculative faults except
	 * for those arising from TLB misses, which are not deferred.  The
	 * kernel MUST NOT depend on a particular setting of these bits (in other words,
	 * the kernel must have recovery code for all speculative accesses).  Turn on
	 * dcr.lc as per recommendation by the architecture team.  Most IA-32 apps
	 * shouldn't be affected by this (moral: keep your ia32 locks aligned and you'll
	 * be fine).
	 */
	ia64_setreg(_IA64_REG_CR_DCR,  (  IA64_DCR_DP | IA64_DCR_DK | IA64_DCR_DX | IA64_DCR_DR
					| IA64_DCR_DA | IA64_DCR_DD | IA64_DCR_LC));
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
	if (current->mm)
		BUG();

	ia64_mmu_init(ia64_imva(cpu_data));
	ia64_mca_cpu_init(ia64_imva(cpu_data));

#ifdef CONFIG_IA32_SUPPORT
	ia32_cpu_init();
#endif

	/* Clear ITC to eliminate sched_clock() overflows in human time. */
	ia64_set_itc(0);

	/* disable all local interrupt sources: */
	ia64_set_itv(1 << 16);
	ia64_set_lrr0(1 << 16);
	ia64_set_lrr1(1 << 16);
	ia64_setreg(_IA64_REG_CR_PMV, 1 << 16);
	ia64_setreg(_IA64_REG_CR_CMCV, 1 << 16);

	/* clear TPR & XTP to enable all interrupt classes: */
	ia64_setreg(_IA64_REG_CR_TPR, 0);
#ifdef CONFIG_SMP
	normal_xtp();
#endif

	/* set ia64_ctx.max_rid to the maximum RID that is supported by all CPUs: */
	if (ia64_pal_vm_summary(NULL, &vmi) == 0)
		max_ctx = (1U << (vmi.pal_vm_info_2_s.rid_size - 3)) - 1;
	else {
		printk(KERN_WARNING "cpu_init: PAL VM summary failed, assuming 18 RID bits\n");
		max_ctx = (1U << 15) - 1;	/* use architected minimum */
	}
	while (max_ctx < ia64_ctx.max_ctx) {
		unsigned int old = ia64_ctx.max_ctx;
		if (cmpxchg(&ia64_ctx.max_ctx, old, max_ctx) == old)
			break;
	}

	if (ia64_pal_rse_info(&num_phys_stacked, NULL) != 0) {
		printk(KERN_WARNING "cpu_init: PAL RSE info failed; assuming 96 physical "
		       "stacked regs\n");
		num_phys_stacked = 96;
	}
	/* size of physical stacked register partition plus 8 bytes: */
	__get_cpu_var(ia64_phys_stacked_size_p8) = num_phys_stacked*8 + 8;
	platform_cpu_init();
	pm_idle = default_idle;
}

/*
 * On SMP systems, when the scheduler does migration-cost autodetection,
 * it needs a way to flush as much of the CPU's caches as possible.
 */
void sched_cacheflush(void)
{
	ia64_sal_cache_flush(3);
}

void __init
check_bugs (void)
{
	ia64_patch_mckinley_e9((unsigned long) __start___mckinley_e9_bundles,
			       (unsigned long) __end___mckinley_e9_bundles);
}

static int __init run_dmi_scan(void)
{
	dmi_scan_machine();
	return 0;
}
core_initcall(run_dmi_scan);