/*
 * Architecture-specific setup.
 *
 * Copyright (C) 1998-2001, 2003-2004 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *	Stephane Eranian <eranian@hpl.hp.com>
 * Copyright (C) 2000, 2004 Intel Corp
 *	Rohit Seth <rohit.seth@intel.com>
 *	Suresh Siddha <suresh.b.siddha@intel.com>
 *	Gordon Jin <gordon.jin@intel.com>
 * Copyright (C) 1999 VA Linux Systems
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 *
 * 12/26/04 S.Siddha, G.Jin, R.Seth
 *			Add multi-threading and multi-core detection
 * 11/12/01 D.Mosberger	Convert get_cpuinfo() to seq_file based show_cpuinfo().
 * 04/04/00 D.Mosberger	renamed cpu_initialized to cpu_online_map
 * 03/31/00 R.Seth	cpu_initialized and current->processor fixes
 * 02/04/00 D.Mosberger	some more get_cpuinfo fixes...
 * 02/01/00 R.Seth	fixed get_cpuinfo for SMP
 * 01/07/99 S.Eranian	added the support for command line argument
 * 06/24/99 W.Drummond	added boot_cpu_data.
 * 05/28/05 Z. Menyhart	Dynamic stride size for "flush_icache_range()"
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/init.h>

#include <linux/acpi.h>
#include <linux/bootmem.h>
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/reboot.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/threads.h>
#include <linux/tty.h>
#include <linux/serial.h>
#include <linux/serial_core.h>
#include <linux/efi.h>
#include <linux/initrd.h>
#include <linux/platform.h>
#include <linux/pm.h>

#include <asm/ia32.h>
#include <asm/machvec.h>
#include <asm/mca.h>
#include <asm/meminit.h>
#include <asm/page.h>
#include <asm/patch.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/sal.h>
#include <asm/sections.h>
#include <asm/serial.h>
#include <asm/setup.h>
#include <asm/smp.h>
#include <asm/system.h>
#include <asm/unistd.h>

#if defined(CONFIG_SMP) && (IA64_CPU_SIZE > PAGE_SIZE)
# error "struct cpuinfo_ia64 too big!"
#endif

#ifdef CONFIG_SMP
unsigned long __per_cpu_offset[NR_CPUS];
EXPORT_SYMBOL(__per_cpu_offset);
#endif

DEFINE_PER_CPU(struct cpuinfo_ia64, cpu_info);
DEFINE_PER_CPU(unsigned long, local_per_cpu_offset);
DEFINE_PER_CPU(unsigned long, ia64_phys_stacked_size_p8);
unsigned long ia64_cycles_per_usec;
struct ia64_boot_param *ia64_boot_param;
struct screen_info screen_info;
unsigned long vga_console_iobase;
unsigned long vga_console_membase;

unsigned long ia64_max_cacheline_size;
unsigned long ia64_iobase;	/* virtual address for I/O accesses */
EXPORT_SYMBOL(ia64_iobase);
struct io_space io_space[MAX_IO_SPACES];
EXPORT_SYMBOL(io_space);
unsigned int num_io_spaces;

/*
 * "flush_icache_range()" needs to know what processor-dependent stride size to use
 * when it makes i-cache(s) coherent with d-caches.
 */
#define	I_CACHE_STRIDE_SHIFT	5	/* Safest way to go: 32 bytes by 32 bytes */
unsigned long ia64_i_cache_stride_shift = ~0;
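
/*
 * A sketch of how the stride is meant to be consumed (the actual flusher
 * is hand-written assembly; the loop below is illustrative only):
 *
 *	stride = 1UL << ia64_i_cache_stride_shift;
 *	for (addr = start & ~(stride - 1); addr < end; addr += stride)
 *		ia64_fc((void *) addr);		// flush one cache line
 *	ia64_sync_i();
 *	ia64_srlz_i();
 */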

/*
 * The merge_mask variable needs to be set to (max(iommu_page_size(iommu)) - 1).  This
 * mask specifies a mask of address bits that must be 0 in order for two buffers to be
 * mergeable by the I/O MMU (i.e., the end address of the first buffer and the start
 * address of the second buffer must be aligned to (merge_mask+1) in order to be
 * mergeable).  By default, we assume there is no I/O MMU which can merge physically
 * discontiguous buffers, so we set the merge_mask to ~0UL, which corresponds to an
 * I/O MMU page size of 2^64.
 */
unsigned long ia64_max_iommu_merge_mask = ~0UL;
EXPORT_SYMBOL(ia64_max_iommu_merge_mask);

/*
 * We use a special marker for the end of memory and it uses the extra (+1) slot.
 */
struct rsvd_region rsvd_region[IA64_MAX_RSVD_REGIONS + 1];
int num_rsvd_regions;


/*
 * Filter incoming memory segments based on the primitive map created from the boot
 * parameters.  Segments contained in the map are removed from the memory ranges.  A
 * caller-specified function is called with the memory ranges that remain after filtering.
 * This routine does not assume the incoming segments are sorted.
 */
int
filter_rsvd_memory (unsigned long start, unsigned long end, void *arg)
{
	unsigned long range_start, range_end, prev_start;
	void (*func)(unsigned long, unsigned long, int);
	int i;

#if IGNORE_PFN0
	if (start == PAGE_OFFSET) {
		printk(KERN_WARNING "warning: skipping physical page 0\n");
		start += PAGE_SIZE;
		if (start >= end)
			return 0;
	}
#endif
	/*
	 * lowest possible address (the walker uses virtual addresses)
	 */
	prev_start = PAGE_OFFSET;
	func = arg;

	for (i = 0; i < num_rsvd_regions; ++i) {
		range_start = max(start, prev_start);
		range_end   = min(end, rsvd_region[i].start);

		if (range_start < range_end)
			call_pernode_memory(__pa(range_start), range_end - range_start, func);

		/* nothing more available in this segment */
		if (range_end == end)
			return 0;

		prev_start = rsvd_region[i].end;
	}
	/* end of memory marker allows full processing inside loop body */
	return 0;
}
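
/*
 * Illustration (addresses are made up): with a single reserved region
 * [0x100000, 0x110000) and an incoming segment [0xe0000, 0x120000),
 * filter_rsvd_memory() invokes func twice, once for [0xe0000, 0x100000)
 * and once for [0x110000, 0x120000); the reserved hole is skipped.  The
 * end-of-memory marker region guarantees the final sub-range is emitted
 * from inside the loop.
 */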

static void
sort_regions (struct rsvd_region *rsvd_region, int max)
{
	int j;

	/* simple bubble sorting */
	while (max--) {
		for (j = 0; j < max; ++j) {
			if (rsvd_region[j].start > rsvd_region[j+1].start) {
				struct rsvd_region tmp;
				tmp = rsvd_region[j];
				rsvd_region[j] = rsvd_region[j + 1];
				rsvd_region[j + 1] = tmp;
			}
		}
	}
}

/**
 * reserve_memory - set up reserved memory areas
 *
 * Set up the reserved memory areas set aside for the boot parameters,
 * initrd, etc.  There are currently %IA64_MAX_RSVD_REGIONS defined,
 * see include/asm-ia64/meminit.h if you need to define more.
 */
void
reserve_memory (void)
{
	int n = 0;

	/*
	 * none of the entries in this table overlap
	 */
	rsvd_region[n].start = (unsigned long) ia64_boot_param;
	rsvd_region[n].end   = rsvd_region[n].start + sizeof(*ia64_boot_param);
	n++;

	rsvd_region[n].start = (unsigned long) __va(ia64_boot_param->efi_memmap);
	rsvd_region[n].end   = rsvd_region[n].start + ia64_boot_param->efi_memmap_size;
	n++;

	rsvd_region[n].start = (unsigned long) __va(ia64_boot_param->command_line);
	rsvd_region[n].end   = (rsvd_region[n].start
				+ strlen(__va(ia64_boot_param->command_line)) + 1);
	n++;

	rsvd_region[n].start = (unsigned long) ia64_imva((void *)KERNEL_START);
	rsvd_region[n].end   = (unsigned long) ia64_imva(_end);
	n++;

#ifdef CONFIG_BLK_DEV_INITRD
	if (ia64_boot_param->initrd_start) {
		rsvd_region[n].start = (unsigned long)__va(ia64_boot_param->initrd_start);
		rsvd_region[n].end   = rsvd_region[n].start + ia64_boot_param->initrd_size;
		n++;
	}
#endif

	/* end of memory marker */
	rsvd_region[n].start = ~0UL;
	rsvd_region[n].end   = ~0UL;
	n++;

	num_rsvd_regions = n;

	sort_regions(rsvd_region, num_rsvd_regions);
}

/**
 * find_initrd - get initrd parameters from the boot parameter structure
 *
 * Grab the initrd start and end from the boot parameter struct given us by
 * the boot loader.
 */
void
find_initrd (void)
{
#ifdef CONFIG_BLK_DEV_INITRD
	if (ia64_boot_param->initrd_start) {
		initrd_start = (unsigned long)__va(ia64_boot_param->initrd_start);
		initrd_end   = initrd_start + ia64_boot_param->initrd_size;

		printk(KERN_INFO "Initial ramdisk at: 0x%lx (%lu bytes)\n",
		       initrd_start, ia64_boot_param->initrd_size);
	}
#endif
}

static void __init
io_port_init (void)
{
	extern unsigned long ia64_iobase;
	unsigned long phys_iobase;

	/*
	 * Set `iobase' to the appropriate address in region 6 (uncached access range).
	 *
	 * The EFI memory map is the "preferred" location to get the I/O port space base,
	 * rather than relying on AR.KR0.  This should become clearer in future SAL
	 * specs.  We'll fall back to getting it out of AR.KR0 if no appropriate entry is
	 * found in the memory map.
	 */
	phys_iobase = efi_get_iobase();
	if (phys_iobase)
		/* set AR.KR0 since this is all we use it for anyway */
		ia64_set_kr(IA64_KR_IO_BASE, phys_iobase);
	else {
		phys_iobase = ia64_get_kr(IA64_KR_IO_BASE);
		printk(KERN_INFO "No I/O port range found in EFI memory map, falling back "
		       "to AR.KR0\n");
		printk(KERN_INFO "I/O port base = 0x%lx\n", phys_iobase);
	}
	ia64_iobase = (unsigned long) ioremap(phys_iobase, 0);

	/* setup legacy IO port space */
	io_space[0].mmio_base = ia64_iobase;
	io_space[0].sparse = 1;
	num_io_spaces = 1;
}
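
/*
 * Note: with io_space[0].sparse set, legacy port numbers are spread out
 * over the uncached mapping, roughly as
 *
 *	addr = mmio_base | ((port >> 2) << 12) | (port & 0xfff)
 *
 * (see the sparse-encoding helpers in asm/io.h), so that each 4-port
 * granule lands on its own 4K page.
 */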

/**
 * early_console_setup - setup debugging console
 *
 * Consoles started here require little enough setup that we can start using
 * them very early in the boot process, either right after the machine
 * vector initialization, or even before if the drivers can detect their hw.
 *
 * Returns non-zero if a console couldn't be set up.
 */
static inline int __init
early_console_setup (char *cmdline)
{
	int earlycons = 0;

#ifdef CONFIG_SERIAL_SGI_L1_CONSOLE
	{
		extern int sn_serial_console_early_setup(void);
		if (!sn_serial_console_early_setup())
			earlycons++;
	}
#endif
#ifdef CONFIG_EFI_PCDP
	if (!efi_setup_pcdp_console(cmdline))
		earlycons++;
#endif
#ifdef CONFIG_SERIAL_8250_CONSOLE
	if (!early_serial_console_init(cmdline))
		earlycons++;
#endif

	return (earlycons) ? 0 : -1;
}

static inline void
mark_bsp_online (void)
{
#ifdef CONFIG_SMP
	/* If we register an early console, allow CPU 0 to printk */
	cpu_set(smp_processor_id(), cpu_online_map);
#endif
}

#ifdef CONFIG_SMP
static void
check_for_logical_procs (void)
{
	pal_logical_to_physical_t info;
	s64 status;

	status = ia64_pal_logical_to_phys(0, &info);
	if (status == -1) {
		printk(KERN_INFO "No logical to physical processor mapping "
		       "available\n");
		return;
	}
	if (status) {
		printk(KERN_ERR "ia64_pal_logical_to_phys failed with %ld\n",
		       status);
		return;
	}
	/*
	 * Total number of siblings that the BSP has, though not all of
	 * them may have booted successfully.  The correct number of
	 * siblings booted is in info.overview_num_log.
	 */
	smp_num_siblings = info.overview_tpc;
	smp_num_cpucores = info.overview_cpp;
}
#endif
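
/*
 * Ordering in setup_arch() below matters: the unwinder and vtop patch
 * list come first, then EFI and I/O port discovery, machine vector
 * selection, the early console, ACPI tables, memory discovery, the SAL
 * system table, and finally cpu_init() for the bootstrap CPU.
 */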
void __init
setup_arch (char **cmdline_p)
{
	unw_init();

	ia64_patch_vtop((u64) __start___vtop_patchlist, (u64) __end___vtop_patchlist);

	*cmdline_p = __va(ia64_boot_param->command_line);
	strlcpy(saved_command_line, *cmdline_p, COMMAND_LINE_SIZE);

	efi_init();
	io_port_init();

#ifdef CONFIG_IA64_GENERIC
	{
		const char *mvec_name = strstr (*cmdline_p, "machvec=");
		char str[64];

		if (mvec_name) {
			const char *end;
			size_t len;

			mvec_name += 8;
			end = strchr (mvec_name, ' ');
			if (end)
				len = end - mvec_name;
			else
				len = strlen (mvec_name);
			len = min(len, sizeof (str) - 1);
			strncpy (str, mvec_name, len);
			str[len] = '\0';
			mvec_name = str;
		} else
			mvec_name = acpi_get_sysname();
		machvec_init(mvec_name);
	}
#endif

	if (early_console_setup(*cmdline_p) == 0)
		mark_bsp_online();

#ifdef CONFIG_ACPI_BOOT
	/* Initialize the ACPI boot-time table parser */
	acpi_table_init();
# ifdef CONFIG_ACPI_NUMA
	acpi_numa_init();
# endif
#else
# ifdef CONFIG_SMP
	smp_build_cpu_map();	/* happens, e.g., with the Ski simulator */
# endif
#endif /* CONFIG_ACPI_BOOT */

	find_memory();

	/* process SAL system table: */
	ia64_sal_init(efi.sal_systab);

#ifdef CONFIG_SMP
	cpu_physical_id(0) = hard_smp_processor_id();

	cpu_set(0, cpu_sibling_map[0]);
	cpu_set(0, cpu_core_map[0]);

	check_for_logical_procs();
	if (smp_num_cpucores > 1)
		printk(KERN_INFO
		       "cpu package is Multi-Core capable: number of cores=%d\n",
		       smp_num_cpucores);
	if (smp_num_siblings > 1)
		printk(KERN_INFO
		       "cpu package is Multi-Threading capable: number of siblings=%d\n",
		       smp_num_siblings);
#endif

	cpu_init();	/* initialize the bootstrap CPU */

#ifdef CONFIG_ACPI_BOOT
	acpi_boot_init();
#endif

#ifdef CONFIG_VT
	if (!conswitchp) {
# if defined(CONFIG_DUMMY_CONSOLE)
		conswitchp = &dummy_con;
# endif
# if defined(CONFIG_VGA_CONSOLE)
		/*
		 * Non-legacy systems may route the legacy VGA MMIO range to system
		 * memory.  vga_con probes the MMIO hole, so memory looks like
		 * a VGA device to it.  The EFI memory map can tell us if it's
		 * memory so we can avoid this problem.
		 */
		if (efi_mem_type(0xA0000) != EFI_CONVENTIONAL_MEMORY)
			conswitchp = &vga_con;
# endif
	}
#endif

	/* enable IA-64 Machine Check Abort Handling unless disabled */
	if (!strstr(saved_command_line, "nomca"))
		ia64_mca_init();

	platform_setup(cmdline_p);
	paging_init();
}

/*
 * Display CPU info for all CPUs.
 */
static int
show_cpuinfo (struct seq_file *m, void *v)
{
#ifdef CONFIG_SMP
#	define lpj	c->loops_per_jiffy
#	define cpunum	c->cpu
#else
#	define lpj	loops_per_jiffy
#	define cpunum	0
#endif
	static struct {
		unsigned long mask;
		const char *feature_name;
	} feature_bits[] = {
		{ 1UL << 0, "branchlong" },
		{ 1UL << 1, "spontaneous deferral"},
		{ 1UL << 2, "16-byte atomic ops" }
	};
	char family[32], features[128], *cp, sep;
	struct cpuinfo_ia64 *c = v;
	unsigned long mask;
	int i;

	mask = c->features;

	switch (c->family) {
	case 0x07:	memcpy(family, "Itanium", 8); break;
	case 0x1f:	memcpy(family, "Itanium 2", 10); break;
	default:	sprintf(family, "%u", c->family); break;
	}

	/* build the feature string: */
	memcpy(features, " standard", 10);
	cp = features;
	sep = 0;
	for (i = 0; i < (int) ARRAY_SIZE(feature_bits); ++i) {
		if (mask & feature_bits[i].mask) {
			if (sep)
				*cp++ = sep;
			sep = ',';
			*cp++ = ' ';
			strcpy(cp, feature_bits[i].feature_name);
			cp += strlen(feature_bits[i].feature_name);
			mask &= ~feature_bits[i].mask;
		}
	}
	if (mask) {
		/* print unknown features as a hex value: */
		if (sep)
			*cp++ = sep;
		sprintf(cp, " 0x%lx", mask);
	}

	seq_printf(m,
		   "processor  : %d\n"
		   "vendor     : %s\n"
		   "arch       : IA-64\n"
		   "family     : %s\n"
		   "model      : %u\n"
		   "revision   : %u\n"
		   "archrev    : %u\n"
		   "features   :%s\n"	/* don't change this---it _is_ right! */
		   "cpu number : %lu\n"
		   "cpu regs   : %u\n"
		   "cpu MHz    : %lu.%06lu\n"
		   "itc MHz    : %lu.%06lu\n"
		   "BogoMIPS   : %lu.%02lu\n",
		   cpunum, c->vendor, family, c->model, c->revision, c->archrev,
		   features, c->ppn, c->number,
		   c->proc_freq / 1000000, c->proc_freq % 1000000,
		   c->itc_freq / 1000000, c->itc_freq % 1000000,
		   lpj*HZ/500000, (lpj*HZ/5000) % 100);
#ifdef CONFIG_SMP
	seq_printf(m, "siblings   : %u\n", c->num_log);
	if (c->threads_per_core > 1 || c->cores_per_socket > 1)
		seq_printf(m,
			   "physical id: %u\n"
			   "core id    : %u\n"
			   "thread id  : %u\n",
			   c->socket_id, c->core_id, c->thread_id);
#endif
	seq_printf(m, "\n");

	return 0;
}
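
/*
 * The seq_file iterator below feeds show_cpuinfo() one online CPU at a
 * time: c_start() skips offline slots, c_next() advances *pos, and the
 * walk ends once *pos reaches NR_CPUS.
 */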
static void *
c_start (struct seq_file *m, loff_t *pos)
{
#ifdef CONFIG_SMP
	while (*pos < NR_CPUS && !cpu_isset(*pos, cpu_online_map))
		++*pos;
#endif
	return *pos < NR_CPUS ? cpu_data(*pos) : NULL;
}

static void *
c_next (struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return c_start(m, pos);
}

static void
c_stop (struct seq_file *m, void *v)
{
}

struct seq_operations cpuinfo_op = {
	.start =	c_start,
	.next =		c_next,
	.stop =		c_stop,
	.show =		show_cpuinfo
};

void
identify_cpu (struct cpuinfo_ia64 *c)
{
	union {
		unsigned long bits[5];
		struct {
			/* id 0 & 1: */
			char vendor[16];

			/* id 2 */
			u64 ppn;		/* processor serial number */

			/* id 3: */
			unsigned number		:  8;
			unsigned revision	:  8;
			unsigned model		:  8;
			unsigned family		:  8;
			unsigned archrev	:  8;
			unsigned reserved	: 24;

			/* id 4: */
			u64 features;
		} field;
	} cpuid;
	pal_vm_info_1_u_t vm1;
	pal_vm_info_2_u_t vm2;
	pal_status_t status;
	unsigned long impl_va_msb = 50, phys_addr_size = 44;	/* Itanium defaults */
	int i;

	for (i = 0; i < 5; ++i)
		cpuid.bits[i] = ia64_get_cpuid(i);

	memcpy(c->vendor, cpuid.field.vendor, 16);
#ifdef CONFIG_SMP
	c->cpu = smp_processor_id();

	/*
	 * The default values below will be overwritten by identify_siblings()
	 * for Multi-Threading/Multi-Core capable CPUs.
	 */
	c->threads_per_core = c->cores_per_socket = c->num_log = 1;
	c->socket_id = -1;

	identify_siblings(c);
#endif
	c->ppn = cpuid.field.ppn;
	c->number = cpuid.field.number;
	c->revision = cpuid.field.revision;
	c->model = cpuid.field.model;
	c->family = cpuid.field.family;
	c->archrev = cpuid.field.archrev;
	c->features = cpuid.field.features;

	status = ia64_pal_vm_summary(&vm1, &vm2);
	if (status == PAL_STATUS_SUCCESS) {
		impl_va_msb = vm2.pal_vm_info_2_s.impl_va_msb;
		phys_addr_size = vm1.pal_vm_info_1_s.phys_add_size;
	}
	c->unimpl_va_mask = ~((7L<<61) | ((1L << (impl_va_msb + 1)) - 1));
	c->unimpl_pa_mask = ~((1L<<63) | ((1L << phys_addr_size) - 1));
}
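
/*
 * About the two masks computed above: virtual-address bits 63..61 select
 * the region, bits impl_va_msb..0 are implemented, and the bits in
 * between must be sign-extension copies, so unimpl_va_mask flags that
 * hole.  unimpl_pa_mask likewise flags physical bits above
 * phys_addr_size, excluding bit 63 (the uncacheable-attribute bit).
 */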

void
setup_per_cpu_areas (void)
{
	/* start_kernel() requires this... */
}

/*
 * Calculate the max. cache line size.
 *
 * In addition, the minimum of the i-cache stride sizes is calculated for
 * "flush_icache_range()".
 */
static void
get_max_cacheline_size (void)
{
	unsigned long line_size, max = 1;
	u64 l, levels, unique_caches;
	pal_cache_config_info_t cci;
	s64 status;

	status = ia64_pal_cache_summary(&levels, &unique_caches);
	if (status != 0) {
		printk(KERN_ERR "%s: ia64_pal_cache_summary() failed (status=%ld)\n",
		       __FUNCTION__, status);
		max = SMP_CACHE_BYTES;
		/* Safest setup for "flush_icache_range()" */
		ia64_i_cache_stride_shift = I_CACHE_STRIDE_SHIFT;
		goto out;
	}

	for (l = 0; l < levels; ++l) {
		status = ia64_pal_cache_config_info(l, /* cache_type (data_or_unified)= */ 2,
						    &cci);
		if (status != 0) {
			printk(KERN_ERR
			       "%s: ia64_pal_cache_config_info(l=%lu, 2) failed (status=%ld)\n",
			       __FUNCTION__, l, status);
			max = SMP_CACHE_BYTES;
			/* The safest setup for "flush_icache_range()" */
			cci.pcci_stride = I_CACHE_STRIDE_SHIFT;
			cci.pcci_unified = 1;
		}
		line_size = 1 << cci.pcci_line_size;
		if (line_size > max)
			max = line_size;
		if (!cci.pcci_unified) {
			status = ia64_pal_cache_config_info(l,
						    /* cache_type (instruction)= */ 1,
						    &cci);
			if (status != 0) {
				printk(KERN_ERR
				       "%s: ia64_pal_cache_config_info(l=%lu, 1) failed (status=%ld)\n",
				       __FUNCTION__, l, status);
				/* The safest setup for "flush_icache_range()" */
				cci.pcci_stride = I_CACHE_STRIDE_SHIFT;
			}
		}
		if (cci.pcci_stride < ia64_i_cache_stride_shift)
			ia64_i_cache_stride_shift = cci.pcci_stride;
	}
  out:
	if (max > ia64_max_cacheline_size)
		ia64_max_cacheline_size = max;
}
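
/*
 * Note that the loop above takes the *minimum* i-cache stride across all
 * cache levels (a smaller stride is always safe) while taking the
 * *maximum* line size; any PAL failure falls back to the conservative
 * 32-byte stride via I_CACHE_STRIDE_SHIFT.
 */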

/*
 * cpu_init() initializes state that is per-CPU.  This function acts
 * as a 'CPU state barrier', nothing should get across.
 */
void
cpu_init (void)
{
	extern void __devinit ia64_mmu_init (void *);
	unsigned long num_phys_stacked;
	pal_vm_info_2_u_t vmi;
	unsigned int max_ctx;
	struct cpuinfo_ia64 *cpu_info;
	void *cpu_data;

	cpu_data = per_cpu_init();

	/*
	 * We set ar.k3 so that assembly code in MCA handler can compute
	 * physical addresses of per cpu variables with a simple:
	 *	phys = ar.k3 + &per_cpu_var
	 */
	ia64_set_kr(IA64_KR_PER_CPU_DATA,
		    ia64_tpa(cpu_data) - (long) __per_cpu_start);

	get_max_cacheline_size();

	/*
	 * We can't pass "local_cpu_data" to identify_cpu() because we haven't called
	 * ia64_mmu_init() yet.  And we can't call ia64_mmu_init() first because it
	 * depends on the data returned by identify_cpu().  We break the dependency by
	 * accessing cpu_data() through the canonical per-CPU address.
	 */
	cpu_info = cpu_data + ((char *) &__ia64_per_cpu_var(cpu_info) - __per_cpu_start);
	identify_cpu(cpu_info);

#ifdef CONFIG_MCKINLEY
	{
#		define FEATURE_SET 16
		struct ia64_pal_retval iprv;

		if (cpu_info->family == 0x1f) {
			PAL_CALL_PHYS(iprv, PAL_PROC_GET_FEATURES, 0, FEATURE_SET, 0);
			if ((iprv.status == 0) && (iprv.v0 & 0x80) && (iprv.v2 & 0x80))
				PAL_CALL_PHYS(iprv, PAL_PROC_SET_FEATURES,
				              (iprv.v1 | 0x80), FEATURE_SET, 0);
		}
	}
#endif

	/* Clear the stack memory reserved for pt_regs: */
	memset(ia64_task_regs(current), 0, sizeof(struct pt_regs));

	ia64_set_kr(IA64_KR_FPU_OWNER, 0);

	/*
	 * Initialize the page-table base register to a global
	 * directory with all zeroes.  This ensures that we can handle
	 * TLB-misses to user address-space even before we have created
	 * the first user address-space.  This may happen, e.g., due to
	 * aggressive use of lfetch.fault.
	 */
	ia64_set_kr(IA64_KR_PT_BASE, __pa(ia64_imva(empty_zero_page)));

	/*
	 * Initialize default control register to defer speculative faults except
	 * for those arising from TLB misses, which are not deferred.  The
	 * kernel MUST NOT depend on a particular setting of these bits (in other words,
	 * the kernel must have recovery code for all speculative accesses).  Turn on
	 * dcr.lc as per recommendation by the architecture team.  Most IA-32 apps
	 * shouldn't be affected by this (moral: keep your ia32 locks aligned and you'll
	 * be fine).
	 */
	ia64_setreg(_IA64_REG_CR_DCR, (  IA64_DCR_DP | IA64_DCR_DK | IA64_DCR_DX | IA64_DCR_DR
				       | IA64_DCR_DA | IA64_DCR_DD | IA64_DCR_LC));
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
	if (current->mm)
		BUG();

	ia64_mmu_init(ia64_imva(cpu_data));
	ia64_mca_cpu_init(ia64_imva(cpu_data));

#ifdef CONFIG_IA32_SUPPORT
	ia32_cpu_init();
#endif

	/* Clear ITC to eliminate sched_clock() overflows in human time.  */
	ia64_set_itc(0);

	/* disable all local interrupt sources: */
	ia64_set_itv(1 << 16);
	ia64_set_lrr0(1 << 16);
	ia64_set_lrr1(1 << 16);
	ia64_setreg(_IA64_REG_CR_PMV, 1 << 16);
	ia64_setreg(_IA64_REG_CR_CMCV, 1 << 16);

	/* clear TPR & XTP to enable all interrupt classes: */
	ia64_setreg(_IA64_REG_CR_TPR, 0);
#ifdef CONFIG_SMP
	normal_xtp();
#endif

	/* set ia64_ctx.max_rid to the maximum RID that is supported by all CPUs: */
	if (ia64_pal_vm_summary(NULL, &vmi) == 0)
		max_ctx = (1U << (vmi.pal_vm_info_2_s.rid_size - 3)) - 1;
	else {
		printk(KERN_WARNING "cpu_init: PAL VM summary failed, assuming 18 RID bits\n");
		max_ctx = (1U << 15) - 1;	/* use architected minimum */
	}
	/* lock-free "min" update: lower ia64_ctx.max_ctx if this CPU supports fewer RIDs */
	while (max_ctx < ia64_ctx.max_ctx) {
		unsigned int old = ia64_ctx.max_ctx;
		if (cmpxchg(&ia64_ctx.max_ctx, old, max_ctx) == old)
			break;
	}

	if (ia64_pal_rse_info(&num_phys_stacked, NULL) != 0) {
		printk(KERN_WARNING "cpu_init: PAL RSE info failed; assuming 96 physical "
		       "stacked regs\n");
		num_phys_stacked = 96;
	}
	/* size of physical stacked register partition plus 8 bytes: */
	__get_cpu_var(ia64_phys_stacked_size_p8) = num_phys_stacked*8 + 8;
	platform_cpu_init();
	pm_idle = default_idle;
}

/* patch in the McKinley "E9" erratum-workaround bundles collected at link time */
void
check_bugs (void)
{
	ia64_patch_mckinley_e9((unsigned long) __start___mckinley_e9_bundles,
			       (unsigned long) __end___mckinley_e9_bundles);
}