/*
 * Architecture-specific setup.
 *
 * Copyright (C) 1998-2001, 2003-2004 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *	Stephane Eranian <eranian@hpl.hp.com>
 * Copyright (C) 2000, 2004 Intel Corp
 *	Rohit Seth <rohit.seth@intel.com>
 *	Suresh Siddha <suresh.b.siddha@intel.com>
 *	Gordon Jin <gordon.jin@intel.com>
 * Copyright (C) 1999 VA Linux Systems
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 *
 * 12/26/04 S.Siddha, G.Jin, R.Seth
 *			Add multi-threading and multi-core detection
 * 11/12/01 D.Mosberger Convert get_cpuinfo() to seq_file based show_cpuinfo().
 * 04/04/00 D.Mosberger renamed cpu_initialized to cpu_online_map
 * 03/31/00 R.Seth	cpu_initialized and current->processor fixes
 * 02/04/00 D.Mosberger	some more get_cpuinfo fixes...
 * 02/01/00 R.Seth	fixed get_cpuinfo for SMP
 * 01/07/99 S.Eranian	added the support for command line argument
 * 06/24/99 W.Drummond	added boot_cpu_data.
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/init.h>

#include <linux/acpi.h>
#include <linux/bootmem.h>
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/reboot.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/threads.h>
#include <linux/tty.h>
#include <linux/serial.h>
#include <linux/serial_core.h>
#include <linux/efi.h>
#include <linux/initrd.h>

#include <asm/ia32.h>
#include <asm/machvec.h>
#include <asm/mca.h>
#include <asm/meminit.h>
#include <asm/page.h>
#include <asm/patch.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/sal.h>
#include <asm/sections.h>
#include <asm/serial.h>
#include <asm/setup.h>
#include <asm/smp.h>
#include <asm/system.h>
#include <asm/unistd.h>

#if defined(CONFIG_SMP) && (IA64_CPU_SIZE > PAGE_SIZE)
# error "struct cpuinfo_ia64 too big!"
#endif

#ifdef CONFIG_SMP
unsigned long __per_cpu_offset[NR_CPUS];
EXPORT_SYMBOL(__per_cpu_offset);
#endif

DEFINE_PER_CPU(struct cpuinfo_ia64, cpu_info);
DEFINE_PER_CPU(unsigned long, local_per_cpu_offset);
DEFINE_PER_CPU(unsigned long, ia64_phys_stacked_size_p8);
unsigned long ia64_cycles_per_usec;
struct ia64_boot_param *ia64_boot_param;
struct screen_info screen_info;

unsigned long ia64_max_cacheline_size;
unsigned long ia64_iobase;	/* virtual address for I/O accesses */
EXPORT_SYMBOL(ia64_iobase);
struct io_space io_space[MAX_IO_SPACES];
EXPORT_SYMBOL(io_space);
unsigned int num_io_spaces;

/*
 * The merge_mask variable needs to be set to (max(iommu_page_size(iommu)) - 1).  This
 * mask specifies a mask of address bits that must be 0 in order for two buffers to be
 * mergeable by the I/O MMU (i.e., the end address of the first buffer and the start
 * address of the second buffer must be aligned to (merge_mask+1) in order to be
 * mergeable).  By default, we assume there is no I/O MMU which can merge physically
 * discontiguous buffers, so we set the merge_mask to ~0UL, which corresponds to an
 * iommu page-size of 2^64.
 */
unsigned long ia64_max_iommu_merge_mask = ~0UL;
EXPORT_SYMBOL(ia64_max_iommu_merge_mask);
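
/*
 * Illustrative sketch (not part of the original code): given the definition
 * above, two physically discontiguous buffers can only be merged by an I/O MMU
 * when the boundary between them is aligned to the IOMMU page size, i.e. when
 * no address bit covered by the merge mask is set:
 *
 *	static inline int iommu_buffers_mergeable(unsigned long end1, unsigned long start2)
 *	{
 *		return end1 == start2 && !(start2 & ia64_max_iommu_merge_mask);
 *	}
 *
 * With the default mask of ~0UL the test can never succeed, which encodes the
 * "no merging I/O MMU present" assumption.  The helper name is hypothetical.
 */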

/*
 * We use a special marker for the end of memory and it uses the extra (+1) slot
 */
struct rsvd_region rsvd_region[IA64_MAX_RSVD_REGIONS + 1];
int num_rsvd_regions;


/*
 * Filter incoming memory segments based on the primitive map created from the boot
 * parameters.  Segments contained in the map are removed from the memory ranges.  A
 * caller-specified function is called with the memory ranges that remain after filtering.
 * This routine does not assume the incoming segments are sorted.
 */
int
filter_rsvd_memory (unsigned long start, unsigned long end, void *arg)
{
	unsigned long range_start, range_end, prev_start;
	void (*func)(unsigned long, unsigned long, int);
	int i;

#if IGNORE_PFN0
	if (start == PAGE_OFFSET) {
		printk(KERN_WARNING "warning: skipping physical page 0\n");
		start += PAGE_SIZE;
		if (start >= end) return 0;
	}
#endif
	/*
	 * lowest possible address (walker uses virtual)
	 */
	prev_start = PAGE_OFFSET;
	func = arg;

	for (i = 0; i < num_rsvd_regions; ++i) {
		range_start = max(start, prev_start);
		range_end   = min(end, rsvd_region[i].start);

		if (range_start < range_end)
			call_pernode_memory(__pa(range_start), range_end - range_start, func);

		/* nothing more available in this segment */
		if (range_end == end) return 0;

		prev_start = rsvd_region[i].end;
	}
	/* end of memory marker allows full processing inside loop body */
	return 0;
}
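
/*
 * Worked example (illustrative, addresses made up): with a single reserved
 * region covering [PAGE_OFFSET+0x2000, PAGE_OFFSET+0x3000) followed by the
 * ~0UL end-of-memory marker, a call such as
 *
 *	filter_rsvd_memory(PAGE_OFFSET + 0x1000, PAGE_OFFSET + 0x5000, my_func);
 *
 * hands two ranges to call_pernode_memory(): [PAGE_OFFSET+0x1000,
 * PAGE_OFFSET+0x2000) and [PAGE_OFFSET+0x3000, PAGE_OFFSET+0x5000); the
 * reserved bytes in between are never given out.  my_func stands in for
 * whatever per-node callback the memory-init code actually passes in.
 */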

static void
sort_regions (struct rsvd_region *rsvd_region, int max)
{
	int j;

	/* simple bubble sorting */
	while (max--) {
		for (j = 0; j < max; ++j) {
			if (rsvd_region[j].start > rsvd_region[j+1].start) {
				struct rsvd_region tmp;
				tmp = rsvd_region[j];
				rsvd_region[j] = rsvd_region[j + 1];
				rsvd_region[j + 1] = tmp;
			}
		}
	}
}

/**
 * reserve_memory - setup reserved memory areas
 *
 * Set up the reserved memory areas set aside for the boot parameters,
 * initrd, etc.  There are currently %IA64_MAX_RSVD_REGIONS defined,
 * see include/asm-ia64/meminit.h if you need to define more.
 */
void
reserve_memory (void)
{
	int n = 0;

	/*
	 * none of the entries in this table overlap
	 */
	rsvd_region[n].start = (unsigned long) ia64_boot_param;
	rsvd_region[n].end   = rsvd_region[n].start + sizeof(*ia64_boot_param);
	n++;

	rsvd_region[n].start = (unsigned long) __va(ia64_boot_param->efi_memmap);
	rsvd_region[n].end   = rsvd_region[n].start + ia64_boot_param->efi_memmap_size;
	n++;

	rsvd_region[n].start = (unsigned long) __va(ia64_boot_param->command_line);
	rsvd_region[n].end   = (rsvd_region[n].start
				+ strlen(__va(ia64_boot_param->command_line)) + 1);
	n++;

	rsvd_region[n].start = (unsigned long) ia64_imva((void *)KERNEL_START);
	rsvd_region[n].end   = (unsigned long) ia64_imva(_end);
	n++;

#ifdef CONFIG_BLK_DEV_INITRD
	if (ia64_boot_param->initrd_start) {
		rsvd_region[n].start = (unsigned long)__va(ia64_boot_param->initrd_start);
		rsvd_region[n].end   = rsvd_region[n].start + ia64_boot_param->initrd_size;
		n++;
	}
#endif

	/* end of memory marker */
	rsvd_region[n].start = ~0UL;
	rsvd_region[n].end   = ~0UL;
	n++;

	num_rsvd_regions = n;

	sort_regions(rsvd_region, num_rsvd_regions);
}
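
/*
 * Illustrative layout (not from a real boot): after reserve_memory() the table
 * might look like this, sorted by start address and terminated by the ~0UL
 * end-of-memory marker that lets filter_rsvd_memory() do all of its work
 * inside the loop body:
 *
 *	rsvd_region[0] = boot parameter structure
 *	rsvd_region[1] = EFI memory map
 *	rsvd_region[2] = kernel command line
 *	rsvd_region[3] = kernel image (KERNEL_START .. _end)
 *	rsvd_region[4] = initrd, if one was passed
 *	rsvd_region[5] = { ~0UL, ~0UL }		<- sentinel
 *
 * The actual order depends on where the boot loader placed these objects.
 */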

/**
 * find_initrd - get initrd parameters from the boot parameter structure
 *
 * Grab the initrd start and end from the boot parameter struct given us by
 * the boot loader.
 */
void
find_initrd (void)
{
#ifdef CONFIG_BLK_DEV_INITRD
	if (ia64_boot_param->initrd_start) {
		initrd_start = (unsigned long)__va(ia64_boot_param->initrd_start);
		initrd_end   = initrd_start+ia64_boot_param->initrd_size;

		printk(KERN_INFO "Initial ramdisk at: 0x%lx (%lu bytes)\n",
		       initrd_start, ia64_boot_param->initrd_size);
	}
#endif
}

static void __init
io_port_init (void)
{
	extern unsigned long ia64_iobase;
	unsigned long phys_iobase;

	/*
	 * Set `iobase' to the appropriate address in region 6 (uncached access range).
	 *
	 * The EFI memory map is the "preferred" location to get the I/O port space base,
	 * rather than relying on AR.KR0.  This should become clearer in future SAL
	 * specs.  We'll fall back to getting it out of AR.KR0 if no appropriate entry is
	 * found in the memory map.
	 */
	phys_iobase = efi_get_iobase();
	if (phys_iobase)
		/* set AR.KR0 since this is all we use it for anyway */
		ia64_set_kr(IA64_KR_IO_BASE, phys_iobase);
	else {
		phys_iobase = ia64_get_kr(IA64_KR_IO_BASE);
		printk(KERN_INFO "No I/O port range found in EFI memory map, falling back "
		       "to AR.KR0\n");
		printk(KERN_INFO "I/O port base = 0x%lx\n", phys_iobase);
	}
	ia64_iobase = (unsigned long) ioremap(phys_iobase, 0);

	/* setup legacy IO port space */
	io_space[0].mmio_base = ia64_iobase;
	io_space[0].sparse = 1;
	num_io_spaces = 1;
}
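
/*
 * Sketch of how the legacy I/O port space set up above is typically consumed.
 * With sparse encoding each 4-byte port granule lives in its own uncached 4KB
 * page, so a port number maps to an MMIO address roughly the way the ia64
 * inX()/outX() accessors in <asm/io.h> compute it (helper name hypothetical,
 * shown only for illustration):
 *
 *	static inline void *legacy_port_to_mmio(unsigned long port)
 *	{
 *		return (void *) (io_space[0].mmio_base
 *				 | ((port >> 2) << 12) | (port & 0xfff));
 *	}
 */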

/**
 * early_console_setup - setup debugging console
 *
 * Consoles started here require little enough setup that we can start using
 * them very early in the boot process, either right after the machine
 * vector initialization, or even before if the drivers can detect their hw.
 *
 * Returns non-zero if a console couldn't be set up.
 */
static inline int __init
early_console_setup (char *cmdline)
{
#ifdef CONFIG_SERIAL_SGI_L1_CONSOLE
	{
		extern int sn_serial_console_early_setup(void);
		if (!sn_serial_console_early_setup())
			return 0;
	}
#endif
#ifdef CONFIG_EFI_PCDP
	if (!efi_setup_pcdp_console(cmdline))
		return 0;
#endif
#ifdef CONFIG_SERIAL_8250_CONSOLE
	if (!early_serial_console_init(cmdline))
		return 0;
#endif

	return -1;
}

static inline void
mark_bsp_online (void)
{
#ifdef CONFIG_SMP
	/* If we register an early console, allow CPU 0 to printk */
	cpu_set(smp_processor_id(), cpu_online_map);
#endif
}

#ifdef CONFIG_SMP
static void
check_for_logical_procs (void)
{
	pal_logical_to_physical_t info;
	s64 status;

	status = ia64_pal_logical_to_phys(0, &info);
	if (status == -1) {
		printk(KERN_INFO "No logical to physical processor mapping "
		       "available\n");
		return;
	}
	if (status) {
		printk(KERN_ERR "ia64_pal_logical_to_phys failed with %ld\n",
		       status);
		return;
	}
	/*
	 * Total number of siblings that BSP has.  Though not all of them
	 * may have booted successfully.  The correct number of siblings
	 * booted is in info.overview_num_log.
	 */
	smp_num_siblings = info.overview_tpc;
	smp_num_cpucores = info.overview_cpp;
}
#endif

void __init
setup_arch (char **cmdline_p)
{
	unw_init();

	ia64_patch_vtop((u64) __start___vtop_patchlist, (u64) __end___vtop_patchlist);

	*cmdline_p = __va(ia64_boot_param->command_line);
	strlcpy(saved_command_line, *cmdline_p, COMMAND_LINE_SIZE);

	efi_init();
	io_port_init();

#ifdef CONFIG_IA64_GENERIC
	{
		const char *mvec_name = strstr (*cmdline_p, "machvec=");
		char str[64];

		if (mvec_name) {
			const char *end;
			size_t len;

			mvec_name += 8;
			end = strchr (mvec_name, ' ');
			if (end)
				len = end - mvec_name;
			else
				len = strlen (mvec_name);
			len = min(len, sizeof (str) - 1);
			strncpy (str, mvec_name, len);
			str[len] = '\0';
			mvec_name = str;
		} else
			mvec_name = acpi_get_sysname();
		machvec_init(mvec_name);
	}
#endif

	if (early_console_setup(*cmdline_p) == 0)
		mark_bsp_online();

#ifdef CONFIG_ACPI_BOOT
	/* Initialize the ACPI boot-time table parser */
	acpi_table_init();
# ifdef CONFIG_ACPI_NUMA
	acpi_numa_init();
# endif
#else
# ifdef CONFIG_SMP
	smp_build_cpu_map();	/* happens, e.g., with the Ski simulator */
# endif
#endif /* CONFIG_ACPI_BOOT */

	find_memory();

	/* process SAL system table: */
	ia64_sal_init(efi.sal_systab);

#ifdef CONFIG_SMP
	cpu_physical_id(0) = hard_smp_processor_id();

	cpu_set(0, cpu_sibling_map[0]);
	cpu_set(0, cpu_core_map[0]);

	check_for_logical_procs();
	if (smp_num_cpucores > 1)
		printk(KERN_INFO
		       "cpu package is Multi-Core capable: number of cores=%d\n",
		       smp_num_cpucores);
	if (smp_num_siblings > 1)
		printk(KERN_INFO
		       "cpu package is Multi-Threading capable: number of siblings=%d\n",
		       smp_num_siblings);
#endif

	cpu_init();	/* initialize the bootstrap CPU */

#ifdef CONFIG_ACPI_BOOT
	acpi_boot_init();
#endif

#ifdef CONFIG_VT
	if (!conswitchp) {
# if defined(CONFIG_DUMMY_CONSOLE)
		conswitchp = &dummy_con;
# endif
# if defined(CONFIG_VGA_CONSOLE)
		/*
		 * Non-legacy systems may route legacy VGA MMIO range to system
		 * memory.  vga_con probes the MMIO hole, so memory looks like
		 * a VGA device to it.  The EFI memory map can tell us if it's
		 * memory so we can avoid this problem.
		 */
		if (efi_mem_type(0xA0000) != EFI_CONVENTIONAL_MEMORY)
			conswitchp = &vga_con;
# endif
	}
#endif

	/* enable IA-64 Machine Check Abort Handling unless disabled */
	if (!strstr(saved_command_line, "nomca"))
		ia64_mca_init();

	platform_setup(cmdline_p);
	paging_init();
}
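
/*
 * Example (illustrative): booting a CONFIG_IA64_GENERIC kernel with
 * "... machvec=dig console=ttyS0 ..." on its command line makes the parsing
 * block in setup_arch() above copy "dig" into str and hand it to
 * machvec_init("dig"); without a machvec= argument the machine vector name
 * comes from acpi_get_sysname() instead.
 */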

/*
 * Display cpu info for all CPUs.
 */
static int
show_cpuinfo (struct seq_file *m, void *v)
{
#ifdef CONFIG_SMP
#	define lpj	c->loops_per_jiffy
#	define cpunum	c->cpu
#else
#	define lpj	loops_per_jiffy
#	define cpunum	0
#endif
	static struct {
		unsigned long mask;
		const char *feature_name;
	} feature_bits[] = {
		{ 1UL << 0, "branchlong" },
		{ 1UL << 1, "spontaneous deferral" },
		{ 1UL << 2, "16-byte atomic ops" }
	};
	char family[32], features[128], *cp, sep;
	struct cpuinfo_ia64 *c = v;
	unsigned long mask;
	int i;

	mask = c->features;

	switch (c->family) {
	      case 0x07:	memcpy(family, "Itanium", 8); break;
	      case 0x1f:	memcpy(family, "Itanium 2", 10); break;
	      default:		sprintf(family, "%u", c->family); break;
	}

	/* build the feature string: */
	memcpy(features, " standard", 10);
	cp = features;
	sep = 0;
	for (i = 0; i < (int) ARRAY_SIZE(feature_bits); ++i) {
		if (mask & feature_bits[i].mask) {
			if (sep)
				*cp++ = sep;
			sep = ',';
			*cp++ = ' ';
			strcpy(cp, feature_bits[i].feature_name);
			cp += strlen(feature_bits[i].feature_name);
			mask &= ~feature_bits[i].mask;
		}
	}
	if (mask) {
		/* print unknown features as a hex value: */
		if (sep)
			*cp++ = sep;
		sprintf(cp, " 0x%lx", mask);
	}

	seq_printf(m,
		   "processor  : %d\n"
		   "vendor     : %s\n"
		   "arch       : IA-64\n"
		   "family     : %s\n"
		   "model      : %u\n"
		   "revision   : %u\n"
		   "archrev    : %u\n"
		   "features   :%s\n"	/* don't change this---it _is_ right! */
		   "cpu number : %lu\n"
		   "cpu regs   : %u\n"
		   "cpu MHz    : %lu.%06lu\n"
		   "itc MHz    : %lu.%06lu\n"
		   "BogoMIPS   : %lu.%02lu\n",
		   cpunum, c->vendor, family, c->model, c->revision, c->archrev,
		   features, c->ppn, c->number,
		   c->proc_freq / 1000000, c->proc_freq % 1000000,
		   c->itc_freq / 1000000, c->itc_freq % 1000000,
		   lpj*HZ/500000, (lpj*HZ/5000) % 100);
#ifdef CONFIG_SMP
	seq_printf(m, "siblings   : %u\n", c->num_log);
	if (c->threads_per_core > 1 || c->cores_per_socket > 1)
		seq_printf(m,
			   "physical id: %u\n"
			   "core id    : %u\n"
			   "thread id  : %u\n",
			   c->socket_id, c->core_id, c->thread_id);
#endif
	seq_printf(m,"\n");

	return 0;
}

static void *
c_start (struct seq_file *m, loff_t *pos)
{
#ifdef CONFIG_SMP
	while (*pos < NR_CPUS && !cpu_isset(*pos, cpu_online_map))
		++*pos;
#endif
	return *pos < NR_CPUS ? cpu_data(*pos) : NULL;
}

static void *
c_next (struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return c_start(m, pos);
}

static void
c_stop (struct seq_file *m, void *v)
{
}

struct seq_operations cpuinfo_op = {
	.start =	c_start,
	.next =		c_next,
	.stop =		c_stop,
	.show =		show_cpuinfo
};
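
/*
 * Illustrative /proc/cpuinfo entry produced by the code above (all values are
 * made up; the real numbers depend on the machine):
 *
 *	processor  : 0
 *	vendor     : GenuineIntel
 *	arch       : IA-64
 *	family     : Itanium 2
 *	model      : 2
 *	revision   : 1
 *	archrev    : 0
 *	features   : branchlong
 *	cpu number : 0
 *	cpu regs   : 4
 *	cpu MHz    : 1500.000000
 *	itc MHz    : 1500.000000
 *	BogoMIPS   : 2245.39
 *
 * Note that " standard" only appears in the features line when no individual
 * feature bit is set, since the feature loop rewrites the buffer from the start.
 */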

void
identify_cpu (struct cpuinfo_ia64 *c)
{
	union {
		unsigned long bits[5];
		struct {
			/* id 0 & 1: */
			char vendor[16];

			/* id 2 */
			u64 ppn;		/* processor serial number */

			/* id 3: */
			unsigned number		:  8;
			unsigned revision	:  8;
			unsigned model		:  8;
			unsigned family		:  8;
			unsigned archrev	:  8;
			unsigned reserved	: 24;

			/* id 4: */
			u64 features;
		} field;
	} cpuid;
	pal_vm_info_1_u_t vm1;
	pal_vm_info_2_u_t vm2;
	pal_status_t status;
	unsigned long impl_va_msb = 50, phys_addr_size = 44;	/* Itanium defaults */
	int i;

	for (i = 0; i < 5; ++i)
		cpuid.bits[i] = ia64_get_cpuid(i);

	memcpy(c->vendor, cpuid.field.vendor, 16);
#ifdef CONFIG_SMP
	c->cpu = smp_processor_id();

	/* below default values will be overwritten by identify_siblings()
	 * for Multi-Threading/Multi-Core capable CPUs
	 */
	c->threads_per_core = c->cores_per_socket = c->num_log = 1;
	c->socket_id = -1;

	identify_siblings(c);
#endif
	c->ppn = cpuid.field.ppn;
	c->number = cpuid.field.number;
	c->revision = cpuid.field.revision;
	c->model = cpuid.field.model;
	c->family = cpuid.field.family;
	c->archrev = cpuid.field.archrev;
	c->features = cpuid.field.features;

	status = ia64_pal_vm_summary(&vm1, &vm2);
	if (status == PAL_STATUS_SUCCESS) {
		impl_va_msb = vm2.pal_vm_info_2_s.impl_va_msb;
		phys_addr_size = vm1.pal_vm_info_1_s.phys_add_size;
	}
	c->unimpl_va_mask = ~((7L<<61) | ((1L << (impl_va_msb + 1)) - 1));
	c->unimpl_pa_mask = ~((1L<<63) | ((1L << phys_addr_size) - 1));
}
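
/*
 * Worked example of the mask math above, assuming the Itanium defaults of
 * impl_va_msb = 50 and phys_addr_size = 44 (bits 61-63 of a virtual address
 * select the region):
 *
 *	unimpl_va_mask = ~((7L << 61) | ((1L << 51) - 1))
 *		       = bits 51..60 set, i.e. virtual-address bits that are
 *			 neither implemented nor region-select bits;
 *	unimpl_pa_mask = ~((1L << 63) | ((1L << 44) - 1))
 *		       = bits 44..62 set (bit 63 is the uncached-attribute bit).
 *
 * Any address with one of these bits set lies outside the implemented range.
 */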

void
setup_per_cpu_areas (void)
{
	/* start_kernel() requires this... */
}

static void
get_max_cacheline_size (void)
{
	unsigned long line_size, max = 1;
	u64 l, levels, unique_caches;
	pal_cache_config_info_t cci;
	s64 status;

	status = ia64_pal_cache_summary(&levels, &unique_caches);
	if (status != 0) {
		printk(KERN_ERR "%s: ia64_pal_cache_summary() failed (status=%ld)\n",
		       __FUNCTION__, status);
		max = SMP_CACHE_BYTES;
		goto out;
	}

	for (l = 0; l < levels; ++l) {
		status = ia64_pal_cache_config_info(l, /* cache_type (data_or_unified)= */ 2,
						    &cci);
		if (status != 0) {
			printk(KERN_ERR
			       "%s: ia64_pal_cache_config_info(l=%lu) failed (status=%ld)\n",
			       __FUNCTION__, l, status);
			max = SMP_CACHE_BYTES;
			continue;	/* cci is not valid on failure; skip this level */
		}
		line_size = 1 << cci.pcci_line_size;
		if (line_size > max)
			max = line_size;
	}
  out:
	if (max > ia64_max_cacheline_size)
		ia64_max_cacheline_size = max;
}

/*
 * cpu_init() initializes state that is per-CPU.  This function acts
 * as a 'CPU state barrier', nothing should get across.
 */
void
cpu_init (void)
{
	extern void __devinit ia64_mmu_init (void *);
	unsigned long num_phys_stacked;
	pal_vm_info_2_u_t vmi;
	unsigned int max_ctx;
	struct cpuinfo_ia64 *cpu_info;
	void *cpu_data;

	cpu_data = per_cpu_init();

	/*
	 * We set ar.k3 so that assembly code in MCA handler can compute
	 * physical addresses of per cpu variables with a simple:
	 *	phys = ar.k3 + &per_cpu_var
	 */
	ia64_set_kr(IA64_KR_PER_CPU_DATA,
		    ia64_tpa(cpu_data) - (long) __per_cpu_start);

	get_max_cacheline_size();

	/*
	 * We can't pass "local_cpu_data" to identify_cpu() because we haven't called
	 * ia64_mmu_init() yet.  And we can't call ia64_mmu_init() first because it
	 * depends on the data returned by identify_cpu().  We break the dependency by
	 * accessing cpu_data() through the canonical per-CPU address.
	 */
	cpu_info = cpu_data + ((char *) &__ia64_per_cpu_var(cpu_info) - __per_cpu_start);
	identify_cpu(cpu_info);

#ifdef CONFIG_MCKINLEY
	{
#	define FEATURE_SET 16
		struct ia64_pal_retval iprv;

		if (cpu_info->family == 0x1f) {
			PAL_CALL_PHYS(iprv, PAL_PROC_GET_FEATURES, 0, FEATURE_SET, 0);
			if ((iprv.status == 0) && (iprv.v0 & 0x80) && (iprv.v2 & 0x80))
				PAL_CALL_PHYS(iprv, PAL_PROC_SET_FEATURES,
					      (iprv.v1 | 0x80), FEATURE_SET, 0);
		}
	}
#endif

	/* Clear the stack memory reserved for pt_regs: */
	memset(ia64_task_regs(current), 0, sizeof(struct pt_regs));

	ia64_set_kr(IA64_KR_FPU_OWNER, 0);

	/*
	 * Initialize the page-table base register to a global
	 * directory with all zeroes.  This ensures that we can handle
	 * TLB-misses to user address-space even before we created the
	 * first user address-space.  This may happen, e.g., due to
	 * aggressive use of lfetch.fault.
	 */
	ia64_set_kr(IA64_KR_PT_BASE, __pa(ia64_imva(empty_zero_page)));

	/*
	 * Initialize default control register to defer speculative faults except
	 * for those arising from TLB misses, which are not deferred.  The
	 * kernel MUST NOT depend on a particular setting of these bits (in other words,
	 * the kernel must have recovery code for all speculative accesses).  Turn on
	 * dcr.lc as per recommendation by the architecture team.  Most IA-32 apps
	 * shouldn't be affected by this (moral: keep your ia32 locks aligned and you'll
	 * be fine).
	 */
	ia64_setreg(_IA64_REG_CR_DCR,  (  IA64_DCR_DP | IA64_DCR_DK | IA64_DCR_DX | IA64_DCR_DR
					| IA64_DCR_DA | IA64_DCR_DD | IA64_DCR_LC));
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
	if (current->mm)
		BUG();

	ia64_mmu_init(ia64_imva(cpu_data));
	ia64_mca_cpu_init(ia64_imva(cpu_data));

#ifdef CONFIG_IA32_SUPPORT
	ia32_cpu_init();
#endif

	/* Clear ITC to eliminate sched_clock() overflows in human time.  */
	ia64_set_itc(0);

	/* disable all local interrupt sources: */
	ia64_set_itv(1 << 16);
	ia64_set_lrr0(1 << 16);
	ia64_set_lrr1(1 << 16);
	ia64_setreg(_IA64_REG_CR_PMV, 1 << 16);
	ia64_setreg(_IA64_REG_CR_CMCV, 1 << 16);

	/* clear TPR & XTP to enable all interrupt classes: */
	ia64_setreg(_IA64_REG_CR_TPR, 0);
#ifdef CONFIG_SMP
	normal_xtp();
#endif

	/* set ia64_ctx.max_rid to the maximum RID that is supported by all CPUs: */
	if (ia64_pal_vm_summary(NULL, &vmi) == 0)
		max_ctx = (1U << (vmi.pal_vm_info_2_s.rid_size - 3)) - 1;
	else {
		printk(KERN_WARNING "cpu_init: PAL VM summary failed, assuming 18 RID bits\n");
		max_ctx = (1U << 15) - 1;	/* use architected minimum */
	}
	while (max_ctx < ia64_ctx.max_ctx) {
		unsigned int old = ia64_ctx.max_ctx;
		if (cmpxchg(&ia64_ctx.max_ctx, old, max_ctx) == old)
			break;
	}

	if (ia64_pal_rse_info(&num_phys_stacked, NULL) != 0) {
		printk(KERN_WARNING "cpu_init: PAL RSE info failed; assuming 96 physical "
		       "stacked regs\n");
		num_phys_stacked = 96;
	}
	/* size of physical stacked register partition plus 8 bytes: */
	__get_cpu_var(ia64_phys_stacked_size_p8) = num_phys_stacked*8 + 8;
	platform_cpu_init();
}

void
check_bugs (void)
{
	ia64_patch_mckinley_e9((unsigned long) __start___mckinley_e9_bundles,
			       (unsigned long) __end___mckinley_e9_bundles);
}