/*
 * Architecture-specific setup.
 *
 * Copyright (C) 1998-2001, 2003-2004 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *	Stephane Eranian <eranian@hpl.hp.com>
 * Copyright (C) 2000, 2004 Intel Corp
 *	Rohit Seth <rohit.seth@intel.com>
 *	Suresh Siddha <suresh.b.siddha@intel.com>
 *	Gordon Jin <gordon.jin@intel.com>
 * Copyright (C) 1999 VA Linux Systems
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 *
 * 12/26/04 S.Siddha, G.Jin, R.Seth
 *			Add multi-threading and multi-core detection
 * 11/12/01 D.Mosberger	Convert get_cpuinfo() to seq_file based show_cpuinfo().
 * 04/04/00 D.Mosberger	renamed cpu_initialized to cpu_online_map
 * 03/31/00 R.Seth	cpu_initialized and current->processor fixes
 * 02/04/00 D.Mosberger	some more get_cpuinfo fixes...
 * 02/01/00 R.Seth	fixed get_cpuinfo for SMP
 * 01/07/99 S.Eranian	added the support for command line argument
 * 06/24/99 W.Drummond	added boot_cpu_data.
 * 05/28/05 Z. Menyhart	Dynamic stride size for "flush_icache_range()"
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/init.h>

#include <linux/acpi.h>
#include <linux/bootmem.h>
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/reboot.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/threads.h>
#include <linux/tty.h>
#include <linux/serial.h>
#include <linux/serial_core.h>
#include <linux/efi.h>
#include <linux/initrd.h>
#include <linux/platform.h>
#include <linux/pm.h>

#include <asm/ia32.h>
#include <asm/machvec.h>
#include <asm/mca.h>
#include <asm/meminit.h>
#include <asm/page.h>
#include <asm/patch.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/sal.h>
#include <asm/sections.h>
#include <asm/serial.h>
#include <asm/setup.h>
#include <asm/smp.h>
#include <asm/system.h>
#include <asm/unistd.h>

#if defined(CONFIG_SMP) && (IA64_CPU_SIZE > PAGE_SIZE)
# error "struct cpuinfo_ia64 too big!"
#endif

#ifdef CONFIG_SMP
unsigned long __per_cpu_offset[NR_CPUS];
EXPORT_SYMBOL(__per_cpu_offset);
#endif

DEFINE_PER_CPU(struct cpuinfo_ia64, cpu_info);
DEFINE_PER_CPU(unsigned long, local_per_cpu_offset);
DEFINE_PER_CPU(unsigned long, ia64_phys_stacked_size_p8);
unsigned long ia64_cycles_per_usec;
struct ia64_boot_param *ia64_boot_param;
struct screen_info screen_info;
unsigned long vga_console_iobase;
unsigned long vga_console_membase;

unsigned long ia64_max_cacheline_size;
unsigned long ia64_iobase;	/* virtual address for I/O accesses */
EXPORT_SYMBOL(ia64_iobase);
struct io_space io_space[MAX_IO_SPACES];
EXPORT_SYMBOL(io_space);
unsigned int num_io_spaces;

/*
 * "flush_icache_range()" needs to know what processor dependent stride size to use
 * when it makes i-cache(s) coherent with d-caches.
 */
#define	I_CACHE_STRIDE_SHIFT	5	/* Safest way to go: 32 bytes by 32 bytes */
unsigned long ia64_i_cache_stride_shift = ~0;

/*
 * The merge_mask variable needs to be set to (max(iommu_page_size(iommu)) - 1).  This
 * mask specifies a mask of address bits that must be 0 in order for two buffers to be
 * mergeable by the I/O MMU (i.e., the end address of the first buffer and the start
 * address of the second buffer must be aligned to (merge_mask+1) in order to be
 * mergeable).  By default, we assume there is no I/O MMU which can merge physically
 * discontiguous buffers, so we set the merge_mask to ~0UL, which corresponds to an
 * iommu page-size of 2^64.
 */
unsigned long ia64_max_iommu_merge_mask = ~0UL;
EXPORT_SYMBOL(ia64_max_iommu_merge_mask);
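
/*
 * Illustration of the rule above (hypothetical values): an I/O MMU with a
 * 64KB page size would set ia64_max_iommu_merge_mask to 0xffff.  Two buffers
 * could then be merged only if the first ends, and the second starts, on a
 * 64KB boundary:
 *
 *	end1   = 0x10000;	(end1   & 0xffff) == 0	-> OK
 *	start2 = 0x10000;	(start2 & 0xffff) == 0	-> OK, mergeable
 *	start2 = 0x10400;	(start2 & 0xffff) != 0	-> not mergeable
 */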

/*
 * We use a special marker for the end of memory and it uses the extra (+1) slot
 */
struct rsvd_region rsvd_region[IA64_MAX_RSVD_REGIONS + 1];
int num_rsvd_regions;


/*
 * Filter incoming memory segments based on the primitive map created from the boot
 * parameters. Segments contained in the map are removed from the memory ranges. A
 * caller-specified function is called with the memory ranges that remain after filtering.
 * This routine does not assume the incoming segments are sorted.
 */
int
filter_rsvd_memory (unsigned long start, unsigned long end, void *arg)
{
	unsigned long range_start, range_end, prev_start;
	void (*func)(unsigned long, unsigned long, int);
	int i;

#if IGNORE_PFN0
	if (start == PAGE_OFFSET) {
		printk(KERN_WARNING "warning: skipping physical page 0\n");
		start += PAGE_SIZE;
		if (start >= end) return 0;
	}
#endif
	/*
	 * lowest possible address (walker uses virtual)
	 */
	prev_start = PAGE_OFFSET;
	func = arg;

	for (i = 0; i < num_rsvd_regions; ++i) {
		range_start = max(start, prev_start);
		range_end   = min(end, rsvd_region[i].start);

		if (range_start < range_end)
			call_pernode_memory(__pa(range_start), range_end - range_start, func);

		/* nothing more available in this segment */
		if (range_end == end) return 0;

		prev_start = rsvd_region[i].end;
	}
	/* end of memory marker allows full processing inside loop body */
	return 0;
}
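
/*
 * Worked example (made-up addresses, PAGE_OFFSET elided for brevity): given a
 * single reserved region [0x3000, 0x4000) followed by the end-of-memory
 * marker, an incoming segment [0x1000, 0x9000) results in two callbacks:
 * [0x1000, 0x3000), clipped at the reserved region, and [0x4000, 0x9000),
 * emitted when the loop reaches the marker.  The reserved hole itself is
 * never passed to func.
 */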

static void
sort_regions (struct rsvd_region *rsvd_region, int max)
{
	int j;

	/* simple bubble sorting */
	while (max--) {
		for (j = 0; j < max; ++j) {
			if (rsvd_region[j].start > rsvd_region[j+1].start) {
				struct rsvd_region tmp;
				tmp = rsvd_region[j];
				rsvd_region[j] = rsvd_region[j + 1];
				rsvd_region[j + 1] = tmp;
			}
		}
	}
}

/**
 * reserve_memory - setup reserved memory areas
 *
 * Set up the reserved memory areas set aside for the boot parameters,
 * initrd, etc.  There are currently %IA64_MAX_RSVD_REGIONS defined,
 * see include/asm-ia64/meminit.h if you need to define more.
 */
void
reserve_memory (void)
{
	int n = 0;

	/*
	 * none of the entries in this table overlap
	 */
	rsvd_region[n].start = (unsigned long) ia64_boot_param;
	rsvd_region[n].end   = rsvd_region[n].start + sizeof(*ia64_boot_param);
	n++;

	rsvd_region[n].start = (unsigned long) __va(ia64_boot_param->efi_memmap);
	rsvd_region[n].end   = rsvd_region[n].start + ia64_boot_param->efi_memmap_size;
	n++;

	rsvd_region[n].start = (unsigned long) __va(ia64_boot_param->command_line);
	rsvd_region[n].end   = (rsvd_region[n].start
				+ strlen(__va(ia64_boot_param->command_line)) + 1);
	n++;

	rsvd_region[n].start = (unsigned long) ia64_imva((void *)KERNEL_START);
	rsvd_region[n].end   = (unsigned long) ia64_imva(_end);
	n++;

#ifdef CONFIG_BLK_DEV_INITRD
	if (ia64_boot_param->initrd_start) {
		rsvd_region[n].start = (unsigned long)__va(ia64_boot_param->initrd_start);
		rsvd_region[n].end   = rsvd_region[n].start + ia64_boot_param->initrd_size;
		n++;
	}
#endif

	efi_memmap_init(&rsvd_region[n].start, &rsvd_region[n].end);
	n++;

	/* end of memory marker */
	rsvd_region[n].start = ~0UL;
	rsvd_region[n].end   = ~0UL;
	n++;

	num_rsvd_regions = n;

	sort_regions(rsvd_region, num_rsvd_regions);
}
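
/*
 * Note on the resulting table (illustrative; the order of the real entries
 * depends on where the boot loader placed things): after sort_regions() the
 * entries are ordered by start address, with the ~0UL end-of-memory marker
 * sorting last, e.g.
 *
 *	boot param block < EFI memmap < command line < kernel image < marker
 *
 * filter_rsvd_memory() relies on that trailing marker to terminate inside
 * its loop body rather than via a separate end-of-table check.
 */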

/**
 * find_initrd - get initrd parameters from the boot parameter structure
 *
 * Grab the initrd start and end from the boot parameter struct given us by
 * the boot loader.
 */
void
find_initrd (void)
{
#ifdef CONFIG_BLK_DEV_INITRD
	if (ia64_boot_param->initrd_start) {
		initrd_start = (unsigned long)__va(ia64_boot_param->initrd_start);
		initrd_end   = initrd_start+ia64_boot_param->initrd_size;

		printk(KERN_INFO "Initial ramdisk at: 0x%lx (%lu bytes)\n",
		       initrd_start, ia64_boot_param->initrd_size);
	}
#endif
}

static void __init
io_port_init (void)
{
	extern unsigned long ia64_iobase;
	unsigned long phys_iobase;

	/*
	 * Set `iobase' to the appropriate address in region 6 (uncached access range).
	 *
	 * The EFI memory map is the "preferred" location to get the I/O port space base,
	 * rather than relying on AR.KR0. This should become more clear in future SAL
	 * specs. We'll fall back to getting it out of AR.KR0 if no appropriate entry is
	 * found in the memory map.
	 */
	phys_iobase = efi_get_iobase();
	if (phys_iobase)
		/* set AR.KR0 since this is all we use it for anyway */
		ia64_set_kr(IA64_KR_IO_BASE, phys_iobase);
	else {
		phys_iobase = ia64_get_kr(IA64_KR_IO_BASE);
		printk(KERN_INFO "No I/O port range found in EFI memory map, falling back "
		       "to AR.KR0\n");
		printk(KERN_INFO "I/O port base = 0x%lx\n", phys_iobase);
	}
	ia64_iobase = (unsigned long) ioremap(phys_iobase, 0);

	/* setup legacy IO port space */
	io_space[0].mmio_base = ia64_iobase;
	io_space[0].sparse = 1;
	num_io_spaces = 1;
}

/**
 * early_console_setup - setup debugging console
 *
 * Consoles started here require little enough setup that we can start using
 * them very early in the boot process, either right after the machine
 * vector initialization, or even before if the drivers can detect their hw.
 *
 * Returns non-zero if a console couldn't be set up.
 */
static inline int __init
early_console_setup (char *cmdline)
{
	int earlycons = 0;

#ifdef CONFIG_SERIAL_SGI_L1_CONSOLE
	{
		extern int sn_serial_console_early_setup(void);
		if (!sn_serial_console_early_setup())
			earlycons++;
	}
#endif
#ifdef CONFIG_EFI_PCDP
	if (!efi_setup_pcdp_console(cmdline))
		earlycons++;
#endif
#ifdef CONFIG_SERIAL_8250_CONSOLE
	if (!early_serial_console_init(cmdline))
		earlycons++;
#endif

	return (earlycons) ? 0 : -1;
}

static inline void
mark_bsp_online (void)
{
#ifdef CONFIG_SMP
	/* If we register an early console, allow CPU 0 to printk */
	cpu_set(smp_processor_id(), cpu_online_map);
#endif
}

#ifdef CONFIG_SMP
static void
check_for_logical_procs (void)
{
	pal_logical_to_physical_t info;
	s64 status;

	status = ia64_pal_logical_to_phys(0, &info);
	if (status == -1) {
		printk(KERN_INFO "No logical to physical processor mapping "
		       "available\n");
		return;
	}
	if (status) {
		printk(KERN_ERR "ia64_pal_logical_to_phys failed with %ld\n",
		       status);
		return;
	}
	/*
	 * Total number of siblings that BSP has, though not all of them
	 * may have booted successfully.  The correct number of siblings
	 * booted is in info.overview_num_log.
	 */
	smp_num_siblings = info.overview_tpc;
	smp_num_cpucores = info.overview_cpp;
}
#endif
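
/*
 * Example (hypothetical PAL data): a package reporting overview_cpp = 2 cores
 * and overview_tpc = 2 threads per core yields smp_num_cpucores = 2 and
 * smp_num_siblings = 2, i.e. up to 2 * 2 = 4 logical CPUs per physical
 * socket, of which info.overview_num_log says how many actually booted.
 */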

void __init
setup_arch (char **cmdline_p)
{
	unw_init();

	ia64_patch_vtop((u64) __start___vtop_patchlist, (u64) __end___vtop_patchlist);

	*cmdline_p = __va(ia64_boot_param->command_line);
	strlcpy(saved_command_line, *cmdline_p, COMMAND_LINE_SIZE);

	efi_init();
	io_port_init();

#ifdef CONFIG_IA64_GENERIC
	{
		const char *mvec_name = strstr (*cmdline_p, "machvec=");
		char str[64];

		if (mvec_name) {
			const char *end;
			size_t len;

			mvec_name += 8;
			end = strchr (mvec_name, ' ');
			if (end)
				len = end - mvec_name;
			else
				len = strlen (mvec_name);
			len = min(len, sizeof (str) - 1);
			strncpy (str, mvec_name, len);
			str[len] = '\0';
			mvec_name = str;
		} else
			mvec_name = acpi_get_sysname();
		machvec_init(mvec_name);
	}
#endif

	if (early_console_setup(*cmdline_p) == 0)
		mark_bsp_online();

#ifdef CONFIG_ACPI_BOOT
	/* Initialize the ACPI boot-time table parser */
	acpi_table_init();
# ifdef CONFIG_ACPI_NUMA
	acpi_numa_init();
# endif
#else
# ifdef CONFIG_SMP
	smp_build_cpu_map();	/* happens, e.g., with the Ski simulator */
# endif
#endif /* CONFIG_ACPI_BOOT */

	find_memory();

	/* process SAL system table: */
	ia64_sal_init(efi.sal_systab);

#ifdef CONFIG_SMP
	cpu_physical_id(0) = hard_smp_processor_id();

	cpu_set(0, cpu_sibling_map[0]);
	cpu_set(0, cpu_core_map[0]);

	check_for_logical_procs();
	if (smp_num_cpucores > 1)
		printk(KERN_INFO
		       "cpu package is Multi-Core capable: number of cores=%d\n",
		       smp_num_cpucores);
	if (smp_num_siblings > 1)
		printk(KERN_INFO
		       "cpu package is Multi-Threading capable: number of siblings=%d\n",
		       smp_num_siblings);
#endif

	cpu_init();	/* initialize the bootstrap CPU */

#ifdef CONFIG_ACPI_BOOT
	acpi_boot_init();
#endif

#ifdef CONFIG_VT
	if (!conswitchp) {
# if defined(CONFIG_DUMMY_CONSOLE)
		conswitchp = &dummy_con;
# endif
# if defined(CONFIG_VGA_CONSOLE)
		/*
		 * Non-legacy systems may route legacy VGA MMIO range to system
		 * memory.  vga_con probes the MMIO hole, so memory looks like
		 * a VGA device to it.  The EFI memory map can tell us if it's
		 * memory so we can avoid this problem.
		 */
		if (efi_mem_type(0xA0000) != EFI_CONVENTIONAL_MEMORY)
			conswitchp = &vga_con;
# endif
	}
#endif

	/* enable IA-64 Machine Check Abort Handling unless disabled */
	if (!strstr(saved_command_line, "nomca"))
		ia64_mca_init();

	platform_setup(cmdline_p);
	paging_init();
}

/*
 * Display cpu info for all CPUs.
 */
static int
show_cpuinfo (struct seq_file *m, void *v)
{
#ifdef CONFIG_SMP
#	define lpj	c->loops_per_jiffy
#	define cpunum	c->cpu
#else
#	define lpj	loops_per_jiffy
#	define cpunum	0
#endif
	static struct {
		unsigned long mask;
		const char *feature_name;
	} feature_bits[] = {
		{ 1UL << 0, "branchlong" },
		{ 1UL << 1, "spontaneous deferral"},
		{ 1UL << 2, "16-byte atomic ops" }
	};
	char family[32], features[128], *cp, sep;
	struct cpuinfo_ia64 *c = v;
	unsigned long mask;
	int i;

	mask = c->features;

	switch (c->family) {
	      case 0x07:	memcpy(family, "Itanium", 8); break;
	      case 0x1f:	memcpy(family, "Itanium 2", 10); break;
	      default:		sprintf(family, "%u", c->family); break;
	}

	/* build the feature string: */
	memcpy(features, " standard", 10);
	cp = features;
	sep = 0;
	for (i = 0; i < (int) ARRAY_SIZE(feature_bits); ++i) {
		if (mask & feature_bits[i].mask) {
			if (sep)
				*cp++ = sep;
			sep = ',';
			*cp++ = ' ';
			strcpy(cp, feature_bits[i].feature_name);
			cp += strlen(feature_bits[i].feature_name);
			mask &= ~feature_bits[i].mask;
		}
	}
	if (mask) {
		/* print unknown features as a hex value: */
		if (sep)
			*cp++ = sep;
		sprintf(cp, " 0x%lx", mask);
	}

	seq_printf(m,
		   "processor  : %d\n"
		   "vendor     : %s\n"
		   "arch       : IA-64\n"
		   "family     : %s\n"
		   "model      : %u\n"
		   "revision   : %u\n"
		   "archrev    : %u\n"
		   "features   :%s\n"	/* don't change this---it _is_ right! */
		   "cpu number : %lu\n"
		   "cpu regs   : %u\n"
		   "cpu MHz    : %lu.%06lu\n"
		   "itc MHz    : %lu.%06lu\n"
		   "BogoMIPS   : %lu.%02lu\n",
		   cpunum, c->vendor, family, c->model, c->revision, c->archrev,
		   features, c->ppn, c->number,
		   c->proc_freq / 1000000, c->proc_freq % 1000000,
		   c->itc_freq / 1000000, c->itc_freq % 1000000,
		   lpj*HZ/500000, (lpj*HZ/5000) % 100);
#ifdef CONFIG_SMP
	seq_printf(m, "siblings   : %u\n", c->num_log);
	if (c->threads_per_core > 1 || c->cores_per_socket > 1)
		seq_printf(m,
			   "physical id: %u\n"
			   "core id    : %u\n"
			   "thread id  : %u\n",
			   c->socket_id, c->core_id, c->thread_id);
#endif
	seq_printf(m,"\n");

	return 0;
}
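
/*
 * Worked example for the BogoMIPS fields above (made-up numbers): with
 * loops_per_jiffy = 745472 and HZ = 1024, lpj*HZ = 763363328, so
 * lpj*HZ/500000 = 1526 and (lpj*HZ/5000) % 100 = 72, and /proc/cpuinfo
 * shows "BogoMIPS   : 1526.72".
 */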

static void *
c_start (struct seq_file *m, loff_t *pos)
{
#ifdef CONFIG_SMP
	while (*pos < NR_CPUS && !cpu_isset(*pos, cpu_online_map))
		++*pos;
#endif
	return *pos < NR_CPUS ? cpu_data(*pos) : NULL;
}

static void *
c_next (struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return c_start(m, pos);
}

static void
c_stop (struct seq_file *m, void *v)
{
}

struct seq_operations cpuinfo_op = {
	.start =	c_start,
	.next =		c_next,
	.stop =		c_stop,
	.show =		show_cpuinfo
};

void
identify_cpu (struct cpuinfo_ia64 *c)
{
	union {
		unsigned long bits[5];
		struct {
			/* id 0 & 1: */
			char vendor[16];

			/* id 2 */
			u64 ppn;		/* processor serial number */

			/* id 3: */
			unsigned number		:  8;
			unsigned revision	:  8;
			unsigned model		:  8;
			unsigned family		:  8;
			unsigned archrev	:  8;
			unsigned reserved	: 24;

			/* id 4: */
			u64 features;
		} field;
	} cpuid;
	pal_vm_info_1_u_t vm1;
	pal_vm_info_2_u_t vm2;
	pal_status_t status;
	unsigned long impl_va_msb = 50, phys_addr_size = 44;	/* Itanium defaults */
	int i;

	for (i = 0; i < 5; ++i)
		cpuid.bits[i] = ia64_get_cpuid(i);

	memcpy(c->vendor, cpuid.field.vendor, 16);
#ifdef CONFIG_SMP
	c->cpu = smp_processor_id();

	/* below default values will be overwritten by identify_siblings()
	 * for Multi-Threading/Multi-Core capable CPUs
	 */
	c->threads_per_core = c->cores_per_socket = c->num_log = 1;
	c->socket_id = -1;

	identify_siblings(c);
#endif
	c->ppn = cpuid.field.ppn;
	c->number = cpuid.field.number;
	c->revision = cpuid.field.revision;
	c->model = cpuid.field.model;
	c->family = cpuid.field.family;
	c->archrev = cpuid.field.archrev;
	c->features = cpuid.field.features;

	status = ia64_pal_vm_summary(&vm1, &vm2);
	if (status == PAL_STATUS_SUCCESS) {
		impl_va_msb = vm2.pal_vm_info_2_s.impl_va_msb;
		phys_addr_size = vm1.pal_vm_info_1_s.phys_add_size;
	}
	c->unimpl_va_mask = ~((7L<<61) | ((1L << (impl_va_msb + 1)) - 1));
	c->unimpl_pa_mask = ~((1L<<63) | ((1L << phys_addr_size) - 1));
}
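
/*
 * Worked example for the mask arithmetic above: with the Itanium default
 * impl_va_msb = 50, ((1L << 51) - 1) covers the implemented VA bits 0..50
 * and (7L << 61) covers the region-number bits 61..63, so unimpl_va_mask
 * ends up with exactly bits 51..60 set -- the hole of unimplemented
 * virtual-address bits between the implemented range and the region bits.
 */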

void
setup_per_cpu_areas (void)
{
	/* start_kernel() requires this... */
}

/*
 * Calculate the max. cache line size.
 *
 * In addition, the minimum of the i-cache stride sizes is calculated for
 * "flush_icache_range()".
 */
static void
get_max_cacheline_size (void)
{
	unsigned long line_size, max = 1;
	u64 l, levels, unique_caches;
	pal_cache_config_info_t cci;
	s64 status;

	status = ia64_pal_cache_summary(&levels, &unique_caches);
	if (status != 0) {
		printk(KERN_ERR "%s: ia64_pal_cache_summary() failed (status=%ld)\n",
		       __FUNCTION__, status);
		max = SMP_CACHE_BYTES;
		/* Safest setup for "flush_icache_range()" */
		ia64_i_cache_stride_shift = I_CACHE_STRIDE_SHIFT;
		goto out;
	}

	for (l = 0; l < levels; ++l) {
		status = ia64_pal_cache_config_info(l, /* cache_type (data_or_unified)= */ 2,
						    &cci);
		if (status != 0) {
			printk(KERN_ERR
			       "%s: ia64_pal_cache_config_info(l=%lu, 2) failed (status=%ld)\n",
			       __FUNCTION__, l, status);
			max = SMP_CACHE_BYTES;
			/* The safest setup for "flush_icache_range()" */
			cci.pcci_stride = I_CACHE_STRIDE_SHIFT;
			cci.pcci_unified = 1;
		}
		line_size = 1 << cci.pcci_line_size;
		if (line_size > max)
			max = line_size;
		if (!cci.pcci_unified) {
			status = ia64_pal_cache_config_info(l,
						    /* cache_type (instruction)= */ 1,
						    &cci);
			if (status != 0) {
				printk(KERN_ERR
				       "%s: ia64_pal_cache_config_info(l=%lu, 1) failed (status=%ld)\n",
				       __FUNCTION__, l, status);
				/* The safest setup for "flush_icache_range()" */
				cci.pcci_stride = I_CACHE_STRIDE_SHIFT;
			}
		}
		if (cci.pcci_stride < ia64_i_cache_stride_shift)
			ia64_i_cache_stride_shift = cci.pcci_stride;
	}
  out:
	if (max > ia64_max_cacheline_size)
		ia64_max_cacheline_size = max;
}
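
/*
 * Illustration (made-up PAL answers): an i-cache reporting pcci_stride = 6
 * (64-byte stride) together with a unified cache whose pcci_line_size = 7
 * (128-byte lines) yields ia64_i_cache_stride_shift = 6, so
 * flush_icache_range() steps through the range 64 bytes at a time, and
 * ia64_max_cacheline_size becomes 128.
 */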

/*
 * cpu_init() initializes state that is per-CPU.  This function acts
 * as a 'CPU state barrier', nothing should get across.
 */
void
cpu_init (void)
{
	extern void __devinit ia64_mmu_init (void *);
	unsigned long num_phys_stacked;
	pal_vm_info_2_u_t vmi;
	unsigned int max_ctx;
	struct cpuinfo_ia64 *cpu_info;
	void *cpu_data;

	cpu_data = per_cpu_init();

	/*
	 * We set ar.k3 so that assembly code in MCA handler can compute
	 * physical addresses of per cpu variables with a simple:
	 *	phys = ar.k3 + &per_cpu_var
	 */
	ia64_set_kr(IA64_KR_PER_CPU_DATA,
		    ia64_tpa(cpu_data) - (long) __per_cpu_start);

	get_max_cacheline_size();

	/*
	 * We can't pass "local_cpu_data" to identify_cpu() because we haven't called
	 * ia64_mmu_init() yet.  And we can't call ia64_mmu_init() first because it
	 * depends on the data returned by identify_cpu().  We break the dependency by
	 * accessing cpu_data() through the canonical per-CPU address.
	 */
	cpu_info = cpu_data + ((char *) &__ia64_per_cpu_var(cpu_info) - __per_cpu_start);
	identify_cpu(cpu_info);

#ifdef CONFIG_MCKINLEY
	{
#	define FEATURE_SET 16
		struct ia64_pal_retval iprv;

		if (cpu_info->family == 0x1f) {
			PAL_CALL_PHYS(iprv, PAL_PROC_GET_FEATURES, 0, FEATURE_SET, 0);
			if ((iprv.status == 0) && (iprv.v0 & 0x80) && (iprv.v2 & 0x80))
				PAL_CALL_PHYS(iprv, PAL_PROC_SET_FEATURES,
				              (iprv.v1 | 0x80), FEATURE_SET, 0);
		}
	}
#endif

	/* Clear the stack memory reserved for pt_regs: */
	memset(ia64_task_regs(current), 0, sizeof(struct pt_regs));

	ia64_set_kr(IA64_KR_FPU_OWNER, 0);

	/*
	 * Initialize the page-table base register to a global
	 * directory with all zeroes.  This ensures that we can handle
	 * TLB-misses to user address-space even before we created the
	 * first user address-space.  This may happen, e.g., due to
	 * aggressive use of lfetch.fault.
	 */
	ia64_set_kr(IA64_KR_PT_BASE, __pa(ia64_imva(empty_zero_page)));

	/*
	 * Initialize default control register to defer speculative faults except
	 * for those arising from TLB misses, which are not deferred.  The
	 * kernel MUST NOT depend on a particular setting of these bits (in other words,
	 * the kernel must have recovery code for all speculative accesses).  Turn on
	 * dcr.lc as per recommendation by the architecture team.  Most IA-32 apps
	 * shouldn't be affected by this (moral: keep your ia32 locks aligned and you'll
	 * be fine).
	 */
	ia64_setreg(_IA64_REG_CR_DCR,  (  IA64_DCR_DP | IA64_DCR_DK | IA64_DCR_DX | IA64_DCR_DR
					| IA64_DCR_DA | IA64_DCR_DD | IA64_DCR_LC));
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
	if (current->mm)
		BUG();

	ia64_mmu_init(ia64_imva(cpu_data));
	ia64_mca_cpu_init(ia64_imva(cpu_data));

#ifdef CONFIG_IA32_SUPPORT
	ia32_cpu_init();
#endif

	/* Clear ITC to eliminate sched_clock() overflows in human time.  */
	ia64_set_itc(0);

	/* disable all local interrupt sources: */
	ia64_set_itv(1 << 16);
	ia64_set_lrr0(1 << 16);
	ia64_set_lrr1(1 << 16);
	ia64_setreg(_IA64_REG_CR_PMV, 1 << 16);
	ia64_setreg(_IA64_REG_CR_CMCV, 1 << 16);

	/* clear TPR & XTP to enable all interrupt classes: */
	ia64_setreg(_IA64_REG_CR_TPR, 0);
#ifdef CONFIG_SMP
	normal_xtp();
#endif

	/* set ia64_ctx.max_rid to the maximum RID that is supported by all CPUs: */
	if (ia64_pal_vm_summary(NULL, &vmi) == 0)
		max_ctx = (1U << (vmi.pal_vm_info_2_s.rid_size - 3)) - 1;
	else {
		printk(KERN_WARNING "cpu_init: PAL VM summary failed, assuming 18 RID bits\n");
		max_ctx = (1U << 15) - 1;	/* use architected minimum */
	}
	while (max_ctx < ia64_ctx.max_ctx) {
		unsigned int old = ia64_ctx.max_ctx;
		if (cmpxchg(&ia64_ctx.max_ctx, old, max_ctx) == old)
			break;
	}

	if (ia64_pal_rse_info(&num_phys_stacked, NULL) != 0) {
		printk(KERN_WARNING "cpu_init: PAL RSE info failed; assuming 96 physical "
		       "stacked regs\n");
		num_phys_stacked = 96;
	}
	/* size of physical stacked register partition plus 8 bytes: */
	__get_cpu_var(ia64_phys_stacked_size_p8) = num_phys_stacked*8 + 8;
	platform_cpu_init();
	pm_idle = default_idle;
}

void
check_bugs (void)
{
	ia64_patch_mckinley_e9((unsigned long) __start___mckinley_e9_bundles,
			       (unsigned long) __end___mckinley_e9_bundles);
}
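
/*
 * Worked example for the RID sizing in cpu_init() above (hypothetical PAL
 * answer): rid_size = 24 implemented region-ID bits leaves 24 - 3 = 21 bits
 * for mm contexts, so max_ctx = (1U << 21) - 1 = 2097151; the architected
 * minimum of 18 RID bits likewise gives (1U << 15) - 1 = 32767.
 */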