/*
 * Architecture-specific setup.
 *
 * Copyright (C) 1998-2001, 2003-2004 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *	Stephane Eranian <eranian@hpl.hp.com>
 * Copyright (C) 2000, 2004 Intel Corp
 *	Rohit Seth <rohit.seth@intel.com>
 *	Suresh Siddha <suresh.b.siddha@intel.com>
 *	Gordon Jin <gordon.jin@intel.com>
 * Copyright (C) 1999 VA Linux Systems
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 *
 * 12/26/04 S.Siddha, G.Jin, R.Seth
 *			Add multi-threading and multi-core detection
 * 11/12/01 D.Mosberger	Convert get_cpuinfo() to seq_file based show_cpuinfo().
 * 04/04/00 D.Mosberger	renamed cpu_initialized to cpu_online_map
 * 03/31/00 R.Seth	cpu_initialized and current->processor fixes
 * 02/04/00 D.Mosberger	some more get_cpuinfo fixes...
 * 02/01/00 R.Seth	fixed get_cpuinfo for SMP
 * 01/07/99 S.Eranian	added support for command-line arguments
 * 06/24/99 W.Drummond	added boot_cpu_data.
 * 05/28/05 Z. Menyhart	Dynamic stride size for "flush_icache_range()"
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/init.h>

#include <linux/acpi.h>
#include <linux/bootmem.h>
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/reboot.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/threads.h>
#include <linux/tty.h>
#include <linux/serial.h>
#include <linux/serial_core.h>
#include <linux/efi.h>
#include <linux/initrd.h>
#include <linux/platform.h>
#include <linux/pm.h>

#include <asm/ia32.h>
#include <asm/machvec.h>
#include <asm/mca.h>
#include <asm/meminit.h>
#include <asm/page.h>
#include <asm/patch.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/sal.h>
#include <asm/sections.h>
#include <asm/serial.h>
#include <asm/setup.h>
#include <asm/smp.h>
#include <asm/system.h>
#include <asm/unistd.h>

#if defined(CONFIG_SMP) && (IA64_CPU_SIZE > PAGE_SIZE)
# error "struct cpuinfo_ia64 too big!"
#endif

#ifdef CONFIG_SMP
unsigned long __per_cpu_offset[NR_CPUS];
EXPORT_SYMBOL(__per_cpu_offset);
#endif

DEFINE_PER_CPU(struct cpuinfo_ia64, cpu_info);
DEFINE_PER_CPU(unsigned long, local_per_cpu_offset);
DEFINE_PER_CPU(unsigned long, ia64_phys_stacked_size_p8);
unsigned long ia64_cycles_per_usec;
struct ia64_boot_param *ia64_boot_param;
struct screen_info screen_info;
unsigned long vga_console_iobase;
unsigned long vga_console_membase;

static struct resource data_resource = {
	.name	= "Kernel data",
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
};

static struct resource code_resource = {
	.name	= "Kernel code",
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
};
extern void efi_initialize_iomem_resources(struct resource *,
		struct resource *);
extern char _text[], _end[], _etext[];

unsigned long ia64_max_cacheline_size;
unsigned long ia64_iobase;	/* virtual address for I/O accesses */
EXPORT_SYMBOL(ia64_iobase);
struct io_space io_space[MAX_IO_SPACES];
EXPORT_SYMBOL(io_space);
unsigned int num_io_spaces;

/*
 * "flush_icache_range()" needs to know what processor-dependent stride size to use
 * when it makes i-cache(s) coherent with d-caches.
 */
#define	I_CACHE_STRIDE_SHIFT	5	/* Safest way to go: 32 bytes by 32 bytes */
unsigned long ia64_i_cache_stride_shift = ~0;
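/*
 * The ~0 initializer is a sentinel larger than any real shift value:
 * get_max_cacheline_size() below lowers it to the minimum stride reported
 * by PAL across all cache levels, and falls back to I_CACHE_STRIDE_SHIFT
 * if PAL fails.  With a shift of 5, for example, flush_icache_range()
 * walks the range in 1 << 5 = 32-byte steps.
 */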

/*
 * The merge_mask variable needs to be set to (max(iommu_page_size(iommu)) - 1).
 * It specifies the address bits that must be 0 for two buffers to be mergeable
 * by the I/O MMU (i.e., the end address of the first buffer and the start
 * address of the second buffer must be aligned to (merge_mask+1) in order to
 * be mergeable).  By default, we assume there is no I/O MMU which can merge
 * physically discontiguous buffers, so we set the merge_mask to ~0UL, which
 * corresponds to an I/O MMU page size of 2^64 (for comparison, an I/O MMU
 * with 4KB pages would yield a merge_mask of 0xfff).
 */
unsigned long ia64_max_iommu_merge_mask = ~0UL;
EXPORT_SYMBOL(ia64_max_iommu_merge_mask);

/*
 * A special end-of-memory marker occupies the extra (+1) slot.
 */
struct rsvd_region rsvd_region[IA64_MAX_RSVD_REGIONS + 1];
int num_rsvd_regions;

/*
 * Filter incoming memory segments based on the primitive map created from the boot
 * parameters.  Segments contained in the map are removed from the memory ranges.  A
 * caller-specified function is called with the memory ranges that remain after filtering.
 * This routine does not assume the incoming segments are sorted.
 */
int
filter_rsvd_memory (unsigned long start, unsigned long end, void *arg)
{
	unsigned long range_start, range_end, prev_start;
	void (*func)(unsigned long, unsigned long, int);
	int i;

#if IGNORE_PFN0
	if (start == PAGE_OFFSET) {
		printk(KERN_WARNING "warning: skipping physical page 0\n");
		start += PAGE_SIZE;
		if (start >= end) return 0;
	}
#endif
	/*
	 * lowest possible address (the walker uses virtual addresses)
	 */
	prev_start = PAGE_OFFSET;
	func = arg;

	for (i = 0; i < num_rsvd_regions; ++i) {
		range_start = max(start, prev_start);
		range_end   = min(end, rsvd_region[i].start);

		if (range_start < range_end)
			call_pernode_memory(__pa(range_start), range_end - range_start, func);

		/* nothing more available in this segment */
		if (range_end == end) return 0;

		prev_start = rsvd_region[i].end;
	}
	/* end of memory marker allows full processing inside loop body */
	return 0;
}
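/*
 * The loop above depends on reserve_memory() appending an end-of-memory
 * marker whose .start is ~0UL: min(end, rsvd_region[i].start) then clamps
 * to "end" on the final iteration, so the function normally returns from
 * inside the loop; the trailing return is only a safety net.
 */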

static void
sort_regions (struct rsvd_region *rsvd_region, int max)
{
	int j;

	/* simple bubble sorting */
	while (max--) {
		for (j = 0; j < max; ++j) {
			if (rsvd_region[j].start > rsvd_region[j+1].start) {
				struct rsvd_region tmp;
				tmp = rsvd_region[j];
				rsvd_region[j] = rsvd_region[j + 1];
				rsvd_region[j + 1] = tmp;
			}
		}
	}
}

/*
 * Request address space for all standard resources
 */
static int __init register_memory(void)
{
	code_resource.start = ia64_tpa(_text);
	code_resource.end   = ia64_tpa(_etext) - 1;
	data_resource.start = ia64_tpa(_etext);
	data_resource.end   = ia64_tpa(_end) - 1;
	efi_initialize_iomem_resources(&code_resource, &data_resource);

	return 0;
}

__initcall(register_memory);

/**
 * reserve_memory - setup reserved memory areas
 *
 * Set up the reserved memory areas set aside for the boot parameters,
 * initrd, etc.  There are currently %IA64_MAX_RSVD_REGIONS defined,
 * see include/asm-ia64/meminit.h if you need to define more.
 */
void
reserve_memory (void)
{
	int n = 0;

	/*
	 * none of the entries in this table overlap
	 */
	rsvd_region[n].start = (unsigned long) ia64_boot_param;
	rsvd_region[n].end   = rsvd_region[n].start + sizeof(*ia64_boot_param);
	n++;

	rsvd_region[n].start = (unsigned long) __va(ia64_boot_param->efi_memmap);
	rsvd_region[n].end   = rsvd_region[n].start + ia64_boot_param->efi_memmap_size;
	n++;

	rsvd_region[n].start = (unsigned long) __va(ia64_boot_param->command_line);
	rsvd_region[n].end   = (rsvd_region[n].start
				+ strlen(__va(ia64_boot_param->command_line)) + 1);
	n++;

	rsvd_region[n].start = (unsigned long) ia64_imva((void *)KERNEL_START);
	rsvd_region[n].end   = (unsigned long) ia64_imva(_end);
	n++;

#ifdef CONFIG_BLK_DEV_INITRD
	if (ia64_boot_param->initrd_start) {
		rsvd_region[n].start = (unsigned long)__va(ia64_boot_param->initrd_start);
		rsvd_region[n].end   = rsvd_region[n].start + ia64_boot_param->initrd_size;
		n++;
	}
#endif

	efi_memmap_init(&rsvd_region[n].start, &rsvd_region[n].end);
	n++;

	/* end of memory marker */
	rsvd_region[n].start = ~0UL;
	rsvd_region[n].end   = ~0UL;
	n++;

	num_rsvd_regions = n;

	sort_regions(rsvd_region, num_rsvd_regions);
}

/**
 * find_initrd - get initrd parameters from the boot parameter structure
 *
 * Grab the initrd start and end from the boot parameter struct given to us
 * by the boot loader.
 */
void
find_initrd (void)
{
#ifdef CONFIG_BLK_DEV_INITRD
	if (ia64_boot_param->initrd_start) {
		initrd_start = (unsigned long)__va(ia64_boot_param->initrd_start);
		initrd_end   = initrd_start + ia64_boot_param->initrd_size;

		printk(KERN_INFO "Initial ramdisk at: 0x%lx (%lu bytes)\n",
		       initrd_start, ia64_boot_param->initrd_size);
	}
#endif
}

static void __init
io_port_init (void)
{
	unsigned long phys_iobase;

	/*
	 * Set `iobase' based on the EFI memory map or, failing that, the
	 * value firmware left in ar.k0.
	 *
	 * Note that in ia32 mode, IN/OUT instructions use ar.k0 to compute
	 * the port's virtual address, so ia32_load_state() loads it with a
	 * user virtual address.  But in ia64 mode, glibc uses the
	 * *physical* address in ar.k0 to mmap the appropriate area from
	 * /dev/mem, and the inX()/outX() interfaces use MMIO.  In both
	 * cases, user-mode can only use the legacy 0-64K I/O port space.
	 *
	 * ar.k0 is not involved in kernel I/O port accesses, which can use
	 * any of the I/O port spaces and are done via MMIO using the
	 * virtual mmio_base from the appropriate io_space[].
	 */
	phys_iobase = efi_get_iobase();
	if (!phys_iobase) {
		phys_iobase = ia64_get_kr(IA64_KR_IO_BASE);
		printk(KERN_INFO "No I/O port range found in EFI memory map, "
		       "falling back to AR.KR0 (0x%lx)\n", phys_iobase);
	}
	ia64_iobase = (unsigned long) ioremap(phys_iobase, 0);
	ia64_set_kr(IA64_KR_IO_BASE, __pa(ia64_iobase));

	/* setup legacy IO port space */
	io_space[0].mmio_base = ia64_iobase;
	io_space[0].sparse = 1;
	num_io_spaces = 1;
}
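/*
 * "sparse" means legacy port numbers are scattered into the MMIO offset
 * rather than added linearly to mmio_base; see IO_SPACE_SPARSE_ENCODING()
 * in asm/io.h, which turns port p into offset ((p >> 2) << 12) | (p & 0xfff)
 * (port 0x3f8, for example, ends up at mmio_base + 0xfe3f8).
 */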

/**
 * early_console_setup - setup debugging console
 *
 * Consoles started here require little enough setup that we can start using
 * them very early in the boot process, either right after the machine
 * vector initialization, or even before if the drivers can detect their
 * hardware.
 *
 * Returns non-zero if a console couldn't be set up.
 */
static inline int __init
early_console_setup (char *cmdline)
{
	int earlycons = 0;

#ifdef CONFIG_SERIAL_SGI_L1_CONSOLE
	{
		extern int sn_serial_console_early_setup(void);
		if (!sn_serial_console_early_setup())
			earlycons++;
	}
#endif
#ifdef CONFIG_EFI_PCDP
	if (!efi_setup_pcdp_console(cmdline))
		earlycons++;
#endif
#ifdef CONFIG_SERIAL_8250_CONSOLE
	if (!early_serial_console_init(cmdline))
		earlycons++;
#endif

	return (earlycons) ? 0 : -1;
}

static inline void
mark_bsp_online (void)
{
#ifdef CONFIG_SMP
	/* If we register an early console, allow CPU 0 to printk */
	cpu_set(smp_processor_id(), cpu_online_map);
#endif
}

#ifdef CONFIG_SMP
static void
check_for_logical_procs (void)
{
	pal_logical_to_physical_t info;
	s64 status;

	status = ia64_pal_logical_to_phys(0, &info);
	if (status == -1) {
		printk(KERN_INFO "No logical to physical processor mapping "
		       "available\n");
		return;
	}
	if (status) {
		printk(KERN_ERR "ia64_pal_logical_to_phys failed with %ld\n",
		       status);
		return;
	}
	/*
	 * Total number of siblings that BSP has, though not all of them
	 * may have booted successfully.  The correct number of siblings
	 * booted is in info.overview_num_log.
	 */
	smp_num_siblings = info.overview_tpc;
	smp_num_cpucores = info.overview_cpp;
}
#endif
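/*
 * Note: the -1 tested above is PAL_STATUS_UNIMPLEMENTED, i.e. the firmware
 * does not implement PAL_LOGICAL_TO_PHYSICAL at all; any other non-zero
 * status is a real failure.  overview_tpc counts threads per core and
 * overview_cpp cores per package, hence the assignments to
 * smp_num_siblings and smp_num_cpucores.
 */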

void __init
setup_arch (char **cmdline_p)
{
	unw_init();

	ia64_patch_vtop((u64) __start___vtop_patchlist, (u64) __end___vtop_patchlist);

	*cmdline_p = __va(ia64_boot_param->command_line);
	strlcpy(saved_command_line, *cmdline_p, COMMAND_LINE_SIZE);

	efi_init();
	io_port_init();

#ifdef CONFIG_IA64_GENERIC
	{
		const char *mvec_name = strstr (*cmdline_p, "machvec=");
		char str[64];

		if (mvec_name) {
			const char *end;
			size_t len;

			mvec_name += 8;
			end = strchr (mvec_name, ' ');
			if (end)
				len = end - mvec_name;
			else
				len = strlen (mvec_name);
			len = min(len, sizeof (str) - 1);
			strncpy (str, mvec_name, len);
			str[len] = '\0';
			mvec_name = str;
		} else
			mvec_name = acpi_get_sysname();
		machvec_init(mvec_name);
	}
#endif

	if (early_console_setup(*cmdline_p) == 0)
		mark_bsp_online();

#ifdef CONFIG_ACPI
	/* Initialize the ACPI boot-time table parser */
	acpi_table_init();
# ifdef CONFIG_ACPI_NUMA
	acpi_numa_init();
# endif
#else
# ifdef CONFIG_SMP
	smp_build_cpu_map();	/* happens, e.g., with the Ski simulator */
# endif
#endif /* CONFIG_ACPI */

	find_memory();

	/* process SAL system table: */
	ia64_sal_init(efi.sal_systab);

#ifdef CONFIG_SMP
	cpu_physical_id(0) = hard_smp_processor_id();

	cpu_set(0, cpu_sibling_map[0]);
	cpu_set(0, cpu_core_map[0]);

	check_for_logical_procs();
	if (smp_num_cpucores > 1)
		printk(KERN_INFO
		       "cpu package is Multi-Core capable: number of cores=%d\n",
		       smp_num_cpucores);
	if (smp_num_siblings > 1)
		printk(KERN_INFO
		       "cpu package is Multi-Threading capable: number of siblings=%d\n",
		       smp_num_siblings);
#endif

	cpu_init();	/* initialize the bootstrap CPU */

#ifdef CONFIG_ACPI
	acpi_boot_init();
#endif

#ifdef CONFIG_VT
	if (!conswitchp) {
# if defined(CONFIG_DUMMY_CONSOLE)
		conswitchp = &dummy_con;
# endif
# if defined(CONFIG_VGA_CONSOLE)
		/*
		 * Non-legacy systems may route legacy VGA MMIO range to system
		 * memory.  vga_con probes the MMIO hole, so memory looks like
		 * a VGA device to it.  The EFI memory map can tell us if it's
		 * memory so we can avoid this problem.
		 */
		if (efi_mem_type(0xA0000) != EFI_CONVENTIONAL_MEMORY)
			conswitchp = &vga_con;
# endif
	}
#endif

	/* enable IA-64 Machine Check Abort Handling unless disabled */
	if (!strstr(saved_command_line, "nomca"))
		ia64_mca_init();

	platform_setup(cmdline_p);
	paging_init();
}

/*
 * Display CPU info for all CPUs.
 */
static int
show_cpuinfo (struct seq_file *m, void *v)
{
#ifdef CONFIG_SMP
# define lpj	c->loops_per_jiffy
# define cpunum	c->cpu
#else
# define lpj	loops_per_jiffy
# define cpunum	0
#endif
	static struct {
		unsigned long mask;
		const char *feature_name;
	} feature_bits[] = {
		{ 1UL << 0, "branchlong" },
		{ 1UL << 1, "spontaneous deferral"},
		{ 1UL << 2, "16-byte atomic ops" }
	};
	char family[32], features[128], *cp, sep;
	struct cpuinfo_ia64 *c = v;
	unsigned long mask;
	int i;

	mask = c->features;

	switch (c->family) {
	      case 0x07:	memcpy(family, "Itanium", 8); break;
	      case 0x1f:	memcpy(family, "Itanium 2", 10); break;
	      default:		sprintf(family, "%u", c->family); break;
	}

	/* build the feature string: */
	memcpy(features, " standard", 10);
	cp = features;
	sep = 0;
	for (i = 0; i < (int) ARRAY_SIZE(feature_bits); ++i) {
		if (mask & feature_bits[i].mask) {
			if (sep)
				*cp++ = sep;
			sep = ',';
			*cp++ = ' ';
			strcpy(cp, feature_bits[i].feature_name);
			cp += strlen(feature_bits[i].feature_name);
			mask &= ~feature_bits[i].mask;
		}
	}
	if (mask) {
		/* print unknown features as a hex value: */
		if (sep)
			*cp++ = sep;
		sprintf(cp, " 0x%lx", mask);
	}
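	/*
	 * Formatting note for the seq_printf() below: proc_freq and itc_freq
	 * are in Hz, so value/1000000 and value%1000000 render them as MHz
	 * with six decimal places.  BogoMIPS uses the kernel's usual
	 * loops_per_jiffy scaling: lpj*HZ/500000 gives the integer part and
	 * (lpj*HZ/5000) % 100 the two decimals.
	 */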
	seq_printf(m,
		   "processor : %d\n"
		   "vendor : %s\n"
		   "arch : IA-64\n"
		   "family : %s\n"
		   "model : %u\n"
		   "revision : %u\n"
		   "archrev : %u\n"
		   "features :%s\n"	/* don't change this---it _is_ right! */
		   "cpu number : %lu\n"
		   "cpu regs : %u\n"
		   "cpu MHz : %lu.%06lu\n"
		   "itc MHz : %lu.%06lu\n"
		   "BogoMIPS : %lu.%02lu\n",
		   cpunum, c->vendor, family, c->model, c->revision, c->archrev,
		   features, c->ppn, c->number,
		   c->proc_freq / 1000000, c->proc_freq % 1000000,
		   c->itc_freq / 1000000, c->itc_freq % 1000000,
		   lpj*HZ/500000, (lpj*HZ/5000) % 100);
#ifdef CONFIG_SMP
	seq_printf(m, "siblings : %u\n", c->num_log);
	if (c->threads_per_core > 1 || c->cores_per_socket > 1)
		seq_printf(m,
			   "physical id: %u\n"
			   "core id : %u\n"
			   "thread id : %u\n",
			   c->socket_id, c->core_id, c->thread_id);
#endif
	seq_printf(m, "\n");

	return 0;
}

static void *
c_start (struct seq_file *m, loff_t *pos)
{
#ifdef CONFIG_SMP
	while (*pos < NR_CPUS && !cpu_isset(*pos, cpu_online_map))
		++*pos;
#endif
	return *pos < NR_CPUS ? cpu_data(*pos) : NULL;
}

static void *
c_next (struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return c_start(m, pos);
}

static void
c_stop (struct seq_file *m, void *v)
{
}

struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= show_cpuinfo
};

void
identify_cpu (struct cpuinfo_ia64 *c)
{
	union {
		unsigned long bits[5];
		struct {
			/* id 0 & 1: */
			char vendor[16];

			/* id 2 */
			u64 ppn;	/* processor serial number */

			/* id 3: */
			unsigned number		:  8;
			unsigned revision	:  8;
			unsigned model		:  8;
			unsigned family		:  8;
			unsigned archrev	:  8;
			unsigned reserved	: 24;

			/* id 4: */
			u64 features;
		} field;
	} cpuid;
	pal_vm_info_1_u_t vm1;
	pal_vm_info_2_u_t vm2;
	pal_status_t status;
	unsigned long impl_va_msb = 50, phys_addr_size = 44;	/* Itanium defaults */
	int i;

	for (i = 0; i < 5; ++i)
		cpuid.bits[i] = ia64_get_cpuid(i);

	memcpy(c->vendor, cpuid.field.vendor, 16);
#ifdef CONFIG_SMP
	c->cpu = smp_processor_id();

	/*
	 * The default values below will be overwritten by identify_siblings()
	 * for multi-threading/multi-core capable CPUs.
	 */
	c->threads_per_core = c->cores_per_socket = c->num_log = 1;
	c->socket_id = -1;

	identify_siblings(c);
#endif
	c->ppn = cpuid.field.ppn;
	c->number = cpuid.field.number;
	c->revision = cpuid.field.revision;
	c->model = cpuid.field.model;
	c->family = cpuid.field.family;
	c->archrev = cpuid.field.archrev;
	c->features = cpuid.field.features;

	status = ia64_pal_vm_summary(&vm1, &vm2);
	if (status == PAL_STATUS_SUCCESS) {
		impl_va_msb = vm2.pal_vm_info_2_s.impl_va_msb;
		phys_addr_size = vm1.pal_vm_info_1_s.phys_add_size;
	}
	c->unimpl_va_mask = ~((7L<<61) | ((1L << (impl_va_msb + 1)) - 1));
	c->unimpl_pa_mask = ~((1L<<63) | ((1L << phys_addr_size) - 1));
}
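/*
 * Worked example for the masks above, using the Itanium defaults: with
 * impl_va_msb = 50, ((1L << 51) - 1) covers the implemented VA bits 0..50
 * and 7L << 61 covers the region-number bits 61..63, so unimpl_va_mask
 * ends up with exactly bits 51..60 set.  With phys_addr_size = 44,
 * unimpl_pa_mask flags bits 44..62; bit 63 is excluded because it encodes
 * the uncacheable attribute rather than part of the physical address.
 */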

void
setup_per_cpu_areas (void)
{
	/* start_kernel() requires this... */
}

/*
 * Calculate the max. cache line size.
 *
 * In addition, the minimum of the i-cache stride sizes is calculated for
 * "flush_icache_range()".
 */
static void
get_max_cacheline_size (void)
{
	unsigned long line_size, max = 1;
	u64 l, levels, unique_caches;
	pal_cache_config_info_t cci;
	s64 status;

	status = ia64_pal_cache_summary(&levels, &unique_caches);
	if (status != 0) {
		printk(KERN_ERR "%s: ia64_pal_cache_summary() failed (status=%ld)\n",
		       __FUNCTION__, status);
		max = SMP_CACHE_BYTES;
		/* Safest setup for "flush_icache_range()" */
		ia64_i_cache_stride_shift = I_CACHE_STRIDE_SHIFT;
		goto out;
	}

	for (l = 0; l < levels; ++l) {
		status = ia64_pal_cache_config_info(l, /* cache_type (data_or_unified)= */ 2,
						    &cci);
		if (status != 0) {
			printk(KERN_ERR
			       "%s: ia64_pal_cache_config_info(l=%lu, 2) failed (status=%ld)\n",
			       __FUNCTION__, l, status);
			max = SMP_CACHE_BYTES;
			/* The safest setup for "flush_icache_range()" */
			cci.pcci_stride = I_CACHE_STRIDE_SHIFT;
			cci.pcci_unified = 1;
		}
		line_size = 1 << cci.pcci_line_size;
		if (line_size > max)
			max = line_size;
		if (!cci.pcci_unified) {
			status = ia64_pal_cache_config_info(l,
						    /* cache_type (instruction)= */ 1,
						    &cci);
			if (status != 0) {
				printk(KERN_ERR
				       "%s: ia64_pal_cache_config_info(l=%lu, 1) failed (status=%ld)\n",
				       __FUNCTION__, l, status);
				/* The safest setup for "flush_icache_range()" */
				cci.pcci_stride = I_CACHE_STRIDE_SHIFT;
			}
		}
		if (cci.pcci_stride < ia64_i_cache_stride_shift)
			ia64_i_cache_stride_shift = cci.pcci_stride;
	}
  out:
	if (max > ia64_max_cacheline_size)
		ia64_max_cacheline_size = max;
}

/*
 * cpu_init() initializes state that is per-CPU.  This function acts
 * as a 'CPU state barrier', nothing should get across.
 */
void
cpu_init (void)
{
	extern void __devinit ia64_mmu_init (void *);
	unsigned long num_phys_stacked;
	pal_vm_info_2_u_t vmi;
	unsigned int max_ctx;
	struct cpuinfo_ia64 *cpu_info;
	void *cpu_data;

	cpu_data = per_cpu_init();

	/*
	 * We set ar.k3 so that assembly code in MCA handler can compute
	 * physical addresses of per cpu variables with a simple:
	 *	phys = ar.k3 + &per_cpu_var
	 */
	ia64_set_kr(IA64_KR_PER_CPU_DATA,
		    ia64_tpa(cpu_data) - (long) __per_cpu_start);

	get_max_cacheline_size();

	/*
	 * We can't pass "local_cpu_data" to identify_cpu() because we haven't called
	 * ia64_mmu_init() yet.  And we can't call ia64_mmu_init() first because it
	 * depends on the data returned by identify_cpu().  We break the dependency by
	 * accessing cpu_data() through the canonical per-CPU address.
	 */
	cpu_info = cpu_data + ((char *) &__ia64_per_cpu_var(cpu_info) - __per_cpu_start);
	identify_cpu(cpu_info);

#ifdef CONFIG_MCKINLEY
	{
# define FEATURE_SET 16
		struct ia64_pal_retval iprv;

		if (cpu_info->family == 0x1f) {
			PAL_CALL_PHYS(iprv, PAL_PROC_GET_FEATURES, 0, FEATURE_SET, 0);
			if ((iprv.status == 0) && (iprv.v0 & 0x80) && (iprv.v2 & 0x80))
				PAL_CALL_PHYS(iprv, PAL_PROC_SET_FEATURES,
					      (iprv.v1 | 0x80), FEATURE_SET, 0);
		}
	}
#endif

	/* Clear the stack memory reserved for pt_regs: */
	memset(ia64_task_regs(current), 0, sizeof(struct pt_regs));

	ia64_set_kr(IA64_KR_FPU_OWNER, 0);

	/*
	 * Initialize the page-table base register to a global
	 * directory with all zeroes.  This ensures that we can handle
	 * TLB-misses to user address-space even before we create the
	 * first user address-space.  This may happen, e.g., due to
	 * aggressive use of lfetch.fault.
	 */
	ia64_set_kr(IA64_KR_PT_BASE, __pa(ia64_imva(empty_zero_page)));

	/*
	 * Initialize default control register to defer speculative faults except
	 * for those arising from TLB misses, which are not deferred.  The
	 * kernel MUST NOT depend on a particular setting of these bits (in other words,
	 * the kernel must have recovery code for all speculative accesses).  Turn on
	 * dcr.lc as per recommendation by the architecture team.  Most IA-32 apps
	 * shouldn't be affected by this (moral: keep your ia32 locks aligned and you'll
	 * be fine).
	 */
	ia64_setreg(_IA64_REG_CR_DCR, (  IA64_DCR_DP | IA64_DCR_DK | IA64_DCR_DX | IA64_DCR_DR
					| IA64_DCR_DA | IA64_DCR_DD | IA64_DCR_LC));
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
	if (current->mm)
		BUG();

	ia64_mmu_init(ia64_imva(cpu_data));
	ia64_mca_cpu_init(ia64_imva(cpu_data));

#ifdef CONFIG_IA32_SUPPORT
	ia32_cpu_init();
#endif

	/* Clear ITC to eliminate sched_clock() overflows in human time. */
	ia64_set_itc(0);

	/* disable all local interrupt sources: */
	ia64_set_itv(1 << 16);
	ia64_set_lrr0(1 << 16);
	ia64_set_lrr1(1 << 16);
	ia64_setreg(_IA64_REG_CR_PMV, 1 << 16);
	ia64_setreg(_IA64_REG_CR_CMCV, 1 << 16);

	/* clear TPR & XTP to enable all interrupt classes: */
	ia64_setreg(_IA64_REG_CR_TPR, 0);
#ifdef CONFIG_SMP
	normal_xtp();
#endif
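	/*
	 * Background for the max_ctx computation below (see reload_context()
	 * in asm/mmu_context.h): a context number becomes a region ID by
	 * shifting it left 3 bits to make room for the region number, so
	 * only rid_size - 3 bits are usable for context numbers.  The
	 * (1U << 15) - 1 fallback likewise corresponds to the architected
	 * minimum of 18 implemented RID bits.
	 */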
	/* set ia64_ctx.max_ctx to the maximum context number supported by all CPUs: */
	if (ia64_pal_vm_summary(NULL, &vmi) == 0)
		max_ctx = (1U << (vmi.pal_vm_info_2_s.rid_size - 3)) - 1;
	else {
		printk(KERN_WARNING "cpu_init: PAL VM summary failed, assuming 18 RID bits\n");
		max_ctx = (1U << 15) - 1;	/* use architected minimum */
	}
	while (max_ctx < ia64_ctx.max_ctx) {
		unsigned int old = ia64_ctx.max_ctx;
		if (cmpxchg(&ia64_ctx.max_ctx, old, max_ctx) == old)
			break;
	}

	if (ia64_pal_rse_info(&num_phys_stacked, NULL) != 0) {
		printk(KERN_WARNING "cpu_init: PAL RSE info failed; assuming 96 physical "
		       "stacked regs\n");
		num_phys_stacked = 96;
	}
	/* size of physical stacked register partition plus 8 bytes: */
	__get_cpu_var(ia64_phys_stacked_size_p8) = num_phys_stacked*8 + 8;

	platform_cpu_init();
	pm_idle = default_idle;
}

void
check_bugs (void)
{
	ia64_patch_mckinley_e9((unsigned long) __start___mckinley_e9_bundles,
			       (unsigned long) __end___mckinley_e9_bundles);
}