1 /* 2 * linux/arch/alpha/kernel/setup.c 3 * 4 * Copyright (C) 1995 Linus Torvalds 5 */ 6 7 /* 2.3.x bootmem, 1999 Andrea Arcangeli <andrea@suse.de> */ 8 9 /* 10 * Bootup setup stuff. 11 */ 12 13 #include <linux/sched.h> 14 #include <linux/kernel.h> 15 #include <linux/mm.h> 16 #include <linux/stddef.h> 17 #include <linux/unistd.h> 18 #include <linux/ptrace.h> 19 #include <linux/slab.h> 20 #include <linux/user.h> 21 #include <linux/screen_info.h> 22 #include <linux/delay.h> 23 #include <linux/mc146818rtc.h> 24 #include <linux/console.h> 25 #include <linux/cpu.h> 26 #include <linux/errno.h> 27 #include <linux/init.h> 28 #include <linux/string.h> 29 #include <linux/ioport.h> 30 #include <linux/platform_device.h> 31 #include <linux/bootmem.h> 32 #include <linux/pci.h> 33 #include <linux/seq_file.h> 34 #include <linux/root_dev.h> 35 #include <linux/initrd.h> 36 #include <linux/eisa.h> 37 #include <linux/pfn.h> 38 #ifdef CONFIG_MAGIC_SYSRQ 39 #include <linux/sysrq.h> 40 #include <linux/reboot.h> 41 #endif 42 #include <linux/notifier.h> 43 #include <asm/setup.h> 44 #include <asm/io.h> 45 #include <linux/log2.h> 46 #include <linux/export.h> 47 48 extern struct atomic_notifier_head panic_notifier_list; 49 static int alpha_panic_event(struct notifier_block *, unsigned long, void *); 50 static struct notifier_block alpha_panic_block = { 51 alpha_panic_event, 52 NULL, 53 INT_MAX /* try to do it first */ 54 }; 55 56 #include <asm/uaccess.h> 57 #include <asm/pgtable.h> 58 #include <asm/hwrpb.h> 59 #include <asm/dma.h> 60 #include <asm/mmu_context.h> 61 #include <asm/console.h> 62 63 #include "proto.h" 64 #include "pci_impl.h" 65 66 67 struct hwrpb_struct *hwrpb; 68 EXPORT_SYMBOL(hwrpb); 69 unsigned long srm_hae; 70 71 int alpha_l1i_cacheshape; 72 int alpha_l1d_cacheshape; 73 int alpha_l2_cacheshape; 74 int alpha_l3_cacheshape; 75 76 #ifdef CONFIG_VERBOSE_MCHECK 77 /* 0=minimum, 1=verbose, 2=all */ 78 /* These can be overridden via the command line, ie "verbose_mcheck=2") */ 79 unsigned long alpha_verbose_mcheck = CONFIG_VERBOSE_MCHECK_ON; 80 #endif 81 82 #ifdef CONFIG_NUMA 83 struct cpumask node_to_cpumask_map[MAX_NUMNODES] __read_mostly; 84 EXPORT_SYMBOL(node_to_cpumask_map); 85 #endif 86 87 /* Which processor we booted from. */ 88 int boot_cpuid; 89 90 /* 91 * Using SRM callbacks for initial console output. This works from 92 * setup_arch() time through the end of time_init(), as those places 93 * are under our (Alpha) control. 94 95 * "srmcons" specified in the boot command arguments allows us to 96 * see kernel messages during the period of time before the true 97 * console device is "registered" during console_init(). 98 * As of this version (2.5.59), console_init() will call 99 * disable_early_printk() as the last action before initializing 100 * the console drivers. That's the last possible time srmcons can be 101 * unregistered without interfering with console behavior. 102 * 103 * By default, OFF; set it with a bootcommand arg of "srmcons" or 104 * "console=srm". The meaning of these two args is: 105 * "srmcons" - early callback prints 106 * "console=srm" - full callback based console, including early prints 107 */ 108 int srmcons_output = 0; 109 110 /* Enforce a memory size limit; useful for testing. By default, none. */ 111 unsigned long mem_size_limit = 0; 112 113 /* Set AGP GART window size (0 means disabled). */ 114 unsigned long alpha_agpgart_size = DEFAULT_AGP_APER_SIZE; 115 116 #ifdef CONFIG_ALPHA_GENERIC 117 struct alpha_machine_vector alpha_mv; 118 int alpha_using_srm; 119 EXPORT_SYMBOL(alpha_using_srm); 120 #endif 121 122 static struct alpha_machine_vector *get_sysvec(unsigned long, unsigned long, 123 unsigned long); 124 static struct alpha_machine_vector *get_sysvec_byname(const char *); 125 static void get_sysnames(unsigned long, unsigned long, unsigned long, 126 char **, char **); 127 static void determine_cpu_caches (unsigned int); 128 129 static char __initdata command_line[COMMAND_LINE_SIZE]; 130 131 /* 132 * The format of "screen_info" is strange, and due to early 133 * i386-setup code. This is just enough to make the console 134 * code think we're on a VGA color display. 135 */ 136 137 struct screen_info screen_info = { 138 .orig_x = 0, 139 .orig_y = 25, 140 .orig_video_cols = 80, 141 .orig_video_lines = 25, 142 .orig_video_isVGA = 1, 143 .orig_video_points = 16 144 }; 145 146 EXPORT_SYMBOL(screen_info); 147 148 /* 149 * The direct map I/O window, if any. This should be the same 150 * for all busses, since it's used by virt_to_bus. 151 */ 152 153 unsigned long __direct_map_base; 154 unsigned long __direct_map_size; 155 EXPORT_SYMBOL(__direct_map_base); 156 EXPORT_SYMBOL(__direct_map_size); 157 158 /* 159 * Declare all of the machine vectors. 160 */ 161 162 /* GCC 2.7.2 (on alpha at least) is lame. It does not support either 163 __attribute__((weak)) or #pragma weak. Bypass it and talk directly 164 to the assembler. */ 165 166 #define WEAK(X) \ 167 extern struct alpha_machine_vector X; \ 168 asm(".weak "#X) 169 170 WEAK(alcor_mv); 171 WEAK(alphabook1_mv); 172 WEAK(avanti_mv); 173 WEAK(cabriolet_mv); 174 WEAK(clipper_mv); 175 WEAK(dp264_mv); 176 WEAK(eb164_mv); 177 WEAK(eb64p_mv); 178 WEAK(eb66_mv); 179 WEAK(eb66p_mv); 180 WEAK(eiger_mv); 181 WEAK(jensen_mv); 182 WEAK(lx164_mv); 183 WEAK(lynx_mv); 184 WEAK(marvel_ev7_mv); 185 WEAK(miata_mv); 186 WEAK(mikasa_mv); 187 WEAK(mikasa_primo_mv); 188 WEAK(monet_mv); 189 WEAK(nautilus_mv); 190 WEAK(noname_mv); 191 WEAK(noritake_mv); 192 WEAK(noritake_primo_mv); 193 WEAK(p2k_mv); 194 WEAK(pc164_mv); 195 WEAK(privateer_mv); 196 WEAK(rawhide_mv); 197 WEAK(ruffian_mv); 198 WEAK(rx164_mv); 199 WEAK(sable_mv); 200 WEAK(sable_gamma_mv); 201 WEAK(shark_mv); 202 WEAK(sx164_mv); 203 WEAK(takara_mv); 204 WEAK(titan_mv); 205 WEAK(webbrick_mv); 206 WEAK(wildfire_mv); 207 WEAK(xl_mv); 208 WEAK(xlt_mv); 209 210 #undef WEAK 211 212 /* 213 * I/O resources inherited from PeeCees. Except for perhaps the 214 * turbochannel alphas, everyone has these on some sort of SuperIO chip. 215 * 216 * ??? If this becomes less standard, move the struct out into the 217 * machine vector. 218 */ 219 220 static void __init 221 reserve_std_resources(void) 222 { 223 static struct resource standard_io_resources[] = { 224 { .name = "rtc", .start = -1, .end = -1 }, 225 { .name = "dma1", .start = 0x00, .end = 0x1f }, 226 { .name = "pic1", .start = 0x20, .end = 0x3f }, 227 { .name = "timer", .start = 0x40, .end = 0x5f }, 228 { .name = "keyboard", .start = 0x60, .end = 0x6f }, 229 { .name = "dma page reg", .start = 0x80, .end = 0x8f }, 230 { .name = "pic2", .start = 0xa0, .end = 0xbf }, 231 { .name = "dma2", .start = 0xc0, .end = 0xdf }, 232 }; 233 234 struct resource *io = &ioport_resource; 235 size_t i; 236 237 if (hose_head) { 238 struct pci_controller *hose; 239 for (hose = hose_head; hose; hose = hose->next) 240 if (hose->index == 0) { 241 io = hose->io_space; 242 break; 243 } 244 } 245 246 /* Fix up for the Jensen's queer RTC placement. */ 247 standard_io_resources[0].start = RTC_PORT(0); 248 standard_io_resources[0].end = RTC_PORT(0) + 0x10; 249 250 for (i = 0; i < ARRAY_SIZE(standard_io_resources); ++i) 251 request_resource(io, standard_io_resources+i); 252 } 253 254 #define PFN_MAX PFN_DOWN(0x80000000) 255 #define for_each_mem_cluster(memdesc, _cluster, i) \ 256 for ((_cluster) = (memdesc)->cluster, (i) = 0; \ 257 (i) < (memdesc)->numclusters; (i)++, (_cluster)++) 258 259 static unsigned long __init 260 get_mem_size_limit(char *s) 261 { 262 unsigned long end = 0; 263 char *from = s; 264 265 end = simple_strtoul(from, &from, 0); 266 if ( *from == 'K' || *from == 'k' ) { 267 end = end << 10; 268 from++; 269 } else if ( *from == 'M' || *from == 'm' ) { 270 end = end << 20; 271 from++; 272 } else if ( *from == 'G' || *from == 'g' ) { 273 end = end << 30; 274 from++; 275 } 276 return end >> PAGE_SHIFT; /* Return the PFN of the limit. */ 277 } 278 279 #ifdef CONFIG_BLK_DEV_INITRD 280 void * __init 281 move_initrd(unsigned long mem_limit) 282 { 283 void *start; 284 unsigned long size; 285 286 size = initrd_end - initrd_start; 287 start = __alloc_bootmem(PAGE_ALIGN(size), PAGE_SIZE, 0); 288 if (!start || __pa(start) + size > mem_limit) { 289 initrd_start = initrd_end = 0; 290 return NULL; 291 } 292 memmove(start, (void *)initrd_start, size); 293 initrd_start = (unsigned long)start; 294 initrd_end = initrd_start + size; 295 printk("initrd moved to %p\n", start); 296 return start; 297 } 298 #endif 299 300 #ifndef CONFIG_DISCONTIGMEM 301 static void __init 302 setup_memory(void *kernel_end) 303 { 304 struct memclust_struct * cluster; 305 struct memdesc_struct * memdesc; 306 unsigned long start_kernel_pfn, end_kernel_pfn; 307 unsigned long bootmap_size, bootmap_pages, bootmap_start; 308 unsigned long start, end; 309 unsigned long i; 310 311 /* Find free clusters, and init and free the bootmem accordingly. */ 312 memdesc = (struct memdesc_struct *) 313 (hwrpb->mddt_offset + (unsigned long) hwrpb); 314 315 for_each_mem_cluster(memdesc, cluster, i) { 316 printk("memcluster %lu, usage %01lx, start %8lu, end %8lu\n", 317 i, cluster->usage, cluster->start_pfn, 318 cluster->start_pfn + cluster->numpages); 319 320 /* Bit 0 is console/PALcode reserved. Bit 1 is 321 non-volatile memory -- we might want to mark 322 this for later. */ 323 if (cluster->usage & 3) 324 continue; 325 326 end = cluster->start_pfn + cluster->numpages; 327 if (end > max_low_pfn) 328 max_low_pfn = end; 329 } 330 331 /* 332 * Except for the NUMA systems (wildfire, marvel) all of the 333 * Alpha systems we run on support 32GB of memory or less. 334 * Since the NUMA systems introduce large holes in memory addressing, 335 * we can get into a situation where there is not enough contiguous 336 * memory for the memory map. 337 * 338 * Limit memory to the first 32GB to limit the NUMA systems to 339 * memory on their first node (wildfire) or 2 (marvel) to avoid 340 * not being able to produce the memory map. In order to access 341 * all of the memory on the NUMA systems, build with discontiguous 342 * memory support. 343 * 344 * If the user specified a memory limit, let that memory limit stand. 345 */ 346 if (!mem_size_limit) 347 mem_size_limit = (32ul * 1024 * 1024 * 1024) >> PAGE_SHIFT; 348 349 if (mem_size_limit && max_low_pfn >= mem_size_limit) 350 { 351 printk("setup: forcing memory size to %ldK (from %ldK).\n", 352 mem_size_limit << (PAGE_SHIFT - 10), 353 max_low_pfn << (PAGE_SHIFT - 10)); 354 max_low_pfn = mem_size_limit; 355 } 356 357 /* Find the bounds of kernel memory. */ 358 start_kernel_pfn = PFN_DOWN(KERNEL_START_PHYS); 359 end_kernel_pfn = PFN_UP(virt_to_phys(kernel_end)); 360 bootmap_start = -1; 361 362 try_again: 363 if (max_low_pfn <= end_kernel_pfn) 364 panic("not enough memory to boot"); 365 366 /* We need to know how many physically contiguous pages 367 we'll need for the bootmap. */ 368 bootmap_pages = bootmem_bootmap_pages(max_low_pfn); 369 370 /* Now find a good region where to allocate the bootmap. */ 371 for_each_mem_cluster(memdesc, cluster, i) { 372 if (cluster->usage & 3) 373 continue; 374 375 start = cluster->start_pfn; 376 end = start + cluster->numpages; 377 if (start >= max_low_pfn) 378 continue; 379 if (end > max_low_pfn) 380 end = max_low_pfn; 381 if (start < start_kernel_pfn) { 382 if (end > end_kernel_pfn 383 && end - end_kernel_pfn >= bootmap_pages) { 384 bootmap_start = end_kernel_pfn; 385 break; 386 } else if (end > start_kernel_pfn) 387 end = start_kernel_pfn; 388 } else if (start < end_kernel_pfn) 389 start = end_kernel_pfn; 390 if (end - start >= bootmap_pages) { 391 bootmap_start = start; 392 break; 393 } 394 } 395 396 if (bootmap_start == ~0UL) { 397 max_low_pfn >>= 1; 398 goto try_again; 399 } 400 401 /* Allocate the bootmap and mark the whole MM as reserved. */ 402 bootmap_size = init_bootmem(bootmap_start, max_low_pfn); 403 404 /* Mark the free regions. */ 405 for_each_mem_cluster(memdesc, cluster, i) { 406 if (cluster->usage & 3) 407 continue; 408 409 start = cluster->start_pfn; 410 end = cluster->start_pfn + cluster->numpages; 411 if (start >= max_low_pfn) 412 continue; 413 if (end > max_low_pfn) 414 end = max_low_pfn; 415 if (start < start_kernel_pfn) { 416 if (end > end_kernel_pfn) { 417 free_bootmem(PFN_PHYS(start), 418 (PFN_PHYS(start_kernel_pfn) 419 - PFN_PHYS(start))); 420 printk("freeing pages %ld:%ld\n", 421 start, start_kernel_pfn); 422 start = end_kernel_pfn; 423 } else if (end > start_kernel_pfn) 424 end = start_kernel_pfn; 425 } else if (start < end_kernel_pfn) 426 start = end_kernel_pfn; 427 if (start >= end) 428 continue; 429 430 free_bootmem(PFN_PHYS(start), PFN_PHYS(end) - PFN_PHYS(start)); 431 printk("freeing pages %ld:%ld\n", start, end); 432 } 433 434 /* Reserve the bootmap memory. */ 435 reserve_bootmem(PFN_PHYS(bootmap_start), bootmap_size, 436 BOOTMEM_DEFAULT); 437 printk("reserving pages %ld:%ld\n", bootmap_start, bootmap_start+PFN_UP(bootmap_size)); 438 439 #ifdef CONFIG_BLK_DEV_INITRD 440 initrd_start = INITRD_START; 441 if (initrd_start) { 442 initrd_end = initrd_start+INITRD_SIZE; 443 printk("Initial ramdisk at: 0x%p (%lu bytes)\n", 444 (void *) initrd_start, INITRD_SIZE); 445 446 if ((void *)initrd_end > phys_to_virt(PFN_PHYS(max_low_pfn))) { 447 if (!move_initrd(PFN_PHYS(max_low_pfn))) 448 printk("initrd extends beyond end of memory " 449 "(0x%08lx > 0x%p)\ndisabling initrd\n", 450 initrd_end, 451 phys_to_virt(PFN_PHYS(max_low_pfn))); 452 } else { 453 reserve_bootmem(virt_to_phys((void *)initrd_start), 454 INITRD_SIZE, BOOTMEM_DEFAULT); 455 } 456 } 457 #endif /* CONFIG_BLK_DEV_INITRD */ 458 } 459 #else 460 extern void setup_memory(void *); 461 #endif /* !CONFIG_DISCONTIGMEM */ 462 463 int __init 464 page_is_ram(unsigned long pfn) 465 { 466 struct memclust_struct * cluster; 467 struct memdesc_struct * memdesc; 468 unsigned long i; 469 470 memdesc = (struct memdesc_struct *) 471 (hwrpb->mddt_offset + (unsigned long) hwrpb); 472 for_each_mem_cluster(memdesc, cluster, i) 473 { 474 if (pfn >= cluster->start_pfn && 475 pfn < cluster->start_pfn + cluster->numpages) { 476 return (cluster->usage & 3) ? 0 : 1; 477 } 478 } 479 480 return 0; 481 } 482 483 static int __init 484 register_cpus(void) 485 { 486 int i; 487 488 for_each_possible_cpu(i) { 489 struct cpu *p = kzalloc(sizeof(*p), GFP_KERNEL); 490 if (!p) 491 return -ENOMEM; 492 register_cpu(p, i); 493 } 494 return 0; 495 } 496 497 arch_initcall(register_cpus); 498 499 void __init 500 setup_arch(char **cmdline_p) 501 { 502 extern char _end[]; 503 504 struct alpha_machine_vector *vec = NULL; 505 struct percpu_struct *cpu; 506 char *type_name, *var_name, *p; 507 void *kernel_end = _end; /* end of kernel */ 508 char *args = command_line; 509 510 hwrpb = (struct hwrpb_struct*) __va(INIT_HWRPB->phys_addr); 511 boot_cpuid = hard_smp_processor_id(); 512 513 /* 514 * Pre-process the system type to make sure it will be valid. 515 * 516 * This may restore real CABRIO and EB66+ family names, ie 517 * EB64+ and EB66. 518 * 519 * Oh, and "white box" AS800 (aka DIGITAL Server 3000 series) 520 * and AS1200 (DIGITAL Server 5000 series) have the type as 521 * the negative of the real one. 522 */ 523 if ((long)hwrpb->sys_type < 0) { 524 hwrpb->sys_type = -((long)hwrpb->sys_type); 525 hwrpb_update_checksum(hwrpb); 526 } 527 528 /* Register a call for panic conditions. */ 529 atomic_notifier_chain_register(&panic_notifier_list, 530 &alpha_panic_block); 531 532 #ifdef CONFIG_ALPHA_GENERIC 533 /* Assume that we've booted from SRM if we haven't booted from MILO. 534 Detect the later by looking for "MILO" in the system serial nr. */ 535 alpha_using_srm = strncmp((const char *)hwrpb->ssn, "MILO", 4) != 0; 536 #endif 537 538 /* If we are using SRM, we want to allow callbacks 539 as early as possible, so do this NOW, and then 540 they should work immediately thereafter. 541 */ 542 kernel_end = callback_init(kernel_end); 543 544 /* 545 * Locate the command line. 546 */ 547 /* Hack for Jensen... since we're restricted to 8 or 16 chars for 548 boot flags depending on the boot mode, we need some shorthand. 549 This should do for installation. */ 550 if (strcmp(COMMAND_LINE, "INSTALL") == 0) { 551 strlcpy(command_line, "root=/dev/fd0 load_ramdisk=1", sizeof command_line); 552 } else { 553 strlcpy(command_line, COMMAND_LINE, sizeof command_line); 554 } 555 strcpy(boot_command_line, command_line); 556 *cmdline_p = command_line; 557 558 /* 559 * Process command-line arguments. 560 */ 561 while ((p = strsep(&args, " \t")) != NULL) { 562 if (!*p) continue; 563 if (strncmp(p, "alpha_mv=", 9) == 0) { 564 vec = get_sysvec_byname(p+9); 565 continue; 566 } 567 if (strncmp(p, "cycle=", 6) == 0) { 568 est_cycle_freq = simple_strtol(p+6, NULL, 0); 569 continue; 570 } 571 if (strncmp(p, "mem=", 4) == 0) { 572 mem_size_limit = get_mem_size_limit(p+4); 573 continue; 574 } 575 if (strncmp(p, "srmcons", 7) == 0) { 576 srmcons_output |= 1; 577 continue; 578 } 579 if (strncmp(p, "console=srm", 11) == 0) { 580 srmcons_output |= 2; 581 continue; 582 } 583 if (strncmp(p, "gartsize=", 9) == 0) { 584 alpha_agpgart_size = 585 get_mem_size_limit(p+9) << PAGE_SHIFT; 586 continue; 587 } 588 #ifdef CONFIG_VERBOSE_MCHECK 589 if (strncmp(p, "verbose_mcheck=", 15) == 0) { 590 alpha_verbose_mcheck = simple_strtol(p+15, NULL, 0); 591 continue; 592 } 593 #endif 594 } 595 596 /* Replace the command line, now that we've killed it with strsep. */ 597 strcpy(command_line, boot_command_line); 598 599 /* If we want SRM console printk echoing early, do it now. */ 600 if (alpha_using_srm && srmcons_output) { 601 register_srm_console(); 602 603 /* 604 * If "console=srm" was specified, clear the srmcons_output 605 * flag now so that time.c won't unregister_srm_console 606 */ 607 if (srmcons_output & 2) 608 srmcons_output = 0; 609 } 610 611 #ifdef CONFIG_MAGIC_SYSRQ 612 /* If we're using SRM, make sysrq-b halt back to the prom, 613 not auto-reboot. */ 614 if (alpha_using_srm) { 615 struct sysrq_key_op *op = __sysrq_get_key_op('b'); 616 op->handler = (void *) machine_halt; 617 } 618 #endif 619 620 /* 621 * Identify and reconfigure for the current system. 622 */ 623 cpu = (struct percpu_struct*)((char*)hwrpb + hwrpb->processor_offset); 624 625 get_sysnames(hwrpb->sys_type, hwrpb->sys_variation, 626 cpu->type, &type_name, &var_name); 627 if (*var_name == '0') 628 var_name = ""; 629 630 if (!vec) { 631 vec = get_sysvec(hwrpb->sys_type, hwrpb->sys_variation, 632 cpu->type); 633 } 634 635 if (!vec) { 636 panic("Unsupported system type: %s%s%s (%ld %ld)\n", 637 type_name, (*var_name ? " variation " : ""), var_name, 638 hwrpb->sys_type, hwrpb->sys_variation); 639 } 640 if (vec != &alpha_mv) { 641 alpha_mv = *vec; 642 } 643 644 printk("Booting " 645 #ifdef CONFIG_ALPHA_GENERIC 646 "GENERIC " 647 #endif 648 "on %s%s%s using machine vector %s from %s\n", 649 type_name, (*var_name ? " variation " : ""), 650 var_name, alpha_mv.vector_name, 651 (alpha_using_srm ? "SRM" : "MILO")); 652 653 printk("Major Options: " 654 #ifdef CONFIG_SMP 655 "SMP " 656 #endif 657 #ifdef CONFIG_ALPHA_EV56 658 "EV56 " 659 #endif 660 #ifdef CONFIG_ALPHA_EV67 661 "EV67 " 662 #endif 663 #ifdef CONFIG_ALPHA_LEGACY_START_ADDRESS 664 "LEGACY_START " 665 #endif 666 #ifdef CONFIG_VERBOSE_MCHECK 667 "VERBOSE_MCHECK " 668 #endif 669 670 #ifdef CONFIG_DISCONTIGMEM 671 "DISCONTIGMEM " 672 #ifdef CONFIG_NUMA 673 "NUMA " 674 #endif 675 #endif 676 677 #ifdef CONFIG_DEBUG_SPINLOCK 678 "DEBUG_SPINLOCK " 679 #endif 680 #ifdef CONFIG_MAGIC_SYSRQ 681 "MAGIC_SYSRQ " 682 #endif 683 "\n"); 684 685 printk("Command line: %s\n", command_line); 686 687 /* 688 * Sync up the HAE. 689 * Save the SRM's current value for restoration. 690 */ 691 srm_hae = *alpha_mv.hae_register; 692 __set_hae(alpha_mv.hae_cache); 693 694 /* Reset enable correctable error reports. */ 695 wrmces(0x7); 696 697 /* Find our memory. */ 698 setup_memory(kernel_end); 699 700 /* First guess at cpu cache sizes. Do this before init_arch. */ 701 determine_cpu_caches(cpu->type); 702 703 /* Initialize the machine. Usually has to do with setting up 704 DMA windows and the like. */ 705 if (alpha_mv.init_arch) 706 alpha_mv.init_arch(); 707 708 /* Reserve standard resources. */ 709 reserve_std_resources(); 710 711 /* 712 * Give us a default console. TGA users will see nothing until 713 * chr_dev_init is called, rather late in the boot sequence. 714 */ 715 716 #ifdef CONFIG_VT 717 #if defined(CONFIG_VGA_CONSOLE) 718 conswitchp = &vga_con; 719 #elif defined(CONFIG_DUMMY_CONSOLE) 720 conswitchp = &dummy_con; 721 #endif 722 #endif 723 724 /* Default root filesystem to sda2. */ 725 ROOT_DEV = Root_SDA2; 726 727 #ifdef CONFIG_EISA 728 /* FIXME: only set this when we actually have EISA in this box? */ 729 EISA_bus = 1; 730 #endif 731 732 /* 733 * Check ASN in HWRPB for validity, report if bad. 734 * FIXME: how was this failing? Should we trust it instead, 735 * and copy the value into alpha_mv.max_asn? 736 */ 737 738 if (hwrpb->max_asn != MAX_ASN) { 739 printk("Max ASN from HWRPB is bad (0x%lx)\n", hwrpb->max_asn); 740 } 741 742 /* 743 * Identify the flock of penguins. 744 */ 745 746 #ifdef CONFIG_SMP 747 setup_smp(); 748 #endif 749 paging_init(); 750 } 751 752 static char sys_unknown[] = "Unknown"; 753 static char systype_names[][16] = { 754 "0", 755 "ADU", "Cobra", "Ruby", "Flamingo", "Mannequin", "Jensen", 756 "Pelican", "Morgan", "Sable", "Medulla", "Noname", 757 "Turbolaser", "Avanti", "Mustang", "Alcor", "Tradewind", 758 "Mikasa", "EB64", "EB66", "EB64+", "AlphaBook1", 759 "Rawhide", "K2", "Lynx", "XL", "EB164", "Noritake", 760 "Cortex", "29", "Miata", "XXM", "Takara", "Yukon", 761 "Tsunami", "Wildfire", "CUSCO", "Eiger", "Titan", "Marvel" 762 }; 763 764 static char unofficial_names[][8] = {"100", "Ruffian"}; 765 766 static char api_names[][16] = {"200", "Nautilus"}; 767 768 static char eb164_names[][8] = {"EB164", "PC164", "LX164", "SX164", "RX164"}; 769 static int eb164_indices[] = {0,0,0,1,1,1,1,1,2,2,2,2,3,3,3,3,4}; 770 771 static char alcor_names[][16] = {"Alcor", "Maverick", "Bret"}; 772 static int alcor_indices[] = {0,0,0,1,1,1,0,0,0,0,0,0,2,2,2,2,2,2}; 773 774 static char eb64p_names[][16] = {"EB64+", "Cabriolet", "AlphaPCI64"}; 775 static int eb64p_indices[] = {0,0,1,2}; 776 777 static char eb66_names[][8] = {"EB66", "EB66+"}; 778 static int eb66_indices[] = {0,0,1}; 779 780 static char marvel_names[][16] = { 781 "Marvel/EV7" 782 }; 783 static int marvel_indices[] = { 0 }; 784 785 static char rawhide_names[][16] = { 786 "Dodge", "Wrangler", "Durango", "Tincup", "DaVinci" 787 }; 788 static int rawhide_indices[] = {0,0,0,1,1,2,2,3,3,4,4}; 789 790 static char titan_names[][16] = { 791 "DEFAULT", "Privateer", "Falcon", "Granite" 792 }; 793 static int titan_indices[] = {0,1,2,2,3}; 794 795 static char tsunami_names[][16] = { 796 "0", "DP264", "Warhol", "Windjammer", "Monet", "Clipper", 797 "Goldrush", "Webbrick", "Catamaran", "Brisbane", "Melbourne", 798 "Flying Clipper", "Shark" 799 }; 800 static int tsunami_indices[] = {0,1,2,3,4,5,6,7,8,9,10,11,12}; 801 802 static struct alpha_machine_vector * __init 803 get_sysvec(unsigned long type, unsigned long variation, unsigned long cpu) 804 { 805 static struct alpha_machine_vector *systype_vecs[] __initdata = 806 { 807 NULL, /* 0 */ 808 NULL, /* ADU */ 809 NULL, /* Cobra */ 810 NULL, /* Ruby */ 811 NULL, /* Flamingo */ 812 NULL, /* Mannequin */ 813 &jensen_mv, 814 NULL, /* Pelican */ 815 NULL, /* Morgan */ 816 NULL, /* Sable -- see below. */ 817 NULL, /* Medulla */ 818 &noname_mv, 819 NULL, /* Turbolaser */ 820 &avanti_mv, 821 NULL, /* Mustang */ 822 NULL, /* Alcor, Bret, Maverick. HWRPB inaccurate? */ 823 NULL, /* Tradewind */ 824 NULL, /* Mikasa -- see below. */ 825 NULL, /* EB64 */ 826 NULL, /* EB66 -- see variation. */ 827 NULL, /* EB64+ -- see variation. */ 828 &alphabook1_mv, 829 &rawhide_mv, 830 NULL, /* K2 */ 831 &lynx_mv, /* Lynx */ 832 &xl_mv, 833 NULL, /* EB164 -- see variation. */ 834 NULL, /* Noritake -- see below. */ 835 NULL, /* Cortex */ 836 NULL, /* 29 */ 837 &miata_mv, 838 NULL, /* XXM */ 839 &takara_mv, 840 NULL, /* Yukon */ 841 NULL, /* Tsunami -- see variation. */ 842 &wildfire_mv, /* Wildfire */ 843 NULL, /* CUSCO */ 844 &eiger_mv, /* Eiger */ 845 NULL, /* Titan */ 846 NULL, /* Marvel */ 847 }; 848 849 static struct alpha_machine_vector *unofficial_vecs[] __initdata = 850 { 851 NULL, /* 100 */ 852 &ruffian_mv, 853 }; 854 855 static struct alpha_machine_vector *api_vecs[] __initdata = 856 { 857 NULL, /* 200 */ 858 &nautilus_mv, 859 }; 860 861 static struct alpha_machine_vector *alcor_vecs[] __initdata = 862 { 863 &alcor_mv, &xlt_mv, &xlt_mv 864 }; 865 866 static struct alpha_machine_vector *eb164_vecs[] __initdata = 867 { 868 &eb164_mv, &pc164_mv, &lx164_mv, &sx164_mv, &rx164_mv 869 }; 870 871 static struct alpha_machine_vector *eb64p_vecs[] __initdata = 872 { 873 &eb64p_mv, 874 &cabriolet_mv, 875 &cabriolet_mv /* AlphaPCI64 */ 876 }; 877 878 static struct alpha_machine_vector *eb66_vecs[] __initdata = 879 { 880 &eb66_mv, 881 &eb66p_mv 882 }; 883 884 static struct alpha_machine_vector *marvel_vecs[] __initdata = 885 { 886 &marvel_ev7_mv, 887 }; 888 889 static struct alpha_machine_vector *titan_vecs[] __initdata = 890 { 891 &titan_mv, /* default */ 892 &privateer_mv, /* privateer */ 893 &titan_mv, /* falcon */ 894 &privateer_mv, /* granite */ 895 }; 896 897 static struct alpha_machine_vector *tsunami_vecs[] __initdata = 898 { 899 NULL, 900 &dp264_mv, /* dp264 */ 901 &dp264_mv, /* warhol */ 902 &dp264_mv, /* windjammer */ 903 &monet_mv, /* monet */ 904 &clipper_mv, /* clipper */ 905 &dp264_mv, /* goldrush */ 906 &webbrick_mv, /* webbrick */ 907 &dp264_mv, /* catamaran */ 908 NULL, /* brisbane? */ 909 NULL, /* melbourne? */ 910 NULL, /* flying clipper? */ 911 &shark_mv, /* shark */ 912 }; 913 914 /* ??? Do we need to distinguish between Rawhides? */ 915 916 struct alpha_machine_vector *vec; 917 918 /* Search the system tables first... */ 919 vec = NULL; 920 if (type < ARRAY_SIZE(systype_vecs)) { 921 vec = systype_vecs[type]; 922 } else if ((type > ST_API_BIAS) && 923 (type - ST_API_BIAS) < ARRAY_SIZE(api_vecs)) { 924 vec = api_vecs[type - ST_API_BIAS]; 925 } else if ((type > ST_UNOFFICIAL_BIAS) && 926 (type - ST_UNOFFICIAL_BIAS) < ARRAY_SIZE(unofficial_vecs)) { 927 vec = unofficial_vecs[type - ST_UNOFFICIAL_BIAS]; 928 } 929 930 /* If we've not found one, try for a variation. */ 931 932 if (!vec) { 933 /* Member ID is a bit-field. */ 934 unsigned long member = (variation >> 10) & 0x3f; 935 936 cpu &= 0xffffffff; /* make it usable */ 937 938 switch (type) { 939 case ST_DEC_ALCOR: 940 if (member < ARRAY_SIZE(alcor_indices)) 941 vec = alcor_vecs[alcor_indices[member]]; 942 break; 943 case ST_DEC_EB164: 944 if (member < ARRAY_SIZE(eb164_indices)) 945 vec = eb164_vecs[eb164_indices[member]]; 946 /* PC164 may show as EB164 variation with EV56 CPU, 947 but, since no true EB164 had anything but EV5... */ 948 if (vec == &eb164_mv && cpu == EV56_CPU) 949 vec = &pc164_mv; 950 break; 951 case ST_DEC_EB64P: 952 if (member < ARRAY_SIZE(eb64p_indices)) 953 vec = eb64p_vecs[eb64p_indices[member]]; 954 break; 955 case ST_DEC_EB66: 956 if (member < ARRAY_SIZE(eb66_indices)) 957 vec = eb66_vecs[eb66_indices[member]]; 958 break; 959 case ST_DEC_MARVEL: 960 if (member < ARRAY_SIZE(marvel_indices)) 961 vec = marvel_vecs[marvel_indices[member]]; 962 break; 963 case ST_DEC_TITAN: 964 vec = titan_vecs[0]; /* default */ 965 if (member < ARRAY_SIZE(titan_indices)) 966 vec = titan_vecs[titan_indices[member]]; 967 break; 968 case ST_DEC_TSUNAMI: 969 if (member < ARRAY_SIZE(tsunami_indices)) 970 vec = tsunami_vecs[tsunami_indices[member]]; 971 break; 972 case ST_DEC_1000: 973 if (cpu == EV5_CPU || cpu == EV56_CPU) 974 vec = &mikasa_primo_mv; 975 else 976 vec = &mikasa_mv; 977 break; 978 case ST_DEC_NORITAKE: 979 if (cpu == EV5_CPU || cpu == EV56_CPU) 980 vec = &noritake_primo_mv; 981 else 982 vec = &noritake_mv; 983 break; 984 case ST_DEC_2100_A500: 985 if (cpu == EV5_CPU || cpu == EV56_CPU) 986 vec = &sable_gamma_mv; 987 else 988 vec = &sable_mv; 989 break; 990 } 991 } 992 return vec; 993 } 994 995 static struct alpha_machine_vector * __init 996 get_sysvec_byname(const char *name) 997 { 998 static struct alpha_machine_vector *all_vecs[] __initdata = 999 { 1000 &alcor_mv, 1001 &alphabook1_mv, 1002 &avanti_mv, 1003 &cabriolet_mv, 1004 &clipper_mv, 1005 &dp264_mv, 1006 &eb164_mv, 1007 &eb64p_mv, 1008 &eb66_mv, 1009 &eb66p_mv, 1010 &eiger_mv, 1011 &jensen_mv, 1012 &lx164_mv, 1013 &lynx_mv, 1014 &miata_mv, 1015 &mikasa_mv, 1016 &mikasa_primo_mv, 1017 &monet_mv, 1018 &nautilus_mv, 1019 &noname_mv, 1020 &noritake_mv, 1021 &noritake_primo_mv, 1022 &p2k_mv, 1023 &pc164_mv, 1024 &privateer_mv, 1025 &rawhide_mv, 1026 &ruffian_mv, 1027 &rx164_mv, 1028 &sable_mv, 1029 &sable_gamma_mv, 1030 &shark_mv, 1031 &sx164_mv, 1032 &takara_mv, 1033 &webbrick_mv, 1034 &wildfire_mv, 1035 &xl_mv, 1036 &xlt_mv 1037 }; 1038 1039 size_t i; 1040 1041 for (i = 0; i < ARRAY_SIZE(all_vecs); ++i) { 1042 struct alpha_machine_vector *mv = all_vecs[i]; 1043 if (strcasecmp(mv->vector_name, name) == 0) 1044 return mv; 1045 } 1046 return NULL; 1047 } 1048 1049 static void 1050 get_sysnames(unsigned long type, unsigned long variation, unsigned long cpu, 1051 char **type_name, char **variation_name) 1052 { 1053 unsigned long member; 1054 1055 /* If not in the tables, make it UNKNOWN, 1056 else set type name to family */ 1057 if (type < ARRAY_SIZE(systype_names)) { 1058 *type_name = systype_names[type]; 1059 } else if ((type > ST_API_BIAS) && 1060 (type - ST_API_BIAS) < ARRAY_SIZE(api_names)) { 1061 *type_name = api_names[type - ST_API_BIAS]; 1062 } else if ((type > ST_UNOFFICIAL_BIAS) && 1063 (type - ST_UNOFFICIAL_BIAS) < ARRAY_SIZE(unofficial_names)) { 1064 *type_name = unofficial_names[type - ST_UNOFFICIAL_BIAS]; 1065 } else { 1066 *type_name = sys_unknown; 1067 *variation_name = sys_unknown; 1068 return; 1069 } 1070 1071 /* Set variation to "0"; if variation is zero, done. */ 1072 *variation_name = systype_names[0]; 1073 if (variation == 0) { 1074 return; 1075 } 1076 1077 member = (variation >> 10) & 0x3f; /* member ID is a bit-field */ 1078 1079 cpu &= 0xffffffff; /* make it usable */ 1080 1081 switch (type) { /* select by family */ 1082 default: /* default to variation "0" for now */ 1083 break; 1084 case ST_DEC_EB164: 1085 if (member < ARRAY_SIZE(eb164_indices)) 1086 *variation_name = eb164_names[eb164_indices[member]]; 1087 /* PC164 may show as EB164 variation, but with EV56 CPU, 1088 so, since no true EB164 had anything but EV5... */ 1089 if (eb164_indices[member] == 0 && cpu == EV56_CPU) 1090 *variation_name = eb164_names[1]; /* make it PC164 */ 1091 break; 1092 case ST_DEC_ALCOR: 1093 if (member < ARRAY_SIZE(alcor_indices)) 1094 *variation_name = alcor_names[alcor_indices[member]]; 1095 break; 1096 case ST_DEC_EB64P: 1097 if (member < ARRAY_SIZE(eb64p_indices)) 1098 *variation_name = eb64p_names[eb64p_indices[member]]; 1099 break; 1100 case ST_DEC_EB66: 1101 if (member < ARRAY_SIZE(eb66_indices)) 1102 *variation_name = eb66_names[eb66_indices[member]]; 1103 break; 1104 case ST_DEC_MARVEL: 1105 if (member < ARRAY_SIZE(marvel_indices)) 1106 *variation_name = marvel_names[marvel_indices[member]]; 1107 break; 1108 case ST_DEC_RAWHIDE: 1109 if (member < ARRAY_SIZE(rawhide_indices)) 1110 *variation_name = rawhide_names[rawhide_indices[member]]; 1111 break; 1112 case ST_DEC_TITAN: 1113 *variation_name = titan_names[0]; /* default */ 1114 if (member < ARRAY_SIZE(titan_indices)) 1115 *variation_name = titan_names[titan_indices[member]]; 1116 break; 1117 case ST_DEC_TSUNAMI: 1118 if (member < ARRAY_SIZE(tsunami_indices)) 1119 *variation_name = tsunami_names[tsunami_indices[member]]; 1120 break; 1121 } 1122 } 1123 1124 /* 1125 * A change was made to the HWRPB via an ECO and the following code 1126 * tracks a part of the ECO. In HWRPB versions less than 5, the ECO 1127 * was not implemented in the console firmware. If it's revision 5 or 1128 * greater we can get the name of the platform as an ASCII string from 1129 * the HWRPB. That's what this function does. It checks the revision 1130 * level and if the string is in the HWRPB it returns the address of 1131 * the string--a pointer to the name of the platform. 1132 * 1133 * Returns: 1134 * - Pointer to a ASCII string if it's in the HWRPB 1135 * - Pointer to a blank string if the data is not in the HWRPB. 1136 */ 1137 1138 static char * 1139 platform_string(void) 1140 { 1141 struct dsr_struct *dsr; 1142 static char unk_system_string[] = "N/A"; 1143 1144 /* Go to the console for the string pointer. 1145 * If the rpb_vers is not 5 or greater the rpb 1146 * is old and does not have this data in it. 1147 */ 1148 if (hwrpb->revision < 5) 1149 return (unk_system_string); 1150 else { 1151 /* The Dynamic System Recognition struct 1152 * has the system platform name starting 1153 * after the character count of the string. 1154 */ 1155 dsr = ((struct dsr_struct *) 1156 ((char *)hwrpb + hwrpb->dsr_offset)); 1157 return ((char *)dsr + (dsr->sysname_off + 1158 sizeof(long))); 1159 } 1160 } 1161 1162 static int 1163 get_nr_processors(struct percpu_struct *cpubase, unsigned long num) 1164 { 1165 struct percpu_struct *cpu; 1166 unsigned long i; 1167 int count = 0; 1168 1169 for (i = 0; i < num; i++) { 1170 cpu = (struct percpu_struct *) 1171 ((char *)cpubase + i*hwrpb->processor_size); 1172 if ((cpu->flags & 0x1cc) == 0x1cc) 1173 count++; 1174 } 1175 return count; 1176 } 1177 1178 static void 1179 show_cache_size (struct seq_file *f, const char *which, int shape) 1180 { 1181 if (shape == -1) 1182 seq_printf (f, "%s\t\t: n/a\n", which); 1183 else if (shape == 0) 1184 seq_printf (f, "%s\t\t: unknown\n", which); 1185 else 1186 seq_printf (f, "%s\t\t: %dK, %d-way, %db line\n", 1187 which, shape >> 10, shape & 15, 1188 1 << ((shape >> 4) & 15)); 1189 } 1190 1191 static int 1192 show_cpuinfo(struct seq_file *f, void *slot) 1193 { 1194 extern struct unaligned_stat { 1195 unsigned long count, va, pc; 1196 } unaligned[2]; 1197 1198 static char cpu_names[][8] = { 1199 "EV3", "EV4", "Simulate", "LCA4", "EV5", "EV45", "EV56", 1200 "EV6", "PCA56", "PCA57", "EV67", "EV68CB", "EV68AL", 1201 "EV68CX", "EV7", "EV79", "EV69" 1202 }; 1203 1204 struct percpu_struct *cpu = slot; 1205 unsigned int cpu_index; 1206 char *cpu_name; 1207 char *systype_name; 1208 char *sysvariation_name; 1209 int nr_processors; 1210 1211 cpu_index = (unsigned) (cpu->type - 1); 1212 cpu_name = "Unknown"; 1213 if (cpu_index < ARRAY_SIZE(cpu_names)) 1214 cpu_name = cpu_names[cpu_index]; 1215 1216 get_sysnames(hwrpb->sys_type, hwrpb->sys_variation, 1217 cpu->type, &systype_name, &sysvariation_name); 1218 1219 nr_processors = get_nr_processors(cpu, hwrpb->nr_processors); 1220 1221 seq_printf(f, "cpu\t\t\t: Alpha\n" 1222 "cpu model\t\t: %s\n" 1223 "cpu variation\t\t: %ld\n" 1224 "cpu revision\t\t: %ld\n" 1225 "cpu serial number\t: %s\n" 1226 "system type\t\t: %s\n" 1227 "system variation\t: %s\n" 1228 "system revision\t\t: %ld\n" 1229 "system serial number\t: %s\n" 1230 "cycle frequency [Hz]\t: %lu %s\n" 1231 "timer frequency [Hz]\t: %lu.%02lu\n" 1232 "page size [bytes]\t: %ld\n" 1233 "phys. address bits\t: %ld\n" 1234 "max. addr. space #\t: %ld\n" 1235 "BogoMIPS\t\t: %lu.%02lu\n" 1236 "kernel unaligned acc\t: %ld (pc=%lx,va=%lx)\n" 1237 "user unaligned acc\t: %ld (pc=%lx,va=%lx)\n" 1238 "platform string\t\t: %s\n" 1239 "cpus detected\t\t: %d\n", 1240 cpu_name, cpu->variation, cpu->revision, 1241 (char*)cpu->serial_no, 1242 systype_name, sysvariation_name, hwrpb->sys_revision, 1243 (char*)hwrpb->ssn, 1244 est_cycle_freq ? : hwrpb->cycle_freq, 1245 est_cycle_freq ? "est." : "", 1246 hwrpb->intr_freq / 4096, 1247 (100 * hwrpb->intr_freq / 4096) % 100, 1248 hwrpb->pagesize, 1249 hwrpb->pa_bits, 1250 hwrpb->max_asn, 1251 loops_per_jiffy / (500000/HZ), 1252 (loops_per_jiffy / (5000/HZ)) % 100, 1253 unaligned[0].count, unaligned[0].pc, unaligned[0].va, 1254 unaligned[1].count, unaligned[1].pc, unaligned[1].va, 1255 platform_string(), nr_processors); 1256 1257 #ifdef CONFIG_SMP 1258 seq_printf(f, "cpus active\t\t: %u\n" 1259 "cpu active mask\t\t: %016lx\n", 1260 num_online_cpus(), cpumask_bits(cpu_possible_mask)[0]); 1261 #endif 1262 1263 show_cache_size (f, "L1 Icache", alpha_l1i_cacheshape); 1264 show_cache_size (f, "L1 Dcache", alpha_l1d_cacheshape); 1265 show_cache_size (f, "L2 cache", alpha_l2_cacheshape); 1266 show_cache_size (f, "L3 cache", alpha_l3_cacheshape); 1267 1268 return 0; 1269 } 1270 1271 static int __init 1272 read_mem_block(int *addr, int stride, int size) 1273 { 1274 long nloads = size / stride, cnt, tmp; 1275 1276 __asm__ __volatile__( 1277 " rpcc %0\n" 1278 "1: ldl %3,0(%2)\n" 1279 " subq %1,1,%1\n" 1280 /* Next two XORs introduce an explicit data dependency between 1281 consecutive loads in the loop, which will give us true load 1282 latency. */ 1283 " xor %3,%2,%2\n" 1284 " xor %3,%2,%2\n" 1285 " addq %2,%4,%2\n" 1286 " bne %1,1b\n" 1287 " rpcc %3\n" 1288 " subl %3,%0,%0\n" 1289 : "=&r" (cnt), "=&r" (nloads), "=&r" (addr), "=&r" (tmp) 1290 : "r" (stride), "1" (nloads), "2" (addr)); 1291 1292 return cnt / (size / stride); 1293 } 1294 1295 #define CSHAPE(totalsize, linesize, assoc) \ 1296 ((totalsize & ~0xff) | (linesize << 4) | assoc) 1297 1298 /* ??? EV5 supports up to 64M, but did the systems with more than 1299 16M of BCACHE ever exist? */ 1300 #define MAX_BCACHE_SIZE 16*1024*1024 1301 1302 /* Note that the offchip caches are direct mapped on all Alphas. */ 1303 static int __init 1304 external_cache_probe(int minsize, int width) 1305 { 1306 int cycles, prev_cycles = 1000000; 1307 int stride = 1 << width; 1308 long size = minsize, maxsize = MAX_BCACHE_SIZE * 2; 1309 1310 if (maxsize > (max_low_pfn + 1) << PAGE_SHIFT) 1311 maxsize = 1 << (ilog2(max_low_pfn + 1) + PAGE_SHIFT); 1312 1313 /* Get the first block cached. */ 1314 read_mem_block(__va(0), stride, size); 1315 1316 while (size < maxsize) { 1317 /* Get an average load latency in cycles. */ 1318 cycles = read_mem_block(__va(0), stride, size); 1319 if (cycles > prev_cycles * 2) { 1320 /* Fine, we exceed the cache. */ 1321 printk("%ldK Bcache detected; load hit latency %d " 1322 "cycles, load miss latency %d cycles\n", 1323 size >> 11, prev_cycles, cycles); 1324 return CSHAPE(size >> 1, width, 1); 1325 } 1326 /* Try to get the next block cached. */ 1327 read_mem_block(__va(size), stride, size); 1328 prev_cycles = cycles; 1329 size <<= 1; 1330 } 1331 return -1; /* No BCACHE found. */ 1332 } 1333 1334 static void __init 1335 determine_cpu_caches (unsigned int cpu_type) 1336 { 1337 int L1I, L1D, L2, L3; 1338 1339 switch (cpu_type) { 1340 case EV4_CPU: 1341 case EV45_CPU: 1342 { 1343 if (cpu_type == EV4_CPU) 1344 L1I = CSHAPE(8*1024, 5, 1); 1345 else 1346 L1I = CSHAPE(16*1024, 5, 1); 1347 L1D = L1I; 1348 L3 = -1; 1349 1350 /* BIU_CTL is a write-only Abox register. PALcode has a 1351 shadow copy, and may be available from some versions 1352 of the CSERVE PALcall. If we can get it, then 1353 1354 unsigned long biu_ctl, size; 1355 size = 128*1024 * (1 << ((biu_ctl >> 28) & 7)); 1356 L2 = CSHAPE (size, 5, 1); 1357 1358 Unfortunately, we can't rely on that. 1359 */ 1360 L2 = external_cache_probe(128*1024, 5); 1361 break; 1362 } 1363 1364 case LCA4_CPU: 1365 { 1366 unsigned long car, size; 1367 1368 L1I = L1D = CSHAPE(8*1024, 5, 1); 1369 L3 = -1; 1370 1371 car = *(vuip) phys_to_virt (0x120000078UL); 1372 size = 64*1024 * (1 << ((car >> 5) & 7)); 1373 /* No typo -- 8 byte cacheline size. Whodathunk. */ 1374 L2 = (car & 1 ? CSHAPE (size, 3, 1) : -1); 1375 break; 1376 } 1377 1378 case EV5_CPU: 1379 case EV56_CPU: 1380 { 1381 unsigned long sc_ctl, width; 1382 1383 L1I = L1D = CSHAPE(8*1024, 5, 1); 1384 1385 /* Check the line size of the Scache. */ 1386 sc_ctl = *(vulp) phys_to_virt (0xfffff000a8UL); 1387 width = sc_ctl & 0x1000 ? 6 : 5; 1388 L2 = CSHAPE (96*1024, width, 3); 1389 1390 /* BC_CONTROL and BC_CONFIG are write-only IPRs. PALcode 1391 has a shadow copy, and may be available from some versions 1392 of the CSERVE PALcall. If we can get it, then 1393 1394 unsigned long bc_control, bc_config, size; 1395 size = 1024*1024 * (1 << ((bc_config & 7) - 1)); 1396 L3 = (bc_control & 1 ? CSHAPE (size, width, 1) : -1); 1397 1398 Unfortunately, we can't rely on that. 1399 */ 1400 L3 = external_cache_probe(1024*1024, width); 1401 break; 1402 } 1403 1404 case PCA56_CPU: 1405 case PCA57_CPU: 1406 { 1407 if (cpu_type == PCA56_CPU) { 1408 L1I = CSHAPE(16*1024, 6, 1); 1409 L1D = CSHAPE(8*1024, 5, 1); 1410 } else { 1411 L1I = CSHAPE(32*1024, 6, 2); 1412 L1D = CSHAPE(16*1024, 5, 1); 1413 } 1414 L3 = -1; 1415 1416 #if 0 1417 unsigned long cbox_config, size; 1418 1419 cbox_config = *(vulp) phys_to_virt (0xfffff00008UL); 1420 size = 512*1024 * (1 << ((cbox_config >> 12) & 3)); 1421 1422 L2 = ((cbox_config >> 31) & 1 ? CSHAPE (size, 6, 1) : -1); 1423 #else 1424 L2 = external_cache_probe(512*1024, 6); 1425 #endif 1426 break; 1427 } 1428 1429 case EV6_CPU: 1430 case EV67_CPU: 1431 case EV68CB_CPU: 1432 case EV68AL_CPU: 1433 case EV68CX_CPU: 1434 case EV69_CPU: 1435 L1I = L1D = CSHAPE(64*1024, 6, 2); 1436 L2 = external_cache_probe(1024*1024, 6); 1437 L3 = -1; 1438 break; 1439 1440 case EV7_CPU: 1441 case EV79_CPU: 1442 L1I = L1D = CSHAPE(64*1024, 6, 2); 1443 L2 = CSHAPE(7*1024*1024/4, 6, 7); 1444 L3 = -1; 1445 break; 1446 1447 default: 1448 /* Nothing known about this cpu type. */ 1449 L1I = L1D = L2 = L3 = 0; 1450 break; 1451 } 1452 1453 alpha_l1i_cacheshape = L1I; 1454 alpha_l1d_cacheshape = L1D; 1455 alpha_l2_cacheshape = L2; 1456 alpha_l3_cacheshape = L3; 1457 } 1458 1459 /* 1460 * We show only CPU #0 info. 1461 */ 1462 static void * 1463 c_start(struct seq_file *f, loff_t *pos) 1464 { 1465 return *pos ? NULL : (char *)hwrpb + hwrpb->processor_offset; 1466 } 1467 1468 static void * 1469 c_next(struct seq_file *f, void *v, loff_t *pos) 1470 { 1471 return NULL; 1472 } 1473 1474 static void 1475 c_stop(struct seq_file *f, void *v) 1476 { 1477 } 1478 1479 const struct seq_operations cpuinfo_op = { 1480 .start = c_start, 1481 .next = c_next, 1482 .stop = c_stop, 1483 .show = show_cpuinfo, 1484 }; 1485 1486 1487 static int 1488 alpha_panic_event(struct notifier_block *this, unsigned long event, void *ptr) 1489 { 1490 #if 1 1491 /* FIXME FIXME FIXME */ 1492 /* If we are using SRM and serial console, just hard halt here. */ 1493 if (alpha_using_srm && srmcons_output) 1494 __halt(); 1495 #endif 1496 return NOTIFY_DONE; 1497 } 1498 1499 static __init int add_pcspkr(void) 1500 { 1501 struct platform_device *pd; 1502 int ret; 1503 1504 pd = platform_device_alloc("pcspkr", -1); 1505 if (!pd) 1506 return -ENOMEM; 1507 1508 ret = platform_device_add(pd); 1509 if (ret) 1510 platform_device_put(pd); 1511 1512 return ret; 1513 } 1514 device_initcall(add_pcspkr); 1515