/*
 * 64-bit pSeries and RS/6000 setup code.
 *
 * Copyright (C) 1995 Linus Torvalds
 * Adapted from 'alpha' version by Gary Thomas
 * Modified by Cort Dougan (cort@cs.nmt.edu)
 * Modified by PPC64 Team, IBM Corp
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

/*
 * bootup setup stuff..
 */

#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/user.h>
#include <linux/tty.h>
#include <linux/major.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/console.h>
#include <linux/pci.h>
#include <linux/utsname.h>
#include <linux/adb.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/root_dev.h>
#include <linux/of.h>
#include <linux/of_pci.h>

#include <asm/mmu.h>
#include <asm/processor.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/pci-bridge.h>
#include <asm/iommu.h>
#include <asm/dma.h>
#include <asm/machdep.h>
#include <asm/irq.h>
#include <asm/time.h>
#include <asm/nvram.h>
#include <asm/pmc.h>
#include <asm/xics.h>
#include <asm/ppc-pci.h>
#include <asm/i8259.h>
#include <asm/udbg.h>
#include <asm/smp.h>
#include <asm/firmware.h>
#include <asm/eeh.h>
#include <asm/reg.h>
#include <asm/plpar_wrappers.h>
#include <asm/kexec.h>
#include <asm/isa-bridge.h>

#include "pseries.h"

int CMO_PrPSP = -1;
int CMO_SecPSP = -1;
unsigned long CMO_PageSize = (ASM_CONST(1) << IOMMU_PAGE_SHIFT_4K);
EXPORT_SYMBOL(CMO_PageSize);

int fwnmi_active;	/* TRUE if an FWNMI handler is present */

static void pSeries_show_cpuinfo(struct seq_file *m)
{
	struct device_node *root;
	const char *model = "";

	root = of_find_node_by_path("/");
	if (root)
		model = of_get_property(root, "model", NULL);
	seq_printf(m, "machine\t\t: CHRP %s\n", model);
	of_node_put(root);
	if (radix_enabled())
		seq_printf(m, "MMU\t\t: Radix\n");
	else
		seq_printf(m, "MMU\t\t: Hash\n");
}

/* Initialize firmware assisted non-maskable interrupts if
 * the firmware supports this feature.
 */
static void __init fwnmi_init(void)
{
	unsigned long system_reset_addr, machine_check_addr;

	int ibm_nmi_register = rtas_token("ibm,nmi-register");
	if (ibm_nmi_register == RTAS_UNKNOWN_SERVICE)
		return;

	/* If the kernel's not linked at zero we point the firmware at low
	 * addresses anyway, and use a trampoline to get to the real code.
	 */
	system_reset_addr = __pa(system_reset_fwnmi) - PHYSICAL_START;
	machine_check_addr = __pa(machine_check_fwnmi) - PHYSICAL_START;

	if (0 == rtas_call(ibm_nmi_register, 2, 1, NULL, system_reset_addr,
			   machine_check_addr))
		fwnmi_active = 1;
}

static void pseries_8259_cascade(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned int cascade_irq = i8259_irq();

	if (cascade_irq)
		generic_handle_irq(cascade_irq);

	chip->irq_eoi(&desc->irq_data);
}

static void __init pseries_setup_i8259_cascade(void)
{
	struct device_node *np, *old, *found = NULL;
	unsigned int cascade;
	const u32 *addrp;
	unsigned long intack = 0;
	int naddr;

	for_each_node_by_type(np, "interrupt-controller") {
		if (of_device_is_compatible(np, "chrp,iic")) {
			found = np;
			break;
		}
	}

	if (found == NULL) {
		printk(KERN_DEBUG "pic: no ISA interrupt controller\n");
		return;
	}

	cascade = irq_of_parse_and_map(found, 0);
	if (!cascade) {
		printk(KERN_ERR "pic: failed to map cascade interrupt");
		return;
	}
	pr_debug("pic: cascade mapped to irq %d\n", cascade);

	for (old = of_node_get(found); old != NULL ; old = np) {
		np = of_get_parent(old);
		of_node_put(old);
		if (np == NULL)
			break;
		if (strcmp(np->name, "pci") != 0)
			continue;
		addrp = of_get_property(np, "8259-interrupt-acknowledge", NULL);
		if (addrp == NULL)
			continue;
		naddr = of_n_addr_cells(np);
		intack = addrp[naddr-1];
		if (naddr > 1)
			intack |= ((unsigned long)addrp[naddr-2]) << 32;
	}
	if (intack)
		printk(KERN_DEBUG "pic: PCI 8259 intack at 0x%016lx\n", intack);
	i8259_init(found, intack);
	of_node_put(found);
	irq_set_chained_handler(cascade, pseries_8259_cascade);
}

static void __init pseries_init_irq(void)
{
	xics_init();
	pseries_setup_i8259_cascade();
}

static void pseries_lpar_enable_pmcs(void)
{
	unsigned long set, reset;

	set = 1UL << 63;
	reset = 0;
	plpar_hcall_norets(H_PERFMON, set, reset);
}

static int pci_dn_reconfig_notifier(struct notifier_block *nb, unsigned long action, void *data)
{
	struct of_reconfig_data *rd = data;
	struct device_node *parent, *np = rd->dn;
	struct pci_dn *pdn;
	int err = NOTIFY_OK;

	switch (action) {
	case OF_RECONFIG_ATTACH_NODE:
		parent = of_get_parent(np);
		pdn = parent ? PCI_DN(parent) : NULL;
		if (pdn)
			pci_add_device_node_info(pdn->phb, np);

		of_node_put(parent);
		break;
	case OF_RECONFIG_DETACH_NODE:
		pdn = PCI_DN(np);
		if (pdn)
			list_del(&pdn->list);
		break;
	default:
		err = NOTIFY_DONE;
		break;
	}
	return err;
}

static struct notifier_block pci_dn_reconfig_nb = {
	.notifier_call = pci_dn_reconfig_notifier,
};

struct kmem_cache *dtl_cache;

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
/*
 * Allocate space for the dispatch trace log for all possible cpus
 * and register the buffers with the hypervisor. This is used for
 * computing time stolen by the hypervisor.
 */
static int alloc_dispatch_logs(void)
{
	int cpu, ret;
	struct paca_struct *pp;
	struct dtl_entry *dtl;

	if (!firmware_has_feature(FW_FEATURE_SPLPAR))
		return 0;

	if (!dtl_cache)
		return 0;

	for_each_possible_cpu(cpu) {
		pp = &paca[cpu];
		dtl = kmem_cache_alloc(dtl_cache, GFP_KERNEL);
		if (!dtl) {
			pr_warn("Failed to allocate dispatch trace log for cpu %d\n",
				cpu);
			pr_warn("Stolen time statistics will be unreliable\n");
			break;
		}

		pp->dtl_ridx = 0;
		pp->dispatch_log = dtl;
		pp->dispatch_log_end = dtl + N_DISPATCH_LOG;
		pp->dtl_curr = dtl;
	}

	/* Register the DTL for the current (boot) cpu */
	dtl = get_paca()->dispatch_log;
	get_paca()->dtl_ridx = 0;
	get_paca()->dtl_curr = dtl;
	get_paca()->lppaca_ptr->dtl_idx = 0;

	/* hypervisor reads buffer length from this field */
	dtl->enqueue_to_dispatch_time = cpu_to_be32(DISPATCH_LOG_BYTES);
	ret = register_dtl(hard_smp_processor_id(), __pa(dtl));
	if (ret)
		pr_err("WARNING: DTL registration of cpu %d (hw %d) failed "
		       "with %d\n", smp_processor_id(),
		       hard_smp_processor_id(), ret);
	get_paca()->lppaca_ptr->dtl_enable_mask = 2;

	return 0;
}
#else /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
static inline int alloc_dispatch_logs(void)
{
	return 0;
}
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */

static int alloc_dispatch_log_kmem_cache(void)
{
	dtl_cache = kmem_cache_create("dtl", DISPATCH_LOG_BYTES,
				      DISPATCH_LOG_BYTES, 0, NULL);
	if (!dtl_cache) {
		pr_warn("Failed to create dispatch trace log buffer cache\n");
		pr_warn("Stolen time statistics will be unreliable\n");
		return 0;
	}

	return alloc_dispatch_logs();
}
machine_early_initcall(pseries, alloc_dispatch_log_kmem_cache);

static void pseries_lpar_idle(void)
{
	/*
	 * Default handler to go into low thread priority and possibly
	 * low power mode by ceding processor to hypervisor
	 */

	/* Indicate to hypervisor that we are idle. */
	get_lppaca()->idle = 1;

	/*
	 * Yield the processor to the hypervisor. We return if
	 * an external interrupt occurs (which are driven prior
	 * to returning here) or if a prod occurs from another
	 * processor. When returning here, external interrupts
	 * are enabled.
	 */
	cede_processor();

	get_lppaca()->idle = 0;
}

/*
 * Enable relocation on during exceptions. This has partition wide scope and
 * may take a while to complete, if it takes longer than one second we will
 * just give up rather than wasting any more time on this - if that turns out
 * to ever be a problem in practice we can move this into a kernel thread to
 * finish off the process later in boot.
 */
void pseries_enable_reloc_on_exc(void)
{
	long rc;
	unsigned int delay, total_delay = 0;

	while (1) {
		rc = enable_reloc_on_exceptions();
		if (!H_IS_LONG_BUSY(rc)) {
			if (rc == H_P2) {
				pr_info("Relocation on exceptions not"
					" supported\n");
			} else if (rc != H_SUCCESS) {
				pr_warn("Unable to enable relocation"
					" on exceptions: %ld\n", rc);
			}
			break;
		}

		delay = get_longbusy_msecs(rc);
		total_delay += delay;
		if (total_delay > 1000) {
			pr_warn("Warning: Giving up waiting to enable "
				"relocation on exceptions (%u msec)!\n",
				total_delay);
			return;
		}

		mdelay(delay);
	}
}
EXPORT_SYMBOL(pseries_enable_reloc_on_exc);

void pseries_disable_reloc_on_exc(void)
{
	long rc;

	while (1) {
		rc = disable_reloc_on_exceptions();
		if (!H_IS_LONG_BUSY(rc))
			break;
		mdelay(get_longbusy_msecs(rc));
	}
	if (rc != H_SUCCESS)
		pr_warning("Warning: Failed to disable relocation on "
			   "exceptions: %ld\n", rc);
}
EXPORT_SYMBOL(pseries_disable_reloc_on_exc);

#ifdef CONFIG_KEXEC_CORE
static void pSeries_machine_kexec(struct kimage *image)
{
	if (firmware_has_feature(FW_FEATURE_SET_MODE))
		pseries_disable_reloc_on_exc();

	default_machine_kexec(image);
}
#endif

#ifdef __LITTLE_ENDIAN__
void pseries_big_endian_exceptions(void)
{
	long rc;

	while (1) {
		rc = enable_big_endian_exceptions();
		if (!H_IS_LONG_BUSY(rc))
			break;
		mdelay(get_longbusy_msecs(rc));
	}

	/*
	 * At this point it is unlikely panic() will get anything
	 * out to the user, since this is called very late in kexec
	 * but at least this will stop us from continuing on further
	 * and creating an even more difficult to debug situation.
	 *
	 * There is a known problem when kdump'ing, if cpus are offline
	 * the above call will fail. Rather than panicking again, keep
	 * going and hope the kdump kernel is also little endian, which
	 * it usually is.
	 */
	if (rc && !kdump_in_progress())
		panic("Could not enable big endian exceptions");
}

void pseries_little_endian_exceptions(void)
{
	long rc;

	while (1) {
		rc = enable_little_endian_exceptions();
		if (!H_IS_LONG_BUSY(rc))
			break;
		mdelay(get_longbusy_msecs(rc));
	}
	if (rc) {
		ppc_md.progress("H_SET_MODE LE exception fail", 0);
		panic("Could not enable little endian exceptions");
	}
}
#endif

static void __init find_and_init_phbs(void)
{
	struct device_node *node;
	struct pci_controller *phb;
	struct device_node *root = of_find_node_by_path("/");

	for_each_child_of_node(root, node) {
		if (node->type == NULL || (strcmp(node->type, "pci") != 0 &&
					   strcmp(node->type, "pciex") != 0))
			continue;

		phb = pcibios_alloc_controller(node);
		if (!phb)
			continue;
		rtas_setup_phb(phb);
		pci_process_bridge_OF_ranges(phb, node, 0);
		isa_bridge_find_early(phb);
		phb->controller_ops = pseries_pci_controller_ops;
	}

	of_node_put(root);

	/*
	 * PCI_PROBE_ONLY and PCI_REASSIGN_ALL_BUS can be set via properties
	 * in chosen.
	 */
	of_pci_check_probe_only();
}

static void __init pSeries_setup_arch(void)
{
	set_arch_panic_timeout(10, ARCH_PANIC_TIMEOUT);

	/* Discover PIC type and setup ppc_md accordingly */
	smp_init_pseries();

	/* openpic global configuration register (64-bit format). */
	/* openpic Interrupt Source Unit pointer (64-bit format). */
	/* python0 facility area (mmio) (64-bit format) REAL address. */

	/* init to some ~sane value until calibrate_delay() runs */
	loops_per_jiffy = 50000000;

	fwnmi_init();

	/* By default, only probe PCI (can be overridden by rtas_pci) */
	pci_add_flags(PCI_PROBE_ONLY);

	/* Find and initialize PCI host bridges */
	init_pci_config_tokens();
	find_and_init_phbs();
	of_reconfig_notifier_register(&pci_dn_reconfig_nb);

	pSeries_nvram_init();

	if (firmware_has_feature(FW_FEATURE_LPAR)) {
		vpa_init(boot_cpuid);
		ppc_md.power_save = pseries_lpar_idle;
		ppc_md.enable_pmcs = pseries_lpar_enable_pmcs;
	} else {
		/* No special idle routine */
		ppc_md.enable_pmcs = power4_enable_pmcs;
	}

	ppc_md.pcibios_root_bridge_prepare = pseries_root_bridge_prepare;
}

static int __init pSeries_init_panel(void)
{
	/* Manually leave the kernel version on the panel. */
#ifdef __BIG_ENDIAN__
	ppc_md.progress("Linux ppc64\n", 0);
#else
	ppc_md.progress("Linux ppc64le\n", 0);
#endif
	ppc_md.progress(init_utsname()->version, 0);

	return 0;
}
machine_arch_initcall(pseries, pSeries_init_panel);

static int pseries_set_dabr(unsigned long dabr, unsigned long dabrx)
{
	return plpar_hcall_norets(H_SET_DABR, dabr);
}

static int pseries_set_xdabr(unsigned long dabr, unsigned long dabrx)
{
	/* Have to set at least one bit in the DABRX according to PAPR */
	if (dabrx == 0 && dabr == 0)
		dabrx = DABRX_USER;
	/* PAPR says we can only set kernel and user bits */
	dabrx &= DABRX_KERNEL | DABRX_USER;

	return plpar_hcall_norets(H_SET_XDABR, dabr, dabrx);
}

static int pseries_set_dawr(unsigned long dawr, unsigned long dawrx)
{
	/* PAPR says we can't set HYP */
	dawrx &= ~DAWRX_HYP;

	return plapr_set_watchpoint0(dawr, dawrx);
}

#define CMO_CHARACTERISTICS_TOKEN 44
#define CMO_MAXLENGTH 1026

void pSeries_coalesce_init(void)
{
	struct hvcall_mpp_x_data mpp_x_data;

	if (firmware_has_feature(FW_FEATURE_CMO) && !h_get_mpp_x(&mpp_x_data))
		powerpc_firmware_features |= FW_FEATURE_XCMO;
	else
		powerpc_firmware_features &= ~FW_FEATURE_XCMO;
}

/**
 * pSeries_cmo_feature_init - FW_FEATURE_CMO is not stored in
 * ibm,hypertas-functions, handle that here.
 * (Stolen from parse_system_parameter_string)
 */
static void pSeries_cmo_feature_init(void)
{
	char *ptr, *key, *value, *end;
	int call_status;
	int page_order = IOMMU_PAGE_SHIFT_4K;

	pr_debug(" -> fw_cmo_feature_init()\n");
	spin_lock(&rtas_data_buf_lock);
	memset(rtas_data_buf, 0, RTAS_DATA_BUF_SIZE);
	call_status = rtas_call(rtas_token("ibm,get-system-parameter"), 3, 1,
				NULL,
				CMO_CHARACTERISTICS_TOKEN,
				__pa(rtas_data_buf),
				RTAS_DATA_BUF_SIZE);

	if (call_status != 0) {
		spin_unlock(&rtas_data_buf_lock);
		pr_debug("CMO not available\n");
		pr_debug(" <- fw_cmo_feature_init()\n");
		return;
	}

	end = rtas_data_buf + CMO_MAXLENGTH - 2;
	ptr = rtas_data_buf + 2;	/* step over strlen value */
	key = value = ptr;

	while (*ptr && (ptr <= end)) {
		/* Separate the key and value by replacing '=' with '\0' and
		 * point the value at the string after the '='
		 */
		if (ptr[0] == '=') {
			ptr[0] = '\0';
			value = ptr + 1;
		} else if (ptr[0] == '\0' || ptr[0] == ',') {
			/* Terminate the string containing the key/value pair */
			ptr[0] = '\0';

			if (key == value) {
				pr_debug("Malformed key/value pair\n");
				/* Never found a '=', end processing */
				break;
			}

			if (0 == strcmp(key, "CMOPageSize"))
				page_order = simple_strtol(value, NULL, 10);
			else if (0 == strcmp(key, "PrPSP"))
				CMO_PrPSP = simple_strtol(value, NULL, 10);
			else if (0 == strcmp(key, "SecPSP"))
				CMO_SecPSP = simple_strtol(value, NULL, 10);
			value = key = ptr + 1;
		}
		ptr++;
	}

	/* Page size is returned as the power of 2 of the page size,
	 * convert to the page size in bytes before returning
	 */
	CMO_PageSize = 1 << page_order;
	pr_debug("CMO_PageSize = %lu\n", CMO_PageSize);

	if (CMO_PrPSP != -1 || CMO_SecPSP != -1) {
		pr_info("CMO enabled\n");
		pr_debug("CMO enabled, PrPSP=%d, SecPSP=%d\n", CMO_PrPSP,
			 CMO_SecPSP);
		powerpc_firmware_features |= FW_FEATURE_CMO;
		pSeries_coalesce_init();
	} else
		pr_debug("CMO not enabled, PrPSP=%d, SecPSP=%d\n", CMO_PrPSP,
			 CMO_SecPSP);
	spin_unlock(&rtas_data_buf_lock);
	pr_debug(" <- fw_cmo_feature_init()\n");
}

/*
 * Early initialization. Relocation is on but do not reference unbolted pages
 */
static void __init pseries_init(void)
{
	pr_debug(" -> pseries_init()\n");

#ifdef CONFIG_HVC_CONSOLE
	if (firmware_has_feature(FW_FEATURE_LPAR))
		hvc_vio_init_early();
#endif
	if (firmware_has_feature(FW_FEATURE_XDABR))
		ppc_md.set_dabr = pseries_set_xdabr;
	else if (firmware_has_feature(FW_FEATURE_DABR))
		ppc_md.set_dabr = pseries_set_dabr;

	if (firmware_has_feature(FW_FEATURE_SET_MODE))
		ppc_md.set_dawr = pseries_set_dawr;

	pSeries_cmo_feature_init();
	iommu_init_early_pSeries();

	pr_debug(" <- pseries_init()\n");
}

/**
 * pseries_power_off - tell firmware about how to power off the system.
 *
 * This function calls either the power-off rtas token in normal cases
 * or the ibm,power-off-ups token (if present & requested) in case of
 * a power failure. If power-off token is used, power on will only be
 * possible with power button press. If ibm,power-off-ups token is used
 * it will allow auto poweron after power is restored.
 */
static void pseries_power_off(void)
{
	int rc;
	int rtas_poweroff_ups_token = rtas_token("ibm,power-off-ups");

	if (rtas_flash_term_hook)
		rtas_flash_term_hook(SYS_POWER_OFF);

	if (rtas_poweron_auto == 0 ||
	    rtas_poweroff_ups_token == RTAS_UNKNOWN_SERVICE) {
		rc = rtas_call(rtas_token("power-off"), 2, 1, NULL, -1, -1);
		printk(KERN_INFO "RTAS power-off returned %d\n", rc);
	} else {
		rc = rtas_call(rtas_poweroff_ups_token, 0, 1, NULL);
		printk(KERN_INFO "RTAS ibm,power-off-ups returned %d\n", rc);
	}
	for (;;);
}

static int __init pSeries_probe(void)
{
	const char *dtype = of_get_property(of_root, "device_type", NULL);

	if (dtype == NULL)
		return 0;
	if (strcmp(dtype, "chrp"))
		return 0;

	/* Cell blades firmware claims to be chrp while it's not. Until this
	 * is fixed, we need to avoid those here.
	 */
	if (of_machine_is_compatible("IBM,CPBW-1.0") ||
	    of_machine_is_compatible("IBM,CBEA"))
		return 0;

	pm_power_off = pseries_power_off;

	pr_debug("Machine is%s LPAR !\n",
		 (powerpc_firmware_features & FW_FEATURE_LPAR) ? "" : " not");

	pseries_init();

	return 1;
}

static int pSeries_pci_probe_mode(struct pci_bus *bus)
{
	if (firmware_has_feature(FW_FEATURE_LPAR))
		return PCI_PROBE_DEVTREE;
	return PCI_PROBE_NORMAL;
}

struct pci_controller_ops pseries_pci_controller_ops = {
	.probe_mode = pSeries_pci_probe_mode,
};

define_machine(pseries) {
	.name = "pSeries",
	.probe = pSeries_probe,
	.setup_arch = pSeries_setup_arch,
	.init_IRQ = pseries_init_irq,
	.show_cpuinfo = pSeries_show_cpuinfo,
	.log_error = pSeries_log_error,
	.pcibios_fixup = pSeries_final_fixup,
	.restart = rtas_restart,
	.halt = rtas_halt,
	.panic = rtas_os_term,
	.get_boot_time = rtas_get_boot_time,
	.get_rtc_time = rtas_get_rtc_time,
	.set_rtc_time = rtas_set_rtc_time,
	.calibrate_decr = generic_calibrate_decr,
	.progress = rtas_progress,
	.system_reset_exception = pSeries_system_reset_exception,
	.machine_check_exception = pSeries_machine_check_exception,
#ifdef CONFIG_KEXEC_CORE
	.machine_kexec = pSeries_machine_kexec,
	.kexec_cpu_down = pseries_kexec_cpu_down,
#endif
#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
	.memory_block_size = pseries_memory_block_size,
#endif
};