1 /* 2 * PowerNV OPAL high level interfaces 3 * 4 * Copyright 2011 IBM Corp. 5 * 6 * This program is free software; you can redistribute it and/or 7 * modify it under the terms of the GNU General Public License 8 * as published by the Free Software Foundation; either version 9 * 2 of the License, or (at your option) any later version. 10 */ 11 12 #define pr_fmt(fmt) "opal: " fmt 13 14 #include <linux/printk.h> 15 #include <linux/types.h> 16 #include <linux/of.h> 17 #include <linux/of_fdt.h> 18 #include <linux/of_platform.h> 19 #include <linux/interrupt.h> 20 #include <linux/notifier.h> 21 #include <linux/slab.h> 22 #include <linux/sched.h> 23 #include <linux/kobject.h> 24 #include <linux/delay.h> 25 #include <linux/memblock.h> 26 27 #include <asm/machdep.h> 28 #include <asm/opal.h> 29 #include <asm/firmware.h> 30 #include <asm/mce.h> 31 32 #include "powernv.h" 33 34 /* /sys/firmware/opal */ 35 struct kobject *opal_kobj; 36 37 struct opal { 38 u64 base; 39 u64 entry; 40 u64 size; 41 } opal; 42 43 struct mcheck_recoverable_range { 44 u64 start_addr; 45 u64 end_addr; 46 u64 recover_addr; 47 }; 48 49 static struct mcheck_recoverable_range *mc_recoverable_range; 50 static int mc_recoverable_range_len; 51 52 struct device_node *opal_node; 53 static DEFINE_SPINLOCK(opal_write_lock); 54 static unsigned int *opal_irqs; 55 static unsigned int opal_irq_count; 56 static ATOMIC_NOTIFIER_HEAD(opal_notifier_head); 57 static struct atomic_notifier_head opal_msg_notifier_head[OPAL_MSG_TYPE_MAX]; 58 static DEFINE_SPINLOCK(opal_notifier_lock); 59 static uint64_t last_notified_mask = 0x0ul; 60 static atomic_t opal_notifier_hold = ATOMIC_INIT(0); 61 62 static void opal_reinit_cores(void) 63 { 64 /* Do the actual re-init, This will clobber all FPRs, VRs, etc... 65 * 66 * It will preserve non volatile GPRs and HSPRG0/1. It will 67 * also restore HIDs and other SPRs to their original value 68 * but it might clobber a bunch. 69 */ 70 #ifdef __BIG_ENDIAN__ 71 opal_reinit_cpus(OPAL_REINIT_CPUS_HILE_BE); 72 #else 73 opal_reinit_cpus(OPAL_REINIT_CPUS_HILE_LE); 74 #endif 75 } 76 77 int __init early_init_dt_scan_opal(unsigned long node, 78 const char *uname, int depth, void *data) 79 { 80 const void *basep, *entryp, *sizep; 81 int basesz, entrysz, runtimesz; 82 83 if (depth != 1 || strcmp(uname, "ibm,opal") != 0) 84 return 0; 85 86 basep = of_get_flat_dt_prop(node, "opal-base-address", &basesz); 87 entryp = of_get_flat_dt_prop(node, "opal-entry-address", &entrysz); 88 sizep = of_get_flat_dt_prop(node, "opal-runtime-size", &runtimesz); 89 90 if (!basep || !entryp || !sizep) 91 return 1; 92 93 opal.base = of_read_number(basep, basesz/4); 94 opal.entry = of_read_number(entryp, entrysz/4); 95 opal.size = of_read_number(sizep, runtimesz/4); 96 97 pr_debug("OPAL Base = 0x%llx (basep=%p basesz=%d)\n", 98 opal.base, basep, basesz); 99 pr_debug("OPAL Entry = 0x%llx (entryp=%p basesz=%d)\n", 100 opal.entry, entryp, entrysz); 101 pr_debug("OPAL Entry = 0x%llx (sizep=%p runtimesz=%d)\n", 102 opal.size, sizep, runtimesz); 103 104 powerpc_firmware_features |= FW_FEATURE_OPAL; 105 if (of_flat_dt_is_compatible(node, "ibm,opal-v3")) { 106 powerpc_firmware_features |= FW_FEATURE_OPALv2; 107 powerpc_firmware_features |= FW_FEATURE_OPALv3; 108 pr_info("OPAL V3 detected !\n"); 109 } else if (of_flat_dt_is_compatible(node, "ibm,opal-v2")) { 110 powerpc_firmware_features |= FW_FEATURE_OPALv2; 111 pr_info("OPAL V2 detected !\n"); 112 } else { 113 pr_info("OPAL V1 detected !\n"); 114 } 115 116 /* Reinit all cores with the right endian */ 117 opal_reinit_cores(); 118 119 /* Restore some bits */ 120 if (cur_cpu_spec->cpu_restore) 121 cur_cpu_spec->cpu_restore(); 122 123 return 1; 124 } 125 126 int __init early_init_dt_scan_recoverable_ranges(unsigned long node, 127 const char *uname, int depth, void *data) 128 { 129 int i, psize, size; 130 const __be32 *prop; 131 132 if (depth != 1 || strcmp(uname, "ibm,opal") != 0) 133 return 0; 134 135 prop = of_get_flat_dt_prop(node, "mcheck-recoverable-ranges", &psize); 136 137 if (!prop) 138 return 1; 139 140 pr_debug("Found machine check recoverable ranges.\n"); 141 142 /* 143 * Calculate number of available entries. 144 * 145 * Each recoverable address range entry is (start address, len, 146 * recovery address), 2 cells each for start and recovery address, 147 * 1 cell for len, totalling 5 cells per entry. 148 */ 149 mc_recoverable_range_len = psize / (sizeof(*prop) * 5); 150 151 /* Sanity check */ 152 if (!mc_recoverable_range_len) 153 return 1; 154 155 /* Size required to hold all the entries. */ 156 size = mc_recoverable_range_len * 157 sizeof(struct mcheck_recoverable_range); 158 159 /* 160 * Allocate a buffer to hold the MC recoverable ranges. We would be 161 * accessing them in real mode, hence it needs to be within 162 * RMO region. 163 */ 164 mc_recoverable_range =__va(memblock_alloc_base(size, __alignof__(u64), 165 ppc64_rma_size)); 166 memset(mc_recoverable_range, 0, size); 167 168 for (i = 0; i < mc_recoverable_range_len; i++) { 169 mc_recoverable_range[i].start_addr = 170 of_read_number(prop + (i * 5) + 0, 2); 171 mc_recoverable_range[i].end_addr = 172 mc_recoverable_range[i].start_addr + 173 of_read_number(prop + (i * 5) + 2, 1); 174 mc_recoverable_range[i].recover_addr = 175 of_read_number(prop + (i * 5) + 3, 2); 176 177 pr_debug("Machine check recoverable range: %llx..%llx: %llx\n", 178 mc_recoverable_range[i].start_addr, 179 mc_recoverable_range[i].end_addr, 180 mc_recoverable_range[i].recover_addr); 181 } 182 return 1; 183 } 184 185 static int __init opal_register_exception_handlers(void) 186 { 187 #ifdef __BIG_ENDIAN__ 188 u64 glue; 189 190 if (!(powerpc_firmware_features & FW_FEATURE_OPAL)) 191 return -ENODEV; 192 193 /* Hookup some exception handlers except machine check. We use the 194 * fwnmi area at 0x7000 to provide the glue space to OPAL 195 */ 196 glue = 0x7000; 197 198 /* 199 * Check if we are running on newer firmware that exports 200 * OPAL_HANDLE_HMI token. If yes, then don't ask OPAL to patch 201 * the HMI interrupt and we catch it directly in Linux. 202 * 203 * For older firmware (i.e currently released POWER8 System Firmware 204 * as of today <= SV810_087), we fallback to old behavior and let OPAL 205 * patch the HMI vector and handle it inside OPAL firmware. 206 * 207 * For newer firmware (in development/yet to be released) we will 208 * start catching/handling HMI directly in Linux. 209 */ 210 if (!opal_check_token(OPAL_HANDLE_HMI)) { 211 pr_info("Old firmware detected, OPAL handles HMIs.\n"); 212 opal_register_exception_handler( 213 OPAL_HYPERVISOR_MAINTENANCE_HANDLER, 214 0, glue); 215 glue += 128; 216 } 217 218 opal_register_exception_handler(OPAL_SOFTPATCH_HANDLER, 0, glue); 219 #endif 220 221 return 0; 222 } 223 machine_early_initcall(powernv, opal_register_exception_handlers); 224 225 int opal_notifier_register(struct notifier_block *nb) 226 { 227 if (!nb) { 228 pr_warning("%s: Invalid argument (%p)\n", 229 __func__, nb); 230 return -EINVAL; 231 } 232 233 atomic_notifier_chain_register(&opal_notifier_head, nb); 234 return 0; 235 } 236 EXPORT_SYMBOL_GPL(opal_notifier_register); 237 238 int opal_notifier_unregister(struct notifier_block *nb) 239 { 240 if (!nb) { 241 pr_warning("%s: Invalid argument (%p)\n", 242 __func__, nb); 243 return -EINVAL; 244 } 245 246 atomic_notifier_chain_unregister(&opal_notifier_head, nb); 247 return 0; 248 } 249 EXPORT_SYMBOL_GPL(opal_notifier_unregister); 250 251 static void opal_do_notifier(uint64_t events) 252 { 253 unsigned long flags; 254 uint64_t changed_mask; 255 256 if (atomic_read(&opal_notifier_hold)) 257 return; 258 259 spin_lock_irqsave(&opal_notifier_lock, flags); 260 changed_mask = last_notified_mask ^ events; 261 last_notified_mask = events; 262 spin_unlock_irqrestore(&opal_notifier_lock, flags); 263 264 /* 265 * We feed with the event bits and changed bits for 266 * enough information to the callback. 267 */ 268 atomic_notifier_call_chain(&opal_notifier_head, 269 events, (void *)changed_mask); 270 } 271 272 void opal_notifier_update_evt(uint64_t evt_mask, 273 uint64_t evt_val) 274 { 275 unsigned long flags; 276 277 spin_lock_irqsave(&opal_notifier_lock, flags); 278 last_notified_mask &= ~evt_mask; 279 last_notified_mask |= evt_val; 280 spin_unlock_irqrestore(&opal_notifier_lock, flags); 281 } 282 283 void opal_notifier_enable(void) 284 { 285 int64_t rc; 286 __be64 evt = 0; 287 288 atomic_set(&opal_notifier_hold, 0); 289 290 /* Process pending events */ 291 rc = opal_poll_events(&evt); 292 if (rc == OPAL_SUCCESS && evt) 293 opal_do_notifier(be64_to_cpu(evt)); 294 } 295 296 void opal_notifier_disable(void) 297 { 298 atomic_set(&opal_notifier_hold, 1); 299 } 300 301 /* 302 * Opal message notifier based on message type. Allow subscribers to get 303 * notified for specific messgae type. 304 */ 305 int opal_message_notifier_register(enum OpalMessageType msg_type, 306 struct notifier_block *nb) 307 { 308 if (!nb) { 309 pr_warning("%s: Invalid argument (%p)\n", 310 __func__, nb); 311 return -EINVAL; 312 } 313 if (msg_type > OPAL_MSG_TYPE_MAX) { 314 pr_warning("%s: Invalid message type argument (%d)\n", 315 __func__, msg_type); 316 return -EINVAL; 317 } 318 return atomic_notifier_chain_register( 319 &opal_msg_notifier_head[msg_type], nb); 320 } 321 322 static void opal_message_do_notify(uint32_t msg_type, void *msg) 323 { 324 /* notify subscribers */ 325 atomic_notifier_call_chain(&opal_msg_notifier_head[msg_type], 326 msg_type, msg); 327 } 328 329 static void opal_handle_message(void) 330 { 331 s64 ret; 332 /* 333 * TODO: pre-allocate a message buffer depending on opal-msg-size 334 * value in /proc/device-tree. 335 */ 336 static struct opal_msg msg; 337 u32 type; 338 339 ret = opal_get_msg(__pa(&msg), sizeof(msg)); 340 /* No opal message pending. */ 341 if (ret == OPAL_RESOURCE) 342 return; 343 344 /* check for errors. */ 345 if (ret) { 346 pr_warning("%s: Failed to retrieve opal message, err=%lld\n", 347 __func__, ret); 348 return; 349 } 350 351 type = be32_to_cpu(msg.msg_type); 352 353 /* Sanity check */ 354 if (type > OPAL_MSG_TYPE_MAX) { 355 pr_warning("%s: Unknown message type: %u\n", __func__, type); 356 return; 357 } 358 opal_message_do_notify(type, (void *)&msg); 359 } 360 361 static int opal_message_notify(struct notifier_block *nb, 362 unsigned long events, void *change) 363 { 364 if (events & OPAL_EVENT_MSG_PENDING) 365 opal_handle_message(); 366 return 0; 367 } 368 369 static struct notifier_block opal_message_nb = { 370 .notifier_call = opal_message_notify, 371 .next = NULL, 372 .priority = 0, 373 }; 374 375 static int __init opal_message_init(void) 376 { 377 int ret, i; 378 379 for (i = 0; i < OPAL_MSG_TYPE_MAX; i++) 380 ATOMIC_INIT_NOTIFIER_HEAD(&opal_msg_notifier_head[i]); 381 382 ret = opal_notifier_register(&opal_message_nb); 383 if (ret) { 384 pr_err("%s: Can't register OPAL event notifier (%d)\n", 385 __func__, ret); 386 return ret; 387 } 388 return 0; 389 } 390 machine_early_initcall(powernv, opal_message_init); 391 392 int opal_get_chars(uint32_t vtermno, char *buf, int count) 393 { 394 s64 rc; 395 __be64 evt, len; 396 397 if (!opal.entry) 398 return -ENODEV; 399 opal_poll_events(&evt); 400 if ((be64_to_cpu(evt) & OPAL_EVENT_CONSOLE_INPUT) == 0) 401 return 0; 402 len = cpu_to_be64(count); 403 rc = opal_console_read(vtermno, &len, buf); 404 if (rc == OPAL_SUCCESS) 405 return be64_to_cpu(len); 406 return 0; 407 } 408 409 int opal_put_chars(uint32_t vtermno, const char *data, int total_len) 410 { 411 int written = 0; 412 __be64 olen; 413 s64 len, rc; 414 unsigned long flags; 415 __be64 evt; 416 417 if (!opal.entry) 418 return -ENODEV; 419 420 /* We want put_chars to be atomic to avoid mangling of hvsi 421 * packets. To do that, we first test for room and return 422 * -EAGAIN if there isn't enough. 423 * 424 * Unfortunately, opal_console_write_buffer_space() doesn't 425 * appear to work on opal v1, so we just assume there is 426 * enough room and be done with it 427 */ 428 spin_lock_irqsave(&opal_write_lock, flags); 429 if (firmware_has_feature(FW_FEATURE_OPALv2)) { 430 rc = opal_console_write_buffer_space(vtermno, &olen); 431 len = be64_to_cpu(olen); 432 if (rc || len < total_len) { 433 spin_unlock_irqrestore(&opal_write_lock, flags); 434 /* Closed -> drop characters */ 435 if (rc) 436 return total_len; 437 opal_poll_events(NULL); 438 return -EAGAIN; 439 } 440 } 441 442 /* We still try to handle partial completions, though they 443 * should no longer happen. 444 */ 445 rc = OPAL_BUSY; 446 while(total_len > 0 && (rc == OPAL_BUSY || 447 rc == OPAL_BUSY_EVENT || rc == OPAL_SUCCESS)) { 448 olen = cpu_to_be64(total_len); 449 rc = opal_console_write(vtermno, &olen, data); 450 len = be64_to_cpu(olen); 451 452 /* Closed or other error drop */ 453 if (rc != OPAL_SUCCESS && rc != OPAL_BUSY && 454 rc != OPAL_BUSY_EVENT) { 455 written = total_len; 456 break; 457 } 458 if (rc == OPAL_SUCCESS) { 459 total_len -= len; 460 data += len; 461 written += len; 462 } 463 /* This is a bit nasty but we need that for the console to 464 * flush when there aren't any interrupts. We will clean 465 * things a bit later to limit that to synchronous path 466 * such as the kernel console and xmon/udbg 467 */ 468 do 469 opal_poll_events(&evt); 470 while(rc == OPAL_SUCCESS && 471 (be64_to_cpu(evt) & OPAL_EVENT_CONSOLE_OUTPUT)); 472 } 473 spin_unlock_irqrestore(&opal_write_lock, flags); 474 return written; 475 } 476 477 static int opal_recover_mce(struct pt_regs *regs, 478 struct machine_check_event *evt) 479 { 480 int recovered = 0; 481 uint64_t ea = get_mce_fault_addr(evt); 482 483 if (!(regs->msr & MSR_RI)) { 484 /* If MSR_RI isn't set, we cannot recover */ 485 recovered = 0; 486 } else if (evt->disposition == MCE_DISPOSITION_RECOVERED) { 487 /* Platform corrected itself */ 488 recovered = 1; 489 } else if (ea && !is_kernel_addr(ea)) { 490 /* 491 * Faulting address is not in kernel text. We should be fine. 492 * We need to find which process uses this address. 493 * For now, kill the task if we have received exception when 494 * in userspace. 495 * 496 * TODO: Queue up this address for hwpoisioning later. 497 */ 498 if (user_mode(regs) && !is_global_init(current)) { 499 _exception(SIGBUS, regs, BUS_MCEERR_AR, regs->nip); 500 recovered = 1; 501 } else 502 recovered = 0; 503 } else if (user_mode(regs) && !is_global_init(current) && 504 evt->severity == MCE_SEV_ERROR_SYNC) { 505 /* 506 * If we have received a synchronous error when in userspace 507 * kill the task. 508 */ 509 _exception(SIGBUS, regs, BUS_MCEERR_AR, regs->nip); 510 recovered = 1; 511 } 512 return recovered; 513 } 514 515 int opal_machine_check(struct pt_regs *regs) 516 { 517 struct machine_check_event evt; 518 519 if (!get_mce_event(&evt, MCE_EVENT_RELEASE)) 520 return 0; 521 522 /* Print things out */ 523 if (evt.version != MCE_V1) { 524 pr_err("Machine Check Exception, Unknown event version %d !\n", 525 evt.version); 526 return 0; 527 } 528 machine_check_print_event_info(&evt); 529 530 if (opal_recover_mce(regs, &evt)) 531 return 1; 532 return 0; 533 } 534 535 /* Early hmi handler called in real mode. */ 536 int opal_hmi_exception_early(struct pt_regs *regs) 537 { 538 s64 rc; 539 540 /* 541 * call opal hmi handler. Pass paca address as token. 542 * The return value OPAL_SUCCESS is an indication that there is 543 * an HMI event generated waiting to pull by Linux. 544 */ 545 rc = opal_handle_hmi(); 546 if (rc == OPAL_SUCCESS) { 547 local_paca->hmi_event_available = 1; 548 return 1; 549 } 550 return 0; 551 } 552 553 /* HMI exception handler called in virtual mode during check_irq_replay. */ 554 int opal_handle_hmi_exception(struct pt_regs *regs) 555 { 556 s64 rc; 557 __be64 evt = 0; 558 559 /* 560 * Check if HMI event is available. 561 * if Yes, then call opal_poll_events to pull opal messages and 562 * process them. 563 */ 564 if (!local_paca->hmi_event_available) 565 return 0; 566 567 local_paca->hmi_event_available = 0; 568 rc = opal_poll_events(&evt); 569 if (rc == OPAL_SUCCESS && evt) 570 opal_do_notifier(be64_to_cpu(evt)); 571 572 return 1; 573 } 574 575 static uint64_t find_recovery_address(uint64_t nip) 576 { 577 int i; 578 579 for (i = 0; i < mc_recoverable_range_len; i++) 580 if ((nip >= mc_recoverable_range[i].start_addr) && 581 (nip < mc_recoverable_range[i].end_addr)) 582 return mc_recoverable_range[i].recover_addr; 583 return 0; 584 } 585 586 bool opal_mce_check_early_recovery(struct pt_regs *regs) 587 { 588 uint64_t recover_addr = 0; 589 590 if (!opal.base || !opal.size) 591 goto out; 592 593 if ((regs->nip >= opal.base) && 594 (regs->nip <= (opal.base + opal.size))) 595 recover_addr = find_recovery_address(regs->nip); 596 597 /* 598 * Setup regs->nip to rfi into fixup address. 599 */ 600 if (recover_addr) 601 regs->nip = recover_addr; 602 603 out: 604 return !!recover_addr; 605 } 606 607 static irqreturn_t opal_interrupt(int irq, void *data) 608 { 609 __be64 events; 610 611 opal_handle_interrupt(virq_to_hw(irq), &events); 612 613 opal_do_notifier(be64_to_cpu(events)); 614 615 return IRQ_HANDLED; 616 } 617 618 static int opal_sysfs_init(void) 619 { 620 opal_kobj = kobject_create_and_add("opal", firmware_kobj); 621 if (!opal_kobj) { 622 pr_warn("kobject_create_and_add opal failed\n"); 623 return -ENOMEM; 624 } 625 626 return 0; 627 } 628 629 static ssize_t symbol_map_read(struct file *fp, struct kobject *kobj, 630 struct bin_attribute *bin_attr, 631 char *buf, loff_t off, size_t count) 632 { 633 return memory_read_from_buffer(buf, count, &off, bin_attr->private, 634 bin_attr->size); 635 } 636 637 static BIN_ATTR_RO(symbol_map, 0); 638 639 static void opal_export_symmap(void) 640 { 641 const __be64 *syms; 642 unsigned int size; 643 struct device_node *fw; 644 int rc; 645 646 fw = of_find_node_by_path("/ibm,opal/firmware"); 647 if (!fw) 648 return; 649 syms = of_get_property(fw, "symbol-map", &size); 650 if (!syms || size != 2 * sizeof(__be64)) 651 return; 652 653 /* Setup attributes */ 654 bin_attr_symbol_map.private = __va(be64_to_cpu(syms[0])); 655 bin_attr_symbol_map.size = be64_to_cpu(syms[1]); 656 657 rc = sysfs_create_bin_file(opal_kobj, &bin_attr_symbol_map); 658 if (rc) 659 pr_warn("Error %d creating OPAL symbols file\n", rc); 660 } 661 662 static void __init opal_dump_region_init(void) 663 { 664 void *addr; 665 uint64_t size; 666 int rc; 667 668 /* Register kernel log buffer */ 669 addr = log_buf_addr_get(); 670 if (addr == NULL) 671 return; 672 673 size = log_buf_len_get(); 674 if (size == 0) 675 return; 676 677 rc = opal_register_dump_region(OPAL_DUMP_REGION_LOG_BUF, 678 __pa(addr), size); 679 /* Don't warn if this is just an older OPAL that doesn't 680 * know about that call 681 */ 682 if (rc && rc != OPAL_UNSUPPORTED) 683 pr_warn("DUMP: Failed to register kernel log buffer. " 684 "rc = %d\n", rc); 685 } 686 687 static void opal_ipmi_init(struct device_node *opal_node) 688 { 689 struct device_node *np; 690 691 for_each_child_of_node(opal_node, np) 692 if (of_device_is_compatible(np, "ibm,opal-ipmi")) 693 of_platform_device_create(np, NULL, NULL); 694 } 695 696 static void opal_i2c_create_devs(void) 697 { 698 struct device_node *np; 699 700 for_each_compatible_node(np, NULL, "ibm,opal-i2c") 701 of_platform_device_create(np, NULL, NULL); 702 } 703 704 static void __init opal_irq_init(struct device_node *dn) 705 { 706 const __be32 *irqs; 707 int i, irqlen; 708 709 /* Get interrupt property */ 710 irqs = of_get_property(opal_node, "opal-interrupts", &irqlen); 711 opal_irq_count = irqs ? (irqlen / 4) : 0; 712 pr_debug("Found %d interrupts reserved for OPAL\n", opal_irq_count); 713 if (!opal_irq_count) 714 return; 715 716 /* Install interrupt handlers */ 717 opal_irqs = kzalloc(opal_irq_count * sizeof(unsigned int), GFP_KERNEL); 718 for (i = 0; irqs && i < opal_irq_count; i++, irqs++) { 719 unsigned int irq, virq; 720 int rc; 721 722 /* Get hardware and virtual IRQ */ 723 irq = be32_to_cpup(irqs); 724 virq = irq_create_mapping(NULL, irq); 725 if (virq == NO_IRQ) { 726 pr_warn("Failed to map irq 0x%x\n", irq); 727 continue; 728 } 729 730 /* Install interrupt handler */ 731 rc = request_irq(virq, opal_interrupt, 0, "opal", NULL); 732 if (rc) { 733 irq_dispose_mapping(virq); 734 pr_warn("Error %d requesting irq %d (0x%x)\n", 735 rc, virq, irq); 736 continue; 737 } 738 739 /* Cache IRQ */ 740 opal_irqs[i] = virq; 741 } 742 } 743 744 static int __init opal_init(void) 745 { 746 struct device_node *np, *consoles; 747 int rc; 748 749 opal_node = of_find_node_by_path("/ibm,opal"); 750 if (!opal_node) { 751 pr_warn("Device node not found\n"); 752 return -ENODEV; 753 } 754 755 /* Register OPAL consoles if any ports */ 756 if (firmware_has_feature(FW_FEATURE_OPALv2)) 757 consoles = of_find_node_by_path("/ibm,opal/consoles"); 758 else 759 consoles = of_node_get(opal_node); 760 if (consoles) { 761 for_each_child_of_node(consoles, np) { 762 if (strcmp(np->name, "serial")) 763 continue; 764 of_platform_device_create(np, NULL, NULL); 765 } 766 of_node_put(consoles); 767 } 768 769 /* Create i2c platform devices */ 770 opal_i2c_create_devs(); 771 772 /* Find all OPAL interrupts and request them */ 773 opal_irq_init(opal_node); 774 775 /* Create "opal" kobject under /sys/firmware */ 776 rc = opal_sysfs_init(); 777 if (rc == 0) { 778 /* Export symbol map to userspace */ 779 opal_export_symmap(); 780 /* Setup dump region interface */ 781 opal_dump_region_init(); 782 /* Setup error log interface */ 783 rc = opal_elog_init(); 784 /* Setup code update interface */ 785 opal_flash_init(); 786 /* Setup platform dump extract interface */ 787 opal_platform_dump_init(); 788 /* Setup system parameters interface */ 789 opal_sys_param_init(); 790 /* Setup message log interface. */ 791 opal_msglog_init(); 792 } 793 794 opal_ipmi_init(opal_node); 795 796 return 0; 797 } 798 machine_subsys_initcall(powernv, opal_init); 799 800 void opal_shutdown(void) 801 { 802 unsigned int i; 803 long rc = OPAL_BUSY; 804 805 /* First free interrupts, which will also mask them */ 806 for (i = 0; i < opal_irq_count; i++) { 807 if (opal_irqs[i]) 808 free_irq(opal_irqs[i], NULL); 809 opal_irqs[i] = 0; 810 } 811 812 /* 813 * Then sync with OPAL which ensure anything that can 814 * potentially write to our memory has completed such 815 * as an ongoing dump retrieval 816 */ 817 while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) { 818 rc = opal_sync_host_reboot(); 819 if (rc == OPAL_BUSY) 820 opal_poll_events(NULL); 821 else 822 mdelay(10); 823 } 824 825 /* Unregister memory dump region */ 826 opal_unregister_dump_region(OPAL_DUMP_REGION_LOG_BUF); 827 } 828 829 /* Export this so that test modules can use it */ 830 EXPORT_SYMBOL_GPL(opal_invalid_call); 831 EXPORT_SYMBOL_GPL(opal_ipmi_send); 832 EXPORT_SYMBOL_GPL(opal_ipmi_recv); 833 834 /* Convert a region of vmalloc memory to an opal sg list */ 835 struct opal_sg_list *opal_vmalloc_to_sg_list(void *vmalloc_addr, 836 unsigned long vmalloc_size) 837 { 838 struct opal_sg_list *sg, *first = NULL; 839 unsigned long i = 0; 840 841 sg = kzalloc(PAGE_SIZE, GFP_KERNEL); 842 if (!sg) 843 goto nomem; 844 845 first = sg; 846 847 while (vmalloc_size > 0) { 848 uint64_t data = vmalloc_to_pfn(vmalloc_addr) << PAGE_SHIFT; 849 uint64_t length = min(vmalloc_size, PAGE_SIZE); 850 851 sg->entry[i].data = cpu_to_be64(data); 852 sg->entry[i].length = cpu_to_be64(length); 853 i++; 854 855 if (i >= SG_ENTRIES_PER_NODE) { 856 struct opal_sg_list *next; 857 858 next = kzalloc(PAGE_SIZE, GFP_KERNEL); 859 if (!next) 860 goto nomem; 861 862 sg->length = cpu_to_be64( 863 i * sizeof(struct opal_sg_entry) + 16); 864 i = 0; 865 sg->next = cpu_to_be64(__pa(next)); 866 sg = next; 867 } 868 869 vmalloc_addr += length; 870 vmalloc_size -= length; 871 } 872 873 sg->length = cpu_to_be64(i * sizeof(struct opal_sg_entry) + 16); 874 875 return first; 876 877 nomem: 878 pr_err("%s : Failed to allocate memory\n", __func__); 879 opal_free_sg_list(first); 880 return NULL; 881 } 882 883 void opal_free_sg_list(struct opal_sg_list *sg) 884 { 885 while (sg) { 886 uint64_t next = be64_to_cpu(sg->next); 887 888 kfree(sg); 889 890 if (next) 891 sg = __va(next); 892 else 893 sg = NULL; 894 } 895 } 896 897 EXPORT_SYMBOL_GPL(opal_poll_events); 898 EXPORT_SYMBOL_GPL(opal_rtc_read); 899 EXPORT_SYMBOL_GPL(opal_rtc_write); 900 EXPORT_SYMBOL_GPL(opal_tpo_read); 901 EXPORT_SYMBOL_GPL(opal_tpo_write); 902 EXPORT_SYMBOL_GPL(opal_i2c_request); 903