/*
 * PowerNV OPAL high level interfaces
 *
 * Copyright 2011 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt)	"opal: " fmt

#include <linux/printk.h>
#include <linux/types.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_platform.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/kobject.h>
#include <linux/delay.h>
#include <linux/memblock.h>
#include <linux/kthread.h>
#include <linux/freezer.h>

#include <asm/machdep.h>
#include <asm/opal.h>
#include <asm/firmware.h>
#include <asm/mce.h>

#include "powernv.h"

/* /sys/firmware/opal */
struct kobject *opal_kobj;

struct opal {
	u64 base;
	u64 entry;
	u64 size;
} opal;

struct mcheck_recoverable_range {
	u64 start_addr;
	u64 end_addr;
	u64 recover_addr;
};

static struct mcheck_recoverable_range *mc_recoverable_range;
static int mc_recoverable_range_len;

struct device_node *opal_node;
static DEFINE_SPINLOCK(opal_write_lock);
static unsigned int *opal_irqs;
static unsigned int opal_irq_count;
static ATOMIC_NOTIFIER_HEAD(opal_notifier_head);
static struct atomic_notifier_head opal_msg_notifier_head[OPAL_MSG_TYPE_MAX];
static DEFINE_SPINLOCK(opal_notifier_lock);
static uint64_t last_notified_mask = 0x0ul;
static atomic_t opal_notifier_hold = ATOMIC_INIT(0);
static uint32_t opal_heartbeat;

static void opal_reinit_cores(void)
{
	/* Do the actual re-init. This will clobber all FPRs, VRs, etc...
	 *
	 * It will preserve non volatile GPRs and HSPRG0/1. It will
	 * also restore HIDs and other SPRs to their original value
	 * but it might clobber a bunch.
	 */
#ifdef __BIG_ENDIAN__
	opal_reinit_cpus(OPAL_REINIT_CPUS_HILE_BE);
#else
	opal_reinit_cpus(OPAL_REINIT_CPUS_HILE_LE);
#endif
}

int __init early_init_dt_scan_opal(unsigned long node,
				   const char *uname, int depth, void *data)
{
	const void *basep, *entryp, *sizep;
	int basesz, entrysz, runtimesz;

	if (depth != 1 || strcmp(uname, "ibm,opal") != 0)
		return 0;

	basep = of_get_flat_dt_prop(node, "opal-base-address", &basesz);
	entryp = of_get_flat_dt_prop(node, "opal-entry-address", &entrysz);
	sizep = of_get_flat_dt_prop(node, "opal-runtime-size", &runtimesz);

	if (!basep || !entryp || !sizep)
		return 1;

	opal.base = of_read_number(basep, basesz/4);
	opal.entry = of_read_number(entryp, entrysz/4);
	opal.size = of_read_number(sizep, runtimesz/4);

	pr_debug("OPAL Base  = 0x%llx (basep=%p basesz=%d)\n",
		 opal.base, basep, basesz);
	pr_debug("OPAL Entry = 0x%llx (entryp=%p entrysz=%d)\n",
		 opal.entry, entryp, entrysz);
	pr_debug("OPAL Size  = 0x%llx (sizep=%p runtimesz=%d)\n",
		 opal.size, sizep, runtimesz);

	powerpc_firmware_features |= FW_FEATURE_OPAL;
	if (of_flat_dt_is_compatible(node, "ibm,opal-v3")) {
		powerpc_firmware_features |= FW_FEATURE_OPALv2;
		powerpc_firmware_features |= FW_FEATURE_OPALv3;
		pr_info("OPAL V3 detected !\n");
	} else if (of_flat_dt_is_compatible(node, "ibm,opal-v2")) {
		powerpc_firmware_features |= FW_FEATURE_OPALv2;
		pr_info("OPAL V2 detected !\n");
	} else {
		pr_info("OPAL V1 detected !\n");
	}

	/* Reinit all cores with the right endian */
	opal_reinit_cores();

	/* Restore some bits */
	if (cur_cpu_spec->cpu_restore)
		cur_cpu_spec->cpu_restore();

	return 1;
}

int __init early_init_dt_scan_recoverable_ranges(unsigned long node,
				   const char *uname, int depth, void *data)
{
	int i, psize, size;
	const __be32 *prop;

	if (depth != 1 || strcmp(uname, "ibm,opal") != 0)
		return 0;

	prop = of_get_flat_dt_prop(node, "mcheck-recoverable-ranges", &psize);

	if (!prop)
		return 1;

	pr_debug("Found machine check recoverable ranges.\n");

	/*
	 * Calculate number of available entries.
	 *
	 * Each recoverable address range entry is (start address, len,
	 * recovery address), 2 cells each for start and recovery address,
	 * 1 cell for len, totalling 5 cells per entry.
	 */
	mc_recoverable_range_len = psize / (sizeof(*prop) * 5);

	/* Sanity check */
	if (!mc_recoverable_range_len)
		return 1;

	/* Size required to hold all the entries. */
	size = mc_recoverable_range_len *
			sizeof(struct mcheck_recoverable_range);

	/*
	 * Allocate a buffer to hold the MC recoverable ranges. We would be
	 * accessing them in real mode, hence it needs to be within
	 * the RMO region.
	 */
	mc_recoverable_range = __va(memblock_alloc_base(size, __alignof__(u64),
							ppc64_rma_size));
	memset(mc_recoverable_range, 0, size);

	for (i = 0; i < mc_recoverable_range_len; i++) {
		mc_recoverable_range[i].start_addr =
					of_read_number(prop + (i * 5) + 0, 2);
		mc_recoverable_range[i].end_addr =
					mc_recoverable_range[i].start_addr +
					of_read_number(prop + (i * 5) + 2, 1);
		mc_recoverable_range[i].recover_addr =
					of_read_number(prop + (i * 5) + 3, 2);

		pr_debug("Machine check recoverable range: %llx..%llx: %llx\n",
				mc_recoverable_range[i].start_addr,
				mc_recoverable_range[i].end_addr,
				mc_recoverable_range[i].recover_addr);
	}
	return 1;
}

static int __init opal_register_exception_handlers(void)
{
#ifdef __BIG_ENDIAN__
	u64 glue;

	if (!(powerpc_firmware_features & FW_FEATURE_OPAL))
		return -ENODEV;

	/* Hookup some exception handlers except machine check. We use the
	 * fwnmi area at 0x7000 to provide the glue space to OPAL
	 */
	glue = 0x7000;

	/*
	 * Check if we are running on newer firmware that exports
	 * OPAL_HANDLE_HMI token. If yes, then don't ask OPAL to patch
	 * the HMI interrupt and we catch it directly in Linux.
	 *
	 * For older firmware (i.e. currently released POWER8 System Firmware
	 * as of today <= SV810_087), we fall back to the old behavior and let
	 * OPAL patch the HMI vector and handle it inside OPAL firmware.
	 *
	 * For newer firmware (in development/yet to be released) we will
	 * start catching/handling HMI directly in Linux.
	 */
	if (!opal_check_token(OPAL_HANDLE_HMI)) {
		pr_info("Old firmware detected, OPAL handles HMIs.\n");
		opal_register_exception_handler(
				OPAL_HYPERVISOR_MAINTENANCE_HANDLER,
				0, glue);
		glue += 128;
	}

	opal_register_exception_handler(OPAL_SOFTPATCH_HANDLER, 0, glue);
#endif

	return 0;
}
machine_early_initcall(powernv, opal_register_exception_handlers);

int opal_notifier_register(struct notifier_block *nb)
{
	if (!nb) {
		pr_warning("%s: Invalid argument (%p)\n",
			   __func__, nb);
		return -EINVAL;
	}

	atomic_notifier_chain_register(&opal_notifier_head, nb);
	return 0;
}
EXPORT_SYMBOL_GPL(opal_notifier_register);

int opal_notifier_unregister(struct notifier_block *nb)
{
	if (!nb) {
		pr_warning("%s: Invalid argument (%p)\n",
			   __func__, nb);
		return -EINVAL;
	}

	atomic_notifier_chain_unregister(&opal_notifier_head, nb);
	return 0;
}
EXPORT_SYMBOL_GPL(opal_notifier_unregister);

static void opal_do_notifier(uint64_t events)
{
	unsigned long flags;
	uint64_t changed_mask;

	if (atomic_read(&opal_notifier_hold))
		return;

	spin_lock_irqsave(&opal_notifier_lock, flags);
	changed_mask = last_notified_mask ^ events;
	last_notified_mask = events;
	spin_unlock_irqrestore(&opal_notifier_lock, flags);

	/*
	 * We feed the callback with both the event bits and the changed
	 * bits so it has enough information.
	 */
	atomic_notifier_call_chain(&opal_notifier_head,
				   events, (void *)changed_mask);
}

void opal_notifier_update_evt(uint64_t evt_mask,
			      uint64_t evt_val)
{
	unsigned long flags;

	spin_lock_irqsave(&opal_notifier_lock, flags);
	last_notified_mask &= ~evt_mask;
	last_notified_mask |= evt_val;
	spin_unlock_irqrestore(&opal_notifier_lock, flags);
}

void opal_notifier_enable(void)
{
	int64_t rc;
	__be64 evt = 0;

	atomic_set(&opal_notifier_hold, 0);

	/* Process pending events */
	rc = opal_poll_events(&evt);
	if (rc == OPAL_SUCCESS && evt)
		opal_do_notifier(be64_to_cpu(evt));
}

void opal_notifier_disable(void)
{
	atomic_set(&opal_notifier_hold, 1);
}

/*
 * Opal message notifier based on message type. Allow subscribers to get
 * notified for a specific message type.
 */
int opal_message_notifier_register(enum OpalMessageType msg_type,
					struct notifier_block *nb)
{
	if (!nb || msg_type >= OPAL_MSG_TYPE_MAX) {
		pr_warning("%s: Invalid arguments, msg_type:%d\n",
			   __func__, msg_type);
		return -EINVAL;
	}

	return atomic_notifier_chain_register(
				&opal_msg_notifier_head[msg_type], nb);
}

static void opal_message_do_notify(uint32_t msg_type, void *msg)
{
	/* notify subscribers */
	atomic_notifier_call_chain(&opal_msg_notifier_head[msg_type],
					msg_type, msg);
}

static void opal_handle_message(void)
{
	s64 ret;
	/*
	 * TODO: pre-allocate a message buffer depending on opal-msg-size
	 * value in /proc/device-tree.
	 */
	static struct opal_msg msg;
	u32 type;

	ret = opal_get_msg(__pa(&msg), sizeof(msg));
	/* No opal message pending. */
	if (ret == OPAL_RESOURCE)
		return;

	/* check for errors. */
	if (ret) {
		pr_warning("%s: Failed to retrieve opal message, err=%lld\n",
				__func__, ret);
		return;
	}

	type = be32_to_cpu(msg.msg_type);

	/* Sanity check */
	if (type >= OPAL_MSG_TYPE_MAX) {
		pr_warning("%s: Unknown message type: %u\n", __func__, type);
		return;
	}
	opal_message_do_notify(type, (void *)&msg);
}

static int opal_message_notify(struct notifier_block *nb,
			  unsigned long events, void *change)
{
	if (events & OPAL_EVENT_MSG_PENDING)
		opal_handle_message();
	return 0;
}

static struct notifier_block opal_message_nb = {
	.notifier_call	= opal_message_notify,
	.next		= NULL,
	.priority	= 0,
};

static int __init opal_message_init(void)
{
	int ret, i;

	for (i = 0; i < OPAL_MSG_TYPE_MAX; i++)
		ATOMIC_INIT_NOTIFIER_HEAD(&opal_msg_notifier_head[i]);

	ret = opal_notifier_register(&opal_message_nb);
	if (ret) {
		pr_err("%s: Can't register OPAL event notifier (%d)\n",
		       __func__, ret);
		return ret;
	}
	return 0;
}
machine_early_initcall(powernv, opal_message_init);

int opal_get_chars(uint32_t vtermno, char *buf, int count)
{
	s64 rc;
	__be64 evt, len;

	if (!opal.entry)
		return -ENODEV;
	opal_poll_events(&evt);
	if ((be64_to_cpu(evt) & OPAL_EVENT_CONSOLE_INPUT) == 0)
		return 0;
	len = cpu_to_be64(count);
	rc = opal_console_read(vtermno, &len, buf);
	if (rc == OPAL_SUCCESS)
		return be64_to_cpu(len);
	return 0;
}

int opal_put_chars(uint32_t vtermno, const char *data, int total_len)
{
	int written = 0;
	__be64 olen;
	s64 len, rc;
	unsigned long flags;
	__be64 evt;

	if (!opal.entry)
		return -ENODEV;

	/* We want put_chars to be atomic to avoid mangling of hvsi
	 * packets. To do that, we first test for room and return
	 * -EAGAIN if there isn't enough.
	 *
	 * Unfortunately, opal_console_write_buffer_space() doesn't
	 * appear to work on opal v1, so we just assume there is
	 * enough room and be done with it
	 */
	spin_lock_irqsave(&opal_write_lock, flags);
	if (firmware_has_feature(FW_FEATURE_OPALv2)) {
		rc = opal_console_write_buffer_space(vtermno, &olen);
		len = be64_to_cpu(olen);
		if (rc || len < total_len) {
			spin_unlock_irqrestore(&opal_write_lock, flags);
			/* Closed -> drop characters */
			if (rc)
				return total_len;
			opal_poll_events(NULL);
			return -EAGAIN;
		}
	}

	/* We still try to handle partial completions, though they
	 * should no longer happen.
	 */
	rc = OPAL_BUSY;
	while (total_len > 0 && (rc == OPAL_BUSY ||
				 rc == OPAL_BUSY_EVENT || rc == OPAL_SUCCESS)) {
		olen = cpu_to_be64(total_len);
		rc = opal_console_write(vtermno, &olen, data);
		len = be64_to_cpu(olen);

		/* Closed or other error drop */
		if (rc != OPAL_SUCCESS && rc != OPAL_BUSY &&
		    rc != OPAL_BUSY_EVENT) {
			written = total_len;
			break;
		}
		if (rc == OPAL_SUCCESS) {
			total_len -= len;
			data += len;
			written += len;
		}
		/* This is a bit nasty but we need that for the console to
		 * flush when there aren't any interrupts.
		 * We will clean things a bit later to limit that to the
		 * synchronous paths such as the kernel console and xmon/udbg.
		 */
		do
			opal_poll_events(&evt);
		while (rc == OPAL_SUCCESS &&
			(be64_to_cpu(evt) & OPAL_EVENT_CONSOLE_OUTPUT));
	}
	spin_unlock_irqrestore(&opal_write_lock, flags);
	return written;
}

static int opal_recover_mce(struct pt_regs *regs,
					struct machine_check_event *evt)
{
	int recovered = 0;
	uint64_t ea = get_mce_fault_addr(evt);

	if (!(regs->msr & MSR_RI)) {
		/* If MSR_RI isn't set, we cannot recover */
		recovered = 0;
	} else if (evt->disposition == MCE_DISPOSITION_RECOVERED) {
		/* Platform corrected itself */
		recovered = 1;
	} else if (ea && !is_kernel_addr(ea)) {
		/*
		 * Faulting address is not in kernel text. We should be fine.
		 * We need to find which process uses this address.
		 * For now, kill the task if we have received exception when
		 * in userspace.
		 *
		 * TODO: Queue up this address for hwpoisoning later.
		 */
		if (user_mode(regs) && !is_global_init(current)) {
			_exception(SIGBUS, regs, BUS_MCEERR_AR, regs->nip);
			recovered = 1;
		} else
			recovered = 0;
	} else if (user_mode(regs) && !is_global_init(current) &&
		   evt->severity == MCE_SEV_ERROR_SYNC) {
		/*
		 * If we have received a synchronous error when in userspace
		 * kill the task.
		 */
		_exception(SIGBUS, regs, BUS_MCEERR_AR, regs->nip);
		recovered = 1;
	}
	return recovered;
}

int opal_machine_check(struct pt_regs *regs)
{
	struct machine_check_event evt;

	if (!get_mce_event(&evt, MCE_EVENT_RELEASE))
		return 0;

	/* Print things out */
	if (evt.version != MCE_V1) {
		pr_err("Machine Check Exception, Unknown event version %d !\n",
		       evt.version);
		return 0;
	}
	machine_check_print_event_info(&evt);

	if (opal_recover_mce(regs, &evt))
		return 1;
	return 0;
}

/* Early hmi handler called in real mode. */
int opal_hmi_exception_early(struct pt_regs *regs)
{
	s64 rc;

	/*
	 * call opal hmi handler. Pass paca address as token.
	 * The return value OPAL_SUCCESS is an indication that there is
	 * an HMI event generated waiting to be pulled by Linux.
	 */
	rc = opal_handle_hmi();
	if (rc == OPAL_SUCCESS) {
		local_paca->hmi_event_available = 1;
		return 1;
	}
	return 0;
}

/* HMI exception handler called in virtual mode during check_irq_replay. */
int opal_handle_hmi_exception(struct pt_regs *regs)
{
	s64 rc;
	__be64 evt = 0;

	/*
	 * Check if HMI event is available.
	 * If yes, then call opal_poll_events to pull opal messages and
	 * process them.
	 */
	if (!local_paca->hmi_event_available)
		return 0;

	local_paca->hmi_event_available = 0;
	rc = opal_poll_events(&evt);
	if (rc == OPAL_SUCCESS && evt)
		opal_do_notifier(be64_to_cpu(evt));

	return 1;
}

static uint64_t find_recovery_address(uint64_t nip)
{
	int i;

	for (i = 0; i < mc_recoverable_range_len; i++)
		if ((nip >= mc_recoverable_range[i].start_addr) &&
		    (nip < mc_recoverable_range[i].end_addr))
			return mc_recoverable_range[i].recover_addr;
	return 0;
}

bool opal_mce_check_early_recovery(struct pt_regs *regs)
{
	uint64_t recover_addr = 0;

	if (!opal.base || !opal.size)
		goto out;

	if ((regs->nip >= opal.base) &&
			(regs->nip <= (opal.base + opal.size)))
		recover_addr = find_recovery_address(regs->nip);

	/*
	 * Setup regs->nip to rfi into fixup address.
	 */
	if (recover_addr)
		regs->nip = recover_addr;

out:
	return !!recover_addr;
}

static irqreturn_t opal_interrupt(int irq, void *data)
{
	__be64 events;

	opal_handle_interrupt(virq_to_hw(irq), &events);

	opal_do_notifier(be64_to_cpu(events));

	return IRQ_HANDLED;
}

static int opal_sysfs_init(void)
{
	opal_kobj = kobject_create_and_add("opal", firmware_kobj);
	if (!opal_kobj) {
		pr_warn("kobject_create_and_add opal failed\n");
		return -ENOMEM;
	}

	return 0;
}

static ssize_t symbol_map_read(struct file *fp, struct kobject *kobj,
			       struct bin_attribute *bin_attr,
			       char *buf, loff_t off, size_t count)
{
	return memory_read_from_buffer(buf, count, &off, bin_attr->private,
				       bin_attr->size);
}

static BIN_ATTR_RO(symbol_map, 0);

static void opal_export_symmap(void)
{
	const __be64 *syms;
	unsigned int size;
	struct device_node *fw;
	int rc;

	fw = of_find_node_by_path("/ibm,opal/firmware");
	if (!fw)
		return;
	syms = of_get_property(fw, "symbol-map", &size);
	if (!syms || size != 2 * sizeof(__be64))
		return;

	/* Setup attributes */
	bin_attr_symbol_map.private = __va(be64_to_cpu(syms[0]));
	bin_attr_symbol_map.size = be64_to_cpu(syms[1]);

	rc = sysfs_create_bin_file(opal_kobj, &bin_attr_symbol_map);
	if (rc)
		pr_warn("Error %d creating OPAL symbols file\n", rc);
}

static void __init opal_dump_region_init(void)
{
	void *addr;
	uint64_t size;
	int rc;

	/* Register kernel log buffer */
	addr = log_buf_addr_get();
	if (addr == NULL)
		return;

	size = log_buf_len_get();
	if (size == 0)
		return;

	rc = opal_register_dump_region(OPAL_DUMP_REGION_LOG_BUF,
				       __pa(addr), size);
	/* Don't warn if this is just an older OPAL that doesn't
	 * know about that call
	 */
	if (rc && rc != OPAL_UNSUPPORTED)
		pr_warn("DUMP: Failed to register kernel log buffer. "
			"rc = %d\n", rc);
}

static void opal_ipmi_init(struct device_node *opal_node)
{
	struct device_node *np;

	for_each_child_of_node(opal_node, np)
		if (of_device_is_compatible(np, "ibm,opal-ipmi"))
			of_platform_device_create(np, NULL, NULL);
}

static void opal_i2c_create_devs(void)
{
	struct device_node *np;

	for_each_compatible_node(np, NULL, "ibm,opal-i2c")
		of_platform_device_create(np, NULL, NULL);
}

static void __init opal_irq_init(struct device_node *dn)
{
	const __be32 *irqs;
	int i, irqlen;

	/* Get interrupt property */
	irqs = of_get_property(opal_node, "opal-interrupts", &irqlen);
	opal_irq_count = irqs ? (irqlen / 4) : 0;
	pr_debug("Found %d interrupts reserved for OPAL\n", opal_irq_count);
	if (!opal_irq_count)
		return;

	/* Install interrupt handlers */
	opal_irqs = kzalloc(opal_irq_count * sizeof(unsigned int), GFP_KERNEL);
	for (i = 0; irqs && i < opal_irq_count; i++, irqs++) {
		unsigned int irq, virq;
		int rc;

		/* Get hardware and virtual IRQ */
		irq = be32_to_cpup(irqs);
		virq = irq_create_mapping(NULL, irq);
		if (virq == NO_IRQ) {
			pr_warn("Failed to map irq 0x%x\n", irq);
			continue;
		}

		/* Install interrupt handler */
		rc = request_irq(virq, opal_interrupt, 0, "opal", NULL);
		if (rc) {
			irq_dispose_mapping(virq);
			pr_warn("Error %d requesting irq %d (0x%x)\n",
				rc, virq, irq);
			continue;
		}

		/* Cache IRQ */
		opal_irqs[i] = virq;
	}
}

static int kopald(void *unused)
{
	set_freezable();
	do {
		try_to_freeze();
		opal_poll_events(NULL);
		msleep_interruptible(opal_heartbeat);
	} while (!kthread_should_stop());

	return 0;
}

static void opal_init_heartbeat(void)
{
	/* Old firmware, we assume the HVC heartbeat is sufficient */
	if (of_property_read_u32(opal_node, "ibm,heartbeat-ms",
				 &opal_heartbeat) != 0)
		opal_heartbeat = 0;

	if (opal_heartbeat)
		kthread_run(kopald, NULL, "kopald");
}

static int __init opal_init(void)
{
	struct device_node *np, *consoles;
	int rc;

	opal_node = of_find_node_by_path("/ibm,opal");
	if (!opal_node) {
		pr_warn("Device node not found\n");
		return -ENODEV;
	}

	/* Register OPAL consoles if any ports */
	if (firmware_has_feature(FW_FEATURE_OPALv2))
		consoles = of_find_node_by_path("/ibm,opal/consoles");
	else
		consoles = of_node_get(opal_node);
	if (consoles) {
		for_each_child_of_node(consoles, np) {
			if (strcmp(np->name, "serial"))
				continue;
			of_platform_device_create(np, NULL, NULL);
		}
		of_node_put(consoles);
	}

	/* Create i2c platform devices */
	opal_i2c_create_devs();

	/* Setup a heartbeat thread if requested by OPAL */
	opal_init_heartbeat();

	/* Find all OPAL interrupts and request them */
	opal_irq_init(opal_node);

	/* Create "opal" kobject under /sys/firmware */
	rc = opal_sysfs_init();
	if (rc == 0) {
		/* Export symbol map to userspace */
		opal_export_symmap();
		/* Setup dump region interface */
		opal_dump_region_init();
		/* Setup error log interface */
		rc = opal_elog_init();
		/* Setup code update interface */
		opal_flash_init();
		/* Setup platform dump extract interface */
		opal_platform_dump_init();
		/* Setup system parameters interface */
		opal_sys_param_init();
		/* Setup message log interface. */
		opal_msglog_init();
	}

	/* Initialize OPAL IPMI backend */
	opal_ipmi_init(opal_node);

	return 0;
}
machine_subsys_initcall(powernv, opal_init);

void opal_shutdown(void)
{
	unsigned int i;
	long rc = OPAL_BUSY;

	/* First free interrupts, which will also mask them */
	for (i = 0; i < opal_irq_count; i++) {
		if (opal_irqs[i])
			free_irq(opal_irqs[i], NULL);
		opal_irqs[i] = 0;
	}

	/*
	 * Then sync with OPAL to ensure that anything that can
	 * potentially write to our memory has completed, such
	 * as an ongoing dump retrieval.
	 */
	while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
		rc = opal_sync_host_reboot();
		if (rc == OPAL_BUSY)
			opal_poll_events(NULL);
		else
			mdelay(10);
	}

	/* Unregister memory dump region */
	opal_unregister_dump_region(OPAL_DUMP_REGION_LOG_BUF);
}

/* Export this so that test modules can use it */
EXPORT_SYMBOL_GPL(opal_invalid_call);
EXPORT_SYMBOL_GPL(opal_ipmi_send);
EXPORT_SYMBOL_GPL(opal_ipmi_recv);

/* Convert a region of vmalloc memory to an opal sg list */
struct opal_sg_list *opal_vmalloc_to_sg_list(void *vmalloc_addr,
					     unsigned long vmalloc_size)
{
	struct opal_sg_list *sg, *first = NULL;
	unsigned long i = 0;

	sg = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!sg)
		goto nomem;

	first = sg;

	while (vmalloc_size > 0) {
		uint64_t data = vmalloc_to_pfn(vmalloc_addr) << PAGE_SHIFT;
		uint64_t length = min(vmalloc_size, PAGE_SIZE);

		sg->entry[i].data = cpu_to_be64(data);
		sg->entry[i].length = cpu_to_be64(length);
		i++;

		if (i >= SG_ENTRIES_PER_NODE) {
			struct opal_sg_list *next;

			next = kzalloc(PAGE_SIZE, GFP_KERNEL);
			if (!next)
				goto nomem;

			sg->length = cpu_to_be64(
					i * sizeof(struct opal_sg_entry) + 16);
			i = 0;
			sg->next = cpu_to_be64(__pa(next));
			sg = next;
		}

		vmalloc_addr += length;
		vmalloc_size -= length;
	}

	sg->length = cpu_to_be64(i * sizeof(struct opal_sg_entry) + 16);

	return first;

nomem:
	pr_err("%s : Failed to allocate memory\n", __func__);
	opal_free_sg_list(first);
	return NULL;
}

void opal_free_sg_list(struct opal_sg_list *sg)
{
	while (sg) {
		uint64_t next = be64_to_cpu(sg->next);

		kfree(sg);

		if (next)
			sg = __va(next);
		else
			sg = NULL;
	}
}

EXPORT_SYMBOL_GPL(opal_poll_events);
EXPORT_SYMBOL_GPL(opal_rtc_read);
EXPORT_SYMBOL_GPL(opal_rtc_write);
EXPORT_SYMBOL_GPL(opal_tpo_read);
EXPORT_SYMBOL_GPL(opal_tpo_write);
EXPORT_SYMBOL_GPL(opal_i2c_request);
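
/*
 * Illustrative sketch only (not part of the original file): one plausible
 * way a caller might wrap a vmalloc'ed buffer in an OPAL scatter-gather
 * list using opal_vmalloc_to_sg_list()/opal_free_sg_list() above, then
 * hand its physical address to firmware. The function name
 * example_send_image() and the commented-out firmware call are
 * hypothetical placeholders, not an existing API.
 *
 *	static int example_send_image(void *image, unsigned long len)
 *	{
 *		struct opal_sg_list *list;
 *
 *		list = opal_vmalloc_to_sg_list(image, len);
 *		if (!list)
 *			return -ENOMEM;
 *
 *		// An OPAL call would typically be passed the physical
 *		// address of the first node, e.g. __pa(list).
 *
 *		opal_free_sg_list(list);
 *		return 0;
 *	}
 */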