/*
 * PowerNV OPAL high level interfaces
 *
 * Copyright 2011 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#undef DEBUG

#include <linux/types.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_platform.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/kobject.h>
#include <linux/delay.h>
#include <linux/memblock.h>

#include <asm/machdep.h>
#include <asm/opal.h>
#include <asm/firmware.h>
#include <asm/mce.h>

#include "powernv.h"

/* /sys/firmware/opal */
struct kobject *opal_kobj;

struct opal {
	u64 base;
	u64 entry;
	u64 size;
} opal;

struct mcheck_recoverable_range {
	u64 start_addr;
	u64 end_addr;
	u64 recover_addr;
};

static struct mcheck_recoverable_range *mc_recoverable_range;
static int mc_recoverable_range_len;

struct device_node *opal_node;
static DEFINE_SPINLOCK(opal_write_lock);
extern u64 opal_mc_secondary_handler[];
static unsigned int *opal_irqs;
static unsigned int opal_irq_count;
static ATOMIC_NOTIFIER_HEAD(opal_notifier_head);
static struct atomic_notifier_head opal_msg_notifier_head[OPAL_MSG_TYPE_MAX];
static DEFINE_SPINLOCK(opal_notifier_lock);
static uint64_t last_notified_mask = 0x0ul;
static atomic_t opal_notifier_hold = ATOMIC_INIT(0);

static void opal_reinit_cores(void)
{
	/* Do the actual re-init. This will clobber all FPRs, VRs, etc...
	 *
	 * It will preserve non-volatile GPRs and HSPRG0/1. It will
	 * also restore HIDs and other SPRs to their original value
	 * but it might clobber a bunch.
	 */
#ifdef __BIG_ENDIAN__
	opal_reinit_cpus(OPAL_REINIT_CPUS_HILE_BE);
#else
	opal_reinit_cpus(OPAL_REINIT_CPUS_HILE_LE);
#endif
}
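
/*
 * early_init_dt_scan_opal() below consumes the "ibm,opal" node that
 * firmware adds to the flattened device tree. For illustration only
 * (the addresses are made up; real values come from firmware), the
 * node looks roughly like:
 *
 *	ibm,opal {
 *		compatible = "ibm,opal-v3", "ibm,opal-v2";
 *		opal-base-address = <0x0 0x30000000>;
 *		opal-entry-address = <0x0 0x30002780>;
 *		opal-runtime-size = <0x0 0x10000000>;
 *	};
 */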
int __init early_init_dt_scan_opal(unsigned long node,
				   const char *uname, int depth, void *data)
{
	const void *basep, *entryp, *sizep;
	int basesz, entrysz, runtimesz;

	if (depth != 1 || strcmp(uname, "ibm,opal") != 0)
		return 0;

	basep = of_get_flat_dt_prop(node, "opal-base-address", &basesz);
	entryp = of_get_flat_dt_prop(node, "opal-entry-address", &entrysz);
	sizep = of_get_flat_dt_prop(node, "opal-runtime-size", &runtimesz);

	if (!basep || !entryp || !sizep)
		return 1;

	opal.base = of_read_number(basep, basesz/4);
	opal.entry = of_read_number(entryp, entrysz/4);
	opal.size = of_read_number(sizep, runtimesz/4);

	pr_debug("OPAL Base  = 0x%llx (basep=%p basesz=%d)\n",
		 opal.base, basep, basesz);
	pr_debug("OPAL Entry = 0x%llx (entryp=%p entrysz=%d)\n",
		 opal.entry, entryp, entrysz);
	pr_debug("OPAL Size  = 0x%llx (sizep=%p runtimesz=%d)\n",
		 opal.size, sizep, runtimesz);

	powerpc_firmware_features |= FW_FEATURE_OPAL;
	if (of_flat_dt_is_compatible(node, "ibm,opal-v3")) {
		powerpc_firmware_features |= FW_FEATURE_OPALv2;
		powerpc_firmware_features |= FW_FEATURE_OPALv3;
		pr_info("OPAL V3 detected !\n");
	} else if (of_flat_dt_is_compatible(node, "ibm,opal-v2")) {
		powerpc_firmware_features |= FW_FEATURE_OPALv2;
		pr_info("OPAL V2 detected !\n");
	} else {
		pr_info("OPAL V1 detected !\n");
	}

	/* Reinit all cores with the right endianness */
	opal_reinit_cores();

	/* Restore some bits */
	if (cur_cpu_spec->cpu_restore)
		cur_cpu_spec->cpu_restore();

	return 1;
}

int __init early_init_dt_scan_recoverable_ranges(unsigned long node,
				   const char *uname, int depth, void *data)
{
	int i, psize, size;
	const __be32 *prop;

	if (depth != 1 || strcmp(uname, "ibm,opal") != 0)
		return 0;

	prop = of_get_flat_dt_prop(node, "mcheck-recoverable-ranges", &psize);

	if (!prop)
		return 1;

	pr_debug("Found machine check recoverable ranges.\n");

	/*
	 * Calculate the number of available entries.
	 *
	 * Each recoverable address range entry is (start address, len,
	 * recovery address): 2 cells each for the start and recovery
	 * addresses, 1 cell for the len, totalling 5 cells per entry.
	 */
	mc_recoverable_range_len = psize / (sizeof(*prop) * 5);

	/* Sanity check */
	if (!mc_recoverable_range_len)
		return 1;

	/* Size required to hold all the entries. */
	size = mc_recoverable_range_len *
			sizeof(struct mcheck_recoverable_range);

	/*
	 * Allocate a buffer to hold the MC recoverable ranges. We will be
	 * accessing them in real mode, hence the buffer needs to be within
	 * the RMO region.
	 */
	mc_recoverable_range = __va(memblock_alloc_base(size, __alignof__(u64),
							ppc64_rma_size));
	memset(mc_recoverable_range, 0, size);

	for (i = 0; i < mc_recoverable_range_len; i++) {
		mc_recoverable_range[i].start_addr =
					of_read_number(prop + (i * 5) + 0, 2);
		mc_recoverable_range[i].end_addr =
					mc_recoverable_range[i].start_addr +
					of_read_number(prop + (i * 5) + 2, 1);
		mc_recoverable_range[i].recover_addr =
					of_read_number(prop + (i * 5) + 3, 2);

		pr_debug("Machine check recoverable range: %llx..%llx: %llx\n",
			 mc_recoverable_range[i].start_addr,
			 mc_recoverable_range[i].end_addr,
			 mc_recoverable_range[i].recover_addr);
	}
	return 1;
}
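
/*
 * Worked example for the parser above, with made-up numbers: a property
 * holding the five cells
 *
 *	<0x0 0x300c1988  0x4c  0x0 0x300c2824>
 *
 * decodes to one entry with start_addr = 0x300c1988,
 * end_addr = 0x300c1988 + 0x4c = 0x300c19d4 and
 * recover_addr = 0x300c2824.
 */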
static int __init opal_register_exception_handlers(void)
{
#ifdef __BIG_ENDIAN__
	u64 glue;

	if (!(powerpc_firmware_features & FW_FEATURE_OPAL))
		return -ENODEV;

	/* Hookup some exception handlers except machine check. We use the
	 * fwnmi area at 0x7000 to provide the glue space to OPAL
	 */
	glue = 0x7000;

	/*
	 * Check if we are running on newer firmware that exports the
	 * OPAL_HANDLE_HMI token. If yes, then don't ask OPAL to patch
	 * the HMI interrupt: we catch it directly in Linux.
	 *
	 * For older firmware (i.e. currently released POWER8 System Firmware
	 * as of today, <= SV810_087), we fall back to the old behavior and
	 * let OPAL patch the HMI vector and handle it inside OPAL firmware.
	 *
	 * For newer firmware (in development/yet to be released) we will
	 * start catching/handling HMI directly in Linux.
	 */
	if (!opal_check_token(OPAL_HANDLE_HMI)) {
		pr_info("opal: Old firmware detected, OPAL handles HMIs.\n");
		opal_register_exception_handler(
				OPAL_HYPERVISOR_MAINTENANCE_HANDLER,
				0, glue);
		glue += 128;
	}

	opal_register_exception_handler(OPAL_SOFTPATCH_HANDLER, 0, glue);
#endif

	return 0;
}
machine_early_initcall(powernv, opal_register_exception_handlers);

int opal_notifier_register(struct notifier_block *nb)
{
	if (!nb) {
		pr_warning("%s: Invalid argument (%p)\n",
			   __func__, nb);
		return -EINVAL;
	}

	atomic_notifier_chain_register(&opal_notifier_head, nb);
	return 0;
}
EXPORT_SYMBOL_GPL(opal_notifier_register);

int opal_notifier_unregister(struct notifier_block *nb)
{
	if (!nb) {
		pr_warning("%s: Invalid argument (%p)\n",
			   __func__, nb);
		return -EINVAL;
	}

	atomic_notifier_chain_unregister(&opal_notifier_head, nb);
	return 0;
}
EXPORT_SYMBOL_GPL(opal_notifier_unregister);

static void opal_do_notifier(uint64_t events)
{
	unsigned long flags;
	uint64_t changed_mask;

	if (atomic_read(&opal_notifier_hold))
		return;

	spin_lock_irqsave(&opal_notifier_lock, flags);
	changed_mask = last_notified_mask ^ events;
	last_notified_mask = events;
	spin_unlock_irqrestore(&opal_notifier_lock, flags);

	/*
	 * Pass both the event bits and the changed bits so the
	 * callbacks have enough information.
	 */
	atomic_notifier_call_chain(&opal_notifier_head,
				   events, (void *)changed_mask);
}
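
/*
 * A minimal sketch of how a consumer subscribes to the event notifier
 * interface above; the callback name and the event bit tested are
 * illustrative, not something this file defines:
 *
 *	static int my_event_notify(struct notifier_block *nb,
 *				   unsigned long events, void *change)
 *	{
 *		if (events & OPAL_EVENT_ERROR_LOG_AVAIL)
 *			...	kick a workqueue, etc.
 *		return 0;
 *	}
 *
 *	static struct notifier_block my_event_nb = {
 *		.notifier_call	= my_event_notify,
 *	};
 *
 *	...
 *	opal_notifier_register(&my_event_nb);
 */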
void opal_notifier_update_evt(uint64_t evt_mask,
			      uint64_t evt_val)
{
	unsigned long flags;

	spin_lock_irqsave(&opal_notifier_lock, flags);
	last_notified_mask &= ~evt_mask;
	last_notified_mask |= evt_val;
	spin_unlock_irqrestore(&opal_notifier_lock, flags);
}

void opal_notifier_enable(void)
{
	int64_t rc;
	__be64 evt = 0;

	atomic_set(&opal_notifier_hold, 0);

	/* Process pending events */
	rc = opal_poll_events(&evt);
	if (rc == OPAL_SUCCESS && evt)
		opal_do_notifier(be64_to_cpu(evt));
}

void opal_notifier_disable(void)
{
	atomic_set(&opal_notifier_hold, 1);
}

/*
 * OPAL message notifier based on message type. Allows subscribers to get
 * notified for a specific message type.
 */
int opal_message_notifier_register(enum OpalMessageType msg_type,
				   struct notifier_block *nb)
{
	if (!nb) {
		pr_warning("%s: Invalid argument (%p)\n",
			   __func__, nb);
		return -EINVAL;
	}
	if (msg_type >= OPAL_MSG_TYPE_MAX) {
		pr_warning("%s: Invalid message type argument (%d)\n",
			   __func__, msg_type);
		return -EINVAL;
	}
	return atomic_notifier_chain_register(
				&opal_msg_notifier_head[msg_type], nb);
}

static void opal_message_do_notify(uint32_t msg_type, void *msg)
{
	/* notify subscribers */
	atomic_notifier_call_chain(&opal_msg_notifier_head[msg_type],
					msg_type, msg);
}

static void opal_handle_message(void)
{
	s64 ret;
	/*
	 * TODO: pre-allocate a message buffer depending on opal-msg-size
	 * value in /proc/device-tree.
	 */
	static struct opal_msg msg;
	u32 type;

	ret = opal_get_msg(__pa(&msg), sizeof(msg));
	/* No opal message pending. */
	if (ret == OPAL_RESOURCE)
		return;

	/* check for errors. */
	if (ret) {
		pr_warning("%s: Failed to retrieve opal message, err=%lld\n",
			   __func__, ret);
		return;
	}

	type = be32_to_cpu(msg.msg_type);

	/* Sanity check: the notifier head array has OPAL_MSG_TYPE_MAX
	 * entries, so the type must be strictly below that.
	 */
	if (type >= OPAL_MSG_TYPE_MAX) {
		pr_warning("%s: Unknown message type: %u\n", __func__, type);
		return;
	}
	opal_message_do_notify(type, (void *)&msg);
}
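
/*
 * A minimal sketch of a per-type subscriber for
 * opal_message_notifier_register() above; the names below are
 * illustrative, not part of this file:
 *
 *	static int epow_notify(struct notifier_block *nb,
 *			       unsigned long msg_type, void *msg)
 *	{
 *		struct opal_msg *m = msg;
 *
 *		...	act on be64_to_cpu(m->params[0]), etc.
 *		return 0;
 *	}
 *
 *	static struct notifier_block epow_nb = {
 *		.notifier_call	= epow_notify,
 *	};
 *
 *	...
 *	opal_message_notifier_register(OPAL_MSG_EPOW, &epow_nb);
 */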
static int opal_message_notify(struct notifier_block *nb,
			       unsigned long events, void *change)
{
	if (events & OPAL_EVENT_MSG_PENDING)
		opal_handle_message();
	return 0;
}

static struct notifier_block opal_message_nb = {
	.notifier_call	= opal_message_notify,
	.next		= NULL,
	.priority	= 0,
};

static int __init opal_message_init(void)
{
	int ret, i;

	for (i = 0; i < OPAL_MSG_TYPE_MAX; i++)
		ATOMIC_INIT_NOTIFIER_HEAD(&opal_msg_notifier_head[i]);

	ret = opal_notifier_register(&opal_message_nb);
	if (ret) {
		pr_err("%s: Can't register OPAL event notifier (%d)\n",
		       __func__, ret);
		return ret;
	}
	return 0;
}
machine_early_initcall(powernv, opal_message_init);

int opal_get_chars(uint32_t vtermno, char *buf, int count)
{
	s64 rc;
	__be64 evt, len;

	if (!opal.entry)
		return -ENODEV;
	opal_poll_events(&evt);
	if ((be64_to_cpu(evt) & OPAL_EVENT_CONSOLE_INPUT) == 0)
		return 0;
	len = cpu_to_be64(count);
	rc = opal_console_read(vtermno, &len, buf);
	if (rc == OPAL_SUCCESS)
		return be64_to_cpu(len);
	return 0;
}

int opal_put_chars(uint32_t vtermno, const char *data, int total_len)
{
	int written = 0;
	__be64 olen;
	s64 len, rc;
	unsigned long flags;
	__be64 evt;

	if (!opal.entry)
		return -ENODEV;

	/* We want put_chars to be atomic to avoid mangling of hvsi
	 * packets. To do that, we first test for room and return
	 * -EAGAIN if there isn't enough.
	 *
	 * Unfortunately, opal_console_write_buffer_space() doesn't
	 * appear to work on OPAL v1, so we just assume there is
	 * enough room and are done with it.
	 */
	spin_lock_irqsave(&opal_write_lock, flags);
	if (firmware_has_feature(FW_FEATURE_OPALv2)) {
		rc = opal_console_write_buffer_space(vtermno, &olen);
		len = be64_to_cpu(olen);
		if (rc || len < total_len) {
			spin_unlock_irqrestore(&opal_write_lock, flags);
			/* Closed -> drop characters */
			if (rc)
				return total_len;
			opal_poll_events(NULL);
			return -EAGAIN;
		}
	}

	/* We still try to handle partial completions, though they
	 * should no longer happen.
	 */
	rc = OPAL_BUSY;
	while (total_len > 0 && (rc == OPAL_BUSY ||
				 rc == OPAL_BUSY_EVENT || rc == OPAL_SUCCESS)) {
		olen = cpu_to_be64(total_len);
		rc = opal_console_write(vtermno, &olen, data);
		len = be64_to_cpu(olen);

		/* Closed or other error: drop */
		if (rc != OPAL_SUCCESS && rc != OPAL_BUSY &&
		    rc != OPAL_BUSY_EVENT) {
			written = total_len;
			break;
		}
		if (rc == OPAL_SUCCESS) {
			total_len -= len;
			data += len;
			written += len;
		}
		/* This is a bit nasty but we need that for the console to
		 * flush when there aren't any interrupts. We will clean
		 * things up a bit later to limit that to synchronous paths
		 * such as the kernel console and xmon/udbg.
		 */
		do
			opal_poll_events(&evt);
		while (rc == OPAL_SUCCESS &&
			(be64_to_cpu(evt) & OPAL_EVENT_CONSOLE_OUTPUT));
	}
	spin_unlock_irqrestore(&opal_write_lock, flags);
	return written;
}
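
/*
 * Sketch of how a console caller is expected to handle the -EAGAIN
 * contract of opal_put_chars() above (the names are illustrative):
 *
 *	n = opal_put_chars(vtermno, buf, len);
 *	if (n == -EAGAIN)
 *		...	retry later, e.g. from the hvc poll path
 *	else
 *		buf += n;  len -= n;
 */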
static int opal_recover_mce(struct pt_regs *regs,
			    struct machine_check_event *evt)
{
	int recovered = 0;
	uint64_t ea = get_mce_fault_addr(evt);

	if (!(regs->msr & MSR_RI)) {
		/* If MSR_RI isn't set, we cannot recover */
		recovered = 0;
	} else if (evt->disposition == MCE_DISPOSITION_RECOVERED) {
		/* Platform corrected itself */
		recovered = 1;
	} else if (ea && !is_kernel_addr(ea)) {
		/*
		 * The faulting address is not in kernel text. We should be
		 * fine. We need to find which process uses this address.
		 * For now, kill the task if we received the exception
		 * while in userspace.
		 *
		 * TODO: Queue up this address for hwpoisoning later.
		 */
		if (user_mode(regs) && !is_global_init(current)) {
			_exception(SIGBUS, regs, BUS_MCEERR_AR, regs->nip);
			recovered = 1;
		} else
			recovered = 0;
	} else if (user_mode(regs) && !is_global_init(current) &&
		   evt->severity == MCE_SEV_ERROR_SYNC) {
		/*
		 * If we received a synchronous error while in userspace,
		 * kill the task.
		 */
		_exception(SIGBUS, regs, BUS_MCEERR_AR, regs->nip);
		recovered = 1;
	}
	return recovered;
}

int opal_machine_check(struct pt_regs *regs)
{
	struct machine_check_event evt;

	if (!get_mce_event(&evt, MCE_EVENT_RELEASE))
		return 0;

	/* Print things out */
	if (evt.version != MCE_V1) {
		pr_err("Machine Check Exception, Unknown event version %d !\n",
		       evt.version);
		return 0;
	}
	machine_check_print_event_info(&evt);

	if (opal_recover_mce(regs, &evt))
		return 1;
	return 0;
}

/* Early HMI handler called in real mode. */
int opal_hmi_exception_early(struct pt_regs *regs)
{
	s64 rc;

	/*
	 * Call the OPAL HMI handler. A return value of OPAL_SUCCESS
	 * indicates that there is an HMI event waiting to be pulled
	 * by Linux.
	 */
	rc = opal_handle_hmi();
	if (rc == OPAL_SUCCESS) {
		local_paca->hmi_event_available = 1;
		return 1;
	}
	return 0;
}

/* HMI exception handler called in virtual mode during check_irq_replay. */
int opal_handle_hmi_exception(struct pt_regs *regs)
{
	s64 rc;
	__be64 evt = 0;

	/*
	 * Check if an HMI event is available. If yes, call
	 * opal_poll_events() to pull OPAL messages and process them.
	 */
	if (!local_paca->hmi_event_available)
		return 0;

	local_paca->hmi_event_available = 0;
	rc = opal_poll_events(&evt);
	if (rc == OPAL_SUCCESS && evt)
		opal_do_notifier(be64_to_cpu(evt));

	return 1;
}

static uint64_t find_recovery_address(uint64_t nip)
{
	int i;

	for (i = 0; i < mc_recoverable_range_len; i++)
		if ((nip >= mc_recoverable_range[i].start_addr) &&
		    (nip < mc_recoverable_range[i].end_addr))
			return mc_recoverable_range[i].recover_addr;
	return 0;
}

bool opal_mce_check_early_recovery(struct pt_regs *regs)
{
	uint64_t recover_addr = 0;

	if (!opal.base || !opal.size)
		goto out;

	if ((regs->nip >= opal.base) &&
	    (regs->nip <= (opal.base + opal.size)))
		recover_addr = find_recovery_address(regs->nip);

	/*
	 * Set regs->nip up to rfi into the fixup address.
	 */
	if (recover_addr)
		regs->nip = recover_addr;

out:
	return !!recover_addr;
}
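
/*
 * Worked example for the early recovery path above, with made-up
 * numbers: with opal.base = 0x30000000, opal.size = 0x10000000 and a
 * single recoverable range 0x300c1988..0x300c19d4 -> 0x300c2824, a
 * machine check at nip = 0x300c19a0 lies inside OPAL and inside the
 * range, so regs->nip is rewritten to 0x300c2824 and the handler
 * returns to the fixup code instead of the faulting instruction.
 */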
static irqreturn_t opal_interrupt(int irq, void *data)
{
	__be64 events;

	opal_handle_interrupt(virq_to_hw(irq), &events);

	opal_do_notifier(be64_to_cpu(events));

	return IRQ_HANDLED;
}

static int opal_sysfs_init(void)
{
	opal_kobj = kobject_create_and_add("opal", firmware_kobj);
	if (!opal_kobj) {
		pr_warn("kobject_create_and_add opal failed\n");
		return -ENOMEM;
	}

	return 0;
}

static void __init opal_dump_region_init(void)
{
	void *addr;
	uint64_t size;
	int rc;

	/* Register kernel log buffer */
	addr = log_buf_addr_get();
	size = log_buf_len_get();
	rc = opal_register_dump_region(OPAL_DUMP_REGION_LOG_BUF,
				       __pa(addr), size);
	/* Don't warn if this is just an older OPAL that doesn't
	 * know about that call
	 */
	if (rc && rc != OPAL_UNSUPPORTED)
		pr_warn("DUMP: Failed to register kernel log buffer. rc = %d\n",
			rc);
}

static int __init opal_init(void)
{
	struct device_node *np, *consoles;
	const __be32 *irqs;
	int rc, i, irqlen;

	opal_node = of_find_node_by_path("/ibm,opal");
	if (!opal_node) {
		pr_warn("opal: Node not found\n");
		return -ENODEV;
	}

	/* Register OPAL consoles if any ports */
	if (firmware_has_feature(FW_FEATURE_OPALv2))
		consoles = of_find_node_by_path("/ibm,opal/consoles");
	else
		consoles = of_node_get(opal_node);
	if (consoles) {
		for_each_child_of_node(consoles, np) {
			if (strcmp(np->name, "serial"))
				continue;
			of_platform_device_create(np, NULL, NULL);
		}
		of_node_put(consoles);
	}

	/* Find all OPAL interrupts and request them */
	irqs = of_get_property(opal_node, "opal-interrupts", &irqlen);
	pr_debug("opal: Found %d interrupts reserved for OPAL\n",
		 irqs ? (irqlen / 4) : 0);
	/* irqlen is only valid if the property was found */
	opal_irq_count = irqs ? (irqlen / 4) : 0;
	opal_irqs = kzalloc(opal_irq_count * sizeof(unsigned int), GFP_KERNEL);
	for (i = 0; irqs && i < (irqlen / 4); i++, irqs++) {
		unsigned int hwirq = be32_to_cpup(irqs);
		unsigned int irq = irq_create_mapping(NULL, hwirq);
		if (irq == NO_IRQ) {
			pr_warning("opal: Failed to map irq 0x%x\n", hwirq);
			continue;
		}
		rc = request_irq(irq, opal_interrupt, 0, "opal", NULL);
		if (rc)
			pr_warning("opal: Error %d requesting irq %d (0x%x)\n",
				   rc, irq, hwirq);
		opal_irqs[i] = irq;
	}

	/* Create "opal" kobject under /sys/firmware */
	rc = opal_sysfs_init();
	if (rc == 0) {
		/* Setup dump region interface */
		opal_dump_region_init();
		/* Setup error log interface */
		rc = opal_elog_init();
		/* Setup code update interface */
		opal_flash_init();
		/* Setup platform dump extract interface */
		opal_platform_dump_init();
		/* Setup system parameters interface */
		opal_sys_param_init();
		/* Setup message log interface. */
		opal_msglog_init();
	}

	return 0;
}
machine_subsys_initcall(powernv, opal_init);
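
/*
 * For illustration only (made-up values), the interrupt property
 * consumed by opal_init() above looks like
 *
 *	opal-interrupts = <0x10 0x11 0x12>;
 *
 * i.e. irqlen = 12 bytes, so three hardware interrupts get mapped
 * and requested.
 */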
void opal_shutdown(void)
{
	unsigned int i;
	long rc = OPAL_BUSY;

	/* First free interrupts, which will also mask them */
	for (i = 0; i < opal_irq_count; i++) {
		if (opal_irqs[i])
			free_irq(opal_irqs[i], NULL);
		opal_irqs[i] = 0;
	}

	/*
	 * Then sync with OPAL, which ensures that anything that can
	 * potentially write to our memory, such as an ongoing dump
	 * retrieval, has completed.
	 */
	while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
		rc = opal_sync_host_reboot();
		if (rc == OPAL_BUSY)
			opal_poll_events(NULL);
		else
			mdelay(10);
	}

	/* Unregister memory dump region */
	opal_unregister_dump_region(OPAL_DUMP_REGION_LOG_BUF);
}

/* Export this so that test modules can use it */
EXPORT_SYMBOL_GPL(opal_invalid_call);

/* Convert a region of vmalloc memory to an OPAL sg list */
struct opal_sg_list *opal_vmalloc_to_sg_list(void *vmalloc_addr,
					     unsigned long vmalloc_size)
{
	struct opal_sg_list *sg, *first = NULL;
	unsigned long i = 0;

	sg = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!sg)
		goto nomem;

	first = sg;

	while (vmalloc_size > 0) {
		uint64_t data = vmalloc_to_pfn(vmalloc_addr) << PAGE_SHIFT;
		uint64_t length = min(vmalloc_size, PAGE_SIZE);

		sg->entry[i].data = cpu_to_be64(data);
		sg->entry[i].length = cpu_to_be64(length);
		i++;

		if (i >= SG_ENTRIES_PER_NODE) {
			struct opal_sg_list *next;

			next = kzalloc(PAGE_SIZE, GFP_KERNEL);
			if (!next)
				goto nomem;

			sg->length = cpu_to_be64(
					i * sizeof(struct opal_sg_entry) + 16);
			i = 0;
			sg->next = cpu_to_be64(__pa(next));
			sg = next;
		}

		vmalloc_addr += length;
		vmalloc_size -= length;
	}

	sg->length = cpu_to_be64(i * sizeof(struct opal_sg_entry) + 16);

	return first;

nomem:
	pr_err("%s : Failed to allocate memory\n", __func__);
	opal_free_sg_list(first);
	return NULL;
}

void opal_free_sg_list(struct opal_sg_list *sg)
{
	while (sg) {
		uint64_t next = be64_to_cpu(sg->next);

		kfree(sg);

		if (next)
			sg = __va(next);
		else
			sg = NULL;
	}
}
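
/*
 * A minimal usage sketch for the sg helpers above; the buffer name,
 * its size and the consuming OPAL call are illustrative:
 *
 *	struct opal_sg_list *list;
 *
 *	list = opal_vmalloc_to_sg_list(image, image_size);
 *	if (!list)
 *		return -ENOMEM;
 *	...	hand __pa(list) to firmware, then:
 *	opal_free_sg_list(list);
 */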