// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2017 Arm Ltd.
#define pr_fmt(fmt) "sdei: " fmt

#include <acpi/ghes.h>
#include <linux/acpi.h>
#include <linux/arm_sdei.h>
#include <linux/arm-smccc.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/cpuhotplug.h>
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/kvm_host.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/percpu.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/ptrace.h>
#include <linux/preempt.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>

/*
 * The call to use to reach the firmware.
 */
static asmlinkage void (*sdei_firmware_call)(unsigned long function_id,
			unsigned long arg0, unsigned long arg1,
			unsigned long arg2, unsigned long arg3,
			unsigned long arg4, struct arm_smccc_res *res);

/* entry point from firmware to arch asm code */
static unsigned long sdei_entry_point;

struct sdei_event {
	/* These three are protected by the sdei_list_lock */
	struct list_head	list;
	bool			reregister;
	bool			reenable;

	u32			event_num;
	u8			type;
	u8			priority;

	/* This pointer is handed to firmware as the event argument. */
	union {
		/* Shared events */
		struct sdei_registered_event *registered;

		/* CPU private events */
		struct sdei_registered_event __percpu *private_registered;
	};
};

/* Take the mutex for any API call or modification. Take the mutex first. */
static DEFINE_MUTEX(sdei_events_lock);

/* and then hold this when modifying the list */
static DEFINE_SPINLOCK(sdei_list_lock);
static LIST_HEAD(sdei_list);

/* Private events are registered/enabled via IPI passing one of these */
struct sdei_crosscall_args {
	struct sdei_event *event;
	atomic_t errors;
	int first_error;
};

#define CROSSCALL_INIT(arg, event)	(arg.event = event, \
					 arg.first_error = 0, \
					 atomic_set(&arg.errors, 0))
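
/*
 * Run @fn on each online CPU, returning the first error any CPU reported
 * through sdei_cross_call_return().
 */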
static inline int sdei_do_cross_call(void *fn, struct sdei_event *event)
{
	struct sdei_crosscall_args arg;

	CROSSCALL_INIT(arg, event);
	on_each_cpu(fn, &arg, true);

	return arg.first_error;
}

static inline void
sdei_cross_call_return(struct sdei_crosscall_args *arg, int err)
{
	if (err && (atomic_inc_return(&arg->errors) == 1))
		arg->first_error = err;
}

static int sdei_to_linux_errno(unsigned long sdei_err)
{
	switch (sdei_err) {
	case SDEI_NOT_SUPPORTED:
		return -EOPNOTSUPP;
	case SDEI_INVALID_PARAMETERS:
		return -EINVAL;
	case SDEI_DENIED:
		return -EPERM;
	case SDEI_PENDING:
		return -EINPROGRESS;
	case SDEI_OUT_OF_RESOURCE:
		return -ENOMEM;
	}

	/* Not an error value ... */
	return sdei_err;
}

/*
 * If x0 is any of these values, then the call failed, use sdei_to_linux_errno()
 * to translate.
 */
static int sdei_is_err(struct arm_smccc_res *res)
{
	switch (res->a0) {
	case SDEI_NOT_SUPPORTED:
	case SDEI_INVALID_PARAMETERS:
	case SDEI_DENIED:
	case SDEI_PENDING:
	case SDEI_OUT_OF_RESOURCE:
		return true;
	}

	return false;
}

static int invoke_sdei_fn(unsigned long function_id, unsigned long arg0,
			  unsigned long arg1, unsigned long arg2,
			  unsigned long arg3, unsigned long arg4,
			  u64 *result)
{
	int err = 0;
	struct arm_smccc_res res;

	if (sdei_firmware_call) {
		sdei_firmware_call(function_id, arg0, arg1, arg2, arg3, arg4,
				   &res);
		if (sdei_is_err(&res))
			err = sdei_to_linux_errno(res.a0);
	} else {
		/*
		 * !sdei_firmware_call means we failed to probe or called
		 * sdei_mark_interface_broken(). -EIO is not an error returned
		 * by sdei_to_linux_errno() and is used to suppress messages
		 * from this driver.
		 */
		err = -EIO;
		res.a0 = SDEI_NOT_SUPPORTED;
	}

	if (result)
		*result = res.a0;

	return err;
}
NOKPROBE_SYMBOL(invoke_sdei_fn);

static struct sdei_event *sdei_event_find(u32 event_num)
{
	struct sdei_event *e, *found = NULL;

	lockdep_assert_held(&sdei_events_lock);

	spin_lock(&sdei_list_lock);
	list_for_each_entry(e, &sdei_list, list) {
		if (e->event_num == event_num) {
			found = e;
			break;
		}
	}
	spin_unlock(&sdei_list_lock);

	return found;
}

int sdei_api_event_context(u32 query, u64 *result)
{
	return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_CONTEXT, query, 0, 0, 0, 0,
			      result);
}
NOKPROBE_SYMBOL(sdei_api_event_context);

static int sdei_api_event_get_info(u32 event, u32 info, u64 *result)
{
	return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_GET_INFO, event, info, 0,
			      0, 0, result);
}
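
/*
 * Allocate the in-kernel description of an event and query firmware for its
 * type and priority. The new event is added to sdei_list unless the event
 * number is already present.
 */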
static struct sdei_event *sdei_event_create(u32 event_num,
					    sdei_event_callback *cb,
					    void *cb_arg)
{
	int err;
	u64 result;
	struct sdei_event *event;
	struct sdei_registered_event *reg;

	lockdep_assert_held(&sdei_events_lock);

	event = kzalloc(sizeof(*event), GFP_KERNEL);
	if (!event)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&event->list);
	event->event_num = event_num;

	err = sdei_api_event_get_info(event_num, SDEI_EVENT_INFO_EV_PRIORITY,
				      &result);
	if (err) {
		kfree(event);
		return ERR_PTR(err);
	}
	event->priority = result;

	err = sdei_api_event_get_info(event_num, SDEI_EVENT_INFO_EV_TYPE,
				      &result);
	if (err) {
		kfree(event);
		return ERR_PTR(err);
	}
	event->type = result;

	if (event->type == SDEI_EVENT_TYPE_SHARED) {
		reg = kzalloc(sizeof(*reg), GFP_KERNEL);
		if (!reg) {
			kfree(event);
			return ERR_PTR(-ENOMEM);
		}

		reg->event_num = event_num;
		reg->priority = event->priority;

		reg->callback = cb;
		reg->callback_arg = cb_arg;
		event->registered = reg;
	} else {
		int cpu;
		struct sdei_registered_event __percpu *regs;

		regs = alloc_percpu(struct sdei_registered_event);
		if (!regs) {
			kfree(event);
			return ERR_PTR(-ENOMEM);
		}

		for_each_possible_cpu(cpu) {
			reg = per_cpu_ptr(regs, cpu);

			reg->event_num = event->event_num;
			reg->priority = event->priority;
			reg->callback = cb;
			reg->callback_arg = cb_arg;
		}

		event->private_registered = regs;
	}

	if (sdei_event_find(event_num)) {
		kfree(event->registered);
		kfree(event);
		event = ERR_PTR(-EBUSY);
	} else {
		spin_lock(&sdei_list_lock);
		list_add(&event->list, &sdei_list);
		spin_unlock(&sdei_list_lock);
	}

	return event;
}

static void sdei_event_destroy(struct sdei_event *event)
{
	lockdep_assert_held(&sdei_events_lock);

	spin_lock(&sdei_list_lock);
	list_del(&event->list);
	spin_unlock(&sdei_list_lock);

	if (event->type == SDEI_EVENT_TYPE_SHARED)
		kfree(event->registered);
	else
		free_percpu(event->private_registered);

	kfree(event);
}

static int sdei_api_get_version(u64 *version)
{
	return invoke_sdei_fn(SDEI_1_0_FN_SDEI_VERSION, 0, 0, 0, 0, 0, version);
}
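
/*
 * Masking stops firmware delivering any SDEI event to this PE. -EIO means
 * the interface is broken (or was never probed) and is deliberately not
 * reported here.
 */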
int sdei_mask_local_cpu(void)
{
	int err;

	WARN_ON_ONCE(preemptible());

	err = invoke_sdei_fn(SDEI_1_0_FN_SDEI_PE_MASK, 0, 0, 0, 0, 0, NULL);
	if (err && err != -EIO) {
		pr_warn_once("failed to mask CPU[%u]: %d\n",
			     smp_processor_id(), err);
		return err;
	}

	return 0;
}

static void _ipi_mask_cpu(void *ignored)
{
	sdei_mask_local_cpu();
}

int sdei_unmask_local_cpu(void)
{
	int err;

	WARN_ON_ONCE(preemptible());

	err = invoke_sdei_fn(SDEI_1_0_FN_SDEI_PE_UNMASK, 0, 0, 0, 0, 0, NULL);
	if (err && err != -EIO) {
		pr_warn_once("failed to unmask CPU[%u]: %d\n",
			     smp_processor_id(), err);
		return err;
	}

	return 0;
}

static void _ipi_unmask_cpu(void *ignored)
{
	sdei_unmask_local_cpu();
}

static void _ipi_private_reset(void *ignored)
{
	int err;

	err = invoke_sdei_fn(SDEI_1_0_FN_SDEI_PRIVATE_RESET, 0, 0, 0, 0, 0,
			     NULL);
	if (err && err != -EIO)
		pr_warn_once("failed to reset CPU[%u]: %d\n",
			     smp_processor_id(), err);
}

static int sdei_api_shared_reset(void)
{
	return invoke_sdei_fn(SDEI_1_0_FN_SDEI_SHARED_RESET, 0, 0, 0, 0, 0,
			      NULL);
}

static void sdei_mark_interface_broken(void)
{
	pr_err("disabling SDEI firmware interface\n");
	on_each_cpu(&_ipi_mask_cpu, NULL, true);
	sdei_firmware_call = NULL;
}

static int sdei_platform_reset(void)
{
	int err;

	on_each_cpu(&_ipi_private_reset, NULL, true);
	err = sdei_api_shared_reset();
	if (err) {
		pr_err("Failed to reset platform: %d\n", err);
		sdei_mark_interface_broken();
	}

	return err;
}

static int sdei_api_event_enable(u32 event_num)
{
	return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_ENABLE, event_num, 0, 0, 0,
			      0, NULL);
}

/* Called directly by the hotplug callbacks */
static void _local_event_enable(void *data)
{
	int err;
	struct sdei_crosscall_args *arg = data;

	WARN_ON_ONCE(preemptible());

	err = sdei_api_event_enable(arg->event->event_num);

	sdei_cross_call_return(arg, err);
}

int sdei_event_enable(u32 event_num)
{
	int err = -EINVAL;
	struct sdei_event *event;

	mutex_lock(&sdei_events_lock);
	event = sdei_event_find(event_num);
	if (!event) {
		mutex_unlock(&sdei_events_lock);
		return -ENOENT;
	}

	spin_lock(&sdei_list_lock);
	event->reenable = true;
	spin_unlock(&sdei_list_lock);

	if (event->type == SDEI_EVENT_TYPE_SHARED)
		err = sdei_api_event_enable(event->event_num);
	else
		err = sdei_do_cross_call(_local_event_enable, event);
	mutex_unlock(&sdei_events_lock);

	return err;
}
EXPORT_SYMBOL(sdei_event_enable);

static int sdei_api_event_disable(u32 event_num)
{
	return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_DISABLE, event_num, 0, 0,
			      0, 0, NULL);
}

static void _ipi_event_disable(void *data)
{
	int err;
	struct sdei_crosscall_args *arg = data;

	err = sdei_api_event_disable(arg->event->event_num);

	sdei_cross_call_return(arg, err);
}

int sdei_event_disable(u32 event_num)
{
	int err = -EINVAL;
	struct sdei_event *event;

	mutex_lock(&sdei_events_lock);
	event = sdei_event_find(event_num);
	if (!event) {
		mutex_unlock(&sdei_events_lock);
		return -ENOENT;
	}

	spin_lock(&sdei_list_lock);
	event->reenable = false;
	spin_unlock(&sdei_list_lock);

	if (event->type == SDEI_EVENT_TYPE_SHARED)
		err = sdei_api_event_disable(event->event_num);
	else
		err = sdei_do_cross_call(_ipi_event_disable, event);
	mutex_unlock(&sdei_events_lock);

	return err;
}
EXPORT_SYMBOL(sdei_event_disable);

static int sdei_api_event_unregister(u32 event_num)
{
	return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_UNREGISTER, event_num, 0,
			      0, 0, 0, NULL);
}

/* Called directly by the hotplug callbacks */
static void _local_event_unregister(void *data)
{
	int err;
	struct sdei_crosscall_args *arg = data;

	WARN_ON_ONCE(preemptible());

	err = sdei_api_event_unregister(arg->event->event_num);

	sdei_cross_call_return(arg, err);
}
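
/*
 * Unregister the event with firmware and clear the flags that would cause it
 * to be re-registered/re-enabled across hibernate or CPU hotplug.
 */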
static int _sdei_event_unregister(struct sdei_event *event)
{
	lockdep_assert_held(&sdei_events_lock);

	spin_lock(&sdei_list_lock);
	event->reregister = false;
	event->reenable = false;
	spin_unlock(&sdei_list_lock);

	if (event->type == SDEI_EVENT_TYPE_SHARED)
		return sdei_api_event_unregister(event->event_num);

	return sdei_do_cross_call(_local_event_unregister, event);
}

int sdei_event_unregister(u32 event_num)
{
	int err;
	struct sdei_event *event;

	WARN_ON(in_nmi());

	mutex_lock(&sdei_events_lock);
	event = sdei_event_find(event_num);
	do {
		if (!event) {
			pr_warn("Event %u not registered\n", event_num);
			err = -ENOENT;
			break;
		}

		err = _sdei_event_unregister(event);
		if (err)
			break;

		sdei_event_destroy(event);
	} while (0);
	mutex_unlock(&sdei_events_lock);

	return err;
}
EXPORT_SYMBOL(sdei_event_unregister);

/*
 * unregister events, but don't destroy them as they are re-registered by
 * sdei_reregister_shared().
 */
static int sdei_unregister_shared(void)
{
	int err = 0;
	struct sdei_event *event;

	mutex_lock(&sdei_events_lock);
	spin_lock(&sdei_list_lock);
	list_for_each_entry(event, &sdei_list, list) {
		if (event->type != SDEI_EVENT_TYPE_SHARED)
			continue;

		err = _sdei_event_unregister(event);
		if (err)
			break;
	}
	spin_unlock(&sdei_list_lock);
	mutex_unlock(&sdei_events_lock);

	return err;
}

static int sdei_api_event_register(u32 event_num, unsigned long entry_point,
				   void *arg, u64 flags, u64 affinity)
{
	return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_REGISTER, event_num,
			      (unsigned long)entry_point, (unsigned long)arg,
			      flags, affinity, NULL);
}

/* Called directly by the hotplug callbacks */
static void _local_event_register(void *data)
{
	int err;
	struct sdei_registered_event *reg;
	struct sdei_crosscall_args *arg = data;

	WARN_ON(preemptible());

	reg = per_cpu_ptr(arg->event->private_registered, smp_processor_id());
	err = sdei_api_event_register(arg->event->event_num, sdei_entry_point,
				      reg, 0, 0);

	sdei_cross_call_return(arg, err);
}
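
/*
 * Register an event with firmware: shared events are routed to any PE
 * (RM_ANY); private events are registered on each CPU via a cross-call, and
 * a partial failure unregisters the CPUs that did succeed.
 */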
static int _sdei_event_register(struct sdei_event *event)
{
	int err;

	lockdep_assert_held(&sdei_events_lock);

	spin_lock(&sdei_list_lock);
	event->reregister = true;
	spin_unlock(&sdei_list_lock);

	if (event->type == SDEI_EVENT_TYPE_SHARED)
		return sdei_api_event_register(event->event_num,
					       sdei_entry_point,
					       event->registered,
					       SDEI_EVENT_REGISTER_RM_ANY, 0);

	err = sdei_do_cross_call(_local_event_register, event);
	if (err) {
		spin_lock(&sdei_list_lock);
		event->reregister = false;
		event->reenable = false;
		spin_unlock(&sdei_list_lock);

		sdei_do_cross_call(_local_event_unregister, event);
	}

	return err;
}

int sdei_event_register(u32 event_num, sdei_event_callback *cb, void *arg)
{
	int err;
	struct sdei_event *event;

	WARN_ON(in_nmi());

	mutex_lock(&sdei_events_lock);
	do {
		if (sdei_event_find(event_num)) {
			pr_warn("Event %u already registered\n", event_num);
			err = -EBUSY;
			break;
		}

		event = sdei_event_create(event_num, cb, arg);
		if (IS_ERR(event)) {
			err = PTR_ERR(event);
			pr_warn("Failed to create event %u: %d\n", event_num,
				err);
			break;
		}

		err = _sdei_event_register(event);
		if (err) {
			sdei_event_destroy(event);
			pr_warn("Failed to register event %u: %d\n", event_num,
				err);
		}
	} while (0);
	mutex_unlock(&sdei_events_lock);

	return err;
}
EXPORT_SYMBOL(sdei_event_register);

static int sdei_reregister_event(struct sdei_event *event)
{
	int err;

	lockdep_assert_held(&sdei_events_lock);

	err = _sdei_event_register(event);
	if (err) {
		pr_err("Failed to re-register event %u\n", event->event_num);
		sdei_event_destroy(event);
		return err;
	}

	if (event->reenable) {
		if (event->type == SDEI_EVENT_TYPE_SHARED)
			err = sdei_api_event_enable(event->event_num);
		else
			err = sdei_do_cross_call(_local_event_enable, event);
	}

	if (err)
		pr_err("Failed to re-enable event %u\n", event->event_num);

	return err;
}

static int sdei_reregister_shared(void)
{
	int err = 0;
	struct sdei_event *event;

	mutex_lock(&sdei_events_lock);
	spin_lock(&sdei_list_lock);
	list_for_each_entry(event, &sdei_list, list) {
		if (event->type != SDEI_EVENT_TYPE_SHARED)
			continue;

		if (event->reregister) {
			err = sdei_reregister_event(event);
			if (err)
				break;
		}
	}
	spin_unlock(&sdei_list_lock);
	mutex_unlock(&sdei_events_lock);

	return err;
}
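
/*
 * CPU hotplug: private events only exist on the CPU they were registered on,
 * so unregister them when the CPU goes down and re-register/re-enable them
 * (according to the reregister/reenable flags) when it comes back up. The
 * cross-call functions are called directly as we are already on the right CPU.
 */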
static int sdei_cpuhp_down(unsigned int cpu)
{
	struct sdei_event *event;
	struct sdei_crosscall_args arg;

	/* un-register private events */
	spin_lock(&sdei_list_lock);
	list_for_each_entry(event, &sdei_list, list) {
		if (event->type == SDEI_EVENT_TYPE_SHARED)
			continue;

		CROSSCALL_INIT(arg, event);
		/* call the cross-call function locally... */
		_local_event_unregister(&arg);
		if (arg.first_error)
			pr_err("Failed to unregister event %u: %d\n",
			       event->event_num, arg.first_error);
	}
	spin_unlock(&sdei_list_lock);

	return sdei_mask_local_cpu();
}

static int sdei_cpuhp_up(unsigned int cpu)
{
	struct sdei_event *event;
	struct sdei_crosscall_args arg;

	/* re-register/enable private events */
	spin_lock(&sdei_list_lock);
	list_for_each_entry(event, &sdei_list, list) {
		if (event->type == SDEI_EVENT_TYPE_SHARED)
			continue;

		if (event->reregister) {
			CROSSCALL_INIT(arg, event);
			/* call the cross-call function locally... */
			_local_event_register(&arg);
			if (arg.first_error)
				pr_err("Failed to re-register event %u: %d\n",
				       event->event_num, arg.first_error);
		}

		if (event->reenable) {
			CROSSCALL_INIT(arg, event);
			_local_event_enable(&arg);
			if (arg.first_error)
				pr_err("Failed to re-enable event %u: %d\n",
				       event->event_num, arg.first_error);
		}
	}
	spin_unlock(&sdei_list_lock);

	return sdei_unmask_local_cpu();
}

/* When entering idle, mask/unmask events for this cpu */
static int sdei_pm_notifier(struct notifier_block *nb, unsigned long action,
			    void *data)
{
	int rv;

	switch (action) {
	case CPU_PM_ENTER:
		rv = sdei_mask_local_cpu();
		break;
	case CPU_PM_EXIT:
	case CPU_PM_ENTER_FAILED:
		rv = sdei_unmask_local_cpu();
		break;
	default:
		return NOTIFY_DONE;
	}

	if (rv)
		return notifier_from_errno(rv);

	return NOTIFY_OK;
}

static struct notifier_block sdei_pm_nb = {
	.notifier_call = sdei_pm_notifier,
};

static int sdei_device_suspend(struct device *dev)
{
	on_each_cpu(_ipi_mask_cpu, NULL, true);

	return 0;
}

static int sdei_device_resume(struct device *dev)
{
	on_each_cpu(_ipi_unmask_cpu, NULL, true);

	return 0;
}

/*
 * We need all events to be reregistered when we resume from hibernate.
 *
 * The sequence is freeze->thaw. Reboot. freeze->restore. We unregister
 * events during freeze, then re-register and re-enable them during thaw
 * and restore.
 */
static int sdei_device_freeze(struct device *dev)
{
	int err;

	/* unregister private events */
	cpuhp_remove_state(CPUHP_AP_ARM_SDEI_STARTING);

	err = sdei_unregister_shared();
	if (err)
		return err;

	return 0;
}

static int sdei_device_thaw(struct device *dev)
{
	int err;

	/* re-register shared events */
	err = sdei_reregister_shared();
	if (err) {
		pr_warn("Failed to re-register shared events...\n");
		sdei_mark_interface_broken();
		return err;
	}

	err = cpuhp_setup_state(CPUHP_AP_ARM_SDEI_STARTING, "SDEI",
				&sdei_cpuhp_up, &sdei_cpuhp_down);
	if (err)
		pr_warn("Failed to re-register CPU hotplug notifier...\n");

	return err;
}

static int sdei_device_restore(struct device *dev)
{
	int err;

	err = sdei_platform_reset();
	if (err)
		return err;

	return sdei_device_thaw(dev);
}

static const struct dev_pm_ops sdei_pm_ops = {
	.suspend = sdei_device_suspend,
	.resume = sdei_device_resume,
	.freeze = sdei_device_freeze,
	.thaw = sdei_device_thaw,
	.restore = sdei_device_restore,
};

/*
 * Mask all CPUs and unregister all events on panic, reboot or kexec.
 */
static int sdei_reboot_notifier(struct notifier_block *nb, unsigned long action,
				void *data)
{
	/*
	 * We are going to reset the interface, after this there is no point
	 * doing work when we take CPUs offline.
	 */
	cpuhp_remove_state(CPUHP_AP_ARM_SDEI_STARTING);

	sdei_platform_reset();

	return NOTIFY_OK;
}

static struct notifier_block sdei_reboot_nb = {
	.notifier_call = sdei_reboot_notifier,
};

static void sdei_smccc_smc(unsigned long function_id,
			   unsigned long arg0, unsigned long arg1,
			   unsigned long arg2, unsigned long arg3,
			   unsigned long arg4, struct arm_smccc_res *res)
{
	arm_smccc_smc(function_id, arg0, arg1, arg2, arg3, arg4, 0, 0, res);
}
NOKPROBE_SYMBOL(sdei_smccc_smc);

static void sdei_smccc_hvc(unsigned long function_id,
			   unsigned long arg0, unsigned long arg1,
			   unsigned long arg2, unsigned long arg3,
			   unsigned long arg4, struct arm_smccc_res *res)
{
	arm_smccc_hvc(function_id, arg0, arg1, arg2, arg3, arg4, 0, 0, res);
}
NOKPROBE_SYMBOL(sdei_smccc_hvc);
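
/*
 * APEI/GHES glue: the notification vector in the HEST entry is the SDEI
 * event number, and the priority reported by firmware selects whether the
 * normal or critical callback is registered. This is also the typical usage
 * of the exported API (illustrative only):
 *
 *	err = sdei_event_register(event_num, cb, arg);
 *	if (!err)
 *		err = sdei_event_enable(event_num);
 *
 * Teardown is sdei_event_disable() followed by sdei_event_unregister().
 */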
int sdei_register_ghes(struct ghes *ghes, sdei_event_callback *normal_cb,
		       sdei_event_callback *critical_cb)
{
	int err;
	u64 result;
	u32 event_num;
	sdei_event_callback *cb;

	if (!IS_ENABLED(CONFIG_ACPI_APEI_GHES))
		return -EOPNOTSUPP;

	event_num = ghes->generic->notify.vector;
	if (event_num == 0) {
		/*
		 * Event 0 is reserved by the specification for
		 * SDEI_EVENT_SIGNAL.
		 */
		return -EINVAL;
	}

	err = sdei_api_event_get_info(event_num, SDEI_EVENT_INFO_EV_PRIORITY,
				      &result);
	if (err)
		return err;

	if (result == SDEI_EVENT_PRIORITY_CRITICAL)
		cb = critical_cb;
	else
		cb = normal_cb;

	err = sdei_event_register(event_num, cb, ghes);
	if (!err)
		err = sdei_event_enable(event_num);

	return err;
}

int sdei_unregister_ghes(struct ghes *ghes)
{
	int i;
	int err;
	u32 event_num = ghes->generic->notify.vector;

	might_sleep();

	if (!IS_ENABLED(CONFIG_ACPI_APEI_GHES))
		return -EOPNOTSUPP;

	/*
	 * The event may be running on another CPU. Disable it
	 * to stop new events, then try to unregister a few times.
	 */
	err = sdei_event_disable(event_num);
	if (err)
		return err;

	for (i = 0; i < 3; i++) {
		err = sdei_event_unregister(event_num);
		if (err != -EINPROGRESS)
			break;

		schedule();
	}

	return err;
}

static int sdei_get_conduit(struct platform_device *pdev)
{
	const char *method;
	struct device_node *np = pdev->dev.of_node;

	sdei_firmware_call = NULL;
	if (np) {
		if (of_property_read_string(np, "method", &method)) {
			pr_warn("missing \"method\" property\n");
			return CONDUIT_INVALID;
		}

		if (!strcmp("hvc", method)) {
			sdei_firmware_call = &sdei_smccc_hvc;
			return CONDUIT_HVC;
		} else if (!strcmp("smc", method)) {
			sdei_firmware_call = &sdei_smccc_smc;
			return CONDUIT_SMC;
		}

		pr_warn("invalid \"method\" property: %s\n", method);
	} else if (IS_ENABLED(CONFIG_ACPI) && !acpi_disabled) {
		if (acpi_psci_use_hvc()) {
			sdei_firmware_call = &sdei_smccc_hvc;
			return CONDUIT_HVC;
		} else {
			sdei_firmware_call = &sdei_smccc_smc;
			return CONDUIT_SMC;
		}
	}

	return CONDUIT_INVALID;
}

static int sdei_probe(struct platform_device *pdev)
{
	int err;
	u64 ver = 0;
	int conduit;

	conduit = sdei_get_conduit(pdev);
	if (!sdei_firmware_call)
		return 0;

	err = sdei_api_get_version(&ver);
	if (err == -EOPNOTSUPP)
		pr_err("advertised but not implemented in platform firmware\n");
	if (err) {
		pr_err("Failed to get SDEI version: %d\n", err);
		sdei_mark_interface_broken();
		return err;
	}

	pr_info("SDEIv%d.%d (0x%x) detected in firmware.\n",
		(int)SDEI_VERSION_MAJOR(ver), (int)SDEI_VERSION_MINOR(ver),
		(int)SDEI_VERSION_VENDOR(ver));

	if (SDEI_VERSION_MAJOR(ver) != 1) {
		pr_warn("Conflicting SDEI version detected.\n");
		sdei_mark_interface_broken();
		return -EINVAL;
	}

	err = sdei_platform_reset();
	if (err)
		return err;

	sdei_entry_point = sdei_arch_get_entry_point(conduit);
	if (!sdei_entry_point) {
		/* Not supported due to hardware or boot configuration */
		sdei_mark_interface_broken();
		return 0;
	}

	err = cpu_pm_register_notifier(&sdei_pm_nb);
	if (err) {
		pr_warn("Failed to register CPU PM notifier...\n");
		goto error;
	}

	err = register_reboot_notifier(&sdei_reboot_nb);
	if (err) {
		pr_warn("Failed to register reboot notifier...\n");
		goto remove_cpupm;
	}

	err = cpuhp_setup_state(CPUHP_AP_ARM_SDEI_STARTING, "SDEI",
				&sdei_cpuhp_up, &sdei_cpuhp_down);
	if (err) {
		pr_warn("Failed to register CPU hotplug notifier...\n");
		goto remove_reboot;
	}

	return 0;

remove_reboot:
	unregister_reboot_notifier(&sdei_reboot_nb);

remove_cpupm:
	cpu_pm_unregister_notifier(&sdei_pm_nb);

error:
	sdei_mark_interface_broken();
	return err;
}

static const struct of_device_id sdei_of_match[] = {
	{ .compatible = "arm,sdei-1.0" },
	{}
};

static struct platform_driver sdei_driver = {
	.driver = {
		.name = "sdei",
		.pm = &sdei_pm_ops,
		.of_match_table = sdei_of_match,
	},
	.probe = sdei_probe,
};

static bool __init sdei_present_dt(void)
{
	struct device_node *np, *fw_np;

	fw_np = of_find_node_by_name(NULL, "firmware");
	if (!fw_np)
		return false;

	np = of_find_matching_node(fw_np, sdei_of_match);
	if (!np)
		return false;
	of_node_put(np);

	return true;
}

static bool __init sdei_present_acpi(void)
{
	acpi_status status;
	struct platform_device *pdev;
	struct acpi_table_header *sdei_table_header;

	if (acpi_disabled)
		return false;

	status = acpi_get_table(ACPI_SIG_SDEI, 0, &sdei_table_header);
	if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
		const char *msg = acpi_format_exception(status);

		pr_info("Failed to get ACPI:SDEI table, %s\n", msg);
	}
	if (ACPI_FAILURE(status))
		return false;

	pdev = platform_device_register_simple(sdei_driver.driver.name, 0, NULL,
					       0);
	if (IS_ERR(pdev))
		return false;

	return true;
}

static int __init sdei_init(void)
{
	if (sdei_present_dt() || sdei_present_acpi())
		platform_driver_register(&sdei_driver);

	return 0;
}

/*
 * On an ACPI system SDEI needs to be ready before HEST:GHES tries to register
 * its events. ACPI is initialised from a subsys_initcall(), GHES is initialised
 * by device_initcall(). We want to be called in the middle.
 */
subsys_initcall_sync(sdei_init);
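
/*
 * Called from the arch SDEI entry code with the interrupted pt_regs. Reset
 * addr_limit for the duration of the callback: the event may have preempted
 * a context where the uaccess routines were allowed to access kernel memory.
 */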
int sdei_event_handler(struct pt_regs *regs,
		       struct sdei_registered_event *arg)
{
	int err;
	mm_segment_t orig_addr_limit;
	u32 event_num = arg->event_num;

	orig_addr_limit = get_fs();
	set_fs(USER_DS);

	err = arg->callback(event_num, regs, arg->callback_arg);
	if (err)
		pr_err_ratelimited("event %u on CPU %u failed with error: %d\n",
				   event_num, smp_processor_id(), err);

	set_fs(orig_addr_limit);

	return err;
}
NOKPROBE_SYMBOL(sdei_event_handler);