// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2017 Arm Ltd.
#define pr_fmt(fmt) "sdei: " fmt

#include <acpi/ghes.h>
#include <linux/acpi.h>
#include <linux/arm_sdei.h>
#include <linux/arm-smccc.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/cpuhotplug.h>
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/kvm_host.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/percpu.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/ptrace.h>
#include <linux/preempt.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>

/*
 * The call to use to reach the firmware.
 */
static asmlinkage void (*sdei_firmware_call)(unsigned long function_id,
			unsigned long arg0, unsigned long arg1,
			unsigned long arg2, unsigned long arg3,
			unsigned long arg4, struct arm_smccc_res *res);

/* entry point from firmware to arch asm code */
static unsigned long sdei_entry_point;

struct sdei_event {
	/* These three are protected by the sdei_list_lock */
	struct list_head	list;
	bool			reregister;
	bool			reenable;

	u32			event_num;
	u8			type;
	u8			priority;

	/* This pointer is handed to firmware as the event argument. */
	union {
		/* Shared events */
		struct sdei_registered_event *registered;

		/* CPU private events */
		struct sdei_registered_event __percpu *private_registered;
	};
};

/* Take the mutex for any API call or modification. Take the mutex first. */
static DEFINE_MUTEX(sdei_events_lock);

/* and then hold this when modifying the list */
static DEFINE_SPINLOCK(sdei_list_lock);
static LIST_HEAD(sdei_list);

/* Private events are registered/enabled via IPI passing one of these */
struct sdei_crosscall_args {
	struct sdei_event *event;
	atomic_t errors;
	int first_error;
};

#define CROSSCALL_INIT(arg, event)	(arg.event = event, \
					 arg.first_error = 0, \
					 atomic_set(&arg.errors, 0))

static inline int sdei_do_cross_call(void *fn, struct sdei_event *event)
{
	struct sdei_crosscall_args arg;

	CROSSCALL_INIT(arg, event);
	on_each_cpu(fn, &arg, true);

	return arg.first_error;
}

static inline void
sdei_cross_call_return(struct sdei_crosscall_args *arg, int err)
{
	if (err && (atomic_inc_return(&arg->errors) == 1))
		arg->first_error = err;
}
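/*
 * Illustrative sketch, not called anywhere: a per-cpu firmware operation is
 * wrapped in a small function that reports its result via
 * sdei_cross_call_return(), then run on every CPU with sdei_do_cross_call().
 * _local_event_frob() and sdei_api_event_frob() are made-up names; see
 * _local_event_enable()/_local_event_register() below for the real callers.
 *
 *	static void _local_event_frob(void *data)
 *	{
 *		struct sdei_crosscall_args *arg = data;
 *
 *		sdei_cross_call_return(arg,
 *				       sdei_api_event_frob(arg->event->event_num));
 *	}
 *
 *	err = sdei_do_cross_call(_local_event_frob, event);
 */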
static int sdei_to_linux_errno(unsigned long sdei_err)
{
	switch (sdei_err) {
	case SDEI_NOT_SUPPORTED:
		return -EOPNOTSUPP;
	case SDEI_INVALID_PARAMETERS:
		return -EINVAL;
	case SDEI_DENIED:
		return -EPERM;
	case SDEI_PENDING:
		return -EINPROGRESS;
	case SDEI_OUT_OF_RESOURCE:
		return -ENOMEM;
	}

	/* Not an error value ... */
	return sdei_err;
}

/*
 * If x0 is any of these values, then the call failed, use sdei_to_linux_errno()
 * to translate.
 */
static int sdei_is_err(struct arm_smccc_res *res)
{
	switch (res->a0) {
	case SDEI_NOT_SUPPORTED:
	case SDEI_INVALID_PARAMETERS:
	case SDEI_DENIED:
	case SDEI_PENDING:
	case SDEI_OUT_OF_RESOURCE:
		return true;
	}

	return false;
}

static int invoke_sdei_fn(unsigned long function_id, unsigned long arg0,
			  unsigned long arg1, unsigned long arg2,
			  unsigned long arg3, unsigned long arg4,
			  u64 *result)
{
	int err = 0;
	struct arm_smccc_res res;

	if (sdei_firmware_call) {
		sdei_firmware_call(function_id, arg0, arg1, arg2, arg3, arg4,
				   &res);
		if (sdei_is_err(&res))
			err = sdei_to_linux_errno(res.a0);
	} else {
		/*
		 * !sdei_firmware_call means we failed to probe or called
		 * sdei_mark_interface_broken(). -EIO is not an error returned
		 * by sdei_to_linux_errno() and is used to suppress messages
		 * from this driver.
		 */
		err = -EIO;
		res.a0 = SDEI_NOT_SUPPORTED;
	}

	if (result)
		*result = res.a0;

	return err;
}
NOKPROBE_SYMBOL(invoke_sdei_fn);

static struct sdei_event *sdei_event_find(u32 event_num)
{
	struct sdei_event *e, *found = NULL;

	lockdep_assert_held(&sdei_events_lock);

	spin_lock(&sdei_list_lock);
	list_for_each_entry(e, &sdei_list, list) {
		if (e->event_num == event_num) {
			found = e;
			break;
		}
	}
	spin_unlock(&sdei_list_lock);

	return found;
}

int sdei_api_event_context(u32 query, u64 *result)
{
	return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_CONTEXT, query, 0, 0, 0, 0,
			      result);
}
NOKPROBE_SYMBOL(sdei_api_event_context);

static int sdei_api_event_get_info(u32 event, u32 info, u64 *result)
{
	return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_GET_INFO, event, info, 0,
			      0, 0, result);
}

static struct sdei_event *sdei_event_create(u32 event_num,
					    sdei_event_callback *cb,
					    void *cb_arg)
{
	int err;
	u64 result;
	struct sdei_event *event;
	struct sdei_registered_event *reg;

	lockdep_assert_held(&sdei_events_lock);

	event = kzalloc(sizeof(*event), GFP_KERNEL);
	if (!event)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&event->list);
	event->event_num = event_num;

	err = sdei_api_event_get_info(event_num, SDEI_EVENT_INFO_EV_PRIORITY,
				      &result);
	if (err) {
		kfree(event);
		return ERR_PTR(err);
	}
	event->priority = result;

	err = sdei_api_event_get_info(event_num, SDEI_EVENT_INFO_EV_TYPE,
				      &result);
	if (err) {
		kfree(event);
		return ERR_PTR(err);
	}
	event->type = result;

	if (event->type == SDEI_EVENT_TYPE_SHARED) {
		reg = kzalloc(sizeof(*reg), GFP_KERNEL);
		if (!reg) {
			kfree(event);
			return ERR_PTR(-ENOMEM);
		}

		reg->event_num = event_num;
		reg->priority = event->priority;

		reg->callback = cb;
		reg->callback_arg = cb_arg;
		event->registered = reg;
	} else {
		int cpu;
		struct sdei_registered_event __percpu *regs;

		regs = alloc_percpu(struct sdei_registered_event);
		if (!regs) {
			kfree(event);
			return ERR_PTR(-ENOMEM);
		}

		for_each_possible_cpu(cpu) {
			reg = per_cpu_ptr(regs, cpu);

			reg->event_num = event->event_num;
			reg->priority = event->priority;
			reg->callback = cb;
			reg->callback_arg = cb_arg;
		}

		event->private_registered = regs;
	}

	spin_lock(&sdei_list_lock);
	list_add(&event->list, &sdei_list);
	spin_unlock(&sdei_list_lock);

	return event;
}
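/*
 * Summary note: sdei_event_create() builds the argument block(s) firmware
 * hands back to the handler. A shared event gets a single
 * struct sdei_registered_event; a private (per-cpu) event gets one per
 * possible CPU, and each CPU passes its own copy to firmware from
 * _local_event_register() below.
 */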
static void sdei_event_destroy_llocked(struct sdei_event *event)
{
	lockdep_assert_held(&sdei_events_lock);
	lockdep_assert_held(&sdei_list_lock);

	list_del(&event->list);

	if (event->type == SDEI_EVENT_TYPE_SHARED)
		kfree(event->registered);
	else
		free_percpu(event->private_registered);

	kfree(event);
}

static void sdei_event_destroy(struct sdei_event *event)
{
	spin_lock(&sdei_list_lock);
	sdei_event_destroy_llocked(event);
	spin_unlock(&sdei_list_lock);
}

static int sdei_api_get_version(u64 *version)
{
	return invoke_sdei_fn(SDEI_1_0_FN_SDEI_VERSION, 0, 0, 0, 0, 0, version);
}

int sdei_mask_local_cpu(void)
{
	int err;

	WARN_ON_ONCE(preemptible());

	err = invoke_sdei_fn(SDEI_1_0_FN_SDEI_PE_MASK, 0, 0, 0, 0, 0, NULL);
	if (err && err != -EIO) {
		pr_warn_once("failed to mask CPU[%u]: %d\n",
			     smp_processor_id(), err);
		return err;
	}

	return 0;
}

static void _ipi_mask_cpu(void *ignored)
{
	sdei_mask_local_cpu();
}

int sdei_unmask_local_cpu(void)
{
	int err;

	WARN_ON_ONCE(preemptible());

	err = invoke_sdei_fn(SDEI_1_0_FN_SDEI_PE_UNMASK, 0, 0, 0, 0, 0, NULL);
	if (err && err != -EIO) {
		pr_warn_once("failed to unmask CPU[%u]: %d\n",
			     smp_processor_id(), err);
		return err;
	}

	return 0;
}

static void _ipi_unmask_cpu(void *ignored)
{
	sdei_unmask_local_cpu();
}

static void _ipi_private_reset(void *ignored)
{
	int err;

	err = invoke_sdei_fn(SDEI_1_0_FN_SDEI_PRIVATE_RESET, 0, 0, 0, 0, 0,
			     NULL);
	if (err && err != -EIO)
		pr_warn_once("failed to reset CPU[%u]: %d\n",
			     smp_processor_id(), err);
}

static int sdei_api_shared_reset(void)
{
	return invoke_sdei_fn(SDEI_1_0_FN_SDEI_SHARED_RESET, 0, 0, 0, 0, 0,
			      NULL);
}

static void sdei_mark_interface_broken(void)
{
	pr_err("disabling SDEI firmware interface\n");
	on_each_cpu(&_ipi_mask_cpu, NULL, true);
	sdei_firmware_call = NULL;
}

static int sdei_platform_reset(void)
{
	int err;

	on_each_cpu(&_ipi_private_reset, NULL, true);
	err = sdei_api_shared_reset();
	if (err) {
		pr_err("Failed to reset platform: %d\n", err);
		sdei_mark_interface_broken();
	}

	return err;
}

static int sdei_api_event_enable(u32 event_num)
{
	return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_ENABLE, event_num, 0, 0, 0,
			      0, NULL);
}

/* Called directly by the hotplug callbacks */
static void _local_event_enable(void *data)
{
	int err;
	struct sdei_crosscall_args *arg = data;

	WARN_ON_ONCE(preemptible());

	err = sdei_api_event_enable(arg->event->event_num);

	sdei_cross_call_return(arg, err);
}

int sdei_event_enable(u32 event_num)
{
	int err = -EINVAL;
	struct sdei_event *event;

	mutex_lock(&sdei_events_lock);
	event = sdei_event_find(event_num);
	if (!event) {
		mutex_unlock(&sdei_events_lock);
		return -ENOENT;
	}

	cpus_read_lock();
	if (event->type == SDEI_EVENT_TYPE_SHARED)
		err = sdei_api_event_enable(event->event_num);
	else
		err = sdei_do_cross_call(_local_event_enable, event);

	if (!err) {
		spin_lock(&sdei_list_lock);
		event->reenable = true;
		spin_unlock(&sdei_list_lock);
	}
	cpus_read_unlock();
	mutex_unlock(&sdei_events_lock);

	return err;
}
EXPORT_SYMBOL(sdei_event_enable);
static int sdei_api_event_disable(u32 event_num)
{
	return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_DISABLE, event_num, 0, 0,
			      0, 0, NULL);
}

static void _ipi_event_disable(void *data)
{
	int err;
	struct sdei_crosscall_args *arg = data;

	err = sdei_api_event_disable(arg->event->event_num);

	sdei_cross_call_return(arg, err);
}

int sdei_event_disable(u32 event_num)
{
	int err = -EINVAL;
	struct sdei_event *event;

	mutex_lock(&sdei_events_lock);
	event = sdei_event_find(event_num);
	if (!event) {
		mutex_unlock(&sdei_events_lock);
		return -ENOENT;
	}

	spin_lock(&sdei_list_lock);
	event->reenable = false;
	spin_unlock(&sdei_list_lock);

	if (event->type == SDEI_EVENT_TYPE_SHARED)
		err = sdei_api_event_disable(event->event_num);
	else
		err = sdei_do_cross_call(_ipi_event_disable, event);
	mutex_unlock(&sdei_events_lock);

	return err;
}
EXPORT_SYMBOL(sdei_event_disable);

static int sdei_api_event_unregister(u32 event_num)
{
	return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_UNREGISTER, event_num, 0,
			      0, 0, 0, NULL);
}

/* Called directly by the hotplug callbacks */
static void _local_event_unregister(void *data)
{
	int err;
	struct sdei_crosscall_args *arg = data;

	WARN_ON_ONCE(preemptible());

	err = sdei_api_event_unregister(arg->event->event_num);

	sdei_cross_call_return(arg, err);
}

static int _sdei_event_unregister(struct sdei_event *event)
{
	lockdep_assert_held(&sdei_events_lock);

	if (event->type == SDEI_EVENT_TYPE_SHARED)
		return sdei_api_event_unregister(event->event_num);

	return sdei_do_cross_call(_local_event_unregister, event);
}

int sdei_event_unregister(u32 event_num)
{
	int err;
	struct sdei_event *event;

	WARN_ON(in_nmi());

	mutex_lock(&sdei_events_lock);
	event = sdei_event_find(event_num);
	do {
		if (!event) {
			pr_warn("Event %u not registered\n", event_num);
			err = -ENOENT;
			break;
		}

		spin_lock(&sdei_list_lock);
		event->reregister = false;
		event->reenable = false;
		spin_unlock(&sdei_list_lock);

		err = _sdei_event_unregister(event);
		if (err)
			break;

		sdei_event_destroy(event);
	} while (0);
	mutex_unlock(&sdei_events_lock);

	return err;
}
EXPORT_SYMBOL(sdei_event_unregister);
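/*
 * Illustrative teardown sketch for a hypothetical caller: stop new
 * invocations first, then unregister. -EINPROGRESS (SDEI_PENDING) means the
 * handler is still running somewhere and the unregister should be retried,
 * as sdei_unregister_ghes() does further down.
 *
 *	err = sdei_event_disable(event_num);
 *	if (!err)
 *		err = sdei_event_unregister(event_num);
 */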
/*
 * unregister events, but don't destroy them as they are re-registered by
 * sdei_reregister_shared().
 */
static int sdei_unregister_shared(void)
{
	int err = 0;
	struct sdei_event *event;

	mutex_lock(&sdei_events_lock);
	spin_lock(&sdei_list_lock);
	list_for_each_entry(event, &sdei_list, list) {
		if (event->type != SDEI_EVENT_TYPE_SHARED)
			continue;

		err = _sdei_event_unregister(event);
		if (err)
			break;
	}
	spin_unlock(&sdei_list_lock);
	mutex_unlock(&sdei_events_lock);

	return err;
}

static int sdei_api_event_register(u32 event_num, unsigned long entry_point,
				   void *arg, u64 flags, u64 affinity)
{
	return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_REGISTER, event_num,
			      (unsigned long)entry_point, (unsigned long)arg,
			      flags, affinity, NULL);
}

/* Called directly by the hotplug callbacks */
static void _local_event_register(void *data)
{
	int err;
	struct sdei_registered_event *reg;
	struct sdei_crosscall_args *arg = data;

	WARN_ON(preemptible());

	reg = per_cpu_ptr(arg->event->private_registered, smp_processor_id());
	err = sdei_api_event_register(arg->event->event_num, sdei_entry_point,
				      reg, 0, 0);

	sdei_cross_call_return(arg, err);
}

static int _sdei_event_register(struct sdei_event *event)
{
	int err;

	lockdep_assert_held(&sdei_events_lock);

	if (event->type == SDEI_EVENT_TYPE_SHARED)
		return sdei_api_event_register(event->event_num,
					       sdei_entry_point,
					       event->registered,
					       SDEI_EVENT_REGISTER_RM_ANY, 0);

	err = sdei_do_cross_call(_local_event_register, event);
	if (err)
		sdei_do_cross_call(_local_event_unregister, event);

	return err;
}

int sdei_event_register(u32 event_num, sdei_event_callback *cb, void *arg)
{
	int err;
	struct sdei_event *event;

	WARN_ON(in_nmi());

	mutex_lock(&sdei_events_lock);
	do {
		if (sdei_event_find(event_num)) {
			pr_warn("Event %u already registered\n", event_num);
			err = -EBUSY;
			break;
		}

		event = sdei_event_create(event_num, cb, arg);
		if (IS_ERR(event)) {
			err = PTR_ERR(event);
			pr_warn("Failed to create event %u: %d\n", event_num,
				err);
			break;
		}

		cpus_read_lock();
		err = _sdei_event_register(event);
		if (err) {
			sdei_event_destroy(event);
			pr_warn("Failed to register event %u: %d\n", event_num,
				err);
		} else {
			spin_lock(&sdei_list_lock);
			event->reregister = true;
			spin_unlock(&sdei_list_lock);
		}
		cpus_read_unlock();
	} while (0);
	mutex_unlock(&sdei_events_lock);

	return err;
}
EXPORT_SYMBOL(sdei_event_register);
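/*
 * Illustrative caller flow (sketch): bind a callback to an event, then
 * enable it. my_callback/my_arg are made-up names; the in-tree user is
 * sdei_register_ghes() below.
 *
 *	err = sdei_event_register(event_num, my_callback, my_arg);
 *	if (!err)
 *		err = sdei_event_enable(event_num);
 */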
static int sdei_reregister_event_llocked(struct sdei_event *event)
{
	int err;

	lockdep_assert_held(&sdei_events_lock);
	lockdep_assert_held(&sdei_list_lock);

	err = _sdei_event_register(event);
	if (err) {
		pr_err("Failed to re-register event %u\n", event->event_num);
		sdei_event_destroy_llocked(event);
		return err;
	}

	if (event->reenable) {
		if (event->type == SDEI_EVENT_TYPE_SHARED)
			err = sdei_api_event_enable(event->event_num);
		else
			err = sdei_do_cross_call(_local_event_enable, event);
	}

	if (err)
		pr_err("Failed to re-enable event %u\n", event->event_num);

	return err;
}

static int sdei_reregister_shared(void)
{
	int err = 0;
	struct sdei_event *event;

	mutex_lock(&sdei_events_lock);
	spin_lock(&sdei_list_lock);
	list_for_each_entry(event, &sdei_list, list) {
		if (event->type != SDEI_EVENT_TYPE_SHARED)
			continue;

		if (event->reregister) {
			err = sdei_reregister_event_llocked(event);
			if (err)
				break;
		}
	}
	spin_unlock(&sdei_list_lock);
	mutex_unlock(&sdei_events_lock);

	return err;
}

static int sdei_cpuhp_down(unsigned int cpu)
{
	struct sdei_event *event;
	struct sdei_crosscall_args arg;

	/* un-register private events */
	spin_lock(&sdei_list_lock);
	list_for_each_entry(event, &sdei_list, list) {
		if (event->type == SDEI_EVENT_TYPE_SHARED)
			continue;

		CROSSCALL_INIT(arg, event);
		/* call the cross-call function locally... */
		_local_event_unregister(&arg);
		if (arg.first_error)
			pr_err("Failed to unregister event %u: %d\n",
			       event->event_num, arg.first_error);
	}
	spin_unlock(&sdei_list_lock);

	return sdei_mask_local_cpu();
}

static int sdei_cpuhp_up(unsigned int cpu)
{
	struct sdei_event *event;
	struct sdei_crosscall_args arg;

	/* re-register/enable private events */
	spin_lock(&sdei_list_lock);
	list_for_each_entry(event, &sdei_list, list) {
		if (event->type == SDEI_EVENT_TYPE_SHARED)
			continue;

		if (event->reregister) {
			CROSSCALL_INIT(arg, event);
			/* call the cross-call function locally... */
			_local_event_register(&arg);
			if (arg.first_error)
				pr_err("Failed to re-register event %u: %d\n",
				       event->event_num, arg.first_error);
		}

		if (event->reenable) {
			CROSSCALL_INIT(arg, event);
			_local_event_enable(&arg);
			if (arg.first_error)
				pr_err("Failed to re-enable event %u: %d\n",
				       event->event_num, arg.first_error);
		}
	}
	spin_unlock(&sdei_list_lock);

	return sdei_unmask_local_cpu();
}

/* When entering idle, mask/unmask events for this cpu */
static int sdei_pm_notifier(struct notifier_block *nb, unsigned long action,
			    void *data)
{
	int rv;

	switch (action) {
	case CPU_PM_ENTER:
		rv = sdei_mask_local_cpu();
		break;
	case CPU_PM_EXIT:
	case CPU_PM_ENTER_FAILED:
		rv = sdei_unmask_local_cpu();
		break;
	default:
		return NOTIFY_DONE;
	}

	if (rv)
		return notifier_from_errno(rv);

	return NOTIFY_OK;
}

static struct notifier_block sdei_pm_nb = {
	.notifier_call = sdei_pm_notifier,
};

static int sdei_device_suspend(struct device *dev)
{
	on_each_cpu(_ipi_mask_cpu, NULL, true);

	return 0;
}

static int sdei_device_resume(struct device *dev)
{
	on_each_cpu(_ipi_unmask_cpu, NULL, true);

	return 0;
}
/*
 * We need all events to be reregistered when we resume from hibernate.
 *
 * The sequence is freeze->thaw. Reboot. freeze->restore. We unregister
 * events during freeze, then re-register and re-enable them during thaw
 * and restore.
 */
static int sdei_device_freeze(struct device *dev)
{
	int err;

	/* unregister private events */
	cpuhp_remove_state(CPUHP_AP_ARM_SDEI_STARTING);

	err = sdei_unregister_shared();
	if (err)
		return err;

	return 0;
}

static int sdei_device_thaw(struct device *dev)
{
	int err;

	/* re-register shared events */
	err = sdei_reregister_shared();
	if (err) {
		pr_warn("Failed to re-register shared events...\n");
		sdei_mark_interface_broken();
		return err;
	}

	err = cpuhp_setup_state(CPUHP_AP_ARM_SDEI_STARTING, "SDEI",
				&sdei_cpuhp_up, &sdei_cpuhp_down);
	if (err)
		pr_warn("Failed to re-register CPU hotplug notifier...\n");

	return err;
}

static int sdei_device_restore(struct device *dev)
{
	int err;

	err = sdei_platform_reset();
	if (err)
		return err;

	return sdei_device_thaw(dev);
}

static const struct dev_pm_ops sdei_pm_ops = {
	.suspend = sdei_device_suspend,
	.resume = sdei_device_resume,
	.freeze = sdei_device_freeze,
	.thaw = sdei_device_thaw,
	.restore = sdei_device_restore,
};

/*
 * Mask all CPUs and unregister all events on panic, reboot or kexec.
 */
static int sdei_reboot_notifier(struct notifier_block *nb, unsigned long action,
				void *data)
{
	/*
	 * We are going to reset the interface, after this there is no point
	 * doing work when we take CPUs offline.
	 */
	cpuhp_remove_state(CPUHP_AP_ARM_SDEI_STARTING);

	sdei_platform_reset();

	return NOTIFY_OK;
}

static struct notifier_block sdei_reboot_nb = {
	.notifier_call = sdei_reboot_notifier,
};

static void sdei_smccc_smc(unsigned long function_id,
			   unsigned long arg0, unsigned long arg1,
			   unsigned long arg2, unsigned long arg3,
			   unsigned long arg4, struct arm_smccc_res *res)
{
	arm_smccc_smc(function_id, arg0, arg1, arg2, arg3, arg4, 0, 0, res);
}
NOKPROBE_SYMBOL(sdei_smccc_smc);

static void sdei_smccc_hvc(unsigned long function_id,
			   unsigned long arg0, unsigned long arg1,
			   unsigned long arg2, unsigned long arg3,
			   unsigned long arg4, struct arm_smccc_res *res)
{
	arm_smccc_hvc(function_id, arg0, arg1, arg2, arg3, arg4, 0, 0, res);
}
NOKPROBE_SYMBOL(sdei_smccc_hvc);

int sdei_register_ghes(struct ghes *ghes, sdei_event_callback *normal_cb,
		       sdei_event_callback *critical_cb)
{
	int err;
	u64 result;
	u32 event_num;
	sdei_event_callback *cb;

	if (!IS_ENABLED(CONFIG_ACPI_APEI_GHES))
		return -EOPNOTSUPP;

	event_num = ghes->generic->notify.vector;
	if (event_num == 0) {
		/*
		 * Event 0 is reserved by the specification for
		 * SDEI_EVENT_SIGNAL.
		 */
		return -EINVAL;
	}

	err = sdei_api_event_get_info(event_num, SDEI_EVENT_INFO_EV_PRIORITY,
				      &result);
	if (err)
		return err;

	if (result == SDEI_EVENT_PRIORITY_CRITICAL)
		cb = critical_cb;
	else
		cb = normal_cb;

	err = sdei_event_register(event_num, cb, ghes);
	if (!err)
		err = sdei_event_enable(event_num);

	return err;
}

int sdei_unregister_ghes(struct ghes *ghes)
{
	int i;
	int err;
	u32 event_num = ghes->generic->notify.vector;

	might_sleep();

	if (!IS_ENABLED(CONFIG_ACPI_APEI_GHES))
		return -EOPNOTSUPP;

	/*
	 * The event may be running on another CPU. Disable it
	 * to stop new events, then try to unregister a few times.
	 */
	err = sdei_event_disable(event_num);
	if (err)
		return err;

	for (i = 0; i < 3; i++) {
		err = sdei_event_unregister(event_num);
		if (err != -EINPROGRESS)
			break;

		schedule();
	}

	return err;
}
static int sdei_get_conduit(struct platform_device *pdev)
{
	const char *method;
	struct device_node *np = pdev->dev.of_node;

	sdei_firmware_call = NULL;
	if (np) {
		if (of_property_read_string(np, "method", &method)) {
			pr_warn("missing \"method\" property\n");
			return SMCCC_CONDUIT_NONE;
		}

		if (!strcmp("hvc", method)) {
			sdei_firmware_call = &sdei_smccc_hvc;
			return SMCCC_CONDUIT_HVC;
		} else if (!strcmp("smc", method)) {
			sdei_firmware_call = &sdei_smccc_smc;
			return SMCCC_CONDUIT_SMC;
		}

		pr_warn("invalid \"method\" property: %s\n", method);
	} else if (IS_ENABLED(CONFIG_ACPI) && !acpi_disabled) {
		if (acpi_psci_use_hvc()) {
			sdei_firmware_call = &sdei_smccc_hvc;
			return SMCCC_CONDUIT_HVC;
		} else {
			sdei_firmware_call = &sdei_smccc_smc;
			return SMCCC_CONDUIT_SMC;
		}
	}

	return SMCCC_CONDUIT_NONE;
}

static int sdei_probe(struct platform_device *pdev)
{
	int err;
	u64 ver = 0;
	int conduit;

	conduit = sdei_get_conduit(pdev);
	if (!sdei_firmware_call)
		return 0;

	err = sdei_api_get_version(&ver);
	if (err == -EOPNOTSUPP)
		pr_err("advertised but not implemented in platform firmware\n");
	if (err) {
		pr_err("Failed to get SDEI version: %d\n", err);
		sdei_mark_interface_broken();
		return err;
	}

	pr_info("SDEIv%d.%d (0x%x) detected in firmware.\n",
		(int)SDEI_VERSION_MAJOR(ver), (int)SDEI_VERSION_MINOR(ver),
		(int)SDEI_VERSION_VENDOR(ver));

	if (SDEI_VERSION_MAJOR(ver) != 1) {
		pr_warn("Conflicting SDEI version detected.\n");
		sdei_mark_interface_broken();
		return -EINVAL;
	}

	err = sdei_platform_reset();
	if (err)
		return err;

	sdei_entry_point = sdei_arch_get_entry_point(conduit);
	if (!sdei_entry_point) {
		/* Not supported due to hardware or boot configuration */
		sdei_mark_interface_broken();
		return 0;
	}

	err = cpu_pm_register_notifier(&sdei_pm_nb);
	if (err) {
		pr_warn("Failed to register CPU PM notifier...\n");
		goto error;
	}

	err = register_reboot_notifier(&sdei_reboot_nb);
	if (err) {
		pr_warn("Failed to register reboot notifier...\n");
		goto remove_cpupm;
	}

	err = cpuhp_setup_state(CPUHP_AP_ARM_SDEI_STARTING, "SDEI",
				&sdei_cpuhp_up, &sdei_cpuhp_down);
	if (err) {
		pr_warn("Failed to register CPU hotplug notifier...\n");
		goto remove_reboot;
	}

	return 0;

remove_reboot:
	unregister_reboot_notifier(&sdei_reboot_nb);

remove_cpupm:
	cpu_pm_unregister_notifier(&sdei_pm_nb);

error:
	sdei_mark_interface_broken();
	return err;
}

static const struct of_device_id sdei_of_match[] = {
	{ .compatible = "arm,sdei-1.0" },
	{}
};

static struct platform_driver sdei_driver = {
	.driver = {
		.name = "sdei",
		.pm = &sdei_pm_ops,
		.of_match_table = sdei_of_match,
	},
	.probe = sdei_probe,
};
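/*
 * Illustrative devicetree description (sketch) matched by sdei_present_dt()
 * below and parsed by sdei_get_conduit() above: a node under /firmware with
 * the "arm,sdei-1.0" compatible and a "method" property of "smc" or "hvc".
 * The node name "sdei" is only an assumption for the example.
 *
 *	firmware {
 *		sdei {
 *			compatible = "arm,sdei-1.0";
 *			method = "smc";
 *		};
 *	};
 */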
static bool __init sdei_present_dt(void)
{
	struct device_node *np, *fw_np;

	fw_np = of_find_node_by_name(NULL, "firmware");
	if (!fw_np)
		return false;

	np = of_find_matching_node(fw_np, sdei_of_match);
	if (!np)
		return false;
	of_node_put(np);

	return true;
}

static bool __init sdei_present_acpi(void)
{
	acpi_status status;
	struct platform_device *pdev;
	struct acpi_table_header *sdei_table_header;

	if (acpi_disabled)
		return false;

	status = acpi_get_table(ACPI_SIG_SDEI, 0, &sdei_table_header);
	if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
		const char *msg = acpi_format_exception(status);

		pr_info("Failed to get ACPI:SDEI table, %s\n", msg);
	}
	if (ACPI_FAILURE(status))
		return false;

	pdev = platform_device_register_simple(sdei_driver.driver.name, 0, NULL,
					       0);
	if (IS_ERR(pdev))
		return false;

	return true;
}

static int __init sdei_init(void)
{
	if (sdei_present_dt() || sdei_present_acpi())
		platform_driver_register(&sdei_driver);

	return 0;
}

/*
 * On an ACPI system SDEI needs to be ready before HEST:GHES tries to register
 * its events. ACPI is initialised from a subsys_initcall(), GHES is initialised
 * by device_initcall(). We want to be called in the middle.
 */
subsys_initcall_sync(sdei_init);

int sdei_event_handler(struct pt_regs *regs,
		       struct sdei_registered_event *arg)
{
	int err;
	mm_segment_t orig_addr_limit;
	u32 event_num = arg->event_num;

	orig_addr_limit = get_fs();
	set_fs(USER_DS);

	err = arg->callback(event_num, regs, arg->callback_arg);
	if (err)
		pr_err_ratelimited("event %u on CPU %u failed with error: %d\n",
				   event_num, smp_processor_id(), err);

	set_fs(orig_addr_limit);

	return err;
}
NOKPROBE_SYMBOL(sdei_event_handler);