// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2017 Arm Ltd.
#define pr_fmt(fmt) "sdei: " fmt

#include <acpi/ghes.h>
#include <linux/acpi.h>
#include <linux/arm_sdei.h>
#include <linux/arm-smccc.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/cpuhotplug.h>
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/kvm_host.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/percpu.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/ptrace.h>
#include <linux/preempt.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>

/*
 * The call to use to reach the firmware.
 */
static asmlinkage void (*sdei_firmware_call)(unsigned long function_id,
			unsigned long arg0, unsigned long arg1,
			unsigned long arg2, unsigned long arg3,
			unsigned long arg4, struct arm_smccc_res *res);

/* entry point from firmware to arch asm code */
static unsigned long sdei_entry_point;

struct sdei_event {
	/* These three are protected by the sdei_list_lock */
	struct list_head	list;
	bool			reregister;
	bool			reenable;

	u32			event_num;
	u8			type;
	u8			priority;

	/* This pointer is handed to firmware as the event argument. */
	union {
		/* Shared events */
		struct sdei_registered_event *registered;

		/* CPU private events */
		struct sdei_registered_event __percpu *private_registered;
	};
};

/* Take the mutex for any API call or modification. Take the mutex first. */
static DEFINE_MUTEX(sdei_events_lock);

/* and then hold this when modifying the list */
static DEFINE_SPINLOCK(sdei_list_lock);
static LIST_HEAD(sdei_list);

/* Private events are registered/enabled via IPI passing one of these */
struct sdei_crosscall_args {
	struct sdei_event *event;
	atomic_t errors;
	int first_error;
};

#define CROSSCALL_INIT(arg, event)	(arg.event = event, \
					 arg.first_error = 0, \
					 atomic_set(&arg.errors, 0))

static inline int sdei_do_cross_call(void *fn, struct sdei_event *event)
{
	struct sdei_crosscall_args arg;

	CROSSCALL_INIT(arg, event);
	on_each_cpu(fn, &arg, true);

	return arg.first_error;
}

static inline void
sdei_cross_call_return(struct sdei_crosscall_args *arg, int err)
{
	if (err && (atomic_inc_return(&arg->errors) == 1))
		arg->first_error = err;
}

static int sdei_to_linux_errno(unsigned long sdei_err)
{
	switch (sdei_err) {
	case SDEI_NOT_SUPPORTED:
		return -EOPNOTSUPP;
	case SDEI_INVALID_PARAMETERS:
		return -EINVAL;
	case SDEI_DENIED:
		return -EPERM;
	case SDEI_PENDING:
		return -EINPROGRESS;
	case SDEI_OUT_OF_RESOURCE:
		return -ENOMEM;
	}

	/* Not an error value ... */
	return sdei_err;
}

/*
 * If x0 is any of these values, then the call failed, use sdei_to_linux_errno()
 * to translate.
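 * Note that SDEI_PENDING is treated as an error here too: it becomes
 * -EINPROGRESS, which callers such as sdei_unregister_ghes() use to retry.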
 */
static int sdei_is_err(struct arm_smccc_res *res)
{
	switch (res->a0) {
	case SDEI_NOT_SUPPORTED:
	case SDEI_INVALID_PARAMETERS:
	case SDEI_DENIED:
	case SDEI_PENDING:
	case SDEI_OUT_OF_RESOURCE:
		return true;
	}

	return false;
}

static int invoke_sdei_fn(unsigned long function_id, unsigned long arg0,
			  unsigned long arg1, unsigned long arg2,
			  unsigned long arg3, unsigned long arg4,
			  u64 *result)
{
	int err = 0;
	struct arm_smccc_res res;

	if (sdei_firmware_call) {
		sdei_firmware_call(function_id, arg0, arg1, arg2, arg3, arg4,
				   &res);
		if (sdei_is_err(&res))
			err = sdei_to_linux_errno(res.a0);
	} else {
		/*
		 * !sdei_firmware_call means we failed to probe or called
		 * sdei_mark_interface_broken(). -EIO is not an error returned
		 * by sdei_to_linux_errno() and is used to suppress messages
		 * from this driver.
		 */
		err = -EIO;
		res.a0 = SDEI_NOT_SUPPORTED;
	}

	if (result)
		*result = res.a0;

	return err;
}
NOKPROBE_SYMBOL(invoke_sdei_fn);

static struct sdei_event *sdei_event_find(u32 event_num)
{
	struct sdei_event *e, *found = NULL;

	lockdep_assert_held(&sdei_events_lock);

	spin_lock(&sdei_list_lock);
	list_for_each_entry(e, &sdei_list, list) {
		if (e->event_num == event_num) {
			found = e;
			break;
		}
	}
	spin_unlock(&sdei_list_lock);

	return found;
}

int sdei_api_event_context(u32 query, u64 *result)
{
	return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_CONTEXT, query, 0, 0, 0, 0,
			      result);
}
NOKPROBE_SYMBOL(sdei_api_event_context);

static int sdei_api_event_get_info(u32 event, u32 info, u64 *result)
{
	return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_GET_INFO, event, info, 0,
			      0, 0, result);
}

static struct sdei_event *sdei_event_create(u32 event_num,
					    sdei_event_callback *cb,
					    void *cb_arg)
{
	int err;
	u64 result;
	struct sdei_event *event;
	struct sdei_registered_event *reg;

	lockdep_assert_held(&sdei_events_lock);

	event = kzalloc(sizeof(*event), GFP_KERNEL);
	if (!event)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&event->list);
	event->event_num = event_num;

	err = sdei_api_event_get_info(event_num, SDEI_EVENT_INFO_EV_PRIORITY,
				      &result);
	if (err) {
		kfree(event);
		return ERR_PTR(err);
	}
	event->priority = result;

	err = sdei_api_event_get_info(event_num, SDEI_EVENT_INFO_EV_TYPE,
				      &result);
	if (err) {
		kfree(event);
		return ERR_PTR(err);
	}
	event->type = result;

	if (event->type == SDEI_EVENT_TYPE_SHARED) {
		reg = kzalloc(sizeof(*reg), GFP_KERNEL);
		if (!reg) {
			kfree(event);
			return ERR_PTR(-ENOMEM);
		}

		reg->event_num = event_num;
		reg->priority = event->priority;

		reg->callback = cb;
		reg->callback_arg = cb_arg;
		event->registered = reg;
	} else {
		int cpu;
		struct sdei_registered_event __percpu *regs;

		regs = alloc_percpu(struct sdei_registered_event);
		if (!regs) {
			kfree(event);
			return ERR_PTR(-ENOMEM);
		}

		for_each_possible_cpu(cpu) {
			reg = per_cpu_ptr(regs, cpu);

			reg->event_num = event->event_num;
			reg->priority = event->priority;
			reg->callback = cb;
			reg->callback_arg = cb_arg;
		}

		event->private_registered = regs;
	}

	spin_lock(&sdei_list_lock);
	list_add(&event->list, &sdei_list);
	spin_unlock(&sdei_list_lock);

	return event;
}

static void sdei_event_destroy_llocked(struct sdei_event *event)
{
	lockdep_assert_held(&sdei_events_lock);
	lockdep_assert_held(&sdei_list_lock);

	list_del(&event->list);

	if (event->type == SDEI_EVENT_TYPE_SHARED)
		kfree(event->registered);
	else
		free_percpu(event->private_registered);

	kfree(event);
}

static void sdei_event_destroy(struct sdei_event *event)
{
	spin_lock(&sdei_list_lock);
	sdei_event_destroy_llocked(event);
	spin_unlock(&sdei_list_lock);
}

static int sdei_api_get_version(u64 *version)
{
	return invoke_sdei_fn(SDEI_1_0_FN_SDEI_VERSION, 0, 0, 0, 0, 0, version);
}

int sdei_mask_local_cpu(void)
{
	int err;

	WARN_ON_ONCE(preemptible());

	err = invoke_sdei_fn(SDEI_1_0_FN_SDEI_PE_MASK, 0, 0, 0, 0, 0, NULL);
	if (err && err != -EIO) {
		pr_warn_once("failed to mask CPU[%u]: %d\n",
			     smp_processor_id(), err);
		return err;
	}

	return 0;
}

static void _ipi_mask_cpu(void *ignored)
{
	sdei_mask_local_cpu();
}

int sdei_unmask_local_cpu(void)
{
	int err;

	WARN_ON_ONCE(preemptible());

	err = invoke_sdei_fn(SDEI_1_0_FN_SDEI_PE_UNMASK, 0, 0, 0, 0, 0, NULL);
	if (err && err != -EIO) {
		pr_warn_once("failed to unmask CPU[%u]: %d\n",
			     smp_processor_id(), err);
		return err;
	}

	return 0;
}

static void _ipi_unmask_cpu(void *ignored)
{
	sdei_unmask_local_cpu();
}

static void _ipi_private_reset(void *ignored)
{
	int err;

	err = invoke_sdei_fn(SDEI_1_0_FN_SDEI_PRIVATE_RESET, 0, 0, 0, 0, 0,
			     NULL);
	if (err && err != -EIO)
		pr_warn_once("failed to reset CPU[%u]: %d\n",
			     smp_processor_id(), err);
}

static int sdei_api_shared_reset(void)
{
	return invoke_sdei_fn(SDEI_1_0_FN_SDEI_SHARED_RESET, 0, 0, 0, 0, 0,
			      NULL);
}

static void sdei_mark_interface_broken(void)
{
	pr_err("disabling SDEI firmware interface\n");
	on_each_cpu(&_ipi_mask_cpu, NULL, true);
	sdei_firmware_call = NULL;
}

static int sdei_platform_reset(void)
{
	int err;

	on_each_cpu(&_ipi_private_reset, NULL, true);
	err = sdei_api_shared_reset();
	if (err) {
		pr_err("Failed to reset platform: %d\n", err);
		sdei_mark_interface_broken();
	}

	return err;
}

static int sdei_api_event_enable(u32 event_num)
{
	return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_ENABLE, event_num, 0, 0, 0,
			      0, NULL);
}

/* Called directly by the hotplug callbacks */
static void _local_event_enable(void *data)
{
	int err;
	struct sdei_crosscall_args *arg = data;

	WARN_ON_ONCE(preemptible());

	err = sdei_api_event_enable(arg->event->event_num);

	sdei_cross_call_return(arg, err);
}

int sdei_event_enable(u32 event_num)
{
	int err = -EINVAL;
	struct sdei_event *event;

	mutex_lock(&sdei_events_lock);
	event = sdei_event_find(event_num);
	if (!event) {
		mutex_unlock(&sdei_events_lock);
		return -ENOENT;
	}

	cpus_read_lock();
	if (event->type == SDEI_EVENT_TYPE_SHARED)
		err = sdei_api_event_enable(event->event_num);
	else
		err = sdei_do_cross_call(_local_event_enable, event);

	if (!err) {
		spin_lock(&sdei_list_lock);
		event->reenable = true;
		spin_unlock(&sdei_list_lock);
	}
	cpus_read_unlock();
	mutex_unlock(&sdei_events_lock);

	return err;
}

static int sdei_api_event_disable(u32 event_num)
{
	return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_DISABLE, event_num, 0, 0,
			      0, 0, NULL);
}

static void _ipi_event_disable(void *data)
{
	int err;
	struct sdei_crosscall_args *arg = data;

	err = sdei_api_event_disable(arg->event->event_num);

	sdei_cross_call_return(arg, err);
}

int sdei_event_disable(u32 event_num)
{
	int err = -EINVAL;
	struct sdei_event *event;

	mutex_lock(&sdei_events_lock);
	event = sdei_event_find(event_num);
	if (!event) {
		mutex_unlock(&sdei_events_lock);
		return -ENOENT;
	}

	spin_lock(&sdei_list_lock);
	event->reenable = false;
	spin_unlock(&sdei_list_lock);

	if (event->type == SDEI_EVENT_TYPE_SHARED)
		err = sdei_api_event_disable(event->event_num);
	else
		err = sdei_do_cross_call(_ipi_event_disable, event);
	mutex_unlock(&sdei_events_lock);

	return err;
}

static int sdei_api_event_unregister(u32 event_num)
{
	return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_UNREGISTER, event_num, 0,
			      0, 0, 0, NULL);
}

/* Called directly by the hotplug callbacks */
static void _local_event_unregister(void *data)
{
	int err;
	struct sdei_crosscall_args *arg = data;

	WARN_ON_ONCE(preemptible());

	err = sdei_api_event_unregister(arg->event->event_num);

	sdei_cross_call_return(arg, err);
}

static int _sdei_event_unregister(struct sdei_event *event)
{
	lockdep_assert_held(&sdei_events_lock);

	if (event->type == SDEI_EVENT_TYPE_SHARED)
		return sdei_api_event_unregister(event->event_num);

	return sdei_do_cross_call(_local_event_unregister, event);
}

int sdei_event_unregister(u32 event_num)
{
	int err;
	struct sdei_event *event;

	WARN_ON(in_nmi());

	mutex_lock(&sdei_events_lock);
	event = sdei_event_find(event_num);
	do {
		if (!event) {
			pr_warn("Event %u not registered\n", event_num);
			err = -ENOENT;
			break;
		}

		spin_lock(&sdei_list_lock);
		event->reregister = false;
		event->reenable = false;
		spin_unlock(&sdei_list_lock);

		err = _sdei_event_unregister(event);
		if (err)
			break;

		sdei_event_destroy(event);
	} while (0);
	mutex_unlock(&sdei_events_lock);

	return err;
}

/*
 * unregister events, but don't destroy them as they are re-registered by
 * sdei_reregister_shared().
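 * This is only used on the hibernate path, from sdei_device_freeze().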
 */
static int sdei_unregister_shared(void)
{
	int err = 0;
	struct sdei_event *event;

	mutex_lock(&sdei_events_lock);
	spin_lock(&sdei_list_lock);
	list_for_each_entry(event, &sdei_list, list) {
		if (event->type != SDEI_EVENT_TYPE_SHARED)
			continue;

		err = _sdei_event_unregister(event);
		if (err)
			break;
	}
	spin_unlock(&sdei_list_lock);
	mutex_unlock(&sdei_events_lock);

	return err;
}

static int sdei_api_event_register(u32 event_num, unsigned long entry_point,
				   void *arg, u64 flags, u64 affinity)
{
	return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_REGISTER, event_num,
			      (unsigned long)entry_point, (unsigned long)arg,
			      flags, affinity, NULL);
}

/* Called directly by the hotplug callbacks */
static void _local_event_register(void *data)
{
	int err;
	struct sdei_registered_event *reg;
	struct sdei_crosscall_args *arg = data;

	WARN_ON(preemptible());

	reg = per_cpu_ptr(arg->event->private_registered, smp_processor_id());
	err = sdei_api_event_register(arg->event->event_num, sdei_entry_point,
				      reg, 0, 0);

	sdei_cross_call_return(arg, err);
}

static int _sdei_event_register(struct sdei_event *event)
{
	int err;

	lockdep_assert_held(&sdei_events_lock);

	if (event->type == SDEI_EVENT_TYPE_SHARED)
		return sdei_api_event_register(event->event_num,
					       sdei_entry_point,
					       event->registered,
					       SDEI_EVENT_REGISTER_RM_ANY, 0);

	err = sdei_do_cross_call(_local_event_register, event);
	if (err)
		sdei_do_cross_call(_local_event_unregister, event);

	return err;
}

int sdei_event_register(u32 event_num, sdei_event_callback *cb, void *arg)
{
	int err;
	struct sdei_event *event;

	WARN_ON(in_nmi());

	mutex_lock(&sdei_events_lock);
	do {
		if (sdei_event_find(event_num)) {
			pr_warn("Event %u already registered\n", event_num);
			err = -EBUSY;
			break;
		}

		event = sdei_event_create(event_num, cb, arg);
		if (IS_ERR(event)) {
			err = PTR_ERR(event);
			pr_warn("Failed to create event %u: %d\n", event_num,
				err);
			break;
		}

		cpus_read_lock();
		err = _sdei_event_register(event);
		if (err) {
			sdei_event_destroy(event);
			pr_warn("Failed to register event %u: %d\n", event_num,
				err);
		} else {
			spin_lock(&sdei_list_lock);
			event->reregister = true;
			spin_unlock(&sdei_list_lock);
		}
		cpus_read_unlock();
	} while (0);
	mutex_unlock(&sdei_events_lock);

	return err;
}

static int sdei_reregister_event_llocked(struct sdei_event *event)
{
	int err;

	lockdep_assert_held(&sdei_events_lock);
	lockdep_assert_held(&sdei_list_lock);

	err = _sdei_event_register(event);
	if (err) {
		pr_err("Failed to re-register event %u\n", event->event_num);
		sdei_event_destroy_llocked(event);
		return err;
	}

	if (event->reenable) {
		if (event->type == SDEI_EVENT_TYPE_SHARED)
			err = sdei_api_event_enable(event->event_num);
		else
			err = sdei_do_cross_call(_local_event_enable, event);
	}

	if (err)
		pr_err("Failed to re-enable event %u\n", event->event_num);

	return err;
}
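
/* Called when thawing/restoring from hibernate to bring shared events back. */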
static int sdei_reregister_shared(void)
{
	int err = 0;
	struct sdei_event *event;

	mutex_lock(&sdei_events_lock);
	spin_lock(&sdei_list_lock);
	list_for_each_entry(event, &sdei_list, list) {
		if (event->type != SDEI_EVENT_TYPE_SHARED)
			continue;

		if (event->reregister) {
			err = sdei_reregister_event_llocked(event);
			if (err)
				break;
		}
	}
	spin_unlock(&sdei_list_lock);
	mutex_unlock(&sdei_events_lock);

	return err;
}

static int sdei_cpuhp_down(unsigned int cpu)
{
	struct sdei_event *event;
	struct sdei_crosscall_args arg;

	/* un-register private events */
	spin_lock(&sdei_list_lock);
	list_for_each_entry(event, &sdei_list, list) {
		if (event->type == SDEI_EVENT_TYPE_SHARED)
			continue;

		CROSSCALL_INIT(arg, event);
		/* call the cross-call function locally... */
		_local_event_unregister(&arg);
		if (arg.first_error)
			pr_err("Failed to unregister event %u: %d\n",
			       event->event_num, arg.first_error);
	}
	spin_unlock(&sdei_list_lock);

	return sdei_mask_local_cpu();
}

static int sdei_cpuhp_up(unsigned int cpu)
{
	struct sdei_event *event;
	struct sdei_crosscall_args arg;

	/* re-register/enable private events */
	spin_lock(&sdei_list_lock);
	list_for_each_entry(event, &sdei_list, list) {
		if (event->type == SDEI_EVENT_TYPE_SHARED)
			continue;

		if (event->reregister) {
			CROSSCALL_INIT(arg, event);
			/* call the cross-call function locally... */
			_local_event_register(&arg);
			if (arg.first_error)
				pr_err("Failed to re-register event %u: %d\n",
				       event->event_num, arg.first_error);
		}

		if (event->reenable) {
			CROSSCALL_INIT(arg, event);
			_local_event_enable(&arg);
			if (arg.first_error)
				pr_err("Failed to re-enable event %u: %d\n",
				       event->event_num, arg.first_error);
		}
	}
	spin_unlock(&sdei_list_lock);

	return sdei_unmask_local_cpu();
}

/* When entering idle, mask/unmask events for this cpu */
static int sdei_pm_notifier(struct notifier_block *nb, unsigned long action,
			    void *data)
{
	int rv;

	switch (action) {
	case CPU_PM_ENTER:
		rv = sdei_mask_local_cpu();
		break;
	case CPU_PM_EXIT:
	case CPU_PM_ENTER_FAILED:
		rv = sdei_unmask_local_cpu();
		break;
	default:
		return NOTIFY_DONE;
	}

	if (rv)
		return notifier_from_errno(rv);

	return NOTIFY_OK;
}

static struct notifier_block sdei_pm_nb = {
	.notifier_call = sdei_pm_notifier,
};

static int sdei_device_suspend(struct device *dev)
{
	on_each_cpu(_ipi_mask_cpu, NULL, true);

	return 0;
}

static int sdei_device_resume(struct device *dev)
{
	on_each_cpu(_ipi_unmask_cpu, NULL, true);

	return 0;
}

/*
 * We need all events to be reregistered when we resume from hibernate.
 *
 * The sequence is freeze->thaw. Reboot. freeze->restore. We unregister
 * events during freeze, then re-register and re-enable them during thaw
 * and restore.
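 *
 * Private (per-CPU) events are torn down by removing the CPU hotplug state
 * during freeze, and come back when the state is set up again during thaw.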
 */
static int sdei_device_freeze(struct device *dev)
{
	int err;

	/* unregister private events */
	cpuhp_remove_state(CPUHP_AP_ARM_SDEI_STARTING);

	err = sdei_unregister_shared();
	if (err)
		return err;

	return 0;
}

static int sdei_device_thaw(struct device *dev)
{
	int err;

	/* re-register shared events */
	err = sdei_reregister_shared();
	if (err) {
		pr_warn("Failed to re-register shared events...\n");
		sdei_mark_interface_broken();
		return err;
	}

	err = cpuhp_setup_state(CPUHP_AP_ARM_SDEI_STARTING, "SDEI",
				&sdei_cpuhp_up, &sdei_cpuhp_down);
	if (err)
		pr_warn("Failed to re-register CPU hotplug notifier...\n");

	return err;
}

static int sdei_device_restore(struct device *dev)
{
	int err;

	err = sdei_platform_reset();
	if (err)
		return err;

	return sdei_device_thaw(dev);
}

static const struct dev_pm_ops sdei_pm_ops = {
	.suspend = sdei_device_suspend,
	.resume = sdei_device_resume,
	.freeze = sdei_device_freeze,
	.thaw = sdei_device_thaw,
	.restore = sdei_device_restore,
};

/*
 * Mask all CPUs and unregister all events on panic, reboot or kexec.
 */
static int sdei_reboot_notifier(struct notifier_block *nb, unsigned long action,
				void *data)
{
	/*
	 * We are going to reset the interface, after this there is no point
	 * doing work when we take CPUs offline.
	 */
	cpuhp_remove_state(CPUHP_AP_ARM_SDEI_STARTING);

	sdei_platform_reset();

	return NOTIFY_OK;
}

static struct notifier_block sdei_reboot_nb = {
	.notifier_call = sdei_reboot_notifier,
};

static void sdei_smccc_smc(unsigned long function_id,
			   unsigned long arg0, unsigned long arg1,
			   unsigned long arg2, unsigned long arg3,
			   unsigned long arg4, struct arm_smccc_res *res)
{
	arm_smccc_smc(function_id, arg0, arg1, arg2, arg3, arg4, 0, 0, res);
}
NOKPROBE_SYMBOL(sdei_smccc_smc);

static void sdei_smccc_hvc(unsigned long function_id,
			   unsigned long arg0, unsigned long arg1,
			   unsigned long arg2, unsigned long arg3,
			   unsigned long arg4, struct arm_smccc_res *res)
{
	arm_smccc_hvc(function_id, arg0, arg1, arg2, arg3, arg4, 0, 0, res);
}
NOKPROBE_SYMBOL(sdei_smccc_hvc);

int sdei_register_ghes(struct ghes *ghes, sdei_event_callback *normal_cb,
		       sdei_event_callback *critical_cb)
{
	int err;
	u64 result;
	u32 event_num;
	sdei_event_callback *cb;

	if (!IS_ENABLED(CONFIG_ACPI_APEI_GHES))
		return -EOPNOTSUPP;

	event_num = ghes->generic->notify.vector;
	if (event_num == 0) {
		/*
		 * Event 0 is reserved by the specification for
		 * SDEI_EVENT_SIGNAL.
		 */
		return -EINVAL;
	}

	err = sdei_api_event_get_info(event_num, SDEI_EVENT_INFO_EV_PRIORITY,
				      &result);
	if (err)
		return err;

	if (result == SDEI_EVENT_PRIORITY_CRITICAL)
		cb = critical_cb;
	else
		cb = normal_cb;

	err = sdei_event_register(event_num, cb, ghes);
	if (!err)
		err = sdei_event_enable(event_num);

	return err;
}

int sdei_unregister_ghes(struct ghes *ghes)
{
	int i;
	int err;
	u32 event_num = ghes->generic->notify.vector;

	might_sleep();

	if (!IS_ENABLED(CONFIG_ACPI_APEI_GHES))
		return -EOPNOTSUPP;

	/*
	 * The event may be running on another CPU. Disable it
	 * to stop new events, then try to unregister a few times.
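	 * Unregister returns SDEI_PENDING (-EINPROGRESS here) while a
	 * handler is still running, so give it a chance to finish and retry.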
	 */
	err = sdei_event_disable(event_num);
	if (err)
		return err;

	for (i = 0; i < 3; i++) {
		err = sdei_event_unregister(event_num);
		if (err != -EINPROGRESS)
			break;

		schedule();
	}

	return err;
}

static int sdei_get_conduit(struct platform_device *pdev)
{
	const char *method;
	struct device_node *np = pdev->dev.of_node;

	sdei_firmware_call = NULL;
	if (np) {
		if (of_property_read_string(np, "method", &method)) {
			pr_warn("missing \"method\" property\n");
			return SMCCC_CONDUIT_NONE;
		}

		if (!strcmp("hvc", method)) {
			sdei_firmware_call = &sdei_smccc_hvc;
			return SMCCC_CONDUIT_HVC;
		} else if (!strcmp("smc", method)) {
			sdei_firmware_call = &sdei_smccc_smc;
			return SMCCC_CONDUIT_SMC;
		}

		pr_warn("invalid \"method\" property: %s\n", method);
	} else if (IS_ENABLED(CONFIG_ACPI) && !acpi_disabled) {
		if (acpi_psci_use_hvc()) {
			sdei_firmware_call = &sdei_smccc_hvc;
			return SMCCC_CONDUIT_HVC;
		} else {
			sdei_firmware_call = &sdei_smccc_smc;
			return SMCCC_CONDUIT_SMC;
		}
	}

	return SMCCC_CONDUIT_NONE;
}

static int sdei_probe(struct platform_device *pdev)
{
	int err;
	u64 ver = 0;
	int conduit;

	conduit = sdei_get_conduit(pdev);
	if (!sdei_firmware_call)
		return 0;

	err = sdei_api_get_version(&ver);
	if (err == -EOPNOTSUPP)
		pr_err("advertised but not implemented in platform firmware\n");
	if (err) {
		pr_err("Failed to get SDEI version: %d\n", err);
		sdei_mark_interface_broken();
		return err;
	}

	pr_info("SDEIv%d.%d (0x%x) detected in firmware.\n",
		(int)SDEI_VERSION_MAJOR(ver), (int)SDEI_VERSION_MINOR(ver),
		(int)SDEI_VERSION_VENDOR(ver));

	if (SDEI_VERSION_MAJOR(ver) != 1) {
		pr_warn("Conflicting SDEI version detected.\n");
		sdei_mark_interface_broken();
		return -EINVAL;
	}

	err = sdei_platform_reset();
	if (err)
		return err;

	sdei_entry_point = sdei_arch_get_entry_point(conduit);
	if (!sdei_entry_point) {
		/* Not supported due to hardware or boot configuration */
		sdei_mark_interface_broken();
		return 0;
	}

	err = cpu_pm_register_notifier(&sdei_pm_nb);
	if (err) {
		pr_warn("Failed to register CPU PM notifier...\n");
		goto error;
	}

	err = register_reboot_notifier(&sdei_reboot_nb);
	if (err) {
		pr_warn("Failed to register reboot notifier...\n");
		goto remove_cpupm;
	}

	err = cpuhp_setup_state(CPUHP_AP_ARM_SDEI_STARTING, "SDEI",
				&sdei_cpuhp_up, &sdei_cpuhp_down);
	if (err) {
		pr_warn("Failed to register CPU hotplug notifier...\n");
		goto remove_reboot;
	}

	return 0;

remove_reboot:
	unregister_reboot_notifier(&sdei_reboot_nb);

remove_cpupm:
	cpu_pm_unregister_notifier(&sdei_pm_nb);

error:
	sdei_mark_interface_broken();
	return err;
}

static const struct of_device_id sdei_of_match[] = {
	{ .compatible = "arm,sdei-1.0" },
	{}
};

static struct platform_driver sdei_driver = {
	.driver = {
		.name		= "sdei",
		.pm		= &sdei_pm_ops,
		.of_match_table	= sdei_of_match,
	},
	.probe = sdei_probe,
};
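
/* On an ACPI system, the presence of the SDEI table tells us the firmware implements SDEI. */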
static bool __init sdei_present_acpi(void)
{
	acpi_status status;
	struct acpi_table_header *sdei_table_header;

	if (acpi_disabled)
		return false;

	status = acpi_get_table(ACPI_SIG_SDEI, 0, &sdei_table_header);
	if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
		const char *msg = acpi_format_exception(status);

		pr_info("Failed to get ACPI:SDEI table, %s\n", msg);
	}
	if (ACPI_FAILURE(status))
		return false;

	acpi_put_table(sdei_table_header);

	return true;
}

static int __init sdei_init(void)
{
	int ret = platform_driver_register(&sdei_driver);

	if (!ret && sdei_present_acpi()) {
		struct platform_device *pdev;

		pdev = platform_device_register_simple(sdei_driver.driver.name,
						       0, NULL, 0);
		if (IS_ERR(pdev))
			pr_info("Failed to register ACPI:SDEI platform device %ld\n",
				PTR_ERR(pdev));
	}

	return ret;
}

/*
 * On an ACPI system SDEI needs to be ready before HEST:GHES tries to register
 * its events. ACPI is initialised from a subsys_initcall(), GHES is initialised
 * by device_initcall(). We want to be called in the middle.
 */
subsys_initcall_sync(sdei_init);

int sdei_event_handler(struct pt_regs *regs,
		       struct sdei_registered_event *arg)
{
	int err;
	mm_segment_t orig_addr_limit;
	u32 event_num = arg->event_num;

	/*
	 * Save restore 'fs'.
	 * The architecture's entry code save/restores 'fs' when taking an
	 * exception from the kernel. This ensures addr_limit isn't inherited
	 * if you interrupted something that allowed the uaccess routines to
	 * access kernel memory.
	 * Do the same here because this doesn't come via the same entry code.
	 */
	orig_addr_limit = get_fs();
	set_fs(USER_DS);

	err = arg->callback(event_num, regs, arg->callback_arg);
	if (err)
		pr_err_ratelimited("event %u on CPU %u failed with error: %d\n",
				   event_num, smp_processor_id(), err);

	set_fs(orig_addr_limit);

	return err;
}
NOKPROBE_SYMBOL(sdei_event_handler);