xref: /openbmc/linux/drivers/firmware/arm_sdei.c (revision b4e18b29)
1 // SPDX-License-Identifier: GPL-2.0
2 // Copyright (C) 2017 Arm Ltd.
3 #define pr_fmt(fmt) "sdei: " fmt
4 
5 #include <acpi/ghes.h>
6 #include <linux/acpi.h>
7 #include <linux/arm_sdei.h>
8 #include <linux/arm-smccc.h>
9 #include <linux/atomic.h>
10 #include <linux/bitops.h>
11 #include <linux/compiler.h>
12 #include <linux/cpuhotplug.h>
13 #include <linux/cpu.h>
14 #include <linux/cpu_pm.h>
15 #include <linux/errno.h>
16 #include <linux/hardirq.h>
17 #include <linux/kernel.h>
18 #include <linux/kprobes.h>
19 #include <linux/kvm_host.h>
20 #include <linux/list.h>
21 #include <linux/mutex.h>
22 #include <linux/notifier.h>
23 #include <linux/of.h>
24 #include <linux/of_platform.h>
25 #include <linux/percpu.h>
26 #include <linux/platform_device.h>
27 #include <linux/pm.h>
28 #include <linux/ptrace.h>
29 #include <linux/preempt.h>
30 #include <linux/reboot.h>
31 #include <linux/slab.h>
32 #include <linux/smp.h>
33 #include <linux/spinlock.h>
34 
35 /*
36  * The call to use to reach the firmware.
37  */
38 static asmlinkage void (*sdei_firmware_call)(unsigned long function_id,
39 		      unsigned long arg0, unsigned long arg1,
40 		      unsigned long arg2, unsigned long arg3,
41 		      unsigned long arg4, struct arm_smccc_res *res);
42 
43 /* entry point from firmware to arch asm code */
44 static unsigned long sdei_entry_point;
45 
46 struct sdei_event {
47 	/* These three are protected by the sdei_list_lock */
48 	struct list_head	list;
49 	bool			reregister;
50 	bool			reenable;
51 
52 	u32			event_num;
53 	u8			type;
54 	u8			priority;
55 
56 	/* This pointer is handed to firmware as the event argument. */
57 	union {
58 		/* Shared events */
59 		struct sdei_registered_event *registered;
60 
61 		/* CPU private events */
62 		struct sdei_registered_event __percpu *private_registered;
63 	};
64 };
65 
66 /* Take this mutex for any API call or modification, and take it before sdei_list_lock. */
67 static DEFINE_MUTEX(sdei_events_lock);
68 
69 /* and then hold this when modifying the list */
70 static DEFINE_SPINLOCK(sdei_list_lock);
71 static LIST_HEAD(sdei_list);
72 
73 /* Private events are registered/enabled via IPI passing one of these */
74 struct sdei_crosscall_args {
75 	struct sdei_event *event;
76 	atomic_t errors;
77 	int first_error;
78 };
79 
80 #define CROSSCALL_INIT(arg, event)		\
81 	do {					\
82 		arg.event = event;		\
83 		arg.first_error = 0;		\
84 		atomic_set(&arg.errors, 0);	\
85 	} while (0)
86 
87 static inline int sdei_do_local_call(smp_call_func_t fn,
88 				     struct sdei_event *event)
89 {
90 	struct sdei_crosscall_args arg;
91 
92 	CROSSCALL_INIT(arg, event);
93 	fn(&arg);
94 
95 	return arg.first_error;
96 }
97 
98 static inline int sdei_do_cross_call(smp_call_func_t fn,
99 				     struct sdei_event *event)
100 {
101 	struct sdei_crosscall_args arg;
102 
103 	CROSSCALL_INIT(arg, event);
104 	on_each_cpu(fn, &arg, true);
105 
106 	return arg.first_error;
107 }
108 
109 static inline void
110 sdei_cross_call_return(struct sdei_crosscall_args *arg, int err)
111 {
112 	if (err && (atomic_inc_return(&arg->errors) == 1))
113 		arg->first_error = err;
114 }
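/*
 * The helpers above give private (per-CPU) event operations a uniform shape:
 * the caller builds a struct sdei_crosscall_args on its stack with
 * CROSSCALL_INIT(), runs the callback either locally or on every CPU via
 * on_each_cpu(), and each invocation finishes with sdei_cross_call_return().
 * Only the first non-zero error is kept; later failures merely bump the
 * error count. A sketch of a caller, modelled on _local_event_enable()
 * further down (the names here are placeholders, not part of this file):
 *
 *	static void _local_frob(void *data)
 *	{
 *		struct sdei_crosscall_args *arg = data;
 *		int err = sdei_api_event_enable(arg->event->event_num);
 *
 *		sdei_cross_call_return(arg, err);
 *	}
 *
 *	err = sdei_do_cross_call(_local_frob, event);
 */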
115 
116 static int sdei_to_linux_errno(unsigned long sdei_err)
117 {
118 	switch (sdei_err) {
119 	case SDEI_NOT_SUPPORTED:
120 		return -EOPNOTSUPP;
121 	case SDEI_INVALID_PARAMETERS:
122 		return -EINVAL;
123 	case SDEI_DENIED:
124 		return -EPERM;
125 	case SDEI_PENDING:
126 		return -EINPROGRESS;
127 	case SDEI_OUT_OF_RESOURCE:
128 		return -ENOMEM;
129 	}
130 
131 	return 0;
132 }
133 
134 static int invoke_sdei_fn(unsigned long function_id, unsigned long arg0,
135 			  unsigned long arg1, unsigned long arg2,
136 			  unsigned long arg3, unsigned long arg4,
137 			  u64 *result)
138 {
139 	int err;
140 	struct arm_smccc_res res;
141 
142 	if (sdei_firmware_call) {
143 		sdei_firmware_call(function_id, arg0, arg1, arg2, arg3, arg4,
144 				   &res);
145 		err = sdei_to_linux_errno(res.a0);
146 	} else {
147 		/*
148 		 * !sdei_firmware_call means we failed to probe or called
149 		 * sdei_mark_interface_broken(). -EIO is not an error returned
150 		 * by sdei_to_linux_errno() and is used to suppress messages
151 		 * from this driver.
152 		 */
153 		err = -EIO;
154 		res.a0 = SDEI_NOT_SUPPORTED;
155 	}
156 
157 	if (result)
158 		*result = res.a0;
159 
160 	return err;
161 }
162 NOKPROBE_SYMBOL(invoke_sdei_fn);
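/*
 * invoke_sdei_fn() is the single path to firmware: the SDEI status returned
 * in res.a0 is translated to a Linux errno by sdei_to_linux_errno(), and
 * callers that also need the raw value (a version number, an event property)
 * pass a non-NULL result pointer. A minimal sketch of a query, using the same
 * pattern as sdei_api_get_version() below:
 *
 *	u64 ver;
 *	int err;
 *
 *	err = invoke_sdei_fn(SDEI_1_0_FN_SDEI_VERSION, 0, 0, 0, 0, 0, &ver);
 */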
163 
164 static struct sdei_event *sdei_event_find(u32 event_num)
165 {
166 	struct sdei_event *e, *found = NULL;
167 
168 	lockdep_assert_held(&sdei_events_lock);
169 
170 	spin_lock(&sdei_list_lock);
171 	list_for_each_entry(e, &sdei_list, list) {
172 		if (e->event_num == event_num) {
173 			found = e;
174 			break;
175 		}
176 	}
177 	spin_unlock(&sdei_list_lock);
178 
179 	return found;
180 }
181 
182 int sdei_api_event_context(u32 query, u64 *result)
183 {
184 	return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_CONTEXT, query, 0, 0, 0, 0,
185 			      result);
186 }
187 NOKPROBE_SYMBOL(sdei_api_event_context);
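/*
 * SDEI_EVENT_CONTEXT lets a running event handler read back a register that
 * firmware saved from the interrupted context when it dispatched the event
 * (the query selects which register), so it is only meaningful from handler
 * context. That is presumably why this helper is not static, and why it is
 * NOKPROBE like the rest of the event-handling path.
 */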
188 
189 static int sdei_api_event_get_info(u32 event, u32 info, u64 *result)
190 {
191 	return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_GET_INFO, event, info, 0,
192 			      0, 0, result);
193 }
194 
195 static struct sdei_event *sdei_event_create(u32 event_num,
196 					    sdei_event_callback *cb,
197 					    void *cb_arg)
198 {
199 	int err;
200 	u64 result;
201 	struct sdei_event *event;
202 	struct sdei_registered_event *reg;
203 
204 	lockdep_assert_held(&sdei_events_lock);
205 
206 	event = kzalloc(sizeof(*event), GFP_KERNEL);
207 	if (!event) {
208 		err = -ENOMEM;
209 		goto fail;
210 	}
211 
212 	INIT_LIST_HEAD(&event->list);
213 	event->event_num = event_num;
214 
215 	err = sdei_api_event_get_info(event_num, SDEI_EVENT_INFO_EV_PRIORITY,
216 				      &result);
217 	if (err)
218 		goto fail;
219 	event->priority = result;
220 
221 	err = sdei_api_event_get_info(event_num, SDEI_EVENT_INFO_EV_TYPE,
222 				      &result);
223 	if (err)
224 		goto fail;
225 	event->type = result;
226 
227 	if (event->type == SDEI_EVENT_TYPE_SHARED) {
228 		reg = kzalloc(sizeof(*reg), GFP_KERNEL);
229 		if (!reg) {
230 			err = -ENOMEM;
231 			goto fail;
232 		}
233 
234 		reg->event_num = event->event_num;
235 		reg->priority = event->priority;
236 
237 		reg->callback = cb;
238 		reg->callback_arg = cb_arg;
239 		event->registered = reg;
240 	} else {
241 		int cpu;
242 		struct sdei_registered_event __percpu *regs;
243 
244 		regs = alloc_percpu(struct sdei_registered_event);
245 		if (!regs) {
246 			err = -ENOMEM;
247 			goto fail;
248 		}
249 
250 		for_each_possible_cpu(cpu) {
251 			reg = per_cpu_ptr(regs, cpu);
252 
253 			reg->event_num = event->event_num;
254 			reg->priority = event->priority;
255 			reg->callback = cb;
256 			reg->callback_arg = cb_arg;
257 		}
258 
259 		event->private_registered = regs;
260 	}
261 
262 	spin_lock(&sdei_list_lock);
263 	list_add(&event->list, &sdei_list);
264 	spin_unlock(&sdei_list_lock);
265 
266 	return event;
267 
268 fail:
269 	kfree(event);
270 	return ERR_PTR(err);
271 }
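/*
 * Note how the union in struct sdei_event gets populated: a shared event is
 * given a single struct sdei_registered_event, while a private (CPU-bound)
 * event gets one copy per possible CPU from alloc_percpu(). The per-CPU copy
 * is what _local_event_register() later hands to firmware on each CPU, so the
 * pointer delivered with an event always describes the registration of the
 * CPU that is handling it.
 */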
272 
273 static void sdei_event_destroy_llocked(struct sdei_event *event)
274 {
275 	lockdep_assert_held(&sdei_events_lock);
276 	lockdep_assert_held(&sdei_list_lock);
277 
278 	list_del(&event->list);
279 
280 	if (event->type == SDEI_EVENT_TYPE_SHARED)
281 		kfree(event->registered);
282 	else
283 		free_percpu(event->private_registered);
284 
285 	kfree(event);
286 }
287 
288 static void sdei_event_destroy(struct sdei_event *event)
289 {
290 	spin_lock(&sdei_list_lock);
291 	sdei_event_destroy_llocked(event);
292 	spin_unlock(&sdei_list_lock);
293 }
294 
295 static int sdei_api_get_version(u64 *version)
296 {
297 	return invoke_sdei_fn(SDEI_1_0_FN_SDEI_VERSION, 0, 0, 0, 0, 0, version);
298 }
299 
300 int sdei_mask_local_cpu(void)
301 {
302 	int err;
303 
304 	WARN_ON_ONCE(preemptible());
305 
306 	err = invoke_sdei_fn(SDEI_1_0_FN_SDEI_PE_MASK, 0, 0, 0, 0, 0, NULL);
307 	if (err && err != -EIO) {
308 		pr_warn_once("failed to mask CPU[%u]: %d\n",
309 			      smp_processor_id(), err);
310 		return err;
311 	}
312 
313 	return 0;
314 }
315 
316 static void _ipi_mask_cpu(void *ignored)
317 {
318 	sdei_mask_local_cpu();
319 }
320 
321 int sdei_unmask_local_cpu(void)
322 {
323 	int err;
324 
325 	WARN_ON_ONCE(preemptible());
326 
327 	err = invoke_sdei_fn(SDEI_1_0_FN_SDEI_PE_UNMASK, 0, 0, 0, 0, 0, NULL);
328 	if (err && err != -EIO) {
329 		pr_warn_once("failed to unmask CPU[%u]: %d\n",
330 			     smp_processor_id(), err);
331 		return err;
332 	}
333 
334 	return 0;
335 }
336 
337 static void _ipi_unmask_cpu(void *ignored)
338 {
339 	sdei_unmask_local_cpu();
340 }
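/*
 * Masking tells firmware not to deliver any events to this PE; unmasking
 * allows delivery again. The kernel masks a CPU when it goes idle (via the
 * CPU PM notifier below), when it is suspended or taken offline, and unmasks
 * it on the way back. The _ipi_*() wrappers exist only so on_each_cpu() can
 * run these per-CPU calls everywhere at once.
 */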
341 
342 static void _ipi_private_reset(void *ignored)
343 {
344 	int err;
345 
346 	err = invoke_sdei_fn(SDEI_1_0_FN_SDEI_PRIVATE_RESET, 0, 0, 0, 0, 0,
347 			     NULL);
348 	if (err && err != -EIO)
349 		pr_warn_once("failed to reset CPU[%u]: %d\n",
350 			     smp_processor_id(), err);
351 }
352 
353 static int sdei_api_shared_reset(void)
354 {
355 	return invoke_sdei_fn(SDEI_1_0_FN_SDEI_SHARED_RESET, 0, 0, 0, 0, 0,
356 			      NULL);
357 }
358 
359 static void sdei_mark_interface_broken(void)
360 {
361 	pr_err("disabling SDEI firmware interface\n");
362 	on_each_cpu(&_ipi_mask_cpu, NULL, true);
363 	sdei_firmware_call = NULL;
364 }
365 
366 static int sdei_platform_reset(void)
367 {
368 	int err;
369 
370 	on_each_cpu(&_ipi_private_reset, NULL, true);
371 	err = sdei_api_shared_reset();
372 	if (err) {
373 		pr_err("Failed to reset platform: %d\n", err);
374 		sdei_mark_interface_broken();
375 	}
376 
377 	return err;
378 }
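/*
 * A "platform reset" drops everything firmware knows about this kernel:
 * PRIVATE_RESET runs on every CPU to clear per-CPU registrations, then
 * SHARED_RESET clears the shared ones. It is used at probe time, when
 * restoring from hibernation and from the reboot notifier, so that no stale
 * registrations (e.g. from a previous kernel) survive.
 */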
379 
380 static int sdei_api_event_enable(u32 event_num)
381 {
382 	return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_ENABLE, event_num, 0, 0, 0,
383 			      0, NULL);
384 }
385 
386 /* Called directly by the hotplug callbacks */
387 static void _local_event_enable(void *data)
388 {
389 	int err;
390 	struct sdei_crosscall_args *arg = data;
391 
392 	WARN_ON_ONCE(preemptible());
393 
394 	err = sdei_api_event_enable(arg->event->event_num);
395 
396 	sdei_cross_call_return(arg, err);
397 }
398 
399 int sdei_event_enable(u32 event_num)
400 {
401 	int err = -EINVAL;
402 	struct sdei_event *event;
403 
404 	mutex_lock(&sdei_events_lock);
405 	event = sdei_event_find(event_num);
406 	if (!event) {
407 		mutex_unlock(&sdei_events_lock);
408 		return -ENOENT;
409 	}
410 
411 
412 	cpus_read_lock();
413 	if (event->type == SDEI_EVENT_TYPE_SHARED)
414 		err = sdei_api_event_enable(event->event_num);
415 	else
416 		err = sdei_do_cross_call(_local_event_enable, event);
417 
418 	if (!err) {
419 		spin_lock(&sdei_list_lock);
420 		event->reenable = true;
421 		spin_unlock(&sdei_list_lock);
422 	}
423 	cpus_read_unlock();
424 	mutex_unlock(&sdei_events_lock);
425 
426 	return err;
427 }
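/*
 * A typical client registers an event and then enables it, exactly as
 * sdei_register_ghes() does further down. A hedged sketch, where
 * my_event_num, my_cb and my_arg stand in for the caller's values:
 *
 *	err = sdei_event_register(my_event_num, my_cb, my_arg);
 *	if (!err)
 *		err = sdei_event_enable(my_event_num);
 *
 * The reenable flag set above is what lets the CPU hotplug and hibernate
 * paths turn the event back on later without the client's involvement.
 */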
428 
429 static int sdei_api_event_disable(u32 event_num)
430 {
431 	return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_DISABLE, event_num, 0, 0,
432 			      0, 0, NULL);
433 }
434 
435 static void _ipi_event_disable(void *data)
436 {
437 	int err;
438 	struct sdei_crosscall_args *arg = data;
439 
440 	err = sdei_api_event_disable(arg->event->event_num);
441 
442 	sdei_cross_call_return(arg, err);
443 }
444 
445 int sdei_event_disable(u32 event_num)
446 {
447 	int err = -EINVAL;
448 	struct sdei_event *event;
449 
450 	mutex_lock(&sdei_events_lock);
451 	event = sdei_event_find(event_num);
452 	if (!event) {
453 		mutex_unlock(&sdei_events_lock);
454 		return -ENOENT;
455 	}
456 
457 	spin_lock(&sdei_list_lock);
458 	event->reenable = false;
459 	spin_unlock(&sdei_list_lock);
460 
461 	if (event->type == SDEI_EVENT_TYPE_SHARED)
462 		err = sdei_api_event_disable(event->event_num);
463 	else
464 		err = sdei_do_cross_call(_ipi_event_disable, event);
465 	mutex_unlock(&sdei_events_lock);
466 
467 	return err;
468 }
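/*
 * Note the ordering: reenable is cleared under sdei_list_lock before the
 * firmware call, so a CPU coming online in that window sees the flag already
 * clear and sdei_cpuhp_up() cannot switch the event back on behind our back.
 */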
469 
470 static int sdei_api_event_unregister(u32 event_num)
471 {
472 	return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_UNREGISTER, event_num, 0,
473 			      0, 0, 0, NULL);
474 }
475 
476 /* Called directly by the hotplug callbacks */
477 static void _local_event_unregister(void *data)
478 {
479 	int err;
480 	struct sdei_crosscall_args *arg = data;
481 
482 	WARN_ON_ONCE(preemptible());
483 
484 	err = sdei_api_event_unregister(arg->event->event_num);
485 
486 	sdei_cross_call_return(arg, err);
487 }
488 
489 int sdei_event_unregister(u32 event_num)
490 {
491 	int err;
492 	struct sdei_event *event;
493 
494 	WARN_ON(in_nmi());
495 
496 	mutex_lock(&sdei_events_lock);
497 	event = sdei_event_find(event_num);
498 	if (!event) {
499 		pr_warn("Event %u not registered\n", event_num);
500 		err = -ENOENT;
501 		goto unlock;
502 	}
503 
504 	spin_lock(&sdei_list_lock);
505 	event->reregister = false;
506 	event->reenable = false;
507 	spin_unlock(&sdei_list_lock);
508 
509 	if (event->type == SDEI_EVENT_TYPE_SHARED)
510 		err = sdei_api_event_unregister(event->event_num);
511 	else
512 		err = sdei_do_cross_call(_local_event_unregister, event);
513 
514 	if (err)
515 		goto unlock;
516 
517 	sdei_event_destroy(event);
518 unlock:
519 	mutex_unlock(&sdei_events_lock);
520 
521 	return err;
522 }
523 
524 /*
525  * unregister events, but don't destroy them as they are re-registered by
526  * sdei_reregister_shared().
527  */
528 static int sdei_unregister_shared(void)
529 {
530 	int err = 0;
531 	struct sdei_event *event;
532 
533 	mutex_lock(&sdei_events_lock);
534 	spin_lock(&sdei_list_lock);
535 	list_for_each_entry(event, &sdei_list, list) {
536 		if (event->type != SDEI_EVENT_TYPE_SHARED)
537 			continue;
538 
539 		err = sdei_api_event_unregister(event->event_num);
540 		if (err)
541 			break;
542 	}
543 	spin_unlock(&sdei_list_lock);
544 	mutex_unlock(&sdei_events_lock);
545 
546 	return err;
547 }
548 
549 static int sdei_api_event_register(u32 event_num, unsigned long entry_point,
550 				   void *arg, u64 flags, u64 affinity)
551 {
552 	return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_REGISTER, event_num,
553 			      (unsigned long)entry_point, (unsigned long)arg,
554 			      flags, affinity, NULL);
555 }
556 
557 /* Called directly by the hotplug callbacks */
558 static void _local_event_register(void *data)
559 {
560 	int err;
561 	struct sdei_registered_event *reg;
562 	struct sdei_crosscall_args *arg = data;
563 
564 	WARN_ON(preemptible());
565 
566 	reg = per_cpu_ptr(arg->event->private_registered, smp_processor_id());
567 	err = sdei_api_event_register(arg->event->event_num, sdei_entry_point,
568 				      reg, 0, 0);
569 
570 	sdei_cross_call_return(arg, err);
571 }
572 
573 int sdei_event_register(u32 event_num, sdei_event_callback *cb, void *arg)
574 {
575 	int err;
576 	struct sdei_event *event;
577 
578 	WARN_ON(in_nmi());
579 
580 	mutex_lock(&sdei_events_lock);
581 	if (sdei_event_find(event_num)) {
582 		pr_warn("Event %u already registered\n", event_num);
583 		err = -EBUSY;
584 		goto unlock;
585 	}
586 
587 	event = sdei_event_create(event_num, cb, arg);
588 	if (IS_ERR(event)) {
589 		err = PTR_ERR(event);
590 		pr_warn("Failed to create event %u: %d\n", event_num, err);
591 		goto unlock;
592 	}
593 
594 	cpus_read_lock();
595 	if (event->type == SDEI_EVENT_TYPE_SHARED) {
596 		err = sdei_api_event_register(event->event_num,
597 					      sdei_entry_point,
598 					      event->registered,
599 					      SDEI_EVENT_REGISTER_RM_ANY, 0);
600 	} else {
601 		err = sdei_do_cross_call(_local_event_register, event);
602 		if (err)
603 			sdei_do_cross_call(_local_event_unregister, event);
604 	}
605 
606 	if (err) {
607 		sdei_event_destroy(event);
608 		pr_warn("Failed to register event %u: %d\n", event_num, err);
609 		goto cpu_unlock;
610 	}
611 
612 	spin_lock(&sdei_list_lock);
613 	event->reregister = true;
614 	spin_unlock(&sdei_list_lock);
615 cpu_unlock:
616 	cpus_read_unlock();
617 unlock:
618 	mutex_unlock(&sdei_events_lock);
619 	return err;
620 }
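/*
 * Registration differs by event type: a shared event is registered once with
 * the "route to any PE" mode (SDEI_EVENT_REGISTER_RM_ANY), while a private
 * event has to be registered on every CPU, which is what the cross call does.
 * If a private registration fails part-way, the cross call is repeated with
 * _local_event_unregister() to unwind the CPUs that did succeed. The
 * reregister flag records that the hotplug and hibernate paths should redo
 * this registration later.
 */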
621 
622 static int sdei_reregister_shared(void)
623 {
624 	int err = 0;
625 	struct sdei_event *event;
626 
627 	mutex_lock(&sdei_events_lock);
628 	spin_lock(&sdei_list_lock);
629 	list_for_each_entry(event, &sdei_list, list) {
630 		if (event->type != SDEI_EVENT_TYPE_SHARED)
631 			continue;
632 
633 		if (event->reregister) {
634 			err = sdei_api_event_register(event->event_num,
635 					sdei_entry_point, event->registered,
636 					SDEI_EVENT_REGISTER_RM_ANY, 0);
637 			if (err) {
638 				pr_err("Failed to re-register event %u\n",
639 				       event->event_num);
640 				sdei_event_destroy_llocked(event);
641 				break;
642 			}
643 		}
644 
645 		if (event->reenable) {
646 			err = sdei_api_event_enable(event->event_num);
647 			if (err) {
648 				pr_err("Failed to re-enable event %u\n",
649 				       event->event_num);
650 				break;
651 			}
652 		}
653 	}
654 	spin_unlock(&sdei_list_lock);
655 	mutex_unlock(&sdei_events_lock);
656 
657 	return err;
658 }
659 
660 static int sdei_cpuhp_down(unsigned int cpu)
661 {
662 	struct sdei_event *event;
663 	int err;
664 
665 	/* un-register private events */
666 	spin_lock(&sdei_list_lock);
667 	list_for_each_entry(event, &sdei_list, list) {
668 		if (event->type == SDEI_EVENT_TYPE_SHARED)
669 			continue;
670 
671 		err = sdei_do_local_call(_local_event_unregister, event);
672 		if (err) {
673 			pr_err("Failed to unregister event %u: %d\n",
674 			       event->event_num, err);
675 		}
676 	}
677 	spin_unlock(&sdei_list_lock);
678 
679 	return sdei_mask_local_cpu();
680 }
681 
682 static int sdei_cpuhp_up(unsigned int cpu)
683 {
684 	struct sdei_event *event;
685 	int err;
686 
687 	/* re-register/enable private events */
688 	spin_lock(&sdei_list_lock);
689 	list_for_each_entry(event, &sdei_list, list) {
690 		if (event->type == SDEI_EVENT_TYPE_SHARED)
691 			continue;
692 
693 		if (event->reregister) {
694 			err = sdei_do_local_call(_local_event_register, event);
695 			if (err) {
696 				pr_err("Failed to re-register event %u: %d\n",
697 				       event->event_num, err);
698 			}
699 		}
700 
701 		if (event->reenable) {
702 			err = sdei_do_local_call(_local_event_enable, event);
703 			if (err) {
704 				pr_err("Failed to re-enable event %u: %d\n",
705 				       event->event_num, err);
706 			}
707 		}
708 	}
709 	spin_unlock(&sdei_list_lock);
710 
711 	return sdei_unmask_local_cpu();
712 }
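/*
 * Private events are registered with firmware separately on each CPU, so they
 * are unregistered before a CPU goes down and re-registered/re-enabled when
 * it comes back up. CPUHP_AP_ARM_SDEI_STARTING is a "starting" state, so
 * these callbacks run on the CPU in question with interrupts disabled, which
 * is why sdei_do_local_call() is sufficient here instead of a cross call.
 */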
713 
714 /* When entering idle, mask/unmask events for this cpu */
715 static int sdei_pm_notifier(struct notifier_block *nb, unsigned long action,
716 			    void *data)
717 {
718 	int rv;
719 
720 	switch (action) {
721 	case CPU_PM_ENTER:
722 		rv = sdei_mask_local_cpu();
723 		break;
724 	case CPU_PM_EXIT:
725 	case CPU_PM_ENTER_FAILED:
726 		rv = sdei_unmask_local_cpu();
727 		break;
728 	default:
729 		return NOTIFY_DONE;
730 	}
731 
732 	if (rv)
733 		return notifier_from_errno(rv);
734 
735 	return NOTIFY_OK;
736 }
737 
738 static struct notifier_block sdei_pm_nb = {
739 	.notifier_call = sdei_pm_notifier,
740 };
741 
742 static int sdei_device_suspend(struct device *dev)
743 {
744 	on_each_cpu(_ipi_mask_cpu, NULL, true);
745 
746 	return 0;
747 }
748 
749 static int sdei_device_resume(struct device *dev)
750 {
751 	on_each_cpu(_ipi_unmask_cpu, NULL, true);
752 
753 	return 0;
754 }
755 
756 /*
757  * We need all events to be reregistered when we resume from hibernate.
758  *
759  * The sequence is freeze->thaw. Reboot. freeze->restore. We unregister
760  * events during freeze, then re-register and re-enable them during thaw
761  * and restore.
762  */
763 static int sdei_device_freeze(struct device *dev)
764 {
765 	int err;
766 
767 	/* unregister private events */
768 	cpuhp_remove_state(CPUHP_AP_ARM_SDEI_STARTING);
769 
770 	err = sdei_unregister_shared();
771 	if (err)
772 		return err;
773 
774 	return 0;
775 }
776 
777 static int sdei_device_thaw(struct device *dev)
778 {
779 	int err;
780 
781 	/* re-register shared events */
782 	err = sdei_reregister_shared();
783 	if (err) {
784 		pr_warn("Failed to re-register shared events...\n");
785 		sdei_mark_interface_broken();
786 		return err;
787 	}
788 
789 	err = cpuhp_setup_state(CPUHP_AP_ARM_SDEI_STARTING, "SDEI",
790 				&sdei_cpuhp_up, &sdei_cpuhp_down);
791 	if (err)
792 		pr_warn("Failed to re-register CPU hotplug notifier...\n");
793 
794 	return err;
795 }
796 
797 static int sdei_device_restore(struct device *dev)
798 {
799 	int err;
800 
801 	err = sdei_platform_reset();
802 	if (err)
803 		return err;
804 
805 	return sdei_device_thaw(dev);
806 }
807 
808 static const struct dev_pm_ops sdei_pm_ops = {
809 	.suspend = sdei_device_suspend,
810 	.resume = sdei_device_resume,
811 	.freeze = sdei_device_freeze,
812 	.thaw = sdei_device_thaw,
813 	.restore = sdei_device_restore,
814 };
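/*
 * The split above mirrors how much firmware state survives each transition:
 * plain suspend/resume keeps registrations, so masking and unmasking every
 * CPU is enough. Hibernation does not, so freeze unregisters everything
 * (private events via the cpuhp teardown, shared events explicitly), thaw
 * re-registers and re-enables whatever had reregister/reenable set, and
 * restore additionally resets the firmware state first.
 */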
815 
816 /*
817  * Mask all CPUs and unregister all events on panic, reboot or kexec.
818  */
819 static int sdei_reboot_notifier(struct notifier_block *nb, unsigned long action,
820 				void *data)
821 {
822 	/*
823 	 * We are going to reset the interface, after this there is no point
824 	 * doing work when we take CPUs offline.
825 	 */
826 	cpuhp_remove_state(CPUHP_AP_ARM_SDEI_STARTING);
827 
828 	sdei_platform_reset();
829 
830 	return NOTIFY_OK;
831 }
832 
833 static struct notifier_block sdei_reboot_nb = {
834 	.notifier_call = sdei_reboot_notifier,
835 };
836 
837 static void sdei_smccc_smc(unsigned long function_id,
838 			   unsigned long arg0, unsigned long arg1,
839 			   unsigned long arg2, unsigned long arg3,
840 			   unsigned long arg4, struct arm_smccc_res *res)
841 {
842 	arm_smccc_smc(function_id, arg0, arg1, arg2, arg3, arg4, 0, 0, res);
843 }
844 NOKPROBE_SYMBOL(sdei_smccc_smc);
845 
846 static void sdei_smccc_hvc(unsigned long function_id,
847 			   unsigned long arg0, unsigned long arg1,
848 			   unsigned long arg2, unsigned long arg3,
849 			   unsigned long arg4, struct arm_smccc_res *res)
850 {
851 	arm_smccc_hvc(function_id, arg0, arg1, arg2, arg3, arg4, 0, 0, res);
852 }
853 NOKPROBE_SYMBOL(sdei_smccc_hvc);
854 
855 int sdei_register_ghes(struct ghes *ghes, sdei_event_callback *normal_cb,
856 		       sdei_event_callback *critical_cb)
857 {
858 	int err;
859 	u64 result;
860 	u32 event_num;
861 	sdei_event_callback *cb;
862 
863 	if (!IS_ENABLED(CONFIG_ACPI_APEI_GHES))
864 		return -EOPNOTSUPP;
865 
866 	event_num = ghes->generic->notify.vector;
867 	if (event_num == 0) {
868 		/*
869 		 * Event 0 is reserved by the specification for
870 		 * SDEI_EVENT_SIGNAL.
871 		 */
872 		return -EINVAL;
873 	}
874 
875 	err = sdei_api_event_get_info(event_num, SDEI_EVENT_INFO_EV_PRIORITY,
876 				      &result);
877 	if (err)
878 		return err;
879 
880 	if (result == SDEI_EVENT_PRIORITY_CRITICAL)
881 		cb = critical_cb;
882 	else
883 		cb = normal_cb;
884 
885 	err = sdei_event_register(event_num, cb, ghes);
886 	if (!err)
887 		err = sdei_event_enable(event_num);
888 
889 	return err;
890 }
891 
892 int sdei_unregister_ghes(struct ghes *ghes)
893 {
894 	int i;
895 	int err;
896 	u32 event_num = ghes->generic->notify.vector;
897 
898 	might_sleep();
899 
900 	if (!IS_ENABLED(CONFIG_ACPI_APEI_GHES))
901 		return -EOPNOTSUPP;
902 
903 	/*
904 	 * The event may be running on another CPU. Disable it
905 	 * to stop new events, then try to unregister a few times.
906 	 */
907 	err = sdei_event_disable(event_num);
908 	if (err)
909 		return err;
910 
911 	for (i = 0; i < 3; i++) {
912 		err = sdei_event_unregister(event_num);
913 		if (err != -EINPROGRESS)
914 			break;
915 
916 		schedule();
917 	}
918 
919 	return err;
920 }
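/*
 * If the event is running on another CPU while we unregister it, firmware
 * answers SDEI_PENDING, which sdei_to_linux_errno() maps to -EINPROGRESS: the
 * unregister completes once the running handler returns. The loop above gives
 * that handler a chance to finish (schedule()) and retries a few times before
 * giving up.
 */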
921 
922 static int sdei_get_conduit(struct platform_device *pdev)
923 {
924 	const char *method;
925 	struct device_node *np = pdev->dev.of_node;
926 
927 	sdei_firmware_call = NULL;
928 	if (np) {
929 		if (of_property_read_string(np, "method", &method)) {
930 			pr_warn("missing \"method\" property\n");
931 			return SMCCC_CONDUIT_NONE;
932 		}
933 
934 		if (!strcmp("hvc", method)) {
935 			sdei_firmware_call = &sdei_smccc_hvc;
936 			return SMCCC_CONDUIT_HVC;
937 		} else if (!strcmp("smc", method)) {
938 			sdei_firmware_call = &sdei_smccc_smc;
939 			return SMCCC_CONDUIT_SMC;
940 		}
941 
942 		pr_warn("invalid \"method\" property: %s\n", method);
943 	} else if (!acpi_disabled) {
944 		if (acpi_psci_use_hvc()) {
945 			sdei_firmware_call = &sdei_smccc_hvc;
946 			return SMCCC_CONDUIT_HVC;
947 		} else {
948 			sdei_firmware_call = &sdei_smccc_smc;
949 			return SMCCC_CONDUIT_SMC;
950 		}
951 	}
952 
953 	return SMCCC_CONDUIT_NONE;
954 }
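/*
 * The conduit (SMC or HVC) is taken from the devicetree "method" property
 * when a node exists, otherwise from ACPI by reusing the PSCI conduit choice
 * (acpi_psci_use_hvc()). If neither identifies a conduit, sdei_firmware_call
 * stays NULL and sdei_probe() below returns without touching firmware.
 */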
955 
956 static int sdei_probe(struct platform_device *pdev)
957 {
958 	int err;
959 	u64 ver = 0;
960 	int conduit;
961 
962 	conduit = sdei_get_conduit(pdev);
963 	if (!sdei_firmware_call)
964 		return 0;
965 
966 	err = sdei_api_get_version(&ver);
967 	if (err) {
968 		pr_err("Failed to get SDEI version: %d\n", err);
969 		sdei_mark_interface_broken();
970 		return err;
971 	}
972 
973 	pr_info("SDEIv%d.%d (0x%x) detected in firmware.\n",
974 		(int)SDEI_VERSION_MAJOR(ver), (int)SDEI_VERSION_MINOR(ver),
975 		(int)SDEI_VERSION_VENDOR(ver));
976 
977 	if (SDEI_VERSION_MAJOR(ver) != 1) {
978 		pr_warn("Conflicting SDEI version detected.\n");
979 		sdei_mark_interface_broken();
980 		return -EINVAL;
981 	}
982 
983 	err = sdei_platform_reset();
984 	if (err)
985 		return err;
986 
987 	sdei_entry_point = sdei_arch_get_entry_point(conduit);
988 	if (!sdei_entry_point) {
989 		/* Not supported due to hardware or boot configuration */
990 		sdei_mark_interface_broken();
991 		return 0;
992 	}
993 
994 	err = cpu_pm_register_notifier(&sdei_pm_nb);
995 	if (err) {
996 		pr_warn("Failed to register CPU PM notifier...\n");
997 		goto error;
998 	}
999 
1000 	err = register_reboot_notifier(&sdei_reboot_nb);
1001 	if (err) {
1002 		pr_warn("Failed to register reboot notifier...\n");
1003 		goto remove_cpupm;
1004 	}
1005 
1006 	err = cpuhp_setup_state(CPUHP_AP_ARM_SDEI_STARTING, "SDEI",
1007 				&sdei_cpuhp_up, &sdei_cpuhp_down);
1008 	if (err) {
1009 		pr_warn("Failed to register CPU hotplug notifier...\n");
1010 		goto remove_reboot;
1011 	}
1012 
1013 	return 0;
1014 
1015 remove_reboot:
1016 	unregister_reboot_notifier(&sdei_reboot_nb);
1017 
1018 remove_cpupm:
1019 	cpu_pm_unregister_notifier(&sdei_pm_nb);
1020 
1021 error:
1022 	sdei_mark_interface_broken();
1023 	return err;
1024 }
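/*
 * Probe order matters: the version check rejects non-1.x firmware, the
 * platform reset clears anything a previous kernel or the boot firmware left
 * registered, and only then is the architecture asked for an entry point and
 * are the PM, reboot and hotplug notifiers installed. Every failure path once
 * the conduit is known ends in sdei_mark_interface_broken(), which masks all
 * CPUs and clears sdei_firmware_call so later calls fail fast with -EIO.
 */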
1025 
1026 static const struct of_device_id sdei_of_match[] = {
1027 	{ .compatible = "arm,sdei-1.0" },
1028 	{}
1029 };
1030 
1031 static struct platform_driver sdei_driver = {
1032 	.driver		= {
1033 		.name			= "sdei",
1034 		.pm			= &sdei_pm_ops,
1035 		.of_match_table		= sdei_of_match,
1036 	},
1037 	.probe		= sdei_probe,
1038 };
1039 
1040 static bool __init sdei_present_acpi(void)
1041 {
1042 	acpi_status status;
1043 	struct acpi_table_header *sdei_table_header;
1044 
1045 	if (acpi_disabled)
1046 		return false;
1047 
1048 	status = acpi_get_table(ACPI_SIG_SDEI, 0, &sdei_table_header);
1049 	if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
1050 		const char *msg = acpi_format_exception(status);
1051 
1052 		pr_info("Failed to get ACPI:SDEI table, %s\n", msg);
1053 	}
1054 	if (ACPI_FAILURE(status))
1055 		return false;
1056 
1057 	acpi_put_table(sdei_table_header);
1058 
1059 	return true;
1060 }
1061 
1062 static int __init sdei_init(void)
1063 {
1064 	struct platform_device *pdev;
1065 	int ret;
1066 
1067 	ret = platform_driver_register(&sdei_driver);
1068 	if (ret || !sdei_present_acpi())
1069 		return ret;
1070 
1071 	pdev = platform_device_register_simple(sdei_driver.driver.name,
1072 					       0, NULL, 0);
1073 	if (IS_ERR(pdev)) {
1074 		ret = PTR_ERR(pdev);
1075 		platform_driver_unregister(&sdei_driver);
1076 		pr_info("Failed to register ACPI:SDEI platform device %d\n",
1077 			ret);
1078 	}
1079 
1080 	return ret;
1081 }
1082 
1083 /*
1084  * On an ACPI system SDEI needs to be ready before HEST:GHES tries to register
1085  * its events. ACPI is initialised from a subsys_initcall(), while GHES is
1086  * initialised from a device_initcall(). We want to be called in the middle.
1087  */
1088 subsys_initcall_sync(sdei_init);
1089 
1090 int sdei_event_handler(struct pt_regs *regs,
1091 		       struct sdei_registered_event *arg)
1092 {
1093 	int err;
1094 	u32 event_num = arg->event_num;
1095 
1096 	err = arg->callback(event_num, regs, arg->callback_arg);
1097 	if (err)
1098 		pr_err_ratelimited("event %u on CPU %u failed with error: %d\n",
1099 				   event_num, smp_processor_id(), err);
1100 
1101 	return err;
1102 }
1103 NOKPROBE_SYMBOL(sdei_event_handler);
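/*
 * sdei_event_handler() is what the architecture's SDEI entry code (reached
 * through sdei_entry_point) calls once it has built a struct pt_regs; arg is
 * the struct sdei_registered_event this driver handed to firmware when the
 * event was registered, so it carries the client's callback and callback_arg.
 * Events may be taken in almost any kernel context, which is why the handling
 * path is NOKPROBE and errors are only reported ratelimited.
 */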
1104