// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2017 Arm Ltd.
#define pr_fmt(fmt) "sdei: " fmt

#include <acpi/ghes.h>
#include <linux/acpi.h>
#include <linux/arm_sdei.h>
#include <linux/arm-smccc.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/cpuhotplug.h>
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/kvm_host.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/percpu.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/ptrace.h>
#include <linux/preempt.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/spinlock.h>

/*
 * The call to use to reach the firmware.
 */
static asmlinkage void (*sdei_firmware_call)(unsigned long function_id,
		      unsigned long arg0, unsigned long arg1,
		      unsigned long arg2, unsigned long arg3,
		      unsigned long arg4, struct arm_smccc_res *res);

/* entry point from firmware to arch asm code */
static unsigned long sdei_entry_point;

static int sdei_hp_state;

struct sdei_event {
	/* These three are protected by the sdei_list_lock */
	struct list_head	list;
	bool			reregister;
	bool			reenable;

	u32			event_num;
	u8			type;
	u8			priority;

	/* This pointer is handed to firmware as the event argument. */
	union {
		/* Shared events */
		struct sdei_registered_event *registered;

		/* CPU private events */
		struct sdei_registered_event __percpu *private_registered;
	};
};

/* Take the mutex for any API call or modification. Take the mutex first. */
static DEFINE_MUTEX(sdei_events_lock);

/* and then hold this when modifying the list */
static DEFINE_SPINLOCK(sdei_list_lock);
static LIST_HEAD(sdei_list);

/* Private events are registered/enabled via IPI passing one of these */
struct sdei_crosscall_args {
	struct sdei_event *event;
	atomic_t errors;
	int first_error;
};

#define CROSSCALL_INIT(arg, event)		\
	do {					\
		arg.event = event;		\
		arg.first_error = 0;		\
		atomic_set(&arg.errors, 0);	\
	} while (0)

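/*
 * Helpers for private events: run fn on this CPU only (local call) or on
 * every online CPU via IPI (cross call). fn reports its result through
 * sdei_cross_call_return(), which keeps only the first error seen.
 */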
static inline int sdei_do_local_call(smp_call_func_t fn,
				     struct sdei_event *event)
{
	struct sdei_crosscall_args arg;

	CROSSCALL_INIT(arg, event);
	fn(&arg);

	return arg.first_error;
}

static inline int sdei_do_cross_call(smp_call_func_t fn,
				     struct sdei_event *event)
{
	struct sdei_crosscall_args arg;

	CROSSCALL_INIT(arg, event);
	on_each_cpu(fn, &arg, true);

	return arg.first_error;
}

static inline void
sdei_cross_call_return(struct sdei_crosscall_args *arg, int err)
{
	if (err && (atomic_inc_return(&arg->errors) == 1))
		arg->first_error = err;
}

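/*
 * Map the error codes defined by the SDEI specification onto Linux errno
 * values; anything unrecognised (including success) maps to zero.
 */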
static int sdei_to_linux_errno(unsigned long sdei_err)
{
	switch (sdei_err) {
	case SDEI_NOT_SUPPORTED:
		return -EOPNOTSUPP;
	case SDEI_INVALID_PARAMETERS:
		return -EINVAL;
	case SDEI_DENIED:
		return -EPERM;
	case SDEI_PENDING:
		return -EINPROGRESS;
	case SDEI_OUT_OF_RESOURCE:
		return -ENOMEM;
	}

	return 0;
}

static int invoke_sdei_fn(unsigned long function_id, unsigned long arg0,
			  unsigned long arg1, unsigned long arg2,
			  unsigned long arg3, unsigned long arg4,
			  u64 *result)
{
	int err;
	struct arm_smccc_res res;

	if (sdei_firmware_call) {
		sdei_firmware_call(function_id, arg0, arg1, arg2, arg3, arg4,
				   &res);
		err = sdei_to_linux_errno(res.a0);
	} else {
		/*
		 * !sdei_firmware_call means we failed to probe or called
		 * sdei_mark_interface_broken(). -EIO is not an error returned
		 * by sdei_to_linux_errno() and is used to suppress messages
		 * from this driver.
		 */
		err = -EIO;
		res.a0 = SDEI_NOT_SUPPORTED;
	}

	if (result)
		*result = res.a0;

	return err;
}
NOKPROBE_SYMBOL(invoke_sdei_fn);

static struct sdei_event *sdei_event_find(u32 event_num)
{
	struct sdei_event *e, *found = NULL;

	lockdep_assert_held(&sdei_events_lock);

	spin_lock(&sdei_list_lock);
	list_for_each_entry(e, &sdei_list, list) {
		if (e->event_num == event_num) {
			found = e;
			break;
		}
	}
	spin_unlock(&sdei_list_lock);

	return found;
}

int sdei_api_event_context(u32 query, u64 *result)
{
	return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_CONTEXT, query, 0, 0, 0, 0,
			      result);
}
NOKPROBE_SYMBOL(sdei_api_event_context);

static int sdei_api_event_get_info(u32 event, u32 info, u64 *result)
{
	return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_GET_INFO, event, info, 0,
			      0, 0, result);
}

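/*
 * Allocate the kernel's description of an event and query firmware for its
 * priority and type. Shared events get a single registered_event structure,
 * private events get one copy per possible CPU. The new event is added to
 * sdei_list.
 */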
static struct sdei_event *sdei_event_create(u32 event_num,
					    sdei_event_callback *cb,
					    void *cb_arg)
{
	int err;
	u64 result;
	struct sdei_event *event;
	struct sdei_registered_event *reg;

	lockdep_assert_held(&sdei_events_lock);

	event = kzalloc(sizeof(*event), GFP_KERNEL);
	if (!event) {
		err = -ENOMEM;
		goto fail;
	}

	INIT_LIST_HEAD(&event->list);
	event->event_num = event_num;

	err = sdei_api_event_get_info(event_num, SDEI_EVENT_INFO_EV_PRIORITY,
				      &result);
	if (err)
		goto fail;
	event->priority = result;

	err = sdei_api_event_get_info(event_num, SDEI_EVENT_INFO_EV_TYPE,
				      &result);
	if (err)
		goto fail;
	event->type = result;

	if (event->type == SDEI_EVENT_TYPE_SHARED) {
		reg = kzalloc(sizeof(*reg), GFP_KERNEL);
		if (!reg) {
			err = -ENOMEM;
			goto fail;
		}

		reg->event_num = event->event_num;
		reg->priority = event->priority;

		reg->callback = cb;
		reg->callback_arg = cb_arg;
		event->registered = reg;
	} else {
		int cpu;
		struct sdei_registered_event __percpu *regs;

		regs = alloc_percpu(struct sdei_registered_event);
		if (!regs) {
			err = -ENOMEM;
			goto fail;
		}

		for_each_possible_cpu(cpu) {
			reg = per_cpu_ptr(regs, cpu);

			reg->event_num = event->event_num;
			reg->priority = event->priority;
			reg->callback = cb;
			reg->callback_arg = cb_arg;
		}

		event->private_registered = regs;
	}

	spin_lock(&sdei_list_lock);
	list_add(&event->list, &sdei_list);
	spin_unlock(&sdei_list_lock);

	return event;

fail:
	kfree(event);
	return ERR_PTR(err);
}

static void sdei_event_destroy_llocked(struct sdei_event *event)
{
	lockdep_assert_held(&sdei_events_lock);
	lockdep_assert_held(&sdei_list_lock);

	list_del(&event->list);

	if (event->type == SDEI_EVENT_TYPE_SHARED)
		kfree(event->registered);
	else
		free_percpu(event->private_registered);

	kfree(event);
}

static void sdei_event_destroy(struct sdei_event *event)
{
	spin_lock(&sdei_list_lock);
	sdei_event_destroy_llocked(event);
	spin_unlock(&sdei_list_lock);
}

static int sdei_api_get_version(u64 *version)
{
	return invoke_sdei_fn(SDEI_1_0_FN_SDEI_VERSION, 0, 0, 0, 0, 0, version);
}

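/*
 * Mask event delivery to this CPU. An -EIO return means the interface has
 * been disabled, so the failure is deliberately not reported.
 */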
int sdei_mask_local_cpu(void)
{
	int err;

	err = invoke_sdei_fn(SDEI_1_0_FN_SDEI_PE_MASK, 0, 0, 0, 0, 0, NULL);
	if (err && err != -EIO) {
		pr_warn_once("failed to mask CPU[%u]: %d\n",
			     smp_processor_id(), err);
		return err;
	}

	return 0;
}

static void _ipi_mask_cpu(void *ignored)
{
	WARN_ON_ONCE(preemptible());
	sdei_mask_local_cpu();
}

int sdei_unmask_local_cpu(void)
{
	int err;

	err = invoke_sdei_fn(SDEI_1_0_FN_SDEI_PE_UNMASK, 0, 0, 0, 0, 0, NULL);
	if (err && err != -EIO) {
		pr_warn_once("failed to unmask CPU[%u]: %d\n",
			     smp_processor_id(), err);
		return err;
	}

	return 0;
}

static void _ipi_unmask_cpu(void *ignored)
{
	WARN_ON_ONCE(preemptible());
	sdei_unmask_local_cpu();
}

static void _ipi_private_reset(void *ignored)
{
	int err;

	WARN_ON_ONCE(preemptible());

	err = invoke_sdei_fn(SDEI_1_0_FN_SDEI_PRIVATE_RESET, 0, 0, 0, 0, 0,
			     NULL);
	if (err && err != -EIO)
		pr_warn_once("failed to reset CPU[%u]: %d\n",
			     smp_processor_id(), err);
}

static int sdei_api_shared_reset(void)
{
	return invoke_sdei_fn(SDEI_1_0_FN_SDEI_SHARED_RESET, 0, 0, 0, 0, 0,
			      NULL);
}

static void sdei_mark_interface_broken(void)
{
	pr_err("disabling SDEI firmware interface\n");
	on_each_cpu(&_ipi_mask_cpu, NULL, true);
	sdei_firmware_call = NULL;
}

static int sdei_platform_reset(void)
{
	int err;

	on_each_cpu(&_ipi_private_reset, NULL, true);
	err = sdei_api_shared_reset();
	if (err) {
		pr_err("Failed to reset platform: %d\n", err);
		sdei_mark_interface_broken();
	}

	return err;
}

static int sdei_api_event_enable(u32 event_num)
{
	return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_ENABLE, event_num, 0, 0, 0,
			      0, NULL);
}

/* Called directly by the hotplug callbacks */
static void _local_event_enable(void *data)
{
	int err;
	struct sdei_crosscall_args *arg = data;

	err = sdei_api_event_enable(arg->event->event_num);

	sdei_cross_call_return(arg, err);
}

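/*
 * Enable a registered event. Shared events take a single firmware call,
 * private events are enabled on each CPU by IPI. The reenable flag lets the
 * CPU hotplug and hibernate paths repeat this after re-registering.
 */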
int sdei_event_enable(u32 event_num)
{
	int err = -EINVAL;
	struct sdei_event *event;

	mutex_lock(&sdei_events_lock);
	event = sdei_event_find(event_num);
	if (!event) {
		mutex_unlock(&sdei_events_lock);
		return -ENOENT;
	}

	cpus_read_lock();
	if (event->type == SDEI_EVENT_TYPE_SHARED)
		err = sdei_api_event_enable(event->event_num);
	else
		err = sdei_do_cross_call(_local_event_enable, event);

	if (!err) {
		spin_lock(&sdei_list_lock);
		event->reenable = true;
		spin_unlock(&sdei_list_lock);
	}
	cpus_read_unlock();
	mutex_unlock(&sdei_events_lock);

	return err;
}

static int sdei_api_event_disable(u32 event_num)
{
	return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_DISABLE, event_num, 0, 0,
			      0, 0, NULL);
}

static void _ipi_event_disable(void *data)
{
	int err;
	struct sdei_crosscall_args *arg = data;

	err = sdei_api_event_disable(arg->event->event_num);

	sdei_cross_call_return(arg, err);
}

int sdei_event_disable(u32 event_num)
{
	int err = -EINVAL;
	struct sdei_event *event;

	mutex_lock(&sdei_events_lock);
	event = sdei_event_find(event_num);
	if (!event) {
		mutex_unlock(&sdei_events_lock);
		return -ENOENT;
	}

	spin_lock(&sdei_list_lock);
	event->reenable = false;
	spin_unlock(&sdei_list_lock);

	if (event->type == SDEI_EVENT_TYPE_SHARED)
		err = sdei_api_event_disable(event->event_num);
	else
		err = sdei_do_cross_call(_ipi_event_disable, event);
	mutex_unlock(&sdei_events_lock);

	return err;
}

static int sdei_api_event_unregister(u32 event_num)
{
	return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_UNREGISTER, event_num, 0,
			      0, 0, 0, NULL);
}

/* Called directly by the hotplug callbacks */
static void _local_event_unregister(void *data)
{
	int err;
	struct sdei_crosscall_args *arg = data;

	err = sdei_api_event_unregister(arg->event->event_num);

	sdei_cross_call_return(arg, err);
}

int sdei_event_unregister(u32 event_num)
{
	int err;
	struct sdei_event *event;

	WARN_ON(in_nmi());

	mutex_lock(&sdei_events_lock);
	event = sdei_event_find(event_num);
	if (!event) {
		pr_warn("Event %u not registered\n", event_num);
		err = -ENOENT;
		goto unlock;
	}

	spin_lock(&sdei_list_lock);
	event->reregister = false;
	event->reenable = false;
	spin_unlock(&sdei_list_lock);

	if (event->type == SDEI_EVENT_TYPE_SHARED)
		err = sdei_api_event_unregister(event->event_num);
	else
		err = sdei_do_cross_call(_local_event_unregister, event);

	if (err)
		goto unlock;

	sdei_event_destroy(event);
unlock:
	mutex_unlock(&sdei_events_lock);

	return err;
}

/*
 * unregister events, but don't destroy them as they are re-registered by
 * sdei_reregister_shared().
 */
static int sdei_unregister_shared(void)
{
	int err = 0;
	struct sdei_event *event;

	mutex_lock(&sdei_events_lock);
	spin_lock(&sdei_list_lock);
	list_for_each_entry(event, &sdei_list, list) {
		if (event->type != SDEI_EVENT_TYPE_SHARED)
			continue;

		err = sdei_api_event_unregister(event->event_num);
		if (err)
			break;
	}
	spin_unlock(&sdei_list_lock);
	mutex_unlock(&sdei_events_lock);

	return err;
}

static int sdei_api_event_register(u32 event_num, unsigned long entry_point,
				   void *arg, u64 flags, u64 affinity)
{
	return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_REGISTER, event_num,
			      (unsigned long)entry_point, (unsigned long)arg,
			      flags, affinity, NULL);
}

/* Called directly by the hotplug callbacks */
static void _local_event_register(void *data)
{
	int err;
	struct sdei_registered_event *reg;
	struct sdei_crosscall_args *arg = data;

	reg = per_cpu_ptr(arg->event->private_registered, smp_processor_id());
	err = sdei_api_event_register(arg->event->event_num, sdei_entry_point,
				      reg, 0, 0);

	sdei_cross_call_return(arg, err);
}

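/*
 * Register an event with firmware. Shared events are routed to any CPU
 * (RM_ANY); private events are registered on each CPU by IPI, and a partial
 * failure unregisters the CPUs that succeeded. The reregister flag is set so
 * the CPU hotplug and hibernate paths can redo the registration later.
 */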
int sdei_event_register(u32 event_num, sdei_event_callback *cb, void *arg)
{
	int err;
	struct sdei_event *event;

	WARN_ON(in_nmi());

	mutex_lock(&sdei_events_lock);
	if (sdei_event_find(event_num)) {
		pr_warn("Event %u already registered\n", event_num);
		err = -EBUSY;
		goto unlock;
	}

	event = sdei_event_create(event_num, cb, arg);
	if (IS_ERR(event)) {
		err = PTR_ERR(event);
		pr_warn("Failed to create event %u: %d\n", event_num, err);
		goto unlock;
	}

	cpus_read_lock();
	if (event->type == SDEI_EVENT_TYPE_SHARED) {
		err = sdei_api_event_register(event->event_num,
					      sdei_entry_point,
					      event->registered,
					      SDEI_EVENT_REGISTER_RM_ANY, 0);
	} else {
		err = sdei_do_cross_call(_local_event_register, event);
		if (err)
			sdei_do_cross_call(_local_event_unregister, event);
	}

	if (err) {
		sdei_event_destroy(event);
		pr_warn("Failed to register event %u: %d\n", event_num, err);
		goto cpu_unlock;
	}

	spin_lock(&sdei_list_lock);
	event->reregister = true;
	spin_unlock(&sdei_list_lock);
cpu_unlock:
	cpus_read_unlock();
unlock:
	mutex_unlock(&sdei_events_lock);
	return err;
}

static int sdei_reregister_shared(void)
{
	int err = 0;
	struct sdei_event *event;

	mutex_lock(&sdei_events_lock);
	spin_lock(&sdei_list_lock);
	list_for_each_entry(event, &sdei_list, list) {
		if (event->type != SDEI_EVENT_TYPE_SHARED)
			continue;

		if (event->reregister) {
			err = sdei_api_event_register(event->event_num,
					sdei_entry_point, event->registered,
					SDEI_EVENT_REGISTER_RM_ANY, 0);
			if (err) {
				pr_err("Failed to re-register event %u\n",
				       event->event_num);
				sdei_event_destroy_llocked(event);
				break;
			}
		}

		if (event->reenable) {
			err = sdei_api_event_enable(event->event_num);
			if (err) {
				pr_err("Failed to re-enable event %u\n",
				       event->event_num);
				break;
			}
		}
	}
	spin_unlock(&sdei_list_lock);
	mutex_unlock(&sdei_events_lock);

	return err;
}

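/*
 * CPU hotplug callbacks, run on the CPU that is going down or coming up.
 * Private events are unregistered before the CPU is masked on the way down,
 * and re-registered/re-enabled before it is unmasked on the way up.
 */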
static int sdei_cpuhp_down(unsigned int cpu)
{
	struct sdei_event *event;
	int err;

	/* un-register private events */
	spin_lock(&sdei_list_lock);
	list_for_each_entry(event, &sdei_list, list) {
		if (event->type == SDEI_EVENT_TYPE_SHARED)
			continue;

		err = sdei_do_local_call(_local_event_unregister, event);
		if (err) {
			pr_err("Failed to unregister event %u: %d\n",
			       event->event_num, err);
		}
	}
	spin_unlock(&sdei_list_lock);

	return sdei_mask_local_cpu();
}

static int sdei_cpuhp_up(unsigned int cpu)
{
	struct sdei_event *event;
	int err;

	/* re-register/enable private events */
	spin_lock(&sdei_list_lock);
	list_for_each_entry(event, &sdei_list, list) {
		if (event->type == SDEI_EVENT_TYPE_SHARED)
			continue;

		if (event->reregister) {
			err = sdei_do_local_call(_local_event_register, event);
			if (err) {
				pr_err("Failed to re-register event %u: %d\n",
				       event->event_num, err);
			}
		}

		if (event->reenable) {
			err = sdei_do_local_call(_local_event_enable, event);
			if (err) {
				pr_err("Failed to re-enable event %u: %d\n",
				       event->event_num, err);
			}
		}
	}
	spin_unlock(&sdei_list_lock);

	return sdei_unmask_local_cpu();
}

/* When entering idle, mask/unmask events for this cpu */
static int sdei_pm_notifier(struct notifier_block *nb, unsigned long action,
			    void *data)
{
	int rv;

	WARN_ON_ONCE(preemptible());

	switch (action) {
	case CPU_PM_ENTER:
		rv = sdei_mask_local_cpu();
		break;
	case CPU_PM_EXIT:
	case CPU_PM_ENTER_FAILED:
		rv = sdei_unmask_local_cpu();
		break;
	default:
		return NOTIFY_DONE;
	}

	if (rv)
		return notifier_from_errno(rv);

	return NOTIFY_OK;
}

static struct notifier_block sdei_pm_nb = {
	.notifier_call = sdei_pm_notifier,
};

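/*
 * For suspend/resume the events stay registered with firmware; each CPU is
 * simply masked on suspend and unmasked again on resume.
 */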
static int sdei_device_suspend(struct device *dev)
{
	on_each_cpu(_ipi_mask_cpu, NULL, true);

	return 0;
}

static int sdei_device_resume(struct device *dev)
{
	on_each_cpu(_ipi_unmask_cpu, NULL, true);

	return 0;
}

/*
 * We need all events to be reregistered when we resume from hibernate.
 *
 * The sequence is freeze->thaw. Reboot. freeze->restore. We unregister
 * events during freeze, then re-register and re-enable them during thaw
 * and restore.
 */
static int sdei_device_freeze(struct device *dev)
{
	int err;

	/* unregister private events */
	cpuhp_remove_state(sdei_hp_state);

	err = sdei_unregister_shared();
	if (err)
		return err;

	return 0;
}

static int sdei_device_thaw(struct device *dev)
{
	int err;

	/* re-register shared events */
	err = sdei_reregister_shared();
	if (err) {
		pr_warn("Failed to re-register shared events...\n");
		sdei_mark_interface_broken();
		return err;
	}

	err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "SDEI",
				&sdei_cpuhp_up, &sdei_cpuhp_down);
	if (err < 0) {
		pr_warn("Failed to re-register CPU hotplug notifier...\n");
		return err;
	}

	sdei_hp_state = err;
	return 0;
}

static int sdei_device_restore(struct device *dev)
{
	int err;

	err = sdei_platform_reset();
	if (err)
		return err;

	return sdei_device_thaw(dev);
}

static const struct dev_pm_ops sdei_pm_ops = {
	.suspend = sdei_device_suspend,
	.resume = sdei_device_resume,
	.freeze = sdei_device_freeze,
	.thaw = sdei_device_thaw,
	.restore = sdei_device_restore,
};

/*
 * Mask all CPUs and unregister all events on panic, reboot or kexec.
 */
static int sdei_reboot_notifier(struct notifier_block *nb, unsigned long action,
				void *data)
{
	/*
	 * We are going to reset the interface, after this there is no point
	 * doing work when we take CPUs offline.
	 */
	cpuhp_remove_state(sdei_hp_state);

	sdei_platform_reset();

	return NOTIFY_OK;
}

static struct notifier_block sdei_reboot_nb = {
	.notifier_call = sdei_reboot_notifier,
};

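/*
 * Firmware is reached over SMCCC using either SMC or HVC; the conduit is
 * chosen at probe time and the unused SMCCC parameters are passed as zero.
 */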
static void sdei_smccc_smc(unsigned long function_id,
			   unsigned long arg0, unsigned long arg1,
			   unsigned long arg2, unsigned long arg3,
			   unsigned long arg4, struct arm_smccc_res *res)
{
	arm_smccc_smc(function_id, arg0, arg1, arg2, arg3, arg4, 0, 0, res);
}
NOKPROBE_SYMBOL(sdei_smccc_smc);

static void sdei_smccc_hvc(unsigned long function_id,
			   unsigned long arg0, unsigned long arg1,
			   unsigned long arg2, unsigned long arg3,
			   unsigned long arg4, struct arm_smccc_res *res)
{
	arm_smccc_hvc(function_id, arg0, arg1, arg2, arg3, arg4, 0, 0, res);
}
NOKPROBE_SYMBOL(sdei_smccc_hvc);

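/*
 * APEI/GHES glue: ghes->generic->notify.vector carries the SDEI event
 * number. The critical or normal callback is chosen from the priority
 * firmware reports for the event, which is then registered and enabled.
 */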
int sdei_register_ghes(struct ghes *ghes, sdei_event_callback *normal_cb,
		       sdei_event_callback *critical_cb)
{
	int err;
	u64 result;
	u32 event_num;
	sdei_event_callback *cb;

	if (!IS_ENABLED(CONFIG_ACPI_APEI_GHES))
		return -EOPNOTSUPP;

	event_num = ghes->generic->notify.vector;
	if (event_num == 0) {
		/*
		 * Event 0 is reserved by the specification for
		 * SDEI_EVENT_SIGNAL.
		 */
		return -EINVAL;
	}

	err = sdei_api_event_get_info(event_num, SDEI_EVENT_INFO_EV_PRIORITY,
				      &result);
	if (err)
		return err;

	if (result == SDEI_EVENT_PRIORITY_CRITICAL)
		cb = critical_cb;
	else
		cb = normal_cb;

	err = sdei_event_register(event_num, cb, ghes);
	if (!err)
		err = sdei_event_enable(event_num);

	return err;
}

int sdei_unregister_ghes(struct ghes *ghes)
{
	int i;
	int err;
	u32 event_num = ghes->generic->notify.vector;

	might_sleep();

	if (!IS_ENABLED(CONFIG_ACPI_APEI_GHES))
		return -EOPNOTSUPP;

	/*
	 * The event may be running on another CPU. Disable it
	 * to stop new events, then try to unregister a few times.
	 */
	err = sdei_event_disable(event_num);
	if (err)
		return err;

	for (i = 0; i < 3; i++) {
		err = sdei_event_unregister(event_num);
		if (err != -EINPROGRESS)
			break;

		schedule();
	}

	return err;
}

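/*
 * Pick the conduit for reaching firmware: the DT "method" property selects
 * SMC or HVC, while ACPI systems follow whichever conduit PSCI uses.
 */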
static int sdei_get_conduit(struct platform_device *pdev)
{
	const char *method;
	struct device_node *np = pdev->dev.of_node;

	sdei_firmware_call = NULL;
	if (np) {
		if (of_property_read_string(np, "method", &method)) {
			pr_warn("missing \"method\" property\n");
			return SMCCC_CONDUIT_NONE;
		}

		if (!strcmp("hvc", method)) {
			sdei_firmware_call = &sdei_smccc_hvc;
			return SMCCC_CONDUIT_HVC;
		} else if (!strcmp("smc", method)) {
			sdei_firmware_call = &sdei_smccc_smc;
			return SMCCC_CONDUIT_SMC;
		}

		pr_warn("invalid \"method\" property: %s\n", method);
	} else if (!acpi_disabled) {
		if (acpi_psci_use_hvc()) {
			sdei_firmware_call = &sdei_smccc_hvc;
			return SMCCC_CONDUIT_HVC;
		} else {
			sdei_firmware_call = &sdei_smccc_smc;
			return SMCCC_CONDUIT_SMC;
		}
	}

	return SMCCC_CONDUIT_NONE;
}

static int sdei_probe(struct platform_device *pdev)
{
	int err;
	u64 ver = 0;
	int conduit;

	conduit = sdei_get_conduit(pdev);
	if (!sdei_firmware_call)
		return 0;

	err = sdei_api_get_version(&ver);
	if (err) {
		pr_err("Failed to get SDEI version: %d\n", err);
		sdei_mark_interface_broken();
		return err;
	}

	pr_info("SDEIv%d.%d (0x%x) detected in firmware.\n",
		(int)SDEI_VERSION_MAJOR(ver), (int)SDEI_VERSION_MINOR(ver),
		(int)SDEI_VERSION_VENDOR(ver));

	if (SDEI_VERSION_MAJOR(ver) != 1) {
		pr_warn("Conflicting SDEI version detected.\n");
		sdei_mark_interface_broken();
		return -EINVAL;
	}

	err = sdei_platform_reset();
	if (err)
		return err;

	sdei_entry_point = sdei_arch_get_entry_point(conduit);
	if (!sdei_entry_point) {
		/* Not supported due to hardware or boot configuration */
		sdei_mark_interface_broken();
		return 0;
	}

	err = cpu_pm_register_notifier(&sdei_pm_nb);
	if (err) {
		pr_warn("Failed to register CPU PM notifier...\n");
		goto error;
	}

	err = register_reboot_notifier(&sdei_reboot_nb);
	if (err) {
		pr_warn("Failed to register reboot notifier...\n");
		goto remove_cpupm;
	}

	err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "SDEI",
				&sdei_cpuhp_up, &sdei_cpuhp_down);
	if (err < 0) {
		pr_warn("Failed to register CPU hotplug notifier...\n");
		goto remove_reboot;
	}

	sdei_hp_state = err;

	return 0;

remove_reboot:
	unregister_reboot_notifier(&sdei_reboot_nb);

remove_cpupm:
	cpu_pm_unregister_notifier(&sdei_pm_nb);

error:
	sdei_mark_interface_broken();
	return err;
}

static const struct of_device_id sdei_of_match[] = {
	{ .compatible = "arm,sdei-1.0" },
	{}
};

static struct platform_driver sdei_driver = {
	.driver		= {
		.name			= "sdei",
		.pm			= &sdei_pm_ops,
		.of_match_table		= sdei_of_match,
	},
	.probe		= sdei_probe,
};

static bool __init sdei_present_acpi(void)
{
	acpi_status status;
	struct acpi_table_header *sdei_table_header;

	if (acpi_disabled)
		return false;

	status = acpi_get_table(ACPI_SIG_SDEI, 0, &sdei_table_header);
	if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
		const char *msg = acpi_format_exception(status);

		pr_info("Failed to get ACPI:SDEI table, %s\n", msg);
	}
	if (ACPI_FAILURE(status))
		return false;

	acpi_put_table(sdei_table_header);

	return true;
}

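/*
 * ACPI systems have no device-tree node to trigger probing, so if the SDEI
 * table is present the platform device is created here once the driver has
 * been registered.
 */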
void __init sdei_init(void)
{
	struct platform_device *pdev;
	int ret;

	ret = platform_driver_register(&sdei_driver);
	if (ret || !sdei_present_acpi())
		return;

	pdev = platform_device_register_simple(sdei_driver.driver.name,
					       0, NULL, 0);
	if (IS_ERR(pdev)) {
		ret = PTR_ERR(pdev);
		platform_driver_unregister(&sdei_driver);
		pr_info("Failed to register ACPI:SDEI platform device %d\n",
			ret);
	}
}

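/*
 * Called by the arch code when firmware delivers an event: run the callback
 * that was registered for this event and report any error it returns.
 */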
int sdei_event_handler(struct pt_regs *regs,
		       struct sdei_registered_event *arg)
{
	int err;
	u32 event_num = arg->event_num;

	err = arg->callback(event_num, regs, arg->callback_arg);
	if (err)
		pr_err_ratelimited("event %u on CPU %u failed with error: %d\n",
				   event_num, smp_processor_id(), err);

	return err;
}
NOKPROBE_SYMBOL(sdei_event_handler);

void sdei_handler_abort(void)
{
	/*
	 * If the crash happened in an SDEI event handler then we need to
	 * finish the handler with the firmware so that we can have working
	 * interrupts in the crash kernel.
	 */
	if (__this_cpu_read(sdei_active_critical_event)) {
		pr_warn("still in SDEI critical event context, attempting to finish handler.\n");
		__sdei_handler_abort();
		__this_cpu_write(sdei_active_critical_event, NULL);
	}
	if (__this_cpu_read(sdei_active_normal_event)) {
		pr_warn("still in SDEI normal event context, attempting to finish handler.\n");
		__sdei_handler_abort();
		__this_cpu_write(sdei_active_normal_event, NULL);
	}
}
1117