xref: /openbmc/linux/drivers/firmware/arm_sdei.c (revision a27c04e1)
1 // SPDX-License-Identifier: GPL-2.0
2 // Copyright (C) 2017 Arm Ltd.
3 #define pr_fmt(fmt) "sdei: " fmt
4 
5 #include <acpi/ghes.h>
6 #include <linux/acpi.h>
7 #include <linux/arm_sdei.h>
8 #include <linux/arm-smccc.h>
9 #include <linux/atomic.h>
10 #include <linux/bitops.h>
11 #include <linux/compiler.h>
12 #include <linux/cpuhotplug.h>
13 #include <linux/cpu.h>
14 #include <linux/cpu_pm.h>
15 #include <linux/errno.h>
16 #include <linux/hardirq.h>
17 #include <linux/kernel.h>
18 #include <linux/kprobes.h>
19 #include <linux/kvm_host.h>
20 #include <linux/list.h>
21 #include <linux/mutex.h>
22 #include <linux/notifier.h>
23 #include <linux/of.h>
24 #include <linux/of_platform.h>
25 #include <linux/percpu.h>
26 #include <linux/platform_device.h>
27 #include <linux/pm.h>
28 #include <linux/ptrace.h>
29 #include <linux/preempt.h>
30 #include <linux/reboot.h>
31 #include <linux/slab.h>
32 #include <linux/smp.h>
33 #include <linux/spinlock.h>
34 #include <linux/uaccess.h>
35 
36 /*
37  * The call used to reach the firmware: the SMC or HVC conduit chosen at probe time.
38  */
39 static asmlinkage void (*sdei_firmware_call)(unsigned long function_id,
40 		      unsigned long arg0, unsigned long arg1,
41 		      unsigned long arg2, unsigned long arg3,
42 		      unsigned long arg4, struct arm_smccc_res *res);
43 
44 /* entry point from firmware to arch asm code */
45 static unsigned long sdei_entry_point;
46 
47 struct sdei_event {
48 	/* These three are protected by the sdei_list_lock */
49 	struct list_head	list;
50 	bool			reregister;
51 	bool			reenable;
52 
53 	u32			event_num;
54 	u8			type;
55 	u8			priority;
56 
57 	/* This pointer is handed to firmware as the event argument. */
58 	union {
59 		/* Shared events */
60 		struct sdei_registered_event *registered;
61 
62 		/* CPU private events */
63 		struct sdei_registered_event __percpu *private_registered;
64 	};
65 };
66 
67 /* Take this mutex for any API call or modification; take it before sdei_list_lock. */
68 static DEFINE_MUTEX(sdei_events_lock);
69 
70 /* and then hold this when modifying the list */
71 static DEFINE_SPINLOCK(sdei_list_lock);
72 static LIST_HEAD(sdei_list);
73 
74 /* Private events are registered/enabled via IPI passing one of these */
75 struct sdei_crosscall_args {
76 	struct sdei_event *event;
77 	atomic_t errors;
78 	int first_error;
79 };
80 
81 #define CROSSCALL_INIT(arg, event)		\
82 	do {					\
83 		arg.event = event;		\
84 		arg.first_error = 0;		\
85 		atomic_set(&arg.errors, 0);	\
86 	} while (0)
87 
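/*
 * Run 'fn' on every online CPU and collect the results: the first non-zero
 * error reported via sdei_cross_call_return() is what the caller sees.
 */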
88 static inline int sdei_do_cross_call(smp_call_func_t fn,
89 				     struct sdei_event *event)
90 {
91 	struct sdei_crosscall_args arg;
92 
93 	CROSSCALL_INIT(arg, event);
94 	on_each_cpu(fn, &arg, true);
95 
96 	return arg.first_error;
97 }
98 
99 static inline void
100 sdei_cross_call_return(struct sdei_crosscall_args *arg, int err)
101 {
102 	if (err && (atomic_inc_return(&arg->errors) == 1))
103 		arg->first_error = err;
104 }
105 
106 static int sdei_to_linux_errno(unsigned long sdei_err)
107 {
108 	switch (sdei_err) {
109 	case SDEI_NOT_SUPPORTED:
110 		return -EOPNOTSUPP;
111 	case SDEI_INVALID_PARAMETERS:
112 		return -EINVAL;
113 	case SDEI_DENIED:
114 		return -EPERM;
115 	case SDEI_PENDING:
116 		return -EINPROGRESS;
117 	case SDEI_OUT_OF_RESOURCE:
118 		return -ENOMEM;
119 	}
120 
121 	return 0;
122 }
123 
124 static int invoke_sdei_fn(unsigned long function_id, unsigned long arg0,
125 			  unsigned long arg1, unsigned long arg2,
126 			  unsigned long arg3, unsigned long arg4,
127 			  u64 *result)
128 {
129 	int err;
130 	struct arm_smccc_res res;
131 
132 	if (sdei_firmware_call) {
133 		sdei_firmware_call(function_id, arg0, arg1, arg2, arg3, arg4,
134 				   &res);
135 		err = sdei_to_linux_errno(res.a0);
136 	} else {
137 		/*
138 		 * !sdei_firmware_call means we failed to probe or called
139 		 * sdei_mark_interface_broken(). -EIO is not an error returned
140 		 * by sdei_to_linux_errno() and is used to suppress messages
141 		 * from this driver.
142 		 */
143 		err = -EIO;
144 		res.a0 = SDEI_NOT_SUPPORTED;
145 	}
146 
147 	if (result)
148 		*result = res.a0;
149 
150 	return err;
151 }
152 NOKPROBE_SYMBOL(invoke_sdei_fn);
153 
154 static struct sdei_event *sdei_event_find(u32 event_num)
155 {
156 	struct sdei_event *e, *found = NULL;
157 
158 	lockdep_assert_held(&sdei_events_lock);
159 
160 	spin_lock(&sdei_list_lock);
161 	list_for_each_entry(e, &sdei_list, list) {
162 		if (e->event_num == event_num) {
163 			found = e;
164 			break;
165 		}
166 	}
167 	spin_unlock(&sdei_list_lock);
168 
169 	return found;
170 }
171 
172 int sdei_api_event_context(u32 query, u64 *result)
173 {
174 	return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_CONTEXT, query, 0, 0, 0, 0,
175 			      result);
176 }
177 NOKPROBE_SYMBOL(sdei_api_event_context);
178 
179 static int sdei_api_event_get_info(u32 event, u32 info, u64 *result)
180 {
181 	return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_GET_INFO, event, info, 0,
182 			      0, 0, result);
183 }
184 
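/*
 * Allocate and describe a new event. Shared events get a single
 * sdei_registered_event; private (per-CPU) events get one per possible CPU,
 * as each CPU registers the event with firmware separately.
 */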
185 static struct sdei_event *sdei_event_create(u32 event_num,
186 					    sdei_event_callback *cb,
187 					    void *cb_arg)
188 {
189 	int err;
190 	u64 result;
191 	struct sdei_event *event;
192 	struct sdei_registered_event *reg;
193 
194 	lockdep_assert_held(&sdei_events_lock);
195 
196 	event = kzalloc(sizeof(*event), GFP_KERNEL);
197 	if (!event) {
198 		err = -ENOMEM;
199 		goto fail;
200 	}
201 
202 	INIT_LIST_HEAD(&event->list);
203 	event->event_num = event_num;
204 
205 	err = sdei_api_event_get_info(event_num, SDEI_EVENT_INFO_EV_PRIORITY,
206 				      &result);
207 	if (err)
208 		goto fail;
209 	event->priority = result;
210 
211 	err = sdei_api_event_get_info(event_num, SDEI_EVENT_INFO_EV_TYPE,
212 				      &result);
213 	if (err)
214 		goto fail;
215 	event->type = result;
216 
217 	if (event->type == SDEI_EVENT_TYPE_SHARED) {
218 		reg = kzalloc(sizeof(*reg), GFP_KERNEL);
219 		if (!reg) {
220 			err = -ENOMEM;
221 			goto fail;
222 		}
223 
224 		reg->event_num = event->event_num;
225 		reg->priority = event->priority;
226 
227 		reg->callback = cb;
228 		reg->callback_arg = cb_arg;
229 		event->registered = reg;
230 	} else {
231 		int cpu;
232 		struct sdei_registered_event __percpu *regs;
233 
234 		regs = alloc_percpu(struct sdei_registered_event);
235 		if (!regs) {
236 			err = -ENOMEM;
237 			goto fail;
238 		}
239 
240 		for_each_possible_cpu(cpu) {
241 			reg = per_cpu_ptr(regs, cpu);
242 
243 			reg->event_num = event->event_num;
244 			reg->priority = event->priority;
245 			reg->callback = cb;
246 			reg->callback_arg = cb_arg;
247 		}
248 
249 		event->private_registered = regs;
250 	}
251 
252 	spin_lock(&sdei_list_lock);
253 	list_add(&event->list, &sdei_list);
254 	spin_unlock(&sdei_list_lock);
255 
256 	return event;
257 
258 fail:
259 	kfree(event);
260 	return ERR_PTR(err);
261 }
262 
263 static void sdei_event_destroy_llocked(struct sdei_event *event)
264 {
265 	lockdep_assert_held(&sdei_events_lock);
266 	lockdep_assert_held(&sdei_list_lock);
267 
268 	list_del(&event->list);
269 
270 	if (event->type == SDEI_EVENT_TYPE_SHARED)
271 		kfree(event->registered);
272 	else
273 		free_percpu(event->private_registered);
274 
275 	kfree(event);
276 }
277 
278 static void sdei_event_destroy(struct sdei_event *event)
279 {
280 	spin_lock(&sdei_list_lock);
281 	sdei_event_destroy_llocked(event);
282 	spin_unlock(&sdei_list_lock);
283 }
284 
285 static int sdei_api_get_version(u64 *version)
286 {
287 	return invoke_sdei_fn(SDEI_1_0_FN_SDEI_VERSION, 0, 0, 0, 0, 0, version);
288 }
289 
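/*
 * Ask firmware to stop delivering SDEI events to this CPU. An -EIO error
 * means the interface is broken or absent, so it isn't reported here.
 */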
290 int sdei_mask_local_cpu(void)
291 {
292 	int err;
293 
294 	WARN_ON_ONCE(preemptible());
295 
296 	err = invoke_sdei_fn(SDEI_1_0_FN_SDEI_PE_MASK, 0, 0, 0, 0, 0, NULL);
297 	if (err && err != -EIO) {
298 		pr_warn_once("failed to mask CPU[%u]: %d\n",
299 			      smp_processor_id(), err);
300 		return err;
301 	}
302 
303 	return 0;
304 }
305 
306 static void _ipi_mask_cpu(void *ignored)
307 {
308 	sdei_mask_local_cpu();
309 }
310 
311 int sdei_unmask_local_cpu(void)
312 {
313 	int err;
314 
315 	WARN_ON_ONCE(preemptible());
316 
317 	err = invoke_sdei_fn(SDEI_1_0_FN_SDEI_PE_UNMASK, 0, 0, 0, 0, 0, NULL);
318 	if (err && err != -EIO) {
319 		pr_warn_once("failed to unmask CPU[%u]: %d\n",
320 			     smp_processor_id(), err);
321 		return err;
322 	}
323 
324 	return 0;
325 }
326 
327 static void _ipi_unmask_cpu(void *ignored)
328 {
329 	sdei_unmask_local_cpu();
330 }
331 
332 static void _ipi_private_reset(void *ignored)
333 {
334 	int err;
335 
336 	err = invoke_sdei_fn(SDEI_1_0_FN_SDEI_PRIVATE_RESET, 0, 0, 0, 0, 0,
337 			     NULL);
338 	if (err && err != -EIO)
339 		pr_warn_once("failed to reset CPU[%u]: %d\n",
340 			     smp_processor_id(), err);
341 }
342 
343 static int sdei_api_shared_reset(void)
344 {
345 	return invoke_sdei_fn(SDEI_1_0_FN_SDEI_SHARED_RESET, 0, 0, 0, 0, 0,
346 			      NULL);
347 }
348 
349 static void sdei_mark_interface_broken(void)
350 {
351 	pr_err("disabling SDEI firmware interface\n");
352 	on_each_cpu(&_ipi_mask_cpu, NULL, true);
353 	sdei_firmware_call = NULL;
354 }
355 
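/*
 * Reset the private state on each CPU, then the shared state. Failure here
 * disables the interface entirely via sdei_mark_interface_broken().
 */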
356 static int sdei_platform_reset(void)
357 {
358 	int err;
359 
360 	on_each_cpu(&_ipi_private_reset, NULL, true);
361 	err = sdei_api_shared_reset();
362 	if (err) {
363 		pr_err("Failed to reset platform: %d\n", err);
364 		sdei_mark_interface_broken();
365 	}
366 
367 	return err;
368 }
369 
370 static int sdei_api_event_enable(u32 event_num)
371 {
372 	return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_ENABLE, event_num, 0, 0, 0,
373 			      0, NULL);
374 }
375 
376 /* Called directly by the hotplug callbacks */
377 static void _local_event_enable(void *data)
378 {
379 	int err;
380 	struct sdei_crosscall_args *arg = data;
381 
382 	WARN_ON_ONCE(preemptible());
383 
384 	err = sdei_api_event_enable(arg->event->event_num);
385 
386 	sdei_cross_call_return(arg, err);
387 }
388 
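/*
 * Enable a previously registered event. Shared events are enabled with a
 * single firmware call; private events are enabled on each CPU by cross-call.
 * The reenable flag is recorded so the hotplug/hibernate paths can redo this.
 */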
389 int sdei_event_enable(u32 event_num)
390 {
391 	int err = -EINVAL;
392 	struct sdei_event *event;
393 
394 	mutex_lock(&sdei_events_lock);
395 	event = sdei_event_find(event_num);
396 	if (!event) {
397 		mutex_unlock(&sdei_events_lock);
398 		return -ENOENT;
399 	}
400 
401 
402 	cpus_read_lock();
403 	if (event->type == SDEI_EVENT_TYPE_SHARED)
404 		err = sdei_api_event_enable(event->event_num);
405 	else
406 		err = sdei_do_cross_call(_local_event_enable, event);
407 
408 	if (!err) {
409 		spin_lock(&sdei_list_lock);
410 		event->reenable = true;
411 		spin_unlock(&sdei_list_lock);
412 	}
413 	cpus_read_unlock();
414 	mutex_unlock(&sdei_events_lock);
415 
416 	return err;
417 }
418 
419 static int sdei_api_event_disable(u32 event_num)
420 {
421 	return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_DISABLE, event_num, 0, 0,
422 			      0, 0, NULL);
423 }
424 
425 static void _ipi_event_disable(void *data)
426 {
427 	int err;
428 	struct sdei_crosscall_args *arg = data;
429 
430 	err = sdei_api_event_disable(arg->event->event_num);
431 
432 	sdei_cross_call_return(arg, err);
433 }
434 
435 int sdei_event_disable(u32 event_num)
436 {
437 	int err = -EINVAL;
438 	struct sdei_event *event;
439 
440 	mutex_lock(&sdei_events_lock);
441 	event = sdei_event_find(event_num);
442 	if (!event) {
443 		mutex_unlock(&sdei_events_lock);
444 		return -ENOENT;
445 	}
446 
447 	spin_lock(&sdei_list_lock);
448 	event->reenable = false;
449 	spin_unlock(&sdei_list_lock);
450 
451 	if (event->type == SDEI_EVENT_TYPE_SHARED)
452 		err = sdei_api_event_disable(event->event_num);
453 	else
454 		err = sdei_do_cross_call(_ipi_event_disable, event);
455 	mutex_unlock(&sdei_events_lock);
456 
457 	return err;
458 }
459 
460 static int sdei_api_event_unregister(u32 event_num)
461 {
462 	return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_UNREGISTER, event_num, 0,
463 			      0, 0, 0, NULL);
464 }
465 
466 /* Called directly by the hotplug callbacks */
467 static void _local_event_unregister(void *data)
468 {
469 	int err;
470 	struct sdei_crosscall_args *arg = data;
471 
472 	WARN_ON_ONCE(preemptible());
473 
474 	err = sdei_api_event_unregister(arg->event->event_num);
475 
476 	sdei_cross_call_return(arg, err);
477 }
478 
479 static int _sdei_event_unregister(struct sdei_event *event)
480 {
481 	lockdep_assert_held(&sdei_events_lock);
482 
483 	if (event->type == SDEI_EVENT_TYPE_SHARED)
484 		return sdei_api_event_unregister(event->event_num);
485 
486 	return sdei_do_cross_call(_local_event_unregister, event);
487 }
488 
489 int sdei_event_unregister(u32 event_num)
490 {
491 	int err;
492 	struct sdei_event *event;
493 
494 	WARN_ON(in_nmi());
495 
496 	mutex_lock(&sdei_events_lock);
497 	event = sdei_event_find(event_num);
498 	if (!event) {
499 		pr_warn("Event %u not registered\n", event_num);
500 		err = -ENOENT;
501 		goto unlock;
502 	}
503 
504 	spin_lock(&sdei_list_lock);
505 	event->reregister = false;
506 	event->reenable = false;
507 	spin_unlock(&sdei_list_lock);
508 
509 	err = _sdei_event_unregister(event);
510 	if (err)
511 		goto unlock;
512 
513 	sdei_event_destroy(event);
514 unlock:
515 	mutex_unlock(&sdei_events_lock);
516 
517 	return err;
518 }
519 
520 /*
521  * Unregister the shared events, but don't destroy them: they are
522  * re-registered by sdei_reregister_shared().
523  */
524 static int sdei_unregister_shared(void)
525 {
526 	int err = 0;
527 	struct sdei_event *event;
528 
529 	mutex_lock(&sdei_events_lock);
530 	spin_lock(&sdei_list_lock);
531 	list_for_each_entry(event, &sdei_list, list) {
532 		if (event->type != SDEI_EVENT_TYPE_SHARED)
533 			continue;
534 
535 		err = _sdei_event_unregister(event);
536 		if (err)
537 			break;
538 	}
539 	spin_unlock(&sdei_list_lock);
540 	mutex_unlock(&sdei_events_lock);
541 
542 	return err;
543 }
544 
545 static int sdei_api_event_register(u32 event_num, unsigned long entry_point,
546 				   void *arg, u64 flags, u64 affinity)
547 {
548 	return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_REGISTER, event_num,
549 			      (unsigned long)entry_point, (unsigned long)arg,
550 			      flags, affinity, NULL);
551 }
552 
553 /* Called directly by the hotplug callbacks */
554 static void _local_event_register(void *data)
555 {
556 	int err;
557 	struct sdei_registered_event *reg;
558 	struct sdei_crosscall_args *arg = data;
559 
560 	WARN_ON(preemptible());
561 
562 	reg = per_cpu_ptr(arg->event->private_registered, smp_processor_id());
563 	err = sdei_api_event_register(arg->event->event_num, sdei_entry_point,
564 				      reg, 0, 0);
565 
566 	sdei_cross_call_return(arg, err);
567 }
568 
569 static int _sdei_event_register(struct sdei_event *event)
570 {
571 	int err;
572 
573 	lockdep_assert_held(&sdei_events_lock);
574 
575 	if (event->type == SDEI_EVENT_TYPE_SHARED)
576 		return sdei_api_event_register(event->event_num,
577 					       sdei_entry_point,
578 					       event->registered,
579 					       SDEI_EVENT_REGISTER_RM_ANY, 0);
580 
581 	err = sdei_do_cross_call(_local_event_register, event);
582 	if (err)
583 		sdei_do_cross_call(_local_event_unregister, event);
584 
585 	return err;
586 }
587 
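/*
 * Register 'cb' for 'event_num'. Shared events are routed to any CPU
 * (SDEI_EVENT_REGISTER_RM_ANY); private events are registered on every CPU.
 *
 * A minimal (hypothetical) caller would pair this with sdei_event_enable(),
 * e.g.:
 *
 *	static int my_cb(u32 event_num, struct pt_regs *regs, void *arg)
 *	{
 *		return 0;
 *	}
 *	...
 *	err = sdei_event_register(event_num, my_cb, NULL);
 *	if (!err)
 *		err = sdei_event_enable(event_num);
 */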
588 int sdei_event_register(u32 event_num, sdei_event_callback *cb, void *arg)
589 {
590 	int err;
591 	struct sdei_event *event;
592 
593 	WARN_ON(in_nmi());
594 
595 	mutex_lock(&sdei_events_lock);
596 	if (sdei_event_find(event_num)) {
597 		pr_warn("Event %u already registered\n", event_num);
598 		err = -EBUSY;
599 		goto unlock;
600 	}
601 
602 	event = sdei_event_create(event_num, cb, arg);
603 	if (IS_ERR(event)) {
604 		err = PTR_ERR(event);
605 		pr_warn("Failed to create event %u: %d\n", event_num, err);
606 		goto unlock;
607 	}
608 
609 	cpus_read_lock();
610 	err = _sdei_event_register(event);
611 	if (err) {
612 		sdei_event_destroy(event);
613 		pr_warn("Failed to register event %u: %d\n", event_num, err);
614 		goto cpu_unlock;
615 	}
616 
617 	spin_lock(&sdei_list_lock);
618 	event->reregister = true;
619 	spin_unlock(&sdei_list_lock);
620 cpu_unlock:
621 	cpus_read_unlock();
622 unlock:
623 	mutex_unlock(&sdei_events_lock);
624 	return err;
625 }
626 
627 static int sdei_reregister_event_llocked(struct sdei_event *event)
628 {
629 	int err;
630 
631 	lockdep_assert_held(&sdei_events_lock);
632 	lockdep_assert_held(&sdei_list_lock);
633 
634 	err = _sdei_event_register(event);
635 	if (err) {
636 		pr_err("Failed to re-register event %u\n", event->event_num);
637 		sdei_event_destroy_llocked(event);
638 		return err;
639 	}
640 
641 	if (event->reenable) {
642 		if (event->type == SDEI_EVENT_TYPE_SHARED)
643 			err = sdei_api_event_enable(event->event_num);
644 		else
645 			err = sdei_do_cross_call(_local_event_enable, event);
646 	}
647 
648 	if (err)
649 		pr_err("Failed to re-enable event %u\n", event->event_num);
650 
651 	return err;
652 }
653 
654 static int sdei_reregister_shared(void)
655 {
656 	int err = 0;
657 	struct sdei_event *event;
658 
659 	mutex_lock(&sdei_events_lock);
660 	spin_lock(&sdei_list_lock);
661 	list_for_each_entry(event, &sdei_list, list) {
662 		if (event->type != SDEI_EVENT_TYPE_SHARED)
663 			continue;
664 
665 		if (event->reregister) {
666 			err = sdei_reregister_event_llocked(event);
667 			if (err)
668 				break;
669 		}
670 	}
671 	spin_unlock(&sdei_list_lock);
672 	mutex_unlock(&sdei_events_lock);
673 
674 	return err;
675 }
676 
677 static int sdei_cpuhp_down(unsigned int cpu)
678 {
679 	struct sdei_event *event;
680 	struct sdei_crosscall_args arg;
681 
682 	/* Unregister this CPU's private events */
683 	spin_lock(&sdei_list_lock);
684 	list_for_each_entry(event, &sdei_list, list) {
685 		if (event->type == SDEI_EVENT_TYPE_SHARED)
686 			continue;
687 
688 		CROSSCALL_INIT(arg, event);
689 		/* call the cross-call function locally... */
690 		_local_event_unregister(&arg);
691 		if (arg.first_error)
692 			pr_err("Failed to unregister event %u: %d\n",
693 			       event->event_num, arg.first_error);
694 	}
695 	spin_unlock(&sdei_list_lock);
696 
697 	return sdei_mask_local_cpu();
698 }
699 
700 static int sdei_cpuhp_up(unsigned int cpu)
701 {
702 	struct sdei_event *event;
703 	struct sdei_crosscall_args arg;
704 
705 	/* Re-register and re-enable this CPU's private events as needed */
706 	spin_lock(&sdei_list_lock);
707 	list_for_each_entry(event, &sdei_list, list) {
708 		if (event->type == SDEI_EVENT_TYPE_SHARED)
709 			continue;
710 
711 		if (event->reregister) {
712 			CROSSCALL_INIT(arg, event);
713 			/* call the cross-call function locally... */
714 			_local_event_register(&arg);
715 			if (arg.first_error)
716 				pr_err("Failed to re-register event %u: %d\n",
717 				       event->event_num, arg.first_error);
718 		}
719 
720 		if (event->reenable) {
721 			CROSSCALL_INIT(arg, event);
722 			_local_event_enable(&arg);
723 			if (arg.first_error)
724 				pr_err("Failed to re-enable event %u: %d\n",
725 				       event->event_num, arg.first_error);
726 		}
727 	}
728 	spin_unlock(&sdei_list_lock);
729 
730 	return sdei_unmask_local_cpu();
731 }
732 
733 /* Mask events on this CPU when it enters a low-power state; unmask on exit */
734 static int sdei_pm_notifier(struct notifier_block *nb, unsigned long action,
735 			    void *data)
736 {
737 	int rv;
738 
739 	switch (action) {
740 	case CPU_PM_ENTER:
741 		rv = sdei_mask_local_cpu();
742 		break;
743 	case CPU_PM_EXIT:
744 	case CPU_PM_ENTER_FAILED:
745 		rv = sdei_unmask_local_cpu();
746 		break;
747 	default:
748 		return NOTIFY_DONE;
749 	}
750 
751 	if (rv)
752 		return notifier_from_errno(rv);
753 
754 	return NOTIFY_OK;
755 }
756 
757 static struct notifier_block sdei_pm_nb = {
758 	.notifier_call = sdei_pm_notifier,
759 };
760 
761 static int sdei_device_suspend(struct device *dev)
762 {
763 	on_each_cpu(_ipi_mask_cpu, NULL, true);
764 
765 	return 0;
766 }
767 
768 static int sdei_device_resume(struct device *dev)
769 {
770 	on_each_cpu(_ipi_unmask_cpu, NULL, true);
771 
772 	return 0;
773 }
774 
775 /*
776  * We need all events to be reregistered when we resume from hibernate.
777  *
778  * The sequence is freeze->thaw. Reboot. freeze->restore. We unregister
779  * events during freeze, then re-register and re-enable them during thaw
780  * and restore.
781  */
782 static int sdei_device_freeze(struct device *dev)
783 {
784 	int err;
785 
786 	/* unregister private events */
787 	cpuhp_remove_state(CPUHP_AP_ARM_SDEI_STARTING);
788 
789 	err = sdei_unregister_shared();
790 	if (err)
791 		return err;
792 
793 	return 0;
794 }
795 
796 static int sdei_device_thaw(struct device *dev)
797 {
798 	int err;
799 
800 	/* re-register shared events */
801 	err = sdei_reregister_shared();
802 	if (err) {
803 		pr_warn("Failed to re-register shared events...\n");
804 		sdei_mark_interface_broken();
805 		return err;
806 	}
807 
808 	err = cpuhp_setup_state(CPUHP_AP_ARM_SDEI_STARTING, "SDEI",
809 				&sdei_cpuhp_up, &sdei_cpuhp_down);
810 	if (err)
811 		pr_warn("Failed to re-register CPU hotplug notifier...\n");
812 
813 	return err;
814 }
815 
816 static int sdei_device_restore(struct device *dev)
817 {
818 	int err;
819 
820 	err = sdei_platform_reset();
821 	if (err)
822 		return err;
823 
824 	return sdei_device_thaw(dev);
825 }
826 
827 static const struct dev_pm_ops sdei_pm_ops = {
828 	.suspend = sdei_device_suspend,
829 	.resume = sdei_device_resume,
830 	.freeze = sdei_device_freeze,
831 	.thaw = sdei_device_thaw,
832 	.restore = sdei_device_restore,
833 };
834 
835 /*
836  * Mask all CPUs and unregister all events on panic, reboot or kexec.
837  */
838 static int sdei_reboot_notifier(struct notifier_block *nb, unsigned long action,
839 				void *data)
840 {
841 	/*
842 	 * We are going to reset the interface; after this there is no point
843 	 * in doing work when we take CPUs offline.
844 	 */
845 	cpuhp_remove_state(CPUHP_AP_ARM_SDEI_STARTING);
846 
847 	sdei_platform_reset();
848 
849 	return NOTIFY_OK;
850 }
851 
852 static struct notifier_block sdei_reboot_nb = {
853 	.notifier_call = sdei_reboot_notifier,
854 };
855 
856 static void sdei_smccc_smc(unsigned long function_id,
857 			   unsigned long arg0, unsigned long arg1,
858 			   unsigned long arg2, unsigned long arg3,
859 			   unsigned long arg4, struct arm_smccc_res *res)
860 {
861 	arm_smccc_smc(function_id, arg0, arg1, arg2, arg3, arg4, 0, 0, res);
862 }
863 NOKPROBE_SYMBOL(sdei_smccc_smc);
864 
865 static void sdei_smccc_hvc(unsigned long function_id,
866 			   unsigned long arg0, unsigned long arg1,
867 			   unsigned long arg2, unsigned long arg3,
868 			   unsigned long arg4, struct arm_smccc_res *res)
869 {
870 	arm_smccc_hvc(function_id, arg0, arg1, arg2, arg3, arg4, 0, 0, res);
871 }
872 NOKPROBE_SYMBOL(sdei_smccc_hvc);
873 
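/*
 * Register a GHES notification: pick the critical or normal callback based
 * on the event's advertised priority, then register and enable the event.
 */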
874 int sdei_register_ghes(struct ghes *ghes, sdei_event_callback *normal_cb,
875 		       sdei_event_callback *critical_cb)
876 {
877 	int err;
878 	u64 result;
879 	u32 event_num;
880 	sdei_event_callback *cb;
881 
882 	if (!IS_ENABLED(CONFIG_ACPI_APEI_GHES))
883 		return -EOPNOTSUPP;
884 
885 	event_num = ghes->generic->notify.vector;
886 	if (event_num == 0) {
887 		/*
888 		 * Event 0 is reserved by the specification for
889 		 * SDEI_EVENT_SIGNAL.
890 		 */
891 		return -EINVAL;
892 	}
893 
894 	err = sdei_api_event_get_info(event_num, SDEI_EVENT_INFO_EV_PRIORITY,
895 				      &result);
896 	if (err)
897 		return err;
898 
899 	if (result == SDEI_EVENT_PRIORITY_CRITICAL)
900 		cb = critical_cb;
901 	else
902 		cb = normal_cb;
903 
904 	err = sdei_event_register(event_num, cb, ghes);
905 	if (!err)
906 		err = sdei_event_enable(event_num);
907 
908 	return err;
909 }
910 
911 int sdei_unregister_ghes(struct ghes *ghes)
912 {
913 	int i;
914 	int err;
915 	u32 event_num = ghes->generic->notify.vector;
916 
917 	might_sleep();
918 
919 	if (!IS_ENABLED(CONFIG_ACPI_APEI_GHES))
920 		return -EOPNOTSUPP;
921 
922 	/*
923 	 * The event may be running on another CPU: disable it to stop new
924 	 * invocations, then retry the unregister while firmware reports it pending.
925 	 */
926 	err = sdei_event_disable(event_num);
927 	if (err)
928 		return err;
929 
930 	for (i = 0; i < 3; i++) {
931 		err = sdei_event_unregister(event_num);
932 		if (err != -EINPROGRESS)
933 			break;
934 
935 		schedule();
936 	}
937 
938 	return err;
939 }
940 
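/*
 * Pick the SMCCC conduit: the DT "method" property ("hvc" or "smc"), or on
 * ACPI systems whatever PSCI uses. Also sets sdei_firmware_call accordingly.
 */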
941 static int sdei_get_conduit(struct platform_device *pdev)
942 {
943 	const char *method;
944 	struct device_node *np = pdev->dev.of_node;
945 
946 	sdei_firmware_call = NULL;
947 	if (np) {
948 		if (of_property_read_string(np, "method", &method)) {
949 			pr_warn("missing \"method\" property\n");
950 			return SMCCC_CONDUIT_NONE;
951 		}
952 
953 		if (!strcmp("hvc", method)) {
954 			sdei_firmware_call = &sdei_smccc_hvc;
955 			return SMCCC_CONDUIT_HVC;
956 		} else if (!strcmp("smc", method)) {
957 			sdei_firmware_call = &sdei_smccc_smc;
958 			return SMCCC_CONDUIT_SMC;
959 		}
960 
961 		pr_warn("invalid \"method\" property: %s\n", method);
962 	} else if (!acpi_disabled) {
963 		if (acpi_psci_use_hvc()) {
964 			sdei_firmware_call = &sdei_smccc_hvc;
965 			return SMCCC_CONDUIT_HVC;
966 		} else {
967 			sdei_firmware_call = &sdei_smccc_smc;
968 			return SMCCC_CONDUIT_SMC;
969 		}
970 	}
971 
972 	return SMCCC_CONDUIT_NONE;
973 }
974 
975 static int sdei_probe(struct platform_device *pdev)
976 {
977 	int err;
978 	u64 ver = 0;
979 	int conduit;
980 
981 	conduit = sdei_get_conduit(pdev);
982 	if (!sdei_firmware_call)
983 		return 0;
984 
985 	err = sdei_api_get_version(&ver);
986 	if (err) {
987 		pr_err("Failed to get SDEI version: %d\n", err);
988 		sdei_mark_interface_broken();
989 		return err;
990 	}
991 
992 	pr_info("SDEIv%d.%d (0x%x) detected in firmware.\n",
993 		(int)SDEI_VERSION_MAJOR(ver), (int)SDEI_VERSION_MINOR(ver),
994 		(int)SDEI_VERSION_VENDOR(ver));
995 
996 	if (SDEI_VERSION_MAJOR(ver) != 1) {
997 		pr_warn("Conflicting SDEI version detected.\n");
998 		sdei_mark_interface_broken();
999 		return -EINVAL;
1000 	}
1001 
1002 	err = sdei_platform_reset();
1003 	if (err)
1004 		return err;
1005 
1006 	sdei_entry_point = sdei_arch_get_entry_point(conduit);
1007 	if (!sdei_entry_point) {
1008 		/* Not supported due to hardware or boot configuration */
1009 		sdei_mark_interface_broken();
1010 		return 0;
1011 	}
1012 
1013 	err = cpu_pm_register_notifier(&sdei_pm_nb);
1014 	if (err) {
1015 		pr_warn("Failed to register CPU PM notifier...\n");
1016 		goto error;
1017 	}
1018 
1019 	err = register_reboot_notifier(&sdei_reboot_nb);
1020 	if (err) {
1021 		pr_warn("Failed to register reboot notifier...\n");
1022 		goto remove_cpupm;
1023 	}
1024 
1025 	err = cpuhp_setup_state(CPUHP_AP_ARM_SDEI_STARTING, "SDEI",
1026 				&sdei_cpuhp_up, &sdei_cpuhp_down);
1027 	if (err) {
1028 		pr_warn("Failed to register CPU hotplug notifier...\n");
1029 		goto remove_reboot;
1030 	}
1031 
1032 	return 0;
1033 
1034 remove_reboot:
1035 	unregister_reboot_notifier(&sdei_reboot_nb);
1036 
1037 remove_cpupm:
1038 	cpu_pm_unregister_notifier(&sdei_pm_nb);
1039 
1040 error:
1041 	sdei_mark_interface_broken();
1042 	return err;
1043 }
1044 
1045 static const struct of_device_id sdei_of_match[] = {
1046 	{ .compatible = "arm,sdei-1.0" },
1047 	{}
1048 };
1049 
1050 static struct platform_driver sdei_driver = {
1051 	.driver		= {
1052 		.name			= "sdei",
1053 		.pm			= &sdei_pm_ops,
1054 		.of_match_table		= sdei_of_match,
1055 	},
1056 	.probe		= sdei_probe,
1057 };
1058 
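/*
 * Check for the ACPI SDEI table; if it is present, sdei_init() creates the
 * platform device itself (DT systems probe via the "arm,sdei-1.0" compatible
 * instead).
 */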
1059 static bool __init sdei_present_acpi(void)
1060 {
1061 	acpi_status status;
1062 	struct acpi_table_header *sdei_table_header;
1063 
1064 	if (acpi_disabled)
1065 		return false;
1066 
1067 	status = acpi_get_table(ACPI_SIG_SDEI, 0, &sdei_table_header);
1068 	if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
1069 		const char *msg = acpi_format_exception(status);
1070 
1071 		pr_info("Failed to get ACPI:SDEI table, %s\n", msg);
1072 	}
1073 	if (ACPI_FAILURE(status))
1074 		return false;
1075 
1076 	acpi_put_table(sdei_table_header);
1077 
1078 	return true;
1079 }
1080 
1081 static int __init sdei_init(void)
1082 {
1083 	struct platform_device *pdev;
1084 	int ret;
1085 
1086 	ret = platform_driver_register(&sdei_driver);
1087 	if (ret || !sdei_present_acpi())
1088 		return ret;
1089 
1090 	pdev = platform_device_register_simple(sdei_driver.driver.name,
1091 					       0, NULL, 0);
1092 	if (IS_ERR(pdev)) {
1093 		ret = PTR_ERR(pdev);
1094 		platform_driver_unregister(&sdei_driver);
1095 		pr_info("Failed to register ACPI:SDEI platform device %d\n",
1096 			ret);
1097 	}
1098 
1099 	return ret;
1100 }
1101 
1102 /*
1103  * On an ACPI system SDEI needs to be ready before HEST:GHES tries to register
1104  * its events. ACPI is initialised from a subsys_initcall() and GHES from a
1105  * device_initcall(); we want to be called in between.
1106  */
1107 subsys_initcall_sync(sdei_init);
1108 
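/*
 * Called from the architecture's SDEI entry code when firmware delivers an
 * event. 'arg' is the sdei_registered_event pointer that was handed to
 * firmware at registration time.
 */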
1109 int sdei_event_handler(struct pt_regs *regs,
1110 		       struct sdei_registered_event *arg)
1111 {
1112 	int err;
1113 	mm_segment_t orig_addr_limit;
1114 	u32 event_num = arg->event_num;
1115 
1116 	/*
1117 	 * Save and restore 'fs'.
1118 	 * The architecture's entry code saves/restores 'fs' when taking an
1119 	 * exception from the kernel. This ensures addr_limit isn't inherited
1120 	 * if you interrupted something that allowed the uaccess routines to
1121 	 * access kernel memory.
1122 	 * Do the same here because this doesn't come via the same entry code.
1123 	 */
1124 	orig_addr_limit = force_uaccess_begin();
1125 
1126 	err = arg->callback(event_num, regs, arg->callback_arg);
1127 	if (err)
1128 		pr_err_ratelimited("event %u on CPU %u failed with error: %d\n",
1129 				   event_num, smp_processor_id(), err);
1130 
1131 	force_uaccess_end(orig_addr_limit);
1132 
1133 	return err;
1134 }
1135 NOKPROBE_SYMBOL(sdei_event_handler);
1136