1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /*
3  * KVM nVHE hypervisor stack tracing support.
4  *
5  * The unwinder implementation depends on the nVHE mode:
6  *
 *   1) Non-protected nVHE mode - the host can directly access the
 *      HYP stack pages and unwind the HYP stack in EL1. This saves having
 *      to allocate shared buffers for the host to read the unwound
 *      stacktrace.
11  *
 *   2) pKVM (protected nVHE) mode - the host cannot directly access
 *      the HYP memory. The stack is unwound in EL2 and dumped to a shared
 *      buffer where the host can read and print the stacktrace.
15  *
16  * Copyright (C) 2022 Google LLC
17  */
18 #ifndef __ASM_STACKTRACE_NVHE_H
19 #define __ASM_STACKTRACE_NVHE_H
20 
21 #include <asm/stacktrace/common.h>
22 
23 /*
24  * kvm_nvhe_unwind_init - Start an unwind from the given nVHE HYP fp and pc
25  *
26  * @state : unwind_state to initialize
27  * @fp    : frame pointer at which to start the unwinding.
28  * @pc    : program counter at which to start the unwinding.
29  */
30 static inline void kvm_nvhe_unwind_init(struct unwind_state *state,
31 					unsigned long fp,
32 					unsigned long pc)
33 {
34 	unwind_init_common(state, NULL);
35 
36 	state->fp = fp;
37 	state->pc = pc;
38 }
39 
40 #ifndef __KVM_NVHE_HYPERVISOR__
41 /*
42  * Conventional (non-protected) nVHE HYP stack unwinder
43  *
44  * In non-protected mode, the unwinding is done from kernel proper context
45  * (by the host in EL1).
46  */
47 
/* Per-CPU nVHE overflow stack, OVERFLOW_STACK_SIZE bytes per CPU. */
DECLARE_KVM_NVHE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack);
/*
 * Per-CPU stacktrace info living in the nVHE hyp object; presumably the
 * context the host needs to unwind the HYP stack — confirm against the
 * struct definition.
 */
DECLARE_KVM_NVHE_PER_CPU(struct kvm_nvhe_stacktrace_info, kvm_stacktrace_info);
/* Host-side per-CPU value; name suggests the page backing the HYP stack. */
DECLARE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);

/*
 * Dump the nVHE HYP backtrace from the host (non-protected mode).
 * NOTE(review): @hyp_offset looks like the hyp VA offset used to translate
 * hyp addresses for printing — verify against the implementation.
 */
void kvm_nvhe_dump_backtrace(unsigned long hyp_offset);
53 
54 #endif	/* __KVM_NVHE_HYPERVISOR__ */
55 #endif	/* __ASM_STACKTRACE_NVHE_H */
56