xref: /openbmc/linux/arch/arm64/kvm/debug.c (revision 08b7cf13)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Debug and Guest Debug support
 *
 * Copyright (C) 2015 - Linaro Ltd
 * Author: Alex Bennée <alex.bennee@linaro.org>
 */

#include <linux/kvm_host.h>
#include <linux/hw_breakpoint.h>

#include <asm/debug-monitors.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_emulate.h>

#include "trace.h"

/* These are the bits of MDSCR_EL1 we may manipulate */
#define MDSCR_EL1_DEBUG_MASK	(DBG_MDSCR_SS | \
				DBG_MDSCR_KDE | \
				DBG_MDSCR_MDE)

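/*
 * Per-CPU copy of the MDCR_EL2 value found at hyp initialisation, kept
 * so that the MDCR_EL2.HPMN field programmed by the bootcode can be
 * preserved when building each vcpu's trap configuration.
 */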
static DEFINE_PER_CPU(u64, mdcr_el2);

/*
 * save/restore_guest_debug_regs
 *
 * For some debug operations we need to tweak some guest registers, so
 * we save the state of those registers before making the modifications
 * and restore it once we are done.
 *
 * Guest access to MDSCR_EL1 is trapped by the hypervisor and handled
 * after we have restored the preserved value to the main context.
 */
static void save_guest_debug_regs(struct kvm_vcpu *vcpu)
{
	u64 val = vcpu_read_sys_reg(vcpu, MDSCR_EL1);

	vcpu->arch.guest_debug_preserved.mdscr_el1 = val;

	trace_kvm_arm_set_dreg32("Saved MDSCR_EL1",
				vcpu->arch.guest_debug_preserved.mdscr_el1);
}

static void restore_guest_debug_regs(struct kvm_vcpu *vcpu)
{
	u64 val = vcpu->arch.guest_debug_preserved.mdscr_el1;

	vcpu_write_sys_reg(vcpu, val, MDSCR_EL1);

	trace_kvm_arm_set_dreg32("Restored MDSCR_EL1",
				vcpu_read_sys_reg(vcpu, MDSCR_EL1));
}

/**
 * kvm_arm_init_debug - grab what we need for debug
 *
 * Currently the sole task of this function is to retrieve the initial
 * value of mdcr_el2 so we can preserve MDCR_EL2.HPMN, which has
 * presumably been set up by some knowledgeable bootcode.
 *
 * It is called once per CPU during hyp initialisation.
 */
void kvm_arm_init_debug(void)
{
	__this_cpu_write(mdcr_el2, kvm_call_hyp_ret(__kvm_get_mdcr_el2));
}

/**
 * kvm_arm_setup_mdcr_el2 - configure vcpu mdcr_el2 value
 *
 * @vcpu:	the vcpu pointer
 *
 * This ensures we will trap access to:
 *  - Performance monitors (MDCR_EL2_TPM/MDCR_EL2_TPMCR)
 *  - Debug ROM Address (MDCR_EL2_TDRA)
 *  - OS related registers (MDCR_EL2_TDOSA)
 *  - Statistical profiler (MDCR_EL2_TPMS/MDCR_EL2_E2PB)
 *  - Self-hosted Trace Filter controls (MDCR_EL2_TTRF)
 *  - Self-hosted Trace (MDCR_EL2_TTRF/MDCR_EL2_E2TB)
 */
static void kvm_arm_setup_mdcr_el2(struct kvm_vcpu *vcpu)
{
	/*
	 * This also clears MDCR_EL2_E2PB_MASK and MDCR_EL2_E2TB_MASK
	 * to disable guest access to the profiling and trace buffers
	 */
	vcpu->arch.mdcr_el2 = __this_cpu_read(mdcr_el2) & MDCR_EL2_HPMN_MASK;
	vcpu->arch.mdcr_el2 |= (MDCR_EL2_TPM |
				MDCR_EL2_TPMS |
				MDCR_EL2_TTRF |
				MDCR_EL2_TPMCR |
				MDCR_EL2_TDRA |
				MDCR_EL2_TDOSA);

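	/*
	 * When userspace is debugging the VM, MDCR_EL2.TDE routes debug
	 * exceptions taken in the guest to EL2, so that KVM can report
	 * them back to the debugger as KVM_EXIT_DEBUG exits.
	 */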
	/* Is the VM being debugged by userspace? */
	if (vcpu->guest_debug)
		/* Route all software debug exceptions to EL2 */
		vcpu->arch.mdcr_el2 |= MDCR_EL2_TDE;

	/*
	 * Trap debug register access when one of the following is true:
	 *  - Userspace is using the hardware to debug the guest
	 *  (KVM_GUESTDBG_USE_HW is set).
	 *  - The guest is not using debug (KVM_ARM64_DEBUG_DIRTY is clear).
	 *  - The guest has enabled the OS Lock (debug exceptions are blocked).
	 */
	if ((vcpu->guest_debug & KVM_GUESTDBG_USE_HW) ||
	    !(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY) ||
	    kvm_vcpu_os_lock_enabled(vcpu))
		vcpu->arch.mdcr_el2 |= MDCR_EL2_TDA;

	trace_kvm_arm_set_dreg32("MDCR_EL2", vcpu->arch.mdcr_el2);
}

/**
 * kvm_arm_vcpu_init_debug - set up vcpu debug traps
 *
 * @vcpu:	the vcpu pointer
 *
 * Set the vcpu's initial mdcr_el2 value.
 */
void kvm_arm_vcpu_init_debug(struct kvm_vcpu *vcpu)
{
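	/*
	 * kvm_arm_setup_mdcr_el2() samples this CPU's saved mdcr_el2
	 * value, so disable preemption to keep the vcpu thread on the
	 * current CPU while it is read.
	 */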
	preempt_disable();
	kvm_arm_setup_mdcr_el2(vcpu);
	preempt_enable();
}

/**
 * kvm_arm_reset_debug_ptr - reset the debug ptr to point to the vcpu state
 *
 * @vcpu:	the vcpu pointer
 */
void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu)
{
	vcpu->arch.debug_ptr = &vcpu->arch.vcpu_debug_state;
}

/**
 * kvm_arm_setup_debug - set up debug related stuff
 *
 * @vcpu:	the vcpu pointer
 *
 * This is called before each entry into the hypervisor to set up any
 * debug related registers.
 *
 * Additionally, KVM only traps guest accesses to the debug registers if
 * the guest is not actively using them (see the KVM_ARM64_DEBUG_DIRTY
 * flag on vcpu->arch.flags). Since the guest must not interfere with
 * the hardware debug state while we are debugging it, we must ensure
 * that trapping is enabled whenever we are debugging the guest using
 * the debug registers.
 */
void kvm_arm_setup_debug(struct kvm_vcpu *vcpu)
{
	unsigned long mdscr, orig_mdcr_el2 = vcpu->arch.mdcr_el2;

	trace_kvm_arm_setup_debug(vcpu, vcpu->guest_debug);

	kvm_arm_setup_mdcr_el2(vcpu);

	/* Check if we need to use the debug registers. */
	if (vcpu->guest_debug || kvm_vcpu_os_lock_enabled(vcpu)) {
		/* Save guest debug state */
		save_guest_debug_regs(vcpu);

		/*
		 * Single Step (ARM ARM D2.12.3 The software step state
		 * machine)
		 *
		 * If we are doing Single Step we need to manipulate
		 * the guest's MDSCR_EL1.SS and PSTATE.SS. Once the
		 * step has occurred the hypervisor will trap the
		 * debug exception and we return to userspace.
		 *
		 * If the guest attempts to single step its userspace
		 * we would have to deal with a trapped exception
		 * while in the guest kernel. Because this would be
		 * hard to unwind we suppress the guest's ability to
		 * do so by masking MDSCR_EL1.SS.
		 *
		 * This confuses guest debuggers which use single-step
		 * behind the scenes, but everything returns to normal
		 * once the host is no longer debugging the system.
		 */
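		/*
		 * With MDSCR_EL1.SS and PSTATE.SS both set, the step
		 * state machine is active-not-pending: the guest
		 * executes a single instruction after the ERET before
		 * a Software Step exception is taken.
		 */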
		if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
			*vcpu_cpsr(vcpu) |=  DBG_SPSR_SS;
			mdscr = vcpu_read_sys_reg(vcpu, MDSCR_EL1);
			mdscr |= DBG_MDSCR_SS;
			vcpu_write_sys_reg(vcpu, mdscr, MDSCR_EL1);
		} else {
			mdscr = vcpu_read_sys_reg(vcpu, MDSCR_EL1);
			mdscr &= ~DBG_MDSCR_SS;
			vcpu_write_sys_reg(vcpu, mdscr, MDSCR_EL1);
		}

		trace_kvm_arm_set_dreg32("SPSR_EL2", *vcpu_cpsr(vcpu));

		/*
		 * HW Breakpoints and watchpoints
		 *
		 * We simply switch the debug_ptr to point to our new
		 * external_debug_state which has been populated by the
		 * debug ioctl. The existing KVM_ARM64_DEBUG_DIRTY
		 * mechanism ensures the registers are updated on the
		 * world switch.
		 */
		if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW) {
			/* Enable breakpoints/watchpoints */
			mdscr = vcpu_read_sys_reg(vcpu, MDSCR_EL1);
			mdscr |= DBG_MDSCR_MDE;
			vcpu_write_sys_reg(vcpu, mdscr, MDSCR_EL1);

			vcpu->arch.debug_ptr = &vcpu->arch.external_debug_state;
			vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;

			trace_kvm_arm_set_regset("BKPTS", get_num_brps(),
						&vcpu->arch.debug_ptr->dbg_bcr[0],
						&vcpu->arch.debug_ptr->dbg_bvr[0]);

			trace_kvm_arm_set_regset("WAPTS", get_num_wrps(),
						&vcpu->arch.debug_ptr->dbg_wcr[0],
						&vcpu->arch.debug_ptr->dbg_wvr[0]);

		/*
		 * The OS Lock blocks debug exceptions in all ELs when it is
		 * enabled. If the guest has enabled the OS Lock, constrain its
		 * effects to the guest. Emulate the behavior by clearing
		 * MDSCR_EL1.MDE. In so doing, we ensure that host debug
		 * exceptions are unaffected by guest configuration of the OS
		 * Lock.
		 */
		} else if (kvm_vcpu_os_lock_enabled(vcpu)) {
			mdscr = vcpu_read_sys_reg(vcpu, MDSCR_EL1);
			mdscr &= ~DBG_MDSCR_MDE;
			vcpu_write_sys_reg(vcpu, mdscr, MDSCR_EL1);
		}
	}

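	/*
	 * Sanity check: unless userspace debug swapped it above, the
	 * debug_ptr must still reference the vcpu's own debug state.
	 */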
	BUG_ON(!vcpu->guest_debug &&
		vcpu->arch.debug_ptr != &vcpu->arch.vcpu_debug_state);

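	/*
	 * KVM_ARM64_DEBUG_DIRTY tells the world-switch code to save and
	 * restore the full debug register state around the guest run.
	 */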
	/* If KDE or MDE are set, perform a full save/restore cycle. */
	if (vcpu_read_sys_reg(vcpu, MDSCR_EL1) & (DBG_MDSCR_KDE | DBG_MDSCR_MDE))
		vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;

	/* Write mdcr_el2 changes since vcpu_load on VHE systems */
	if (has_vhe() && orig_mdcr_el2 != vcpu->arch.mdcr_el2)
		write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);

	trace_kvm_arm_set_dreg32("MDSCR_EL1", vcpu_read_sys_reg(vcpu, MDSCR_EL1));
}

void kvm_arm_clear_debug(struct kvm_vcpu *vcpu)
{
	trace_kvm_arm_clear_debug(vcpu->guest_debug);

	/*
	 * Restore the guest's debug registers if we were using them.
	 */
	if (vcpu->guest_debug || kvm_vcpu_os_lock_enabled(vcpu)) {
		restore_guest_debug_regs(vcpu);

		/*
		 * If we were using HW debug we need to restore the
		 * debug_ptr to the guest debug state.
		 */
		if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW) {
			kvm_arm_reset_debug_ptr(vcpu);

			trace_kvm_arm_set_regset("BKPTS", get_num_brps(),
						&vcpu->arch.debug_ptr->dbg_bcr[0],
						&vcpu->arch.debug_ptr->dbg_bvr[0]);

			trace_kvm_arm_set_regset("WAPTS", get_num_wrps(),
						&vcpu->arch.debug_ptr->dbg_wcr[0],
						&vcpu->arch.debug_ptr->dbg_wvr[0]);
		}
	}
}

void kvm_arch_vcpu_load_debug_state_flags(struct kvm_vcpu *vcpu)
{
	u64 dfr0;

	/* For VHE, there is nothing to do */
	if (has_vhe())
		return;

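	/*
	 * On nVHE systems the flags set below tell the hyp world-switch
	 * code whether host SPE/TRBE state must be saved (and the
	 * buffers quiesced) before running the guest.
	 */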
	dfr0 = read_sysreg(id_aa64dfr0_el1);
	/*
	 * If SPE is present on this CPU and is available at the current EL,
	 * we may need to check if the host state needs to be saved.
	 */
	if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_PMSVER_SHIFT) &&
	    !(read_sysreg_s(SYS_PMBIDR_EL1) & BIT(SYS_PMBIDR_EL1_P_SHIFT)))
		vcpu->arch.flags |= KVM_ARM64_DEBUG_STATE_SAVE_SPE;

	/* Check if we have TRBE implemented and available at the host */
	if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_TRBE_SHIFT) &&
	    !(read_sysreg_s(SYS_TRBIDR_EL1) & TRBIDR_PROG))
		vcpu->arch.flags |= KVM_ARM64_DEBUG_STATE_SAVE_TRBE;
}

void kvm_arch_vcpu_put_debug_state_flags(struct kvm_vcpu *vcpu)
{
	vcpu->arch.flags &= ~(KVM_ARM64_DEBUG_STATE_SAVE_SPE |
			      KVM_ARM64_DEBUG_STATE_SAVE_TRBE);
}