xref: /openbmc/linux/arch/arm64/kvm/debug.c (revision 3b23dc52)
/*
 * Debug and Guest Debug support
 *
 * Copyright (C) 2015 - Linaro Ltd
 * Author: Alex Bennée <alex.bennee@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/kvm_host.h>
#include <linux/hw_breakpoint.h>

#include <asm/debug-monitors.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_emulate.h>

#include "trace.h"

/* These are the bits of MDSCR_EL1 we may manipulate */
#define MDSCR_EL1_DEBUG_MASK	(DBG_MDSCR_SS | \
				DBG_MDSCR_KDE | \
				DBG_MDSCR_MDE)

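/* Initial MDCR_EL2 value, saved per-CPU by kvm_arm_init_debug() */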
static DEFINE_PER_CPU(u32, mdcr_el2);

/**
 * save/restore_guest_debug_regs
 *
 * For some debug operations we need to tweak some guest registers. As
 * a result we need to save the state of those registers before we
 * make those modifications.
 *
 * Guest access to MDSCR_EL1 is trapped by the hypervisor and handled
 * after we have restored the preserved value to the main context.
 */
static void save_guest_debug_regs(struct kvm_vcpu *vcpu)
{
	u64 val = vcpu_read_sys_reg(vcpu, MDSCR_EL1);

	vcpu->arch.guest_debug_preserved.mdscr_el1 = val;

	trace_kvm_arm_set_dreg32("Saved MDSCR_EL1",
				vcpu->arch.guest_debug_preserved.mdscr_el1);
}

static void restore_guest_debug_regs(struct kvm_vcpu *vcpu)
{
	u64 val = vcpu->arch.guest_debug_preserved.mdscr_el1;

	vcpu_write_sys_reg(vcpu, val, MDSCR_EL1);

	trace_kvm_arm_set_dreg32("Restored MDSCR_EL1",
				vcpu_read_sys_reg(vcpu, MDSCR_EL1));
}

/**
 * kvm_arm_init_debug - grab what we need for debug
 *
 * Currently the sole task of this function is to retrieve the initial
 * value of mdcr_el2 so we can preserve MDCR_EL2.HPMN which has
 * presumably been set up by some knowledgeable bootcode.
 *
 * It is called once per-cpu during CPU hyp initialisation.
 */
void kvm_arm_init_debug(void)
{
	__this_cpu_write(mdcr_el2, kvm_call_hyp(__kvm_get_mdcr_el2));
}

/**
 * kvm_arm_reset_debug_ptr - reset the debug ptr to point to the vcpu state
 *
 * @vcpu:	the vcpu pointer
 */
void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu)
{
	vcpu->arch.debug_ptr = &vcpu->arch.vcpu_debug_state;
}

/**
 * kvm_arm_setup_debug - set up debug related stuff
 *
 * @vcpu:	the vcpu pointer
 *
 * This is called before each entry into the hypervisor to set up any
 * debug-related registers. Currently this just ensures we will trap
 * access to:
 *  - Performance monitors (MDCR_EL2_TPM/MDCR_EL2_TPMCR)
 *  - Debug ROM Address (MDCR_EL2_TDRA)
 *  - OS related registers (MDCR_EL2_TDOSA)
 *  - Statistical profiler (MDCR_EL2_TPMS/MDCR_EL2_E2PB)
 *
 * Additionally, KVM only traps guest accesses to the debug registers if
 * the guest is not actively using them (see the KVM_ARM64_DEBUG_DIRTY
 * flag on vcpu->arch.flags).  Since the guest must not interfere
 * with the hardware state when debugging the guest, we must ensure that
 * trapping is enabled whenever we are debugging the guest using the
 * debug registers.
 */
void kvm_arm_setup_debug(struct kvm_vcpu *vcpu)
{
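	/* Trap debug register access unless the guest is already using them */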
	bool trap_debug = !(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY);
	unsigned long mdscr;

	trace_kvm_arm_setup_debug(vcpu, vcpu->guest_debug);

	/*
	 * This also clears MDCR_EL2_E2PB_MASK to disable guest access
	 * to the profiling buffer.
	 */
	vcpu->arch.mdcr_el2 = __this_cpu_read(mdcr_el2) & MDCR_EL2_HPMN_MASK;
	vcpu->arch.mdcr_el2 |= (MDCR_EL2_TPM |
				MDCR_EL2_TPMS |
				MDCR_EL2_TPMCR |
				MDCR_EL2_TDRA |
				MDCR_EL2_TDOSA);

	/* Is Guest debugging in effect? */
	if (vcpu->guest_debug) {
		/* Route all software debug exceptions to EL2 */
		vcpu->arch.mdcr_el2 |= MDCR_EL2_TDE;

		/* Save guest debug state */
		save_guest_debug_regs(vcpu);

		/*
		 * Single Step (ARM ARM D2.12.3 The software step state
		 * machine)
		 *
		 * If we are doing Single Step we need to manipulate
		 * the guest's MDSCR_EL1.SS and PSTATE.SS. Once the
		 * step has occurred the hypervisor will trap the
		 * debug exception and we return to userspace.
		 *
		 * If the guest attempts to single step its userspace
		 * we would have to deal with a trapped exception
		 * while in the guest kernel. Because this would be
		 * hard to unwind we suppress the guest's ability to
		 * do so by masking MDSCR_EL1.SS.
		 *
		 * This confuses guest debuggers which use
		 * single-step behind the scenes but everything
		 * returns to normal once the host is no longer
		 * debugging the system.
		 */
		if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
			*vcpu_cpsr(vcpu) |=  DBG_SPSR_SS;
			mdscr = vcpu_read_sys_reg(vcpu, MDSCR_EL1);
			mdscr |= DBG_MDSCR_SS;
			vcpu_write_sys_reg(vcpu, mdscr, MDSCR_EL1);
		} else {
			mdscr = vcpu_read_sys_reg(vcpu, MDSCR_EL1);
			mdscr &= ~DBG_MDSCR_SS;
			vcpu_write_sys_reg(vcpu, mdscr, MDSCR_EL1);
		}

		trace_kvm_arm_set_dreg32("SPSR_EL2", *vcpu_cpsr(vcpu));

		/*
		 * HW Breakpoints and watchpoints
		 *
		 * We simply switch the debug_ptr to point to our new
		 * external_debug_state which has been populated by the
		 * debug ioctl. The existing KVM_ARM64_DEBUG_DIRTY
		 * mechanism ensures the registers are updated on the
		 * world switch.
		 */
		if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW) {
			/* Enable breakpoints/watchpoints */
			mdscr = vcpu_read_sys_reg(vcpu, MDSCR_EL1);
			mdscr |= DBG_MDSCR_MDE;
			vcpu_write_sys_reg(vcpu, mdscr, MDSCR_EL1);

			vcpu->arch.debug_ptr = &vcpu->arch.external_debug_state;
			vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
			trap_debug = true;

			trace_kvm_arm_set_regset("BKPTS", get_num_brps(),
						&vcpu->arch.debug_ptr->dbg_bcr[0],
						&vcpu->arch.debug_ptr->dbg_bvr[0]);

			trace_kvm_arm_set_regset("WAPTS", get_num_wrps(),
						&vcpu->arch.debug_ptr->dbg_wcr[0],
						&vcpu->arch.debug_ptr->dbg_wvr[0]);
		}
	}

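	/* When not debugging, debug_ptr must point back at the vcpu's own state */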
	BUG_ON(!vcpu->guest_debug &&
		vcpu->arch.debug_ptr != &vcpu->arch.vcpu_debug_state);

	/* Trap debug register access */
	if (trap_debug)
		vcpu->arch.mdcr_el2 |= MDCR_EL2_TDA;

	/* If KDE or MDE are set, perform a full save/restore cycle. */
	if (vcpu_read_sys_reg(vcpu, MDSCR_EL1) & (DBG_MDSCR_KDE | DBG_MDSCR_MDE))
		vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;

	trace_kvm_arm_set_dreg32("MDCR_EL2", vcpu->arch.mdcr_el2);
	trace_kvm_arm_set_dreg32("MDSCR_EL1", vcpu_read_sys_reg(vcpu, MDSCR_EL1));
}

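/**
 * kvm_arm_clear_debug - restore the guest's debug state
 *
 * @vcpu:	the vcpu pointer
 *
 * If userspace was debugging the guest, restore the MDSCR_EL1 value
 * and the debug_ptr that kvm_arm_setup_debug() modified on entry.
 */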
void kvm_arm_clear_debug(struct kvm_vcpu *vcpu)
{
	trace_kvm_arm_clear_debug(vcpu->guest_debug);

	if (vcpu->guest_debug) {
		restore_guest_debug_regs(vcpu);

		/*
		 * If we were using HW debug we need to restore the
		 * debug_ptr to the guest debug state.
		 */
		if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW) {
			kvm_arm_reset_debug_ptr(vcpu);

			trace_kvm_arm_set_regset("BKPTS", get_num_brps(),
						&vcpu->arch.debug_ptr->dbg_bcr[0],
						&vcpu->arch.debug_ptr->dbg_bvr[0]);

			trace_kvm_arm_set_regset("WAPTS", get_num_wrps(),
						&vcpu->arch.debug_ptr->dbg_wcr[0],
						&vcpu->arch.debug_ptr->dbg_wvr[0]);
		}
	}
}

/*
 * After successfully emulating an instruction, we might want to
 * return to user space with a KVM_EXIT_DEBUG. We can only do this
 * once the emulation is complete, though, so for userspace emulations
 * we have to wait until we have re-entered KVM before calling this
 * helper.
 *
 * Return true (and set exit_reason) to return to userspace or false
 * if no further action is required.
 */
bool kvm_arm_handle_step_debug(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
		run->exit_reason = KVM_EXIT_DEBUG;
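		/* Report a software step exception taken from a lower EL */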
		run->debug.arch.hsr = ESR_ELx_EC_SOFTSTP_LOW << ESR_ELx_EC_SHIFT;
		return true;
	}
	return false;
}