/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_DEBUGREG_H
#define _ASM_X86_DEBUGREG_H

#include <linux/bug.h>
#include <linux/percpu.h>
#include <uapi/asm/debugreg.h>

DECLARE_PER_CPU(unsigned long, cpu_dr7);

#ifndef CONFIG_PARAVIRT_XXL
/*
 * These special macros can be used to get or set a debugging register
 */
#define get_debugreg(var, register)				\
	(var) = native_get_debugreg(register)
#define set_debugreg(value, register)				\
	native_set_debugreg(register, value)
#endif

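/*
 * Example (a sketch of typical use, in the style of a #DB handler:
 * read DR6 into a local and reset it to its quiescent value;
 * DR6_RESERVED comes from <uapi/asm/debugreg.h>):
 *
 *	unsigned long dr6;
 *
 *	get_debugreg(dr6, 6);
 *	set_debugreg(DR6_RESERVED, 6);
 */
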
static __always_inline unsigned long native_get_debugreg(int regno)
{
	unsigned long val = 0;	/* Damn you, gcc! */

	switch (regno) {
	case 0:
		asm("mov %%db0, %0" :"=r" (val));
		break;
	case 1:
		asm("mov %%db1, %0" :"=r" (val));
		break;
	case 2:
		asm("mov %%db2, %0" :"=r" (val));
		break;
	case 3:
		asm("mov %%db3, %0" :"=r" (val));
		break;
	case 6:
		asm("mov %%db6, %0" :"=r" (val));
		break;
	case 7:
		/*
		 * Apply __FORCE_ORDER to DR7 reads to forbid re-ordering them
		 * with other code.
		 *
		 * This is needed because a DR7 access can cause a #VC exception
		 * when running under SEV-ES. Taking a #VC exception is not a
		 * safe thing to do just anywhere in the entry code and
		 * re-ordering might place the access into an unsafe location.
		 *
		 * This happened in the NMI handler, where the DR7 read was
		 * re-ordered to happen before the call to sev_es_ist_enter(),
		 * causing stack recursion.
		 */
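		/*
		 * (__FORCE_ORDER is a dummy memory operand, defined in
		 * <asm/special_insns.h>; the artificial dependency it adds
		 * keeps the compiler from reordering this asm statement
		 * with surrounding code.)
		 */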
		asm volatile("mov %%db7, %0" : "=r" (val) : __FORCE_ORDER);
		break;
	default:
		BUG();
	}
	return val;
}

static __always_inline void native_set_debugreg(int regno, unsigned long value)
{
	switch (regno) {
	case 0:
		asm("mov %0, %%db0"	::"r" (value));
		break;
	case 1:
		asm("mov %0, %%db1"	::"r" (value));
		break;
	case 2:
		asm("mov %0, %%db2"	::"r" (value));
		break;
	case 3:
		asm("mov %0, %%db3"	::"r" (value));
		break;
	case 6:
		asm("mov %0, %%db6"	::"r" (value));
		break;
	case 7:
		/*
		 * Apply __FORCE_ORDER to DR7 writes to forbid re-ordering them
		 * with other code.
		 *
		 * While it didn't happen with a DR7 write (see the DR7 read
		 * comment above, which explains where it happened), add the
		 * __FORCE_ORDER here too to avoid similar problems in the
		 * future.
		 */
		asm volatile("mov %0, %%db7"	::"r" (value), __FORCE_ORDER);
		break;
	default:
		BUG();
	}
}

static inline void hw_breakpoint_disable(void)
{
	/* Zero the control register for HW Breakpoint */
	set_debugreg(0UL, 7);

	/* Zero-out the individual HW breakpoint address registers */
	set_debugreg(0UL, 0);
	set_debugreg(0UL, 1);
	set_debugreg(0UL, 2);
	set_debugreg(0UL, 3);
}

static __always_inline bool hw_breakpoint_active(void)
{
	return __this_cpu_read(cpu_dr7) & DR_GLOBAL_ENABLE_MASK;
}

extern void hw_breakpoint_restore(void);

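/*
 * Disable all breakpoints in DR7 and return the previous DR7 contents
 * for a later local_db_restore(). When running under a hypervisor with
 * no breakpoints armed (per cpu_dr7), skip the DR7 access entirely:
 * debug-register accesses are typically intercepted and expensive
 * there.
 */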
static __always_inline unsigned long local_db_save(void)
{
	unsigned long dr7;

	if (static_cpu_has(X86_FEATURE_HYPERVISOR) && !hw_breakpoint_active())
		return 0;

	get_debugreg(dr7, 7);
	dr7 &= ~0x400; /* architecturally set bit */
	if (dr7)
		set_debugreg(0, 7);
	/*
	 * Ensure the compiler doesn't lower the above statements into
	 * the critical section; disabling breakpoints late would not
	 * be good.
	 */
	barrier();

	return dr7;
}

static __always_inline void local_db_restore(unsigned long dr7)
{
	/*
	 * Ensure the compiler doesn't raise this statement into
	 * the critical section; enabling breakpoints early would
	 * not be good.
	 */
	barrier();
	if (dr7)
		set_debugreg(dr7, 7);
}

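/*
 * Typical pairing (a sketch, not lifted from a specific call site):
 *
 *	unsigned long dr7 = local_db_save();
 *
 *	... section that must not take #DB ...
 *
 *	local_db_restore(dr7);
 */
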
#ifdef CONFIG_CPU_SUP_AMD
extern void amd_set_dr_addr_mask(unsigned long mask, unsigned int dr);
extern unsigned long amd_get_dr_addr_mask(unsigned int dr);
#else
static inline void amd_set_dr_addr_mask(unsigned long mask, unsigned int dr) { }
static inline unsigned long amd_get_dr_addr_mask(unsigned int dr)
{
	return 0;
}
#endif
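
/*
 * The helpers above wrap AMD's per-breakpoint address-mask MSRs:
 * address bits set in a mask are ignored when matching the
 * corresponding breakpoint, so one breakpoint can cover a range of
 * addresses. The !CONFIG_CPU_SUP_AMD stubs let callers stay
 * unconditional.
 */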

#endif /* _ASM_X86_DEBUGREG_H */