1b2441318SGreg Kroah-Hartman /* SPDX-License-Identifier: GPL-2.0 */
21965aae3SH. Peter Anvin #ifndef _ASM_X86_DEBUGREG_H
31965aae3SH. Peter Anvin #define _ASM_X86_DEBUGREG_H
4bb898558SAl Viro
5f649e938SPaul Gortmaker #include <linux/bug.h>
62d08a893SSean Christopherson #include <linux/percpu.h>
7af170c50SDavid Howells #include <uapi/asm/debugreg.h>
8f649e938SPaul Gortmaker
/* Per-CPU shadow of the last value written to DR7 (hardware breakpoint control). */
DECLARE_PER_CPU(unsigned long, cpu_dr7);

#ifndef CONFIG_PARAVIRT_XXL
/*
 * These special macros can be used to get or set a debugging register.
 * Under CONFIG_PARAVIRT_XXL these are provided by paravirt ops instead,
 * so the native accessors below are only wired up here for bare metal.
 * Note the argument order: get_debugreg(var, regno) / set_debugreg(value, regno).
 */
#define get_debugreg(var, register)				\
	(var) = native_get_debugreg(register)
#define set_debugreg(value, register)				\
	native_set_debugreg(register, value)
#endif
20f649e938SPaul Gortmaker
/*
 * Read hardware debug register @regno (DR0-DR3 breakpoint addresses,
 * DR6 status, DR7 control). Any other index is a kernel bug.
 * __always_inline so this stays usable from entry/noinstr code.
 */
static __always_inline unsigned long native_get_debugreg(int regno)
{
	unsigned long val = 0;	/* Damn you, gcc! */

	switch (regno) {
	case 0:
		asm("mov %%db0, %0" :"=r" (val));
		break;
	case 1:
		asm("mov %%db1, %0" :"=r" (val));
		break;
	case 2:
		asm("mov %%db2, %0" :"=r" (val));
		break;
	case 3:
		asm("mov %%db3, %0" :"=r" (val));
		break;
	case 6:
		asm("mov %%db6, %0" :"=r" (val));
		break;
	case 7:
		/*
		 * Apply __FORCE_ORDER to DR7 reads to forbid re-ordering them
		 * with other code.
		 *
		 * This is needed because a DR7 access can cause a #VC exception
		 * when running under SEV-ES. Taking a #VC exception is not a
		 * safe thing to do just anywhere in the entry code and
		 * re-ordering might place the access into an unsafe location.
		 *
		 * This happened in the NMI handler, where the DR7 read was
		 * re-ordered to happen before the call to sev_es_ist_enter(),
		 * causing stack recursion.
		 */
		asm volatile("mov %%db7, %0" : "=r" (val) : __FORCE_ORDER);
		break;
	default:
		BUG();
	}
	return val;
}
62f649e938SPaul Gortmaker
/*
 * Write @value to hardware debug register @regno (DR0-DR3 breakpoint
 * addresses, DR6 status, DR7 control). Any other index is a kernel bug.
 * __always_inline so this stays usable from entry/noinstr code.
 */
static __always_inline void native_set_debugreg(int regno, unsigned long value)
{
	switch (regno) {
	case 0:
		asm("mov %0, %%db0" ::"r" (value));
		break;
	case 1:
		asm("mov %0, %%db1" ::"r" (value));
		break;
	case 2:
		asm("mov %0, %%db2" ::"r" (value));
		break;
	case 3:
		asm("mov %0, %%db3" ::"r" (value));
		break;
	case 6:
		asm("mov %0, %%db6" ::"r" (value));
		break;
	case 7:
		/*
		 * Apply __FORCE_ORDER to DR7 writes to forbid re-ordering them
		 * with other code.
		 *
		 * While it didn't happen with a DR7 write (see the DR7 read
		 * comment above which explains where it happened), add the
		 * __FORCE_ORDER here too to avoid similar problems in the
		 * future.
		 */
		asm volatile("mov %0, %%db7" ::"r" (value), __FORCE_ORDER);
		break;
	default:
		BUG();
	}
}
97f649e938SPaul Gortmaker
/*
 * Disable all hardware breakpoints on this CPU: clear the DR7 control
 * register first so no breakpoint can fire, then wipe the four address
 * registers DR0-DR3.
 */
static inline void hw_breakpoint_disable(void)
{
	int dr;

	/* Turn off breakpoint matching before touching the address registers. */
	set_debugreg(0UL, 7);

	for (dr = 0; dr < 4; dr++)
		set_debugreg(0UL, dr);
}
109b332828cSK.Prasad
hw_breakpoint_active(void)11028eaf871SPeter Zijlstra static __always_inline bool hw_breakpoint_active(void)
11159d8eb53SFrederic Weisbecker {
1120a3aee0dSTejun Heo return __this_cpu_read(cpu_dr7) & DR_GLOBAL_ENABLE_MASK;
11359d8eb53SFrederic Weisbecker }
11459d8eb53SFrederic Weisbecker
11524f1e32cSFrederic Weisbecker extern void hw_breakpoint_restore(void);
11624f1e32cSFrederic Weisbecker
/*
 * Disable hardware breakpoints for a local critical section and return
 * the previous DR7 value for a later local_db_restore(). Returns 0 when
 * nothing was enabled (so restore becomes a no-op).
 */
static __always_inline unsigned long local_db_save(void)
{
	unsigned long dr7;

	/*
	 * Under a hypervisor, DR accesses trap/exit; skip them entirely when
	 * the per-CPU shadow says no breakpoints are active.
	 */
	if (static_cpu_has(X86_FEATURE_HYPERVISOR) && !hw_breakpoint_active())
		return 0;

	get_debugreg(dr7, 7);
	dr7 &= ~0x400; /* architecturally set bit */
	if (dr7)
		set_debugreg(0, 7);
	/*
	 * Ensure the compiler doesn't lower the above statements into
	 * the critical section; disabling breakpoints late would not
	 * be good.
	 */
	barrier();

	return dr7;
}
137e1de11d4SPeter Zijlstra
local_db_restore(unsigned long dr7)138e1de11d4SPeter Zijlstra static __always_inline void local_db_restore(unsigned long dr7)
139e1de11d4SPeter Zijlstra {
140e1de11d4SPeter Zijlstra /*
141e1de11d4SPeter Zijlstra * Ensure the compiler doesn't raise this statement into
142e1de11d4SPeter Zijlstra * the critical section; enabling breakpoints early would
143e1de11d4SPeter Zijlstra * not be good.
144e1de11d4SPeter Zijlstra */
145e1de11d4SPeter Zijlstra barrier();
146e1de11d4SPeter Zijlstra if (dr7)
147e1de11d4SPeter Zijlstra set_debugreg(dr7, 7);
148e1de11d4SPeter Zijlstra }
149e1de11d4SPeter Zijlstra
#ifdef CONFIG_CPU_SUP_AMD
/*
 * Accessors for the AMD-specific per-breakpoint address-mask registers
 * (DR0-DR3 masking). Implemented in AMD CPU support code.
 */
extern void amd_set_dr_addr_mask(unsigned long mask, unsigned int dr);
extern unsigned long amd_get_dr_addr_mask(unsigned int dr);
#else
/* No address masking on non-AMD CPUs: setting is a no-op, reading yields 0. */
static inline void amd_set_dr_addr_mask(unsigned long mask, unsigned int dr) { }
static inline unsigned long amd_get_dr_addr_mask(unsigned int dr)
{
	return 0;
}
#endif
16042181186SSteven Rostedt
1611965aae3SH. Peter Anvin #endif /* _ASM_X86_DEBUGREG_H */
162