/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_SPECIAL_INSNS_H
#define _ASM_X86_SPECIAL_INSNS_H


#ifdef __KERNEL__

#include <asm/nops.h>
#include <asm/processor-flags.h>
#include <linux/jump_label.h>

/*
 * Volatile isn't enough to prevent the compiler from reordering the
 * read/write functions for the control registers and messing everything up.
 * A memory clobber would solve the problem, but would prevent reordering of
 * all loads and stores around it, which can hurt performance.  The solution
 * is to use a variable and mimic reads and writes to it to enforce
 * serialization.
 */
extern unsigned long __force_order;

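/*
 * native_write_cr0() is deliberately out of line (see
 * arch/x86/kernel/cpu/common.c) so that security-sensitive bits such as
 * CR0.WP can be pinned and re-asserted on every write.
 */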
void native_write_cr0(unsigned long val);

static inline unsigned long native_read_cr0(void)
{
	unsigned long val;
	asm volatile("mov %%cr0,%0\n\t" : "=r" (val), "=m" (__force_order));
	return val;
}

static inline unsigned long native_read_cr2(void)
{
	unsigned long val;
	asm volatile("mov %%cr2,%0\n\t" : "=r" (val), "=m" (__force_order));
	return val;
}

static inline void native_write_cr2(unsigned long val)
{
	asm volatile("mov %0,%%cr2": : "r" (val), "m" (__force_order));
}

static inline unsigned long __native_read_cr3(void)
{
	unsigned long val;
	asm volatile("mov %%cr3,%0\n\t" : "=r" (val), "=m" (__force_order));
	return val;
}

static inline void native_write_cr3(unsigned long val)
{
	asm volatile("mov %0,%%cr3": : "r" (val), "m" (__force_order));
}

static inline unsigned long native_read_cr4(void)
{
	unsigned long val;
#ifdef CONFIG_X86_32
	/*
	 * This could fault if CR4 does not exist.  Non-existent CR4
	 * is functionally equivalent to CR4 == 0.  Keep it simple and pretend
	 * that CR4 == 0 on CPUs that don't have CR4.
	 */
	asm volatile("1: mov %%cr4, %0\n"
		     "2:\n"
		     _ASM_EXTABLE(1b, 2b)
		     : "=r" (val), "=m" (__force_order) : "0" (0));
#else
	/* CR4 always exists on x86_64. */
	asm volatile("mov %%cr4,%0\n\t" : "=r" (val), "=m" (__force_order));
#endif
	return val;
}

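/*
 * Like native_write_cr0(), native_write_cr4() lives out of line in
 * arch/x86/kernel/cpu/common.c so that pinned CR4 bits (e.g. SMEP/SMAP)
 * can be enforced on every write.
 */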
void native_write_cr4(unsigned long val);

#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
static inline u32 rdpkru(void)
{
	u32 ecx = 0;
	u32 edx, pkru;

	/*
	 * "rdpkru" instruction.  Places PKRU contents into EAX,
	 * clears EDX, and requires that ECX = 0.
	 */
	asm volatile(".byte 0x0f,0x01,0xee\n\t"
		     : "=a" (pkru), "=d" (edx)
		     : "c" (ecx));
	return pkru;
}

static inline void wrpkru(u32 pkru)
{
	u32 ecx = 0, edx = 0;

	/*
	 * "wrpkru" instruction.  Loads the contents of EAX into PKRU,
	 * requires that ECX = EDX = 0.
	 */
	asm volatile(".byte 0x0f,0x01,0xef\n\t"
		     : : "a" (pkru), "c" (ecx), "d" (edx));
}

static inline void __write_pkru(u32 pkru)
{
	/*
	 * WRPKRU is relatively expensive compared to RDPKRU.
	 * Avoid WRPKRU when it would not change the value.
	 */
	if (pkru == rdpkru())
		return;

	wrpkru(pkru);
}

#else
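/* Protection keys compiled out: reads report 0 and writes are no-ops. */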
static inline u32 rdpkru(void)
{
	return 0;
}

static inline void __write_pkru(u32 pkru)
{
}
#endif

static inline void native_wbinvd(void)
{
	asm volatile("wbinvd": : :"memory");
}

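/*
 * native_load_gs_index() is implemented in assembly (entry_64.S) so that a
 * faulting GS selector load can be fixed up via the exception table.
 */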
extern asmlinkage void native_load_gs_index(unsigned);

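/*
 * CR4 reads need no paravirt hook: __read_cr4() always uses the native
 * accessor, even when CONFIG_PARAVIRT_XXL routes the other control-register
 * accessors through asm/paravirt.h below.
 */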
static inline unsigned long __read_cr4(void)
{
	return native_read_cr4();
}

#ifdef CONFIG_PARAVIRT_XXL
#include <asm/paravirt.h>
#else

static inline unsigned long read_cr0(void)
{
	return native_read_cr0();
}

static inline void write_cr0(unsigned long x)
{
	native_write_cr0(x);
}

static inline unsigned long read_cr2(void)
{
	return native_read_cr2();
}

static inline void write_cr2(unsigned long x)
{
	native_write_cr2(x);
}

/*
 * Careful!  CR3 contains more than just an address.  You probably want
 * read_cr3_pa() instead.
 */
static inline unsigned long __read_cr3(void)
{
	return __native_read_cr3();
}

static inline void write_cr3(unsigned long x)
{
	native_write_cr3(x);
}

static inline void __write_cr4(unsigned long x)
{
	native_write_cr4(x);
}

static inline void wbinvd(void)
{
	native_wbinvd();
}

#ifdef CONFIG_X86_64

static inline void load_gs_index(unsigned selector)
{
	native_load_gs_index(selector);
}

#endif

#endif /* CONFIG_PARAVIRT_XXL */

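/*
 * clflush() flushes the cache line containing __p from all levels of the
 * cache hierarchy.  The "+m" constraint orders the flush against prior and
 * later accesses to that line.
 */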
static inline void clflush(volatile void *__p)
{
	asm volatile("clflush %0" : "+m" (*(volatile char __force *)__p));
}

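/*
 * clflushopt is encoded as clflush with a 0x66 prefix.  The fallback
 * clflush gets a harmless DS-segment prefix so that both alternatives are
 * the same length and can be patched in place at boot.
 */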
static inline void clflushopt(volatile void *__p)
{
	alternative_io(".byte " __stringify(NOP_DS_PREFIX) "; clflush %P0",
		       ".byte 0x66; clflush %P0",
		       X86_FEATURE_CLFLUSHOPT,
		       "+m" (*(volatile char __force *)__p));
}

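/*
 * clwb() degrades from clwb to clflushopt to clflush, depending on CPU
 * features.  The 64-byte dummy struct makes the compiler treat the whole
 * cache line as read and written, not just the first byte.
 */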
static inline void clwb(volatile void *__p)
{
	volatile struct { char x[64]; } *p = __p;

	asm volatile(ALTERNATIVE_2(
		".byte " __stringify(NOP_DS_PREFIX) "; clflush (%[pax])",
		".byte 0x66; clflush (%[pax])", /* clflushopt (%%rax) */
		X86_FEATURE_CLFLUSHOPT,
		".byte 0x66, 0x0f, 0xae, 0x30",  /* clwb (%%rax) */
		X86_FEATURE_CLWB)
		: [p] "+m" (*p)
		: [pax] "a" (p));
}

#define nop() asm volatile ("nop")


#endif /* __KERNEL__ */

#endif /* _ASM_X86_SPECIAL_INSNS_H */