xref: /openbmc/linux/arch/x86/include/asm/mwait.h (revision 5f32c314)
1 #ifndef _ASM_X86_MWAIT_H
2 #define _ASM_X86_MWAIT_H
3 
4 #include <linux/sched.h>
5 
/*
 * MWAIT hint layout (EAX): bits 3:0 = sub C-state, bits 7:4 = C-state.
 */
#define MWAIT_SUBSTATE_MASK		0xf
#define MWAIT_CSTATE_MASK		0xf
#define MWAIT_SUBSTATE_SIZE		4
#define MWAIT_HINT2CSTATE(hint)		(((hint) >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK)
/* Use the substate mask here (was CSTATE_MASK; same value, wrong symbol). */
#define MWAIT_HINT2SUBSTATE(hint)	((hint) & MWAIT_SUBSTATE_MASK)

/* CPUID leaf 5 reports MONITOR/MWAIT capabilities. */
#define CPUID_MWAIT_LEAF		5
#define CPUID5_ECX_EXTENSIONS_SUPPORTED 0x1
#define CPUID5_ECX_INTERRUPT_BREAK	0x2

/* MWAIT ECX bit 0: interrupts break MWAIT even when masked. */
#define MWAIT_ECX_INTERRUPT_BREAK	0x1
17 
/*
 * Arm the CPU's address monitor on the cache line containing @eax.
 * Emitted as raw opcode bytes (0f 01 c8) so the file assembles even with
 * toolchains that lack the MONITOR mnemonic; the operands are bound to the
 * registers the instruction implicitly reads.
 *
 * @eax: address to monitor (goes in %eax/%rax)
 * @ecx: extensions, currently passed as 0 by callers in this file
 * @edx: hints, currently passed as 0 by callers in this file
 */
static inline void __monitor(const void *eax, unsigned long ecx,
			     unsigned long edx)
{
	/* "monitor %eax, %ecx, %edx;" */
	asm volatile(".byte 0x0f, 0x01, 0xc8;"
		     :: "a" (eax), "c" (ecx), "d"(edx));
}
25 
/*
 * Enter the MWAIT wait state previously armed by __monitor().
 * Emitted as raw opcode bytes (0f 01 c9) for old-assembler compatibility.
 *
 * @eax: target state hint (see MWAIT_HINT2CSTATE/MWAIT_HINT2SUBSTATE layout)
 * @ecx: extension flags, e.g. MWAIT_ECX_INTERRUPT_BREAK
 */
static inline void __mwait(unsigned long eax, unsigned long ecx)
{
	/* "mwait %eax, %ecx;" */
	asm volatile(".byte 0x0f, 0x01, 0xc9;"
		     :: "a" (eax), "c" (ecx));
}
32 
/*
 * This uses new MONITOR/MWAIT instructions on P4 processors with PNI,
 * which can obviate IPI to trigger checking of need_resched.
 * We execute MONITOR against need_resched and enter optimized wait state
 * through MWAIT. Whenever someone changes need_resched, we would be woken
 * up from MWAIT (without an IPI).
 *
 * New with Core Duo processors, MWAIT can take some hints based on CPU
 * capability.
 *
 * NOTE(review): the statement order below is load-bearing — the optional
 * flush, then MONITOR, then a need_resched() recheck, then MWAIT.  Do not
 * reorder.
 */
static inline void mwait_idle_with_hints(unsigned long eax, unsigned long ecx)
{
	/* Only idle via MWAIT if nobody already wants us to reschedule. */
	if (!current_set_polling_and_test()) {
		if (static_cpu_has(X86_FEATURE_CLFLUSH_MONITOR)) {
			/*
			 * Quirk: CPUs flagged CLFLUSH_MONITOR need the
			 * monitored cache line flushed before MONITOR or
			 * they can miss the wakeup store.  The barriers
			 * keep the flush ordered against the MONITOR below.
			 */
			mb();
			clflush((void *)&current_thread_info()->flags);
			mb();
		}

		__monitor((void *)&current_thread_info()->flags, 0, 0);
		/*
		 * Re-check after arming the monitor: a wakeup that landed
		 * between the test above and MONITOR would otherwise be
		 * lost, leaving us asleep with work pending.
		 */
		if (!need_resched())
			__mwait(eax, ecx);
	}
	/* Leaving the idle path: clear the polling flag unconditionally. */
	current_clr_polling();
}
58 
59 #endif /* _ASM_X86_MWAIT_H */
60