@@ -1,6 +1,8 @@
 #ifndef _ASM_X86_MWAIT_H
 #define _ASM_X86_MWAIT_H
 
+#include <linux/sched.h>
+
 #define MWAIT_SUBSTATE_MASK 0xf
 #define MWAIT_CSTATE_MASK 0xf
 #define MWAIT_SUBSTATE_SIZE 4
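As an aside, the masks above describe the MWAIT hint layout in EAX: bits 7:4
select the target C-state and bits 3:0 a sub-state. A minimal sketch of how a
caller might pack a hint, assuming that layout (the mwait_hint() helper is
hypothetical and not part of this patch):

static inline unsigned long mwait_hint(unsigned int cstate,
				       unsigned int substate)
{
	/* EAX[7:4] = target C-state, EAX[3:0] = sub-state */
	return ((cstate & MWAIT_CSTATE_MASK) << MWAIT_SUBSTATE_SIZE) |
	       (substate & MWAIT_SUBSTATE_MASK);
}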
@@ -13,4 +15,42 @@
 
 #define MWAIT_ECX_INTERRUPT_BREAK 0x1
 
+static inline void __monitor(const void *eax, unsigned long ecx,
+			     unsigned long edx)
+{
+	/* "monitor %eax, %ecx, %edx;" */
+	asm volatile(".byte 0x0f, 0x01, 0xc8;"
+		     :: "a" (eax), "c" (ecx), "d"(edx));
+}
+
+static inline void __mwait(unsigned long eax, unsigned long ecx)
+{
+	/* "mwait %eax, %ecx;" */
+	asm volatile(".byte 0x0f, 0x01, 0xc9;"
+		     :: "a" (eax), "c" (ecx));
+}
+
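The two wrappers above encode the instructions as raw opcode bytes (0x0f 0x01
0xc8 is MONITOR, 0x0f 0x01 0xc9 is MWAIT) so they assemble even where the
assembler predates the mnemonics. Assuming a binutils that knows them, an
equivalent spelling would be (illustrative sketch, not part of the patch):

static inline void __mwait_mnemonic(unsigned long eax, unsigned long ecx)
{
	/* same operands as __mwait() above, using the mnemonic form */
	asm volatile("mwait" :: "a" (eax), "c" (ecx));
}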
+/*
+ * This uses the MONITOR/MWAIT instructions, new on P4 processors with
+ * PNI, which can obviate the IPI otherwise needed to trigger a check of
+ * need_resched. We execute MONITOR against the need_resched flag and
+ * enter an optimized wait state through MWAIT. Whenever someone changes
+ * need_resched, we are woken up from MWAIT without an IPI.
+ *
+ * Starting with Core Duo processors, MWAIT can take hints based on CPU
+ * capability.
+ */
+static inline void mwait_idle_with_hints(unsigned long eax, unsigned long ecx)
+{
+	if (!current_set_polling_and_test()) {
+		if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR))
+			clflush((void *)&current_thread_info()->flags);
+
+		__monitor((void *)&current_thread_info()->flags, 0, 0);
+		if (!need_resched())
+			__mwait(eax, ecx);
+	}
+	__current_clr_polling();
+}
+
 #endif /* _ASM_X86_MWAIT_H */
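A sketch of how an idle driver might use the new helper, assuming
MONITOR/MWAIT support has already been established via CPUID
(example_mwait_idle() is hypothetical; in-tree callers of
mwait_idle_with_hints() include intel_idle and the ACPI FFH C-state code):

static void example_mwait_idle(void)
{
	/*
	 * EAX = 0 hints the shallowest C-state (C1);
	 * MWAIT_ECX_INTERRUPT_BREAK makes interrupts break events even
	 * when they are masked, provided CPUID.05H enumerates support.
	 */
	mwait_idle_with_hints(0, MWAIT_ECX_INTERRUPT_BREAK);
}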