
Commit 1682425
Peter Zijlstra authored and H. Peter Anvin committed
x86, acpi, idle: Restructure the mwait idle routines
People seem to delight in writing wrong and broken mwait idle routines;
collapse the lot.

This leaves mwait_play_dead() the sole remaining user of __mwait() and
new __mwait() users are probably doing it wrong.

Also remove __sti_mwait() as it's unused.

Cc: Arjan van de Ven <[email protected]>
Cc: Jacob Jun Pan <[email protected]>
Cc: Mike Galbraith <[email protected]>
Cc: Len Brown <[email protected]>
Cc: Rui Zhang <[email protected]>
Acked-by: Rafael Wysocki <[email protected]>
Signed-off-by: Peter Zijlstra <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: H. Peter Anvin <[email protected]>
1 parent 40e2d7f commit 1682425

7 files changed: +43 -78 lines changed

arch/x86/include/asm/mwait.h

+40
@@ -1,6 +1,8 @@
 #ifndef _ASM_X86_MWAIT_H
 #define _ASM_X86_MWAIT_H
 
+#include <linux/sched.h>
+
 #define MWAIT_SUBSTATE_MASK		0xf
 #define MWAIT_CSTATE_MASK		0xf
 #define MWAIT_SUBSTATE_SIZE		4
@@ -13,4 +15,42 @@
 
 #define MWAIT_ECX_INTERRUPT_BREAK	0x1
 
+static inline void __monitor(const void *eax, unsigned long ecx,
+			     unsigned long edx)
+{
+	/* "monitor %eax, %ecx, %edx;" */
+	asm volatile(".byte 0x0f, 0x01, 0xc8;"
+		     :: "a" (eax), "c" (ecx), "d"(edx));
+}
+
+static inline void __mwait(unsigned long eax, unsigned long ecx)
+{
+	/* "mwait %eax, %ecx;" */
+	asm volatile(".byte 0x0f, 0x01, 0xc9;"
+		     :: "a" (eax), "c" (ecx));
+}
+
+/*
+ * This uses new MONITOR/MWAIT instructions on P4 processors with PNI,
+ * which can obviate IPI to trigger checking of need_resched.
+ * We execute MONITOR against need_resched and enter optimized wait state
+ * through MWAIT. Whenever someone changes need_resched, we would be woken
+ * up from MWAIT (without an IPI).
+ *
+ * New with Core Duo processors, MWAIT can take some hints based on CPU
+ * capability.
+ */
+static inline void mwait_idle_with_hints(unsigned long eax, unsigned long ecx)
+{
+	if (!current_set_polling_and_test()) {
+		if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR))
+			clflush((void *)&current_thread_info()->flags);
+
+		__monitor((void *)&current_thread_info()->flags, 0, 0);
+		if (!need_resched())
+			__mwait(eax, ecx);
+	}
+	__current_clr_polling();
+}
+
 #endif /* _ASM_X86_MWAIT_H */
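For orientation, here is a hedged sketch of how a caller feeds this helper. The eax word is an MWAIT hint (target C-state in bits 7:4, sub-state in bits 3:0, per the SDM), and ecx set to MWAIT_ECX_INTERRUPT_BREAK asks MWAIT to wake on an interrupt even when interrupts are masked. The hint value and function name below are illustrative, not taken from this commit:

#include <asm/mwait.h>

/* Illustrative only: 0x20 typically requests a deep C-state on Intel
 * parts (exact mapping is model-specific; consult the SDM).
 */
static void example_deep_idle(void)
{
	unsigned long eax = 0x20;				/* hypothetical MWAIT hint */
	unsigned long ecx = MWAIT_ECX_INTERRUPT_BREAK;		/* wake on interrupt */

	mwait_idle_with_hints(eax, ecx);
}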

arch/x86/include/asm/processor.h

-23
@@ -700,29 +700,6 @@ static inline void sync_core(void)
 #endif
 }
 
-static inline void __monitor(const void *eax, unsigned long ecx,
-			     unsigned long edx)
-{
-	/* "monitor %eax, %ecx, %edx;" */
-	asm volatile(".byte 0x0f, 0x01, 0xc8;"
-		     :: "a" (eax), "c" (ecx), "d"(edx));
-}
-
-static inline void __mwait(unsigned long eax, unsigned long ecx)
-{
-	/* "mwait %eax, %ecx;" */
-	asm volatile(".byte 0x0f, 0x01, 0xc9;"
-		     :: "a" (eax), "c" (ecx));
-}
-
-static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
-{
-	trace_hardirqs_on();
-	/* "mwait %eax, %ecx;" */
-	asm volatile("sti; .byte 0x0f, 0x01, 0xc9;"
-		     :: "a" (eax), "c" (ecx));
-}
-
 extern void select_idle_routine(const struct cpuinfo_x86 *c);
 extern void init_amd_e400_c1e_mask(void);
 

arch/x86/kernel/acpi/cstate.c

-23
@@ -150,29 +150,6 @@ int acpi_processor_ffh_cstate_probe(unsigned int cpu,
 }
 EXPORT_SYMBOL_GPL(acpi_processor_ffh_cstate_probe);
 
-/*
- * This uses new MONITOR/MWAIT instructions on P4 processors with PNI,
- * which can obviate IPI to trigger checking of need_resched.
- * We execute MONITOR against need_resched and enter optimized wait state
- * through MWAIT. Whenever someone changes need_resched, we would be woken
- * up from MWAIT (without an IPI).
- *
- * New with Core Duo processors, MWAIT can take some hints based on CPU
- * capability.
- */
-void mwait_idle_with_hints(unsigned long ax, unsigned long cx)
-{
-	if (!need_resched()) {
-		if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR))
-			clflush((void *)&current_thread_info()->flags);
-
-		__monitor((void *)&current_thread_info()->flags, 0, 0);
-		smp_mb();
-		if (!need_resched())
-			__mwait(ax, cx);
-	}
-}
-
 void acpi_processor_ffh_cstate_enter(struct acpi_processor_cx *cx)
 {
 	unsigned int cpu = smp_processor_id();
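Note that the explicit smp_mb() removed here is not lost: it moved into current_set_polling_and_test(), which the consolidated helper calls first. A simplified sketch of that scheduler helper as it looked around this kernel (an assumption reconstructed from include/linux/sched.h of the era, not part of this diff):

static inline bool __must_check current_set_polling_and_test(void)
{
	__current_set_polling();	/* advertise TIF_POLLING_NRFLAG */

	/*
	 * The polling bit must be visible before we read NEED_RESCHED,
	 * paired with resched_task() so it can skip the wake-up IPI.
	 */
	smp_mb();

	return unlikely(tif_need_resched());
}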

drivers/acpi/acpi_pad.c

+1 -4
@@ -193,10 +193,7 @@ static int power_saving_thread(void *data)
 				CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);
 		stop_critical_timings();
 
-		__monitor((void *)&current_thread_info()->flags, 0, 0);
-		smp_mb();
-		if (!need_resched())
-			__mwait(power_saving_mwait_eax, 1);
+		mwait_idle_with_hints(power_saving_mwait_eax, 1);
 
 		start_critical_timings();
 		if (lapic_marked_unstable)
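For context on the eax argument here: acpi_pad computes power_saving_mwait_eax once at init, from the deepest MWAIT C-state CPUID advertises. A condensed, hedged sketch of that derivation (reconstructed from the driver's init path; CPUID-level and feature-bit validation omitted):

static unsigned long power_saving_mwait_eax;

static void power_saving_mwait_init(void)
{
	unsigned int eax, ebx, ecx, edx;
	unsigned int highest_cstate = 0, highest_subcstate = 0;
	int i;

	cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx);

	/* EDX holds one 4-bit sub-state count per C-state */
	edx >>= MWAIT_SUBSTATE_SIZE;
	for (i = 0; i < 7 && edx; i++, edx >>= MWAIT_SUBSTATE_SIZE) {
		if (edx & MWAIT_SUBSTATE_MASK) {
			highest_cstate = i;
			highest_subcstate = edx & MWAIT_SUBSTATE_MASK;
		}
	}
	power_saving_mwait_eax = (highest_cstate << MWAIT_SUBSTATE_SIZE) |
		(highest_subcstate - 1);
}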

drivers/acpi/processor_idle.c

-15
@@ -727,11 +727,6 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev,
 	if (unlikely(!pr))
 		return -EINVAL;
 
-	if (cx->entry_method == ACPI_CSTATE_FFH) {
-		if (current_set_polling_and_test())
-			return -EINVAL;
-	}
-
 	lapic_timer_state_broadcast(pr, cx, 1);
 	acpi_idle_do_entry(cx);
 
@@ -785,11 +780,6 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
 	if (unlikely(!pr))
 		return -EINVAL;
 
-	if (cx->entry_method == ACPI_CSTATE_FFH) {
-		if (current_set_polling_and_test())
-			return -EINVAL;
-	}
-
 	/*
 	 * Must be done before busmaster disable as we might need to
 	 * access HPET !
@@ -841,11 +831,6 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
 		}
 	}
 
-	if (cx->entry_method == ACPI_CSTATE_FFH) {
-		if (current_set_polling_and_test())
-			return -EINVAL;
-	}
-
 	acpi_unlazy_tlb(smp_processor_id());
 
 	/* Tell the scheduler that we are going deep-idle: */
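These per-caller polling checks became redundant because the FFH entry path now bottoms out in mwait_idle_with_hints(), which does the polling-flag set-and-test itself. A hedged sketch of that path (reconstructed from arch/x86/kernel/acpi/cstate.c of this era; not part of this diff):

void acpi_processor_ffh_cstate_enter(struct acpi_processor_cx *cx)
{
	unsigned int cpu = smp_processor_id();
	struct cstate_entry *percpu_entry;

	percpu_entry = per_cpu_ptr(cpu_cstate_entry, cpu);
	/* lands in mwait_idle_with_hints(), which now handles polling */
	mwait_idle_with_hints(percpu_entry->states[cx->index].eax,
			      percpu_entry->states[cx->index].ecx);
}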

drivers/idle/intel_idle.c

+1 -10
@@ -375,16 +375,7 @@ static int intel_idle(struct cpuidle_device *dev,
 	if (!(lapic_timer_reliable_states & (1 << (cstate))))
 		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);
 
-	if (!current_set_polling_and_test()) {
-
-		if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR))
-			clflush((void *)&current_thread_info()->flags);
-
-		__monitor((void *)&current_thread_info()->flags, 0, 0);
-		smp_mb();
-		if (!need_resched())
-			__mwait(eax, ecx);
-	}
+	mwait_idle_with_hints(eax, ecx);
 
 	if (!(lapic_timer_reliable_states & (1 << (cstate))))
 		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);

drivers/thermal/intel_powerclamp.c

+1 -3
@@ -438,9 +438,7 @@ static int clamp_thread(void *arg)
 		 */
 		local_touch_nmi();
 		stop_critical_timings();
-		__monitor((void *)&current_thread_info()->flags, 0, 0);
-		cpu_relax(); /* allow HT sibling to run */
-		__mwait(eax, ecx);
+		mwait_idle_with_hints(eax, ecx);
 		start_critical_timings();
 		atomic_inc(&idle_wakeup_counter);
 	}
