x86/cpu: Refactor sync_core() for readability
Instead of having #ifdef/#endif blocks inside sync_core() for X86_64 and
X86_32, implement the new function iret_to_self() with two versions.

In this manner, avoid having to use even more #ifdef/#endif blocks
when adding support for SERIALIZE in sync_core().

Co-developed-by: Tony Luck <[email protected]>
Signed-off-by: Tony Luck <[email protected]>
Signed-off-by: Ricardo Neri <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
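
For context, the payoff of the refactor is that a later change can pick a serialization method inside sync_core() without touching the architecture-specific IRET sequences. A minimal sketch of what that ends up looking like, assuming a serialize() helper wrapping the SERIALIZE instruction and an X86_FEATURE_SERIALIZE feature flag (both come from a follow-up change, not from this commit):

static inline void sync_core(void)
{
	/*
	 * Sketch only: serialize() and X86_FEATURE_SERIALIZE are assumed
	 * here; this commit merely factors out iret_to_self() so that the
	 * check below needs no per-architecture #ifdef.
	 */
	if (static_cpu_has(X86_FEATURE_SERIALIZE)) {
		serialize();
		return;
	}

	/* Fall back to the IRET-to-self trick on older CPUs. */
	iret_to_self();
}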
ricardon authored and Ingo Molnar committed Jul 27, 2020
1 parent 9998a98 commit f69ca62
Showing 2 changed files with 32 additions and 25 deletions.
1 change: 0 additions & 1 deletion arch/x86/include/asm/special_insns.h
@@ -234,7 +234,6 @@ static inline void clwb(volatile void *__p)
 
 #define nop() asm volatile ("nop")
 
-
 #endif /* __KERNEL__ */
 
 #endif /* _ASM_X86_SPECIAL_INSNS_H */
56 changes: 32 additions & 24 deletions arch/x86/include/asm/sync_core.h
@@ -6,6 +6,37 @@
 #include <asm/processor.h>
 #include <asm/cpufeature.h>
 
+#ifdef CONFIG_X86_32
+static inline void iret_to_self(void)
+{
+	asm volatile (
+		"pushfl\n\t"
+		"pushl %%cs\n\t"
+		"pushl $1f\n\t"
+		"iret\n\t"
+		"1:"
+		: ASM_CALL_CONSTRAINT : : "memory");
+}
+#else
+static inline void iret_to_self(void)
+{
+	unsigned int tmp;
+
+	asm volatile (
+		"mov %%ss, %0\n\t"
+		"pushq %q0\n\t"
+		"pushq %%rsp\n\t"
+		"addq $8, (%%rsp)\n\t"
+		"pushfq\n\t"
+		"mov %%cs, %0\n\t"
+		"pushq %q0\n\t"
+		"pushq $1f\n\t"
+		"iretq\n\t"
+		"1:"
+		: "=&r" (tmp), ASM_CALL_CONSTRAINT : : "cc", "memory");
+}
+#endif /* CONFIG_X86_32 */
+
 /*
  * This function forces the icache and prefetched instruction stream to
  * catch up with reality in two very specific cases:
@@ -44,30 +44,7 @@ static inline void sync_core(void)
 	 * Like all of Linux's memory ordering operations, this is a
 	 * compiler barrier as well.
 	 */
-#ifdef CONFIG_X86_32
-	asm volatile (
-		"pushfl\n\t"
-		"pushl %%cs\n\t"
-		"pushl $1f\n\t"
-		"iret\n\t"
-		"1:"
-		: ASM_CALL_CONSTRAINT : : "memory");
-#else
-	unsigned int tmp;
-
-	asm volatile (
-		"mov %%ss, %0\n\t"
-		"pushq %q0\n\t"
-		"pushq %%rsp\n\t"
-		"addq $8, (%%rsp)\n\t"
-		"pushfq\n\t"
-		"mov %%cs, %0\n\t"
-		"pushq %q0\n\t"
-		"pushq $1f\n\t"
-		"iretq\n\t"
-		"1:"
-		: "=&r" (tmp), ASM_CALL_CONSTRAINT : : "cc", "memory");
-#endif
+	iret_to_self();
 }
 
 /*
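
As a usage note, sync_core() is what the kernel's text-patching machinery runs on every CPU after modifying code in place, which is why the IRET-to-self sequence has to act as a full instruction-stream serialization point. A rough sketch of that caller pattern, modeled on arch/x86/kernel/alternative.c (the sync_all_cpus() wrapper name is made up for illustration):

#include <linux/smp.h>		/* on_each_cpu() */
#include <asm/sync_core.h>	/* sync_core() */

/* Per-CPU callback: resynchronize this CPU's instruction stream. */
static void do_sync_core(void *info)
{
	sync_core();
}

/* Illustrative wrapper: run do_sync_core() on every CPU and wait. */
static void sync_all_cpus(void)
{
	on_each_cpu(do_sync_core, NULL, 1);
}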
