Skip to content

Commit

Permalink
KVM: selftests: Stuff RAX/RCX with 'safe' values in vmmcall()/vmcall()
Browse files Browse the repository at this point in the history
vmmcall()/vmcall() are used to exit from L2 to L1 and no concrete hypercall
ABI is currently followed. With the introduction of Hyper-V L2 TLB flush
it becomes (theoretically) possible that L0 will take responsibility for
handling the call and no L1 exit will happen. Prevent this by stuffing RAX
(KVM ABI) and RCX (Hyper-V ABI) with 'safe' values.

While on it, convert vmmcall() to 'static inline', make it setup stack
frame and move to include/x86_64/svm_util.h.

Signed-off-by: Vitaly Kuznetsov <[email protected]>
Reviewed-by: Sean Christopherson <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
Message-Id: <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
  • Loading branch information
vittyvk authored and bonzini committed Nov 21, 2022
1 parent 6c15c3c commit 8fda37c
Show file tree
Hide file tree
Showing 3 changed files with 24 additions and 10 deletions.
5 changes: 0 additions & 5 deletions tools/testing/selftests/kvm/include/x86_64/processor.h
Original file line number Diff line number Diff line change
Expand Up @@ -677,11 +677,6 @@ static inline void cpu_relax(void)
asm volatile("rep; nop" ::: "memory");
}

/*
 * Execute VMMCALL to exit from L2 to L1.
 *
 * NOTE(review): this macro leaves RAX/RCX at whatever values the caller had,
 * so the call may be interpreted as a real hypercall by L0 (e.g. Hyper-V
 * L2 TLB flush) instead of reaching L1; it also declares no clobbers even
 * though L1 may not preserve GPRs across the exit. Prefer the static inline
 * vmmcall() in svm_util.h which stuffs RAX/RCX with safe values.
 */
#define vmmcall() \
__asm__ __volatile__( \
"vmmcall\n" \
)

#define ud2() \
__asm__ __volatile__( \
"ud2\n" \
Expand Down
14 changes: 14 additions & 0 deletions tools/testing/selftests/kvm/include/x86_64/svm_util.h
Original file line number Diff line number Diff line change
Expand Up @@ -32,6 +32,20 @@ struct svm_test_data {
uint64_t msr_gpa;
};

/*
 * vmmcall() - trigger an SVM VMMCALL exit from L2 to L1.
 *
 * No return value; the visible effect is the vmexit itself plus the GPR
 * clobbers declared below.
 */
static inline void vmmcall(void)
{
	/*
	 * Stuff RAX and RCX with "safe" values to make sure L0 doesn't handle
	 * it as a valid hypercall (e.g. Hyper-V L2 TLB flush) as the intended
	 * use of this function is to exit to L1 from L2. Clobber all other
	 * GPRs as L1 doesn't correctly preserve them during vmexits.
	 */
	/*
	 * RBP cannot appear in the clobber list (it may be the frame
	 * pointer), so save/restore it manually around the VMMCALL.
	 * "a"/"c" input constraints pin RAX/RCX to the safe values.
	 */
	__asm__ __volatile__("push %%rbp; vmmcall; pop %%rbp"
			     : : "a"(0xdeadbeef), "c"(0xbeefdead)
			     : "rbx", "rdx", "rsi", "rdi", "r8", "r9",
			       "r10", "r11", "r12", "r13", "r14", "r15");
}

#define stgi() \
__asm__ __volatile__( \
"stgi\n" \
Expand Down
15 changes: 10 additions & 5 deletions tools/testing/selftests/kvm/include/x86_64/vmx.h
Original file line number Diff line number Diff line change
Expand Up @@ -437,11 +437,16 @@ static inline int vmresume(void)

/*
 * vmcall() - trigger a VMX VMCALL exit from L2 to L1.
 *
 * No return value; the visible effect is the vmexit itself plus the GPR
 * clobbers declared below.
 */
static inline void vmcall(void)
{
	/*
	 * Stuff RAX and RCX with "safe" values to make sure L0 doesn't handle
	 * it as a valid hypercall (e.g. Hyper-V L2 TLB flush) as the intended
	 * use of this function is to exit to L1 from L2. Clobber all other
	 * GPRs as L1 doesn't correctly preserve them during vmexits.
	 *
	 * RBP cannot be listed as a clobber (it may be the frame pointer),
	 * so save/restore it manually around the VMCALL.
	 */
	__asm__ __volatile__("push %%rbp; vmcall; pop %%rbp"
			     : : "a"(0xdeadbeef), "c"(0xbeefdead)
			     : "rbx", "rdx", "rsi", "rdi", "r8", "r9",
			       "r10", "r11", "r12", "r13", "r14", "r15");
}

static inline int vmread(uint64_t encoding, uint64_t *value)
Expand Down

0 comments on commit 8fda37c

Please sign in to comment.