Merge branch 'svm' of https://github.com/kvm-x86/linux into HEAD
Clean up SVM's enter/exit assembly code so that it can be compiled
without OBJECT_FILES_NON_STANDARD.  The "standard" __svm_vcpu_run() can't
be made 100% bulletproof, as RBP isn't restored on #VMEXIT, but that's
also the case for __vmx_vcpu_run(), and getting "close enough" is better
than not even trying.
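
For context, the "standard" form objtool accepts with CONFIG_FRAME_POINTER=y is just the usual push/mov prologue paired with a pop in the epilogue; the vmenter.S hunk below adds exactly that to __svm_vcpu_run.  A minimal sketch of the pattern (hypothetical example_func, using the kernel's SYM_FUNC_* and _ASM_* macros; not the actual KVM code):

SYM_FUNC_START(example_func)
	push %_ASM_BP			/* save the caller's frame pointer */
	mov  %_ASM_SP, %_ASM_BP		/* establish this function's frame */

	/* ... body: the unwinder can walk from RBP back to the caller ... */

	pop  %_ASM_BP			/* restore the caller's frame pointer */
	RET
SYM_FUNC_END(example_func)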

As for SEV-ES, after yet another refresher on swap types, I realized
KVM can simply let the hardware restore registers after #VMEXIT; all
that's missing is storing the current values to the host save area
(they are swap type B).  This should provide 100% accuracy when using
stack frames for unwinding, and requires less assembly.
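
Concretely, "swap type B" here means the CPU loads these GPRs from the
host save area on #VMEXIT but does not write them there on VMRUN, so
software has to populate the save area itself before entering the guest.
A rough sketch of the idea, assuming @hostsa arrives in RDX (third
argument) and using the SEV_ES_* offsets defined in the vmenter.S hunk
below:

	/* Stash callee-saved GPRs in the host save area before VMRUN. */
	mov %rbp, SEV_ES_RBP (%rdx)
	mov %rbx, SEV_ES_RBX (%rdx)
	/* ... likewise for R12-R15, RSI and RDI ... */

	vmrun %rax

	/* No explicit restore: hardware already reloaded them on #VMEXIT. */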

In between, build the SEV-ES code iff CONFIG_KVM_AMD_SEV=y, and yank out
"support" for 32-bit kernels in __svm_sev_es_vcpu_run, which was
unnecessarily polluting the code for a configuration that is disabled
at build time.
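
The gating itself is nothing more than a preprocessor guard around the
whole SEV-ES routine, which in turn lets the body use 64-bit register
names directly instead of the width-agnostic _ASM_* macros.  As a sketch
of the shape the vmenter.S diff below ends up with:

#ifdef CONFIG_KVM_AMD_SEV

SYM_FUNC_START(__svm_sev_es_vcpu_run)
	/* 64-bit only, so %rax, %rdi, %rsi, etc. can be used directly. */
	/* ... body elided ... */
SYM_FUNC_END(__svm_sev_es_vcpu_run)

#endif /* CONFIG_KVM_AMD_SEV */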

Signed-off-by: Paolo Bonzini <[email protected]>
bonzini committed Apr 17, 2024
2 parents 1c3bed8 + 27ca867 commit 44ecfa3
Showing 5 changed files with 57 additions and 67 deletions.
5 changes: 0 additions & 5 deletions arch/x86/kvm/Makefile
@@ -3,11 +3,6 @@
ccflags-y += -I $(srctree)/arch/x86/kvm
ccflags-$(CONFIG_KVM_WERROR) += -Werror

ifeq ($(CONFIG_FRAME_POINTER),y)
OBJECT_FILES_NON_STANDARD_vmx/vmenter.o := y
OBJECT_FILES_NON_STANDARD_svm/vmenter.o := y
endif

include $(srctree)/virt/kvm/Makefile.kvm

kvm-y += x86.o emulate.o i8259.o irq.o lapic.o \
2 changes: 1 addition & 1 deletion arch/x86/kvm/svm/sev.c
@@ -434,7 +434,7 @@ static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
/* Avoid using vmalloc for smaller buffers. */
size = npages * sizeof(struct page *);
if (size > PAGE_SIZE)
pages = __vmalloc(size, GFP_KERNEL_ACCOUNT | __GFP_ZERO);
pages = __vmalloc(size, GFP_KERNEL_ACCOUNT);
else
pages = kmalloc(size, GFP_KERNEL_ACCOUNT);

17 changes: 10 additions & 7 deletions arch/x86/kvm/svm/svm.c
@@ -1503,6 +1503,11 @@ static void svm_vcpu_free(struct kvm_vcpu *vcpu)
__free_pages(virt_to_page(svm->msrpm), get_order(MSRPM_SIZE));
}

static struct sev_es_save_area *sev_es_host_save_area(struct svm_cpu_data *sd)
{
return page_address(sd->save_area) + 0x400;
}

static void svm_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
@@ -1519,12 +1524,8 @@ static void svm_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
* or subsequent vmload of host save area.
*/
vmsave(sd->save_area_pa);
if (sev_es_guest(vcpu->kvm)) {
struct sev_es_save_area *hostsa;
hostsa = (struct sev_es_save_area *)(page_address(sd->save_area) + 0x400);

sev_es_prepare_switch_to_guest(hostsa);
}
if (sev_es_guest(vcpu->kvm))
sev_es_prepare_switch_to_guest(sev_es_host_save_area(sd));

if (tsc_scaling)
__svm_write_tsc_multiplier(vcpu->arch.tsc_scaling_ratio);
@@ -4101,14 +4102,16 @@ static fastpath_t svm_exit_handlers_fastpath(struct kvm_vcpu *vcpu)

static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu, bool spec_ctrl_intercepted)
{
struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, vcpu->cpu);
struct vcpu_svm *svm = to_svm(vcpu);

guest_state_enter_irqoff();

amd_clear_divider();

if (sev_es_guest(vcpu->kvm))
__svm_sev_es_vcpu_run(svm, spec_ctrl_intercepted);
__svm_sev_es_vcpu_run(svm, spec_ctrl_intercepted,
sev_es_host_save_area(sd));
else
__svm_vcpu_run(svm, spec_ctrl_intercepted);

3 changes: 2 additions & 1 deletion arch/x86/kvm/svm/svm.h
@@ -698,7 +698,8 @@ struct page *snp_safe_alloc_page(struct kvm_vcpu *vcpu);

/* vmenter.S */

void __svm_sev_es_vcpu_run(struct vcpu_svm *svm, bool spec_ctrl_intercepted);
void __svm_sev_es_vcpu_run(struct vcpu_svm *svm, bool spec_ctrl_intercepted,
struct sev_es_save_area *hostsa);
void __svm_vcpu_run(struct vcpu_svm *svm, bool spec_ctrl_intercepted);

#define DEFINE_KVM_GHCB_ACCESSORS(field) \
97 changes: 44 additions & 53 deletions arch/x86/kvm/svm/vmenter.S
@@ -3,6 +3,7 @@
#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/bitsperlong.h>
#include <asm/frame.h>
#include <asm/kvm_vcpu_regs.h>
#include <asm/nospec-branch.h>
#include "kvm-asm-offsets.h"
@@ -67,7 +68,7 @@
"", X86_FEATURE_V_SPEC_CTRL
901:
.endm
.macro RESTORE_HOST_SPEC_CTRL_BODY
.macro RESTORE_HOST_SPEC_CTRL_BODY spec_ctrl_intercepted:req
900:
/* Same for after vmexit. */
mov $MSR_IA32_SPEC_CTRL, %ecx
@@ -76,7 +77,7 @@
* Load the value that the guest had written into MSR_IA32_SPEC_CTRL,
* if it was not intercepted during guest execution.
*/
cmpb $0, (%_ASM_SP)
cmpb $0, \spec_ctrl_intercepted
jnz 998f
rdmsr
movl %eax, SVM_spec_ctrl(%_ASM_DI)
@@ -99,6 +100,7 @@
*/
SYM_FUNC_START(__svm_vcpu_run)
push %_ASM_BP
mov %_ASM_SP, %_ASM_BP
#ifdef CONFIG_X86_64
push %r15
push %r14
@@ -268,7 +270,7 @@ SYM_FUNC_START(__svm_vcpu_run)
RET

RESTORE_GUEST_SPEC_CTRL_BODY
RESTORE_HOST_SPEC_CTRL_BODY
RESTORE_HOST_SPEC_CTRL_BODY (%_ASM_SP)

10: cmpb $0, _ASM_RIP(kvm_rebooting)
jne 2b
@@ -290,66 +292,68 @@ SYM_FUNC_START(__svm_vcpu_run)

SYM_FUNC_END(__svm_vcpu_run)

#ifdef CONFIG_KVM_AMD_SEV


#ifdef CONFIG_X86_64
#define SEV_ES_GPRS_BASE 0x300
#define SEV_ES_RBX (SEV_ES_GPRS_BASE + __VCPU_REGS_RBX * WORD_SIZE)
#define SEV_ES_RBP (SEV_ES_GPRS_BASE + __VCPU_REGS_RBP * WORD_SIZE)
#define SEV_ES_RSI (SEV_ES_GPRS_BASE + __VCPU_REGS_RSI * WORD_SIZE)
#define SEV_ES_RDI (SEV_ES_GPRS_BASE + __VCPU_REGS_RDI * WORD_SIZE)
#define SEV_ES_R12 (SEV_ES_GPRS_BASE + __VCPU_REGS_R12 * WORD_SIZE)
#define SEV_ES_R13 (SEV_ES_GPRS_BASE + __VCPU_REGS_R13 * WORD_SIZE)
#define SEV_ES_R14 (SEV_ES_GPRS_BASE + __VCPU_REGS_R14 * WORD_SIZE)
#define SEV_ES_R15 (SEV_ES_GPRS_BASE + __VCPU_REGS_R15 * WORD_SIZE)
#endif

/**
* __svm_sev_es_vcpu_run - Run a SEV-ES vCPU via a transition to SVM guest mode
* @svm: struct vcpu_svm *
* @spec_ctrl_intercepted: bool
*/
SYM_FUNC_START(__svm_sev_es_vcpu_run)
push %_ASM_BP
#ifdef CONFIG_X86_64
push %r15
push %r14
push %r13
push %r12
#else
push %edi
push %esi
#endif
push %_ASM_BX
FRAME_BEGIN

/*
* Save variables needed after vmexit on the stack, in inverse
* order compared to when they are needed.
* Save non-volatile (callee-saved) registers to the host save area.
* Except for RAX and RSP, all GPRs are restored on #VMEXIT, but not
* saved on VMRUN.
*/
mov %rbp, SEV_ES_RBP (%rdx)
mov %r15, SEV_ES_R15 (%rdx)
mov %r14, SEV_ES_R14 (%rdx)
mov %r13, SEV_ES_R13 (%rdx)
mov %r12, SEV_ES_R12 (%rdx)
mov %rbx, SEV_ES_RBX (%rdx)

/* Accessed directly from the stack in RESTORE_HOST_SPEC_CTRL. */
push %_ASM_ARG2

/* Save @svm. */
push %_ASM_ARG1

.ifnc _ASM_ARG1, _ASM_DI
/*
* Stash @svm in RDI early. On 32-bit, arguments are in RAX, RCX
* and RDX which are clobbered by RESTORE_GUEST_SPEC_CTRL.
* Save volatile registers that hold arguments that are needed after
* #VMEXIT (RDI=@svm and RSI=@spec_ctrl_intercepted).
*/
mov %_ASM_ARG1, %_ASM_DI
.endif
mov %rdi, SEV_ES_RDI (%rdx)
mov %rsi, SEV_ES_RSI (%rdx)

/* Clobbers RAX, RCX, RDX. */
/* Clobbers RAX, RCX, RDX (@hostsa). */
RESTORE_GUEST_SPEC_CTRL

/* Get svm->current_vmcb->pa into RAX. */
mov SVM_current_vmcb(%_ASM_DI), %_ASM_AX
mov KVM_VMCB_pa(%_ASM_AX), %_ASM_AX
mov SVM_current_vmcb(%rdi), %rax
mov KVM_VMCB_pa(%rax), %rax

/* Enter guest mode */
sti

1: vmrun %_ASM_AX
1: vmrun %rax

2: cli

/* Pop @svm to RDI, guest registers have been saved already. */
pop %_ASM_DI

#ifdef CONFIG_MITIGATION_RETPOLINE
/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
FILL_RETURN_BUFFER %rax, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
#endif

/* Clobbers RAX, RCX, RDX. */
/* Clobbers RAX, RCX, RDX, consumes RDI (@svm) and RSI (@spec_ctrl_intercepted). */
RESTORE_HOST_SPEC_CTRL

/*
@@ -361,30 +365,17 @@ SYM_FUNC_START(__svm_sev_es_vcpu_run)
*/
UNTRAIN_RET_VM

/* "Pop" @spec_ctrl_intercepted. */
pop %_ASM_BX

pop %_ASM_BX

#ifdef CONFIG_X86_64
pop %r12
pop %r13
pop %r14
pop %r15
#else
pop %esi
pop %edi
#endif
pop %_ASM_BP
FRAME_END
RET

RESTORE_GUEST_SPEC_CTRL_BODY
RESTORE_HOST_SPEC_CTRL_BODY
RESTORE_HOST_SPEC_CTRL_BODY %sil

3: cmpb $0, _ASM_RIP(kvm_rebooting)
3: cmpb $0, kvm_rebooting(%rip)
jne 2b
ud2

_ASM_EXTABLE(1b, 3b)

SYM_FUNC_END(__svm_sev_es_vcpu_run)
#endif /* CONFIG_KVM_AMD_SEV */
