KVM: x86: nSVM: refactor svm_leave_smm and svm_enter_smm
Use early returns instead of nested ifs, and fix the error
path to free all the maps that were allocated.

Suggested-by: Sean Christopherson <[email protected]>
Signed-off-by: Maxim Levitsky <[email protected]>
Message-Id: <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
Maxim Levitsky authored and bonzini committed Sep 22, 2021
1 parent e85d3e7 commit 136a55c
Showing 1 changed file with 69 additions and 66 deletions.
arch/x86/kvm/svm/svm.c
@@ -4285,96 +4285,99 @@ static int svm_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
 	struct kvm_host_map map_save;
 	int ret;
 
-	if (is_guest_mode(vcpu)) {
-		/* FED8h - SVM Guest */
-		put_smstate(u64, smstate, 0x7ed8, 1);
-		/* FEE0h - SVM Guest VMCB Physical Address */
-		put_smstate(u64, smstate, 0x7ee0, svm->nested.vmcb12_gpa);
+	if (!is_guest_mode(vcpu))
+		return 0;
 
-		svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
-		svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
-		svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
+	/* FED8h - SVM Guest */
+	put_smstate(u64, smstate, 0x7ed8, 1);
+	/* FEE0h - SVM Guest VMCB Physical Address */
+	put_smstate(u64, smstate, 0x7ee0, svm->nested.vmcb12_gpa);
 
-		ret = nested_svm_vmexit(svm);
-		if (ret)
-			return ret;
+	svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
+	svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
+	svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
 
-		/*
-		 * KVM uses VMCB01 to store L1 host state while L2 runs but
-		 * VMCB01 is going to be used during SMM and thus the state will
-		 * be lost. Temporary save non-VMLOAD/VMSAVE state to the host save
-		 * area pointed to by MSR_VM_HSAVE_PA. APM guarantees that the
-		 * format of the area is identical to guest save area offsetted
-		 * by 0x400 (matches the offset of 'struct vmcb_save_area'
-		 * within 'struct vmcb'). Note: HSAVE area may also be used by
-		 * L1 hypervisor to save additional host context (e.g. KVM does
-		 * that, see svm_prepare_guest_switch()) which must be
-		 * preserved.
-		 */
-		if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr),
-				 &map_save) == -EINVAL)
-			return 1;
+	ret = nested_svm_vmexit(svm);
+	if (ret)
+		return ret;
 
-		BUILD_BUG_ON(offsetof(struct vmcb, save) != 0x400);
+	/*
+	 * KVM uses VMCB01 to store L1 host state while L2 runs but
+	 * VMCB01 is going to be used during SMM and thus the state will
+	 * be lost. Temporary save non-VMLOAD/VMSAVE state to the host save
+	 * area pointed to by MSR_VM_HSAVE_PA. APM guarantees that the
+	 * format of the area is identical to guest save area offsetted
+	 * by 0x400 (matches the offset of 'struct vmcb_save_area'
+	 * within 'struct vmcb'). Note: HSAVE area may also be used by
+	 * L1 hypervisor to save additional host context (e.g. KVM does
+	 * that, see svm_prepare_guest_switch()) which must be
+	 * preserved.
+	 */
+	if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr),
+			 &map_save) == -EINVAL)
+		return 1;
 
-		svm_copy_vmrun_state(map_save.hva + 0x400,
-				     &svm->vmcb01.ptr->save);
+	BUILD_BUG_ON(offsetof(struct vmcb, save) != 0x400);
 
-		kvm_vcpu_unmap(vcpu, &map_save, true);
-	}
+	svm_copy_vmrun_state(map_save.hva + 0x400,
+			     &svm->vmcb01.ptr->save);
 
+	kvm_vcpu_unmap(vcpu, &map_save, true);
 	return 0;
 }
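The BUILD_BUG_ON() above pins the layout assumption the comment describes: 'struct vmcb' must place its save area exactly 0x400 bytes in, so that HSAVE area + 0x400 and the VMCB save area line up. A minimal standalone sketch of the same kind of compile-time layout check; 'struct toy_vmcb' and its sizes are invented for illustration, only the technique mirrors the kernel code:

/* Invented struct: only the offsetof() check mirrors the kernel code. */
#include <assert.h>	/* static_assert (C11) */
#include <stddef.h>	/* offsetof */
#include <stdint.h>

struct toy_vmcb {
	uint8_t control[0x400];	/* control area fills the first 0x400 bytes */
	uint8_t save[0x300];	/* so the save area starts at offset 0x400 */
};

/* Fails the build, not the boot, if the layout ever drifts. */
static_assert(offsetof(struct toy_vmcb, save) == 0x400,
	      "save area expected at offset 0x400");

int main(void)
{
	return 0;
}

The check costs nothing at run time; a layout drift breaks the build instead of silently corrupting the saved host state.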

 static int svm_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 	struct kvm_host_map map, map_save;
-	int ret = 0;
+	u64 saved_efer, vmcb12_gpa;
+	struct vmcb *vmcb12;
+	int ret;
 
-	if (guest_cpuid_has(vcpu, X86_FEATURE_LM)) {
-		u64 saved_efer = GET_SMSTATE(u64, smstate, 0x7ed0);
-		u64 guest = GET_SMSTATE(u64, smstate, 0x7ed8);
-		u64 vmcb12_gpa = GET_SMSTATE(u64, smstate, 0x7ee0);
-		struct vmcb *vmcb12;
+	if (!guest_cpuid_has(vcpu, X86_FEATURE_LM))
+		return 0;
 
-		if (guest) {
-			if (!guest_cpuid_has(vcpu, X86_FEATURE_SVM))
-				return 1;
+	/* Non-zero if SMI arrived while vCPU was in guest mode. */
+	if (!GET_SMSTATE(u64, smstate, 0x7ed8))
+		return 0;
 
-			if (!(saved_efer & EFER_SVME))
-				return 1;
+	if (!guest_cpuid_has(vcpu, X86_FEATURE_SVM))
+		return 1;
 
-			if (kvm_vcpu_map(vcpu,
-					 gpa_to_gfn(vmcb12_gpa), &map) == -EINVAL)
-				return 1;
+	saved_efer = GET_SMSTATE(u64, smstate, 0x7ed0);
+	if (!(saved_efer & EFER_SVME))
+		return 1;
 
-			if (svm_allocate_nested(svm))
-				return 1;
+	vmcb12_gpa = GET_SMSTATE(u64, smstate, 0x7ee0);
+	if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcb12_gpa), &map) == -EINVAL)
+		return 1;
 
-			kvm_vcpu_unmap(vcpu, &map, true);
+	ret = 1;
+	if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr), &map_save) == -EINVAL)
+		goto unmap_map;
 
-			/*
-			 * Restore L1 host state from L1 HSAVE area as VMCB01 was
-			 * used during SMM (see svm_enter_smm())
-			 */
-			if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr),
-					 &map_save) == -EINVAL)
-				return 1;
+	if (svm_allocate_nested(svm))
+		goto unmap_save;
 
-			svm_copy_vmrun_state(&svm->vmcb01.ptr->save,
-					     map_save.hva + 0x400);
+	/*
+	 * Restore L1 host state from L1 HSAVE area as VMCB01 was
+	 * used during SMM (see svm_enter_smm())
+	 */
 
-			/*
-			 * Enter the nested guest now
-			 */
-			vmcb12 = map.hva;
-			nested_load_control_from_vmcb12(svm, &vmcb12->control);
-			ret = enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12, false);
+	svm_copy_vmrun_state(&svm->vmcb01.ptr->save, map_save.hva + 0x400);
 
-			kvm_vcpu_unmap(vcpu, &map_save, true);
-		}
-	}
+	/*
+	 * Enter the nested guest now
+	 */
 
-	return ret;
+	vmcb12 = map.hva;
+	nested_load_control_from_vmcb12(svm, &vmcb12->control);
+	ret = enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12, false);
+
+unmap_save:
+	kvm_vcpu_unmap(vcpu, &map_save, true);
+unmap_map:
+	kvm_vcpu_unmap(vcpu, &map, true);
+	return ret;
 }
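The reworked error handling above is the usual kernel acquire-in-order, goto-unwind shape: each failure jumps to the label that releases everything acquired so far, and the success path falls through the same labels so both maps are always unmapped. A minimal self-contained sketch of that shape; every name is invented, and malloc()/free() stand in for kvm_vcpu_map()/kvm_vcpu_unmap():

/* Sketch of the goto-unwind pattern; not KVM code. */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

static void *map_page(const char *what)
{
	void *p = malloc(64);

	if (p)
		printf("mapped %s\n", what);
	return p;
}

static void unmap_page(void *p, const char *what)
{
	printf("unmapped %s\n", what);
	free(p);
}

static int allocate_nested(bool fail)
{
	return fail ? -1 : 0;
}

static int leave_smm_like(bool fail_alloc)
{
	void *map, *map_save;
	int ret;

	map = map_page("map");
	if (!map)
		return 1;		/* nothing mapped yet: plain return */

	ret = 1;			/* pessimistic default for the goto paths */
	map_save = map_page("map_save");
	if (!map_save)
		goto unmap_map;		/* only 'map' is held here */

	if (allocate_nested(fail_alloc))
		goto unmap_save;	/* both maps are held: release both */

	ret = 0;			/* the real work would happen here */

unmap_save:
	unmap_page(map_save, "map_save");
unmap_map:
	unmap_page(map, "map");
	return ret;
}

int main(void)
{
	printf("ret = %d\n", leave_smm_like(true));
	return 0;
}

With fail_alloc = true this prints both unmap lines before returning 1, which is the leak the commit fixes: in the old code a failing svm_allocate_nested() returned without unmapping anything.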

