From 55c590adfe18b5380f7c4ae3696468bc5c916ee5 Mon Sep 17 00:00:00 2001 From: Like Xu Date: Wed, 7 Dec 2022 15:15:05 +0800 Subject: [PATCH 01/68] KVM: x86/pmu: Prevent zero period event from being repeatedly released The current vPMU can reuse the same pmc->perf_event for the same hardware event via pmc_pause/resume_counter(), but this optimization does not apply to a portion of the TSX events (e.g., "event=0x3c,in_tx=1, in_tx_cp=1"), where event->attr.sample_period is legally zero at creation, thus making the perf call to perf_event_period() meaningless (there is no sample period to adjust in this case) and instead causing such reusable perf_events to be repeatedly released and created. Avoid releasing zero sample_period events by checking is_sampling_event(), preserving the existing enable/disable optimization. Signed-off-by: Like Xu Message-Id: <20221207071506.15733-2-likexu@tencent.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/pmu.c | 3 ++- arch/x86/kvm/pmu.h | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c index 684393c2210523..eb594620dd75a1 100644 --- a/arch/x86/kvm/pmu.c +++ b/arch/x86/kvm/pmu.c @@ -238,7 +238,8 @@ static bool pmc_resume_counter(struct kvm_pmc *pmc) return false; /* recalibrate sample period and check if it's accepted by perf core */ - if (perf_event_period(pmc->perf_event, + if (is_sampling_event(pmc->perf_event) && + perf_event_period(pmc->perf_event, get_sample_period(pmc, pmc->counter))) return false; diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h index 85ff3c0588bac0..cdb91009701dd1 100644 --- a/arch/x86/kvm/pmu.h +++ b/arch/x86/kvm/pmu.h @@ -140,7 +140,8 @@ static inline u64 get_sample_period(struct kvm_pmc *pmc, u64 counter_value) static inline void pmc_update_sample_period(struct kvm_pmc *pmc) { - if (!pmc->perf_event || pmc->is_paused) + if (!pmc->perf_event || pmc->is_paused || + !is_sampling_event(pmc->perf_event)) return; perf_event_period(pmc->perf_event, From fceb3a36c29a957515d5156e5e7844ea040dc43d Mon Sep 17 00:00:00 2001 From: Adamos Ttofari Date: Thu, 8 Dec 2022 09:44:14 +0000 Subject: [PATCH 02/68] KVM: x86: ioapic: Fix level-triggered EOI and userspace I/OAPIC reconfigure race When scanning userspace I/OAPIC entries, intercept EOI for level-triggered IRQs if the current vCPU has a pending and/or in-service IRQ for the vector in its local APIC, even if the vCPU doesn't match the new entry's destination. This fixes a race between userspace I/OAPIC reconfiguration and IRQ delivery that results in the vector's bit being left set in the remote IRR due to the eventual EOI not being forwarded to the userspace I/OAPIC. Commit 0fc5a36dd6b3 ("KVM: x86: ioapic: Fix level-triggered EOI and IOAPIC reconfigure race") fixed the in-kernel IOAPIC, but not the userspace IOAPIC configuration, which has a similar race.
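For illustration only (not part of the patch), one interleaving that leaves the remote IRR stale looks like this:

	/*
	 * Hypothetical timeline, assuming a level-triggered IRQ initially
	 * routed to vCPU0:
	 *
	 *   vCPU0:     IRQ delivered, remote IRR bit set for the vector
	 *   userspace: reroutes the I/OAPIC entry to target vCPU1
	 *   vCPU0:     guest EOIs the vector, but vCPU0 no longer matches
	 *              the entry's destination, so the EOI is never
	 *              forwarded to the userspace I/OAPIC and the remote
	 *              IRR bit is left set
	 */

The kvm_apic_pending_eoi() check added below closes the window by also intercepting the EOI on any vCPU that still has the vector pending or in service.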
Fixes: 0fc5a36dd6b3 ("KVM: x86: ioapic: Fix level-triggered EOI and IOAPIC reconfigure race") Signed-off-by: Adamos Ttofari Reviewed-by: Sean Christopherson Message-Id: <20221208094415.12723-1-attofari@amazon.de> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/irq_comm.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/arch/x86/kvm/irq_comm.c b/arch/x86/kvm/irq_comm.c index 0687162c4f227d..3742d9adacfc1a 100644 --- a/arch/x86/kvm/irq_comm.c +++ b/arch/x86/kvm/irq_comm.c @@ -426,8 +426,9 @@ void kvm_scan_ioapic_routes(struct kvm_vcpu *vcpu, kvm_set_msi_irq(vcpu->kvm, entry, &irq); if (irq.trig_mode && - kvm_apic_match_dest(vcpu, NULL, APIC_DEST_NOSHORT, - irq.dest_id, irq.dest_mode)) + (kvm_apic_match_dest(vcpu, NULL, APIC_DEST_NOSHORT, + irq.dest_id, irq.dest_mode) || + kvm_apic_pending_eoi(vcpu, irq.vector))) __set_bit(irq.vector, ioapic_handled_vectors); } } From 8b9e13d2de73b5513c2ceffe0f62eab40206a126 Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Thu, 8 Dec 2022 11:27:00 +0100 Subject: [PATCH 03/68] KVM: x86: hyper-v: Fix 'using uninitialized value' Coverity warning In kvm_hv_flush_tlb(), the 'data_offset' and 'consumed_xmm_halves' variables are used in a mutually exclusive way: for 'hc->fast' hypercalls we count in 'XMM halves', and otherwise we increase 'data_offset'. Coverity discovered that in one case both variables are incremented unconditionally. This doesn't seem to cause any issues, as the only user of the 'data_offset'/'consumed_xmm_halves' data is kvm_hv_get_tlb_flush_entries() -> kvm_hv_get_hc_data(), which also takes 'hc->fast' into account, but it is still worth fixing. To make things explicit, put 'data_offset' and 'consumed_xmm_halves' into 'struct kvm_hv_hcall' as a union and use them at the call sites. This allows removing the explicit 'data_offset'/'consumed_xmm_halves' parameters from the kvm_hv_get_hc_data()/kvm_get_sparse_vp_set()/kvm_hv_get_tlb_flush_entries() helpers. Note: 'struct kvm_hv_hcall' is allocated on stack in kvm_hv_hypercall() and is not zeroed; consumers are supposed to initialize the appropriate field if needed. Reported-by: coverity-bot Addresses-Coverity-ID: 1527764 ("Uninitialized variables") Fixes: 260970862c88 ("KVM: x86: hyper-v: Handle HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST{,EX} calls gently") Signed-off-by: Vitaly Kuznetsov Reviewed-by: Sean Christopherson Message-Id: <20221208102700.959630-1-vkuznets@redhat.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/hyperv.c | 63 ++++++++++++++++++++++++------------------- 1 file changed, 36 insertions(+), 27 deletions(-) diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c index 2c7f2a26421e82..e8296942a86821 100644 --- a/arch/x86/kvm/hyperv.c +++ b/arch/x86/kvm/hyperv.c @@ -1769,6 +1769,7 @@ static bool hv_is_vp_in_sparse_set(u32 vp_id, u64 valid_bank_mask, u64 sparse_ba } struct kvm_hv_hcall { + /* Hypercall input data */ u64 param; u64 ingpa; u64 outgpa; @@ -1779,12 +1780,21 @@ struct kvm_hv_hcall { bool fast; bool rep; sse128_t xmm[HV_HYPERCALL_MAX_XMM_REGISTERS]; + + /* + * Current read offset when KVM reads hypercall input data gradually, + * either offset in bytes from 'ingpa' for regular hypercalls or the + * number of already consumed 'XMM halves' for 'fast' hypercalls.
+ */ + union { + gpa_t data_offset; + int consumed_xmm_halves; + }; }; static int kvm_hv_get_hc_data(struct kvm *kvm, struct kvm_hv_hcall *hc, - u16 orig_cnt, u16 cnt_cap, u64 *data, - int consumed_xmm_halves, gpa_t offset) + u16 orig_cnt, u16 cnt_cap, u64 *data) { /* * Preserve the original count when ignoring entries via a "cap", KVM @@ -1799,11 +1809,11 @@ static int kvm_hv_get_hc_data(struct kvm *kvm, struct kvm_hv_hcall *hc, * Each XMM holds two sparse banks, but do not count halves that * have already been consumed for hypercall parameters. */ - if (orig_cnt > 2 * HV_HYPERCALL_MAX_XMM_REGISTERS - consumed_xmm_halves) + if (orig_cnt > 2 * HV_HYPERCALL_MAX_XMM_REGISTERS - hc->consumed_xmm_halves) return HV_STATUS_INVALID_HYPERCALL_INPUT; for (i = 0; i < cnt; i++) { - j = i + consumed_xmm_halves; + j = i + hc->consumed_xmm_halves; if (j % 2) data[i] = sse128_hi(hc->xmm[j / 2]); else @@ -1812,27 +1822,24 @@ static int kvm_hv_get_hc_data(struct kvm *kvm, struct kvm_hv_hcall *hc, return 0; } - return kvm_read_guest(kvm, hc->ingpa + offset, data, + return kvm_read_guest(kvm, hc->ingpa + hc->data_offset, data, cnt * sizeof(*data)); } static u64 kvm_get_sparse_vp_set(struct kvm *kvm, struct kvm_hv_hcall *hc, - u64 *sparse_banks, int consumed_xmm_halves, - gpa_t offset) + u64 *sparse_banks) { if (hc->var_cnt > HV_MAX_SPARSE_VCPU_BANKS) return -EINVAL; /* Cap var_cnt to ignore banks that cannot contain a legal VP index. */ return kvm_hv_get_hc_data(kvm, hc, hc->var_cnt, KVM_HV_MAX_SPARSE_VCPU_SET_BITS, - sparse_banks, consumed_xmm_halves, offset); + sparse_banks); } -static int kvm_hv_get_tlb_flush_entries(struct kvm *kvm, struct kvm_hv_hcall *hc, u64 entries[], - int consumed_xmm_halves, gpa_t offset) +static int kvm_hv_get_tlb_flush_entries(struct kvm *kvm, struct kvm_hv_hcall *hc, u64 entries[]) { - return kvm_hv_get_hc_data(kvm, hc, hc->rep_cnt, hc->rep_cnt, - entries, consumed_xmm_halves, offset); + return kvm_hv_get_hc_data(kvm, hc, hc->rep_cnt, hc->rep_cnt, entries); } static void hv_tlb_flush_enqueue(struct kvm_vcpu *vcpu, @@ -1926,8 +1933,6 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc) struct kvm_vcpu *v; unsigned long i; bool all_cpus; - int consumed_xmm_halves = 0; - gpa_t data_offset; /* * The Hyper-V TLFS doesn't allow more than HV_MAX_SPARSE_VCPU_BANKS @@ -1955,12 +1960,12 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc) flush.address_space = hc->ingpa; flush.flags = hc->outgpa; flush.processor_mask = sse128_lo(hc->xmm[0]); - consumed_xmm_halves = 1; + hc->consumed_xmm_halves = 1; } else { if (unlikely(kvm_read_guest(kvm, hc->ingpa, &flush, sizeof(flush)))) return HV_STATUS_INVALID_HYPERCALL_INPUT; - data_offset = sizeof(flush); + hc->data_offset = sizeof(flush); } trace_kvm_hv_flush_tlb(flush.processor_mask, @@ -1985,12 +1990,12 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc) flush_ex.flags = hc->outgpa; memcpy(&flush_ex.hv_vp_set, &hc->xmm[0], sizeof(hc->xmm[0])); - consumed_xmm_halves = 2; + hc->consumed_xmm_halves = 2; } else { if (unlikely(kvm_read_guest(kvm, hc->ingpa, &flush_ex, sizeof(flush_ex)))) return HV_STATUS_INVALID_HYPERCALL_INPUT; - data_offset = sizeof(flush_ex); + hc->data_offset = sizeof(flush_ex); } trace_kvm_hv_flush_tlb_ex(flush_ex.hv_vp_set.valid_bank_mask, @@ -2009,8 +2014,7 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc) if (!hc->var_cnt) goto ret_success; - if (kvm_get_sparse_vp_set(kvm, hc, sparse_banks, - consumed_xmm_halves, 
data_offset)) + if (kvm_get_sparse_vp_set(kvm, hc, sparse_banks)) return HV_STATUS_INVALID_HYPERCALL_INPUT; } @@ -2021,8 +2025,10 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc) * consumed_xmm_halves to make sure TLB flush entries are read * from the correct offset. */ - data_offset += hc->var_cnt * sizeof(sparse_banks[0]); - consumed_xmm_halves += hc->var_cnt; + if (hc->fast) + hc->consumed_xmm_halves += hc->var_cnt; + else + hc->data_offset += hc->var_cnt * sizeof(sparse_banks[0]); } if (hc->code == HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE || @@ -2030,8 +2036,7 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc) hc->rep_cnt > ARRAY_SIZE(__tlb_flush_entries)) { tlb_flush_entries = NULL; } else { - if (kvm_hv_get_tlb_flush_entries(kvm, hc, __tlb_flush_entries, - consumed_xmm_halves, data_offset)) + if (kvm_hv_get_tlb_flush_entries(kvm, hc, __tlb_flush_entries)) return HV_STATUS_INVALID_HYPERCALL_INPUT; tlb_flush_entries = __tlb_flush_entries; } @@ -2180,9 +2185,13 @@ static u64 kvm_hv_send_ipi(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc) if (!hc->var_cnt) goto ret_success; - if (kvm_get_sparse_vp_set(kvm, hc, sparse_banks, 1, - offsetof(struct hv_send_ipi_ex, - vp_set.bank_contents))) + if (!hc->fast) + hc->data_offset = offsetof(struct hv_send_ipi_ex, + vp_set.bank_contents); + else + hc->consumed_xmm_halves = 1; + + if (kvm_get_sparse_vp_set(kvm, hc, sparse_banks)) return HV_STATUS_INVALID_HYPERCALL_INPUT; } From 3c649918b764c0aaef22ea65d514bac5e2324ec0 Mon Sep 17 00:00:00 2001 From: Peng Hao Date: Tue, 6 Dec 2022 17:20:15 +0800 Subject: [PATCH 04/68] KVM: x86: Simplify kvm_apic_hw_enabled kvm_apic_hw_enabled() only needs to return bool; no caller uses the raw MSR_IA32_APICBASE_ENABLE value it currently returns. Signed-off-by: Peng Hao Message-Id: Signed-off-by: Paolo Bonzini --- arch/x86/kvm/lapic.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h index 28e3769066e215..58c3242fcc7ad9 100644 --- a/arch/x86/kvm/lapic.h +++ b/arch/x86/kvm/lapic.h @@ -188,11 +188,11 @@ static inline bool lapic_in_kernel(struct kvm_vcpu *vcpu) extern struct static_key_false_deferred apic_hw_disabled; -static inline int kvm_apic_hw_enabled(struct kvm_lapic *apic) +static inline bool kvm_apic_hw_enabled(struct kvm_lapic *apic) { if (static_branch_unlikely(&apic_hw_disabled.key)) return apic->vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE; - return MSR_IA32_APICBASE_ENABLE; + return true; } extern struct static_key_false_deferred apic_sw_disabled; From 77b1908e10eccf34310ffd95b0b455c01aa76286 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Tue, 20 Dec 2022 15:34:27 +0000 Subject: [PATCH 05/68] KVM: x86: Sanity check inputs to kvm_handle_memory_failure() Add a sanity check in kvm_handle_memory_failure() to assert that a valid x86_exception structure is provided if the memory "failure" wants to propagate a fault into the guest. If a memory failure happens during a direct guest physical memory access, e.g. for nested VMX, KVM hardcodes the failure to X86EMUL_IO_NEEDED and doesn't provide an exception pointer (because the exception struct would just be filled with garbage).
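For illustration, a hypothetical call site that can legitimately propagate a fault always supplies the exception struct (a sketch of typical kvm_read_guest_virt() usage, not code from this patch):

	struct x86_exception e;
	int r;

	r = kvm_read_guest_virt(vcpu, gva, data, bytes, &e);
	if (r != X86EMUL_CONTINUE)
		return kvm_handle_memory_failure(vcpu, r, &e);

With the KVM_BUG_ON() added below, a caller that passes X86EMUL_PROPAGATE_FAULT with a NULL exception pointer now terminates the VM with -EIO instead of dereferencing the NULL pointer in kvm_inject_emulated_page_fault().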
Signed-off-by: Sean Christopherson Message-Id: <20221220153427.514032-1-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/x86.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index fd6c01a3931282..5c3ce39cdccbf9 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -13132,6 +13132,9 @@ int kvm_handle_memory_failure(struct kvm_vcpu *vcpu, int r, struct x86_exception *e) { if (r == X86EMUL_PROPAGATE_FAULT) { + if (KVM_BUG_ON(!e, vcpu->kvm)) + return -EIO; + kvm_inject_emulated_page_fault(vcpu, e); return 1; } From 53800f88d414525d3fdc5c84629faa0b6bc35b3b Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Mon, 19 Dec 2022 22:04:16 +0000 Subject: [PATCH 06/68] KVM: selftests: Zero out valid_bank_mask for "all" case in Hyper-V IPI test Zero out the valid_bank_mask when using the fast variant of HVCALL_SEND_IPI_EX to send IPIs to all vCPUs. KVM requires the "var_cnt" and "valid_bank_mask" inputs to be consistent even when targeting all vCPUs. See commit bd1ba5732bb9 ("KVM: x86: Get the number of Hyper-V sparse banks from the VARHEAD field"). Fixes: 998489245d84 ("KVM: selftests: Hyper-V PV IPI selftest") Cc: Vitaly Kuznetsov Signed-off-by: Sean Christopherson Message-Id: <20221219220416.395329-1-seanjc@google.com> Signed-off-by: Paolo Bonzini --- tools/testing/selftests/kvm/x86_64/hyperv_ipi.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tools/testing/selftests/kvm/x86_64/hyperv_ipi.c b/tools/testing/selftests/kvm/x86_64/hyperv_ipi.c index 8b791eac7d5a37..0cbb0e646ef8d7 100644 --- a/tools/testing/selftests/kvm/x86_64/hyperv_ipi.c +++ b/tools/testing/selftests/kvm/x86_64/hyperv_ipi.c @@ -193,8 +193,9 @@ static void sender_guest_code(void *hcall_page, vm_vaddr_t pgs_gpa) GUEST_SYNC(stage++); /* * 'XMM Fast' HvCallSendSyntheticClusterIpiEx to HV_GENERIC_SET_ALL. - * Nothing to write anything to XMM regs. */ + ipi_ex->vp_set.valid_bank_mask = 0; + hyperv_write_xmm_input(&ipi_ex->vp_set.valid_bank_mask, 2); hyperv_hypercall(HVCALL_SEND_IPI_EX | HV_HYPERCALL_FAST_BIT, IPI_VECTOR, HV_GENERIC_SET_ALL); nop_loop(); From 057b18756b464729bc787ed4e6b44abb9f5c3a38 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Tue, 20 Dec 2022 15:42:24 +0000 Subject: [PATCH 07/68] KVM: nVMX: Document that ignoring memory failures for VMCLEAR is deliberate Explicitly drop the result of kvm_vcpu_write_guest() when writing the "launch state" as part of VMCLEAR emulation, and add a comment to call out that KVM's behavior is architecturally valid. Intel's pseudocode effectively says that VMCLEAR is a nop if the target VMCS address isn't in memory, e.g. if the address points at MMIO. Add a FIXME to call out that suppressing failures on __copy_to_user() is wrong, as memory (a memslot) does exist in that case. Punt the issue to the future as open coding kvm_vcpu_write_guest() just to make sure the guest dies with -EFAULT isn't worth the extra complexity. The flaw will need to be addressed if KVM ever does something intelligent on uaccess failures, e.g. to support post-copy demand paging, but in that case KVM will need a more thorough overhaul, i.e. VMCLEAR shouldn't need to open code a core KVM helper. No functional change intended. 
Reported-by: coverity-bot Addresses-Coverity-ID: 1527765 ("Error handling issues") Fixes: 587d7e72aedc ("kvm: nVMX: VMCLEAR should not cause the vCPU to shut down") Cc: Jim Mattson Signed-off-by: Sean Christopherson Message-Id: <20221220154224.526568-1-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/nested.c | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index b6f4411b613e91..f18f3a9f094355 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c @@ -5296,10 +5296,19 @@ static int handle_vmclear(struct kvm_vcpu *vcpu) if (vmptr == vmx->nested.current_vmptr) nested_release_vmcs12(vcpu); - kvm_vcpu_write_guest(vcpu, - vmptr + offsetof(struct vmcs12, - launch_state), - &zero, sizeof(zero)); + /* + * Silently ignore memory errors on VMCLEAR, Intel's pseudocode + * for VMCLEAR includes an "ensure that data for VMCS referenced + * by the operand is in memory" clause that guards writes to + * memory, i.e. doing nothing for I/O is architecturally valid. + * + * FIXME: Suppress failures if and only if no memslot is found, + * i.e. exit to userspace if __copy_to_user() fails. + */ + (void)kvm_vcpu_write_guest(vcpu, + vmptr + offsetof(struct vmcs12, + launch_state), + &zero, sizeof(zero)); } else if (vmx->nested.hv_evmcs && vmptr == vmx->nested.hv_evmcs_vmptr) { nested_release_evmcs(vcpu); } From 31de69f4eea77b28a9724b3fa55aae104fc91fc7 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Tue, 13 Dec 2022 06:23:03 +0000 Subject: [PATCH 08/68] KVM: nVMX: Properly expose ENABLE_USR_WAIT_PAUSE control to L1 Set ENABLE_USR_WAIT_PAUSE in KVM's supported VMX MSR configuration if the feature is supported in hardware and enabled in KVM's base, non-nested configuration, i.e. expose ENABLE_USR_WAIT_PAUSE to L1 if it's supported. This fixes a bug where saving/restoring, i.e. migrating, a vCPU will fail if WAITPKG (the associated CPUID feature) is enabled for the vCPU, and obviously allows L1 to enable the feature for L2. KVM already effectively exposes ENABLE_USR_WAIT_PAUSE to L1 by stuffing the allowed-1 control in a vCPU's virtual MSR_IA32_VMX_PROCBASED_CTLS2 when updating secondary controls in response to KVM_SET_CPUID(2), but (a) that depends on flawed code (KVM shouldn't touch VMX MSRs in response to CPUID updates) and (b) runs afoul of vmx_restore_control_msr()'s restriction that the guest value must be a strict subset of the supported host value. Although no past commit explicitly enabled nested support for WAITPKG, doing so is safe and functionally correct from an architectural perspective as no additional KVM support is needed to virtualize TPAUSE, UMONITOR, and UMWAIT for L2 relative to L1, and KVM already forwards VM-Exits to L1 as necessary (commit bf653b78f960, "KVM: vmx: Introduce handle_unexpected_vmexit and handle WAITPKG vmexit"). Note, KVM always keeps the host's MSR_IA32_UMWAIT_CONTROL resident in hardware, i.e. always runs both L1 and L2 with the host's power management settings for TPAUSE and UMWAIT. See commit bf09fb6cba4f ("KVM: VMX: Stop context switching MSR_IA32_UMWAIT_CONTROL") for more details.
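The vmx_restore_control_msr() restriction mentioned in (b) above boils down to a subset check; conceptually (a simplified sketch with illustrative variable names, not the literal KVM code):

	/* Userspace may not set allowed-1 bits KVM doesn't support. */
	if (guest_allowed1 & ~host_allowed1)
		return -EINVAL;

Hence a vCPU whose virtual MSR had ENABLE_USR_WAIT_PAUSE stuffed in could not be restored for as long as KVM's supported value lacked the bit.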
Fixes: e69e72faa3a0 ("KVM: x86: Add support for user wait instructions") Cc: stable@vger.kernel.org Reported-by: Aaron Lewis Reported-by: Yu Zhang Signed-off-by: Sean Christopherson Reviewed-by: Jim Mattson Message-Id: <20221213062306.667649-2-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/nested.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index f18f3a9f094355..d93c715cda6ab3 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c @@ -6882,7 +6882,8 @@ void nested_vmx_setup_ctls_msrs(struct vmcs_config *vmcs_conf, u32 ept_caps) SECONDARY_EXEC_ENABLE_INVPCID | SECONDARY_EXEC_RDSEED_EXITING | SECONDARY_EXEC_XSAVES | - SECONDARY_EXEC_TSC_SCALING; + SECONDARY_EXEC_TSC_SCALING | + SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE; /* * We can emulate "VMCS shadowing," even if the hardware From a0860d68a25dee4e51e7d3e067a66ca765776fe8 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Tue, 13 Dec 2022 06:23:04 +0000 Subject: [PATCH 09/68] KVM: nVMX: Don't stuff secondary execution control if it's not supported When stuffing the allowed secondary execution controls for nested VMX in response to CPUID updates, don't set the allowed-1 bit for a feature that isn't supported by KVM, i.e. isn't allowed by the canonical vmcs_config. WARN if KVM attempts to manipulate a feature that isn't supported. All features that are currently stuffed are always advertised to L1 for nested VMX if they are supported in KVM's base configuration, and no additional features should ever be added to the CPUID-induced stuffing (updating VMX MSRs in response to CPUID updates is a long-standing KVM flaw that is slowly being fixed). Signed-off-by: Sean Christopherson Message-Id: <20221213062306.667649-3-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/vmx.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index fe5615fd8295c4..fc9008dbed3348 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -4459,6 +4459,13 @@ vmx_adjust_secondary_exec_control(struct vcpu_vmx *vmx, u32 *exec_control, * controls for features that are/aren't exposed to the guest. */ if (nested) { + /* + * All features that can be added or removed to VMX MSRs must + * be supported in the first place for nested virtualization. + */ + if (WARN_ON_ONCE(!(vmcs_config.nested.secondary_ctls_high & control))) + enabled = false; + if (enabled) vmx->nested.msrs.secondary_ctls_high |= control; else From f5d16bb9be68b20c78fc3d93fa243eb1f0b9fa53 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Tue, 13 Dec 2022 03:30:26 +0000 Subject: [PATCH 10/68] KVM: x86/mmu: Don't attempt to map leaf if target TDP MMU SPTE is frozen Hoist the is_removed_spte() check above the "level == goal_level" check when walking SPTEs during a TDP MMU page fault to avoid attempting to map a leaf entry if said entry is frozen by a different task/vCPU. 
------------[ cut here ]------------ WARNING: CPU: 3 PID: 939 at arch/x86/kvm/mmu/tdp_mmu.c:653 kvm_tdp_mmu_map+0x269/0x4b0 Modules linked in: kvm_intel CPU: 3 PID: 939 Comm: nx_huge_pages_t Not tainted 6.1.0-rc4+ #67 Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 0.0.0 02/06/2015 RIP: 0010:kvm_tdp_mmu_map+0x269/0x4b0 RSP: 0018:ffffc9000068fba8 EFLAGS: 00010246 RAX: 00000000000005a0 RBX: ffffc9000068fcc0 RCX: 0000000000000005 RDX: ffff88810741f000 RSI: ffff888107f04600 RDI: ffffc900006a3000 RBP: 060000010b000bf3 R08: 0000000000000000 R09: 0000000000000000 R10: 0000000000000000 R11: 000ffffffffff000 R12: 0000000000000005 R13: ffff888113670000 R14: ffff888107464958 R15: 0000000000000000 FS: 00007f01c942c740(0000) GS:ffff888277cc0000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 0000000000000000 CR3: 0000000117013006 CR4: 0000000000172ea0 Call Trace: kvm_tdp_page_fault+0x10c/0x130 kvm_mmu_page_fault+0x103/0x680 vmx_handle_exit+0x132/0x5a0 [kvm_intel] vcpu_enter_guest+0x60c/0x16f0 kvm_arch_vcpu_ioctl_run+0x1e2/0x9d0 kvm_vcpu_ioctl+0x271/0x660 __x64_sys_ioctl+0x80/0xb0 do_syscall_64+0x2b/0x50 entry_SYSCALL_64_after_hwframe+0x46/0xb0 ---[ end trace 0000000000000000 ]--- Fixes: 63d28a25e04c ("KVM: x86/mmu: simplify kvm_tdp_mmu_map flow when guest has to retry") Cc: Robert Hoo Signed-off-by: Sean Christopherson Reviewed-by: Robert Hoo Message-Id: <20221213033030.83345-2-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/tdp_mmu.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c index 771210ce518112..84b0b225fe5f52 100644 --- a/arch/x86/kvm/mmu/tdp_mmu.c +++ b/arch/x86/kvm/mmu/tdp_mmu.c @@ -1173,9 +1173,6 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) if (fault->nx_huge_page_workaround_enabled) disallowed_hugepage_adjust(fault, iter.old_spte, iter.level); - if (iter.level == fault->goal_level) - break; - /* * If SPTE has been frozen by another thread, just give up and * retry, avoiding unnecessary page table allocation and free. @@ -1183,6 +1180,9 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) if (is_removed_spte(iter.old_spte)) goto retry; + if (iter.level == fault->goal_level) + break; + /* Step down into the lower level page table if it exists. */ if (is_shadow_present_pte(iter.old_spte) && !is_large_pte(iter.old_spte)) From 80a3e4ae962de33ff6a94e798c80e56e1fed4d10 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Tue, 13 Dec 2022 03:30:27 +0000 Subject: [PATCH 11/68] KVM: x86/mmu: Map TDP MMU leaf SPTE iff target level is reached Map the leaf SPTE when handling a TDP MMU page fault if and only if the target level is reached. A recent commit reworked the retry logic and incorrectly assumed that walking SPTEs would never "fail", as the loop either bails (retries) or installs parent SPs. However, the iterator itself will bail early if it detects a frozen (REMOVED) SPTE when stepping down. The TDP iterator also rereads the current SPTE before stepping down specifically to avoid walking into a part of the tree that is being removed, which means it's possible to terminate the loop without the guts of the loop observing the frozen SPTE, e.g. if a different task zaps a parent SPTE between the initial read and try_step_down()'s refresh. 
Mapping a leaf SPTE at the wrong level results in all kinds of badness as page table walkers interpret the SPTE as a page table, not a leaf, and walk into the weeds. ------------[ cut here ]------------ WARNING: CPU: 1 PID: 1025 at arch/x86/kvm/mmu/tdp_mmu.c:1070 kvm_tdp_mmu_map+0x481/0x510 Modules linked in: kvm_intel CPU: 1 PID: 1025 Comm: nx_huge_pages_t Tainted: G W 6.1.0-rc4+ #64 Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 0.0.0 02/06/2015 RIP: 0010:kvm_tdp_mmu_map+0x481/0x510 RSP: 0018:ffffc9000072fba8 EFLAGS: 00010286 RAX: 0000000000000000 RBX: ffffc9000072fcc0 RCX: 0000000000000027 RDX: 0000000000000027 RSI: 00000000ffffdfff RDI: ffff888277c5b4c8 RBP: ffff888107d45a10 R08: ffff888277c5b4c0 R09: ffffc9000072fa48 R10: 0000000000000001 R11: 0000000000000001 R12: ffffc9000073a0e0 R13: ffff88810fc54800 R14: ffff888107d1ae60 R15: ffff88810fc54f90 FS: 00007fba9f853740(0000) GS:ffff888277c40000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 0000000000000000 CR3: 000000010aa7a003 CR4: 0000000000172ea0 Call Trace: kvm_tdp_page_fault+0x10c/0x130 kvm_mmu_page_fault+0x103/0x680 vmx_handle_exit+0x132/0x5a0 [kvm_intel] vcpu_enter_guest+0x60c/0x16f0 kvm_arch_vcpu_ioctl_run+0x1e2/0x9d0 kvm_vcpu_ioctl+0x271/0x660 __x64_sys_ioctl+0x80/0xb0 do_syscall_64+0x2b/0x50 entry_SYSCALL_64_after_hwframe+0x46/0xb0 ---[ end trace 0000000000000000 ]--- Invalid SPTE change: cannot replace a present leaf SPTE with another present leaf SPTE mapping a different PFN! as_id: 0 gfn: 100200 old_spte: 600000112400bf3 new_spte: 6000001126009f3 level: 2 ------------[ cut here ]------------ kernel BUG at arch/x86/kvm/mmu/tdp_mmu.c:559! invalid opcode: 0000 [#1] SMP CPU: 1 PID: 1025 Comm: nx_huge_pages_t Tainted: G W 6.1.0-rc4+ #64 Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 0.0.0 02/06/2015 RIP: 0010:__handle_changed_spte.cold+0x95/0x9c RSP: 0018:ffffc9000072faf8 EFLAGS: 00010246 RAX: 00000000000000c1 RBX: ffffc90000731000 RCX: 0000000000000027 RDX: 0000000000000000 RSI: 00000000ffffdfff RDI: ffff888277c5b4c8 RBP: 0600000112400bf3 R08: ffff888277c5b4c0 R09: ffffc9000072f9a0 R10: 0000000000000001 R11: 0000000000000001 R12: 06000001126009f3 R13: 0000000000000002 R14: 0000000012600901 R15: 0000000012400b01 FS: 00007fba9f853740(0000) GS:ffff888277c40000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 0000000000000000 CR3: 000000010aa7a003 CR4: 0000000000172ea0 Call Trace: kvm_tdp_mmu_map+0x3b0/0x510 kvm_tdp_page_fault+0x10c/0x130 kvm_mmu_page_fault+0x103/0x680 vmx_handle_exit+0x132/0x5a0 [kvm_intel] vcpu_enter_guest+0x60c/0x16f0 kvm_arch_vcpu_ioctl_run+0x1e2/0x9d0 kvm_vcpu_ioctl+0x271/0x660 __x64_sys_ioctl+0x80/0xb0 do_syscall_64+0x2b/0x50 entry_SYSCALL_64_after_hwframe+0x46/0xb0 Modules linked in: kvm_intel ---[ end trace 0000000000000000 ]--- Fixes: 63d28a25e04c ("KVM: x86/mmu: simplify kvm_tdp_mmu_map flow when guest has to retry") Cc: Robert Hoo Signed-off-by: Sean Christopherson Message-Id: <20221213033030.83345-3-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/tdp_mmu.c | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c index 84b0b225fe5f52..fbdef59374fed3 100644 --- a/arch/x86/kvm/mmu/tdp_mmu.c +++ b/arch/x86/kvm/mmu/tdp_mmu.c @@ -1181,7 +1181,7 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) goto retry; if (iter.level == fault->goal_level) - break; + goto map_target_level; /* Step down into 
the lower level page table if it exists. */ if (is_shadow_present_pte(iter.old_spte) && @@ -1203,8 +1203,8 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) r = tdp_mmu_link_sp(kvm, &iter, sp, true); /* - * Also force the guest to retry the access if the upper level SPTEs - * aren't in place. + * Force the guest to retry if installing an upper level SPTE + * failed, e.g. because a different task modified the SPTE. */ if (r) { tdp_mmu_free_sp(sp); @@ -1219,6 +1219,14 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) } } + /* + * The walk aborted before reaching the target level, e.g. because the + * iterator detected an upper level SPTE was frozen during traversal. + */ + WARN_ON_ONCE(iter.level == fault->goal_level); + goto retry; + +map_target_level: ret = tdp_mmu_map_handle_target_level(vcpu, fault, &iter); retry: From 21a36ac6b6c7059965bac0cc73ef3cbb8ef576dd Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Tue, 13 Dec 2022 03:30:28 +0000 Subject: [PATCH 12/68] KVM: x86/mmu: Re-check under lock that TDP MMU SP hugepage is disallowed Re-check sp->nx_huge_page_disallowed under the tdp_mmu_pages_lock spinlock when adding a new shadow page in the TDP MMU. To ensure the NX reclaim kthread can't see a not-yet-linked shadow page, the page fault path links the new page table prior to adding the page to possible_nx_huge_pages. If the page is zapped by a different task, e.g. because dirty logging is disabled, between linking the page and adding it to the list, KVM can end up triggering a use-after-free by adding the zapped SP to the aforementioned list, as the zapped SP's memory is scheduled for removal via RCU callback. The bug is detected by the sanity checks guarded by CONFIG_DEBUG_LIST=y, i.e. the below splat is just one possible signature. ------------[ cut here ]------------ list_add corruption. prev->next should be next (ffffc9000071fa70), but was ffff88811125ee38. (prev=ffff88811125ee38).
WARNING: CPU: 1 PID: 953 at lib/list_debug.c:30 __list_add_valid+0x79/0xa0 Modules linked in: kvm_intel CPU: 1 PID: 953 Comm: nx_huge_pages_t Tainted: G W 6.1.0-rc4+ #71 Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 0.0.0 02/06/2015 RIP: 0010:__list_add_valid+0x79/0xa0 RSP: 0018:ffffc900006efb68 EFLAGS: 00010286 RAX: 0000000000000000 RBX: ffff888116cae8a0 RCX: 0000000000000027 RDX: 0000000000000027 RSI: 0000000100001872 RDI: ffff888277c5b4c8 RBP: ffffc90000717000 R08: ffff888277c5b4c0 R09: ffffc900006efa08 R10: 0000000000199998 R11: 0000000000199a20 R12: ffff888116cae930 R13: ffff88811125ee38 R14: ffffc9000071fa70 R15: ffff88810b794f90 FS: 00007fc0415d2740(0000) GS:ffff888277c40000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 0000000000000000 CR3: 0000000115201006 CR4: 0000000000172ea0 Call Trace: track_possible_nx_huge_page+0x53/0x80 kvm_tdp_mmu_map+0x242/0x2c0 kvm_tdp_page_fault+0x10c/0x130 kvm_mmu_page_fault+0x103/0x680 vmx_handle_exit+0x132/0x5a0 [kvm_intel] vcpu_enter_guest+0x60c/0x16f0 kvm_arch_vcpu_ioctl_run+0x1e2/0x9d0 kvm_vcpu_ioctl+0x271/0x660 __x64_sys_ioctl+0x80/0xb0 do_syscall_64+0x2b/0x50 entry_SYSCALL_64_after_hwframe+0x46/0xb0 ---[ end trace 0000000000000000 ]--- Fixes: 61f94478547b ("KVM: x86/mmu: Set disallowed_nx_huge_page in TDP MMU before setting SPTE") Reported-by: Greg Thelen Analyzed-by: David Matlack Cc: David Matlack Cc: Ben Gardon Cc: Mingwei Zhang Signed-off-by: Sean Christopherson Message-Id: <20221213033030.83345-4-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/tdp_mmu.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c index fbdef59374fed3..62a687d094bba6 100644 --- a/arch/x86/kvm/mmu/tdp_mmu.c +++ b/arch/x86/kvm/mmu/tdp_mmu.c @@ -1214,7 +1214,8 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) if (fault->huge_page_disallowed && fault->req_level >= iter.level) { spin_lock(&kvm->arch.tdp_mmu_pages_lock); - track_possible_nx_huge_page(kvm, sp); + if (sp->nx_huge_page_disallowed) + track_possible_nx_huge_page(kvm, sp); spin_unlock(&kvm->arch.tdp_mmu_pages_lock); } } From 50a9ac25985c037d45ee6d7e3a7ae198a63b9266 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Tue, 13 Dec 2022 03:30:29 +0000 Subject: [PATCH 13/68] KVM: x86/mmu: Don't install TDP MMU SPTE if SP has unexpected level Don't install a leaf TDP MMU SPTE if the parent page's level doesn't match the target level of the fault, and instead have the vCPU retry the faulting instruction after warning. Continuing on is completely unnecessary as the absolute worst case scenario of retrying is DoSing the vCPU, whereas continuing on all but guarantees bigger explosions, e.g. ------------[ cut here ]------------ kernel BUG at arch/x86/kvm/mmu/tdp_mmu.c:559! 
invalid opcode: 0000 [#1] SMP CPU: 1 PID: 1025 Comm: nx_huge_pages_t Tainted: G W 6.1.0-rc4+ #64 Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 0.0.0 02/06/2015 RIP: 0010:__handle_changed_spte.cold+0x95/0x9c RSP: 0018:ffffc9000072faf8 EFLAGS: 00010246 RAX: 00000000000000c1 RBX: ffffc90000731000 RCX: 0000000000000027 RDX: 0000000000000000 RSI: 00000000ffffdfff RDI: ffff888277c5b4c8 RBP: 0600000112400bf3 R08: ffff888277c5b4c0 R09: ffffc9000072f9a0 R10: 0000000000000001 R11: 0000000000000001 R12: 06000001126009f3 R13: 0000000000000002 R14: 0000000012600901 R15: 0000000012400b01 FS: 00007fba9f853740(0000) GS:ffff888277c40000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 0000000000000000 CR3: 000000010aa7a003 CR4: 0000000000172ea0 Call Trace: kvm_tdp_mmu_map+0x3b0/0x510 kvm_tdp_page_fault+0x10c/0x130 kvm_mmu_page_fault+0x103/0x680 vmx_handle_exit+0x132/0x5a0 [kvm_intel] vcpu_enter_guest+0x60c/0x16f0 kvm_arch_vcpu_ioctl_run+0x1e2/0x9d0 kvm_vcpu_ioctl+0x271/0x660 __x64_sys_ioctl+0x80/0xb0 do_syscall_64+0x2b/0x50 entry_SYSCALL_64_after_hwframe+0x46/0xb0 Modules linked in: kvm_intel ---[ end trace 0000000000000000 ]--- Signed-off-by: Sean Christopherson Message-Id: <20221213033030.83345-5-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/tdp_mmu.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c index 62a687d094bba6..d6df38d371a00c 100644 --- a/arch/x86/kvm/mmu/tdp_mmu.c +++ b/arch/x86/kvm/mmu/tdp_mmu.c @@ -1074,7 +1074,9 @@ static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu, int ret = RET_PF_FIXED; bool wrprot = false; - WARN_ON(sp->role.level != fault->goal_level); + if (WARN_ON_ONCE(sp->role.level != fault->goal_level)) + return RET_PF_RETRY; + if (unlikely(!fault->slot)) new_spte = make_mmio_spte(vcpu, iter->gfn, ACC_ALL); else From e779fd53b4aa0aa8704ae62eb56065b9877a540b Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Tue, 13 Dec 2022 00:16:40 +0000 Subject: [PATCH 14/68] KVM: selftests: Define literal to asm constraint in aarch64 as unsigned long MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Define a literal '0' asm input constraint to aarch64/page_fault_test's guest_cas() as an unsigned long to make clang happy. 
tools/testing/selftests/kvm/aarch64/page_fault_test.c:120:16: error: value size does not match register size specified by the constraint and modifier [-Werror,-Wasm-operand-widths] :: "r" (0), "r" (TEST_DATA), "r" (guest_test_memory)); ^ tools/testing/selftests/kvm/aarch64/page_fault_test.c:119:15: note: use constraint modifier "w" "casal %0, %1, [%2]\n" ^~ %w0 Fixes: 35c581015712 ("KVM: selftests: aarch64: Add aarch64/page_fault_test") Cc: Ricardo Koller Signed-off-by: Sean Christopherson Reviewed-by: Philippe Mathieu-Daudé Message-Id: <20221213001653.3852042-2-seanjc@google.com> Signed-off-by: Paolo Bonzini --- tools/testing/selftests/kvm/aarch64/page_fault_test.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/testing/selftests/kvm/aarch64/page_fault_test.c b/tools/testing/selftests/kvm/aarch64/page_fault_test.c index 95d22cfb7b41a2..beb944fa6fd469 100644 --- a/tools/testing/selftests/kvm/aarch64/page_fault_test.c +++ b/tools/testing/selftests/kvm/aarch64/page_fault_test.c @@ -117,7 +117,7 @@ static void guest_cas(void) GUEST_ASSERT(guest_check_lse()); asm volatile(".arch_extension lse\n" "casal %0, %1, [%2]\n" - :: "r" (0), "r" (TEST_DATA), "r" (guest_test_memory)); + :: "r" (0ul), "r" (TEST_DATA), "r" (guest_test_memory)); val = READ_ONCE(*guest_test_memory); GUEST_ASSERT_EQ(val, TEST_DATA); } From 73441efa36c253906057b8800bc9a3fdadbc2c41 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Tue, 13 Dec 2022 00:16:41 +0000 Subject: [PATCH 15/68] KVM: selftests: Delete dead code in x86_64/vmx_tsc_adjust_test.c Delete an unused struct definition in x86_64/vmx_tsc_adjust_test.c. Signed-off-by: Sean Christopherson Message-Id: <20221213001653.3852042-3-seanjc@google.com> Signed-off-by: Paolo Bonzini --- tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c | 5 ----- 1 file changed, 5 deletions(-) diff --git a/tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c b/tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c index 5943187e859494..ff8ecdf32ae07d 100644 --- a/tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c +++ b/tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c @@ -49,11 +49,6 @@ enum { NUM_VMX_PAGES, }; -struct kvm_single_msr { - struct kvm_msrs header; - struct kvm_msr_entry entry; -} __attribute__((packed)); - /* The virtual machine object. */ static struct kvm_vm *vm; From d61a12cb9af5b355a38e0c0106e91224b49195ce Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Tue, 13 Dec 2022 00:16:42 +0000 Subject: [PATCH 16/68] KVM: selftests: Fix divide-by-zero bug in memslot_perf_test MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Check that the number of pages per slot is non-zero in get_max_slots() prior to computing the remaining number of pages. clang generates code that uses an actual DIV instruction to calculate the remainder, which causes a #DE if the total number of pages is less than the number of slots: pages_per_slot evaluates to zero, and 'mempages % pages_per_slot' is then a division by zero.
traps: memslot_perf_te[97611] trap divide error ip:4030c4 sp:7ffd18ae58f0 error:0 in memslot_perf_test[401000+cb000] Fixes: a69170c65acd ("KVM: selftests: memslot_perf_test: Report optimal memory slots") Signed-off-by: Sean Christopherson Reviewed-by: Philippe Mathieu-Daudé Message-Id: <20221213001653.3852042-4-seanjc@google.com> Signed-off-by: Paolo Bonzini --- tools/testing/selftests/kvm/memslot_perf_test.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tools/testing/selftests/kvm/memslot_perf_test.c b/tools/testing/selftests/kvm/memslot_perf_test.c index e698306bf49d1d..e6587e19349051 100644 --- a/tools/testing/selftests/kvm/memslot_perf_test.c +++ b/tools/testing/selftests/kvm/memslot_perf_test.c @@ -265,6 +265,9 @@ static uint64_t get_max_slots(struct vm_data *data, uint32_t host_page_size) slots = data->nslots; while (--slots > 1) { pages_per_slot = mempages / slots; + if (!pages_per_slot) + continue; + rempages = mempages % pages_per_slot; if (check_slot_pages(host_page_size, guest_page_size, pages_per_slot, rempages)) From 43e96957e8b87bad8e4ba666750ff0cda9e03ffb Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Tue, 13 Dec 2022 00:16:43 +0000 Subject: [PATCH 17/68] KVM: selftests: Use pattern matching in .gitignore Use pattern matching to exclude everything except .c, .h, .S, and .sh files from Git. Manually adding every test target has an absurd maintenance cost, is comically error prone, and leads to bikeshedding over whether or not the targets should be listed in alphabetical order. Deliberately do not include the one-off assets, e.g. config, settings, .gitignore itself, etc., as Git doesn't ignore files that are already in the repository. Adding the one-off assets won't prevent mistakes where developers forget to --force add files that don't match the "allowed" patterns.
Signed-off-by: Sean Christopherson Message-Id: <20221213001653.3852042-5-seanjc@google.com> Signed-off-by: Paolo Bonzini --- tools/testing/selftests/kvm/.gitignore | 91 ++------------------------ 1 file changed, 6 insertions(+), 85 deletions(-) diff --git a/tools/testing/selftests/kvm/.gitignore b/tools/testing/selftests/kvm/.gitignore index 6ce8c488d62eac..6d9381d60172f3 100644 --- a/tools/testing/selftests/kvm/.gitignore +++ b/tools/testing/selftests/kvm/.gitignore @@ -1,86 +1,7 @@ # SPDX-License-Identifier: GPL-2.0-only -/aarch64/aarch32_id_regs -/aarch64/arch_timer -/aarch64/debug-exceptions -/aarch64/get-reg-list -/aarch64/hypercalls -/aarch64/page_fault_test -/aarch64/psci_test -/aarch64/vcpu_width_config -/aarch64/vgic_init -/aarch64/vgic_irq -/s390x/memop -/s390x/resets -/s390x/sync_regs_test -/s390x/tprot -/x86_64/amx_test -/x86_64/cpuid_test -/x86_64/cr4_cpuid_sync_test -/x86_64/debug_regs -/x86_64/exit_on_emulation_failure_test -/x86_64/fix_hypercall_test -/x86_64/get_msr_index_features -/x86_64/kvm_clock_test -/x86_64/kvm_pv_test -/x86_64/hyperv_clock -/x86_64/hyperv_cpuid -/x86_64/hyperv_evmcs -/x86_64/hyperv_features -/x86_64/hyperv_ipi -/x86_64/hyperv_svm_test -/x86_64/hyperv_tlb_flush -/x86_64/max_vcpuid_cap_test -/x86_64/mmio_warning_test -/x86_64/monitor_mwait_test -/x86_64/nested_exceptions_test -/x86_64/nx_huge_pages_test -/x86_64/platform_info_test -/x86_64/pmu_event_filter_test -/x86_64/set_boot_cpu_id -/x86_64/set_sregs_test -/x86_64/sev_migrate_tests -/x86_64/smaller_maxphyaddr_emulation_test -/x86_64/smm_test -/x86_64/state_test -/x86_64/svm_vmcall_test -/x86_64/svm_int_ctl_test -/x86_64/svm_nested_soft_inject_test -/x86_64/svm_nested_shutdown_test -/x86_64/sync_regs_test -/x86_64/tsc_msrs_test -/x86_64/tsc_scaling_sync -/x86_64/ucna_injection_test -/x86_64/userspace_io_test -/x86_64/userspace_msr_exit_test -/x86_64/vmx_apic_access_test -/x86_64/vmx_close_while_nested_test -/x86_64/vmx_dirty_log_test -/x86_64/vmx_exception_with_invalid_guest_state -/x86_64/vmx_invalid_nested_guest_state -/x86_64/vmx_msrs_test -/x86_64/vmx_preemption_timer_test -/x86_64/vmx_set_nested_state_test -/x86_64/vmx_tsc_adjust_test -/x86_64/vmx_nested_tsc_scaling_test -/x86_64/xapic_ipi_test -/x86_64/xapic_state_test -/x86_64/xen_shinfo_test -/x86_64/xen_vmcall_test -/x86_64/xss_msr_test -/x86_64/vmx_pmu_caps_test -/x86_64/triple_fault_event_test -/access_tracking_perf_test -/demand_paging_test -/dirty_log_test -/dirty_log_perf_test -/hardware_disable_test -/kvm_create_max_vcpus -/kvm_page_table_test -/max_guest_memory_test -/memslot_modification_stress_test -/memslot_perf_test -/rseq_test -/set_memory_region_test -/steal_time -/kvm_binary_stats_test -/system_counter_offset_test +* +!/**/ +!*.c +!*.h +!*.S +!*.sh From 1525429fe5cb8e23b74c6dd473bb477a35906704 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Tue, 13 Dec 2022 00:16:44 +0000 Subject: [PATCH 18/68] KVM: selftests: Fix a typo in x86-64's kvm_get_cpu_address_width() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix a == vs. = typo in kvm_get_cpu_address_width() that results in @pa_bits being left unset if the CPU doesn't support enumerating its MAX_PHY_ADDR. Flagged by clang's unused-value warning. lib/x86_64/processor.c:1034:51: warning: expression result unused [-Wunused-value] *pa_bits == kvm_cpu_has(X86_FEATURE_PAE) ?
36 : 32; Fixes: 3bd396353d18 ("KVM: selftests: Add X86_FEATURE_PAE and use it calc "fallback" MAXPHYADDR") Signed-off-by: Sean Christopherson Reviewed-by: Philippe Mathieu-Daudé Message-Id: <20221213001653.3852042-6-seanjc@google.com> Signed-off-by: Paolo Bonzini --- tools/testing/selftests/kvm/lib/x86_64/processor.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/testing/selftests/kvm/lib/x86_64/processor.c b/tools/testing/selftests/kvm/lib/x86_64/processor.c index c4d368d56cfe2b..acfa1d01e7df08 100644 --- a/tools/testing/selftests/kvm/lib/x86_64/processor.c +++ b/tools/testing/selftests/kvm/lib/x86_64/processor.c @@ -1031,7 +1031,7 @@ bool is_amd_cpu(void) void kvm_get_cpu_address_width(unsigned int *pa_bits, unsigned int *va_bits) { if (!kvm_cpu_has_p(X86_PROPERTY_MAX_PHY_ADDR)) { - *pa_bits == kvm_cpu_has(X86_FEATURE_PAE) ? 36 : 32; + *pa_bits = kvm_cpu_has(X86_FEATURE_PAE) ? 36 : 32; *va_bits = 32; } else { *pa_bits = kvm_cpu_property(X86_PROPERTY_MAX_PHY_ADDR); From 6a5db83adfd668b3c1092274ddf45903eb1fe435 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Tue, 13 Dec 2022 00:16:45 +0000 Subject: [PATCH 19/68] KVM: selftests: Rename UNAME_M to ARCH_DIR, fill explicitly for x86 Rename UNAME_M to ARCH_DIR and explicitly set it directly for x86. At this point, the name of the arch directory really doesn't have anything to do with `uname -m`, and UNAME_M is unnecessarily confusing given that its purpose is purely to identify the arch specific directory. Signed-off-by: Sean Christopherson Message-Id: <20221213001653.3852042-7-seanjc@google.com> Signed-off-by: Paolo Bonzini --- tools/testing/selftests/kvm/Makefile | 47 ++++++++-------------------- 1 file changed, 13 insertions(+), 34 deletions(-) diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile index 947676983da1f2..59f3eb53c932cc 100644 --- a/tools/testing/selftests/kvm/Makefile +++ b/tools/testing/selftests/kvm/Makefile @@ -7,35 +7,14 @@ top_srcdir = ../../../.. include $(top_srcdir)/scripts/subarch.include ARCH ?= $(SUBARCH) -# For cross-builds to work, UNAME_M has to map to ARCH and arch specific -# directories and targets in this Makefile. "uname -m" doesn't map to -# arch specific sub-directory names. -# -# UNAME_M variable to used to run the compiles pointing to the right arch -# directories and build the right targets for these supported architectures. -# -# TEST_GEN_PROGS and LIBKVM are set using UNAME_M variable. -# LINUX_TOOL_ARCH_INCLUDE is set using ARCH variable. -# -# x86_64 targets are named to include x86_64 as a suffix and directories -# for includes are in x86_64 sub-directory. s390x and aarch64 follow the -# same convention. "uname -m" doesn't result in the correct mapping for -# s390x and aarch64. 
-# -# No change necessary for x86_64 -UNAME_M := $(shell uname -m) - -# Set UNAME_M for arm64 compile/install to work -ifeq ($(ARCH),arm64) - UNAME_M := aarch64 -endif -# Set UNAME_M s390x compile/install to work -ifeq ($(ARCH),s390) - UNAME_M := s390x -endif -# Set UNAME_M riscv compile/install to work -ifeq ($(ARCH),riscv) - UNAME_M := riscv +ifeq ($(ARCH),x86) + ARCH_DIR := x86_64 +else ifeq ($(ARCH),arm64) + ARCH_DIR := aarch64 +else ifeq ($(ARCH),s390) + ARCH_DIR := s390x +else + ARCH_DIR := $(ARCH) endif LIBKVM += lib/assert.c @@ -196,10 +175,10 @@ TEST_GEN_PROGS_riscv += kvm_page_table_test TEST_GEN_PROGS_riscv += set_memory_region_test TEST_GEN_PROGS_riscv += kvm_binary_stats_test -TEST_PROGS += $(TEST_PROGS_$(UNAME_M)) -TEST_GEN_PROGS += $(TEST_GEN_PROGS_$(UNAME_M)) -TEST_GEN_PROGS_EXTENDED += $(TEST_GEN_PROGS_EXTENDED_$(UNAME_M)) -LIBKVM += $(LIBKVM_$(UNAME_M)) +TEST_PROGS += $(TEST_PROGS_$(ARCH_DIR)) +TEST_GEN_PROGS += $(TEST_GEN_PROGS_$(ARCH_DIR)) +TEST_GEN_PROGS_EXTENDED += $(TEST_GEN_PROGS_EXTENDED_$(ARCH_DIR)) +LIBKVM += $(LIBKVM_$(ARCH_DIR)) INSTALL_HDR_PATH = $(top_srcdir)/usr LINUX_HDR_PATH = $(INSTALL_HDR_PATH)/include/ @@ -212,7 +191,7 @@ endif CFLAGS += -Wall -Wstrict-prototypes -Wuninitialized -O2 -g -std=gnu99 \ -fno-stack-protector -fno-PIE -I$(LINUX_TOOL_INCLUDE) \ -I$(LINUX_TOOL_ARCH_INCLUDE) -I$(LINUX_HDR_PATH) -Iinclude \ -I$( Date: Tue, 13 Dec 2022 00:16:46 +0000 Subject: [PATCH 20/68] KVM: selftests: Use proper function prototypes in probing code MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Give the main() functions in the probing code proper prototypes so that compiling the probing code with stricter flags won't generate false negatives. :1:5: error: function declaration isn’t a prototype [-Werror=strict-prototypes] Signed-off-by: Sean Christopherson Message-Id: <20221213001653.3852042-8-seanjc@google.com> Reviewed-by: Philippe Mathieu-Daudé Signed-off-by: Paolo Bonzini --- tools/testing/selftests/kvm/Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile index 59f3eb53c932cc..8eecda2be0d0fe 100644 --- a/tools/testing/selftests/kvm/Makefile +++ b/tools/testing/selftests/kvm/Makefile @@ -194,11 +194,11 @@ CFLAGS += -Wall -Wstrict-prototypes -Wuninitialized -O2 -g -std=gnu99 \ -I$( Date: Tue, 13 Dec 2022 00:16:47 +0000 Subject: [PATCH 21/68] KVM: selftests: Probe -no-pie with actual CFLAGS used to compile Probe -no-pie with the actual set of CFLAGS used to compile the tests, as clang whines about -no-pie being unused if the tests are compiled with -static.
clang: warning: argument unused during compilation: '-no-pie' [-Wunused-command-line-argument] Signed-off-by: Sean Christopherson Message-Id: <20221213001653.3852042-9-seanjc@google.com> Signed-off-by: Paolo Bonzini --- tools/testing/selftests/kvm/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile index 8eecda2be0d0fe..2ffd8724727936 100644 --- a/tools/testing/selftests/kvm/Makefile +++ b/tools/testing/selftests/kvm/Makefile @@ -195,7 +195,7 @@ CFLAGS += -Wall -Wstrict-prototypes -Wuninitialized -O2 -g -std=gnu99 \ $(KHDR_INCLUDES) no-pie-option := $(call try-run, echo 'int main(void) { return 0; }' | \ - $(CC) -Werror -no-pie -x c - -o "$$TMP", -no-pie) + $(CC) -Werror $(CFLAGS) -no-pie -x c - -o "$$TMP", -no-pie) # On s390, build the testcases KVM-enabled pgste-option = $(call try-run, echo 'int main(void) { return 0; }' | \ From 7cf2e7373ab145bf972c3cbcb495fd1a9770c3b0 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Tue, 13 Dec 2022 00:16:48 +0000 Subject: [PATCH 22/68] KVM: selftests: Explicitly disable builtins for mem*() overrides Explicitly disable the compiler's builtin memcmp(), memcpy(), and memset(). Because only lib/string_override.c is built with -ffreestanding, the compiler reserves the right to do what it wants and can try to link the non-freestanding code to its own crud. /usr/bin/x86_64-linux-gnu-ld: /lib/x86_64-linux-gnu/libc.a(memcmp.o): in function `memcmp_ifunc': (.text+0x0): multiple definition of `memcmp'; tools/testing/selftests/kvm/lib/string_override.o: tools/testing/selftests/kvm/lib/string_override.c:15: first defined here clang: error: linker command failed with exit code 1 (use -v to see invocation) Fixes: 6b6f71484bf4 ("KVM: selftests: Implement memcmp(), memcpy(), and memset() for guest use") Reported-by: Aaron Lewis Reported-by: Raghavendra Rao Ananta Signed-off-by: Sean Christopherson Message-Id: <20221213001653.3852042-10-seanjc@google.com> Signed-off-by: Paolo Bonzini --- tools/testing/selftests/kvm/Makefile | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile index 2ffd8724727936..ecd3c8126c3d71 100644 --- a/tools/testing/selftests/kvm/Makefile +++ b/tools/testing/selftests/kvm/Makefile @@ -189,6 +189,7 @@ else LINUX_TOOL_ARCH_INCLUDE = $(top_srcdir)/tools/arch/$(ARCH)/include endif CFLAGS += -Wall -Wstrict-prototypes -Wuninitialized -O2 -g -std=gnu99 \ + -fno-builtin-memcmp -fno-builtin-memcpy -fno-builtin-memset \ -fno-stack-protector -fno-PIE -I$(LINUX_TOOL_INCLUDE) \ -I$(LINUX_TOOL_ARCH_INCLUDE) -I$(LINUX_HDR_PATH) -Iinclude \ -I$( Date: Tue, 13 Dec 2022 00:16:49 +0000 Subject: [PATCH 23/68] KVM: selftests: Include lib.mk before consuming $(CC) Include lib.mk before consuming $(CC) and document that lib.mk overwrites $(CC) unless make was invoked with -e or $(CC) was specified after make (which makes the environment override the Makefile). Including lib.mk after $(CC) has been used for probing, e.g. for -no-pie, can lead to weirdness.
Signed-off-by: Sean Christopherson Message-Id: <20221213001653.3852042-11-seanjc@google.com> Signed-off-by: Paolo Bonzini --- tools/testing/selftests/kvm/Makefile | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile index ecd3c8126c3d71..2acba3c74ad6d9 100644 --- a/tools/testing/selftests/kvm/Makefile +++ b/tools/testing/selftests/kvm/Makefile @@ -180,6 +180,11 @@ TEST_GEN_PROGS += $(TEST_GEN_PROGS_$(ARCH_DIR)) TEST_GEN_PROGS_EXTENDED += $(TEST_GEN_PROGS_EXTENDED_$(ARCH_DIR)) LIBKVM += $(LIBKVM_$(ARCH_DIR)) +# lib.mk defines $(OUTPUT), prepends $(OUTPUT)/ to $(TEST_GEN_PROGS), and most +# importantly defines, i.e. overwrites, $(CC) (unless `make -e` or `make CC=`, +# which causes the environment variable to override the makefile). +include ../lib.mk + INSTALL_HDR_PATH = $(top_srcdir)/usr LINUX_HDR_PATH = $(INSTALL_HDR_PATH)/include/ LINUX_TOOL_INCLUDE = $(top_srcdir)/tools/include @@ -205,10 +210,6 @@ pgste-option = $(call try-run, echo 'int main(void) { return 0; }' | \ LDLIBS += -ldl LDFLAGS += -pthread $(no-pie-option) $(pgste-option) -# After inclusion, $(OUTPUT) is defined and -# $(TEST_GEN_PROGS) starts with $(OUTPUT)/ -include ../lib.mk - LIBKVM_C := $(filter %.c,$(LIBKVM)) LIBKVM_S := $(filter %.S,$(LIBKVM)) LIBKVM_C_OBJ := $(patsubst %.c, $(OUTPUT)/%.o, $(LIBKVM_C)) From db7b780dab6742a8358ae7ecb1d0e972ccea8737 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Tue, 13 Dec 2022 00:16:50 +0000 Subject: [PATCH 24/68] KVM: selftests: Disable "gnu-variable-sized-type-not-at-end" warning Disable gnu-variable-sized-type-not-at-end so that tests and libraries can create overlays of variable sized arrays at the end of structs when using a fixed number of entries, e.g. to get/set a single MSR. It's possible to fudge around the warning, e.g. by defining a custom struct that hardcodes the number of entries, but that is a burden for both developers and readers of the code. lib/x86_64/processor.c:664:19: warning: field 'header' with variable sized type 'struct kvm_msrs' not at the end of a struct or class is a GNU extension [-Wgnu-variable-sized-type-not-at-end] struct kvm_msrs header; ^ lib/x86_64/processor.c:772:19: warning: field 'header' with variable sized type 'struct kvm_msrs' not at the end of a struct or class is a GNU extension [-Wgnu-variable-sized-type-not-at-end] struct kvm_msrs header; ^ lib/x86_64/processor.c:787:19: warning: field 'header' with variable sized type 'struct kvm_msrs' not at the end of a struct or class is a GNU extension [-Wgnu-variable-sized-type-not-at-end] struct kvm_msrs header; ^ 3 warnings generated. x86_64/hyperv_tlb_flush.c:54:18: warning: field 'hv_vp_set' with variable sized type 'struct hv_vpset' not at the end of a struct or class is a GNU extension [-Wgnu-variable-sized-type-not-at-end] struct hv_vpset hv_vp_set; ^ 1 warning generated. x86_64/xen_shinfo_test.c:137:25: warning: field 'info' with variable sized type 'struct kvm_irq_routing' not at the end of a struct or class is a GNU extension [-Wgnu-variable-sized-type-not-at-end] struct kvm_irq_routing info; ^ 1 warning generated.
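For context, the construct the warning fires on is an overlay that gives a flexible-array struct a fixed number of trailing entries, e.g. (a minimal sketch using the real UAPI types; note it is the same shape as the kvm_single_msr overlay deleted from vmx_tsc_adjust_test.c earlier in this series):

	struct kvm_single_msr {
		struct kvm_msrs header;     /* ends in a flexible entries[] array */
		struct kvm_msr_entry entry; /* occupies header.entries[0] */
	} single = {
		.header.nmsrs = 1,
	};

gcc has no equivalent warning, so allowing the GNU extension under clang keeps the two compilers in agreement.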
Signed-off-by: Sean Christopherson Message-Id: <20221213001653.3852042-12-seanjc@google.com> Signed-off-by: Paolo Bonzini --- tools/testing/selftests/kvm/Makefile | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile index 2acba3c74ad6d9..1750f91dd93629 100644 --- a/tools/testing/selftests/kvm/Makefile +++ b/tools/testing/selftests/kvm/Makefile @@ -194,6 +194,7 @@ else LINUX_TOOL_ARCH_INCLUDE = $(top_srcdir)/tools/arch/$(ARCH)/include endif CFLAGS += -Wall -Wstrict-prototypes -Wuninitialized -O2 -g -std=gnu99 \ + -Wno-gnu-variable-sized-type-not-at-end \ -fno-builtin-memcmp -fno-builtin-memcpy -fno-builtin-memset \ -fno-stack-protector -fno-PIE -I$(LINUX_TOOL_INCLUDE) \ -I$(LINUX_TOOL_ARCH_INCLUDE) -I$(LINUX_HDR_PATH) -Iinclude \ From 2f5213b8fc311eaa8fc78de7ecbd27ead027993c Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 9 Dec 2022 12:55:44 -0800 Subject: [PATCH 25/68] KVM: selftests: Use magic value to signal ucall_alloc() failure Use a magic value to signal a ucall_alloc() failure instead of simply doing GUEST_ASSERT(). GUEST_ASSERT() relies on ucall_alloc() and so a failure puts the guest into an infinite loop. Use -1 as the magic value, as a real ucall struct should never wrap. Reported-by: Oliver Upton Signed-off-by: Sean Christopherson Signed-off-by: Paolo Bonzini --- tools/testing/selftests/kvm/lib/ucall_common.c | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/tools/testing/selftests/kvm/lib/ucall_common.c b/tools/testing/selftests/kvm/lib/ucall_common.c index 0cc0971ce60e3a..2f0e2ea941cc66 100644 --- a/tools/testing/selftests/kvm/lib/ucall_common.c +++ b/tools/testing/selftests/kvm/lib/ucall_common.c @@ -4,6 +4,8 @@ #include "linux/bitmap.h" #include "linux/atomic.h" +#define GUEST_UCALL_FAILED -1 + struct ucall_header { DECLARE_BITMAP(in_use, KVM_MAX_VCPUS); struct ucall ucalls[KVM_MAX_VCPUS]; @@ -41,7 +43,8 @@ static struct ucall *ucall_alloc(void) struct ucall *uc; int i; - GUEST_ASSERT(ucall_pool); + if (!ucall_pool) + goto ucall_failed; for (i = 0; i < KVM_MAX_VCPUS; ++i) { if (!test_and_set_bit(i, ucall_pool->in_use)) { @@ -51,7 +54,13 @@ static struct ucall *ucall_alloc(void) } } - GUEST_ASSERT(0); +ucall_failed: + /* + * If the vCPU cannot grab a ucall structure, make a bare ucall with a + * magic value to signal to get_ucall() that things went sideways. + * GUEST_ASSERT() depends on ucall_alloc() and so cannot be used here. + */ + ucall_arch_do_ucall(GUEST_UCALL_FAILED); return NULL; } @@ -93,6 +102,9 @@ uint64_t get_ucall(struct kvm_vcpu *vcpu, struct ucall *uc) addr = ucall_arch_get_ucall(vcpu); if (addr) { + TEST_ASSERT(addr != (void *)GUEST_UCALL_FAILED, + "Guest failed to allocate ucall struct"); + memcpy(uc, addr, sizeof(*uc)); vcpu_run_complete_io(vcpu); } else { From feb84f6daa7e7d51444d13fa65df7d5562fd0075 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Mon, 12 Dec 2022 05:36:53 -0500 Subject: [PATCH 26/68] KVM: selftests: document the default implementation of vm_vaddr_populate_bitmap Explain the meaning of the bit manipulations of vm_vaddr_populate_bitmap. These correspond to the "canonical addresses" of x86 and other architectures, but that is not obvious. 
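As a worked example of that split (a hypothetical helper, assuming 'va_bits' is the architected VA width and relying on unsigned wraparound for the negative half):

	static bool va_in_default_valid_range(uint64_t va, unsigned int va_bits)
	{
		uint64_t half = 1ULL << (va_bits - 1);

		/* Low half: [0, 2^(va_bits-1)).  High half: [-2^(va_bits-1), -1]. */
		return va < half || va >= -half;
	}

With va_bits = 48 this accepts exactly the canonical x86-64 addresses, matching the comment added below.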
Signed-off-by: Paolo Bonzini --- tools/testing/selftests/kvm/lib/kvm_util.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c index c88c3ace16d2fc..bd892a81495182 100644 --- a/tools/testing/selftests/kvm/lib/kvm_util.c +++ b/tools/testing/selftests/kvm/lib/kvm_util.c @@ -186,6 +186,15 @@ const struct vm_guest_mode_params vm_guest_mode_params[] = { _Static_assert(sizeof(vm_guest_mode_params)/sizeof(struct vm_guest_mode_params) == NUM_VM_MODES, "Missing new mode params?"); +/* + * Initializes vm->vpages_valid to match the canonical VA space of the + * architecture. + * + * The default implementation is valid for architectures which split the + * range addressed by a single page table into a low and high region + * based on the MSB of the VA. On architectures with this behavior + * the VA region spans [0, 2^(va_bits - 1)), [-(2^(va_bits - 1)), -1]. + */ __weak void vm_vaddr_populate_bitmap(struct kvm_vm *vm) { sparsebit_set_num(vm->vpages_valid, From 7a16142505cbb9b80d5e998e32b1d882e0f45d64 Mon Sep 17 00:00:00 2001 From: Oliver Upton Date: Fri, 9 Dec 2022 01:53:04 +0000 Subject: [PATCH 27/68] KVM: arm64: selftests: Don't identity map the ucall MMIO hole Currently the ucall MMIO hole is placed immediately after slot0, which is a relatively safe address in the PA space. However, it is possible that the same address has already been used for something else (like the guest program image) in the VA space. At least in my own testing, building the vgic_irq test with clang leads to the MMIO hole appearing underneath gicv3_ops. Stop identity mapping the MMIO hole and instead find an unused VA to map to it. Yet another subtle detail of the KVM selftests library is that virt_pg_map() does not update vm->vpages_mapped. Switch over to virt_map() instead to guarantee that the chosen VA isn't mapped to something else. Signed-off-by: Oliver Upton Message-Id: <20221209015307.1781352-6-oliver.upton@linux.dev> Signed-off-by: Paolo Bonzini --- tools/testing/selftests/kvm/lib/aarch64/ucall.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/tools/testing/selftests/kvm/lib/aarch64/ucall.c b/tools/testing/selftests/kvm/lib/aarch64/ucall.c index 562c16dfbb0023..f212bd8ab93d80 100644 --- a/tools/testing/selftests/kvm/lib/aarch64/ucall.c +++ b/tools/testing/selftests/kvm/lib/aarch64/ucall.c @@ -14,11 +14,13 @@ static vm_vaddr_t *ucall_exit_mmio_addr; void ucall_arch_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa) { - virt_pg_map(vm, mmio_gpa, mmio_gpa); + vm_vaddr_t mmio_gva = vm_vaddr_unused_gap(vm, vm->page_size, KVM_UTIL_MIN_VADDR); + + virt_map(vm, mmio_gva, mmio_gpa, 1); vm->ucall_mmio_addr = mmio_gpa; - write_guest_global(vm, ucall_exit_mmio_addr, (vm_vaddr_t *)mmio_gpa); + write_guest_global(vm, ucall_exit_mmio_addr, (vm_vaddr_t *)mmio_gva); } void ucall_arch_do_ucall(vm_vaddr_t uc) From 92c8191bb5d3f670ed806f91823381193288a4e1 Mon Sep 17 00:00:00 2001 From: Oliver Upton Date: Fri, 9 Dec 2022 01:53:02 +0000 Subject: [PATCH 28/68] KVM: selftests: Mark correct page as mapped in virt_map() The loop marks vaddr as mapped after incrementing it by page size, thereby marking the *next* page as mapped. Set the bit in vpages_mapped first instead.
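As a worked example (hypothetical numbers, 4 KiB pages): mapping a single page at vaddr 0x11000 should set bit 0x11 in vpages_mapped, but the old loop advanced vaddr to 0x12000 before calling sparsebit_set(), so bit 0x12 was set and the page that was actually mapped was never recorded.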
Fixes: 56fc7732031d ("KVM: selftests: Fill in vm->vpages_mapped bitmap in virt_map() too") Signed-off-by: Oliver Upton Message-Id: <20221209015307.1781352-4-oliver.upton@linux.dev> Signed-off-by: Paolo Bonzini --- tools/testing/selftests/kvm/lib/kvm_util.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c index bd892a81495182..56d5ea949cbbe9 100644 --- a/tools/testing/selftests/kvm/lib/kvm_util.c +++ b/tools/testing/selftests/kvm/lib/kvm_util.c @@ -1425,10 +1425,10 @@ void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, while (npages--) { virt_pg_map(vm, vaddr, paddr); + sparsebit_set(vm->vpages_mapped, vaddr >> vm->page_shift); + vaddr += page_size; paddr += page_size; - - sparsebit_set(vm->vpages_mapped, vaddr >> vm->page_shift); } } From e0a78525f540f9d9a44a296f307b8b74cee4c288 Mon Sep 17 00:00:00 2001 From: Lukas Bulwahn Date: Mon, 5 Dec 2022 09:20:44 +0100 Subject: [PATCH 29/68] MAINTAINERS: adjust entry after renaming the vmx hyperv files Commit a789aeba4196 ("KVM: VMX: Rename "vmx/evmcs.{ch}" to "vmx/hyperv.{ch}"") renames the VMX specific Hyper-V files, but does not adjust the entry in MAINTAINERS. Hence, ./scripts/get_maintainer.pl --self-test=patterns complains about a broken reference. Repair this file reference in KVM X86 HYPER-V (KVM/hyper-v). Signed-off-by: Lukas Bulwahn Fixes: a789aeba4196 ("KVM: VMX: Rename "vmx/evmcs.{ch}" to "vmx/hyperv.{ch}"") Reviewed-by: Sean Christopherson Message-Id: <20221205082044.10141-1-lukas.bulwahn@gmail.com> Signed-off-by: Paolo Bonzini --- MAINTAINERS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/MAINTAINERS b/MAINTAINERS index 89672a59c0c3a1..8d90da0cda5a46 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -11322,7 +11322,7 @@ F: arch/x86/kvm/hyperv.* F: arch/x86/kvm/kvm_onhyperv.* F: arch/x86/kvm/svm/hyperv.* F: arch/x86/kvm/svm/svm_onhyperv.* -F: arch/x86/kvm/vmx/evmcs.* +F: arch/x86/kvm/vmx/hyperv.* KVM X86 Xen (KVM/Xen) M: David Woodhouse From a303def0fc18f0f2393b5c5f8ae3d2657a9713dc Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Wed, 7 Dec 2022 20:06:16 +0800 Subject: [PATCH 30/68] kvm: Remove the unused macro KVM_MMU_READ_{,UN}LOCK() No code is using KVM_MMU_READ_LOCK() or KVM_MMU_READ_UNLOCK(). They used to be in virt/kvm/pfncache.c: KVM_MMU_READ_LOCK(kvm); retry = mmu_notifier_retry_hva(kvm, mmu_seq, uhva); KVM_MMU_READ_UNLOCK(kvm); However, since 58cd407ca4c6 ("KVM: Fix multiple races in gfn=>pfn cache refresh", 2022-05-25) the code is only relying on the MMU notifier's invalidation count and sequence number. 
Signed-off-by: Lai Jiangshan Message-Id: <20221207120617.9409-1-jiangshanlai@gmail.com> Signed-off-by: Paolo Bonzini --- virt/kvm/kvm_mm.h | 4 ---- 1 file changed, 4 deletions(-) diff --git a/virt/kvm/kvm_mm.h b/virt/kvm/kvm_mm.h index a1ab15006af34e..180f1a09e6ba7b 100644 --- a/virt/kvm/kvm_mm.h +++ b/virt/kvm/kvm_mm.h @@ -14,14 +14,10 @@ #define KVM_MMU_LOCK_INIT(kvm) rwlock_init(&(kvm)->mmu_lock) #define KVM_MMU_LOCK(kvm) write_lock(&(kvm)->mmu_lock) #define KVM_MMU_UNLOCK(kvm) write_unlock(&(kvm)->mmu_lock) -#define KVM_MMU_READ_LOCK(kvm) read_lock(&(kvm)->mmu_lock) -#define KVM_MMU_READ_UNLOCK(kvm) read_unlock(&(kvm)->mmu_lock) #else #define KVM_MMU_LOCK_INIT(kvm) spin_lock_init(&(kvm)->mmu_lock) #define KVM_MMU_LOCK(kvm) spin_lock(&(kvm)->mmu_lock) #define KVM_MMU_UNLOCK(kvm) spin_unlock(&(kvm)->mmu_lock) -#define KVM_MMU_READ_LOCK(kvm) spin_lock(&(kvm)->mmu_lock) -#define KVM_MMU_READ_UNLOCK(kvm) spin_unlock(&(kvm)->mmu_lock) #endif /* KVM_HAVE_MMU_RWLOCK */ kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool interruptible, From 562f5bc48a8d99a8898c734ecacf061a79a88fbf Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Wed, 7 Dec 2022 20:05:05 +0800 Subject: [PATCH 31/68] kvm: x86/mmu: Remove duplicated "be split" in spte.h "be split be split" -> "be split" Signed-off-by: Lai Jiangshan Message-Id: <20221207120505.9175-1-jiangshanlai@gmail.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/spte.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kvm/mmu/spte.h b/arch/x86/kvm/mmu/spte.h index 1f03701b943a1c..6f54dc9409c943 100644 --- a/arch/x86/kvm/mmu/spte.h +++ b/arch/x86/kvm/mmu/spte.h @@ -363,7 +363,7 @@ static __always_inline bool is_rsvd_spte(struct rsvd_bits_validate *rsvd_check, * A shadow-present leaf SPTE may be non-writable for 4 possible reasons: * * 1. To intercept writes for dirty logging. KVM write-protects huge pages - * so that they can be split be split down into the dirty logging + * so that they can be split down into the dirty logging * granularity (4KiB) whenever the guest writes to them. KVM also * write-protects 4KiB pages so that writes can be recorded in the dirty log * (e.g. if not using PML). SPTEs are write-protected for dirty logging From 23e528d9bce2385967370ad95a7d52a3c7a0a016 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Wed, 7 Dec 2022 00:36:37 +0000 Subject: [PATCH 32/68] KVM: Delete extra block of "};" in the KVM API documentation Delete an extra block of code/documentation that snuck in when KVM's documentation was converted to ReST format. Fixes: 106ee47dc633 ("docs: kvm: Convert api.txt to ReST format") Signed-off-by: Sean Christopherson Message-Id: <20221207003637.2041211-1-seanjc@google.com> Signed-off-by: Paolo Bonzini --- Documentation/virt/kvm/api.rst | 5 ----- 1 file changed, 5 deletions(-) diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst index 778c6460d1de63..d795d683601cce 100644 --- a/Documentation/virt/kvm/api.rst +++ b/Documentation/virt/kvm/api.rst @@ -6577,11 +6577,6 @@ Please note that the kernel is allowed to use the kvm_run structure as the primary storage for certain register types. Therefore, the kernel may use the values in kvm_run even if the corresponding bit in kvm_dirty_regs is not set. -:: - - }; - - 6. 
Capabilities that can be enabled on vCPUs ============================================ From 385407a69d5140825d4cdab814cbf128ba63a64a Mon Sep 17 00:00:00 2001 From: Michal Luczaj Date: Mon, 26 Dec 2022 12:03:15 +0000 Subject: [PATCH 33/68] KVM: x86/xen: Fix memory leak in kvm_xen_write_hypercall_page() Release page irrespectively of kvm_vcpu_write_guest() return value. Suggested-by: Paul Durrant Fixes: 23200b7a30de ("KVM: x86/xen: intercept xen hypercalls if enabled") Signed-off-by: Michal Luczaj Message-Id: <20221220151454.712165-1-mhal@rbox.co> Reviewed-by: Paul Durrant Signed-off-by: David Woodhouse Message-Id: <20221226120320.1125390-1-dwmw2@infradead.org> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/xen.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c index d7af402402484b..d1a98d834d185f 100644 --- a/arch/x86/kvm/xen.c +++ b/arch/x86/kvm/xen.c @@ -1069,6 +1069,7 @@ int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data) u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64 : kvm->arch.xen_hvm_config.blob_size_32; u8 *page; + int ret; if (page_num >= blob_size) return 1; @@ -1079,10 +1080,10 @@ int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data) if (IS_ERR(page)) return PTR_ERR(page); - if (kvm_vcpu_write_guest(vcpu, page_addr, page, PAGE_SIZE)) { - kfree(page); + ret = kvm_vcpu_write_guest(vcpu, page_addr, page, PAGE_SIZE); + kfree(page); + if (ret) return 1; - } } return 0; } From 92c58965e9656dc6e682a8ffe520fac0fb256d13 Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Mon, 26 Dec 2022 12:03:16 +0000 Subject: [PATCH 34/68] KVM: x86/xen: Use kvm_read_guest_virt() instead of open-coding it badly In particular, we shouldn't assume that being contiguous in guest virtual address space means being contiguous in guest *physical* address space. In dropping the manual calls to kvm_mmu_gva_to_gpa_system(), also drop the srcu_read_lock() that was around them. All call sites are reached from kvm_xen_hypercall() which is called from the handle_exit function with the read lock already held. 
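A sketch of the failure mode, with hypothetical addresses: a 16-byte structure at GVA 0x7f0000000ff8 straddles a page boundary, and the guest is free to map the two virtual pages to entirely unrelated GPAs. kvm_read_guest_virt() translates each page separately, whereas translating only the first byte and then issuing a single kvm_vcpu_read_guest() for the whole structure reads past the end of the first page's physical mapping.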
Fixes: 536395260 ("KVM: x86/xen: handle PV timers oneshot mode") Fixes: 1a65105a5 ("KVM: x86/xen: handle PV spinlocks slowpath") Fixes: 2fd6df2f2 ("KVM: x86/xen: intercept EVTCHNOP_send from guests") Signed-off-by: David Woodhouse Message-Id: <20221226120320.1125390-2-dwmw2@infradead.org> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/xen.c | 56 +++++++++++++++------------------------- 1 file changed, 18 insertions(+), 38 deletions(-) diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c index d1a98d834d185f..929b887eafd7bf 100644 --- a/arch/x86/kvm/xen.c +++ b/arch/x86/kvm/xen.c @@ -1184,30 +1184,22 @@ static bool wait_pending_event(struct kvm_vcpu *vcpu, int nr_ports, static bool kvm_xen_schedop_poll(struct kvm_vcpu *vcpu, bool longmode, u64 param, u64 *r) { - int idx, i; struct sched_poll sched_poll; evtchn_port_t port, *ports; - gpa_t gpa; + struct x86_exception e; + int i; if (!lapic_in_kernel(vcpu) || !(vcpu->kvm->arch.xen_hvm_config.flags & KVM_XEN_HVM_CONFIG_EVTCHN_SEND)) return false; - idx = srcu_read_lock(&vcpu->kvm->srcu); - gpa = kvm_mmu_gva_to_gpa_system(vcpu, param, NULL); - srcu_read_unlock(&vcpu->kvm->srcu, idx); - if (!gpa) { - *r = -EFAULT; - return true; - } - if (IS_ENABLED(CONFIG_64BIT) && !longmode) { struct compat_sched_poll sp32; /* Sanity check that the compat struct definition is correct */ BUILD_BUG_ON(sizeof(sp32) != 16); - if (kvm_vcpu_read_guest(vcpu, gpa, &sp32, sizeof(sp32))) { + if (kvm_read_guest_virt(vcpu, param, &sp32, sizeof(sp32), &e)) { *r = -EFAULT; return true; } @@ -1221,8 +1213,8 @@ static bool kvm_xen_schedop_poll(struct kvm_vcpu *vcpu, bool longmode, sched_poll.nr_ports = sp32.nr_ports; sched_poll.timeout = sp32.timeout; } else { - if (kvm_vcpu_read_guest(vcpu, gpa, &sched_poll, - sizeof(sched_poll))) { + if (kvm_read_guest_virt(vcpu, param, &sched_poll, + sizeof(sched_poll), &e)) { *r = -EFAULT; return true; } @@ -1244,18 +1236,13 @@ static bool kvm_xen_schedop_poll(struct kvm_vcpu *vcpu, bool longmode, } else ports = &port; + if (kvm_read_guest_virt(vcpu, (gva_t)sched_poll.ports, ports, + sched_poll.nr_ports * sizeof(*ports), &e)) { + *r = -EFAULT; + return true; + } + for (i = 0; i < sched_poll.nr_ports; i++) { - idx = srcu_read_lock(&vcpu->kvm->srcu); - gpa = kvm_mmu_gva_to_gpa_system(vcpu, - (gva_t)(sched_poll.ports + i), - NULL); - srcu_read_unlock(&vcpu->kvm->srcu, idx); - - if (!gpa || kvm_vcpu_read_guest(vcpu, gpa, - &ports[i], sizeof(port))) { - *r = -EFAULT; - goto out; - } if (ports[i] >= max_evtchn_port(vcpu->kvm)) { *r = -EINVAL; goto out; @@ -1331,9 +1318,8 @@ static bool kvm_xen_hcall_vcpu_op(struct kvm_vcpu *vcpu, bool longmode, int cmd, int vcpu_id, u64 param, u64 *r) { struct vcpu_set_singleshot_timer oneshot; + struct x86_exception e; s64 delta; - gpa_t gpa; - int idx; if (!kvm_xen_timer_enabled(vcpu)) return false; @@ -1344,9 +1330,6 @@ static bool kvm_xen_hcall_vcpu_op(struct kvm_vcpu *vcpu, bool longmode, int cmd, *r = -EINVAL; return true; } - idx = srcu_read_lock(&vcpu->kvm->srcu); - gpa = kvm_mmu_gva_to_gpa_system(vcpu, param, NULL); - srcu_read_unlock(&vcpu->kvm->srcu, idx); /* * The only difference for 32-bit compat is the 4 bytes of @@ -1364,9 +1347,8 @@ static bool kvm_xen_hcall_vcpu_op(struct kvm_vcpu *vcpu, bool longmode, int cmd, BUILD_BUG_ON(sizeof_field(struct compat_vcpu_set_singleshot_timer, flags) != sizeof_field(struct vcpu_set_singleshot_timer, flags)); - if (!gpa || - kvm_vcpu_read_guest(vcpu, gpa, &oneshot, longmode ?
sizeof(oneshot) : - sizeof(struct compat_vcpu_set_singleshot_timer))) { + if (kvm_read_guest_virt(vcpu, param, &oneshot, longmode ? sizeof(oneshot) : + sizeof(struct compat_vcpu_set_singleshot_timer), &e)) { *r = -EFAULT; return true; } @@ -2003,14 +1985,12 @@ static bool kvm_xen_hcall_evtchn_send(struct kvm_vcpu *vcpu, u64 param, u64 *r) { struct evtchnfd *evtchnfd; struct evtchn_send send; - gpa_t gpa; - int idx; + struct x86_exception e; - idx = srcu_read_lock(&vcpu->kvm->srcu); - gpa = kvm_mmu_gva_to_gpa_system(vcpu, param, NULL); - srcu_read_unlock(&vcpu->kvm->srcu, idx); + /* Sanity check: this structure is the same for 32-bit and 64-bit */ + BUILD_BUG_ON(sizeof(send) != 4); - if (!gpa || kvm_vcpu_read_guest(vcpu, gpa, &send, sizeof(send))) { + if (kvm_read_guest_virt(vcpu, param, &send, sizeof(send), &e)) { *r = -EFAULT; return true; } From 70eae03087a3101493d9a1cf60c86c5f65600822 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Mon, 26 Dec 2022 12:03:17 +0000 Subject: [PATCH 35/68] KVM: x86/xen: Fix SRCU/RCU usage in readers of evtchn_ports The evtchnfd structure itself must be protected by either kvm->lock or SRCU. Use the former in kvm_xen_eventfd_update(), since the lock is being taken anyway; kvm_xen_hcall_evtchn_send() instead is a reader and does not need kvm->lock, and is called in SRCU critical section from the kvm_x86_handle_exit function. It is also important to use rcu_read_{lock,unlock}() in kvm_xen_hcall_evtchn_send(), because idr_remove() will *not* use synchronize_srcu() to wait for readers to complete. Remove a superfluous if (kvm) check before calling synchronize_srcu() in kvm_xen_eventfd_deassign() where kvm has been dereferenced already. Co-developed-by: Michal Luczaj Signed-off-by: Michal Luczaj Signed-off-by: Paolo Bonzini Signed-off-by: David Woodhouse Message-Id: <20221226120320.1125390-3-dwmw2@infradead.org> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/xen.c | 29 ++++++++++++++++++----------- 1 file changed, 18 insertions(+), 11 deletions(-) diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c index 929b887eafd7bf..9b75457120f7d4 100644 --- a/arch/x86/kvm/xen.c +++ b/arch/x86/kvm/xen.c @@ -1808,20 +1808,23 @@ static int kvm_xen_eventfd_update(struct kvm *kvm, { u32 port = data->u.evtchn.send_port; struct evtchnfd *evtchnfd; + int ret; if (!port || port >= max_evtchn_port(kvm)) return -EINVAL; + /* Protect writes to evtchnfd as well as the idr lookup. 
*/ mutex_lock(&kvm->lock); evtchnfd = idr_find(&kvm->arch.xen.evtchn_ports, port); - mutex_unlock(&kvm->lock); + ret = -ENOENT; if (!evtchnfd) - return -ENOENT; + goto out_unlock; /* For an UPDATE, nothing may change except the priority/vcpu */ + ret = -EINVAL; if (evtchnfd->type != data->u.evtchn.type) - return -EINVAL; + goto out_unlock; /* * Port cannot change, and if it's zero that was an eventfd @@ -1829,20 +1832,21 @@ static int kvm_xen_eventfd_update(struct kvm *kvm, */ if (!evtchnfd->deliver.port.port || evtchnfd->deliver.port.port != data->u.evtchn.deliver.port.port) - return -EINVAL; + goto out_unlock; /* We only support 2 level event channels for now */ if (data->u.evtchn.deliver.port.priority != KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL) - return -EINVAL; + goto out_unlock; - mutex_lock(&kvm->lock); evtchnfd->deliver.port.priority = data->u.evtchn.deliver.port.priority; if (evtchnfd->deliver.port.vcpu_id != data->u.evtchn.deliver.port.vcpu) { evtchnfd->deliver.port.vcpu_id = data->u.evtchn.deliver.port.vcpu; evtchnfd->deliver.port.vcpu_idx = -1; } + ret = 0; +out_unlock: mutex_unlock(&kvm->lock); - return 0; + return ret; } /* @@ -1935,8 +1939,7 @@ static int kvm_xen_eventfd_deassign(struct kvm *kvm, u32 port) if (!evtchnfd) return -ENOENT; - if (kvm) - synchronize_srcu(&kvm->srcu); + synchronize_srcu(&kvm->srcu); if (!evtchnfd->deliver.port.port) eventfd_ctx_put(evtchnfd->deliver.eventfd.ctx); kfree(evtchnfd); @@ -1989,14 +1992,18 @@ static bool kvm_xen_hcall_evtchn_send(struct kvm_vcpu *vcpu, u64 param, u64 *r) /* Sanity check: this structure is the same for 32-bit and 64-bit */ BUILD_BUG_ON(sizeof(send) != 4); - if (kvm_read_guest_virt(vcpu, param, &send, sizeof(send), &e)) { *r = -EFAULT; return true; } - /* The evtchn_ports idr is protected by vcpu->kvm->srcu */ + /* + * evtchnfd is protected by kvm->srcu; the idr lookup instead + * is protected by RCU. + */ + rcu_read_lock(); evtchnfd = idr_find(&vcpu->kvm->arch.xen.evtchn_ports, send.port); + rcu_read_unlock(); if (!evtchnfd) return false; From 1c14faa5087db0a098c3ab1e183f2b5df4b0d3f2 Mon Sep 17 00:00:00 2001 From: Michal Luczaj Date: Mon, 26 Dec 2022 12:03:18 +0000 Subject: [PATCH 36/68] KVM: x86/xen: Simplify eventfd IOCTLs Port number is validated in kvm_xen_setattr_evtchn(). Remove superfluous checks in kvm_xen_eventfd_assign() and kvm_xen_eventfd_update(). Signed-off-by: Michal Luczaj Message-Id: <20221222203021.1944101-3-mhal@rbox.co> Signed-off-by: Paolo Bonzini Signed-off-by: David Woodhouse Message-Id: <20221226120320.1125390-4-dwmw2@infradead.org> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/xen.c | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c index 9b75457120f7d4..bddbe5ac5cfa2d 100644 --- a/arch/x86/kvm/xen.c +++ b/arch/x86/kvm/xen.c @@ -1810,9 +1810,6 @@ static int kvm_xen_eventfd_update(struct kvm *kvm, struct evtchnfd *evtchnfd; int ret; - if (!port || port >= max_evtchn_port(kvm)) - return -EINVAL; - /* Protect writes to evtchnfd as well as the idr lookup. 
*/ mutex_lock(&kvm->lock); evtchnfd = idr_find(&kvm->arch.xen.evtchn_ports, port); @@ -1858,12 +1855,9 @@ static int kvm_xen_eventfd_assign(struct kvm *kvm, { u32 port = data->u.evtchn.send_port; struct eventfd_ctx *eventfd = NULL; - struct evtchnfd *evtchnfd = NULL; + struct evtchnfd *evtchnfd; int ret = -EINVAL; - if (!port || port >= max_evtchn_port(kvm)) - return -EINVAL; - evtchnfd = kzalloc(sizeof(struct evtchnfd), GFP_KERNEL); if (!evtchnfd) return -ENOMEM; From b0305c1e0e27ad91187bc6d5ac3d502799faf239 Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Mon, 26 Dec 2022 12:03:19 +0000 Subject: [PATCH 37/68] KVM: x86/xen: Add KVM_XEN_INVALID_GPA and KVM_XEN_INVALID_GFN to uapi These (uint64_t)-1 magic values are a userspace ABI, allowing the shared info pages and other enlightenments to be disabled. This isn't a Xen ABI because Xen doesn't let the guest turn these off except with the full SHUTDOWN_soft_reset mechanism. Under KVM, the userspace VMM is expected to handle soft reset, and tear down the kernel parts of the enlightenments accordingly. Suggested-by: Sean Christopherson Signed-off-by: David Woodhouse Message-Id: <20221226120320.1125390-5-dwmw2@infradead.org> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/xen.c | 14 +++++++------- include/uapi/linux/kvm.h | 3 +++ 2 files changed, 10 insertions(+), 7 deletions(-) diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c index bddbe5ac5cfa2d..b178f40bd863c2 100644 --- a/arch/x86/kvm/xen.c +++ b/arch/x86/kvm/xen.c @@ -41,7 +41,7 @@ static int kvm_xen_shared_info_init(struct kvm *kvm, gfn_t gfn) int ret = 0; int idx = srcu_read_lock(&kvm->srcu); - if (gfn == GPA_INVALID) { + if (gfn == KVM_XEN_INVALID_GFN) { kvm_gpc_deactivate(gpc); goto out; } @@ -659,7 +659,7 @@ int kvm_xen_hvm_get_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data) if (kvm->arch.xen.shinfo_cache.active) data->u.shared_info.gfn = gpa_to_gfn(kvm->arch.xen.shinfo_cache.gpa); else - data->u.shared_info.gfn = GPA_INVALID; + data->u.shared_info.gfn = KVM_XEN_INVALID_GFN; r = 0; break; @@ -705,7 +705,7 @@ int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data) BUILD_BUG_ON(offsetof(struct vcpu_info, time) != offsetof(struct compat_vcpu_info, time)); - if (data->u.gpa == GPA_INVALID) { + if (data->u.gpa == KVM_XEN_INVALID_GPA) { kvm_gpc_deactivate(&vcpu->arch.xen.vcpu_info_cache); r = 0; break; @@ -719,7 +719,7 @@ int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data) break; case KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO: - if (data->u.gpa == GPA_INVALID) { + if (data->u.gpa == KVM_XEN_INVALID_GPA) { kvm_gpc_deactivate(&vcpu->arch.xen.vcpu_time_info_cache); r = 0; break; @@ -739,7 +739,7 @@ int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data) r = -EOPNOTSUPP; break; } - if (data->u.gpa == GPA_INVALID) { + if (data->u.gpa == KVM_XEN_INVALID_GPA) { r = 0; deactivate_out: kvm_gpc_deactivate(&vcpu->arch.xen.runstate_cache); @@ -937,7 +937,7 @@ int kvm_xen_vcpu_get_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data) if (vcpu->arch.xen.vcpu_info_cache.active) data->u.gpa = vcpu->arch.xen.vcpu_info_cache.gpa; else - data->u.gpa = GPA_INVALID; + data->u.gpa = KVM_XEN_INVALID_GPA; r = 0; break; @@ -945,7 +945,7 @@ int kvm_xen_vcpu_get_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data) if (vcpu->arch.xen.vcpu_time_info_cache.active) data->u.gpa = vcpu->arch.xen.vcpu_time_info_cache.gpa; else - data->u.gpa = GPA_INVALID; + data->u.gpa = KVM_XEN_INVALID_GPA; r = 0; break; diff --git
a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 20522d4ba1e0d8..55155e262646e5 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -1767,6 +1767,7 @@ struct kvm_xen_hvm_attr { __u8 runstate_update_flag; struct { __u64 gfn; +#define KVM_XEN_INVALID_GFN ((__u64)-1) } shared_info; struct { __u32 send_port; @@ -1798,6 +1799,7 @@ struct kvm_xen_hvm_attr { } u; }; + /* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_SHARED_INFO */ #define KVM_XEN_ATTR_TYPE_LONG_MODE 0x0 #define KVM_XEN_ATTR_TYPE_SHARED_INFO 0x1 @@ -1823,6 +1825,7 @@ struct kvm_xen_vcpu_attr { __u16 pad[3]; union { __u64 gpa; +#define KVM_XEN_INVALID_GPA ((__u64)-1) __u64 pad[8]; struct { __u64 state; From af2808906aab0bf5786021d45b3ebfca6f4ad72f Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Mon, 26 Dec 2022 12:03:20 +0000 Subject: [PATCH 38/68] KVM: x86/xen: Documentation updates and clarifications Most notably, the KVM_XEN_EVTCHN_RESET feature had escaped documentation entirely. Along with how to turn most stuff off on SHUTDOWN_soft_reset. Signed-off-by: David Woodhouse Message-Id: <20221226120320.1125390-6-dwmw2@infradead.org> Signed-off-by: Paolo Bonzini --- Documentation/virt/kvm/api.rst | 41 +++++++++++++++++++++------------- 1 file changed, 26 insertions(+), 15 deletions(-) diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst index d795d683601cce..af64716573952b 100644 --- a/Documentation/virt/kvm/api.rst +++ b/Documentation/virt/kvm/api.rst @@ -5343,9 +5343,9 @@ KVM_XEN_ATTR_TYPE_SHARED_INFO 32 vCPUs in the shared_info page, KVM does not automatically do so and instead requires that KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO be used explicitly even when the vcpu_info for a given vCPU resides at the - "default" location in the shared_info page. This is because KVM is - not aware of the Xen CPU id which is used as the index into the - vcpu_info[] array, so cannot know the correct default location. + "default" location in the shared_info page. This is because KVM may + not be aware of the Xen CPU id which is used as the index into the + vcpu_info[] array, so may not know the correct default location. Note that the shared info page may be constantly written to by KVM; it contains the event channel bitmap used to deliver interrupts to @@ -5356,23 +5356,29 @@ KVM_XEN_ATTR_TYPE_SHARED_INFO any vCPU has been running or any event channel interrupts can be routed to the guest. + Setting the gfn to KVM_XEN_INVALID_GFN will disable the shared info + page. + KVM_XEN_ATTR_TYPE_UPCALL_VECTOR Sets the exception vector used to deliver Xen event channel upcalls. This is the HVM-wide vector injected directly by the hypervisor (not through the local APIC), typically configured by a guest via - HVM_PARAM_CALLBACK_IRQ. + HVM_PARAM_CALLBACK_IRQ. This can be disabled again (e.g. for guest + SHUTDOWN_soft_reset) by setting it to zero. KVM_XEN_ATTR_TYPE_EVTCHN This attribute is available when the KVM_CAP_XEN_HVM ioctl indicates support for KVM_XEN_HVM_CONFIG_EVTCHN_SEND features. It configures an outbound port number for interception of EVTCHNOP_send requests - from the guest. A given sending port number may be directed back - to a specified vCPU (by APIC ID) / port / priority on the guest, - or to trigger events on an eventfd. The vCPU and priority can be - changed by setting KVM_XEN_EVTCHN_UPDATE in a subsequent call, - but other fields cannot change for a given sending port. A port - mapping is removed by using KVM_XEN_EVTCHN_DEASSIGN in the flags - field. + from the guest.
A given sending port number may be directed back to + a specified vCPU (by APIC ID) / port / priority on the guest, or to + trigger events on an eventfd. The vCPU and priority can be changed + by setting KVM_XEN_EVTCHN_UPDATE in a subsequent call, but other + fields cannot change for a given sending port. A port mapping is + removed by using KVM_XEN_EVTCHN_DEASSIGN in the flags field. Passing + KVM_XEN_EVTCHN_RESET in the flags field removes all interception of + outbound event channels. The values of the flags field are mutually + exclusive and cannot be combined as a bitmask. KVM_XEN_ATTR_TYPE_XEN_VERSION This attribute is available when the KVM_CAP_XEN_HVM ioctl indicates @@ -5388,7 +5394,7 @@ KVM_XEN_ATTR_TYPE_RUNSTATE_UPDATE_FLAG support for KVM_XEN_HVM_CONFIG_RUNSTATE_UPDATE_FLAG. It enables the XEN_RUNSTATE_UPDATE flag which allows guest vCPUs to safely read other vCPUs' vcpu_runstate_info. Xen guests enable this feature via - the VM_ASST_TYPE_runstate_update_flag of the HYPERVISOR_vm_assist + the VMASST_TYPE_runstate_update_flag of the HYPERVISOR_vm_assist hypercall. 4.127 KVM_XEN_HVM_GET_ATTR @@ -5446,15 +5452,18 @@ KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO As with the shared_info page for the VM, the corresponding page may be dirtied at any time if event channel interrupt delivery is enabled, so userspace should always assume that the page is dirty without relying - on dirty logging. + on dirty logging. Setting the gpa to KVM_XEN_INVALID_GPA will disable + the vcpu_info. KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO Sets the guest physical address of an additional pvclock structure for a given vCPU. This is typically used for guest vsyscall support. + Setting the gpa to KVM_XEN_INVALID_GPA will disable the structure. KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADDR Sets the guest physical address of the vcpu_runstate_info for a given vCPU. This is how a Xen guest tracks CPU state such as steal time. + Setting the gpa to KVM_XEN_INVALID_GPA will disable the runstate area. KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_CURRENT Sets the runstate (RUNSTATE_running/_runnable/_blocked/_offline) of @@ -5487,7 +5496,8 @@ KVM_XEN_VCPU_ATTR_TYPE_TIMER This attribute is available when the KVM_CAP_XEN_HVM ioctl indicates support for KVM_XEN_HVM_CONFIG_EVTCHN_SEND features. It sets the event channel port/priority for the VIRQ_TIMER of the vCPU, as well - as allowing a pending timer to be saved/restored. + as allowing a pending timer to be saved/restored. Setting the timer + port to zero disables kernel handling of the singleshot timer. KVM_XEN_VCPU_ATTR_TYPE_UPCALL_VECTOR This attribute is available when the KVM_CAP_XEN_HVM ioctl indicates @@ -5495,7 +5505,8 @@ KVM_XEN_VCPU_ATTR_TYPE_UPCALL_VECTOR per-vCPU local APIC upcall vector, configured by a Xen guest with the HVMOP_set_evtchn_upcall_vector hypercall. This is typically used by Windows guests, and is distinct from the HVM-wide upcall - vector configured with HVM_PARAM_CALLBACK_IRQ. + vector configured with HVM_PARAM_CALLBACK_IRQ. It is disabled by + setting the vector to zero. 4.129 KVM_XEN_VCPU_GET_ATTR From a79b53aaaab53de017517bf9579b6106397a523c Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Wed, 28 Dec 2022 05:33:41 -0500 Subject: [PATCH 39/68] KVM: x86: fix deadlock for KVM_XEN_EVTCHN_RESET While KVM_XEN_EVTCHN_RESET is usually called with no vCPUs running, if that is not the case it could cause a deadlock. This is due to kvm_xen_eventfd_reset() doing a synchronize_srcu() inside a kvm->lock critical section.
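A rough sketch of the ordering that deadlocks (the tasks are hypothetical, the calls are the real ones):

	vCPU task                              ioctl task
	---------                              ----------
	srcu_read_lock(&kvm->srcu);
	                                       mutex_lock(&kvm->lock);
	mutex_lock(&kvm->lock);  /* blocks */
	                                       synchronize_srcu(&kvm->srcu);
	                                       /* waits forever for the reader,
	                                          which waits for kvm->lock */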
To avoid this, first collect all the evtchnfd objects in an array and free all of them once the kvm->lock critical section is over and the SRCU grace period has expired. Reported-by: Michal Luczaj Cc: David Woodhouse Signed-off-by: Paolo Bonzini --- arch/x86/kvm/xen.c | 30 +++++++++++++++++-- .../selftests/kvm/x86_64/xen_shinfo_test.c | 6 ++++ 2 files changed, 33 insertions(+), 3 deletions(-) diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c index b178f40bd863c2..2e29bdc2949ca9 100644 --- a/arch/x86/kvm/xen.c +++ b/arch/x86/kvm/xen.c @@ -1942,18 +1942,42 @@ static int kvm_xen_eventfd_deassign(struct kvm *kvm, u32 port) static int kvm_xen_eventfd_reset(struct kvm *kvm) { - struct evtchnfd *evtchnfd; + struct evtchnfd *evtchnfd, **all_evtchnfds; int i; + int n = 0; mutex_lock(&kvm->lock); + + /* + * Because synchronize_srcu() cannot be called inside the + * critical section, first collect all the evtchnfd objects + * in an array as they are removed from evtchn_ports. + */ + idr_for_each_entry(&kvm->arch.xen.evtchn_ports, evtchnfd, i) + n++; + + all_evtchnfds = kmalloc_array(n, sizeof(struct evtchnfd *), GFP_KERNEL); + if (!all_evtchnfds) { + mutex_unlock(&kvm->lock); + return -ENOMEM; + } + + n = 0; idr_for_each_entry(&kvm->arch.xen.evtchn_ports, evtchnfd, i) { + all_evtchnfds[n++] = evtchnfd; idr_remove(&kvm->arch.xen.evtchn_ports, evtchnfd->send_port); - synchronize_srcu(&kvm->srcu); + } + mutex_unlock(&kvm->lock); + + synchronize_srcu(&kvm->srcu); + + while (n--) { + evtchnfd = all_evtchnfds[n]; if (!evtchnfd->deliver.port.port) eventfd_ctx_put(evtchnfd->deliver.eventfd.ctx); kfree(evtchnfd); } - mutex_unlock(&kvm->lock); + kfree(all_evtchnfds); return 0; } diff --git a/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c b/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c index 721f6a693799b5..dae510c263b45b 100644 --- a/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c +++ b/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c @@ -962,6 +962,12 @@ int main(int argc, char *argv[]) } done: + struct kvm_xen_hvm_attr evt_reset = { + .type = KVM_XEN_ATTR_TYPE_EVTCHN, + .u.evtchn.flags = KVM_XEN_EVTCHN_RESET, + }; + vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &evt_reset); + alarm(0); clock_gettime(CLOCK_REALTIME, &max_ts); From 02d9a04da453984b16f4a585ad808cf961df495e Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Wed, 28 Dec 2022 06:00:22 -0500 Subject: [PATCH 40/68] Documentation: kvm: clarify SRCU locking order Currently only the locking order of SRCU vs kvm->slots_arch_lock and kvm->slots_lock is documented. Extend this to kvm->lock since Xen emulation got it terribly wrong. Signed-off-by: Paolo Bonzini --- Documentation/virt/kvm/locking.rst | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-) diff --git a/Documentation/virt/kvm/locking.rst b/Documentation/virt/kvm/locking.rst index 845a561629f19d..a3ca76f9be7510 100644 --- a/Documentation/virt/kvm/locking.rst +++ b/Documentation/virt/kvm/locking.rst @@ -16,17 +16,26 @@ The acquisition orders for mutexes are as follows: - kvm->slots_lock is taken outside kvm->irq_lock, though acquiring them together is quite rare. -- Unlike kvm->slots_lock, kvm->slots_arch_lock is released before - synchronize_srcu(&kvm->srcu). Therefore kvm->slots_arch_lock - can be taken inside a kvm->srcu read-side critical section, - while kvm->slots_lock cannot. - - kvm->mn_active_invalidate_count ensures that pairs of invalidate_range_start() and invalidate_range_end() callbacks use the same memslots array.
kvm->slots_lock and kvm->slots_arch_lock are taken on the waiting side in install_new_memslots, so MMU notifiers must not take either kvm->slots_lock or kvm->slots_arch_lock. +For SRCU: + +- ``synchronize_srcu(&kvm->srcu)`` is called _inside_ + the kvm->slots_lock critical section, therefore kvm->slots_lock + cannot be taken inside a kvm->srcu read-side critical section. + Instead, kvm->slots_arch_lock is released before the call + to ``synchronize_srcu()`` and _can_ be taken inside a + kvm->srcu read-side critical section. + +- kvm->lock is taken inside kvm->srcu, therefore + ``synchronize_srcu(&kvm->srcu)`` cannot be called inside + a kvm->lock critical section. If you cannot delay the + call until after kvm->lock is released, use ``call_srcu``. + On x86: - vcpu->mutex is taken outside kvm->arch.hyperv.hv_lock From 129c48cde6c9e519d033305649665427c6cac494 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Wed, 30 Nov 2022 13:11:47 -0500 Subject: [PATCH 41/68] KVM: selftests: restore special vmmcall code layout needed by the harness Commit 8fda37cf3d41 ("KVM: selftests: Stuff RAX/RCX with 'safe' values in vmmcall()/vmcall()", 2022-11-21) broke the svm_nested_soft_inject_test because it placed a "pop rbp" instruction after vmmcall. While this is correct and mimics what is done in the VMX case, this particular test expects a ud2 instruction right after the vmmcall, so that it can skip over it in the L1 part of the test. Inline a suitably-modified version of vmmcall() to restore the functionality of the test. Fixes: 8fda37cf3d41 ("KVM: selftests: Stuff RAX/RCX with 'safe' values in vmmcall()/vmcall()") Cc: Vitaly Kuznetsov Signed-off-by: Paolo Bonzini Reviewed-by: Sean Christopherson Reviewed-by: Vitaly Kuznetsov Reviewed-by: Maxim Levitsky Message-Id: <20221130181147.9911-1-pbonzini@redhat.com> Signed-off-by: Paolo Bonzini --- .../kvm/x86_64/svm_nested_soft_inject_test.c | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/tools/testing/selftests/kvm/x86_64/svm_nested_soft_inject_test.c b/tools/testing/selftests/kvm/x86_64/svm_nested_soft_inject_test.c index e497ace629c192..b34980d45648a6 100644 --- a/tools/testing/selftests/kvm/x86_64/svm_nested_soft_inject_test.c +++ b/tools/testing/selftests/kvm/x86_64/svm_nested_soft_inject_test.c @@ -41,8 +41,17 @@ static void guest_int_handler(struct ex_regs *regs) static void l2_guest_code_int(void) { GUEST_ASSERT_1(int_fired == 1, int_fired); - vmmcall(); - ud2(); + + /* + * Same as the vmmcall() function, but with a ud2 sneaked after the + * vmmcall. The caller injects an exception with the return address + * increased by 2, so the "pop rbp" must be after the ud2 and we cannot + * use vmmcall() directly.
+ */ + __asm__ __volatile__("push %%rbp; vmmcall; ud2; pop %%rbp" + : : "a"(0xdeadbeef), "c"(0xbeefdead) + : "rbx", "rdx", "rsi", "rdi", "r8", "r9", + "r10", "r11", "r12", "r13", "r14", "r15"); GUEST_ASSERT_1(bp_fired == 1, bp_fired); hlt(); From a2ad080b4dfd6bf97738bc79f8a6eca5790c9ae5 Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Fri, 4 Nov 2022 15:47:05 +0100 Subject: [PATCH 42/68] KVM: nVMX: Sanitize primary processor-based VM-execution controls with eVMCS too The only unsupported primary processor-based VM-execution control at the moment is CPU_BASED_ACTIVATE_TERTIARY_CONTROLS and KVM doesn't expose it in nested VMX feature MSRs anyway (see nested_vmx_setup_ctls_msrs()) but in preparation to inverting "unsupported with eVMCS" checks (and for completeness) it's better to sanitize MSR_IA32_VMX_PROCBASED_CTLS/ MSR_IA32_VMX_TRUE_PROCBASED_CTLS too. No functional change intended. Signed-off-by: Vitaly Kuznetsov Message-Id: <20221104144708.435865-2-vkuznets@redhat.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/hyperv.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/arch/x86/kvm/vmx/hyperv.c b/arch/x86/kvm/vmx/hyperv.c index ae03d1fe035521..04ea0259ab7feb 100644 --- a/arch/x86/kvm/vmx/hyperv.c +++ b/arch/x86/kvm/vmx/hyperv.c @@ -361,6 +361,7 @@ enum evmcs_revision { enum evmcs_ctrl_type { EVMCS_EXIT_CTRLS, EVMCS_ENTRY_CTRLS, + EVMCS_EXEC_CTRL, EVMCS_2NDEXEC, EVMCS_PINCTRL, EVMCS_VMFUNC, @@ -374,6 +375,9 @@ static const u32 evmcs_unsupported_ctrls[NR_EVMCS_CTRLS][NR_EVMCS_REVISIONS] = { [EVMCS_ENTRY_CTRLS] = { [EVMCSv1_LEGACY] = EVMCS1_UNSUPPORTED_VMENTRY_CTRL, }, + [EVMCS_EXEC_CTRL] = { + [EVMCSv1_LEGACY] = EVMCS1_UNSUPPORTED_EXEC_CTRL, + }, [EVMCS_2NDEXEC] = { [EVMCSv1_LEGACY] = EVMCS1_UNSUPPORTED_2NDEXEC, }, @@ -434,6 +438,10 @@ void nested_evmcs_filter_control_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 * unsupported_ctrls |= VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL; ctl_high &= ~unsupported_ctrls; break; + case MSR_IA32_VMX_PROCBASED_CTLS: + case MSR_IA32_VMX_TRUE_PROCBASED_CTLS: + ctl_high &= ~evmcs_get_unsupported_ctls(EVMCS_EXEC_CTRL); + break; case MSR_IA32_VMX_PROCBASED_CTLS2: ctl_high &= ~evmcs_get_unsupported_ctls(EVMCS_2NDEXEC); break; @@ -461,6 +469,10 @@ int nested_evmcs_check_controls(struct vmcs12 *vmcs12) vmcs12->pin_based_vm_exec_control))) return -EINVAL; + if (CC(!nested_evmcs_is_valid_controls(EVMCS_EXEC_CTRL, + vmcs12->cpu_based_vm_exec_control))) + return -EINVAL; + if (CC(!nested_evmcs_is_valid_controls(EVMCS_2NDEXEC, vmcs12->secondary_vm_exec_control))) return -EINVAL; From 96d6955d215e6234bb820fd23756b2a9b77aef0f Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Fri, 4 Nov 2022 15:47:06 +0100 Subject: [PATCH 43/68] KVM: nVMX: Invert 'unsupported by eVMCSv1' check When a new feature gets implemented in KVM, EVMCS1_UNSUPPORTED_* defines need to be adjusted to avoid the situation when the feature is exposed to the guest but there's no corresponding eVMCS field[s] for it. This is not obvious and fragile. Invert 'unsupported by eVMCSv1' check and make it 'supported by eVMCSv1' instead, this way it's much harder to make a mistake. New features will get added to EVMCS1_SUPPORTED_* defines when the corresponding fields are added to eVMCS definition. No functional change intended. EVMCS1_SUPPORTED_* defines are composed by taking KVM_{REQUIRED,OPTIONAL}_VMX_ defines and filtering out what was previously known as EVMCS1_UNSUPPORTED_*. 
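As a quick illustration of why the inverted check is safer, consider a hypothetical SECONDARY_EXEC_NEW_FEATURE bit that KVM starts supporting: with the old scheme the bit would leak through the filtering until someone remembered to add it to EVMCS1_UNSUPPORTED_2NDEXEC, while with

	ctl_high &= evmcs_get_supported_ctls(EVMCS_2NDEXEC);

the new bit stays masked off until it is deliberately added to EVMCS1_SUPPORTED_2NDEXEC alongside the corresponding eVMCS field.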
From all the controls, SECONDARY_EXEC_TSC_SCALING requires special handling as it's actually present in eVMCSv1 definition but is not currently supported for Hyper-V-on-KVM, just for KVM-on-Hyper-V. As evmcs_supported_ctrls will be used for both scenarios, just add it there instead of EVMCS1_SUPPORTED_2NDEXEC. Signed-off-by: Vitaly Kuznetsov Message-Id: <20221104144708.435865-3-vkuznets@redhat.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/hyperv.c | 42 +++++++++--------- arch/x86/kvm/vmx/hyperv.h | 90 ++++++++++++++++++++++++++++++++------- 2 files changed, 96 insertions(+), 36 deletions(-) diff --git a/arch/x86/kvm/vmx/hyperv.c b/arch/x86/kvm/vmx/hyperv.c index 04ea0259ab7feb..77027f6a9b41e1 100644 --- a/arch/x86/kvm/vmx/hyperv.c +++ b/arch/x86/kvm/vmx/hyperv.c @@ -368,32 +368,32 @@ enum evmcs_ctrl_type { NR_EVMCS_CTRLS, }; -static const u32 evmcs_unsupported_ctrls[NR_EVMCS_CTRLS][NR_EVMCS_REVISIONS] = { +static const u32 evmcs_supported_ctrls[NR_EVMCS_CTRLS][NR_EVMCS_REVISIONS] = { [EVMCS_EXIT_CTRLS] = { - [EVMCSv1_LEGACY] = EVMCS1_UNSUPPORTED_VMEXIT_CTRL, + [EVMCSv1_LEGACY] = EVMCS1_SUPPORTED_VMEXIT_CTRL, }, [EVMCS_ENTRY_CTRLS] = { - [EVMCSv1_LEGACY] = EVMCS1_UNSUPPORTED_VMENTRY_CTRL, + [EVMCSv1_LEGACY] = EVMCS1_SUPPORTED_VMENTRY_CTRL, }, [EVMCS_EXEC_CTRL] = { - [EVMCSv1_LEGACY] = EVMCS1_UNSUPPORTED_EXEC_CTRL, + [EVMCSv1_LEGACY] = EVMCS1_SUPPORTED_EXEC_CTRL, }, [EVMCS_2NDEXEC] = { - [EVMCSv1_LEGACY] = EVMCS1_UNSUPPORTED_2NDEXEC, + [EVMCSv1_LEGACY] = EVMCS1_SUPPORTED_2NDEXEC & ~SECONDARY_EXEC_TSC_SCALING, }, [EVMCS_PINCTRL] = { - [EVMCSv1_LEGACY] = EVMCS1_UNSUPPORTED_PINCTRL, + [EVMCSv1_LEGACY] = EVMCS1_SUPPORTED_PINCTRL, }, [EVMCS_VMFUNC] = { - [EVMCSv1_LEGACY] = EVMCS1_UNSUPPORTED_VMFUNC, + [EVMCSv1_LEGACY] = EVMCS1_SUPPORTED_VMFUNC, }, }; -static u32 evmcs_get_unsupported_ctls(enum evmcs_ctrl_type ctrl_type) +static u32 evmcs_get_supported_ctls(enum evmcs_ctrl_type ctrl_type) { enum evmcs_revision evmcs_rev = EVMCSv1_LEGACY; - return evmcs_unsupported_ctrls[ctrl_type][evmcs_rev]; + return evmcs_supported_ctrls[ctrl_type][evmcs_rev]; } static bool evmcs_has_perf_global_ctrl(struct kvm_vcpu *vcpu) @@ -417,7 +417,7 @@ void nested_evmcs_filter_control_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 * { u32 ctl_low = (u32)*pdata; u32 ctl_high = (u32)(*pdata >> 32); - u32 unsupported_ctrls; + u32 supported_ctrls; /* * Hyper-V 2016 and 2019 try using these features even when eVMCS @@ -426,31 +426,31 @@ void nested_evmcs_filter_control_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 * switch (msr_index) { case MSR_IA32_VMX_EXIT_CTLS: case MSR_IA32_VMX_TRUE_EXIT_CTLS: - unsupported_ctrls = evmcs_get_unsupported_ctls(EVMCS_EXIT_CTRLS); + supported_ctrls = evmcs_get_supported_ctls(EVMCS_EXIT_CTRLS); if (!evmcs_has_perf_global_ctrl(vcpu)) - unsupported_ctrls |= VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL; - ctl_high &= ~unsupported_ctrls; + supported_ctrls &= ~VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL; + ctl_high &= supported_ctrls; break; case MSR_IA32_VMX_ENTRY_CTLS: case MSR_IA32_VMX_TRUE_ENTRY_CTLS: - unsupported_ctrls = evmcs_get_unsupported_ctls(EVMCS_ENTRY_CTRLS); + supported_ctrls = evmcs_get_supported_ctls(EVMCS_ENTRY_CTRLS); if (!evmcs_has_perf_global_ctrl(vcpu)) - unsupported_ctrls |= VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL; - ctl_high &= ~unsupported_ctrls; + supported_ctrls &= ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL; + ctl_high &= supported_ctrls; break; case MSR_IA32_VMX_PROCBASED_CTLS: case MSR_IA32_VMX_TRUE_PROCBASED_CTLS: - ctl_high &= ~evmcs_get_unsupported_ctls(EVMCS_EXEC_CTRL); + ctl_high &= 
evmcs_get_supported_ctls(EVMCS_EXEC_CTRL); break; case MSR_IA32_VMX_PROCBASED_CTLS2: - ctl_high &= ~evmcs_get_unsupported_ctls(EVMCS_2NDEXEC); + ctl_high &= evmcs_get_supported_ctls(EVMCS_2NDEXEC); break; case MSR_IA32_VMX_TRUE_PINBASED_CTLS: case MSR_IA32_VMX_PINBASED_CTLS: - ctl_high &= ~evmcs_get_unsupported_ctls(EVMCS_PINCTRL); + ctl_high &= evmcs_get_supported_ctls(EVMCS_PINCTRL); break; case MSR_IA32_VMX_VMFUNC: - ctl_low &= ~evmcs_get_unsupported_ctls(EVMCS_VMFUNC); + ctl_low &= evmcs_get_supported_ctls(EVMCS_VMFUNC); break; } @@ -460,7 +460,7 @@ void nested_evmcs_filter_control_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 * static bool nested_evmcs_is_valid_controls(enum evmcs_ctrl_type ctrl_type, u32 val) { - return !(val & evmcs_get_unsupported_ctls(ctrl_type)); + return !(val & ~evmcs_get_supported_ctls(ctrl_type)); } int nested_evmcs_check_controls(struct vmcs12 *vmcs12) diff --git a/arch/x86/kvm/vmx/hyperv.h b/arch/x86/kvm/vmx/hyperv.h index 571e7929d14e7b..a9da47c69a2be6 100644 --- a/arch/x86/kvm/vmx/hyperv.h +++ b/arch/x86/kvm/vmx/hyperv.h @@ -48,22 +48,82 @@ DECLARE_STATIC_KEY_FALSE(enable_evmcs); * Currently unsupported in KVM: * GUEST_IA32_RTIT_CTL = 0x00002814, */ -#define EVMCS1_UNSUPPORTED_PINCTRL (PIN_BASED_POSTED_INTR | \ - PIN_BASED_VMX_PREEMPTION_TIMER) -#define EVMCS1_UNSUPPORTED_EXEC_CTRL (CPU_BASED_ACTIVATE_TERTIARY_CONTROLS) -#define EVMCS1_UNSUPPORTED_2NDEXEC \ - (SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | \ - SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | \ - SECONDARY_EXEC_APIC_REGISTER_VIRT | \ - SECONDARY_EXEC_ENABLE_PML | \ - SECONDARY_EXEC_ENABLE_VMFUNC | \ - SECONDARY_EXEC_SHADOW_VMCS | \ +#define EVMCS1_SUPPORTED_PINCTRL \ + (PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR | \ + PIN_BASED_EXT_INTR_MASK | \ + PIN_BASED_NMI_EXITING | \ + PIN_BASED_VIRTUAL_NMIS) + +#define EVMCS1_SUPPORTED_EXEC_CTRL \ + (CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR | \ + CPU_BASED_HLT_EXITING | \ + CPU_BASED_CR3_LOAD_EXITING | \ + CPU_BASED_CR3_STORE_EXITING | \ + CPU_BASED_UNCOND_IO_EXITING | \ + CPU_BASED_MOV_DR_EXITING | \ + CPU_BASED_USE_TSC_OFFSETTING | \ + CPU_BASED_MWAIT_EXITING | \ + CPU_BASED_MONITOR_EXITING | \ + CPU_BASED_INVLPG_EXITING | \ + CPU_BASED_RDPMC_EXITING | \ + CPU_BASED_INTR_WINDOW_EXITING | \ + CPU_BASED_CR8_LOAD_EXITING | \ + CPU_BASED_CR8_STORE_EXITING | \ + CPU_BASED_RDTSC_EXITING | \ + CPU_BASED_TPR_SHADOW | \ + CPU_BASED_USE_IO_BITMAPS | \ + CPU_BASED_MONITOR_TRAP_FLAG | \ + CPU_BASED_USE_MSR_BITMAPS | \ + CPU_BASED_NMI_WINDOW_EXITING | \ + CPU_BASED_PAUSE_EXITING | \ + CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) + +#define EVMCS1_SUPPORTED_2NDEXEC \ + (SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | \ + SECONDARY_EXEC_WBINVD_EXITING | \ + SECONDARY_EXEC_ENABLE_VPID | \ + SECONDARY_EXEC_ENABLE_EPT | \ + SECONDARY_EXEC_UNRESTRICTED_GUEST | \ + SECONDARY_EXEC_DESC | \ + SECONDARY_EXEC_ENABLE_RDTSCP | \ + SECONDARY_EXEC_ENABLE_INVPCID | \ + SECONDARY_EXEC_XSAVES | \ + SECONDARY_EXEC_RDSEED_EXITING | \ + SECONDARY_EXEC_RDRAND_EXITING | \ SECONDARY_EXEC_TSC_SCALING | \ - SECONDARY_EXEC_PAUSE_LOOP_EXITING) -#define EVMCS1_UNSUPPORTED_VMEXIT_CTRL \ - (VM_EXIT_SAVE_VMX_PREEMPTION_TIMER) -#define EVMCS1_UNSUPPORTED_VMENTRY_CTRL (0) -#define EVMCS1_UNSUPPORTED_VMFUNC (VMX_VMFUNC_EPTP_SWITCHING) + SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE | \ + SECONDARY_EXEC_PT_USE_GPA | \ + SECONDARY_EXEC_PT_CONCEAL_VMX | \ + SECONDARY_EXEC_BUS_LOCK_DETECTION | \ + SECONDARY_EXEC_NOTIFY_VM_EXITING | \ + SECONDARY_EXEC_ENCLS_EXITING) + +#define EVMCS1_SUPPORTED_VMEXIT_CTRL \ + (VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR | \ 
+ VM_EXIT_SAVE_DEBUG_CONTROLS | \ + VM_EXIT_ACK_INTR_ON_EXIT | \ + VM_EXIT_HOST_ADDR_SPACE_SIZE | \ + VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | \ + VM_EXIT_SAVE_IA32_PAT | \ + VM_EXIT_LOAD_IA32_PAT | \ + VM_EXIT_SAVE_IA32_EFER | \ + VM_EXIT_LOAD_IA32_EFER | \ + VM_EXIT_CLEAR_BNDCFGS | \ + VM_EXIT_PT_CONCEAL_PIP | \ + VM_EXIT_CLEAR_IA32_RTIT_CTL) + +#define EVMCS1_SUPPORTED_VMENTRY_CTRL \ + (VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR | \ + VM_ENTRY_LOAD_DEBUG_CONTROLS | \ + VM_ENTRY_IA32E_MODE | \ + VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | \ + VM_ENTRY_LOAD_IA32_PAT | \ + VM_ENTRY_LOAD_IA32_EFER | \ + VM_ENTRY_LOAD_BNDCFGS | \ + VM_ENTRY_PT_CONCEAL_PIP | \ + VM_ENTRY_LOAD_IA32_RTIT_CTL) + +#define EVMCS1_SUPPORTED_VMFUNC (0) struct evmcs_field { u16 offset; From c128d3fd389b583ce6b28dbaa0e924de5671b961 Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Fri, 4 Nov 2022 15:47:07 +0100 Subject: [PATCH 44/68] KVM: nVMX: Prepare to sanitize tertiary execution controls with eVMCS In preparation to restoring vmcs_conf sanitization for KVM-on-Hyper-V, (and for completeness) add tertiary VM-execution controls to 'evmcs_supported_ctrls'. No functional change intended as KVM doesn't yet expose MSR_IA32_VMX_PROCBASED_CTLS3 to its guests. Signed-off-by: Vitaly Kuznetsov Message-Id: <20221104144708.435865-4-vkuznets@redhat.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/hyperv.c | 4 ++++ arch/x86/kvm/vmx/hyperv.h | 2 ++ 2 files changed, 6 insertions(+) diff --git a/arch/x86/kvm/vmx/hyperv.c b/arch/x86/kvm/vmx/hyperv.c index 77027f6a9b41e1..a5cbd029c07b22 100644 --- a/arch/x86/kvm/vmx/hyperv.c +++ b/arch/x86/kvm/vmx/hyperv.c @@ -363,6 +363,7 @@ enum evmcs_ctrl_type { EVMCS_ENTRY_CTRLS, EVMCS_EXEC_CTRL, EVMCS_2NDEXEC, + EVMCS_3RDEXEC, EVMCS_PINCTRL, EVMCS_VMFUNC, NR_EVMCS_CTRLS, @@ -381,6 +382,9 @@ static const u32 evmcs_supported_ctrls[NR_EVMCS_CTRLS][NR_EVMCS_REVISIONS] = { [EVMCS_2NDEXEC] = { [EVMCSv1_LEGACY] = EVMCS1_SUPPORTED_2NDEXEC & ~SECONDARY_EXEC_TSC_SCALING, }, + [EVMCS_3RDEXEC] = { + [EVMCSv1_LEGACY] = EVMCS1_SUPPORTED_3RDEXEC, + }, [EVMCS_PINCTRL] = { [EVMCSv1_LEGACY] = EVMCS1_SUPPORTED_PINCTRL, }, diff --git a/arch/x86/kvm/vmx/hyperv.h b/arch/x86/kvm/vmx/hyperv.h index a9da47c69a2be6..3edbbd0991dcd6 100644 --- a/arch/x86/kvm/vmx/hyperv.h +++ b/arch/x86/kvm/vmx/hyperv.h @@ -98,6 +98,8 @@ DECLARE_STATIC_KEY_FALSE(enable_evmcs); SECONDARY_EXEC_NOTIFY_VM_EXITING | \ SECONDARY_EXEC_ENCLS_EXITING) +#define EVMCS1_SUPPORTED_3RDEXEC (0ULL) + #define EVMCS1_SUPPORTED_VMEXIT_CTRL \ (VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR | \ VM_EXIT_SAVE_DEBUG_CONTROLS | \ From 80edc49f6a7544fa0fac122e177ffb63af60651e Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Fri, 4 Nov 2022 15:47:08 +0100 Subject: [PATCH 45/68] KVM: VMX: Resurrect vmcs_conf sanitization for KVM-on-Hyper-V Commit 9bcb90650e31 ("KVM: VMX: Get rid of eVMCS specific VMX controls sanitization") dropped 'vmcs_conf' sanitization for KVM-on-Hyper-V because there's no known Hyper-V version which would expose a feature unsupported in eVMCS in VMX feature MSRs. This works well for all currently existing Hyper-V versions, however, future Hyper-V versions may add features which are supported by KVM and are currently missing in eVMCSv1 definition (e.g. APIC virtualization, PML,...). When this happens, existing KVMs will get broken. With the inverted 'unsupported by eVMCSv1' checks, we can resurrect vmcs_conf sanitization and make KVM future proof.
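With sanitization back in place, such a mismatch would be reported once, roughly like the following (hypothetical field and value, format per the pr_warn_once() in the patch):

	kvm/hyper-v: cpu_based_2nd_exec_ctrl unsupported with eVMCS: 0x10000

and the offending bits would be cleared from vmcs_conf instead of being used and breaking the guest.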
Signed-off-by: Vitaly Kuznetsov Message-Id: <20221104144708.435865-5-vkuznets@redhat.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/hyperv.c | 34 ++++++++++++++++++++++++++++++++++ arch/x86/kvm/vmx/hyperv.h | 1 + arch/x86/kvm/vmx/vmx.c | 5 +++++ 3 files changed, 40 insertions(+) diff --git a/arch/x86/kvm/vmx/hyperv.c b/arch/x86/kvm/vmx/hyperv.c index a5cbd029c07b22..f773450fba6e5d 100644 --- a/arch/x86/kvm/vmx/hyperv.c +++ b/arch/x86/kvm/vmx/hyperv.c @@ -1,5 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 +#define pr_fmt(fmt) "kvm/hyper-v: " fmt + #include #include @@ -504,6 +506,38 @@ int nested_evmcs_check_controls(struct vmcs12 *vmcs12) return 0; } +#if IS_ENABLED(CONFIG_HYPERV) +/* + * KVM on Hyper-V always uses the latest known eVMCSv1 revision, the assumption + * is: in case a feature has corresponding fields in eVMCS described and it was + * exposed in VMX feature MSRs, KVM is free to use it. Warn if KVM meets a + * feature which has no corresponding eVMCS field, this likely means that KVM + * needs to be updated. + */ +#define evmcs_check_vmcs_conf(field, ctrl) \ + do { \ + typeof(vmcs_conf->field) unsupported; \ + \ + unsupported = vmcs_conf->field & ~EVMCS1_SUPPORTED_ ## ctrl; \ + if (unsupported) { \ + pr_warn_once(#field " unsupported with eVMCS: 0x%llx\n",\ + (u64)unsupported); \ + vmcs_conf->field &= EVMCS1_SUPPORTED_ ## ctrl; \ + } \ + } \ + while (0) + +__init void evmcs_sanitize_exec_ctrls(struct vmcs_config *vmcs_conf) +{ + evmcs_check_vmcs_conf(cpu_based_exec_ctrl, EXEC_CTRL); + evmcs_check_vmcs_conf(pin_based_exec_ctrl, PINCTRL); + evmcs_check_vmcs_conf(cpu_based_2nd_exec_ctrl, 2NDEXEC); + evmcs_check_vmcs_conf(cpu_based_3rd_exec_ctrl, 3RDEXEC); + evmcs_check_vmcs_conf(vmentry_ctrl, VMENTRY_CTRL); + evmcs_check_vmcs_conf(vmexit_ctrl, VMEXIT_CTRL); +} +#endif + int nested_enable_evmcs(struct kvm_vcpu *vcpu, uint16_t *vmcs_version) { diff --git a/arch/x86/kvm/vmx/hyperv.h b/arch/x86/kvm/vmx/hyperv.h index 3edbbd0991dcd6..883102d567c38e 100644 --- a/arch/x86/kvm/vmx/hyperv.h +++ b/arch/x86/kvm/vmx/hyperv.h @@ -273,6 +273,7 @@ static inline void evmcs_load(u64 phys_addr) vp_ap->enlighten_vmentry = 1; } +__init void evmcs_sanitize_exec_ctrls(struct vmcs_config *vmcs_conf); #else /* !IS_ENABLED(CONFIG_HYPERV) */ static __always_inline void evmcs_write64(unsigned long field, u64 value) {} static inline void evmcs_write32(unsigned long field, u32 value) {} diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index fc9008dbed3348..0220e22b89ca86 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -2752,6 +2752,11 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf, vmcs_conf->vmentry_ctrl = _vmentry_control; vmcs_conf->misc = misc_msr; +#if IS_ENABLED(CONFIG_HYPERV) + if (enlightened_vmcs) + evmcs_sanitize_exec_ctrls(vmcs_conf); +#endif + return 0; } From c4a488685b84ad94f099aa6be066bf1488e4b73c Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Mon, 12 Dec 2022 17:01:06 +0800 Subject: [PATCH 46/68] kvm: x86/mmu: Warn on linking when sp->unsync_children Since the commit 65855ed8b034 ("KVM: X86: Synchronize the shadow pagetable before link it"), no sp would be linked with sp->unsync_children = 1. So make it WARN if it is the case. 
Signed-off-by: Lai Jiangshan Message-Id: <20221212090106.378206-1-jiangshanlai@gmail.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/mmu.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 4736d7849c60f2..26b32ae6b52646 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -2355,7 +2355,16 @@ static void __link_shadow_page(struct kvm *kvm, mmu_page_add_parent_pte(cache, sp, sptep); - if (sp->unsync_children || sp->unsync) + /* + * The non-direct sub-pagetable must be updated before linking. For + * L1 sp, the pagetable is updated via kvm_sync_page() in + * kvm_mmu_find_shadow_page() without write-protecting the gfn, + * so sp->unsync can be true or false. For higher level non-direct + * sp, the pagetable is updated/synced via mmu_sync_children() in + * FNAME(fetch)(), so sp->unsync_children can only be false. + * WARN_ON_ONCE() if anything happens unexpectedly. + */ + if (WARN_ON_ONCE(sp->unsync_children) || sp->unsync) mark_unsync(sptep); } From 8125b6cda9b17f5a0163cca3904c430f4f03591b Mon Sep 17 00:00:00 2001 From: Thomas Huth Date: Tue, 4 Oct 2022 11:31:29 +0200 Subject: [PATCH 47/68] KVM: selftests: Use TAP interface in the kvm_binary_stats_test The kvm_binary_stats_test test currently does not have any output (unless one of the TEST_ASSERT statements fails), so it's hard to say for a user how far it did proceed already. Thus let's make this a little bit more user-friendly and include some TAP output via the kselftest.h interface. Signed-off-by: Thomas Huth Reviewed-by: Andrew Jones Message-Id: <20221004093131.40392-2-thuth@redhat.com> Signed-off-by: Paolo Bonzini --- tools/testing/selftests/kvm/kvm_binary_stats_test.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/tools/testing/selftests/kvm/kvm_binary_stats_test.c b/tools/testing/selftests/kvm/kvm_binary_stats_test.c index 0b45ac59338789..894417c96f7014 100644 --- a/tools/testing/selftests/kvm/kvm_binary_stats_test.c +++ b/tools/testing/selftests/kvm/kvm_binary_stats_test.c @@ -19,6 +19,7 @@ #include "kvm_util.h" #include "asm/kvm.h" #include "linux/kvm.h" +#include "kselftest.h" static void stats_test(int stats_fd) { @@ -51,7 +52,7 @@ static void stats_test(int stats_fd) /* Sanity check for other fields in header */ if (header.num_desc == 0) { - printf("No KVM stats defined!"); + ksft_print_msg("No KVM stats defined!\n"); return; } /* @@ -224,9 +225,13 @@ int main(int argc, char *argv[]) max_vcpu = DEFAULT_NUM_VCPU; } + ksft_print_header(); + /* Check the extension for binary stats */ TEST_REQUIRE(kvm_has_cap(KVM_CAP_BINARY_STATS_FD)); + ksft_set_plan(max_vm); + /* Create VMs and VCPUs */ vms = malloc(sizeof(vms[0]) * max_vm); TEST_ASSERT(vms, "Allocate memory for storing VM pointers"); @@ -245,10 +250,12 @@ int main(int argc, char *argv[]) vm_stats_test(vms[i]); for (j = 0; j < max_vcpu; ++j) vcpu_stats_test(vcpus[i * max_vcpu + j]); + ksft_test_result_pass("vm%i\n", i); } for (i = 0; i < max_vm; ++i) kvm_vm_free(vms[i]); free(vms); - return 0; + + ksft_finished(); /* Print results and exit() accordingly */ } From 79edd55049527596187ee493d5f949fd1eda7067 Mon Sep 17 00:00:00 2001 From: Thomas Huth Date: Tue, 4 Oct 2022 11:31:31 +0200 Subject: [PATCH 48/68] KVM: selftests: x86: Use TAP interface in the tsc_msrs_test Let's add some output here so that the user has some feedback about what is being run.
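With the conversion, a successful run produces output roughly like this (a sketch, assuming all five stages pass):

	TAP version 13
	1..5
	ok 1 stage 1 passed
	ok 2 stage 2 passed
	ok 3 stage 3 passed
	ok 4 stage 4 passed
	ok 5 stage 5 passed
	# Totals: pass:5 fail:0 xfail:0 xpass:0 skip:0 error:0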
Signed-off-by: Thomas Huth Message-Id: <20221004093131.40392-4-thuth@redhat.com> Signed-off-by: Paolo Bonzini --- .../testing/selftests/kvm/x86_64/tsc_msrs_test.c | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/tools/testing/selftests/kvm/x86_64/tsc_msrs_test.c b/tools/testing/selftests/kvm/x86_64/tsc_msrs_test.c index 22d366c697f7d1..c9f67702f657fe 100644 --- a/tools/testing/selftests/kvm/x86_64/tsc_msrs_test.c +++ b/tools/testing/selftests/kvm/x86_64/tsc_msrs_test.c @@ -72,11 +72,16 @@ static void run_vcpu(struct kvm_vcpu *vcpu, int stage) switch (get_ucall(vcpu, &uc)) { case UCALL_SYNC: - TEST_ASSERT(!strcmp((const char *)uc.args[0], "hello") && - uc.args[1] == stage + 1, "Stage %d: Unexpected register values vmexit, got %lx", - stage + 1, (ulong)uc.args[1]); + if (!strcmp((const char *)uc.args[0], "hello") && + uc.args[1] == stage + 1) + ksft_test_result_pass("stage %d passed\n", stage + 1); + else + ksft_test_result_fail( + "stage %d: Unexpected register values vmexit, got %lx", + stage + 1, (ulong)uc.args[1]); return; case UCALL_DONE: + ksft_test_result_pass("stage %d passed\n", stage + 1); return; case UCALL_ABORT: REPORT_GUEST_ASSERT_2(uc, "values: %#lx, %#lx"); @@ -92,6 +97,9 @@ int main(void) struct kvm_vm *vm; uint64_t val; + ksft_print_header(); + ksft_set_plan(5); + vm = vm_create_with_one_vcpu(&vcpu, guest_code); val = 0; @@ -149,5 +157,5 @@ int main(void) kvm_vm_free(vm); - return 0; + ksft_finished(); /* Print results and exit() accordingly */ } From 1f98f2bd8ec4df365381ba654db37a39ce20ccdf Mon Sep 17 00:00:00 2001 From: David Matlack Date: Wed, 21 Sep 2022 10:35:37 -0700 Subject: [PATCH 49/68] KVM: x86/mmu: Change tdp_mmu to a read-only parameter Change tdp_mmu to a read-only parameter and drop the per-VM tdp_mmu_enabled. For 32-bit KVM, make tdp_mmu_enabled a macro that is always false so that the compiler can continue omitting calls to the TDP MMU. The TDP MMU was introduced in 5.10 and has been enabled by default since 5.15. At this point there are no known functionality gaps between the TDP MMU and the shadow MMU, and the TDP MMU uses less memory and scales better with the number of vCPUs. In other words, there is no good reason to disable the TDP MMU on a live system. Purposely do not drop tdp_mmu=N support (i.e. do not force 64-bit KVM to always use the TDP MMU) since tdp_mmu=N is still used to get test coverage of KVM's shadow MMU TDP support, which is used in 32-bit KVM. Signed-off-by: David Matlack Reviewed-by: Kai Huang Signed-off-by: Paolo Bonzini Message-Id: <20220921173546.2674386-2-dmatlack@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_host.h | 13 ++------- arch/x86/kvm/mmu.h | 6 ++-- arch/x86/kvm/mmu/mmu.c | 51 ++++++++++++++++++++++----------- arch/x86/kvm/mmu/tdp_mmu.c | 9 ++---- 4 files changed, 41 insertions(+), 38 deletions(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index f35f1ff4427bb8..aa4eb8cfcd7e17 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1341,21 +1341,12 @@ struct kvm_arch { struct task_struct *nx_huge_page_recovery_thread; #ifdef CONFIG_X86_64 - /* - * Whether the TDP MMU is enabled for this VM. This contains a - * snapshot of the TDP MMU module parameter from when the VM was - * created and remains unchanged for the life of the VM. If this is - * true, TDP MMU handler functions will run for various MMU - * operations. - */ - bool tdp_mmu_enabled; - /* The number of TDP MMU pages across all roots.
*/ atomic64_t tdp_mmu_pages; /* - * List of kvm_mmu_page structs being used as roots. - * All kvm_mmu_page structs in the list should have + * List of struct kvm_mmu_pages being used as roots. + * All struct kvm_mmu_pages in the list should have * tdp_mmu_page set. * * For reads, this list is protected by: diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h index 6bdaacb6faa072..168c46fd8dd189 100644 --- a/arch/x86/kvm/mmu.h +++ b/arch/x86/kvm/mmu.h @@ -230,14 +230,14 @@ static inline bool kvm_shadow_root_allocated(struct kvm *kvm) } #ifdef CONFIG_X86_64 -static inline bool is_tdp_mmu_enabled(struct kvm *kvm) { return kvm->arch.tdp_mmu_enabled; } +extern bool tdp_mmu_enabled; #else -static inline bool is_tdp_mmu_enabled(struct kvm *kvm) { return false; } +#define tdp_mmu_enabled false #endif static inline bool kvm_memslots_have_rmaps(struct kvm *kvm) { - return !is_tdp_mmu_enabled(kvm) || kvm_shadow_root_allocated(kvm); + return !tdp_mmu_enabled || kvm_shadow_root_allocated(kvm); } static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level) diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 26b32ae6b52646..bd44e5682a64ad 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -99,6 +99,13 @@ module_param_named(flush_on_reuse, force_flush_and_sync_on_reuse, bool, 0644); */ bool tdp_enabled = false; +bool __ro_after_init tdp_mmu_allowed; + +#ifdef CONFIG_X86_64 +bool __read_mostly tdp_mmu_enabled = true; +module_param_named(tdp_mmu, tdp_mmu_enabled, bool, 0444); +#endif + static int max_huge_page_level __read_mostly; static int tdp_root_level __read_mostly; static int max_tdp_level __read_mostly; @@ -1279,7 +1286,7 @@ static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm, { struct kvm_rmap_head *rmap_head; - if (is_tdp_mmu_enabled(kvm)) + if (tdp_mmu_enabled) kvm_tdp_mmu_clear_dirty_pt_masked(kvm, slot, slot->base_gfn + gfn_offset, mask, true); @@ -1312,7 +1319,7 @@ static void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm, { struct kvm_rmap_head *rmap_head; - if (is_tdp_mmu_enabled(kvm)) + if (tdp_mmu_enabled) kvm_tdp_mmu_clear_dirty_pt_masked(kvm, slot, slot->base_gfn + gfn_offset, mask, false); @@ -1395,7 +1402,7 @@ bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm, } } - if (is_tdp_mmu_enabled(kvm)) + if (tdp_mmu_enabled) write_protected |= kvm_tdp_mmu_write_protect_gfn(kvm, slot, gfn, min_level); @@ -1558,7 +1565,7 @@ bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range) if (kvm_memslots_have_rmaps(kvm)) flush = kvm_handle_gfn_range(kvm, range, kvm_zap_rmap); - if (is_tdp_mmu_enabled(kvm)) + if (tdp_mmu_enabled) flush = kvm_tdp_mmu_unmap_gfn_range(kvm, range, flush); return flush; @@ -1571,7 +1578,7 @@ bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range) if (kvm_memslots_have_rmaps(kvm)) flush = kvm_handle_gfn_range(kvm, range, kvm_set_pte_rmap); - if (is_tdp_mmu_enabled(kvm)) + if (tdp_mmu_enabled) flush |= kvm_tdp_mmu_set_spte_gfn(kvm, range); return flush; @@ -1646,7 +1653,7 @@ bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range) if (kvm_memslots_have_rmaps(kvm)) young = kvm_handle_gfn_range(kvm, range, kvm_age_rmap); - if (is_tdp_mmu_enabled(kvm)) + if (tdp_mmu_enabled) young |= kvm_tdp_mmu_age_gfn_range(kvm, range); return young; @@ -1659,7 +1666,7 @@ bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range) if (kvm_memslots_have_rmaps(kvm)) young = kvm_handle_gfn_range(kvm, range, kvm_test_age_rmap); - if (is_tdp_mmu_enabled(kvm)) + if (tdp_mmu_enabled) young |= 
kvm_tdp_mmu_test_age_gfn(kvm, range); return young; @@ -3604,7 +3611,7 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu) if (r < 0) goto out_unlock; - if (is_tdp_mmu_enabled(vcpu->kvm)) { + if (tdp_mmu_enabled) { root = kvm_tdp_mmu_get_vcpu_root_hpa(vcpu); mmu->root.hpa = root; } else if (shadow_root_level >= PT64_ROOT_4LEVEL) { @@ -5727,6 +5734,9 @@ void kvm_configure_mmu(bool enable_tdp, int tdp_forced_root_level, tdp_root_level = tdp_forced_root_level; max_tdp_level = tdp_max_root_level; +#ifdef CONFIG_X86_64 + tdp_mmu_enabled = tdp_mmu_allowed && tdp_enabled; +#endif /* * max_huge_page_level reflects KVM's MMU capabilities irrespective * of kernel support, e.g. KVM may be capable of using 1GB pages when @@ -5974,7 +5984,7 @@ static void kvm_mmu_zap_all_fast(struct kvm *kvm) * write and in the same critical section as making the reload request, * e.g. before kvm_zap_obsolete_pages() could drop mmu_lock and yield. */ - if (is_tdp_mmu_enabled(kvm)) + if (tdp_mmu_enabled) kvm_tdp_mmu_invalidate_all_roots(kvm); /* @@ -5999,7 +6009,7 @@ static void kvm_mmu_zap_all_fast(struct kvm *kvm) * Deferring the zap until the final reference to the root is put would * lead to use-after-free. */ - if (is_tdp_mmu_enabled(kvm)) + if (tdp_mmu_enabled) kvm_tdp_mmu_zap_invalidated_roots(kvm); } @@ -6111,7 +6121,7 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end) flush = kvm_rmap_zap_gfn_range(kvm, gfn_start, gfn_end); - if (is_tdp_mmu_enabled(kvm)) { + if (tdp_mmu_enabled) { for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) flush = kvm_tdp_mmu_zap_leafs(kvm, i, gfn_start, gfn_end, true, flush); @@ -6144,7 +6154,7 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, write_unlock(&kvm->mmu_lock); } - if (is_tdp_mmu_enabled(kvm)) { + if (tdp_mmu_enabled) { read_lock(&kvm->mmu_lock); kvm_tdp_mmu_wrprot_slot(kvm, memslot, start_level); read_unlock(&kvm->mmu_lock); @@ -6387,7 +6397,7 @@ void kvm_mmu_try_split_huge_pages(struct kvm *kvm, u64 start, u64 end, int target_level) { - if (!is_tdp_mmu_enabled(kvm)) + if (!tdp_mmu_enabled) return; if (kvm_memslots_have_rmaps(kvm)) @@ -6408,7 +6418,7 @@ void kvm_mmu_slot_try_split_huge_pages(struct kvm *kvm, u64 start = memslot->base_gfn; u64 end = start + memslot->npages; - if (!is_tdp_mmu_enabled(kvm)) + if (!tdp_mmu_enabled) return; if (kvm_memslots_have_rmaps(kvm)) { @@ -6491,7 +6501,7 @@ void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm, write_unlock(&kvm->mmu_lock); } - if (is_tdp_mmu_enabled(kvm)) { + if (tdp_mmu_enabled) { read_lock(&kvm->mmu_lock); kvm_tdp_mmu_zap_collapsible_sptes(kvm, slot); read_unlock(&kvm->mmu_lock); @@ -6526,7 +6536,7 @@ void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm, write_unlock(&kvm->mmu_lock); } - if (is_tdp_mmu_enabled(kvm)) { + if (tdp_mmu_enabled) { read_lock(&kvm->mmu_lock); kvm_tdp_mmu_clear_dirty_slot(kvm, memslot); read_unlock(&kvm->mmu_lock); @@ -6561,7 +6571,7 @@ void kvm_mmu_zap_all(struct kvm *kvm) kvm_mmu_commit_zap_page(kvm, &invalid_list); - if (is_tdp_mmu_enabled(kvm)) + if (tdp_mmu_enabled) kvm_tdp_mmu_zap_all(kvm); write_unlock(&kvm->mmu_lock); @@ -6726,6 +6736,13 @@ void __init kvm_mmu_x86_module_init(void) if (nx_huge_pages == -1) __set_nx_huge_pages(get_nx_auto_mode()); + /* + * Snapshot userspace's desire to enable the TDP MMU. Whether or not the + * TDP MMU is actually enabled is determined in kvm_configure_mmu() + * when the vendor module is loaded. 
+ */ + tdp_mmu_allowed = tdp_mmu_enabled; + kvm_mmu_spte_module_init(); } diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c index d6df38d371a00c..03511e83050fac 100644 --- a/arch/x86/kvm/mmu/tdp_mmu.c +++ b/arch/x86/kvm/mmu/tdp_mmu.c @@ -10,23 +10,18 @@ #include #include -static bool __read_mostly tdp_mmu_enabled = true; -module_param_named(tdp_mmu, tdp_mmu_enabled, bool, 0644); - /* Initializes the TDP MMU for the VM, if enabled. */ int kvm_mmu_init_tdp_mmu(struct kvm *kvm) { struct workqueue_struct *wq; - if (!tdp_enabled || !READ_ONCE(tdp_mmu_enabled)) + if (!tdp_mmu_enabled) return 0; wq = alloc_workqueue("kvm", WQ_UNBOUND|WQ_MEM_RECLAIM|WQ_CPU_INTENSIVE, 0); if (!wq) return -ENOMEM; - /* This should not be changed for the lifetime of the VM. */ - kvm->arch.tdp_mmu_enabled = true; INIT_LIST_HEAD(&kvm->arch.tdp_mmu_roots); spin_lock_init(&kvm->arch.tdp_mmu_pages_lock); kvm->arch.tdp_mmu_zap_wq = wq; @@ -47,7 +42,7 @@ static __always_inline bool kvm_lockdep_assert_mmu_lock_held(struct kvm *kvm, void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm) { - if (!kvm->arch.tdp_mmu_enabled) + if (!tdp_mmu_enabled) return; /* Also waits for any queued work items. */ From 09732d2b4dc5ba9de504711a3ae5aa1199127e6f Mon Sep 17 00:00:00 2001 From: David Matlack Date: Wed, 21 Sep 2022 10:35:38 -0700 Subject: [PATCH 50/68] KVM: x86/mmu: Move TDP MMU VM init/uninit behind tdp_mmu_enabled Move kvm_mmu_{init,uninit}_tdp_mmu() behind tdp_mmu_enabled. This makes these functions consistent with the rest of the calls into the TDP MMU from mmu.c, which is now possible since tdp_mmu_enabled is only modified when the x86 vendor module is loaded, i.e. it will never change during the lifetime of a VM. This change also enables removing the stub definitions for 32-bit KVM, as the compiler will simply optimize the calls out, just as it does for all the other TDP MMU functions. No functional change intended.
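The reason the 32-bit stubs can go away: when tdp_mmu_enabled is the compile-time constant false, the guarded calls are statically dead and the compiler drops them before link time, so a bare declaration suffices. A reduced sketch of the pattern (not the actual KVM code; it relies on the kernel always building with optimization enabled):

    struct kvm;

    #ifdef CONFIG_X86_64
    extern bool tdp_mmu_enabled;
    #else
    #define tdp_mmu_enabled false
    #endif

    int kvm_mmu_init_tdp_mmu(struct kvm *kvm);   /* declared, never defined on 32-bit */

    static int init_vm_sketch(struct kvm *kvm)
    {
            if (tdp_mmu_enabled)                     /* constant 'false' on 32-bit */
                    return kvm_mmu_init_tdp_mmu(kvm);   /* dead code, eliminated */
            return 0;
    }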
Signed-off-by: David Matlack Reviewed-by: Isaku Yamahata Signed-off-by: Paolo Bonzini Message-Id: <20220921173546.2674386-3-dmatlack@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/mmu.c | 11 +++++++---- arch/x86/kvm/mmu/tdp_mmu.c | 6 ------ arch/x86/kvm/mmu/tdp_mmu.h | 7 +++---- 3 files changed, 10 insertions(+), 14 deletions(-) diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index bd44e5682a64ad..31bdf93711a211 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -6035,9 +6035,11 @@ int kvm_mmu_init_vm(struct kvm *kvm) INIT_LIST_HEAD(&kvm->arch.possible_nx_huge_pages); spin_lock_init(&kvm->arch.mmu_unsync_pages_lock); - r = kvm_mmu_init_tdp_mmu(kvm); - if (r < 0) - return r; + if (tdp_mmu_enabled) { + r = kvm_mmu_init_tdp_mmu(kvm); + if (r < 0) + return r; + } node->track_write = kvm_mmu_pte_write; node->track_flush_slot = kvm_mmu_invalidate_zap_pages_in_memslot; @@ -6067,7 +6069,8 @@ void kvm_mmu_uninit_vm(struct kvm *kvm) kvm_page_track_unregister_notifier(kvm, node); - kvm_mmu_uninit_tdp_mmu(kvm); + if (tdp_mmu_enabled) + kvm_mmu_uninit_tdp_mmu(kvm); mmu_free_vm_memory_caches(kvm); } diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c index 03511e83050fac..7e5952e95d3bf7 100644 --- a/arch/x86/kvm/mmu/tdp_mmu.c +++ b/arch/x86/kvm/mmu/tdp_mmu.c @@ -15,9 +15,6 @@ int kvm_mmu_init_tdp_mmu(struct kvm *kvm) { struct workqueue_struct *wq; - if (!tdp_mmu_enabled) - return 0; - wq = alloc_workqueue("kvm", WQ_UNBOUND|WQ_MEM_RECLAIM|WQ_CPU_INTENSIVE, 0); if (!wq) return -ENOMEM; @@ -42,9 +39,6 @@ static __always_inline bool kvm_lockdep_assert_mmu_lock_held(struct kvm *kvm, void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm) { - if (!tdp_mmu_enabled) - return; - /* Also waits for any queued work items. */ destroy_workqueue(kvm->arch.tdp_mmu_zap_wq); diff --git a/arch/x86/kvm/mmu/tdp_mmu.h b/arch/x86/kvm/mmu/tdp_mmu.h index d3714200b932af..e4ab2dac269d68 100644 --- a/arch/x86/kvm/mmu/tdp_mmu.h +++ b/arch/x86/kvm/mmu/tdp_mmu.h @@ -7,6 +7,9 @@ #include "spte.h" +int kvm_mmu_init_tdp_mmu(struct kvm *kvm); +void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm); + hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu); __must_check static inline bool kvm_tdp_mmu_get_root(struct kvm_mmu_page *root) @@ -68,8 +71,6 @@ u64 *kvm_tdp_mmu_fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, u64 addr, u64 *spte); #ifdef CONFIG_X86_64 -int kvm_mmu_init_tdp_mmu(struct kvm *kvm); -void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm); static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return sp->tdp_mmu_page; } static inline bool is_tdp_mmu(struct kvm_mmu *mmu) @@ -89,8 +90,6 @@ static inline bool is_tdp_mmu(struct kvm_mmu *mmu) return sp && is_tdp_mmu_page(sp) && sp->root_count; } #else -static inline int kvm_mmu_init_tdp_mmu(struct kvm *kvm) { return 0; } -static inline void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm) {} static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return false; } static inline bool is_tdp_mmu(struct kvm_mmu *mmu) { return false; } #endif From ba6e3fe255439fd401a8d5487ad7c030db14afa8 Mon Sep 17 00:00:00 2001 From: David Matlack Date: Wed, 21 Sep 2022 10:35:39 -0700 Subject: [PATCH 51/68] KVM: x86/mmu: Grab mmu_invalidate_seq in kvm_faultin_pfn() Grab mmu_invalidate_seq in kvm_faultin_pfn() and stash it in struct kvm_page_fault. This eliminates duplicate code and reduces the number of parameters needed for is_page_fault_stale(). Preemptively split out __kvm_faultin_pfn() into a separate function for use in subsequent commits.
No functional change intended. Signed-off-by: David Matlack Reviewed-by: Isaku Yamahata Signed-off-by: Paolo Bonzini Message-Id: <20220921173546.2674386-4-dmatlack@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/mmu.c | 21 ++++++++++++--------- arch/x86/kvm/mmu/mmu_internal.h | 1 + arch/x86/kvm/mmu/paging_tmpl.h | 6 +----- 3 files changed, 14 insertions(+), 14 deletions(-) diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 31bdf93711a211..efc540e43e17f2 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -4189,7 +4189,7 @@ void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work) kvm_mmu_do_page_fault(vcpu, work->cr2_or_gpa, 0, true); } -static int kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) +static int __kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) { struct kvm_memory_slot *slot = fault->slot; bool async; @@ -4250,12 +4250,20 @@ static int kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) return RET_PF_CONTINUE; } +static int kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) +{ + fault->mmu_seq = vcpu->kvm->mmu_invalidate_seq; + smp_rmb(); + + return __kvm_faultin_pfn(vcpu, fault); +} + /* * Returns true if the page fault is stale and needs to be retried, i.e. if the * root was invalidated by a memslot update or a relevant mmu_notifier fired. */ static bool is_page_fault_stale(struct kvm_vcpu *vcpu, - struct kvm_page_fault *fault, int mmu_seq) + struct kvm_page_fault *fault) { struct kvm_mmu_page *sp = to_shadow_page(vcpu->arch.mmu->root.hpa); @@ -4275,14 +4283,12 @@ static bool is_page_fault_stale(struct kvm_vcpu *vcpu, return true; return fault->slot && - mmu_invalidate_retry_hva(vcpu->kvm, mmu_seq, fault->hva); + mmu_invalidate_retry_hva(vcpu->kvm, fault->mmu_seq, fault->hva); } static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) { bool is_tdp_mmu_fault = is_tdp_mmu(vcpu->arch.mmu); - - unsigned long mmu_seq; int r; fault->gfn = fault->addr >> PAGE_SHIFT; @@ -4299,9 +4305,6 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault if (r) return r; - mmu_seq = vcpu->kvm->mmu_invalidate_seq; - smp_rmb(); - r = kvm_faultin_pfn(vcpu, fault); if (r != RET_PF_CONTINUE) return r; @@ -4317,7 +4320,7 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault else write_lock(&vcpu->kvm->mmu_lock); - if (is_page_fault_stale(vcpu, fault, mmu_seq)) + if (is_page_fault_stale(vcpu, fault)) goto out_unlock; r = make_mmu_pages_available(vcpu); diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h index dbaf6755c5a71c..1556f596a628d7 100644 --- a/arch/x86/kvm/mmu/mmu_internal.h +++ b/arch/x86/kvm/mmu/mmu_internal.h @@ -222,6 +222,7 @@ struct kvm_page_fault { struct kvm_memory_slot *slot; /* Outputs of kvm_faultin_pfn. 
*/ + unsigned long mmu_seq; kvm_pfn_t pfn; hva_t hva; bool map_writable; diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h index 0f645507205579..88acf232494ba8 100644 --- a/arch/x86/kvm/mmu/paging_tmpl.h +++ b/arch/x86/kvm/mmu/paging_tmpl.h @@ -791,7 +791,6 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault { struct guest_walker walker; int r; - unsigned long mmu_seq; bool is_self_change_mapping; pgprintk("%s: addr %lx err %x\n", __func__, fault->addr, fault->error_code); @@ -838,9 +837,6 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault else fault->max_level = walker.level; - mmu_seq = vcpu->kvm->mmu_invalidate_seq; - smp_rmb(); - r = kvm_faultin_pfn(vcpu, fault); if (r != RET_PF_CONTINUE) return r; @@ -871,7 +867,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault r = RET_PF_RETRY; write_lock(&vcpu->kvm->mmu_lock); - if (is_page_fault_stale(vcpu, fault, mmu_seq)) + if (is_page_fault_stale(vcpu, fault)) goto out_unlock; r = make_mmu_pages_available(vcpu); From 56c3a4e4a2d5ef36ac12156e9d4d793e2841135c Mon Sep 17 00:00:00 2001 From: David Matlack Date: Wed, 21 Sep 2022 10:35:40 -0700 Subject: [PATCH 52/68] KVM: x86/mmu: Handle error PFNs in kvm_faultin_pfn() Handle error PFNs in kvm_faultin_pfn() rather than relying on the caller to invoke handle_abnormal_pfn() after kvm_faultin_pfn(). Opportunistically rename kvm_handle_bad_page() to kvm_handle_error_pfn() to make it more consistent with is_error_pfn(). This commit moves KVM closer to being able to drop handle_abnormal_pfn(), which will reduce the amount of duplicate code in the various page fault handlers. No functional change intended. Signed-off-by: David Matlack Signed-off-by: Paolo Bonzini Message-Id: <20220921173546.2674386-5-dmatlack@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/mmu.c | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index efc540e43e17f2..d8a256f1b84207 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -3219,10 +3219,6 @@ static int kvm_handle_error_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn) static int handle_abnormal_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault, unsigned int access) { - /* The pfn is invalid, report the error! */ - if (unlikely(is_error_pfn(fault->pfn))) - return kvm_handle_error_pfn(vcpu, fault->gfn, fault->pfn); - if (unlikely(!fault->slot)) { gva_t gva = fault->is_tdp ? 0 : fault->addr; @@ -4252,10 +4248,19 @@ static int __kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault static int kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) { + int ret; + fault->mmu_seq = vcpu->kvm->mmu_invalidate_seq; smp_rmb(); - return __kvm_faultin_pfn(vcpu, fault); + ret = __kvm_faultin_pfn(vcpu, fault); + if (ret != RET_PF_CONTINUE) + return ret; + + if (unlikely(is_error_pfn(fault->pfn))) + return kvm_handle_error_pfn(vcpu, fault->gfn, fault->pfn); + + return RET_PF_CONTINUE; } /* From cd08d178ff45d6564e782a3d005922ed439bd77f Mon Sep 17 00:00:00 2001 From: David Matlack Date: Wed, 21 Sep 2022 10:35:41 -0700 Subject: [PATCH 53/68] KVM: x86/mmu: Avoid memslot lookup during KVM_PFN_ERR_HWPOISON handling Pass the kvm_page_fault struct down to kvm_handle_error_pfn() to avoid a memslot lookup when handling KVM_PFN_ERR_HWPOISON. 
Opportunistically move the gfn_to_hva_memslot() call and @current down into kvm_send_hwpoison_signal() to cut down on line lengths. No functional change intended. Signed-off-by: David Matlack Reviewed-by: Isaku Yamahata Signed-off-by: Paolo Bonzini Message-Id: <20220921173546.2674386-6-dmatlack@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/mmu.c | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index d8a256f1b84207..b5f9f075545407 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -3188,14 +3188,16 @@ static int __direct_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) return ret; } -static void kvm_send_hwpoison_signal(unsigned long address, struct task_struct *tsk) +static void kvm_send_hwpoison_signal(struct kvm_memory_slot *slot, gfn_t gfn) { - send_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, PAGE_SHIFT, tsk); + unsigned long hva = gfn_to_hva_memslot(slot, gfn); + + send_sig_mceerr(BUS_MCEERR_AR, (void __user *)hva, PAGE_SHIFT, current); } -static int kvm_handle_error_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn) +static int kvm_handle_error_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) { - if (is_sigpending_pfn(pfn)) { + if (is_sigpending_pfn(fault->pfn)) { kvm_handle_signal_exit(vcpu); return -EINTR; } @@ -3205,11 +3207,11 @@ static int kvm_handle_error_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn) * into the spte otherwise read access on readonly gfn also can * caused mmio page fault and treat it as mmio access. */ - if (pfn == KVM_PFN_ERR_RO_FAULT) + if (fault->pfn == KVM_PFN_ERR_RO_FAULT) return RET_PF_EMULATE; - if (pfn == KVM_PFN_ERR_HWPOISON) { - kvm_send_hwpoison_signal(kvm_vcpu_gfn_to_hva(vcpu, gfn), current); + if (fault->pfn == KVM_PFN_ERR_HWPOISON) { + kvm_send_hwpoison_signal(fault->slot, fault->gfn); return RET_PF_RETRY; } @@ -4258,7 +4260,7 @@ static int kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) return ret; if (unlikely(is_error_pfn(fault->pfn))) - return kvm_handle_error_pfn(vcpu, fault->gfn, fault->pfn); + return kvm_handle_error_pfn(vcpu, fault); return RET_PF_CONTINUE; } From 354c908c068ec0a21850aef5dbf6b8331434a2e9 Mon Sep 17 00:00:00 2001 From: David Matlack Date: Wed, 21 Sep 2022 10:35:42 -0700 Subject: [PATCH 54/68] KVM: x86/mmu: Handle no-slot faults in kvm_faultin_pfn() Handle faults on GFNs that do not have a backing memslot in kvm_faultin_pfn() and drop handle_abnormal_pfn(). This eliminates duplicate code in the various page fault handlers. Opportunistically tweak the comment about handling gfn > host.MAXPHYADDR to reflect that the effect of returning RET_PF_EMULATE at that point is to avoid creating an MMIO SPTE for such GFNs. No functional change intended. 
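Assembled from the hunks below, the resulting kvm_faultin_pfn() now disposes of every abnormal PFN before returning control to the fault handlers:

    static int kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
                               unsigned int access)
    {
            int ret;

            fault->mmu_seq = vcpu->kvm->mmu_invalidate_seq;
            smp_rmb();

            ret = __kvm_faultin_pfn(vcpu, fault);       /* resolve fault->pfn/hva */
            if (ret != RET_PF_CONTINUE)
                    return ret;

            if (unlikely(is_error_pfn(fault->pfn)))     /* error PFNs */
                    return kvm_handle_error_pfn(vcpu, fault);

            if (unlikely(!fault->slot))                 /* no memslot, i.e. MMIO */
                    return kvm_handle_noslot_fault(vcpu, fault, access);

            return RET_PF_CONTINUE;
    }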
Signed-off-by: David Matlack Signed-off-by: Paolo Bonzini Message-Id: <20220921173546.2674386-7-dmatlack@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/mmu.c | 56 ++++++++++++++++++---------------- arch/x86/kvm/mmu/paging_tmpl.h | 6 +--- 2 files changed, 31 insertions(+), 31 deletions(-) diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index b5f9f075545407..e2e8c4dfbaa511 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -3218,28 +3218,32 @@ static int kvm_handle_error_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fa return -EFAULT; } -static int handle_abnormal_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault, - unsigned int access) +static int kvm_handle_noslot_fault(struct kvm_vcpu *vcpu, + struct kvm_page_fault *fault, + unsigned int access) { - if (unlikely(!fault->slot)) { - gva_t gva = fault->is_tdp ? 0 : fault->addr; + gva_t gva = fault->is_tdp ? 0 : fault->addr; - vcpu_cache_mmio_info(vcpu, gva, fault->gfn, - access & shadow_mmio_access_mask); - /* - * If MMIO caching is disabled, emulate immediately without - * touching the shadow page tables as attempting to install an - * MMIO SPTE will just be an expensive nop. Do not cache MMIO - * whose gfn is greater than host.MAXPHYADDR, any guest that - * generates such gfns is running nested and is being tricked - * by L0 userspace (you can observe gfn > L1.MAXPHYADDR if - * and only if L1's MAXPHYADDR is inaccurate with respect to - * the hardware's). - */ - if (unlikely(!enable_mmio_caching) || - unlikely(fault->gfn > kvm_mmu_max_gfn())) - return RET_PF_EMULATE; - } + vcpu_cache_mmio_info(vcpu, gva, fault->gfn, + access & shadow_mmio_access_mask); + + /* + * If MMIO caching is disabled, emulate immediately without + * touching the shadow page tables as attempting to install an + * MMIO SPTE will just be an expensive nop. + */ + if (unlikely(!enable_mmio_caching)) + return RET_PF_EMULATE; + + /* + * Do not create an MMIO SPTE for a gfn greater than host.MAXPHYADDR, + * any guest that generates such gfns is running nested and is being + * tricked by L0 userspace (you can observe gfn > L1.MAXPHYADDR if and + * only if L1's MAXPHYADDR is inaccurate with respect to the + * hardware's). 
+ */ + if (unlikely(fault->gfn > kvm_mmu_max_gfn())) + return RET_PF_EMULATE; return RET_PF_CONTINUE; } @@ -4248,7 +4252,8 @@ static int __kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault return RET_PF_CONTINUE; } -static int kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) +static int kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault, + unsigned int access) { int ret; @@ -4262,6 +4267,9 @@ static int kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) if (unlikely(is_error_pfn(fault->pfn))) return kvm_handle_error_pfn(vcpu, fault); + if (unlikely(!fault->slot)) + return kvm_handle_noslot_fault(vcpu, fault, access); + return RET_PF_CONTINUE; } @@ -4312,11 +4320,7 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault if (r) return r; - r = kvm_faultin_pfn(vcpu, fault); - if (r != RET_PF_CONTINUE) - return r; - - r = handle_abnormal_pfn(vcpu, fault, ACC_ALL); + r = kvm_faultin_pfn(vcpu, fault, ACC_ALL); if (r != RET_PF_CONTINUE) return r; diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h index 88acf232494ba8..e5662dbd519c4e 100644 --- a/arch/x86/kvm/mmu/paging_tmpl.h +++ b/arch/x86/kvm/mmu/paging_tmpl.h @@ -837,11 +837,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault else fault->max_level = walker.level; - r = kvm_faultin_pfn(vcpu, fault); - if (r != RET_PF_CONTINUE) - return r; - - r = handle_abnormal_pfn(vcpu, fault, walker.pte_access); + r = kvm_faultin_pfn(vcpu, fault, walker.pte_access); if (r != RET_PF_CONTINUE) return r; From e5e6f8d254a285e97b02180fdc75d02ffdf9f0ed Mon Sep 17 00:00:00 2001 From: David Matlack Date: Wed, 21 Sep 2022 10:35:43 -0700 Subject: [PATCH 55/68] KVM: x86/mmu: Initialize fault.{gfn,slot} earlier for direct MMUs Move the initialization of fault.{gfn,slot} earlier in the page fault handling code for fully direct MMUs. This will enable a future commit to split out TDP MMU page fault handling without needing to duplicate the initialization of these 2 fields. Opportunistically take advantage of the fact that fault.gfn is initialized in kvm_tdp_page_fault() rather than recomputing it from fault->addr. No functional change intended. 
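Condensed, the hoisted initialization in kvm_mmu_do_page_fault() amounts to:

    if (vcpu->arch.mmu->root_role.direct) {
            fault.gfn = fault.addr >> PAGE_SHIFT;
            fault.slot = kvm_vcpu_gfn_to_memslot(vcpu, fault.gfn);
    }

so both the legacy direct handler and the TDP MMU handler split out in the next patch can rely on fault->gfn and fault->slot being populated on entry.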
Signed-off-by: David Matlack Reviewed-by: Isaku Yamahata Signed-off-by: Paolo Bonzini Message-Id: <20220921173546.2674386-8-dmatlack@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/mmu.c | 5 +---- arch/x86/kvm/mmu/mmu_internal.h | 5 +++++ 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index e2e8c4dfbaa511..cc2683419ff224 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -4306,9 +4306,6 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault bool is_tdp_mmu_fault = is_tdp_mmu(vcpu->arch.mmu); int r; - fault->gfn = fault->addr >> PAGE_SHIFT; - fault->slot = kvm_vcpu_gfn_to_memslot(vcpu, fault->gfn); - if (page_fault_handle_page_track(vcpu, fault)) return RET_PF_EMULATE; @@ -4412,7 +4409,7 @@ int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) if (shadow_memtype_mask && kvm_arch_has_noncoherent_dma(vcpu->kvm)) { for ( ; fault->max_level > PG_LEVEL_4K; --fault->max_level) { int page_num = KVM_PAGES_PER_HPAGE(fault->max_level); - gfn_t base = (fault->addr >> PAGE_SHIFT) & ~(page_num - 1); + gfn_t base = fault->gfn & ~(page_num - 1); if (kvm_mtrr_check_gfn_range_consistency(vcpu, base, page_num)) break; diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h index 1556f596a628d7..069890789faff3 100644 --- a/arch/x86/kvm/mmu/mmu_internal.h +++ b/arch/x86/kvm/mmu/mmu_internal.h @@ -280,6 +280,11 @@ static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, }; int r; + if (vcpu->arch.mmu->root_role.direct) { + fault.gfn = fault.addr >> PAGE_SHIFT; + fault.slot = kvm_vcpu_gfn_to_memslot(vcpu, fault.gfn); + } + /* * Async #PF "faults", a.k.a. prefetch faults, are not faults from the * guest perspective and have already been counted at the time of the From 9aa8ab43b38146029de807a8ff2696f51e15b226 Mon Sep 17 00:00:00 2001 From: David Matlack Date: Wed, 21 Sep 2022 10:35:44 -0700 Subject: [PATCH 56/68] KVM: x86/mmu: Split out TDP MMU page fault handling Split out the page fault handling for the TDP MMU to a separate function. This creates some duplicate code, but makes the TDP MMU fault handler simpler to read by eliminating branches and will enable future cleanups by allowing the TDP MMU and non-TDP MMU fault paths to diverge. Only compile in the TDP MMU fault handler for 64-bit builds since kvm_tdp_mmu_map() does not exist in 32-bit builds. No functional change intended. 
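The key behavioral difference between the two handlers after the split is the mmu_lock mode; condensed from the two paths in the diff below:

    /* Shadow MMU path: exclusive lock, page faults are serialized. */
    write_lock(&vcpu->kvm->mmu_lock);
    r = __direct_map(vcpu, fault);
    write_unlock(&vcpu->kvm->mmu_lock);

    /* TDP MMU path: shared lock, faults on different vCPUs can proceed in
     * parallel because the TDP MMU installs SPTEs with atomic operations. */
    read_lock(&vcpu->kvm->mmu_lock);
    r = kvm_tdp_mmu_map(vcpu, fault);
    read_unlock(&vcpu->kvm->mmu_lock);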
Signed-off-by: David Matlack Reviewed-by: Isaku Yamahata Signed-off-by: Paolo Bonzini Message-Id: <20220921173546.2674386-9-dmatlack@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/mmu.c | 62 ++++++++++++++++++++++++++++++++---------- 1 file changed, 48 insertions(+), 14 deletions(-) diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index cc2683419ff224..7b10bff3d2e709 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -4303,7 +4303,6 @@ static bool is_page_fault_stale(struct kvm_vcpu *vcpu, static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) { - bool is_tdp_mmu_fault = is_tdp_mmu(vcpu->arch.mmu); int r; if (page_fault_handle_page_track(vcpu, fault)) @@ -4322,11 +4321,7 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault return r; r = RET_PF_RETRY; - - if (is_tdp_mmu_fault) - read_lock(&vcpu->kvm->mmu_lock); - else - write_lock(&vcpu->kvm->mmu_lock); + write_lock(&vcpu->kvm->mmu_lock); if (is_page_fault_stale(vcpu, fault)) goto out_unlock; @@ -4335,16 +4330,10 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault if (r) goto out_unlock; - if (is_tdp_mmu_fault) - r = kvm_tdp_mmu_map(vcpu, fault); - else - r = __direct_map(vcpu, fault); + r = __direct_map(vcpu, fault); out_unlock: - if (is_tdp_mmu_fault) - read_unlock(&vcpu->kvm->mmu_lock); - else - write_unlock(&vcpu->kvm->mmu_lock); + write_unlock(&vcpu->kvm->mmu_lock); kvm_release_pfn_clean(fault->pfn); return r; } @@ -4392,6 +4381,46 @@ int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code, } EXPORT_SYMBOL_GPL(kvm_handle_page_fault); +#ifdef CONFIG_X86_64 +static int kvm_tdp_mmu_page_fault(struct kvm_vcpu *vcpu, + struct kvm_page_fault *fault) +{ + int r; + + if (page_fault_handle_page_track(vcpu, fault)) + return RET_PF_EMULATE; + + r = fast_page_fault(vcpu, fault); + if (r != RET_PF_INVALID) + return r; + + r = mmu_topup_memory_caches(vcpu, false); + if (r) + return r; + + r = kvm_faultin_pfn(vcpu, fault, ACC_ALL); + if (r != RET_PF_CONTINUE) + return r; + + r = RET_PF_RETRY; + read_lock(&vcpu->kvm->mmu_lock); + + if (is_page_fault_stale(vcpu, fault)) + goto out_unlock; + + r = make_mmu_pages_available(vcpu); + if (r) + goto out_unlock; + + r = kvm_tdp_mmu_map(vcpu, fault); + +out_unlock: + read_unlock(&vcpu->kvm->mmu_lock); + kvm_release_pfn_clean(fault->pfn); + return r; +} +#endif + int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) { /* @@ -4416,6 +4445,11 @@ int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) } } +#ifdef CONFIG_X86_64 + if (tdp_mmu_enabled) + return kvm_tdp_mmu_page_fault(vcpu, fault); +#endif + return direct_page_fault(vcpu, fault); } From 9f33697ac7ff6945cc06cffb37423176680e2ddf Mon Sep 17 00:00:00 2001 From: David Matlack Date: Wed, 21 Sep 2022 10:35:45 -0700 Subject: [PATCH 57/68] KVM: x86/mmu: Stop needlessly making MMU pages available for TDP MMU faults Stop calling make_mmu_pages_available() when handling TDP MMU faults. The TDP MMU does not participate in the "available MMU pages" tracking and limiting so calling this function is unnecessary work when handling TDP MMU faults. 
Signed-off-by: David Matlack Reviewed-by: Isaku Yamahata Signed-off-by: Paolo Bonzini Message-Id: <20220921173546.2674386-10-dmatlack@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/mmu.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 7b10bff3d2e709..dfac473d188a19 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -4408,10 +4408,6 @@ static int kvm_tdp_mmu_page_fault(struct kvm_vcpu *vcpu, if (is_page_fault_stale(vcpu, fault)) goto out_unlock; - r = make_mmu_pages_available(vcpu); - if (r) - goto out_unlock; - r = kvm_tdp_mmu_map(vcpu, fault); out_unlock: From 6c882ef4fc7bd99b67ad152e75428b669281c521 Mon Sep 17 00:00:00 2001 From: David Matlack Date: Wed, 21 Sep 2022 10:35:46 -0700 Subject: [PATCH 58/68] KVM: x86/mmu: Rename __direct_map() to direct_map() Rename __direct_map() to direct_map() since the leading underscores are unnecessary. This also makes the page fault handler names more consistent: kvm_tdp_mmu_page_fault() calls kvm_tdp_mmu_map() and direct_page_fault() calls direct_map(). Opportunistically make some trivial cleanups to comments that had to be modified anyway since they mentioned __direct_map(). Specifically, use "()" when referring to functions, and include kvm_tdp_mmu_map() among the various callers of disallowed_hugepage_adjust(). No functional change intended. Signed-off-by: David Matlack Reviewed-by: Isaku Yamahata Signed-off-by: Paolo Bonzini Message-Id: <20220921173546.2674386-11-dmatlack@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/mmu.c | 14 +++++++------- arch/x86/kvm/mmu/mmu_internal.h | 2 +- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index dfac473d188a19..7fb7a07a389e5b 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -3131,11 +3131,11 @@ void disallowed_hugepage_adjust(struct kvm_page_fault *fault, u64 spte, int cur_ !is_large_pte(spte) && spte_to_child_sp(spte)->nx_huge_page_disallowed) { /* - * A small SPTE exists for this pfn, but FNAME(fetch) - * and __direct_map would like to create a large PTE - * instead: just force them to go down another level, - * patching back for them into pfn the next 9 bits of - * the address. + * A small SPTE exists for this pfn, but FNAME(fetch), + * direct_map(), or kvm_tdp_mmu_map() would like to create a + * large PTE instead: just force them to go down another level, + * patching back for them into pfn the next 9 bits of the + * address. */ u64 page_mask = KVM_PAGES_PER_HPAGE(cur_level) - KVM_PAGES_PER_HPAGE(cur_level - 1); @@ -3144,7 +3144,7 @@ void disallowed_hugepage_adjust(struct kvm_page_fault *fault, u64 spte, int cur_ } } -static int __direct_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) +static int direct_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) { struct kvm_shadow_walk_iterator it; struct kvm_mmu_page *sp; @@ -4330,7 +4330,7 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault if (r) goto out_unlock; - r = __direct_map(vcpu, fault); + r = direct_map(vcpu, fault); out_unlock: write_unlock(&vcpu->kvm->mmu_lock); diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h index 069890789faff3..ac00bfbf32f672 100644 --- a/arch/x86/kvm/mmu/mmu_internal.h +++ b/arch/x86/kvm/mmu/mmu_internal.h @@ -199,7 +199,7 @@ struct kvm_page_fault { /* * Maximum page size that can be created for this fault; input to - * FNAME(fetch), __direct_map and kvm_tdp_mmu_map. 
+ * FNAME(fetch), direct_map() and kvm_tdp_mmu_map(). */ u8 max_level; From de0322f575be9e7fb598d14d58f5a7c7d8394e2b Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Wed, 12 Oct 2022 18:17:00 +0000 Subject: [PATCH 59/68] KVM: x86/mmu: Replace open coded usage of tdp_mmu_page with is_tdp_mmu_page() Use is_tdp_mmu_page() instead of querying sp->tdp_mmu_page directly so that all users benefit if KVM ever finds a way to optimize the logic. No functional change intended. Signed-off-by: Sean Christopherson Message-Id: <20221012181702.3663607-10-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/mmu.c | 2 +- arch/x86/kvm/mmu/tdp_mmu.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 7fb7a07a389e5b..27b233eff6e5b0 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -1928,7 +1928,7 @@ static bool is_obsolete_sp(struct kvm *kvm, struct kvm_mmu_page *sp) return true; /* TDP MMU pages do not use the MMU generation. */ - return !sp->tdp_mmu_page && + return !is_tdp_mmu_page(sp) && unlikely(sp->mmu_valid_gen != kvm->arch.mmu_valid_gen); } diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c index 7e5952e95d3bf7..cc1fb9a6562015 100644 --- a/arch/x86/kvm/mmu/tdp_mmu.c +++ b/arch/x86/kvm/mmu/tdp_mmu.c @@ -133,7 +133,7 @@ void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root, if (!refcount_dec_and_test(&root->tdp_mmu_root_count)) return; - WARN_ON(!root->tdp_mmu_page); + WARN_ON(!is_tdp_mmu_page(root)); /* * The root now has refcount=0. It is valid, but readers already From 78fdd2f09fb1dfe152fe4a151c484786f1ed6984 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Wed, 12 Oct 2022 18:16:59 +0000 Subject: [PATCH 60/68] KVM: x86/mmu: Pivot on "TDP MMU enabled" to check if active MMU is TDP MMU Simplify and optimize the logic for detecting if the current/active MMU is a TDP MMU. If the TDP MMU is globally enabled, then the active MMU is a TDP MMU if it is direct. When TDP is enabled, so called nonpaging MMUs are never used as the only form of shadow paging KVM uses is for nested TDP, and the active MMU can't be direct in that case. Rename the helper and take the vCPU instead of an arbitrary MMU, as nonpaging MMUs can show up in the walk_mmu if L1 is using nested TDP and L2 has paging disabled. Taking the vCPU has the added bonus of cleaning up the callers, all of which check the current MMU but wrap code that consumes the vCPU. No functional change intended. Signed-off-by: Sean Christopherson Message-Id: <20221012181702.3663607-9-seanjc@google.com> [Use tdp_mmu_enabled variable. 
- Paolo] Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/mmu.c | 11 ++++++++--- arch/x86/kvm/mmu/tdp_mmu.h | 18 ------------------ 2 files changed, 8 insertions(+), 21 deletions(-) diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 27b233eff6e5b0..c5193bb56bfeb4 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -616,9 +616,14 @@ static bool mmu_spte_age(u64 *sptep) return true; } +static inline bool is_tdp_mmu_active(struct kvm_vcpu *vcpu) +{ + return tdp_mmu_enabled && vcpu->arch.mmu->root_role.direct; +} + static void walk_shadow_page_lockless_begin(struct kvm_vcpu *vcpu) { - if (is_tdp_mmu(vcpu->arch.mmu)) { + if (is_tdp_mmu_active(vcpu)) { kvm_tdp_mmu_walk_lockless_begin(); } else { /* @@ -637,7 +642,7 @@ static void walk_shadow_page_lockless_begin(struct kvm_vcpu *vcpu) static void walk_shadow_page_lockless_end(struct kvm_vcpu *vcpu) { - if (is_tdp_mmu(vcpu->arch.mmu)) { + if (is_tdp_mmu_active(vcpu)) { kvm_tdp_mmu_walk_lockless_end(); } else { /* @@ -4043,7 +4048,7 @@ static bool get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep) walk_shadow_page_lockless_begin(vcpu); - if (is_tdp_mmu(vcpu->arch.mmu)) + if (is_tdp_mmu_active(vcpu)) leaf = kvm_tdp_mmu_get_walk(vcpu, addr, sptes, &root); else leaf = get_walk(vcpu, addr, sptes, &root); diff --git a/arch/x86/kvm/mmu/tdp_mmu.h b/arch/x86/kvm/mmu/tdp_mmu.h index e4ab2dac269d68..0a63b1afabd3cc 100644 --- a/arch/x86/kvm/mmu/tdp_mmu.h +++ b/arch/x86/kvm/mmu/tdp_mmu.h @@ -72,26 +72,8 @@ u64 *kvm_tdp_mmu_fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, u64 addr, #ifdef CONFIG_X86_64 static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return sp->tdp_mmu_page; } - -static inline bool is_tdp_mmu(struct kvm_mmu *mmu) -{ - struct kvm_mmu_page *sp; - hpa_t hpa = mmu->root.hpa; - - if (WARN_ON(!VALID_PAGE(hpa))) - return false; - - /* - * A NULL shadow page is legal when shadowing a non-paging guest with - * PAE paging, as the MMU will be direct with root_hpa pointing at the - * pae_root page, not a shadow page. - */ - sp = to_shadow_page(hpa); - return sp && is_tdp_mmu_page(sp) && sp->root_count; -} #else static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return false; } -static inline bool is_tdp_mmu(struct kvm_mmu *mmu) { return false; } #endif #endif /* __KVM_X86_MMU_TDP_MMU_H */ From dfe0ecc6f5d32da13783d203403bd3ecacbd3179 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Wed, 12 Oct 2022 18:16:58 +0000 Subject: [PATCH 61/68] KVM: x86/mmu: Pivot on "TDP MMU enabled" when handling direct page faults When handling direct page faults, pivot on the TDP MMU being globally enabled instead of checking if the target MMU is a TDP MMU. Now that the TDP MMU is all-or-nothing, if the TDP MMU is enabled, KVM will reach direct_page_fault() if and only if the MMU is a TDP MMU. When TDP is enabled (obviously required for the TDP MMU), only non-nested TDP page faults reach direct_page_fault(), i.e. nonpaging MMUs are impossible, as NPT requires paging to be enabled and EPT faults use ept_page_fault(). Signed-off-by: Sean Christopherson Message-Id: <20221012181702.3663607-8-seanjc@google.com> [Use tdp_mmu_enabled variable. 
- Paolo] Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/mmu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index c5193bb56bfeb4..15d389370f8852 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -3372,7 +3372,7 @@ static int fast_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) do { u64 new_spte; - if (is_tdp_mmu(vcpu->arch.mmu)) + if (tdp_mmu_enabled) sptep = kvm_tdp_mmu_fast_pf_get_last_sptep(vcpu, fault->addr, &spte); else sptep = fast_pf_get_last_sptep(vcpu, fault->addr, &spte); From b961aa757f940bebc608a412f66951e52a39a298 Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Thu, 13 Oct 2022 11:58:43 +0200 Subject: [PATCH 62/68] x86/hyperv: Add HV_EXPOSE_INVARIANT_TSC define Avoid open coding BIT(0) of HV_X64_MSR_TSC_INVARIANT_CONTROL by adding a dedicated define. While there's only one user at this moment, the upcoming KVM implementation of the Hyper-V invariant TSC feature will need to use it as well. Reviewed-by: Michael Kelley Signed-off-by: Vitaly Kuznetsov Reviewed-by: Sean Christopherson Message-Id: <20221013095849.705943-2-vkuznets@redhat.com> Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/hyperv-tlfs.h | 3 +++ arch/x86/kernel/cpu/mshyperv.c | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/arch/x86/include/asm/hyperv-tlfs.h b/arch/x86/include/asm/hyperv-tlfs.h index e3efaf6e6b6285..617332dd64ac67 100644 --- a/arch/x86/include/asm/hyperv-tlfs.h +++ b/arch/x86/include/asm/hyperv-tlfs.h @@ -255,6 +255,9 @@ enum hv_isolation_type { /* TSC invariant control */ #define HV_X64_MSR_TSC_INVARIANT_CONTROL 0x40000118 +/* HV_X64_MSR_TSC_INVARIANT_CONTROL bits */ +#define HV_EXPOSE_INVARIANT_TSC BIT_ULL(0) + /* Register name aliases for temporary compatibility */ #define HV_X64_MSR_STIMER0_COUNT HV_REGISTER_STIMER0_COUNT #define HV_X64_MSR_STIMER0_CONFIG HV_REGISTER_STIMER0_CONFIG diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c index 831613959a92af..e402923800d7ae 100644 --- a/arch/x86/kernel/cpu/mshyperv.c +++ b/arch/x86/kernel/cpu/mshyperv.c @@ -388,7 +388,7 @@ static void __init ms_hyperv_init_platform(void) * setting of this MSR bit should happen before init_intel() * is called. */ - wrmsrl(HV_X64_MSR_TSC_INVARIANT_CONTROL, 0x1); + wrmsrl(HV_X64_MSR_TSC_INVARIANT_CONTROL, HV_EXPOSE_INVARIANT_TSC); setup_force_cpu_cap(X86_FEATURE_TSC_RELIABLE); } From 0fcf86f05af208e4e6c62cf1a7e7e28bc6c9d9f9 Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Thu, 13 Oct 2022 11:58:44 +0200 Subject: [PATCH 63/68] KVM: x86: Add a KVM-only leaf for CPUID_8000_0007_EDX CPUID_8000_0007_EDX may come in handy when X86_FEATURE_CONSTANT_TSC needs to be checked. No functional change intended.
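For context, a sketch of how such a KVM-only leaf is consumed; kvm_cpu_cap_has() is the existing accessor, and the __feature_translate() hunk in the diff below is what redirects the kernel's feature bit into KVM's private word:

    /* A plain capability check keeps working after this patch: */
    if (kvm_cpu_cap_has(X86_FEATURE_CONSTANT_TSC)) {
            /* __feature_translate() maps X86_FEATURE_CONSTANT_TSC to
             * KVM_X86_FEATURE_CONSTANT_TSC, i.e. bit 8 of KVM's
             * CPUID_8000_0007_EDX word. */
    }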
Signed-off-by: Vitaly Kuznetsov Reviewed-by: Sean Christopherson Message-Id: <20221013095849.705943-3-vkuznets@redhat.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/cpuid.c | 8 ++++++-- arch/x86/kvm/reverse_cpuid.h | 7 +++++++ 2 files changed, 13 insertions(+), 2 deletions(-) diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index 0b5bf013fcb8e3..ff342c9da17226 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -701,6 +701,10 @@ void kvm_set_cpu_caps(void) if (!tdp_enabled && IS_ENABLED(CONFIG_X86_64)) kvm_cpu_cap_set(X86_FEATURE_GBPAGES); + kvm_cpu_cap_init_kvm_defined(CPUID_8000_0007_EDX, + SF(CONSTANT_TSC) + ); + kvm_cpu_cap_mask(CPUID_8000_0008_EBX, F(CLZERO) | F(XSAVEERPTR) | F(WBNOINVD) | F(AMD_IBPB) | F(AMD_IBRS) | F(AMD_SSBD) | F(VIRT_SSBD) | @@ -1153,8 +1157,8 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function) entry->edx &= ~GENMASK(17, 16); break; case 0x80000007: /* Advanced power management */ - /* invariant TSC is CPUID.80000007H:EDX[8] */ - entry->edx &= (1 << 8); + cpuid_entry_override(entry, CPUID_8000_0007_EDX); + /* mask against host */ entry->edx &= boot_cpu_data.x86_power; entry->eax = entry->ebx = entry->ecx = 0; diff --git a/arch/x86/kvm/reverse_cpuid.h b/arch/x86/kvm/reverse_cpuid.h index 203fdad07bae6c..25b9b51abb20de 100644 --- a/arch/x86/kvm/reverse_cpuid.h +++ b/arch/x86/kvm/reverse_cpuid.h @@ -14,6 +14,7 @@ enum kvm_only_cpuid_leafs { CPUID_12_EAX = NCAPINTS, CPUID_7_1_EDX, + CPUID_8000_0007_EDX, NR_KVM_CPU_CAPS, NKVMCAPINTS = NR_KVM_CPU_CAPS - NCAPINTS, @@ -42,6 +43,9 @@ enum kvm_only_cpuid_leafs { #define X86_FEATURE_AVX_NE_CONVERT KVM_X86_FEATURE(CPUID_7_1_EDX, 5) #define X86_FEATURE_PREFETCHITI KVM_X86_FEATURE(CPUID_7_1_EDX, 14) +/* CPUID level 0x80000007 (EDX). */ +#define KVM_X86_FEATURE_CONSTANT_TSC KVM_X86_FEATURE(CPUID_8000_0007_EDX, 8) + struct cpuid_reg { u32 function; u32 index; @@ -67,6 +71,7 @@ static const struct cpuid_reg reverse_cpuid[] = { [CPUID_12_EAX] = {0x00000012, 0, CPUID_EAX}, [CPUID_8000_001F_EAX] = {0x8000001f, 0, CPUID_EAX}, [CPUID_7_1_EDX] = { 7, 1, CPUID_EDX}, + [CPUID_8000_0007_EDX] = {0x80000007, 0, CPUID_EDX}, }; /* @@ -97,6 +102,8 @@ static __always_inline u32 __feature_translate(int x86_feature) return KVM_X86_FEATURE_SGX1; else if (x86_feature == X86_FEATURE_SGX2) return KVM_X86_FEATURE_SGX2; + else if (x86_feature == X86_FEATURE_CONSTANT_TSC) + return KVM_X86_FEATURE_CONSTANT_TSC; return x86_feature; } From 2be1bd3a70c81835b8969216ceedbd017fea732d Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Thu, 13 Oct 2022 11:58:45 +0200 Subject: [PATCH 64/68] KVM: x86: Hyper-V invariant TSC control Genuine Hyper-V doesn't expose the architectural invariant TSC bit (CPUID.80000007H:EDX[8]) to its guests by default. A special PV MSR (HV_X64_MSR_TSC_INVARIANT_CONTROL, 0x40000118) and a corresponding CPUID feature bit (CPUID.0x40000003.EAX[15]) were introduced. When bit 0 of the PV MSR is set, the invariant TSC bit starts to show up in CPUID. When the feature is exposed to Hyper-V guests, reenlightenment becomes unneeded. Add the feature to KVM. Keep CPUID output intact when the feature wasn't exposed to L1 and implement the required logic for hiding invariant TSC when the feature was exposed and the invariant TSC control MSR wasn't written to. Copy genuine Hyper-V behavior and forbid disabling the feature once it has been enabled. For reference, for Linux guests, support for the feature was added in commit dce7cd62754b ("x86/hyperv: Allow guests to enable InvariantTSC").
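From the guest's point of view the handshake looks roughly like this (a sketch; the MSR write mirrors the ms_hyperv_init_platform() hunk in the previous patch):

    /* 1. The hypervisor advertises the control:
     *    CPUID.0x40000003.EAX[15] (HV_ACCESS_TSC_INVARIANT) is set. */

    /* 2. The guest opts in by setting bit 0 of the PV MSR: */
    wrmsrl(HV_X64_MSR_TSC_INVARIANT_CONTROL, HV_EXPOSE_INVARIANT_TSC);

    /* 3. Only after this write does KVM stop suppressing
     *    CPUID.80000007H:EDX[8], making invariant TSC visible to CPUID. */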
Signed-off-by: Vitaly Kuznetsov Reviewed-by: Sean Christopherson Message-Id: <20221013095849.705943-4-vkuznets@redhat.com> Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_host.h | 1 + arch/x86/kvm/cpuid.c | 3 +++ arch/x86/kvm/hyperv.c | 19 +++++++++++++++++++ arch/x86/kvm/hyperv.h | 27 +++++++++++++++++++++++++++ arch/x86/kvm/x86.c | 4 +++- 5 files changed, 53 insertions(+), 1 deletion(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index aa4eb8cfcd7e17..c70690b2c82da5 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1088,6 +1088,7 @@ struct kvm_hv { u64 hv_reenlightenment_control; u64 hv_tsc_emulation_control; u64 hv_tsc_emulation_status; + u64 hv_invtsc_control; /* How many vCPUs have VP index != vCPU index */ atomic_t num_mismatched_vp_indexes; diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index ff342c9da17226..c3f084185daf9f 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -1488,6 +1488,9 @@ bool kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx, if (!__kvm_get_msr(vcpu, MSR_IA32_TSX_CTRL, &data, true) && (data & TSX_CTRL_CPUID_CLEAR)) *ebx &= ~(F(RTM) | F(HLE)); + } else if (function == 0x80000007) { + if (kvm_hv_invtsc_suppressed(vcpu)) + *edx &= ~SF(CONSTANT_TSC); } } else { *eax = *ebx = *ecx = *edx = 0; diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c index e8296942a86821..80d082ecd5c578 100644 --- a/arch/x86/kvm/hyperv.c +++ b/arch/x86/kvm/hyperv.c @@ -999,6 +999,7 @@ static bool kvm_hv_msr_partition_wide(u32 msr) case HV_X64_MSR_REENLIGHTENMENT_CONTROL: case HV_X64_MSR_TSC_EMULATION_CONTROL: case HV_X64_MSR_TSC_EMULATION_STATUS: + case HV_X64_MSR_TSC_INVARIANT_CONTROL: case HV_X64_MSR_SYNDBG_OPTIONS: case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER: r = true; @@ -1283,6 +1284,9 @@ static bool hv_check_msr_access(struct kvm_vcpu_hv *hv_vcpu, u32 msr) case HV_X64_MSR_TSC_EMULATION_STATUS: return hv_vcpu->cpuid_cache.features_eax & HV_ACCESS_REENLIGHTENMENT; + case HV_X64_MSR_TSC_INVARIANT_CONTROL: + return hv_vcpu->cpuid_cache.features_eax & + HV_ACCESS_TSC_INVARIANT; case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4: case HV_X64_MSR_CRASH_CTL: return hv_vcpu->cpuid_cache.features_edx & @@ -1410,6 +1414,17 @@ static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data, if (!host) return 1; break; + case HV_X64_MSR_TSC_INVARIANT_CONTROL: + /* Only bit 0 is supported */ + if (data & ~HV_EXPOSE_INVARIANT_TSC) + return 1; + + /* The feature can't be disabled from the guest */ + if (!host && hv->hv_invtsc_control && !data) + return 1; + + hv->hv_invtsc_control = data; + break; case HV_X64_MSR_SYNDBG_OPTIONS: case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER: return syndbg_set_msr(vcpu, msr, data, host); @@ -1585,6 +1600,9 @@ static int kvm_hv_get_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, case HV_X64_MSR_TSC_EMULATION_STATUS: data = hv->hv_tsc_emulation_status; break; + case HV_X64_MSR_TSC_INVARIANT_CONTROL: + data = hv->hv_invtsc_control; + break; case HV_X64_MSR_SYNDBG_OPTIONS: case HV_X64_MSR_SYNDBG_CONTROL ... 
HV_X64_MSR_SYNDBG_PENDING_BUFFER: return syndbg_get_msr(vcpu, msr, pdata, host); @@ -2733,6 +2751,7 @@ int kvm_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid, ent->eax |= HV_MSR_REFERENCE_TSC_AVAILABLE; ent->eax |= HV_ACCESS_FREQUENCY_MSRS; ent->eax |= HV_ACCESS_REENLIGHTENMENT; + ent->eax |= HV_ACCESS_TSC_INVARIANT; ent->ebx |= HV_POST_MESSAGES; ent->ebx |= HV_SIGNAL_EVENTS; diff --git a/arch/x86/kvm/hyperv.h b/arch/x86/kvm/hyperv.h index 9f96414a31c52d..f83b8db72b118c 100644 --- a/arch/x86/kvm/hyperv.h +++ b/arch/x86/kvm/hyperv.h @@ -136,6 +136,33 @@ static inline bool kvm_hv_has_stimer_pending(struct kvm_vcpu *vcpu) HV_SYNIC_STIMER_COUNT); } +/* + * With HV_ACCESS_TSC_INVARIANT feature, invariant TSC (CPUID.80000007H:EDX[8]) + * is only observed after HV_X64_MSR_TSC_INVARIANT_CONTROL was written to. + */ +static inline bool kvm_hv_invtsc_suppressed(struct kvm_vcpu *vcpu) +{ + struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); + + /* + * If Hyper-V's invariant TSC control is not exposed to the guest, + * the invariant TSC CPUID flag is not suppressed, Windows guests were + * observed to be able to handle it correctly. Going forward, VMMs are + * encouraged to enable Hyper-V's invariant TSC control when invariant + * TSC CPUID flag is set to make KVM's behavior match genuine Hyper-V. + */ + if (!hv_vcpu || + !(hv_vcpu->cpuid_cache.features_eax & HV_ACCESS_TSC_INVARIANT)) + return false; + + /* + * If Hyper-V's invariant TSC control is exposed to the guest, KVM is + * responsible for suppressing the invariant TSC CPUID flag if the + * Hyper-V control is not enabled. + */ + return !(to_kvm_hv(vcpu->kvm)->hv_invtsc_control & HV_EXPOSE_INVARIANT_TSC); +} + void kvm_hv_process_stimers(struct kvm_vcpu *vcpu); void kvm_hv_setup_tsc_page(struct kvm *kvm, diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 5c3ce39cdccbf9..39b8dd37bc40ae 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1480,7 +1480,7 @@ static const u32 emulated_msrs_all[] = { HV_X64_MSR_STIMER0_CONFIG, HV_X64_MSR_VP_ASSIST_PAGE, HV_X64_MSR_REENLIGHTENMENT_CONTROL, HV_X64_MSR_TSC_EMULATION_CONTROL, - HV_X64_MSR_TSC_EMULATION_STATUS, + HV_X64_MSR_TSC_EMULATION_STATUS, HV_X64_MSR_TSC_INVARIANT_CONTROL, HV_X64_MSR_SYNDBG_OPTIONS, HV_X64_MSR_SYNDBG_CONTROL, HV_X64_MSR_SYNDBG_STATUS, HV_X64_MSR_SYNDBG_SEND_BUFFER, HV_X64_MSR_SYNDBG_RECV_BUFFER, @@ -3821,6 +3821,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) case HV_X64_MSR_REENLIGHTENMENT_CONTROL: case HV_X64_MSR_TSC_EMULATION_CONTROL: case HV_X64_MSR_TSC_EMULATION_STATUS: + case HV_X64_MSR_TSC_INVARIANT_CONTROL: return kvm_hv_set_msr_common(vcpu, msr, data, msr_info->host_initiated); case MSR_IA32_BBL_CR_CTL3: @@ -4191,6 +4192,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) case HV_X64_MSR_REENLIGHTENMENT_CONTROL: case HV_X64_MSR_TSC_EMULATION_CONTROL: case HV_X64_MSR_TSC_EMULATION_STATUS: + case HV_X64_MSR_TSC_INVARIANT_CONTROL: return kvm_hv_get_msr_common(vcpu, msr_info->index, &msr_info->data, msr_info->host_initiated); From 67b16f180541d5d1869a07afb844a244f9b6db0f Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Thu, 13 Oct 2022 11:58:46 +0200 Subject: [PATCH 65/68] KVM: selftests: Rename 'msr->available' to 'msr->fault_expected' in hyperv_features test It may not be clear what 'msr->available' means. The test actually checks that accessing the particular MSR doesn't cause a #GP; rename the variable accordingly.
While at it, use 'true'/'false' instead of '1'/'0' for 'write'/
'fault_expected' as these fields are boolean.

Signed-off-by: Vitaly Kuznetsov
Reviewed-by: Sean Christopherson
Message-Id: <20221013095849.705943-5-vkuznets@redhat.com>
Signed-off-by: Paolo Bonzini
---
 .../selftests/kvm/x86_64/hyperv_features.c | 184 +++++++++---------
 1 file changed, 92 insertions(+), 92 deletions(-)

diff --git a/tools/testing/selftests/kvm/x86_64/hyperv_features.c b/tools/testing/selftests/kvm/x86_64/hyperv_features.c
index 3163c3e8db0a72..4cf1368af48ab8 100644
--- a/tools/testing/selftests/kvm/x86_64/hyperv_features.c
+++ b/tools/testing/selftests/kvm/x86_64/hyperv_features.c
@@ -15,7 +15,7 @@
 struct msr_data {
 	uint32_t idx;
-	bool available;
+	bool fault_expected;
 	bool write;
 	u64 write_val;
 };
@@ -38,10 +38,10 @@ static void guest_msr(struct msr_data *msr)
 	else
 		vector = wrmsr_safe(msr->idx, msr->write_val);
 
-	if (msr->available)
-		GUEST_ASSERT_2(!vector, msr->idx, vector);
-	else
+	if (msr->fault_expected)
 		GUEST_ASSERT_2(vector == GP_VECTOR, msr->idx, vector);
+	else
+		GUEST_ASSERT_2(!vector, msr->idx, vector);
 
 	GUEST_DONE();
 }
@@ -134,13 +134,13 @@ static void guest_test_msrs_access(void)
 			 * Only available when Hyper-V identification is set
 			 */
 			msr->idx = HV_X64_MSR_GUEST_OS_ID;
-			msr->write = 0;
-			msr->available = 0;
+			msr->write = false;
+			msr->fault_expected = true;
 			break;
 		case 1:
 			msr->idx = HV_X64_MSR_HYPERCALL;
-			msr->write = 0;
-			msr->available = 0;
+			msr->write = false;
+			msr->fault_expected = true;
 			break;
 		case 2:
 			feat->eax |= HV_MSR_HYPERCALL_AVAILABLE;
 			/*
@@ -149,118 +149,118 @@ static void guest_test_msrs_access(void)
 			 * HV_X64_MSR_HYPERCALL available.
 			 */
 			msr->idx = HV_X64_MSR_GUEST_OS_ID;
-			msr->write = 1;
+			msr->write = true;
 			msr->write_val = HYPERV_LINUX_OS_ID;
-			msr->available = 1;
+			msr->fault_expected = false;
 			break;
 		case 3:
 			msr->idx = HV_X64_MSR_GUEST_OS_ID;
-			msr->write = 0;
-			msr->available = 1;
+			msr->write = false;
+			msr->fault_expected = false;
 			break;
 		case 4:
 			msr->idx = HV_X64_MSR_HYPERCALL;
-			msr->write = 0;
-			msr->available = 1;
+			msr->write = false;
+			msr->fault_expected = false;
 			break;
 		case 5:
 			msr->idx = HV_X64_MSR_VP_RUNTIME;
-			msr->write = 0;
-			msr->available = 0;
+			msr->write = false;
+			msr->fault_expected = true;
 			break;
 		case 6:
 			feat->eax |= HV_MSR_VP_RUNTIME_AVAILABLE;
 			msr->idx = HV_X64_MSR_VP_RUNTIME;
-			msr->write = 0;
-			msr->available = 1;
+			msr->write = false;
+			msr->fault_expected = false;
 			break;
 		case 7:
 			/* Read only */
 			msr->idx = HV_X64_MSR_VP_RUNTIME;
-			msr->write = 1;
+			msr->write = true;
 			msr->write_val = 1;
-			msr->available = 0;
+			msr->fault_expected = true;
 			break;
 		case 8:
 			msr->idx = HV_X64_MSR_TIME_REF_COUNT;
-			msr->write = 0;
-			msr->available = 0;
+			msr->write = false;
+			msr->fault_expected = true;
 			break;
 		case 9:
 			feat->eax |= HV_MSR_TIME_REF_COUNT_AVAILABLE;
 			msr->idx = HV_X64_MSR_TIME_REF_COUNT;
-			msr->write = 0;
-			msr->available = 1;
+			msr->write = false;
+			msr->fault_expected = false;
 			break;
 		case 10:
 			/* Read only */
 			msr->idx = HV_X64_MSR_TIME_REF_COUNT;
-			msr->write = 1;
+			msr->write = true;
 			msr->write_val = 1;
-			msr->available = 0;
+			msr->fault_expected = true;
 			break;
 		case 11:
 			msr->idx = HV_X64_MSR_VP_INDEX;
-			msr->write = 0;
-			msr->available = 0;
+			msr->write = false;
+			msr->fault_expected = true;
 			break;
 		case 12:
 			feat->eax |= HV_MSR_VP_INDEX_AVAILABLE;
 			msr->idx = HV_X64_MSR_VP_INDEX;
-			msr->write = 0;
-			msr->available = 1;
+			msr->write = false;
+			msr->fault_expected = false;
 			break;
 		case 13:
 			/* Read only */
 			msr->idx = HV_X64_MSR_VP_INDEX;
-			msr->write = 1;
+			msr->write = true;
 			msr->write_val = 1;
-			msr->available = 0;
+			msr->fault_expected = true;
 			break;
 		case 14:
 			msr->idx = HV_X64_MSR_RESET;
-			msr->write = 0;
-			msr->available = 0;
+			msr->write = false;
+			msr->fault_expected = true;
 			break;
 		case 15:
 			feat->eax |= HV_MSR_RESET_AVAILABLE;
 			msr->idx = HV_X64_MSR_RESET;
-			msr->write = 0;
-			msr->available = 1;
+			msr->write = false;
+			msr->fault_expected = false;
 			break;
 		case 16:
 			msr->idx = HV_X64_MSR_RESET;
-			msr->write = 1;
+			msr->write = true;
 			msr->write_val = 0;
-			msr->available = 1;
+			msr->fault_expected = false;
 			break;
 		case 17:
 			msr->idx = HV_X64_MSR_REFERENCE_TSC;
-			msr->write = 0;
-			msr->available = 0;
+			msr->write = false;
+			msr->fault_expected = true;
 			break;
 		case 18:
 			feat->eax |= HV_MSR_REFERENCE_TSC_AVAILABLE;
 			msr->idx = HV_X64_MSR_REFERENCE_TSC;
-			msr->write = 0;
-			msr->available = 1;
+			msr->write = false;
+			msr->fault_expected = false;
 			break;
 		case 19:
 			msr->idx = HV_X64_MSR_REFERENCE_TSC;
-			msr->write = 1;
+			msr->write = true;
 			msr->write_val = 0;
-			msr->available = 1;
+			msr->fault_expected = false;
 			break;
 		case 20:
 			msr->idx = HV_X64_MSR_EOM;
-			msr->write = 0;
-			msr->available = 0;
+			msr->write = false;
+			msr->fault_expected = true;
 			break;
 		case 21:
 			/*
@@ -268,146 +268,146 @@ static void guest_test_msrs_access(void)
 			 * Remains unavailable even with KVM_CAP_HYPERV_SYNIC2
 			 * capability enabled and guest visible CPUID bit unset.
 			 */
 			msr->idx = HV_X64_MSR_EOM;
-			msr->write = 0;
-			msr->available = 0;
+			msr->write = false;
+			msr->fault_expected = true;
 			break;
 		case 22:
 			feat->eax |= HV_MSR_SYNIC_AVAILABLE;
 			msr->idx = HV_X64_MSR_EOM;
-			msr->write = 0;
-			msr->available = 1;
+			msr->write = false;
+			msr->fault_expected = false;
 			break;
 		case 23:
 			msr->idx = HV_X64_MSR_EOM;
-			msr->write = 1;
+			msr->write = true;
 			msr->write_val = 0;
-			msr->available = 1;
+			msr->fault_expected = false;
 			break;
 		case 24:
 			msr->idx = HV_X64_MSR_STIMER0_CONFIG;
-			msr->write = 0;
-			msr->available = 0;
+			msr->write = false;
+			msr->fault_expected = true;
 			break;
 		case 25:
 			feat->eax |= HV_MSR_SYNTIMER_AVAILABLE;
 			msr->idx = HV_X64_MSR_STIMER0_CONFIG;
-			msr->write = 0;
-			msr->available = 1;
+			msr->write = false;
+			msr->fault_expected = false;
 			break;
 		case 26:
 			msr->idx = HV_X64_MSR_STIMER0_CONFIG;
-			msr->write = 1;
+			msr->write = true;
 			msr->write_val = 0;
-			msr->available = 1;
+			msr->fault_expected = false;
 			break;
 		case 27:
 			/* Direct mode test */
 			msr->idx = HV_X64_MSR_STIMER0_CONFIG;
-			msr->write = 1;
+			msr->write = true;
 			msr->write_val = 1 << 12;
-			msr->available = 0;
+			msr->fault_expected = true;
 			break;
 		case 28:
 			feat->edx |= HV_STIMER_DIRECT_MODE_AVAILABLE;
 			msr->idx = HV_X64_MSR_STIMER0_CONFIG;
-			msr->write = 1;
+			msr->write = true;
 			msr->write_val = 1 << 12;
-			msr->available = 1;
+			msr->fault_expected = false;
 			break;
 		case 29:
 			msr->idx = HV_X64_MSR_EOI;
-			msr->write = 0;
-			msr->available = 0;
+			msr->write = false;
+			msr->fault_expected = true;
 			break;
 		case 30:
 			feat->eax |= HV_MSR_APIC_ACCESS_AVAILABLE;
 			msr->idx = HV_X64_MSR_EOI;
-			msr->write = 1;
+			msr->write = true;
 			msr->write_val = 1;
-			msr->available = 1;
+			msr->fault_expected = false;
 			break;
 		case 31:
 			msr->idx = HV_X64_MSR_TSC_FREQUENCY;
-			msr->write = 0;
-			msr->available = 0;
+			msr->write = false;
+			msr->fault_expected = true;
 			break;
 		case 32:
 			feat->eax |= HV_ACCESS_FREQUENCY_MSRS;
 			msr->idx = HV_X64_MSR_TSC_FREQUENCY;
-			msr->write = 0;
-			msr->available = 1;
+			msr->write = false;
+			msr->fault_expected = false;
 			break;
 		case 33:
 			/* Read only */
 			msr->idx = HV_X64_MSR_TSC_FREQUENCY;
-			msr->write = 1;
+			msr->write = true;
 			msr->write_val = 1;
-			msr->available = 0;
+			msr->fault_expected = true;
 			break;
 		case 34:
 			msr->idx = HV_X64_MSR_REENLIGHTENMENT_CONTROL;
-			msr->write = 0;
-			msr->available = 0;
+			msr->write = false;
+			msr->fault_expected = true;
 			break;
 		case 35:
 			feat->eax |= HV_ACCESS_REENLIGHTENMENT;
 			msr->idx = HV_X64_MSR_REENLIGHTENMENT_CONTROL;
-			msr->write = 0;
-			msr->available = 1;
+			msr->write = false;
+			msr->fault_expected = false;
 			break;
 		case 36:
 			msr->idx = HV_X64_MSR_REENLIGHTENMENT_CONTROL;
-			msr->write = 1;
+			msr->write = true;
 			msr->write_val = 1;
-			msr->available = 1;
+			msr->fault_expected = false;
 			break;
 		case 37:
 			/* Can only write '0' */
 			msr->idx = HV_X64_MSR_TSC_EMULATION_STATUS;
-			msr->write = 1;
+			msr->write = true;
 			msr->write_val = 1;
-			msr->available = 0;
+			msr->fault_expected = true;
 			break;
 		case 38:
 			msr->idx = HV_X64_MSR_CRASH_P0;
-			msr->write = 0;
-			msr->available = 0;
+			msr->write = false;
+			msr->fault_expected = true;
 			break;
 		case 39:
 			feat->edx |= HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE;
 			msr->idx = HV_X64_MSR_CRASH_P0;
-			msr->write = 0;
-			msr->available = 1;
+			msr->write = false;
+			msr->fault_expected = false;
 			break;
 		case 40:
 			msr->idx = HV_X64_MSR_CRASH_P0;
-			msr->write = 1;
+			msr->write = true;
 			msr->write_val = 1;
-			msr->available = 1;
+			msr->fault_expected = false;
 			break;
 		case 41:
 			msr->idx = HV_X64_MSR_SYNDBG_STATUS;
-			msr->write = 0;
-			msr->available = 0;
+			msr->write = false;
+			msr->fault_expected = true;
 			break;
 		case 42:
 			feat->edx |= HV_FEATURE_DEBUG_MSRS_AVAILABLE;
 			dbg->eax |= HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING;
 			msr->idx = HV_X64_MSR_SYNDBG_STATUS;
-			msr->write = 0;
-			msr->available = 1;
+			msr->write = false;
+			msr->fault_expected = false;
 			break;
 		case 43:
 			msr->idx = HV_X64_MSR_SYNDBG_STATUS;
-			msr->write = 1;
+			msr->write = true;
 			msr->write_val = 0;
-			msr->available = 1;
+			msr->fault_expected = false;
 			break;
 		case 44:

From 2f10428ace91d07a8c7bd6f127d66eaa9e3a1a9f Mon Sep 17 00:00:00 2001
From: Vitaly Kuznetsov
Date: Thu, 13 Oct 2022 11:58:47 +0200
Subject: [PATCH 66/68] KVM: selftests: Convert hyperv_features test to using
 KVM_X86_CPU_FEATURE()

The hyperv_features test needs to set certain CPUID bits in Hyper-V
feature leaves, but instead of open-coding this, the common
KVM_X86_CPU_FEATURE() infrastructure can be used.
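As a minimal before/after sketch of the conversion for one feature bit
(the full list is converted in the diff below):

	/* before: open-coded bit in the raw CPUID register */
	#define HV_MSR_TIME_REF_COUNT_AVAILABLE	BIT(1)
	feat->eax |= HV_MSR_TIME_REF_COUNT_AVAILABLE;

	/* after: leaf/register/bit described once, set via the common helper */
	#define HV_MSR_TIME_REF_COUNT_AVAILABLE \
		KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EAX, 1)
	vcpu_set_cpuid_feature(vcpu, HV_MSR_TIME_REF_COUNT_AVAILABLE);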
Signed-off-by: Vitaly Kuznetsov
Reviewed-by: Sean Christopherson
Message-Id: <20221013095849.705943-6-vkuznets@redhat.com>
Signed-off-by: Paolo Bonzini
---
 .../selftests/kvm/include/x86_64/hyperv.h  | 141 ++++++++++++------
 .../selftests/kvm/x86_64/hyperv_features.c |  67 ++++-----
 2 files changed, 127 insertions(+), 81 deletions(-)

diff --git a/tools/testing/selftests/kvm/include/x86_64/hyperv.h b/tools/testing/selftests/kvm/include/x86_64/hyperv.h
index 9218bb5f44bfdb..ab455c4efc665d 100644
--- a/tools/testing/selftests/kvm/include/x86_64/hyperv.h
+++ b/tools/testing/selftests/kvm/include/x86_64/hyperv.h
@@ -85,61 +85,108 @@
 #define HV_X64_MSR_SYNDBG_OPTIONS	0x400000FF
 
 /* HYPERV_CPUID_FEATURES.EAX */
-#define HV_MSR_VP_RUNTIME_AVAILABLE	BIT(0)
-#define HV_MSR_TIME_REF_COUNT_AVAILABLE	BIT(1)
-#define HV_MSR_SYNIC_AVAILABLE		BIT(2)
-#define HV_MSR_SYNTIMER_AVAILABLE	BIT(3)
-#define HV_MSR_APIC_ACCESS_AVAILABLE	BIT(4)
-#define HV_MSR_HYPERCALL_AVAILABLE	BIT(5)
-#define HV_MSR_VP_INDEX_AVAILABLE	BIT(6)
-#define HV_MSR_RESET_AVAILABLE		BIT(7)
-#define HV_MSR_STAT_PAGES_AVAILABLE	BIT(8)
-#define HV_MSR_REFERENCE_TSC_AVAILABLE	BIT(9)
-#define HV_MSR_GUEST_IDLE_AVAILABLE	BIT(10)
-#define HV_ACCESS_FREQUENCY_MSRS	BIT(11)
-#define HV_ACCESS_REENLIGHTENMENT	BIT(13)
-#define HV_ACCESS_TSC_INVARIANT		BIT(15)
+#define HV_MSR_VP_RUNTIME_AVAILABLE \
+	KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EAX, 0)
+#define HV_MSR_TIME_REF_COUNT_AVAILABLE \
+	KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EAX, 1)
+#define HV_MSR_SYNIC_AVAILABLE \
+	KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EAX, 2)
+#define HV_MSR_SYNTIMER_AVAILABLE \
+	KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EAX, 3)
+#define HV_MSR_APIC_ACCESS_AVAILABLE \
+	KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EAX, 4)
+#define HV_MSR_HYPERCALL_AVAILABLE \
+	KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EAX, 5)
+#define HV_MSR_VP_INDEX_AVAILABLE \
+	KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EAX, 6)
+#define HV_MSR_RESET_AVAILABLE \
+	KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EAX, 7)
+#define HV_MSR_STAT_PAGES_AVAILABLE \
+	KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EAX, 8)
+#define HV_MSR_REFERENCE_TSC_AVAILABLE \
+	KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EAX, 9)
+#define HV_MSR_GUEST_IDLE_AVAILABLE \
+	KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EAX, 10)
+#define HV_ACCESS_FREQUENCY_MSRS \
+	KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EAX, 11)
+#define HV_ACCESS_REENLIGHTENMENT \
+	KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EAX, 13)
+#define HV_ACCESS_TSC_INVARIANT \
+	KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EAX, 15)
 
 /* HYPERV_CPUID_FEATURES.EBX */
-#define HV_CREATE_PARTITIONS		BIT(0)
-#define HV_ACCESS_PARTITION_ID		BIT(1)
-#define HV_ACCESS_MEMORY_POOL		BIT(2)
-#define HV_ADJUST_MESSAGE_BUFFERS	BIT(3)
-#define HV_POST_MESSAGES		BIT(4)
-#define HV_SIGNAL_EVENTS		BIT(5)
-#define HV_CREATE_PORT			BIT(6)
-#define HV_CONNECT_PORT			BIT(7)
-#define HV_ACCESS_STATS			BIT(8)
-#define HV_DEBUGGING			BIT(11)
-#define HV_CPU_MANAGEMENT		BIT(12)
-#define HV_ISOLATION			BIT(22)
+#define HV_CREATE_PARTITIONS \
+	KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EBX, 0)
+#define HV_ACCESS_PARTITION_ID \
+	KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EBX, 1)
+#define HV_ACCESS_MEMORY_POOL \
+	KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EBX, 2)
+#define HV_ADJUST_MESSAGE_BUFFERS \
+	KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EBX, 3)
+#define HV_POST_MESSAGES \
+	KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EBX, 4)
+#define HV_SIGNAL_EVENTS \
+	KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EBX, 5)
+#define HV_CREATE_PORT \
+	KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EBX, 6)
+#define HV_CONNECT_PORT \
+	KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EBX, 7)
+#define HV_ACCESS_STATS \
+	KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EBX, 8)
+#define HV_DEBUGGING \
+	KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EBX, 11)
+#define HV_CPU_MANAGEMENT \
+	KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EBX, 12)
+#define HV_ISOLATION \
+	KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EBX, 22)
 
 /* HYPERV_CPUID_FEATURES.EDX */
-#define HV_X64_MWAIT_AVAILABLE				BIT(0)
-#define HV_X64_GUEST_DEBUGGING_AVAILABLE		BIT(1)
-#define HV_X64_PERF_MONITOR_AVAILABLE			BIT(2)
-#define HV_X64_CPU_DYNAMIC_PARTITIONING_AVAILABLE	BIT(3)
-#define HV_X64_HYPERCALL_XMM_INPUT_AVAILABLE		BIT(4)
-#define HV_X64_GUEST_IDLE_STATE_AVAILABLE		BIT(5)
-#define HV_FEATURE_FREQUENCY_MSRS_AVAILABLE		BIT(8)
-#define HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE		BIT(10)
-#define HV_FEATURE_DEBUG_MSRS_AVAILABLE			BIT(11)
-#define HV_STIMER_DIRECT_MODE_AVAILABLE			BIT(19)
+#define HV_X64_MWAIT_AVAILABLE \
+	KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EDX, 0)
+#define HV_X64_GUEST_DEBUGGING_AVAILABLE \
+	KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EDX, 1)
+#define HV_X64_PERF_MONITOR_AVAILABLE \
+	KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EDX, 2)
+#define HV_X64_CPU_DYNAMIC_PARTITIONING_AVAILABLE \
+	KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EDX, 3)
+#define HV_X64_HYPERCALL_XMM_INPUT_AVAILABLE \
+	KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EDX, 4)
+#define HV_X64_GUEST_IDLE_STATE_AVAILABLE \
+	KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EDX, 5)
+#define HV_FEATURE_FREQUENCY_MSRS_AVAILABLE \
+	KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EDX, 8)
+#define HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE \
+	KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EDX, 10)
+#define HV_FEATURE_DEBUG_MSRS_AVAILABLE \
+	KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EDX, 11)
+#define HV_STIMER_DIRECT_MODE_AVAILABLE \
+	KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EDX, 19)
 
 /* HYPERV_CPUID_ENLIGHTMENT_INFO.EAX */
-#define HV_X64_AS_SWITCH_RECOMMENDED		BIT(0)
-#define HV_X64_LOCAL_TLB_FLUSH_RECOMMENDED	BIT(1)
-#define HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED	BIT(2)
-#define HV_X64_APIC_ACCESS_RECOMMENDED		BIT(3)
-#define HV_X64_SYSTEM_RESET_RECOMMENDED		BIT(4)
-#define HV_X64_RELAXED_TIMING_RECOMMENDED	BIT(5)
-#define HV_DEPRECATING_AEOI_RECOMMENDED		BIT(9)
-#define HV_X64_CLUSTER_IPI_RECOMMENDED		BIT(10)
-#define HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED	BIT(11)
-#define HV_X64_ENLIGHTENED_VMCS_RECOMMENDED	BIT(14)
+#define HV_X64_AS_SWITCH_RECOMMENDED \
+	KVM_X86_CPU_FEATURE(HYPERV_CPUID_ENLIGHTMENT_INFO, 0, EAX, 0)
+#define HV_X64_LOCAL_TLB_FLUSH_RECOMMENDED \
+	KVM_X86_CPU_FEATURE(HYPERV_CPUID_ENLIGHTMENT_INFO, 0, EAX, 1)
+#define HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED \
+	KVM_X86_CPU_FEATURE(HYPERV_CPUID_ENLIGHTMENT_INFO, 0, EAX, 2)
+#define HV_X64_APIC_ACCESS_RECOMMENDED \
+	KVM_X86_CPU_FEATURE(HYPERV_CPUID_ENLIGHTMENT_INFO, 0, EAX, 3)
+#define HV_X64_SYSTEM_RESET_RECOMMENDED \
+	KVM_X86_CPU_FEATURE(HYPERV_CPUID_ENLIGHTMENT_INFO, 0, EAX, 4)
+#define HV_X64_RELAXED_TIMING_RECOMMENDED \
+	KVM_X86_CPU_FEATURE(HYPERV_CPUID_ENLIGHTMENT_INFO, 0, EAX, 5)
+#define HV_DEPRECATING_AEOI_RECOMMENDED \
+	KVM_X86_CPU_FEATURE(HYPERV_CPUID_ENLIGHTMENT_INFO, 0, EAX, 9)
+#define HV_X64_CLUSTER_IPI_RECOMMENDED \
+	KVM_X86_CPU_FEATURE(HYPERV_CPUID_ENLIGHTMENT_INFO, 0, EAX, 10)
+#define HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED \
+	KVM_X86_CPU_FEATURE(HYPERV_CPUID_ENLIGHTMENT_INFO, 0, EAX, 11)
+#define HV_X64_ENLIGHTENED_VMCS_RECOMMENDED \
+	KVM_X86_CPU_FEATURE(HYPERV_CPUID_ENLIGHTMENT_INFO, 0, EAX, 14)
 
 /* HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES.EAX */
-#define HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING	BIT(1)
+#define HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING \
+	KVM_X86_CPU_FEATURE(HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES, 0, EAX, 1)
 
 /* Hypercalls */
 #define HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE	0x0002
diff --git a/tools/testing/selftests/kvm/x86_64/hyperv_features.c b/tools/testing/selftests/kvm/x86_64/hyperv_features.c
index 4cf1368af48ab8..daad3e3cc7bbae 100644
--- a/tools/testing/selftests/kvm/x86_64/hyperv_features.c
+++ b/tools/testing/selftests/kvm/x86_64/hyperv_features.c
@@ -13,6 +13,14 @@
 #include "processor.h"
 #include "hyperv.h"
 
+/*
+ * HYPERV_CPUID_ENLIGHTMENT_INFO.EBX is not a 'feature' CPUID leaf
+ * but to activate the feature it is sufficient to set it to a non-zero
+ * value. Use BIT(0) for that.
+ */
+#define HV_PV_SPINLOCKS_TEST \
+	KVM_X86_CPU_FEATURE(HYPERV_CPUID_ENLIGHTMENT_INFO, 0, EBX, 0)
+
 struct msr_data {
 	uint32_t idx;
 	bool fault_expected;
@@ -89,7 +97,6 @@ static void vcpu_reset_hv_cpuid(struct kvm_vcpu *vcpu)
 static void guest_test_msrs_access(void)
 {
 	struct kvm_cpuid2 *prev_cpuid = NULL;
-	struct kvm_cpuid_entry2 *feat, *dbg;
 	struct kvm_vcpu *vcpu;
 	struct kvm_run *run;
 	struct kvm_vm *vm;
@@ -116,9 +123,6 @@ static void guest_test_msrs_access(void)
 			vcpu_init_cpuid(vcpu, prev_cpuid);
 		}
 
-		feat = vcpu_get_cpuid_entry(vcpu, HYPERV_CPUID_FEATURES);
-		dbg = vcpu_get_cpuid_entry(vcpu, HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES);
-
 		vm_init_descriptor_tables(vm);
 		vcpu_init_descriptor_tables(vcpu);
 
@@ -143,7 +147,7 @@ static void guest_test_msrs_access(void)
 			msr->fault_expected = true;
 			break;
 		case 2:
-			feat->eax |= HV_MSR_HYPERCALL_AVAILABLE;
+			vcpu_set_cpuid_feature(vcpu, HV_MSR_HYPERCALL_AVAILABLE);
 			/*
 			 * HV_X64_MSR_GUEST_OS_ID has to be written first to make
 			 * HV_X64_MSR_HYPERCALL available.
@@ -170,7 +174,7 @@ static void guest_test_msrs_access(void)
 			msr->fault_expected = true;
 			break;
 		case 6:
-			feat->eax |= HV_MSR_VP_RUNTIME_AVAILABLE;
+			vcpu_set_cpuid_feature(vcpu, HV_MSR_VP_RUNTIME_AVAILABLE);
 			msr->idx = HV_X64_MSR_VP_RUNTIME;
 			msr->write = false;
 			msr->fault_expected = false;
@@ -189,7 +193,7 @@ static void guest_test_msrs_access(void)
 			msr->fault_expected = true;
 			break;
 		case 9:
-			feat->eax |= HV_MSR_TIME_REF_COUNT_AVAILABLE;
+			vcpu_set_cpuid_feature(vcpu, HV_MSR_TIME_REF_COUNT_AVAILABLE);
 			msr->idx = HV_X64_MSR_TIME_REF_COUNT;
 			msr->write = false;
 			msr->fault_expected = false;
@@ -208,7 +212,7 @@ static void guest_test_msrs_access(void)
 			msr->fault_expected = true;
 			break;
 		case 12:
-			feat->eax |= HV_MSR_VP_INDEX_AVAILABLE;
+			vcpu_set_cpuid_feature(vcpu, HV_MSR_VP_INDEX_AVAILABLE);
 			msr->idx = HV_X64_MSR_VP_INDEX;
 			msr->write = false;
 			msr->fault_expected = false;
@@ -227,7 +231,7 @@ static void guest_test_msrs_access(void)
 			msr->fault_expected = true;
 			break;
 		case 15:
-			feat->eax |= HV_MSR_RESET_AVAILABLE;
+			vcpu_set_cpuid_feature(vcpu, HV_MSR_RESET_AVAILABLE);
 			msr->idx = HV_X64_MSR_RESET;
 			msr->write = false;
 			msr->fault_expected = false;
@@ -245,7 +249,7 @@ static void guest_test_msrs_access(void)
 			msr->fault_expected = true;
 			break;
 		case 18:
-			feat->eax |= HV_MSR_REFERENCE_TSC_AVAILABLE;
+			vcpu_set_cpuid_feature(vcpu, HV_MSR_REFERENCE_TSC_AVAILABLE);
 			msr->idx = HV_X64_MSR_REFERENCE_TSC;
 			msr->write = false;
 			msr->fault_expected = false;
@@ -272,7 +276,7 @@ static void guest_test_msrs_access(void)
 			msr->fault_expected = true;
 			break;
 		case 22:
-			feat->eax |= HV_MSR_SYNIC_AVAILABLE;
+			vcpu_set_cpuid_feature(vcpu, HV_MSR_SYNIC_AVAILABLE);
 			msr->idx = HV_X64_MSR_EOM;
 			msr->write = false;
 			msr->fault_expected = false;
@@ -290,7 +294,7 @@ static void guest_test_msrs_access(void)
 			msr->fault_expected = true;
 			break;
 		case 25:
-			feat->eax |= HV_MSR_SYNTIMER_AVAILABLE;
+			vcpu_set_cpuid_feature(vcpu, HV_MSR_SYNTIMER_AVAILABLE);
 			msr->idx = HV_X64_MSR_STIMER0_CONFIG;
 			msr->write = false;
 			msr->fault_expected = false;
@@ -309,7 +313,7 @@ static void guest_test_msrs_access(void)
 			msr->fault_expected = true;
 			break;
 		case 28:
-			feat->edx |= HV_STIMER_DIRECT_MODE_AVAILABLE;
+			vcpu_set_cpuid_feature(vcpu, HV_STIMER_DIRECT_MODE_AVAILABLE);
 			msr->idx = HV_X64_MSR_STIMER0_CONFIG;
 			msr->write = true;
 			msr->write_val = 1 << 12;
@@ -322,7 +326,7 @@ static void guest_test_msrs_access(void)
 			msr->fault_expected = true;
 			break;
 		case 30:
-			feat->eax |= HV_MSR_APIC_ACCESS_AVAILABLE;
+			vcpu_set_cpuid_feature(vcpu, HV_MSR_APIC_ACCESS_AVAILABLE);
 			msr->idx = HV_X64_MSR_EOI;
 			msr->write = true;
 			msr->write_val = 1;
@@ -335,7 +339,7 @@ static void guest_test_msrs_access(void)
 			msr->fault_expected = true;
 			break;
 		case 32:
-			feat->eax |= HV_ACCESS_FREQUENCY_MSRS;
+			vcpu_set_cpuid_feature(vcpu, HV_ACCESS_FREQUENCY_MSRS);
 			msr->idx = HV_X64_MSR_TSC_FREQUENCY;
 			msr->write = false;
 			msr->fault_expected = false;
@@ -354,7 +358,7 @@ static void guest_test_msrs_access(void)
 			msr->fault_expected = true;
 			break;
 		case 35:
-			feat->eax |= HV_ACCESS_REENLIGHTENMENT;
+			vcpu_set_cpuid_feature(vcpu, HV_ACCESS_REENLIGHTENMENT);
 			msr->idx = HV_X64_MSR_REENLIGHTENMENT_CONTROL;
 			msr->write = false;
 			msr->fault_expected = false;
@@ -379,7 +383,7 @@ static void guest_test_msrs_access(void)
 			msr->fault_expected = true;
 			break;
 		case 39:
-			feat->edx |= HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE;
+			vcpu_set_cpuid_feature(vcpu, HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE);
 			msr->idx = HV_X64_MSR_CRASH_P0;
 			msr->write = false;
 			msr->fault_expected = false;
@@ -397,8 +401,8 @@ static void guest_test_msrs_access(void)
 			msr->fault_expected = true;
 			break;
 		case 42:
-			feat->edx |= HV_FEATURE_DEBUG_MSRS_AVAILABLE;
-			dbg->eax |= HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING;
+			vcpu_set_cpuid_feature(vcpu, HV_FEATURE_DEBUG_MSRS_AVAILABLE);
+			vcpu_set_cpuid_feature(vcpu, HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING);
 			msr->idx = HV_X64_MSR_SYNDBG_STATUS;
 			msr->write = false;
 			msr->fault_expected = false;
@@ -445,7 +449,6 @@ static void guest_test_msrs_access(void)
 
 static void guest_test_hcalls_access(void)
 {
-	struct kvm_cpuid_entry2 *feat, *recomm, *dbg;
 	struct kvm_cpuid2 *prev_cpuid = NULL;
 	struct kvm_vcpu *vcpu;
 	struct kvm_run *run;
@@ -480,15 +483,11 @@ static void guest_test_hcalls_access(void)
 			vcpu_init_cpuid(vcpu, prev_cpuid);
 		}
 
-		feat = vcpu_get_cpuid_entry(vcpu, HYPERV_CPUID_FEATURES);
-		recomm = vcpu_get_cpuid_entry(vcpu, HYPERV_CPUID_ENLIGHTMENT_INFO);
-		dbg = vcpu_get_cpuid_entry(vcpu, HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES);
-
 		run = vcpu->run;
 
 		switch (stage) {
 		case 0:
-			feat->eax |= HV_MSR_HYPERCALL_AVAILABLE;
+			vcpu_set_cpuid_feature(vcpu, HV_MSR_HYPERCALL_AVAILABLE);
 			hcall->control = 0xbeef;
 			hcall->expect = HV_STATUS_INVALID_HYPERCALL_CODE;
 			break;
@@ -498,7 +497,7 @@ static void guest_test_hcalls_access(void)
 			hcall->expect = HV_STATUS_ACCESS_DENIED;
 			break;
 		case 2:
-			feat->ebx |= HV_POST_MESSAGES;
+			vcpu_set_cpuid_feature(vcpu, HV_POST_MESSAGES);
 			hcall->control = HVCALL_POST_MESSAGE;
 			hcall->expect = HV_STATUS_INVALID_HYPERCALL_INPUT;
 			break;
@@ -508,7 +507,7 @@ static void guest_test_hcalls_access(void)
 			hcall->expect = HV_STATUS_ACCESS_DENIED;
 			break;
 		case 4:
-			feat->ebx |= HV_SIGNAL_EVENTS;
+			vcpu_set_cpuid_feature(vcpu, HV_SIGNAL_EVENTS);
 			hcall->control = HVCALL_SIGNAL_EVENT;
 			hcall->expect = HV_STATUS_INVALID_HYPERCALL_INPUT;
 			break;
@@ -518,12 +517,12 @@ static void guest_test_hcalls_access(void)
 			hcall->expect = HV_STATUS_INVALID_HYPERCALL_CODE;
 			break;
 		case 6:
-			dbg->eax |= HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING;
+			vcpu_set_cpuid_feature(vcpu, HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING);
 			hcall->control = HVCALL_RESET_DEBUG_SESSION;
 			hcall->expect = HV_STATUS_ACCESS_DENIED;
 			break;
 		case 7:
-			feat->ebx |= HV_DEBUGGING;
+			vcpu_set_cpuid_feature(vcpu, HV_DEBUGGING);
 			hcall->control = HVCALL_RESET_DEBUG_SESSION;
 			hcall->expect = HV_STATUS_OPERATION_DENIED;
 			break;
@@ -533,7 +532,7 @@ static void guest_test_hcalls_access(void)
 			hcall->expect = HV_STATUS_ACCESS_DENIED;
 			break;
 		case 9:
-			recomm->eax |= HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED;
+			vcpu_set_cpuid_feature(vcpu, HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED);
 			hcall->control = HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE;
 			hcall->expect = HV_STATUS_SUCCESS;
 			break;
@@ -542,7 +541,7 @@ static void guest_test_hcalls_access(void)
 			hcall->expect = HV_STATUS_ACCESS_DENIED;
 			break;
 		case 11:
-			recomm->eax |= HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED;
+			vcpu_set_cpuid_feature(vcpu, HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED);
 			hcall->control = HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX;
 			hcall->expect = HV_STATUS_SUCCESS;
 			break;
@@ -552,7 +551,7 @@ static void guest_test_hcalls_access(void)
 			hcall->expect = HV_STATUS_ACCESS_DENIED;
 			break;
 		case 13:
-			recomm->eax |= HV_X64_CLUSTER_IPI_RECOMMENDED;
+			vcpu_set_cpuid_feature(vcpu, HV_X64_CLUSTER_IPI_RECOMMENDED);
 			hcall->control = HVCALL_SEND_IPI;
 			hcall->expect = HV_STATUS_INVALID_HYPERCALL_INPUT;
 			break;
@@ -567,7 +566,7 @@ static void guest_test_hcalls_access(void)
 			hcall->expect = HV_STATUS_ACCESS_DENIED;
 			break;
 		case 16:
-			recomm->ebx = 0xfff;
+			vcpu_set_cpuid_feature(vcpu, HV_PV_SPINLOCKS_TEST);
 			hcall->control = HVCALL_NOTIFY_LONG_SPIN_WAIT;
 			hcall->expect = HV_STATUS_SUCCESS;
 			break;
@@ -577,7 +576,7 @@ static void guest_test_hcalls_access(void)
 			hcall->ud_expected = true;
 			break;
 		case 18:
-			feat->edx |= HV_X64_HYPERCALL_XMM_INPUT_AVAILABLE;
+			vcpu_set_cpuid_feature(vcpu, HV_X64_HYPERCALL_XMM_INPUT_AVAILABLE);
 			hcall->control = HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE | HV_HYPERCALL_FAST_BIT;
 			hcall->ud_expected = false;
 			hcall->expect = HV_STATUS_SUCCESS;

From 91a0b5478aab482d7133b8f069eb7d4590efcb7e Mon Sep 17 00:00:00 2001
From: Vitaly Kuznetsov
Date: Thu, 13 Oct 2022 11:58:48 +0200
Subject: [PATCH 67/68] KVM: selftests: Test that values written to Hyper-V
 MSRs are preserved

Enhance the 'hyperv_features' selftest by adding a check that KVM
preserves values written to PV MSRs. Two MSRs are, however, 'special':
- HV_X64_MSR_EOI, as it is a 'write-only' MSR,
- HV_X64_MSR_RESET, as it always reads as '0'.
The latter doesn't require any special handling right now because the
test never writes anything besides '0' to the MSR; leave a TODO note
about this fact.

Suggested-by: Sean Christopherson
Signed-off-by: Vitaly Kuznetsov
Reviewed-by: Sean Christopherson
Message-Id: <20221013095849.705943-7-vkuznets@redhat.com>
Signed-off-by: Paolo Bonzini
---
 .../selftests/kvm/x86_64/hyperv_features.c | 36 ++++++++++++++-----
 1 file changed, 28 insertions(+), 8 deletions(-)

diff --git a/tools/testing/selftests/kvm/x86_64/hyperv_features.c b/tools/testing/selftests/kvm/x86_64/hyperv_features.c
index daad3e3cc7bbae..b0024096397491 100644
--- a/tools/testing/selftests/kvm/x86_64/hyperv_features.c
+++ b/tools/testing/selftests/kvm/x86_64/hyperv_features.c
@@ -34,22 +34,36 @@ struct hcall_data {
 	bool ud_expected;
 };
 
+static bool is_write_only_msr(uint32_t msr)
+{
+	return msr == HV_X64_MSR_EOI;
+}
+
 static void guest_msr(struct msr_data *msr)
 {
-	uint64_t ignored;
-	uint8_t vector;
+	uint8_t vector = 0;
+	uint64_t msr_val = 0;
 
 	GUEST_ASSERT(msr->idx);
 
-	if (!msr->write)
-		vector = rdmsr_safe(msr->idx, &ignored);
-	else
+	if (msr->write)
 		vector = wrmsr_safe(msr->idx, msr->write_val);
 
+	if (!vector && (!msr->write || !is_write_only_msr(msr->idx)))
+		vector = rdmsr_safe(msr->idx, &msr_val);
+
 	if (msr->fault_expected)
-		GUEST_ASSERT_2(vector == GP_VECTOR, msr->idx, vector);
+		GUEST_ASSERT_3(vector == GP_VECTOR, msr->idx, vector, GP_VECTOR);
 	else
-		GUEST_ASSERT_2(!vector, msr->idx, vector);
+		GUEST_ASSERT_3(!vector, msr->idx, vector, 0);
+
+	if (vector || is_write_only_msr(msr->idx))
+		goto done;
+
+	if (msr->write)
+		GUEST_ASSERT_3(msr_val == msr->write_val, msr->idx,
+			       msr_val, msr->write_val);
+done:
 	GUEST_DONE();
 }
 
@@ -239,6 +253,12 @@ static void guest_test_msrs_access(void)
 		case 16:
 			msr->idx = HV_X64_MSR_RESET;
 			msr->write = true;
+			/*
+			 * TODO: the test only writes '0' to HV_X64_MSR_RESET
+			 * at the moment; writing some other value there will
+			 * trigger a real vCPU reset and the code is not
+			 * prepared to handle it yet.
+			 */
 			msr->write_val = 0;
 			msr->fault_expected = false;
 			break;
@@ -433,7 +453,7 @@ static void guest_test_msrs_access(void)
 		switch (get_ucall(vcpu, &uc)) {
 		case UCALL_ABORT:
-			REPORT_GUEST_ASSERT_2(uc, "MSR = %lx, vector = %lx");
+			REPORT_GUEST_ASSERT_3(uc, "MSR = %lx, arg1 = %lx, arg2 = %lx");
 			return;
 		case UCALL_DONE:
 			break;

From bd827bd775375fdef13da4c8d3f237555938aa6d Mon Sep 17 00:00:00 2001
From: Vitaly Kuznetsov
Date: Thu, 13 Oct 2022 11:58:49 +0200
Subject: [PATCH 68/68] KVM: selftests: Test Hyper-V invariant TSC control

Add a test for the newly introduced Hyper-V invariant TSC control
feature:
- HV_X64_MSR_TSC_INVARIANT_CONTROL is not available unless the
  HV_ACCESS_TSC_INVARIANT CPUID bit is set, and is available with it.
- BIT(0) of HV_X64_MSR_TSC_INVARIANT_CONTROL controls the filtering of
  the architectural invariant TSC bit (CPUID.80000007H:EDX[8]).

Signed-off-by: Vitaly Kuznetsov
Reviewed-by: Sean Christopherson
Message-Id: <20221013095849.705943-8-vkuznets@redhat.com>
Signed-off-by: Paolo Bonzini
---
 .../selftests/kvm/include/x86_64/hyperv.h    |  3 ++
 .../selftests/kvm/include/x86_64/processor.h |  1 +
 .../selftests/kvm/x86_64/hyperv_features.c   | 47 +++++++++++++++++++
 3 files changed, 51 insertions(+)

diff --git a/tools/testing/selftests/kvm/include/x86_64/hyperv.h b/tools/testing/selftests/kvm/include/x86_64/hyperv.h
index ab455c4efc665d..28eb99046475e1 100644
--- a/tools/testing/selftests/kvm/include/x86_64/hyperv.h
+++ b/tools/testing/selftests/kvm/include/x86_64/hyperv.h
@@ -335,4 +335,7 @@ struct hyperv_test_pages {
 struct hyperv_test_pages *vcpu_alloc_hyperv_test_pages(struct kvm_vm *vm,
 						       vm_vaddr_t *p_hv_pages_gva);
 
+/* HV_X64_MSR_TSC_INVARIANT_CONTROL bits */
+#define HV_INVARIANT_TSC_EXPOSED	BIT_ULL(0)
+
 #endif /* !SELFTEST_KVM_HYPERV_H */
diff --git a/tools/testing/selftests/kvm/include/x86_64/processor.h b/tools/testing/selftests/kvm/include/x86_64/processor.h
index b1a31de7108acc..2a5f47d513884a 100644
--- a/tools/testing/selftests/kvm/include/x86_64/processor.h
+++ b/tools/testing/selftests/kvm/include/x86_64/processor.h
@@ -137,6 +137,7 @@ struct kvm_x86_cpu_feature {
 #define X86_FEATURE_GBPAGES		KVM_X86_CPU_FEATURE(0x80000001, 0, EDX, 26)
 #define X86_FEATURE_RDTSCP		KVM_X86_CPU_FEATURE(0x80000001, 0, EDX, 27)
 #define X86_FEATURE_LM			KVM_X86_CPU_FEATURE(0x80000001, 0, EDX, 29)
+#define X86_FEATURE_INVTSC		KVM_X86_CPU_FEATURE(0x80000007, 0, EDX, 8)
 #define X86_FEATURE_RDPRU		KVM_X86_CPU_FEATURE(0x80000008, 0, EBX, 4)
 #define X86_FEATURE_AMD_IBPB		KVM_X86_CPU_FEATURE(0x80000008, 0, EBX, 12)
 #define X86_FEATURE_NPT			KVM_X86_CPU_FEATURE(0x8000000A, 0, EDX, 0)
diff --git a/tools/testing/selftests/kvm/x86_64/hyperv_features.c b/tools/testing/selftests/kvm/x86_64/hyperv_features.c
index b0024096397491..258267df8e2a62 100644
--- a/tools/testing/selftests/kvm/x86_64/hyperv_features.c
+++ b/tools/testing/selftests/kvm/x86_64/hyperv_features.c
@@ -63,6 +63,16 @@ static void guest_msr(struct msr_data *msr)
 	if (msr->write)
 		GUEST_ASSERT_3(msr_val == msr->write_val, msr->idx,
 			       msr_val, msr->write_val);
+
+	/* Invariant TSC bit appears when TSC invariant control MSR is written to */
+	if (msr->idx == HV_X64_MSR_TSC_INVARIANT_CONTROL) {
+		if (!this_cpu_has(HV_ACCESS_TSC_INVARIANT))
+			GUEST_ASSERT(this_cpu_has(X86_FEATURE_INVTSC));
+		else
+			GUEST_ASSERT(this_cpu_has(X86_FEATURE_INVTSC) ==
+				     !!(msr_val & HV_INVARIANT_TSC_EXPOSED));
+	}
+
 done:
 	GUEST_DONE();
 }
@@ -118,6 +128,7 @@ static void guest_test_msrs_access(void)
 	int stage = 0;
 	vm_vaddr_t msr_gva;
 	struct msr_data *msr;
+	bool has_invtsc = kvm_cpu_has(X86_FEATURE_INVTSC);
 
 	while (true) {
 		vm = vm_create_with_one_vcpu(&vcpu, guest_msr);
@@ -435,6 +446,42 @@ static void guest_test_msrs_access(void)
 			break;
 		case 44:
+			/* MSR is not available when CPUID feature bit is unset */
+			if (!has_invtsc)
+				continue;
+			msr->idx = HV_X64_MSR_TSC_INVARIANT_CONTROL;
+			msr->write = false;
+			msr->fault_expected = true;
+			break;
+		case 45:
+			/* MSR is available when CPUID feature bit is set */
+			if (!has_invtsc)
+				continue;
+			vcpu_set_cpuid_feature(vcpu, HV_ACCESS_TSC_INVARIANT);
+			msr->idx = HV_X64_MSR_TSC_INVARIANT_CONTROL;
+			msr->write = false;
+			msr->fault_expected = false;
+			break;
+		case 46:
+			/* Writing bits other than 0 is forbidden */
+			if (!has_invtsc)
+				continue;
+			msr->idx = HV_X64_MSR_TSC_INVARIANT_CONTROL;
+			msr->write = true;
+			msr->write_val = 0xdeadbeef;
+			msr->fault_expected = true;
+			break;
+		case 47:
+			/* Setting bit 0 enables the feature */
+			if (!has_invtsc)
+				continue;
+			msr->idx = HV_X64_MSR_TSC_INVARIANT_CONTROL;
+			msr->write = true;
+			msr->write_val = 1;
+			msr->fault_expected = false;
+			break;
+
+		default:
 			kvm_vm_free(vm);
 			return;
 		}
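For reference, the guest-visible contract exercised by the new test
cases can be sketched as follows (a hypothetical guest-side fragment,
not part of the patch; with HV_ACCESS_TSC_INVARIANT exposed, the
invariant TSC CPUID bit only appears after the guest opts in):

	/* InvTSC stays suppressed until the guest writes bit 0 */
	wrmsr(HV_X64_MSR_TSC_INVARIANT_CONTROL, HV_INVARIANT_TSC_EXPOSED);
	GUEST_ASSERT(this_cpu_has(X86_FEATURE_INVTSC));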