Merge tag 'kvm-x86-mmu-6.4' of https://github.com/kvm-x86/linux into HEAD

KVM x86 MMU changes for 6.4:

 - Tweak FNAME(sync_spte) to avoid unnecessary writes+flushes when the
   guest is only adding new PTEs

 - Overhaul .sync_page() and .invlpg() to share the .sync_page()
   implementation, i.e. utilize .sync_page()'s optimizations when emulating
   invalidations

 - Clean up the range-based flushing APIs

 - Revamp the TDP MMU's reaping of Accessed/Dirty bits to clear a single
   A/D bit using a LOCK AND instead of XCHG, and skip all of the "handle
   changed SPTE" overhead associated with writing the entire entry

 - Track the number of "tail" entries in a pte_list_desc to avoid having
   to walk (potentially) all descriptors during insertion and deletion,
   which gets quite expensive if the guest is spamming fork()

 - Misc cleanups
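
The first two bullets converge on one mechanism: the paging32/paging64 sync_page() and invlpg() hooks are replaced by a single per-entry sync_spte() hook (see the kvm_host.h hunk below), so emulated invalidations reuse the same SPTE-level logic as unsync-page resync. A minimal sketch of the resulting sync loop, assuming the hook's conventions are "negative: give up and zap the page, positive: the SPTE changed and a flush is needed" (loop shape and return-code handling simplified from the series):

/* Sketch: sync every entry of an unsync shadow page through the
 * per-SPTE hook.  512 is the number of SPTEs in a 4KiB shadow page. */
static int sync_shadow_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
{
        int flush = 0, i;

        for (i = 0; i < 512; i++) {
                int ret = vcpu->arch.mmu->sync_spte(vcpu, sp, i);

                if (ret < 0)
                        return -1;      /* stale; caller zaps the page */
                flush |= ret;           /* nonzero: remote flush required */
        }
        return flush;
}

Because invlpg emulation now calls the same hook for just the affected index, it picks up sync_page()'s existing optimizations (e.g. skipping writes when the guest only added PTEs, per the first bullet) for free.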
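For the A/D reaping bullet: aging a TDP MMU SPTE only ever clears the Accessed (or Dirty) bit, so writing the whole 64-bit entry with XCHG, then reconciling old vs. new through the generic handle-changed-SPTE path, is overkill. A hedged sketch of the two strategies; shadow_accessed_mask, xchg() and atomic64_fetch_and() are real kernel symbols, the wrapper functions are illustrative:

/* Old approach: swap in the whole new value (XCHG); the caller must
 * then run the generic "handle changed SPTE" machinery on old vs. new. */
static u64 age_spte_xchg(u64 *sptep, u64 old_spte)
{
        return xchg(sptep, old_spte & ~shadow_accessed_mask);
}

/* New approach: atomically clear only the Accessed bit; on x86 this
 * compiles to a LOCK AND.  No other bit can change under us, so the
 * generic changed-SPTE handling can be skipped entirely. */
static u64 age_spte_lock_and(u64 *sptep)
{
        return atomic64_fetch_and(~shadow_accessed_mask,
                                  (atomic64_t *)sptep);
}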
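The fork() bullet targets the rmap chains: insertion and removal previously had to walk a head pte_list_desc to its last tail just to find where entries live and how many there are. Caching, in the head descriptor, the count of entries stored in all tail descriptors makes the common paths independent of chain length. A condensed sketch of the structure and the insertion path, adapted from the series (the rmap_head low-bit encoding and kvm_mmu_memory_cache_alloc() are the kernel's; error paths and the small-list cases are omitted):

#define PTE_LIST_EXT 14

struct pte_list_desc {
        struct pte_list_desc *more;     /* next (tail) descriptor */
        u32 spte_count;                 /* entries in THIS descriptor */
        u32 tail_count;                 /* entries in all TAIL descriptors */
        u64 *sptes[PTE_LIST_EXT];
};

/* Condensed: add @spte to an rmap chain that already uses descriptors.
 * New descriptors are pushed at the head and tails are kept full, so
 * head->tail_count + head->spte_count is the total without any walk. */
static int pte_list_add_many(struct kvm_mmu_memory_cache *cache, u64 *spte,
                             struct kvm_rmap_head *rmap_head)
{
        struct pte_list_desc *desc = (void *)(rmap_head->val & ~1ul);
        int count = desc->tail_count + desc->spte_count;

        if (desc->spte_count == PTE_LIST_EXT) {
                struct pte_list_desc *head = kvm_mmu_memory_cache_alloc(cache);

                head->more = desc;
                head->spte_count = 0;
                head->tail_count = count;       /* old head becomes a full tail */
                rmap_head->val = (unsigned long)head | 1;
                desc = head;
        }
        desc->sptes[desc->spte_count++] = spte;
        return count;
}

Deletion gets the symmetric win: the head is the only non-full descriptor, so the slot to backfill from is always in the head and removal never walks the chain either.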
bonzini committed Apr 26, 2023
2 parents a1c288f + 9ed3bf4 commit 807b758
Showing 14 changed files with 515 additions and 567 deletions.
4 changes: 2 additions & 2 deletions arch/x86/include/asm/kvm-x86-ops.h
@@ -54,8 +54,8 @@ KVM_X86_OP(set_rflags)
 KVM_X86_OP(get_if_flag)
 KVM_X86_OP(flush_tlb_all)
 KVM_X86_OP(flush_tlb_current)
-KVM_X86_OP_OPTIONAL(tlb_remote_flush)
-KVM_X86_OP_OPTIONAL(tlb_remote_flush_with_range)
+KVM_X86_OP_OPTIONAL(flush_remote_tlbs)
+KVM_X86_OP_OPTIONAL(flush_remote_tlbs_range)
 KVM_X86_OP(flush_tlb_gva)
 KVM_X86_OP(flush_tlb_guest)
 KVM_X86_OP(vcpu_pre_run)
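
Why this one-line rename ripples through the rest of the series: kvm-x86-ops.h is an X-macro table. Each includer defines KVM_X86_OP()/KVM_X86_OP_OPTIONAL() before including it, so an entry's name directly generates the kvm_x86_<name> static-call key used at call sites (see the kvm_arch_flush_remote_tlb() hunk below). A simplified sketch of the consuming side, based on kvm_host.h; the kernel's exact macro bodies differ slightly:

/* Sketch: kvm_host.h-style consumption of the X-macro table.  Each
 * KVM_X86_OP(func) expands to a static-call declaration named
 * kvm_x86_##func, so renaming tlb_remote_flush to flush_remote_tlbs
 * also renames static_call(kvm_x86_tlb_remote_flush). */
#define KVM_X86_OP(func) \
        DECLARE_STATIC_CALL(kvm_x86_##func, *(((struct kvm_x86_ops *)0)->func));
#define KVM_X86_OP_OPTIONAL KVM_X86_OP
#define KVM_X86_OP_OPTIONAL_RET0 KVM_X86_OP
#include <asm/kvm-x86-ops.h>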
32 changes: 13 additions & 19 deletions arch/x86/include/asm/kvm_host.h
@@ -420,6 +420,10 @@ struct kvm_mmu_root_info {
 
 #define KVM_MMU_NUM_PREV_ROOTS 3
 
+#define KVM_MMU_ROOT_CURRENT		BIT(0)
+#define KVM_MMU_ROOT_PREVIOUS(i)	BIT(1+i)
+#define KVM_MMU_ROOTS_ALL		(BIT(1 + KVM_MMU_NUM_PREV_ROOTS) - 1)
+
 #define KVM_HAVE_MMU_RWLOCK
 
 struct kvm_mmu_page;
@@ -439,9 +443,8 @@ struct kvm_mmu {
 	gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
 			    gpa_t gva_or_gpa, u64 access,
 			    struct x86_exception *exception);
-	int (*sync_page)(struct kvm_vcpu *vcpu,
-			 struct kvm_mmu_page *sp);
-	void (*invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa);
+	int (*sync_spte)(struct kvm_vcpu *vcpu,
+			 struct kvm_mmu_page *sp, int i);
 	struct kvm_mmu_root_info root;
 	union kvm_cpu_role cpu_role;
 	union kvm_mmu_page_role root_role;
@@ -479,11 +482,6 @@ struct kvm_mmu {
 	u64 pdptrs[4]; /* pae */
 };
 
-struct kvm_tlb_range {
-	u64 start_gfn;
-	u64 pages;
-};
-
 enum pmc_type {
 	KVM_PMC_GP = 0,
 	KVM_PMC_FIXED,
@@ -1585,9 +1583,9 @@ struct kvm_x86_ops {
 
 	void (*flush_tlb_all)(struct kvm_vcpu *vcpu);
 	void (*flush_tlb_current)(struct kvm_vcpu *vcpu);
-	int (*tlb_remote_flush)(struct kvm *kvm);
-	int (*tlb_remote_flush_with_range)(struct kvm *kvm,
-					   struct kvm_tlb_range *range);
+	int (*flush_remote_tlbs)(struct kvm *kvm);
+	int (*flush_remote_tlbs_range)(struct kvm *kvm, gfn_t gfn,
+				       gfn_t nr_pages);
 
 	/*
 	 * Flush any TLB entries associated with the given GVA.
@@ -1791,8 +1789,8 @@ void kvm_arch_free_vm(struct kvm *kvm);
 #define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLB
 static inline int kvm_arch_flush_remote_tlb(struct kvm *kvm)
 {
-	if (kvm_x86_ops.tlb_remote_flush &&
-	    !static_call(kvm_x86_tlb_remote_flush)(kvm))
+	if (kvm_x86_ops.flush_remote_tlbs &&
+	    !static_call(kvm_x86_flush_remote_tlbs)(kvm))
 		return 0;
 	else
 		return -ENOTSUPP;
@@ -1997,10 +1995,6 @@ static inline int __kvm_irq_line_state(unsigned long *irq_state,
 	return !!(*irq_state);
 }
 
-#define KVM_MMU_ROOT_CURRENT		BIT(0)
-#define KVM_MMU_ROOT_PREVIOUS(i)	BIT(1+i)
-#define KVM_MMU_ROOTS_ALL		(~0UL)
-
 int kvm_pic_set_irq(struct kvm_pic *pic, int irq, int irq_source_id, int level);
 void kvm_pic_clear_all(struct kvm_pic *pic, int irq_source_id);
 
@@ -2044,8 +2038,8 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);
 int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code,
 		       void *insn, int insn_len);
 void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);
-void kvm_mmu_invalidate_gva(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
-			    gva_t gva, hpa_t root_hpa);
+void kvm_mmu_invalidate_addr(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
+			     u64 addr, unsigned long roots);
 void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid);
 void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd);
 
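
Two things worth noting in this file: KVM_MMU_ROOTS_ALL shrinks from ~0UL to an exact mask of the valid root bits, and kvm_mmu_invalidate_addr() now takes that bitmask instead of a single root_hpa. With KVM_MMU_NUM_PREV_ROOTS == 3 the mask is BIT(4) - 1 == 0xf: bit 0 selects the current root, bits 1-3 the cached previous roots. A sketch of how a consumer of the roots bitmask might walk them; invalidate_in_root() is a hypothetical helper, while mmu->root.hpa and mmu->prev_roots[i].hpa are real fields:

/* Hypothetical consumer of the 'roots' bitmask (illustration only). */
static void invalidate_addr_in_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
                                     u64 addr, unsigned long roots)
{
        int i;

        if (roots & KVM_MMU_ROOT_CURRENT)
                invalidate_in_root(vcpu, mmu, addr, mmu->root.hpa);

        for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
                if (roots & KVM_MMU_ROOT_PREVIOUS(i))
                        invalidate_in_root(vcpu, mmu, addr,
                                           mmu->prev_roots[i].hpa);
        }
}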
33 changes: 24 additions & 9 deletions arch/x86/kvm/kvm_onhyperv.c
@@ -10,17 +10,22 @@
 #include "hyperv.h"
 #include "kvm_onhyperv.h"
 
+struct kvm_hv_tlb_range {
+	u64 start_gfn;
+	u64 pages;
+};
+
 static int kvm_fill_hv_flush_list_func(struct hv_guest_mapping_flush_list *flush,
 		void *data)
 {
-	struct kvm_tlb_range *range = data;
+	struct kvm_hv_tlb_range *range = data;
 
 	return hyperv_fill_flush_guest_mapping_list(flush, range->start_gfn,
 			range->pages);
 }
 
 static inline int hv_remote_flush_root_tdp(hpa_t root_tdp,
-					   struct kvm_tlb_range *range)
+					   struct kvm_hv_tlb_range *range)
 {
 	if (range)
 		return hyperv_flush_guest_mapping_range(root_tdp,
@@ -29,8 +34,8 @@ static inline int hv_remote_flush_root_tdp(hpa_t root_tdp,
 	return hyperv_flush_guest_mapping(root_tdp);
 }
 
-int hv_remote_flush_tlb_with_range(struct kvm *kvm,
-				   struct kvm_tlb_range *range)
+static int __hv_flush_remote_tlbs_range(struct kvm *kvm,
+					struct kvm_hv_tlb_range *range)
 {
 	struct kvm_arch *kvm_arch = &kvm->arch;
 	struct kvm_vcpu *vcpu;
@@ -86,19 +91,29 @@ int hv_remote_flush_tlb_with_range(struct kvm *kvm,
 	spin_unlock(&kvm_arch->hv_root_tdp_lock);
 	return ret;
 }
-EXPORT_SYMBOL_GPL(hv_remote_flush_tlb_with_range);
 
-int hv_remote_flush_tlb(struct kvm *kvm)
+int hv_flush_remote_tlbs_range(struct kvm *kvm, gfn_t start_gfn, gfn_t nr_pages)
+{
+	struct kvm_hv_tlb_range range = {
+		.start_gfn = start_gfn,
+		.pages = nr_pages,
+	};
+
+	return __hv_flush_remote_tlbs_range(kvm, &range);
+}
+EXPORT_SYMBOL_GPL(hv_flush_remote_tlbs_range);
+
+int hv_flush_remote_tlbs(struct kvm *kvm)
 {
-	return hv_remote_flush_tlb_with_range(kvm, NULL);
+	return __hv_flush_remote_tlbs_range(kvm, NULL);
 }
-EXPORT_SYMBOL_GPL(hv_remote_flush_tlb);
+EXPORT_SYMBOL_GPL(hv_flush_remote_tlbs);
 
 void hv_track_root_tdp(struct kvm_vcpu *vcpu, hpa_t root_tdp)
 {
 	struct kvm_arch *kvm_arch = &vcpu->kvm->arch;
 
-	if (kvm_x86_ops.tlb_remote_flush == hv_remote_flush_tlb) {
+	if (kvm_x86_ops.flush_remote_tlbs == hv_flush_remote_tlbs) {
 		spin_lock(&kvm_arch->hv_root_tdp_lock);
 		vcpu->arch.hv_root_tdp = root_tdp;
 		if (root_tdp != kvm_arch->hv_root_tdp)
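The net effect in this file: struct kvm_hv_tlb_range becomes a kvm_onhyperv.c-private detail, and the exported entry points take a plain (gfn, nr_pages) pair matching the new kvm_x86_ops hooks. A hypothetical call site; the wrapper function is illustrative, while kvm_flush_remote_tlbs() is KVM's generic full-flush fallback:

/* Illustrative caller: try the Hyper-V enlightened ranged flush and
 * fall back to a full remote TLB flush if it fails. */
static void flush_gfn_range(struct kvm *kvm, gfn_t base_gfn, gfn_t npages)
{
        if (hv_flush_remote_tlbs_range(kvm, base_gfn, npages))
                kvm_flush_remote_tlbs(kvm);
}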
5 changes: 2 additions & 3 deletions arch/x86/kvm/kvm_onhyperv.h
@@ -7,9 +7,8 @@
 #define __ARCH_X86_KVM_KVM_ONHYPERV_H__
 
 #if IS_ENABLED(CONFIG_HYPERV)
-int hv_remote_flush_tlb_with_range(struct kvm *kvm,
-				   struct kvm_tlb_range *range);
-int hv_remote_flush_tlb(struct kvm *kvm);
+int hv_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, gfn_t nr_pages);
+int hv_flush_remote_tlbs(struct kvm *kvm);
 void hv_track_root_tdp(struct kvm_vcpu *vcpu, hpa_t root_tdp);
 #else /* !CONFIG_HYPERV */
 static inline void hv_track_root_tdp(struct kvm_vcpu *vcpu, hpa_t root_tdp)
(Diff truncated: the remaining 10 of 14 changed files are not shown.)
