KVM: x86/mmu: Move TDP MMU VM init/uninit behind tdp_mmu_enabled
Move kvm_mmu_{init,uninit}_tdp_mmu() behind tdp_mmu_enabled. This makes
these functions consistent with the rest of the calls into the TDP MMU
from mmu.c, and is now possible since tdp_mmu_enabled is only modified
when the x86 vendor module is loaded, i.e. it will never change during
the lifetime of a VM.
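For context, tdp_mmu_enabled was converted to a read-only parameter
earlier in this series; the declaration below is a sketch of its
assumed shape in arch/x86/kvm/mmu.h, where 32-bit builds see a
compile-time constant rather than a variable:

  #ifdef CONFIG_X86_64
  extern bool __read_mostly tdp_mmu_enabled;
  #else
  #define tdp_mmu_enabled false   /* assumed: constant-false on 32-bit */
  #endif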

This change also allows removing the stub definitions for 32-bit KVM,
as the compiler will simply optimize the calls out, just as it does for
all the other TDP MMU functions.
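To illustrate why the stubs become unnecessary (a standalone sketch
with hypothetical names, not code from this patch): with optimizations
enabled, as in any kernel build, a constant-false guard makes the call
dead code, so the compiler drops it and the linker never sees a
reference to the function that is only compiled on 64-bit:

  #define ENABLED 0     /* stands in for tdp_mmu_enabled on 32-bit */

  int foo(void);        /* declared but never defined, like the TDP MMU
                           functions in a 32-bit build */

  int demo(void)
  {
          if (ENABLED)
                  return foo();   /* eliminated at compile time, so no
                                     undefined reference reaches the
                                     linker */
          return 0;
  }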

No functional change intended.

Signed-off-by: David Matlack <[email protected]>
Reviewed-by: Isaku Yamahata <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
Message-Id: <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
dmatlack authored and bonzini committed Dec 29, 2022
1 parent 1f98f2b commit 09732d2
Showing 3 changed files with 10 additions and 14 deletions.
11 changes: 7 additions & 4 deletions arch/x86/kvm/mmu/mmu.c
@@ -6035,9 +6035,11 @@ int kvm_mmu_init_vm(struct kvm *kvm)
 	INIT_LIST_HEAD(&kvm->arch.possible_nx_huge_pages);
 	spin_lock_init(&kvm->arch.mmu_unsync_pages_lock);
 
-	r = kvm_mmu_init_tdp_mmu(kvm);
-	if (r < 0)
-		return r;
+	if (tdp_mmu_enabled) {
+		r = kvm_mmu_init_tdp_mmu(kvm);
+		if (r < 0)
+			return r;
+	}
 
 	node->track_write = kvm_mmu_pte_write;
 	node->track_flush_slot = kvm_mmu_invalidate_zap_pages_in_memslot;
@@ -6067,7 +6069,8 @@ void kvm_mmu_uninit_vm(struct kvm *kvm)
 
 	kvm_page_track_unregister_notifier(kvm, node);
 
-	kvm_mmu_uninit_tdp_mmu(kvm);
+	if (tdp_mmu_enabled)
+		kvm_mmu_uninit_tdp_mmu(kvm);
 
 	mmu_free_vm_memory_caches(kvm);
 }
6 changes: 0 additions & 6 deletions arch/x86/kvm/mmu/tdp_mmu.c
@@ -15,9 +15,6 @@ int kvm_mmu_init_tdp_mmu(struct kvm *kvm)
 {
 	struct workqueue_struct *wq;
 
-	if (!tdp_mmu_enabled)
-		return 0;
-
 	wq = alloc_workqueue("kvm", WQ_UNBOUND|WQ_MEM_RECLAIM|WQ_CPU_INTENSIVE, 0);
 	if (!wq)
 		return -ENOMEM;
@@ -42,9 +39,6 @@ static __always_inline bool kvm_lockdep_assert_mmu_lock_held(struct kvm *kvm,
 
 void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
 {
-	if (!tdp_mmu_enabled)
-		return;
-
 	/* Also waits for any queued work items. */
 	destroy_workqueue(kvm->arch.tdp_mmu_zap_wq);
 
7 changes: 3 additions & 4 deletions arch/x86/kvm/mmu/tdp_mmu.h
@@ -7,6 +7,9 @@
 
 #include "spte.h"
 
+int kvm_mmu_init_tdp_mmu(struct kvm *kvm);
+void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm);
+
 hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu);
 
 __must_check static inline bool kvm_tdp_mmu_get_root(struct kvm_mmu_page *root)
@@ -68,8 +71,6 @@ u64 *kvm_tdp_mmu_fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, u64 addr,
 					u64 *spte);
 
 #ifdef CONFIG_X86_64
-int kvm_mmu_init_tdp_mmu(struct kvm *kvm);
-void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm);
 static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return sp->tdp_mmu_page; }
 
 static inline bool is_tdp_mmu(struct kvm_mmu *mmu)
@@ -89,8 +90,6 @@ static inline bool is_tdp_mmu(struct kvm_mmu *mmu)
 	return sp && is_tdp_mmu_page(sp) && sp->root_count;
 }
 #else
-static inline int kvm_mmu_init_tdp_mmu(struct kvm *kvm) { return 0; }
-static inline void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm) {}
 static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return false; }
 static inline bool is_tdp_mmu(struct kvm_mmu *mmu) { return false; }
 #endif
