KVM: Rename mmu_notifier_* to mmu_invalidate_*
The motivation for this renaming is to make these variables and related
helper functions less mmu_notifier-bound, so that they can also be used
for page invalidation that is not driven by mmu_notifier. mmu_invalidate_*
was chosen because it better describes the purpose these variables serve:
invalidating a page.

  - mmu_notifier_seq/range_start/range_end are renamed to
    mmu_invalidate_seq/range_start/range_end.

  - mmu_notifier_retry{_hva} helper functions are renamed to
    mmu_invalidate_retry{_hva}.

  - mmu_notifier_count is renamed to mmu_invalidate_in_progress to
    avoid confusion with mn_active_invalidate_count.

  - While here, also update kvm_inc/dec_notifier_count() to
    kvm_mmu_invalidate_begin/end() to match the change for
    mmu_notifier_count.

No functional change intended.

Signed-off-by: Chao Peng <[email protected]>
Message-Id: <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
chao-p authored and bonzini committed Aug 19, 2022
1 parent bdd1c37 commit 20ec3eb
Showing 15 changed files with 103 additions and 98 deletions.
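
For orientation before the per-file hunks: the sketch below is illustrative only and is not code from this commit. The function name example_map_page and its exact arguments are made up, and the spinlock-style locking matches architectures such as MIPS or PowerPC where kvm->mmu_lock is a spinlock. It shows the common pattern the renamed fields and helpers serve in the arch page-fault paths touched here: snapshot mmu_invalidate_seq, resolve the pfn without holding mmu_lock, then recheck with mmu_invalidate_retry() under mmu_lock before installing the mapping.

/*
 * Illustrative sketch only -- not part of this commit.
 */
#include <linux/kvm_host.h>

static int example_map_page(struct kvm_vcpu *vcpu, gfn_t gfn, bool write)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long mmu_seq;
	kvm_pfn_t pfn;

	/* Snapshot the invalidation sequence before resolving the pfn. */
	mmu_seq = kvm->mmu_invalidate_seq;
	/* Pairs with the smp_wmb() issued when the sequence is bumped. */
	smp_rmb();

	/* May sleep and fault; must not hold mmu_lock here. */
	pfn = gfn_to_pfn_prot(kvm, gfn, write, NULL);
	if (is_error_noslot_pfn(pfn))
		return -EFAULT;

	spin_lock(&kvm->mmu_lock);
	/* An invalidation ran (or is in progress); drop the pfn and retry. */
	if (mmu_invalidate_retry(kvm, mmu_seq)) {
		spin_unlock(&kvm->mmu_lock);
		kvm_release_pfn_clean(pfn);
		return -EAGAIN;	/* caller lets the guest retry the fault */
	}

	/* ... install the stage-2/shadow mapping here, under mmu_lock ... */

	spin_unlock(&kvm->mmu_lock);
	kvm_release_pfn_clean(pfn);
	return 0;
}

The _hva variant, mmu_invalidate_retry_hva(), used by x86 in is_page_fault_stale(), narrows the in-progress check to faults whose host virtual address falls inside the range recorded between kvm_mmu_invalidate_begin() and kvm_mmu_invalidate_end().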
8 changes: 4 additions & 4 deletions arch/arm64/kvm/mmu.c
@@ -993,7 +993,7 @@ transparent_hugepage_adjust(struct kvm *kvm, struct kvm_memory_slot *memslot,
* THP doesn't start to split while we are adjusting the
* refcounts.
*
- * We are sure this doesn't happen, because mmu_notifier_retry
+ * We are sure this doesn't happen, because mmu_invalidate_retry
* was successful and we are holding the mmu_lock, so if this
* THP is trying to split, it will be blocked in the mmu
* notifier before touching any of the pages, specifically
@@ -1188,9 +1188,9 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
return ret;
}

- mmu_seq = vcpu->kvm->mmu_notifier_seq;
+ mmu_seq = vcpu->kvm->mmu_invalidate_seq;
/*
- * Ensure the read of mmu_notifier_seq happens before we call
+ * Ensure the read of mmu_invalidate_seq happens before we call
* gfn_to_pfn_prot (which calls get_user_pages), so that we don't risk
* the page we just got a reference to gets unmapped before we have a
* chance to grab the mmu_lock, which ensure that if the page gets
@@ -1246,7 +1246,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
else
write_lock(&kvm->mmu_lock);
pgt = vcpu->arch.hw_mmu->pgt;
- if (mmu_notifier_retry(kvm, mmu_seq))
+ if (mmu_invalidate_retry(kvm, mmu_seq))
goto out_unlock;

/*
12 changes: 6 additions & 6 deletions arch/mips/kvm/mmu.c
@@ -615,17 +615,17 @@ static int kvm_mips_map_page(struct kvm_vcpu *vcpu, unsigned long gpa,
* Used to check for invalidations in progress, of the pfn that is
* returned by pfn_to_pfn_prot below.
*/
- mmu_seq = kvm->mmu_notifier_seq;
+ mmu_seq = kvm->mmu_invalidate_seq;
/*
- * Ensure the read of mmu_notifier_seq isn't reordered with PTE reads in
- * gfn_to_pfn_prot() (which calls get_user_pages()), so that we don't
+ * Ensure the read of mmu_invalidate_seq isn't reordered with PTE reads
+ * in gfn_to_pfn_prot() (which calls get_user_pages()), so that we don't
* risk the page we get a reference to getting unmapped before we have a
- * chance to grab the mmu_lock without mmu_notifier_retry() noticing.
+ * chance to grab the mmu_lock without mmu_invalidate_retry() noticing.
*
* This smp_rmb() pairs with the effective smp_wmb() of the combination
* of the pte_unmap_unlock() after the PTE is zapped, and the
* spin_lock() in kvm_mmu_notifier_invalidate_<page|range_end>() before
- * mmu_notifier_seq is incremented.
+ * mmu_invalidate_seq is incremented.
*/
smp_rmb();

@@ -638,7 +638,7 @@ static int kvm_mips_map_page(struct kvm_vcpu *vcpu, unsigned long gpa,

spin_lock(&kvm->mmu_lock);
/* Check if an invalidation has taken place since we got pfn */
- if (mmu_notifier_retry(kvm, mmu_seq)) {
+ if (mmu_invalidate_retry(kvm, mmu_seq)) {
/*
* This can happen when mappings are changed asynchronously, but
* also synchronously if a COW is triggered by
2 changes: 1 addition & 1 deletion arch/powerpc/include/asm/kvm_book3s_64.h
@@ -666,7 +666,7 @@ static inline pte_t *find_kvm_host_pte(struct kvm *kvm, unsigned long mmu_seq,
VM_WARN(!spin_is_locked(&kvm->mmu_lock),
"%s called with kvm mmu_lock not held \n", __func__);

- if (mmu_notifier_retry(kvm, mmu_seq))
+ if (mmu_invalidate_retry(kvm, mmu_seq))
return NULL;

pte = __find_linux_pte(kvm->mm->pgd, ea, NULL, hshift);
4 changes: 2 additions & 2 deletions arch/powerpc/kvm/book3s_64_mmu_host.c
@@ -90,7 +90,7 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte,
unsigned long pfn;

/* used to check for invalidations in progress */
- mmu_seq = kvm->mmu_notifier_seq;
+ mmu_seq = kvm->mmu_invalidate_seq;
smp_rmb();

/* Get host physical address for gpa */
@@ -151,7 +151,7 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte,
cpte = kvmppc_mmu_hpte_cache_next(vcpu);

spin_lock(&kvm->mmu_lock);
- if (!cpte || mmu_notifier_retry(kvm, mmu_seq)) {
+ if (!cpte || mmu_invalidate_retry(kvm, mmu_seq)) {
r = -EAGAIN;
goto out_unlock;
}
4 changes: 2 additions & 2 deletions arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -578,7 +578,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_vcpu *vcpu,
return -EFAULT;

/* used to check for invalidations in progress */
- mmu_seq = kvm->mmu_notifier_seq;
+ mmu_seq = kvm->mmu_invalidate_seq;
smp_rmb();

ret = -EFAULT;
@@ -693,7 +693,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_vcpu *vcpu,

/* Check if we might have been invalidated; let the guest retry if so */
ret = RESUME_GUEST;
- if (mmu_notifier_retry(vcpu->kvm, mmu_seq)) {
+ if (mmu_invalidate_retry(vcpu->kvm, mmu_seq)) {
unlock_rmap(rmap);
goto out_unlock;
}
6 changes: 3 additions & 3 deletions arch/powerpc/kvm/book3s_64_mmu_radix.c
@@ -640,7 +640,7 @@ int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte,
/* Check if we might have been invalidated; let the guest retry if so */
spin_lock(&kvm->mmu_lock);
ret = -EAGAIN;
- if (mmu_notifier_retry(kvm, mmu_seq))
+ if (mmu_invalidate_retry(kvm, mmu_seq))
goto out_unlock;

/* Now traverse again under the lock and change the tree */
@@ -830,7 +830,7 @@ int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu,
bool large_enable;

/* used to check for invalidations in progress */
- mmu_seq = kvm->mmu_notifier_seq;
+ mmu_seq = kvm->mmu_invalidate_seq;
smp_rmb();

/*
@@ -1191,7 +1191,7 @@ void kvmppc_radix_flush_memslot(struct kvm *kvm,
* Increase the mmu notifier sequence number to prevent any page
* fault that read the memslot earlier from writing a PTE.
*/
- kvm->mmu_notifier_seq++;
+ kvm->mmu_invalidate_seq++;
spin_unlock(&kvm->mmu_lock);
}

2 changes: 1 addition & 1 deletion arch/powerpc/kvm/book3s_hv_nested.c
@@ -1580,7 +1580,7 @@ static long int __kvmhv_nested_page_fault(struct kvm_vcpu *vcpu,
/* 2. Find the host pte for this L1 guest real address */

/* Used to check for invalidations in progress */
- mmu_seq = kvm->mmu_notifier_seq;
+ mmu_seq = kvm->mmu_invalidate_seq;
smp_rmb();

/* See if can find translation in our partition scoped tables for L1 */
8 changes: 4 additions & 4 deletions arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -219,7 +219,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
g_ptel = ptel;

/* used later to detect if we might have been invalidated */
- mmu_seq = kvm->mmu_notifier_seq;
+ mmu_seq = kvm->mmu_invalidate_seq;
smp_rmb();

/* Find the memslot (if any) for this address */
@@ -366,7 +366,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
rmap = real_vmalloc_addr(rmap);
lock_rmap(rmap);
/* Check for pending invalidations under the rmap chain lock */
- if (mmu_notifier_retry(kvm, mmu_seq)) {
+ if (mmu_invalidate_retry(kvm, mmu_seq)) {
/* inval in progress, write a non-present HPTE */
pteh |= HPTE_V_ABSENT;
pteh &= ~HPTE_V_VALID;
@@ -932,7 +932,7 @@ static long kvmppc_do_h_page_init_zero(struct kvm_vcpu *vcpu,
int i;

/* Used later to detect if we might have been invalidated */
- mmu_seq = kvm->mmu_notifier_seq;
+ mmu_seq = kvm->mmu_invalidate_seq;
smp_rmb();

arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock);
@@ -960,7 +960,7 @@ static long kvmppc_do_h_page_init_copy(struct kvm_vcpu *vcpu,
long ret = H_SUCCESS;

/* Used later to detect if we might have been invalidated */
- mmu_seq = kvm->mmu_notifier_seq;
+ mmu_seq = kvm->mmu_invalidate_seq;
smp_rmb();

arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock);
4 changes: 2 additions & 2 deletions arch/powerpc/kvm/e500_mmu_host.c
@@ -339,7 +339,7 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
unsigned long flags;

/* used to check for invalidations in progress */
- mmu_seq = kvm->mmu_notifier_seq;
+ mmu_seq = kvm->mmu_invalidate_seq;
smp_rmb();

/*
@@ -460,7 +460,7 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
}

spin_lock(&kvm->mmu_lock);
- if (mmu_notifier_retry(kvm, mmu_seq)) {
+ if (mmu_invalidate_retry(kvm, mmu_seq)) {
ret = -EAGAIN;
goto out;
}
4 changes: 2 additions & 2 deletions arch/riscv/kvm/mmu.c
@@ -666,7 +666,7 @@ int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
return ret;
}

- mmu_seq = kvm->mmu_notifier_seq;
+ mmu_seq = kvm->mmu_invalidate_seq;

hfn = gfn_to_pfn_prot(kvm, gfn, is_write, &writable);
if (hfn == KVM_PFN_ERR_HWPOISON) {
@@ -686,7 +686,7 @@ int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,

spin_lock(&kvm->mmu_lock);

- if (mmu_notifier_retry(kvm, mmu_seq))
+ if (mmu_invalidate_retry(kvm, mmu_seq))
goto out_unlock;

if (writable) {
14 changes: 7 additions & 7 deletions arch/x86/kvm/mmu/mmu.c
@@ -2914,7 +2914,7 @@ static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep)
* If addresses are being invalidated, skip prefetching to avoid
* accidentally prefetching those addresses.
*/
- if (unlikely(vcpu->kvm->mmu_notifier_count))
+ if (unlikely(vcpu->kvm->mmu_invalidate_in_progress))
return;

__direct_pte_prefetch(vcpu, sp, sptep);
@@ -2928,7 +2928,7 @@ static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep)
*
* There are several ways to safely use this helper:
*
- * - Check mmu_notifier_retry_hva() after grabbing the mapping level, before
+ * - Check mmu_invalidate_retry_hva() after grabbing the mapping level, before
* consuming it. In this case, mmu_lock doesn't need to be held during the
* lookup, but it does need to be held while checking the MMU notifier.
*
@@ -3056,7 +3056,7 @@ void kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
return;

/*
- * mmu_notifier_retry() was successful and mmu_lock is held, so
+ * mmu_invalidate_retry() was successful and mmu_lock is held, so
* the pmd can't be split from under us.
*/
fault->goal_level = fault->req_level;
@@ -4203,7 +4203,7 @@ static bool is_page_fault_stale(struct kvm_vcpu *vcpu,
return true;

return fault->slot &&
- mmu_notifier_retry_hva(vcpu->kvm, mmu_seq, fault->hva);
+ mmu_invalidate_retry_hva(vcpu->kvm, mmu_seq, fault->hva);
}

static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
@@ -4227,7 +4227,7 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
if (r)
return r;

- mmu_seq = vcpu->kvm->mmu_notifier_seq;
+ mmu_seq = vcpu->kvm->mmu_invalidate_seq;
smp_rmb();

r = kvm_faultin_pfn(vcpu, fault);
@@ -6055,7 +6055,7 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)

write_lock(&kvm->mmu_lock);

- kvm_inc_notifier_count(kvm, gfn_start, gfn_end);
+ kvm_mmu_invalidate_begin(kvm, gfn_start, gfn_end);

flush = kvm_rmap_zap_gfn_range(kvm, gfn_start, gfn_end);

@@ -6069,7 +6069,7 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
kvm_flush_remote_tlbs_with_address(kvm, gfn_start,
gfn_end - gfn_start);

- kvm_dec_notifier_count(kvm, gfn_start, gfn_end);
+ kvm_mmu_invalidate_end(kvm, gfn_start, gfn_end);

write_unlock(&kvm->mmu_lock);
}
4 changes: 2 additions & 2 deletions arch/x86/kvm/mmu/paging_tmpl.h
@@ -589,7 +589,7 @@ static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
* If addresses are being invalidated, skip prefetching to avoid
* accidentally prefetching those addresses.
*/
- if (unlikely(vcpu->kvm->mmu_notifier_count))
+ if (unlikely(vcpu->kvm->mmu_invalidate_in_progress))
return;

if (sp->role.direct)
@@ -838,7 +838,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
else
fault->max_level = walker.level;

- mmu_seq = vcpu->kvm->mmu_notifier_seq;
+ mmu_seq = vcpu->kvm->mmu_invalidate_seq;
smp_rmb();

r = kvm_faultin_pfn(vcpu, fault);
60 changes: 31 additions & 29 deletions include/linux/kvm_host.h
@@ -765,10 +765,10 @@ struct kvm {

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
struct mmu_notifier mmu_notifier;
- unsigned long mmu_notifier_seq;
- long mmu_notifier_count;
- unsigned long mmu_notifier_range_start;
- unsigned long mmu_notifier_range_end;
+ unsigned long mmu_invalidate_seq;
+ long mmu_invalidate_in_progress;
+ unsigned long mmu_invalidate_range_start;
+ unsigned long mmu_invalidate_range_end;
#endif
struct list_head devices;
u64 manual_dirty_log_protect;
@@ -1357,10 +1357,10 @@ void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc);
void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc);
#endif

- void kvm_inc_notifier_count(struct kvm *kvm, unsigned long start,
- unsigned long end);
- void kvm_dec_notifier_count(struct kvm *kvm, unsigned long start,
- unsigned long end);
+ void kvm_mmu_invalidate_begin(struct kvm *kvm, unsigned long start,
+ unsigned long end);
+ void kvm_mmu_invalidate_end(struct kvm *kvm, unsigned long start,
+ unsigned long end);

long kvm_arch_dev_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg);
@@ -1907,42 +1907,44 @@ extern const struct kvm_stats_header kvm_vcpu_stats_header;
extern const struct _kvm_stats_desc kvm_vcpu_stats_desc[];

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
- static inline int mmu_notifier_retry(struct kvm *kvm, unsigned long mmu_seq)
+ static inline int mmu_invalidate_retry(struct kvm *kvm, unsigned long mmu_seq)
{
- if (unlikely(kvm->mmu_notifier_count))
+ if (unlikely(kvm->mmu_invalidate_in_progress))
return 1;
/*
- * Ensure the read of mmu_notifier_count happens before the read
- * of mmu_notifier_seq. This interacts with the smp_wmb() in
- * mmu_notifier_invalidate_range_end to make sure that the caller
- * either sees the old (non-zero) value of mmu_notifier_count or
- * the new (incremented) value of mmu_notifier_seq.
- * PowerPC Book3s HV KVM calls this under a per-page lock
- * rather than under kvm->mmu_lock, for scalability, so
- * can't rely on kvm->mmu_lock to keep things ordered.
+ * Ensure the read of mmu_invalidate_in_progress happens before
+ * the read of mmu_invalidate_seq. This interacts with the
+ * smp_wmb() in mmu_notifier_invalidate_range_end to make sure
+ * that the caller either sees the old (non-zero) value of
+ * mmu_invalidate_in_progress or the new (incremented) value of
+ * mmu_invalidate_seq.
+ *
+ * PowerPC Book3s HV KVM calls this under a per-page lock rather
+ * than under kvm->mmu_lock, for scalability, so can't rely on
+ * kvm->mmu_lock to keep things ordered.
*/
smp_rmb();
- if (kvm->mmu_notifier_seq != mmu_seq)
+ if (kvm->mmu_invalidate_seq != mmu_seq)
return 1;
return 0;
}

- static inline int mmu_notifier_retry_hva(struct kvm *kvm,
- unsigned long mmu_seq,
- unsigned long hva)
+ static inline int mmu_invalidate_retry_hva(struct kvm *kvm,
+ unsigned long mmu_seq,
+ unsigned long hva)
{
lockdep_assert_held(&kvm->mmu_lock);
/*
- * If mmu_notifier_count is non-zero, then the range maintained by
- * kvm_mmu_notifier_invalidate_range_start contains all addresses that
- * might be being invalidated. Note that it may include some false
+ * If mmu_invalidate_in_progress is non-zero, then the range maintained
+ * by kvm_mmu_notifier_invalidate_range_start contains all addresses
+ * that might be being invalidated. Note that it may include some false
* positives, due to shortcuts when handing concurrent invalidations.
*/
- if (unlikely(kvm->mmu_notifier_count) &&
- hva >= kvm->mmu_notifier_range_start &&
- hva < kvm->mmu_notifier_range_end)
+ if (unlikely(kvm->mmu_invalidate_in_progress) &&
+ hva >= kvm->mmu_invalidate_range_start &&
+ hva < kvm->mmu_invalidate_range_end)
return 1;
- if (kvm->mmu_notifier_seq != mmu_seq)
+ if (kvm->mmu_invalidate_seq != mmu_seq)
return 1;
return 0;
}
(Diffs for the remaining changed files were not loaded in this view.)