Skip to content

Commit

Permalink
Merge branch 'topic/ppc-kvm' into next
Browse files Browse the repository at this point in the history
Merge one more commit from the topic branch we shared with the kvm-ppc
tree.

This brings in a fix to the code that scans for dirty pages during
migration of a VM, which was incorrectly triggering a warning.
  • Loading branch information
mpe committed Jun 3, 2020
2 parents 4336b93 + bf8036a commit 1395375
Show file tree
Hide file tree
Showing 2 changed files with 39 additions and 6 deletions.
10 changes: 10 additions & 0 deletions arch/powerpc/include/asm/kvm_book3s_64.h
Original file line number Diff line number Diff line change
Expand Up @@ -635,6 +635,16 @@ extern void kvmhv_remove_nest_rmap_range(struct kvm *kvm,
unsigned long gpa, unsigned long hpa,
unsigned long nbytes);

/*
 * Look up the PTE for guest effective address @ea in the partition-scoped
 * (secondary) page table of @kvm, without taking kvm->mmu_lock.
 *
 * @hshift: out-parameter filled in by __find_linux_pte; presumably the
 *          page-size shift for a huge mapping — TODO confirm against the
 *          __find_linux_pte contract.
 *
 * Returns the PTE pointer, or NULL if no mapping is found. Because no lock
 * is held, callers must re-validate the entry (e.g. under kvm->mmu_lock)
 * before acting on it.
 */
static inline pte_t *
find_kvm_secondary_pte_unlocked(struct kvm *kvm, unsigned long ea,
				unsigned *hshift)
{
	return __find_linux_pte(kvm->arch.pgtable, ea, NULL, hshift);
}

static inline pte_t *find_kvm_secondary_pte(struct kvm *kvm, unsigned long ea,
unsigned *hshift)
{
Expand Down
35 changes: 29 additions & 6 deletions arch/powerpc/kvm/book3s_64_mmu_radix.c
Original file line number Diff line number Diff line change
Expand Up @@ -1040,20 +1040,43 @@ static int kvm_radix_test_clear_dirty(struct kvm *kvm,
{
unsigned long gfn = memslot->base_gfn + pagenum;
unsigned long gpa = gfn << PAGE_SHIFT;
pte_t *ptep;
pte_t *ptep, pte;
unsigned int shift;
int ret = 0;
unsigned long old, *rmapp;

if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
return ret;

ptep = find_kvm_secondary_pte(kvm, gpa, &shift);
if (ptep && pte_present(*ptep) && pte_dirty(*ptep)) {
ret = 1;
if (shift)
ret = 1 << (shift - PAGE_SHIFT);
/*
* For performance reasons we don't hold kvm->mmu_lock while walking the
* partition scoped table.
*/
ptep = find_kvm_secondary_pte_unlocked(kvm, gpa, &shift);
if (!ptep)
return 0;

pte = READ_ONCE(*ptep);
if (pte_present(pte) && pte_dirty(pte)) {
spin_lock(&kvm->mmu_lock);
/*
* Recheck the pte again
*/
if (pte_val(pte) != pte_val(*ptep)) {
/*
* We have KVM_MEM_LOG_DIRTY_PAGES enabled. Hence we can
* only find PAGE_SIZE pte entries here. We can continue
* to use the pte addr returned by above page table
* walk.
*/
if (!pte_present(*ptep) || !pte_dirty(*ptep)) {
spin_unlock(&kvm->mmu_lock);
return 0;
}
}

ret = 1;
VM_BUG_ON(shift);
old = kvmppc_radix_update_pte(kvm, ptep, _PAGE_DIRTY, 0,
gpa, shift);
kvmppc_radix_tlbie_page(kvm, gpa, shift, kvm->arch.lpid);
Expand Down

0 comments on commit 1395375

Please sign in to comment.