KVM: do not release the error pfn
After commit a276632, the error pfn is replaced by the
error code, so it need not be released anymore.

[ The patch has been compile-tested for powerpc ]

Signed-off-by: Xiao Guangrong <[email protected]>
Signed-off-by: Avi Kivity <[email protected]>
Xiao Guangrong authored and avikivity committed Aug 6, 2012
1 parent 6cede2e commit cb9aaa3
Showing 6 changed files with 14 additions and 21 deletions.
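Background for the diffs below: before a276632, a failed gfn_to_pfn() lookup returned the pfn of a dummy "bad page" that still carried a page reference, so error paths had to call kvm_release_pfn_clean(). Afterwards an error pfn is a bare error code with no struct page behind it, so every such release can simply be dropped, and kvm_release_pfn_clean() itself now warns if handed one. A minimal standalone sketch of the new contract follows; the KVM_PFN_ERR_* encoding and values are assumptions for illustration, not taken from this diff:

/*
 * sketch.c - standalone illustration of the error-pfn contract.
 * The KVM_PFN_ERR_* encoding below is an assumption for illustration;
 * only the "don't release an error pfn" rule comes from this commit.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t pfn_t;

#define KVM_PFN_ERR_MASK	(0xfffULL << 52)	/* assumed reserved range */
#define KVM_PFN_ERR_FAULT	(KVM_PFN_ERR_MASK)
#define KVM_PFN_ERR_HWPOISON	(KVM_PFN_ERR_MASK + 1)

static bool is_error_pfn(pfn_t pfn)
{
	/* An error pfn is a bare error code, never a pinned struct page. */
	return !!(pfn & KVM_PFN_ERR_MASK);
}

int main(void)
{
	pfn_t pfn = KVM_PFN_ERR_FAULT;	/* what a failed lookup now returns */

	if (is_error_pfn(pfn)) {
		/* Old code released the dummy page here; with nothing
		 * pinned behind an error pfn, bailing out is enough. */
		fprintf(stderr, "error pfn %#llx: nothing to release\n",
			(unsigned long long)pfn);
		return 1;
	}
	return 0;
}

Under this contract, releasing on an error path is not merely unnecessary but a bug, which is why the kvm_main.c hunk at the end adds WARN_ON(is_error_pfn(pfn)) to kvm_release_pfn_clean().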
1 change: 0 additions & 1 deletion arch/powerpc/kvm/e500_tlb.c
@@ -524,7 +524,6 @@ static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
 	if (is_error_pfn(pfn)) {
 		printk(KERN_ERR "Couldn't get real page for gfn %lx!\n",
 				(long)gfn);
-		kvm_release_pfn_clean(pfn);
 		return;
 	}
 
7 changes: 3 additions & 4 deletions arch/x86/kvm/mmu.c
@@ -2496,7 +2496,9 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 			rmap_recycle(vcpu, sptep, gfn);
 		}
 	}
-	kvm_release_pfn_clean(pfn);
+
+	if (!is_error_pfn(pfn))
+		kvm_release_pfn_clean(pfn);
 }
 
 static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
@@ -2648,7 +2650,6 @@ static void kvm_send_hwpoison_signal(unsigned long address, struct task_struct *
 
 static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, pfn_t pfn)
 {
-	kvm_release_pfn_clean(pfn);
 	if (pfn == KVM_PFN_ERR_HWPOISON) {
 		kvm_send_hwpoison_signal(gfn_to_hva(vcpu->kvm, gfn), current);
 		return 0;
@@ -3273,8 +3274,6 @@ static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
 	if (!async)
 		return false; /* *pfn has correct page already */
 
-	kvm_release_pfn_clean(*pfn);
-
 	if (!prefault && can_do_async_pf(vcpu)) {
 		trace_kvm_try_async_get_page(gva, gfn);
 		if (kvm_find_async_pf_gfn(vcpu, gfn)) {
4 changes: 1 addition & 3 deletions arch/x86/kvm/mmu_audit.c
@@ -116,10 +116,8 @@ static void audit_mappings(struct kvm_vcpu *vcpu, u64 *sptep, int level)
 	gfn = kvm_mmu_page_get_gfn(sp, sptep - sp->spt);
 	pfn = gfn_to_pfn_atomic(vcpu->kvm, gfn);
 
-	if (is_error_pfn(pfn)) {
-		kvm_release_pfn_clean(pfn);
+	if (is_error_pfn(pfn))
 		return;
-	}
 
 	hpa = pfn << PAGE_SHIFT;
 	if ((*sptep & PT64_BASE_ADDR_MASK) != hpa)
8 changes: 2 additions & 6 deletions arch/x86/kvm/paging_tmpl.h
@@ -370,10 +370,8 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 	pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte);
 	pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte, true);
 	pfn = gfn_to_pfn_atomic(vcpu->kvm, gpte_to_gfn(gpte));
-	if (mmu_invalid_pfn(pfn)) {
-		kvm_release_pfn_clean(pfn);
+	if (mmu_invalid_pfn(pfn))
 		return;
-	}
 
 	/*
 	 * we call mmu_set_spte() with host_writable = true because that
@@ -448,10 +446,8 @@ static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
 		gfn = gpte_to_gfn(gpte);
 		pfn = pte_prefetch_gfn_to_pfn(vcpu, gfn,
 					      pte_access & ACC_WRITE_MASK);
-		if (mmu_invalid_pfn(pfn)) {
-			kvm_release_pfn_clean(pfn);
+		if (mmu_invalid_pfn(pfn))
 			break;
-		}
 
 		mmu_set_spte(vcpu, spte, sp->role.access, pte_access, 0, 0,
 			     NULL, PT_PAGE_TABLE_LEVEL, gfn,
1 change: 0 additions & 1 deletion virt/kvm/iommu.c
@@ -107,7 +107,6 @@ int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
 		 */
 		pfn = kvm_pin_pages(slot, gfn, page_size);
 		if (is_error_pfn(pfn)) {
-			kvm_release_pfn_clean(pfn);
 			gfn += 1;
 			continue;
 		}
14 changes: 8 additions & 6 deletions virt/kvm/kvm_main.c
@@ -102,9 +102,6 @@ static bool largepages_enabled = true;
 
 bool kvm_is_mmio_pfn(pfn_t pfn)
 {
-	if (is_error_pfn(pfn))
-		return false;
-
 	if (pfn_valid(pfn)) {
 		int reserved;
 		struct page *tail = pfn_to_page(pfn);
@@ -1165,10 +1162,13 @@ EXPORT_SYMBOL_GPL(gfn_to_page_many_atomic);
 
 static struct page *kvm_pfn_to_page(pfn_t pfn)
 {
-	WARN_ON(kvm_is_mmio_pfn(pfn));
+	if (is_error_pfn(pfn))
+		return KVM_ERR_PTR_BAD_PAGE;
 
-	if (is_error_pfn(pfn) || kvm_is_mmio_pfn(pfn))
+	if (kvm_is_mmio_pfn(pfn)) {
+		WARN_ON(1);
 		return KVM_ERR_PTR_BAD_PAGE;
+	}
 
 	return pfn_to_page(pfn);
 }
@@ -1193,7 +1193,9 @@ EXPORT_SYMBOL_GPL(kvm_release_page_clean);
 
 void kvm_release_pfn_clean(pfn_t pfn)
 {
-	if (!is_error_pfn(pfn) && !kvm_is_mmio_pfn(pfn))
+	WARN_ON(is_error_pfn(pfn));
+
+	if (!kvm_is_mmio_pfn(pfn))
 		put_page(pfn_to_page(pfn));
 }
 EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);
