Skip to content

Commit

Permalink
KVM: Move memslot deletion to helper function
Browse files Browse the repository at this point in the history
Move memslot deletion into its own routine so that the success path for
other memslot updates does not need to use kvm_free_memslot(), i.e. can
explicitly destroy the dirty bitmap when necessary.  This paves the way
for dropping @dont from kvm_free_memslot(), i.e. all callers now pass
NULL for @dont.

Add a comment above the code that makes a copy of the existing memslot
prior to deletion, as it is not at all obvious that the pointer will
become stale during sorting and/or installation of new memslots.

Note, kvm_arch_commit_memory_region() allows an architecture to free
resources when moving a memslot or changing its flags, e.g. x86 frees
its arch specific memslot metadata during commit_memory_region().

Acked-by: Christoffer Dall <[email protected]>
Tested-by: Christoffer Dall <[email protected]>
Reviewed-by: Peter Xu <[email protected]>
Signed-off-by: Sean Christopherson <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
  • Loading branch information
Sean Christopherson authored and bonzini committed Mar 16, 2020
1 parent 2119884 commit 5c0b4f3
Showing 1 changed file with 46 additions and 29 deletions.
75 changes: 46 additions & 29 deletions virt/kvm/kvm_main.c
Original file line number Diff line number Diff line change
Expand Up @@ -1043,6 +1043,27 @@ static int kvm_set_memslot(struct kvm *kvm,
return r;
}

static int kvm_delete_memslot(struct kvm *kvm,
const struct kvm_userspace_memory_region *mem,
struct kvm_memory_slot *old, int as_id)
{
struct kvm_memory_slot new;
int r;

if (!old->npages)
return -EINVAL;

memset(&new, 0, sizeof(new));
new.id = old->id;

r = kvm_set_memslot(kvm, mem, old, &new, as_id, KVM_MR_DELETE);
if (r)
return r;

kvm_free_memslot(kvm, old, NULL);
return 0;
}

/*
* Allocate some memory and give it an address in the guest physical address
* space.
Expand Down Expand Up @@ -1092,37 +1113,38 @@ int __kvm_set_memory_region(struct kvm *kvm,
if (npages > KVM_MEM_MAX_NR_PAGES)
return -EINVAL;

new = old = *slot;
/*
* Make a full copy of the old memslot, the pointer will become stale
* when the memslots are re-sorted by update_memslots(), and the old
* memslot needs to be referenced after calling update_memslots(), e.g.
* to free its resources and for arch specific behavior.
*/
old = *slot;
if (!mem->memory_size)
return kvm_delete_memslot(kvm, mem, &old, as_id);

new = old;

new.id = id;
new.base_gfn = base_gfn;
new.npages = npages;
new.flags = mem->flags;
new.userspace_addr = mem->userspace_addr;

if (npages) {
if (!old.npages)
change = KVM_MR_CREATE;
else { /* Modify an existing slot. */
if ((new.userspace_addr != old.userspace_addr) ||
(npages != old.npages) ||
((new.flags ^ old.flags) & KVM_MEM_READONLY))
return -EINVAL;

if (base_gfn != old.base_gfn)
change = KVM_MR_MOVE;
else if (new.flags != old.flags)
change = KVM_MR_FLAGS_ONLY;
else /* Nothing to change. */
return 0;
}
} else {
if (!old.npages)
if (!old.npages) {
change = KVM_MR_CREATE;
} else { /* Modify an existing slot. */
if ((new.userspace_addr != old.userspace_addr) ||
(npages != old.npages) ||
((new.flags ^ old.flags) & KVM_MEM_READONLY))
return -EINVAL;

change = KVM_MR_DELETE;
new.base_gfn = 0;
new.flags = 0;
if (base_gfn != old.base_gfn)
change = KVM_MR_MOVE;
else if (new.flags != old.flags)
change = KVM_MR_FLAGS_ONLY;
else /* Nothing to change. */
return 0;
}

if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) {
Expand All @@ -1145,17 +1167,12 @@ int __kvm_set_memory_region(struct kvm *kvm,
return r;
}

/* actual memory is freed via old in kvm_free_memslot below */
if (change == KVM_MR_DELETE) {
new.dirty_bitmap = NULL;
memset(&new.arch, 0, sizeof(new.arch));
}

r = kvm_set_memslot(kvm, mem, &old, &new, as_id, change);
if (r)
goto out_bitmap;

kvm_free_memslot(kvm, &old, &new);
if (old.dirty_bitmap && !new.dirty_bitmap)
kvm_destroy_dirty_bitmap(&old);
return 0;

out_bitmap:
Expand Down

0 comments on commit 5c0b4f3

Please sign in to comment.