KVM: Provide common implementation for generic dirty log functions
Move the implementations of KVM_GET_DIRTY_LOG and KVM_CLEAR_DIRTY_LOG
for CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT into common KVM code.
The arch-specific implementations are extremely similar, differing
only in whether the dirty log needs to be sync'd from hardware (x86)
and how the TLBs are flushed.  Add new arch hooks to handle sync
and TLB flush; the sync will also be used for non-generic dirty log
support in a future patch (s390).

The ulterior motive for providing a common implementation is to
eliminate the dependency between arch and common code with respect to
the memslot referenced by the dirty log, i.e. to make it obvious in the
code that the validity of the memslot is guaranteed, as a future patch
will rework memslot handling such that id_to_memslot() can return NULL.

Signed-off-by: Sean Christopherson <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
Sean Christopherson authored and bonzini committed Mar 16, 2020
1 parent 163da37 · commit 0dff084
Showing 8 changed files with 106 additions and 195 deletions.
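
The common implementation itself lands in virt/kvm/kvm_main.c, whose hunk did not load on this page (see the note at the end of the diff). What follows is a rough sketch of the shape that code takes, inferred from the commit message and the arch hunks below rather than copied from the patch; in particular, the real patch may fold the hook calls into kvm_get_dirty_log_protect() itself.

/*
 * Sketch only: a generic ioctl handler that owns the locking and the
 * memslot lookup, bracketing the generic bitmap work with the two new
 * arch hooks.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
        struct kvm_memory_slot *memslot;
        bool flush = false;
        int r;

        mutex_lock(&kvm->slots_lock);

        /* Common code looks up the memslot, so arch code no longer has to. */
        memslot = id_to_memslot(kvm_memslots(kvm), log->slot);

        /* Arch hook: sync hardware-cached dirty state into dirty_bitmap (x86). */
        kvm_arch_sync_dirty_log(kvm, memslot);

        /* Generic snapshot, write-protect and copy-out, as before. */
        r = kvm_get_dirty_log_protect(kvm, log, &flush);

        /* Arch hook: arch-specific TLB invalidation for this memslot. */
        if (flush)
                kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);

        mutex_unlock(&kvm->slots_lock);
        return r;
}

A kvm_vm_ioctl_clear_dirty_log() counterpart would wrap kvm_clear_dirty_log_protect() with the same bracketing; that duplication is exactly what the per-arch hunks below delete.
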
arch/mips/kvm/mips.c: 63 changes (5 additions, 58 deletions)

@@ -978,69 +978,16 @@ long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl,
 	return r;
 }
 
-/**
- * kvm_vm_ioctl_get_dirty_log - get and clear the log of dirty pages in a slot
- * @kvm:	kvm instance
- * @log:	slot id and address to which we copy the log
- *
- * Steps 1-4 below provide general overview of dirty page logging. See
- * kvm_get_dirty_log_protect() function description for additional details.
- *
- * We call kvm_get_dirty_log_protect() to handle steps 1-3, upon return we
- * always flush the TLB (step 4) even if previous step failed and the dirty
- * bitmap may be corrupt. Regardless of previous outcome the KVM logging API
- * does not preclude user space subsequent dirty log read. Flushing TLB ensures
- * writes will be marked dirty for next log read.
- *
- *   1. Take a snapshot of the bit and clear it if needed.
- *   2. Write protect the corresponding page.
- *   3. Copy the snapshot to the userspace.
- *   4. Flush TLB's if needed.
- */
-int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
+void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
 {
-	struct kvm_memslots *slots;
-	struct kvm_memory_slot *memslot;
-	bool flush = false;
-	int r;
 
-	mutex_lock(&kvm->slots_lock);
-
-	r = kvm_get_dirty_log_protect(kvm, log, &flush);
-
-	if (flush) {
-		slots = kvm_memslots(kvm);
-		memslot = id_to_memslot(slots, log->slot);
-
-		/* Let implementation handle TLB/GVA invalidation */
-		kvm_mips_callbacks->flush_shadow_memslot(kvm, memslot);
-	}
-
-	mutex_unlock(&kvm->slots_lock);
-	return r;
 }
 
-int kvm_vm_ioctl_clear_dirty_log(struct kvm *kvm, struct kvm_clear_dirty_log *log)
+void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
+					struct kvm_memory_slot *memslot)
 {
-	struct kvm_memslots *slots;
-	struct kvm_memory_slot *memslot;
-	bool flush = false;
-	int r;
-
-	mutex_lock(&kvm->slots_lock);
-
-	r = kvm_clear_dirty_log_protect(kvm, log, &flush);
-
-	if (flush) {
-		slots = kvm_memslots(kvm);
-		memslot = id_to_memslot(slots, log->slot);
-
-		/* Let implementation handle TLB/GVA invalidation */
-		kvm_mips_callbacks->flush_shadow_memslot(kvm, memslot);
-	}
-
-	mutex_unlock(&kvm->slots_lock);
-	return r;
+	/* Let implementation handle TLB/GVA invalidation */
+	kvm_mips_callbacks->flush_shadow_memslot(kvm, memslot);
 }
 
 long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)

arch/powerpc/kvm/book3s.c: 5 changes (5 additions, 0 deletions)

@@ -799,6 +799,11 @@ int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
 	return vcpu->kvm->arch.kvm_ops->check_requests(vcpu);
 }
 
+void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
+{
+
+}
+
 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
 {
 	return kvm->arch.kvm_ops->get_dirty_log(kvm, log);

arch/powerpc/kvm/booke.c: 5 changes (5 additions, 0 deletions)

@@ -1766,6 +1766,11 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
 	return r;
 }
 
+void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
+{
+
+}
+
 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
 {
 	return -ENOTSUPP;

arch/s390/kvm/kvm-s390.c: 5 changes (2 additions, 3 deletions)

@@ -570,8 +570,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 	return r;
 }
 
-static void kvm_s390_sync_dirty_log(struct kvm *kvm,
-				    struct kvm_memory_slot *memslot)
+void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
 {
 	int i;
 	gfn_t cur_gfn, last_gfn;

@@ -631,7 +630,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 	if (!memslot->dirty_bitmap)
 		goto out;
 
-	kvm_s390_sync_dirty_log(kvm, memslot);
+	kvm_arch_sync_dirty_log(kvm, memslot);
 	r = kvm_get_dirty_log(kvm, log, &is_dirty);
 	if (r)
 		goto out;

arch/x86/kvm/x86.c: 61 changes (4 additions, 57 deletions)

@@ -4759,77 +4759,24 @@ static int kvm_vm_ioctl_reinject(struct kvm *kvm,
 	return 0;
 }
 
-/**
- * kvm_vm_ioctl_get_dirty_log - get and clear the log of dirty pages in a slot
- * @kvm:	kvm instance
- * @log:	slot id and address to which we copy the log
- *
- * Steps 1-4 below provide general overview of dirty page logging. See
- * kvm_get_dirty_log_protect() function description for additional details.
- *
- * We call kvm_get_dirty_log_protect() to handle steps 1-3, upon return we
- * always flush the TLB (step 4) even if previous step failed and the dirty
- * bitmap may be corrupt. Regardless of previous outcome the KVM logging API
- * does not preclude user space subsequent dirty log read. Flushing TLB ensures
- * writes will be marked dirty for next log read.
- *
- *   1. Take a snapshot of the bit and clear it if needed.
- *   2. Write protect the corresponding page.
- *   3. Copy the snapshot to the userspace.
- *   4. Flush TLB's if needed.
- */
-int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
+void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
 {
-	bool flush = false;
-	int r;
-
-	mutex_lock(&kvm->slots_lock);
-
 	/*
 	 * Flush potentially hardware-cached dirty pages to dirty_bitmap.
 	 */
 	if (kvm_x86_ops->flush_log_dirty)
 		kvm_x86_ops->flush_log_dirty(kvm);
-
-	r = kvm_get_dirty_log_protect(kvm, log, &flush);
-
-	/*
-	 * All the TLBs can be flushed out of mmu lock, see the comments in
-	 * kvm_mmu_slot_remove_write_access().
-	 */
-	lockdep_assert_held(&kvm->slots_lock);
-	if (flush)
-		kvm_flush_remote_tlbs(kvm);
-
-	mutex_unlock(&kvm->slots_lock);
-	return r;
 }
 
-int kvm_vm_ioctl_clear_dirty_log(struct kvm *kvm, struct kvm_clear_dirty_log *log)
+void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
+					struct kvm_memory_slot *memslot)
 {
-	bool flush = false;
-	int r;
-
-	mutex_lock(&kvm->slots_lock);
-
-	/*
-	 * Flush potentially hardware-cached dirty pages to dirty_bitmap.
-	 */
-	if (kvm_x86_ops->flush_log_dirty)
-		kvm_x86_ops->flush_log_dirty(kvm);
-
-	r = kvm_clear_dirty_log_protect(kvm, log, &flush);
-
 	/*
 	 * All the TLBs can be flushed out of mmu lock, see the comments in
 	 * kvm_mmu_slot_remove_write_access().
 	 */
 	lockdep_assert_held(&kvm->slots_lock);
-	if (flush)
-		kvm_flush_remote_tlbs(kvm);
-
-	mutex_unlock(&kvm->slots_lock);
-	return r;
+	kvm_flush_remote_tlbs(kvm);
 }
 
 int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,

include/linux/kvm_host.h: 23 changes (10 additions, 13 deletions)

@@ -816,23 +816,20 @@ vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf);
 
 int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext);
 
-int kvm_get_dirty_log(struct kvm *kvm,
-			struct kvm_dirty_log *log, int *is_dirty);
-
-int kvm_get_dirty_log_protect(struct kvm *kvm,
-			struct kvm_dirty_log *log, bool *flush);
-int kvm_clear_dirty_log_protect(struct kvm *kvm,
-			struct kvm_clear_dirty_log *log, bool *flush);
-
 void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
 					struct kvm_memory_slot *slot,
 					gfn_t gfn_offset,
 					unsigned long mask);
-
-int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
-				struct kvm_dirty_log *log);
-int kvm_vm_ioctl_clear_dirty_log(struct kvm *kvm,
-				struct kvm_clear_dirty_log *log);
+void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot);
+
+#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
+void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
+					struct kvm_memory_slot *memslot);
+#else /* !CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */
+int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log);
+int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log,
+		      int *is_dirty);
+#endif
 
 int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
 		bool line_status);

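For context, the declarations above back the KVM_GET_DIRTY_LOG and KVM_CLEAR_DIRTY_LOG vm ioctls: architectures that select CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT (x86, arm, mips) now get the ioctl handlers from common code, while powerpc and s390 keep their own and only gain the sync hook. A minimal userspace sketch of the API being refactored (the helper name and slot id are illustrative; sizing the bitmap to the memslot is the caller's job):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Illustrative helper: fetch the dirty bitmap for memslot 0 of a VM fd.
 * The buffer needs one bit per page in the slot, rounded up to 64 bits. */
static int get_dirty_bitmap(int vm_fd, void *bitmap)
{
        struct kvm_dirty_log log;

        memset(&log, 0, sizeof(log));
        log.slot = 0;                   /* memslot id (address space 0) */
        log.dirty_bitmap = bitmap;      /* filled in by the kernel */

        return ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log);
}
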
virt/kvm/arm/arm.c: 48 changes (4 additions, 44 deletions)

@@ -1183,55 +1183,15 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
 	return r;
 }
 
-/**
- * kvm_vm_ioctl_get_dirty_log - get and clear the log of dirty pages in a slot
- * @kvm:	kvm instance
- * @log:	slot id and address to which we copy the log
- *
- * Steps 1-4 below provide general overview of dirty page logging. See
- * kvm_get_dirty_log_protect() function description for additional details.
- *
- * We call kvm_get_dirty_log_protect() to handle steps 1-3, upon return we
- * always flush the TLB (step 4) even if previous step failed and the dirty
- * bitmap may be corrupt. Regardless of previous outcome the KVM logging API
- * does not preclude user space subsequent dirty log read. Flushing TLB ensures
- * writes will be marked dirty for next log read.
- *
- *   1. Take a snapshot of the bit and clear it if needed.
- *   2. Write protect the corresponding page.
- *   3. Copy the snapshot to the userspace.
- *   4. Flush TLB's if needed.
- */
-int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
+void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
 {
-	bool flush = false;
-	int r;
 
-	mutex_lock(&kvm->slots_lock);
-
-	r = kvm_get_dirty_log_protect(kvm, log, &flush);
-
-	if (flush)
-		kvm_flush_remote_tlbs(kvm);
-
-	mutex_unlock(&kvm->slots_lock);
-	return r;
 }
 
-int kvm_vm_ioctl_clear_dirty_log(struct kvm *kvm, struct kvm_clear_dirty_log *log)
+void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
+					struct kvm_memory_slot *memslot)
 {
-	bool flush = false;
-	int r;
-
-	mutex_lock(&kvm->slots_lock);
-
-	r = kvm_clear_dirty_log_protect(kvm, log, &flush);
-
-	if (flush)
-		kvm_flush_remote_tlbs(kvm);
-
-	mutex_unlock(&kvm->slots_lock);
-	return r;
+	kvm_flush_remote_tlbs(kvm);
 }
 
 static int kvm_vm_ioctl_set_device_addr(struct kvm *kvm,

The eighth changed file (presumably virt/kvm/kvm_main.c, where the common implementation lands) did not load on this page; by the totals above it accounts for the remaining 71 additions and 20 deletions.
