Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull KVM fixes from Paolo Bonzini:
 "The ARM changes are largish, but not too scary.  And a simple fix for
  x86 (bug introduced in 3.19)"

(Paolo says these are the "final" fixes. We'll see).

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: x86: check LAPIC presence when building apic_map
  arm/arm64: KVM: Use kernel mapping to perform invalidation on page fault
  arm/arm64: KVM: Invalidate data cache on unmap
  arm/arm64: KVM: Use set/way op trapping to track the state of the caches
torvalds committed Jan 30, 2015
2 parents f3a3404 + df04d1d commit 1f59fe7
Showing 15 changed files with 330 additions and 178 deletions.
10 changes: 10 additions & 0 deletions arch/arm/include/asm/kvm_emulate.h
@@ -38,6 +38,16 @@ static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
	vcpu->arch.hcr = HCR_GUEST_MASK;
}

static inline unsigned long vcpu_get_hcr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.hcr;
}

static inline void vcpu_set_hcr(struct kvm_vcpu *vcpu, unsigned long hcr)
{
	vcpu->arch.hcr = hcr;
}

static inline bool vcpu_mode_is_32bit(struct kvm_vcpu *vcpu)
{
	return 1;
3 changes: 0 additions & 3 deletions arch/arm/include/asm/kvm_host.h
@@ -125,9 +125,6 @@ struct kvm_vcpu_arch {
	 * Anything that is not used directly from assembly code goes
	 * here.
	 */
	/* dcache set/way operation pending */
	int last_pcpu;
	cpumask_t require_dcache_flush;

	/* Don't run the guest on this vcpu */
	bool pause;
77 changes: 67 additions & 10 deletions arch/arm/include/asm/kvm_mmu.h
@@ -44,6 +44,7 @@

#ifndef __ASSEMBLY__

#include <linux/highmem.h>
#include <asm/cacheflush.h>
#include <asm/pgalloc.h>

@@ -161,13 +162,10 @@ static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
	return (vcpu->arch.cp15[c1_SCTLR] & 0b101) == 0b101;
}

static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva,
					     unsigned long size,
					     bool ipa_uncached)
static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu, pfn_t pfn,
					       unsigned long size,
					       bool ipa_uncached)
{
	if (!vcpu_has_cache_enabled(vcpu) || ipa_uncached)
		kvm_flush_dcache_to_poc((void *)hva, size);

	/*
	 * If we are going to insert an instruction page and the icache is
	 * either VIPT or PIPT, there is a potential problem where the host
@@ -179,18 +177,77 @@ static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva,
	 *
	 * VIVT caches are tagged using both the ASID and the VMID and doesn't
	 * need any kind of flushing (DDI 0406C.b - Page B3-1392).
	 *
	 * We need to do this through a kernel mapping (using the
	 * user-space mapping has proved to be the wrong
	 * solution). For that, we need to kmap one page at a time,
	 * and iterate over the range.
	 */
	if (icache_is_pipt()) {
		__cpuc_coherent_user_range(hva, hva + size);
	} else if (!icache_is_vivt_asid_tagged()) {

	bool need_flush = !vcpu_has_cache_enabled(vcpu) || ipa_uncached;

	VM_BUG_ON(size & PAGE_MASK);

	if (!need_flush && !icache_is_pipt())
		goto vipt_cache;

	while (size) {
		void *va = kmap_atomic_pfn(pfn);

		if (need_flush)
			kvm_flush_dcache_to_poc(va, PAGE_SIZE);

		if (icache_is_pipt())
			__cpuc_coherent_user_range((unsigned long)va,
						   (unsigned long)va + PAGE_SIZE);

		size -= PAGE_SIZE;
		pfn++;

		kunmap_atomic(va);
	}

vipt_cache:
	if (!icache_is_pipt() && !icache_is_vivt_asid_tagged()) {
		/* any kind of VIPT cache */
		__flush_icache_all();
	}
}

static inline void __kvm_flush_dcache_pte(pte_t pte)
{
	void *va = kmap_atomic(pte_page(pte));

	kvm_flush_dcache_to_poc(va, PAGE_SIZE);

	kunmap_atomic(va);
}

static inline void __kvm_flush_dcache_pmd(pmd_t pmd)
{
	unsigned long size = PMD_SIZE;
	pfn_t pfn = pmd_pfn(pmd);

	while (size) {
		void *va = kmap_atomic_pfn(pfn);

		kvm_flush_dcache_to_poc(va, PAGE_SIZE);

		pfn++;
		size -= PAGE_SIZE;

		kunmap_atomic(va);
	}
}

static inline void __kvm_flush_dcache_pud(pud_t pud)
{
}

#define kvm_virt_to_phys(x) virt_to_idmap((unsigned long)(x))

void stage2_flush_vm(struct kvm *kvm);
void kvm_set_way_flush(struct kvm_vcpu *vcpu);
void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);

#endif /* !__ASSEMBLY__ */
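
The flush helpers added above operate on a single page or block; the stage-2 walkers that call them live in arch/arm/kvm/mmu.c, which is not shown on this page. As a rough sketch of how the unmap path is expected to use the new dcache helper (the walker shape and the kvm_set_pte()/kvm_tlb_flush_vmid_ipa() calls are assumptions here; only __kvm_flush_dcache_pte() comes from the header above):

/* Sketch only -- not part of the diff shown on this page. */
static void sketch_unmap_stage2_pte(struct kvm *kvm, pte_t *pte, phys_addr_t addr)
{
	pte_t old_pte = *pte;

	kvm_set_pte(pte, __pte(0));		/* zap the stage-2 entry */
	kvm_tlb_flush_vmid_ipa(kvm, addr);	/* drop any stale TLB entry for that IPA */

	/* Clean+invalidate to PoC so the page can be reused without stale lines. */
	__kvm_flush_dcache_pte(old_pte);
}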

10 changes: 0 additions & 10 deletions arch/arm/kvm/arm.c
@@ -281,15 +281,6 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
	vcpu->cpu = cpu;
	vcpu->arch.host_cpu_context = this_cpu_ptr(kvm_host_cpu_state);

	/*
	 * Check whether this vcpu requires the cache to be flushed on
	 * this physical CPU. This is a consequence of doing dcache
	 * operations by set/way on this vcpu. We do it here to be in
	 * a non-preemptible section.
	 */
	if (cpumask_test_and_clear_cpu(cpu, &vcpu->arch.require_dcache_flush))
		flush_cache_all(); /* We'd really want v7_flush_dcache_all() */

	kvm_arm_set_running_vcpu(vcpu);
}

@@ -541,7 +532,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
		ret = kvm_call_hyp(__kvm_vcpu_run, vcpu);

		vcpu->mode = OUTSIDE_GUEST_MODE;
		vcpu->arch.last_pcpu = smp_processor_id();
		kvm_guest_exit();
		trace_kvm_exit(*vcpu_pc(vcpu));
		/*
70 changes: 14 additions & 56 deletions arch/arm/kvm/coproc.c
@@ -189,82 +189,40 @@ static bool access_l2ectlr(struct kvm_vcpu *vcpu,
	return true;
}

/* See note at ARM ARM B1.14.4 */
/*
 * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
 */
static bool access_dcsw(struct kvm_vcpu *vcpu,
			const struct coproc_params *p,
			const struct coproc_reg *r)
{
	unsigned long val;
	int cpu;

	if (!p->is_write)
		return read_from_write_only(vcpu, p);

	cpu = get_cpu();

	cpumask_setall(&vcpu->arch.require_dcache_flush);
	cpumask_clear_cpu(cpu, &vcpu->arch.require_dcache_flush);

	/* If we were already preempted, take the long way around */
	if (cpu != vcpu->arch.last_pcpu) {
		flush_cache_all();
		goto done;
	}

	val = *vcpu_reg(vcpu, p->Rt1);

	switch (p->CRm) {
	case 6:			/* Upgrade DCISW to DCCISW, as per HCR.SWIO */
	case 14:		/* DCCISW */
		asm volatile("mcr p15, 0, %0, c7, c14, 2" : : "r" (val));
		break;

	case 10:		/* DCCSW */
		asm volatile("mcr p15, 0, %0, c7, c10, 2" : : "r" (val));
		break;
	}

done:
	put_cpu();

	kvm_set_way_flush(vcpu);
	return true;
}

/*
 * Generic accessor for VM registers. Only called as long as HCR_TVM
 * is set.
 * is set. If the guest enables the MMU, we stop trapping the VM
 * sys_regs and leave it in complete control of the caches.
 *
 * Used by the cpu-specific code.
 */
static bool access_vm_reg(struct kvm_vcpu *vcpu,
			  const struct coproc_params *p,
			  const struct coproc_reg *r)
bool access_vm_reg(struct kvm_vcpu *vcpu,
		   const struct coproc_params *p,
		   const struct coproc_reg *r)
{
	bool was_enabled = vcpu_has_cache_enabled(vcpu);

	BUG_ON(!p->is_write);

	vcpu->arch.cp15[r->reg] = *vcpu_reg(vcpu, p->Rt1);
	if (p->is_64bit)
		vcpu->arch.cp15[r->reg + 1] = *vcpu_reg(vcpu, p->Rt2);

	return true;
}

/*
 * SCTLR accessor. Only called as long as HCR_TVM is set. If the
 * guest enables the MMU, we stop trapping the VM sys_regs and leave
 * it in complete control of the caches.
 *
 * Used by the cpu-specific code.
 */
bool access_sctlr(struct kvm_vcpu *vcpu,
		  const struct coproc_params *p,
		  const struct coproc_reg *r)
{
	access_vm_reg(vcpu, p, r);

	if (vcpu_has_cache_enabled(vcpu)) {	/* MMU+Caches enabled? */
		vcpu->arch.hcr &= ~HCR_TVM;
		stage2_flush_vm(vcpu->kvm);
	}

	kvm_toggle_cache(vcpu, was_enabled);
	return true;
}
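
kvm_set_way_flush() and kvm_toggle_cache() are declared in kvm_mmu.h earlier in this commit, but their bodies live in arch/arm/kvm/mmu.c, which this page does not show. A rough sketch of what they are expected to do, inferred from the code being removed here (the old set/way flush in access_dcsw and the old HCR_TVM handling in access_sctlr); treat the bodies below as an approximation, not the actual patch:

/* Approximate behaviour only -- the real bodies are in arch/arm/kvm/mmu.c. */
void kvm_set_way_flush(struct kvm_vcpu *vcpu)
{
	unsigned long hcr = vcpu_get_hcr(vcpu);

	/*
	 * On the first trapped set/way op (VM registers not yet trapped),
	 * clean and invalidate the whole guest, then start trapping the VM
	 * registers so we notice when the caches are turned off.
	 */
	if (!(hcr & HCR_TVM)) {
		stage2_flush_vm(vcpu->kvm);
		vcpu_set_hcr(vcpu, hcr | HCR_TVM);
	}
}

void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled)
{
	bool now_enabled = vcpu_has_cache_enabled(vcpu);

	/* Flush when the MMU/cache state changes in either direction. */
	if (now_enabled != was_enabled)
		stage2_flush_vm(vcpu->kvm);

	/* Caches are back on: stop trapping the VM registers. */
	if (now_enabled)
		vcpu_set_hcr(vcpu, vcpu_get_hcr(vcpu) & ~HCR_TVM);
}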

6 changes: 3 additions & 3 deletions arch/arm/kvm/coproc.h
@@ -153,8 +153,8 @@ static inline int cmp_reg(const struct coproc_reg *i1,
#define is64 .is_64 = true
#define is32 .is_64 = false

bool access_sctlr(struct kvm_vcpu *vcpu,
		  const struct coproc_params *p,
		  const struct coproc_reg *r);
bool access_vm_reg(struct kvm_vcpu *vcpu,
		   const struct coproc_params *p,
		   const struct coproc_reg *r);

#endif /* __ARM_KVM_COPROC_LOCAL_H__ */
2 changes: 1 addition & 1 deletion arch/arm/kvm/coproc_a15.c
@@ -34,7 +34,7 @@
static const struct coproc_reg a15_regs[] = {
	/* SCTLR: swapped by interrupt.S. */
	{ CRn( 1), CRm( 0), Op1( 0), Op2( 0), is32,
			access_sctlr, reset_val, c1_SCTLR, 0x00C50078 },
			access_vm_reg, reset_val, c1_SCTLR, 0x00C50078 },
};

static struct kvm_coproc_target_table a15_target_table = {
2 changes: 1 addition & 1 deletion arch/arm/kvm/coproc_a7.c
@@ -37,7 +37,7 @@
static const struct coproc_reg a7_regs[] = {
	/* SCTLR: swapped by interrupt.S. */
	{ CRn( 1), CRm( 0), Op1( 0), Op2( 0), is32,
			access_sctlr, reset_val, c1_SCTLR, 0x00C50878 },
			access_vm_reg, reset_val, c1_SCTLR, 0x00C50878 },
};

static struct kvm_coproc_target_table a7_target_table = {