Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull kvm fixes from Paolo Bonzini:
 "ARM:
   - fix compilation error when PMD and PUD are folded
   - fix regression in reads-as-zero behaviour of ID_AA64ZFR0_EL1
   - add aarch64 get-reg-list test

  x86:
   - fix semantic conflict between two series merged for 5.10
   - fix (and test) enforcement of paravirtual cpuid features

  selftests:
   - various cleanups to memory management selftests
   - new selftests testcase for performance of dirty logging"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (30 commits)
  KVM: selftests: allow two iterations of dirty_log_perf_test
  KVM: selftests: Introduce the dirty log perf test
  KVM: selftests: Make the number of vcpus global
  KVM: selftests: Make the per vcpu memory size global
  KVM: selftests: Drop pointless vm_create wrapper
  KVM: selftests: Add wrfract to common guest code
  KVM: selftests: Simplify demand_paging_test with timespec_diff_now
  KVM: selftests: Remove address rounding in guest code
  KVM: selftests: Factor code out of demand_paging_test
  KVM: selftests: Use a single binary for dirty/clear log test
  KVM: selftests: Always clear dirty bitmap after iteration
  KVM: selftests: Add blessed SVE registers to get-reg-list
  KVM: selftests: Add aarch64 get-reg-list test
  selftests: kvm: test enforcement of paravirtual cpuid features
  selftests: kvm: Add exception handling to selftests
  selftests: kvm: Clear uc so UCALL_NONE is being properly reported
  selftests: kvm: Fix the segment descriptor layout to match the actual layout
  KVM: x86: handle MSR_IA32_DEBUGCTLMSR with report_ignored_msrs
  kvm: x86: request masterclock update any time guest uses different msr
  kvm: x86: ensure pv_cpuid.features is initialized when enabling cap
  ...
torvalds committed Nov 9, 2020
2 parents 3552c37 + 6d6a18f commit 407ab57
Showing 32 changed files with 2,383 additions and 393 deletions.
5 changes: 2 additions & 3 deletions Documentation/virt/kvm/api.rst
@@ -6367,7 +6367,7 @@ accesses that would usually trigger a #GP by KVM into the guest will
 instead get bounced to user space through the KVM_EXIT_X86_RDMSR and
 KVM_EXIT_X86_WRMSR exit notifications.
 
-8.25 KVM_X86_SET_MSR_FILTER
+8.27 KVM_X86_SET_MSR_FILTER
 ---------------------------
 
 :Architectures: x86
@@ -6381,8 +6381,7 @@ In combination with KVM_CAP_X86_USER_SPACE_MSR, this allows user space to
 trap and emulate MSRs that are outside of the scope of KVM as well as
 limit the attack surface on KVM's MSR emulation code.
 
-
-8.26 KVM_CAP_ENFORCE_PV_CPUID
+8.28 KVM_CAP_ENFORCE_PV_CPUID
 -----------------------------
 
 Architectures: x86
2 changes: 2 additions & 0 deletions arch/arm64/kvm/mmu.c
@@ -788,10 +788,12 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	}
 
 	switch (vma_shift) {
+#ifndef __PAGETABLE_PMD_FOLDED
 	case PUD_SHIFT:
 		if (fault_supports_stage2_huge_mapping(memslot, hva, PUD_SIZE))
 			break;
 		fallthrough;
+#endif
 	case CONT_PMD_SHIFT:
 		vma_shift = PMD_SHIFT;
 		fallthrough;
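The #ifndef above corresponds to the "fix compilation error when PMD and PUD are folded" item in the merge summary: when the PMD level is folded, PMD_SHIFT is defined to the same value as PUD_SHIFT, so a switch listing both labels is rejected by the compiler. A minimal stand-alone sketch of the failure mode follows; FOLDED and the shift values are illustrative stand-ins, not the kernel's definitions.

/*
 * Illustrative only: FOLDED stands in for __PAGETABLE_PMD_FOLDED. With the
 * level folded, the two case labels have the same value, which the compiler
 * rejects as a duplicate case unless one of them is compiled out.
 */
#include <stdio.h>

#define FOLDED          1
#define PUD_SHIFT       30
#if FOLDED
#define PMD_SHIFT       PUD_SHIFT       /* folded level: same value as PUD_SHIFT */
#else
#define PMD_SHIFT       21
#endif

static int mapping_level(unsigned long shift)
{
        switch (shift) {
#if !FOLDED                             /* mirrors #ifndef __PAGETABLE_PMD_FOLDED */
        case PUD_SHIFT:
                return 3;
#endif
        case PMD_SHIFT:                 /* without the guard: duplicate case value */
                return 2;
        default:
                return 1;
        }
}

int main(void)
{
        printf("level for PMD_SHIFT: %d\n", mapping_level(PMD_SHIFT));
        return 0;
}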
108 changes: 33 additions & 75 deletions arch/arm64/kvm/sys_regs.c
@@ -1069,7 +1069,7 @@ static bool trap_ptrauth(struct kvm_vcpu *vcpu,
 static unsigned int ptrauth_visibility(const struct kvm_vcpu *vcpu,
 			const struct sys_reg_desc *rd)
 {
-	return vcpu_has_ptrauth(vcpu) ? 0 : REG_HIDDEN_USER | REG_HIDDEN_GUEST;
+	return vcpu_has_ptrauth(vcpu) ? 0 : REG_HIDDEN;
 }
 
 #define __PTRAUTH_KEY(k)						\
@@ -1153,6 +1153,22 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu,
 	return val;
 }
 
+static unsigned int id_visibility(const struct kvm_vcpu *vcpu,
+				  const struct sys_reg_desc *r)
+{
+	u32 id = sys_reg((u32)r->Op0, (u32)r->Op1,
+			 (u32)r->CRn, (u32)r->CRm, (u32)r->Op2);
+
+	switch (id) {
+	case SYS_ID_AA64ZFR0_EL1:
+		if (!vcpu_has_sve(vcpu))
+			return REG_RAZ;
+		break;
+	}
+
+	return 0;
+}
+
 /* cpufeature ID register access trap handlers */
 
 static bool __access_id_reg(struct kvm_vcpu *vcpu,
@@ -1171,7 +1187,9 @@ static bool access_id_reg(struct kvm_vcpu *vcpu,
 			  struct sys_reg_params *p,
 			  const struct sys_reg_desc *r)
 {
-	return __access_id_reg(vcpu, p, r, false);
+	bool raz = sysreg_visible_as_raz(vcpu, r);
+
+	return __access_id_reg(vcpu, p, r, raz);
 }
 
 static bool access_raz_id_reg(struct kvm_vcpu *vcpu,
@@ -1192,72 +1210,7 @@ static unsigned int sve_visibility(const struct kvm_vcpu *vcpu,
 	if (vcpu_has_sve(vcpu))
 		return 0;
 
-	return REG_HIDDEN_USER | REG_HIDDEN_GUEST;
-}
-
-/* Visibility overrides for SVE-specific ID registers */
-static unsigned int sve_id_visibility(const struct kvm_vcpu *vcpu,
-				      const struct sys_reg_desc *rd)
-{
-	if (vcpu_has_sve(vcpu))
-		return 0;
-
-	return REG_HIDDEN_USER;
-}
-
-/* Generate the emulated ID_AA64ZFR0_EL1 value exposed to the guest */
-static u64 guest_id_aa64zfr0_el1(const struct kvm_vcpu *vcpu)
-{
-	if (!vcpu_has_sve(vcpu))
-		return 0;
-
-	return read_sanitised_ftr_reg(SYS_ID_AA64ZFR0_EL1);
-}
-
-static bool access_id_aa64zfr0_el1(struct kvm_vcpu *vcpu,
-				   struct sys_reg_params *p,
-				   const struct sys_reg_desc *rd)
-{
-	if (p->is_write)
-		return write_to_read_only(vcpu, p, rd);
-
-	p->regval = guest_id_aa64zfr0_el1(vcpu);
-	return true;
-}
-
-static int get_id_aa64zfr0_el1(struct kvm_vcpu *vcpu,
-			       const struct sys_reg_desc *rd,
-			       const struct kvm_one_reg *reg, void __user *uaddr)
-{
-	u64 val;
-
-	if (WARN_ON(!vcpu_has_sve(vcpu)))
-		return -ENOENT;
-
-	val = guest_id_aa64zfr0_el1(vcpu);
-	return reg_to_user(uaddr, &val, reg->id);
-}
-
-static int set_id_aa64zfr0_el1(struct kvm_vcpu *vcpu,
-			       const struct sys_reg_desc *rd,
-			       const struct kvm_one_reg *reg, void __user *uaddr)
-{
-	const u64 id = sys_reg_to_index(rd);
-	int err;
-	u64 val;
-
-	if (WARN_ON(!vcpu_has_sve(vcpu)))
-		return -ENOENT;
-
-	err = reg_from_user(&val, uaddr, id);
-	if (err)
-		return err;
-
-	/* This is what we mean by invariant: you can't change it. */
-	if (val != guest_id_aa64zfr0_el1(vcpu))
-		return -EINVAL;
-
-	return 0;
+	return REG_HIDDEN;
 }
 
 /*
@@ -1299,13 +1252,17 @@ static int __set_id_reg(const struct kvm_vcpu *vcpu,
 static int get_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
 		      const struct kvm_one_reg *reg, void __user *uaddr)
 {
-	return __get_id_reg(vcpu, rd, uaddr, false);
+	bool raz = sysreg_visible_as_raz(vcpu, rd);
+
+	return __get_id_reg(vcpu, rd, uaddr, raz);
 }
 
 static int set_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
 		      const struct kvm_one_reg *reg, void __user *uaddr)
 {
-	return __set_id_reg(vcpu, rd, uaddr, false);
+	bool raz = sysreg_visible_as_raz(vcpu, rd);
+
+	return __set_id_reg(vcpu, rd, uaddr, raz);
 }
 
 static int get_raz_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
@@ -1397,6 +1354,7 @@ static bool access_mte_regs(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 	.access = access_id_reg,		\
 	.get_user = get_id_reg,			\
 	.set_user = set_id_reg,			\
+	.visibility = id_visibility,		\
 }
 
 /*
@@ -1518,7 +1476,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
 	ID_SANITISED(ID_AA64PFR1_EL1),
 	ID_UNALLOCATED(4,2),
 	ID_UNALLOCATED(4,3),
-	{ SYS_DESC(SYS_ID_AA64ZFR0_EL1), access_id_aa64zfr0_el1, .get_user = get_id_aa64zfr0_el1, .set_user = set_id_aa64zfr0_el1, .visibility = sve_id_visibility },
+	ID_SANITISED(ID_AA64ZFR0_EL1),
 	ID_UNALLOCATED(4,5),
 	ID_UNALLOCATED(4,6),
 	ID_UNALLOCATED(4,7),
@@ -2185,7 +2143,7 @@ static void perform_access(struct kvm_vcpu *vcpu,
 	trace_kvm_sys_access(*vcpu_pc(vcpu), params, r);
 
 	/* Check for regs disabled by runtime config */
-	if (sysreg_hidden_from_guest(vcpu, r)) {
+	if (sysreg_hidden(vcpu, r)) {
 		kvm_inject_undefined(vcpu);
 		return;
 	}
@@ -2684,7 +2642,7 @@ int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg
 		return get_invariant_sys_reg(reg->id, uaddr);
 
 	/* Check for regs disabled by runtime config */
-	if (sysreg_hidden_from_user(vcpu, r))
+	if (sysreg_hidden(vcpu, r))
 		return -ENOENT;
 
 	if (r->get_user)
@@ -2709,7 +2667,7 @@ int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg
 		return set_invariant_sys_reg(reg->id, uaddr);
 
 	/* Check for regs disabled by runtime config */
-	if (sysreg_hidden_from_user(vcpu, r))
+	if (sysreg_hidden(vcpu, r))
 		return -ENOENT;
 
 	if (r->set_user)
@@ -2780,7 +2738,7 @@ static int walk_one_sys_reg(const struct kvm_vcpu *vcpu,
 	if (!(rd->reg || rd->get_user))
 		return 0;
 
-	if (sysreg_hidden_from_user(vcpu, rd))
+	if (sysreg_hidden(vcpu, rd))
 		return 0;
 
 	if (!copy_reg_to_user(rd, uind))
16 changes: 8 additions & 8 deletions arch/arm64/kvm/sys_regs.h
@@ -59,8 +59,8 @@ struct sys_reg_desc {
 			const struct sys_reg_desc *rd);
 };
 
-#define REG_HIDDEN_USER		(1 << 0) /* hidden from userspace ioctls */
-#define REG_HIDDEN_GUEST	(1 << 1) /* hidden from guest */
+#define REG_HIDDEN		(1 << 0) /* hidden from userspace and guest */
+#define REG_RAZ			(1 << 1) /* RAZ from userspace and guest */
 
 static __printf(2, 3)
 inline void print_sys_reg_msg(const struct sys_reg_params *p,
@@ -111,22 +111,22 @@ static inline void reset_val(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r
 	__vcpu_sys_reg(vcpu, r->reg) = r->val;
 }
 
-static inline bool sysreg_hidden_from_guest(const struct kvm_vcpu *vcpu,
-					    const struct sys_reg_desc *r)
+static inline bool sysreg_hidden(const struct kvm_vcpu *vcpu,
+				 const struct sys_reg_desc *r)
 {
 	if (likely(!r->visibility))
 		return false;
 
-	return r->visibility(vcpu, r) & REG_HIDDEN_GUEST;
+	return r->visibility(vcpu, r) & REG_HIDDEN;
 }
 
-static inline bool sysreg_hidden_from_user(const struct kvm_vcpu *vcpu,
-					   const struct sys_reg_desc *r)
+static inline bool sysreg_visible_as_raz(const struct kvm_vcpu *vcpu,
+					 const struct sys_reg_desc *r)
 {
 	if (likely(!r->visibility))
 		return false;
 
-	return r->visibility(vcpu, r) & REG_HIDDEN_USER;
+	return r->visibility(vcpu, r) & REG_RAZ;
 }
 
 static inline int cmp_sys_reg(const struct sys_reg_desc *i1,
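Taken together, the sys_regs changes replace the two per-consumer hidden flags with a single REG_HIDDEN plus a new REG_RAZ, and ID_AA64ZFR0_EL1 becomes an ordinary ID_SANITISED register that reads as zero when the vCPU has no SVE. A hedged userspace sketch of how that behaviour could be observed through KVM_GET_ONE_REG is shown below; error handling is minimal, and the register encoding plus the "no SVE feature requested" setup are assumptions of the example, not part of this diff.

/*
 * Sketch only: read ID_AA64ZFR0_EL1 (op0=3, op1=0, CRn=0, CRm=4, op2=4)
 * from a vCPU created without the SVE feature; after this series it is
 * expected to read as zero rather than being rejected. arm64 host assumed.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>          /* KVM_* ioctls, struct kvm_one_reg */
#include <asm/kvm.h>            /* ARM64_SYS_REG() */

int main(void)
{
        int kvm = open("/dev/kvm", O_RDWR);
        int vm = ioctl(kvm, KVM_CREATE_VM, 0);
        int vcpu = ioctl(vm, KVM_CREATE_VCPU, 0);

        struct kvm_vcpu_init init;
        ioctl(vm, KVM_ARM_PREFERRED_TARGET, &init);     /* no SVE requested */
        ioctl(vcpu, KVM_ARM_VCPU_INIT, &init);

        uint64_t val = ~0ULL;
        struct kvm_one_reg reg = {
                .id = ARM64_SYS_REG(3, 0, 0, 4, 4),     /* ID_AA64ZFR0_EL1 */
                .addr = (uint64_t)&val,
        };
        if (ioctl(vcpu, KVM_GET_ONE_REG, &reg) == 0)
                printf("ID_AA64ZFR0_EL1 = 0x%llx\n", (unsigned long long)val);
        else
                perror("KVM_GET_ONE_REG");
        return 0;
}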
23 changes: 16 additions & 7 deletions arch/x86/kvm/cpuid.c
@@ -90,6 +90,20 @@ static int kvm_check_cpuid(struct kvm_cpuid_entry2 *entries, int nent)
 	return 0;
 }
 
+void kvm_update_pv_runtime(struct kvm_vcpu *vcpu)
+{
+	struct kvm_cpuid_entry2 *best;
+
+	best = kvm_find_cpuid_entry(vcpu, KVM_CPUID_FEATURES, 0);
+
+	/*
+	 * save the feature bitmap to avoid cpuid lookup for every PV
+	 * operation
+	 */
+	if (best)
+		vcpu->arch.pv_cpuid.features = best->eax;
+}
+
 void kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu)
 {
 	struct kvm_cpuid_entry2 *best;
@@ -124,13 +138,6 @@ void kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu)
 	    (best->eax & (1 << KVM_FEATURE_PV_UNHALT)))
 		best->eax &= ~(1 << KVM_FEATURE_PV_UNHALT);
 
-	/*
-	 * save the feature bitmap to avoid cpuid lookup for every PV
-	 * operation
-	 */
-	if (best)
-		vcpu->arch.pv_cpuid.features = best->eax;
-
 	if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT)) {
 		best = kvm_find_cpuid_entry(vcpu, 0x1, 0);
 		if (best)
@@ -162,6 +169,8 @@ static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
 	vcpu->arch.guest_supported_xcr0 =
 		(best->eax | ((u64)best->edx << 32)) & supported_xcr0;
 
+	kvm_update_pv_runtime(vcpu);
+
 	vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);
 	kvm_mmu_reset_context(vcpu);
 
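The hunks above move the caching of CPUID.KVM_CPUID_FEATURES.EAX into kvm_update_pv_runtime() and call it from kvm_vcpu_after_set_cpuid(), so the cached bitmap stays in sync when userspace replaces the CPUID table after enabling the enforcement capability. Below is a hedged, stand-alone sketch of the kind of check such a cache enables; the struct and helper names are illustrative, modelled on the diff rather than copied from the tree.

/*
 * Sketch only: one bit test against a cached feature word instead of a
 * cpuid-entry lookup on every paravirt operation. "enforce" models the
 * KVM_CAP_ENFORCE_PV_FEATURE_CPUID toggle; this is not kernel code.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define KVM_FEATURE_PV_SEND_IPI 11      /* bit number from the KVM uapi */

struct pv_cpuid_cache {
        bool enforce;                   /* capability enabled for this vCPU */
        uint32_t features;              /* cached CPUID.KVM_CPUID_FEATURES.EAX */
};

static bool guest_pv_allowed(const struct pv_cpuid_cache *pv, unsigned int feature)
{
        if (!pv->enforce)
                return true;            /* legacy behaviour: nothing is filtered */

        return pv->features & (1u << feature);
}

int main(void)
{
        struct pv_cpuid_cache pv = { .enforce = true, .features = 0 };

        /* userspace cleared the bit, so the hypercall path must refuse it */
        printf("PV_SEND_IPI allowed: %d\n",
               guest_pv_allowed(&pv, KVM_FEATURE_PV_SEND_IPI));
        return 0;
}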
1 change: 1 addition & 0 deletions arch/x86/kvm/cpuid.h
@@ -11,6 +11,7 @@ extern u32 kvm_cpu_caps[NCAPINTS] __read_mostly;
 void kvm_set_cpu_caps(void);
 
 void kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu);
+void kvm_update_pv_runtime(struct kvm_vcpu *vcpu);
 struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
 					      u32 function, u32 index);
 int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
12 changes: 7 additions & 5 deletions arch/x86/kvm/mmu/mmu.c
@@ -856,12 +856,14 @@ static int pte_list_add(struct kvm_vcpu *vcpu, u64 *spte,
 	} else {
 		rmap_printk("pte_list_add: %p %llx many->many\n", spte, *spte);
 		desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
-		while (desc->sptes[PTE_LIST_EXT-1] && desc->more) {
-			desc = desc->more;
+		while (desc->sptes[PTE_LIST_EXT-1]) {
 			count += PTE_LIST_EXT;
-		}
-		if (desc->sptes[PTE_LIST_EXT-1]) {
-			desc->more = mmu_alloc_pte_list_desc(vcpu);
+
+			if (!desc->more) {
+				desc->more = mmu_alloc_pte_list_desc(vcpu);
+				desc = desc->more;
+				break;
+			}
 			desc = desc->more;
 		}
 		for (i = 0; desc->sptes[i]; ++i)
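The rewritten loop fixes the returned count: before the change, the entries of a full tail descriptor were not added to count when a fresh descriptor had to be allocated, so the value returned to the caller was too small by PTE_LIST_EXT in that case. The following stand-alone model of the fixed walk illustrates this with simplified types, a calloc-backed allocator, and PTE_LIST_EXT assumed to be 3 as in the real code; it is not kernel code.

/*
 * Model of the fixed counting walk in the many->many branch. Zero is
 * treated as "empty slot", so only non-zero sptes are inserted.
 */
#include <stdio.h>
#include <stdlib.h>

#define PTE_LIST_EXT 3

struct pte_list_desc {
        unsigned long sptes[PTE_LIST_EXT];
        struct pte_list_desc *more;
};

/* Append one spte, returning how many entries were already in the list. */
static int pte_list_add_model(struct pte_list_desc *desc, unsigned long spte)
{
        int count = 0, i;

        while (desc->sptes[PTE_LIST_EXT - 1]) {         /* descriptor is full */
                count += PTE_LIST_EXT;                  /* the fix: count it first */
                if (!desc->more) {
                        desc->more = calloc(1, sizeof(*desc));
                        desc = desc->more;
                        break;
                }
                desc = desc->more;
        }
        for (i = 0; desc->sptes[i]; ++i)
                count++;
        desc->sptes[i] = spte;
        return count;
}

int main(void)
{
        struct pte_list_desc head = { {0}, NULL };

        /*
         * The 4th and 7th insertions allocate a new descriptor; the pre-fix
         * logic would have reported 0 and 3 here instead of 3 and 6.
         */
        for (unsigned long s = 1; s <= 7; s++)
                printf("spte %lu: %d entries already present\n",
                       s, pte_list_add_model(&head, s));
        return 0;
}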