Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull KVM fixes from Paolo Bonzini:
 "The usual smattering of fixes and tunings that came in too late for
  the merge window, but should not wait four months before they appear
  in a release.

  I also travelled a bit more than usual in the first part of May, which
  didn't help with picking up patches and reports promptly"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (33 commits)
  KVM: x86: fix return value for reserved EFER
  tools/kvm_stat: fix fields filter for child events
  KVM: selftests: Wrap vcpu_nested_state_get/set functions with x86 guard
  kvm: selftests: aarch64: compile with warnings on
  kvm: selftests: aarch64: fix default vm mode
  kvm: selftests: aarch64: dirty_log_test: fix unaligned memslot size
  KVM: s390: fix memory slot handling for KVM_SET_USER_MEMORY_REGION
  KVM: x86/pmu: do not mask the value that is written to fixed PMUs
  KVM: x86/pmu: mask the result of rdpmc according to the width of the counters
  x86/kvm/pmu: Set AMD's virt PMU version to 1
  KVM: x86: do not spam dmesg with VMCS/VMCB dumps
  kvm: Check irqchip mode before assign irqfd
  kvm: svm/avic: fix off-by-one in checking host APIC ID
  KVM: selftests: do not blindly clobber registers in guest asm
  KVM: selftests: Remove duplicated TEST_ASSERT in hyperv_cpuid.c
  KVM: LAPIC: Expose per-vCPU timer_advance_ns to userspace
  KVM: LAPIC: Fix lapic_timer_advance_ns parameter overflow
  kvm: vmx: Fix -Wmissing-prototypes warnings
  KVM: nVMX: Fix using __this_cpu_read() in preemptible context
  kvm: fix compilation on s390
  ...
torvalds committed May 26, 2019
2 parents 128f2bf + 66f61c9 commit 862f0a3
Showing 44 changed files with 399 additions and 303 deletions.
2 changes: 0 additions & 2 deletions MAINTAINERS
@@ -8611,14 +8611,12 @@ F: arch/x86/include/asm/svm.h
F: arch/x86/kvm/svm.c

KERNEL VIRTUAL MACHINE FOR ARM/ARM64 (KVM/arm, KVM/arm64)
M: Christoffer Dall <[email protected]>
M: Marc Zyngier <[email protected]>
R: James Morse <[email protected]>
R: Julien Thierry <[email protected]>
R: Suzuki K Pouloze <[email protected]>
L: [email protected] (moderated for non-subscribers)
L: [email protected]
W: http://systems.cs.columbia.edu/projects/kvm-arm
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm.git
S: Maintained
F: arch/arm/include/uapi/asm/kvm*
1 change: 1 addition & 0 deletions arch/arm/kvm/hyp/Makefile
@@ -11,6 +11,7 @@ CFLAGS_ARMV7VE :=$(call cc-option, -march=armv7ve)

obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v3-sr.o
obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/timer-sr.o
obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/aarch32.o

obj-$(CONFIG_KVM_ARM_HOST) += tlb.o
obj-$(CONFIG_KVM_ARM_HOST) += cp15-sr.o
3 changes: 0 additions & 3 deletions arch/arm64/include/asm/kvm_host.h
@@ -592,9 +592,6 @@ static inline int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr);
void kvm_clr_pmu_events(u32 clr);

void __pmu_switch_to_host(struct kvm_cpu_context *host_ctxt);
bool __pmu_switch_to_guest(struct kvm_cpu_context *host_ctxt);

void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu);
void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu);
#else
1 change: 1 addition & 0 deletions arch/arm64/kvm/hyp/Makefile
@@ -10,6 +10,7 @@ KVM=../../../../virt/kvm

obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v3-sr.o
obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/timer-sr.o
obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/aarch32.o

obj-$(CONFIG_KVM_ARM_HOST) += vgic-v2-cpuif-proxy.o
obj-$(CONFIG_KVM_ARM_HOST) += sysreg-sr.o
39 changes: 39 additions & 0 deletions arch/arm64/kvm/hyp/switch.c
@@ -16,6 +16,7 @@
*/

#include <linux/arm-smccc.h>
#include <linux/kvm_host.h>
#include <linux/types.h>
#include <linux/jump_label.h>
#include <uapi/linux/psci.h>
@@ -505,6 +506,44 @@ static void __hyp_text __set_host_arch_workaround_state(struct kvm_vcpu *vcpu)
#endif
}

/**
* Disable host events, enable guest events
*/
static bool __hyp_text __pmu_switch_to_guest(struct kvm_cpu_context *host_ctxt)
{
struct kvm_host_data *host;
struct kvm_pmu_events *pmu;

host = container_of(host_ctxt, struct kvm_host_data, host_ctxt);
pmu = &host->pmu_events;

if (pmu->events_host)
write_sysreg(pmu->events_host, pmcntenclr_el0);

if (pmu->events_guest)
write_sysreg(pmu->events_guest, pmcntenset_el0);

return (pmu->events_host || pmu->events_guest);
}

/**
* Disable guest events, enable host events
*/
static void __hyp_text __pmu_switch_to_host(struct kvm_cpu_context *host_ctxt)
{
struct kvm_host_data *host;
struct kvm_pmu_events *pmu;

host = container_of(host_ctxt, struct kvm_host_data, host_ctxt);
pmu = &host->pmu_events;

if (pmu->events_guest)
write_sysreg(pmu->events_guest, pmcntenclr_el0);

if (pmu->events_host)
write_sysreg(pmu->events_host, pmcntenset_el0);
}

/* Switch to the guest for VHE systems running in EL2 */
int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
{
38 changes: 0 additions & 38 deletions arch/arm64/kvm/pmu.c
@@ -53,44 +53,6 @@ void kvm_clr_pmu_events(u32 clr)
ctx->pmu_events.events_guest &= ~clr;
}

/**
* Disable host events, enable guest events
*/
bool __hyp_text __pmu_switch_to_guest(struct kvm_cpu_context *host_ctxt)
{
struct kvm_host_data *host;
struct kvm_pmu_events *pmu;

host = container_of(host_ctxt, struct kvm_host_data, host_ctxt);
pmu = &host->pmu_events;

if (pmu->events_host)
write_sysreg(pmu->events_host, pmcntenclr_el0);

if (pmu->events_guest)
write_sysreg(pmu->events_guest, pmcntenset_el0);

return (pmu->events_host || pmu->events_guest);
}

/**
* Disable guest events, enable host events
*/
void __hyp_text __pmu_switch_to_host(struct kvm_cpu_context *host_ctxt)
{
struct kvm_host_data *host;
struct kvm_pmu_events *pmu;

host = container_of(host_ctxt, struct kvm_host_data, host_ctxt);
pmu = &host->pmu_events;

if (pmu->events_guest)
write_sysreg(pmu->events_guest, pmcntenclr_el0);

if (pmu->events_host)
write_sysreg(pmu->events_host, pmcntenset_el0);
}

#define PMEVTYPER_READ_CASE(idx) \
case idx: \
return read_sysreg(pmevtyper##idx##_el0)
2 changes: 1 addition & 1 deletion arch/s390/include/asm/kvm_host.h
@@ -36,7 +36,7 @@
*/
#define KVM_NR_IRQCHIPS 1
#define KVM_IRQCHIP_NUM_PINS 4096
#define KVM_HALT_POLL_NS_DEFAULT 80000
#define KVM_HALT_POLL_NS_DEFAULT 50000

/* s390-specific vcpu->requests bit members */
#define KVM_REQ_ENABLE_IBS KVM_ARCH_REQ(0)
37 changes: 22 additions & 15 deletions arch/s390/kvm/kvm-s390.c
@@ -181,7 +181,7 @@ MODULE_PARM_DESC(hpage, "1m huge page backing support");
/* maximum percentage of steal time for polling. >100 is treated like 100 */
static u8 halt_poll_max_steal = 10;
module_param(halt_poll_max_steal, byte, 0644);
MODULE_PARM_DESC(hpage, "Maximum percentage of steal time to allow polling");
MODULE_PARM_DESC(halt_poll_max_steal, "Maximum percentage of steal time to allow polling");

/*
* For now we handle at most 16 double words as this is what the s390 base
@@ -4524,21 +4524,28 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
const struct kvm_memory_slot *new,
enum kvm_mr_change change)
{
int rc;

/* If the basics of the memslot do not change, we do not want
* to update the gmap. Every update causes several unnecessary
* segment translation exceptions. This is usually handled just
* fine by the normal fault handler + gmap, but it will also
* cause faults on the prefix page of running guest CPUs.
*/
if (old->userspace_addr == mem->userspace_addr &&
old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
old->npages * PAGE_SIZE == mem->memory_size)
return;
int rc = 0;

rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
mem->guest_phys_addr, mem->memory_size);
switch (change) {
case KVM_MR_DELETE:
rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
old->npages * PAGE_SIZE);
break;
case KVM_MR_MOVE:
rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
old->npages * PAGE_SIZE);
if (rc)
break;
/* FALLTHROUGH */
case KVM_MR_CREATE:
rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
mem->guest_phys_addr, mem->memory_size);
break;
case KVM_MR_FLAGS_ONLY:
break;
default:
WARN(1, "Unknown KVM MR CHANGE: %d\n", change);
}
if (rc)
pr_warn("failed to commit memory region\n");
return;
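
The branch taken is selected by the memslot change type, which userspace drives through the KVM_SET_USER_MEMORY_REGION ioctl. A minimal sketch of the path being fixed (the fd and slot geometry are illustrative assumptions, not from this patch):

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Sketch: re-register slot 0 at a new guest physical base; the fixed
 * code above handles this as KVM_MR_MOVE (unmap old range, map new). */
static int move_memslot(int vm_fd, void *backing)
{
    struct kvm_userspace_memory_region region = {
        .slot            = 0,
        .guest_phys_addr = 0x200000,   /* hypothetical new base */
        .memory_size     = 0x100000,   /* size unchanged */
        .userspace_addr  = (unsigned long)backing, /* backing unchanged */
    };

    return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
}
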
8 changes: 4 additions & 4 deletions arch/x86/kvm/cpuid.c
@@ -456,8 +456,9 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
}
break;
}
/* function 4 has additional index. */
case 4: {
/* functions 4 and 0x8000001d have additional index. */
case 4:
case 0x8000001d: {
int i, cache_type;

entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
@@ -701,8 +702,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
entry->ecx = entry->edx = 0;
break;
case 0x8000001a:
break;
case 0x8000001d:
case 0x8000001e:
break;
/*Add support for Centaur's CPUID instruction*/
case 0xC0000000:
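
Like Intel's leaf 4, AMD's 0x8000001d enumerates one cache per ECX sub-leaf until a sub-leaf reports cache type 0, which is why it now takes the KVM_CPUID_FLAG_SIGNIFCANT_INDEX path above. A hedged guest-side sketch of that enumeration:

/* Sketch (guest side): walk 0x8000001d sub-leaves; EAX[4:0] == 0 ends the list. */
static void enumerate_cache_leaves(void)
{
    unsigned int eax, ebx, ecx, edx, i;

    for (i = 0; ; i++) {
        eax = 0x8000001d;
        ecx = i;    /* the sub-leaf index selects which cache is described */
        asm volatile("cpuid"
                     : "+a" (eax), "=b" (ebx), "+c" (ecx), "=d" (edx));
        if (!(eax & 0x1f))    /* cache type 0: no more caches */
            break;
        /* eax/ebx/ecx now describe the cache level, line size, sets, ... */
    }
}
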
18 changes: 18 additions & 0 deletions arch/x86/kvm/debugfs.c
@@ -9,12 +9,22 @@
*/
#include <linux/kvm_host.h>
#include <linux/debugfs.h>
#include "lapic.h"

bool kvm_arch_has_vcpu_debugfs(void)
{
return true;
}

static int vcpu_get_timer_advance_ns(void *data, u64 *val)
{
struct kvm_vcpu *vcpu = (struct kvm_vcpu *) data;
*val = vcpu->arch.apic->lapic_timer.timer_advance_ns;
return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(vcpu_timer_advance_ns_fops, vcpu_get_timer_advance_ns, NULL, "%llu\n");

static int vcpu_get_tsc_offset(void *data, u64 *val)
{
struct kvm_vcpu *vcpu = (struct kvm_vcpu *) data;
@@ -51,6 +61,14 @@ int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
if (!ret)
return -ENOMEM;

if (lapic_in_kernel(vcpu)) {
ret = debugfs_create_file("lapic_timer_advance_ns", 0444,
vcpu->debugfs_dentry,
vcpu, &vcpu_timer_advance_ns_fops);
if (!ret)
return -ENOMEM;
}

if (kvm_has_tsc_control) {
ret = debugfs_create_file("tsc-scaling-ratio", 0444,
vcpu->debugfs_dentry,
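
The new read-only attribute appears in each vCPU's debugfs directory alongside tsc-offset. Assuming the usual /sys/kernel/debug/kvm/<pid>-<vm_fd>/vcpu<id>/ layout (an assumption about kvm's debugfs naming, not part of this patch), a userspace read looks like:

#include <stdio.h>

/* Sketch: read the per-vCPU lapic_timer_advance_ns value exposed above. */
static int read_timer_advance_ns(int pid, int vm_fd, int vcpu_id,
                                 unsigned long long *val)
{
    char path[128];
    FILE *f;

    snprintf(path, sizeof(path),
             "/sys/kernel/debug/kvm/%d-%d/vcpu%d/lapic_timer_advance_ns",
             pid, vm_fd, vcpu_id);
    f = fopen(path, "r");
    if (!f)
        return -1;
    if (fscanf(f, "%llu", val) != 1) {
        fclose(f);
        return -1;
    }
    fclose(f);
    return 0;
}
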
7 changes: 7 additions & 0 deletions arch/x86/kvm/irq.c
@@ -172,3 +172,10 @@ void __kvm_migrate_timers(struct kvm_vcpu *vcpu)
__kvm_migrate_apic_timer(vcpu);
__kvm_migrate_pit_timer(vcpu);
}

bool kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args)
{
bool resample = args->flags & KVM_IRQFD_FLAG_RESAMPLE;

return resample ? irqchip_kernel(kvm) : irqchip_in_kernel(kvm);
}
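
This is the check behind "kvm: Check irqchip mode before assign irqfd": a plain assignment only needs some in-kernel irqchip (split included), while a resampling irqfd also needs the fully in-kernel PIC/IOAPIC. A hedged sketch of the userspace call being gated (vm_fd and efd are assumed to already exist):

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Sketch: assign an irqfd; with the fix this is refused unless the
 * matching irqchip mode has been configured first. */
static int assign_irqfd(int vm_fd, int efd, unsigned int gsi)
{
    struct kvm_irqfd irqfd = {
        .fd  = efd,    /* an eventfd created by the caller */
        .gsi = gsi,
        /* setting .flags = KVM_IRQFD_FLAG_RESAMPLE (plus .resamplefd)
         * would additionally require the full in-kernel irqchip */
    };

    return ioctl(vm_fd, KVM_IRQFD, &irqfd);
}
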
1 change: 1 addition & 0 deletions arch/x86/kvm/irq.h
@@ -114,6 +114,7 @@ static inline int irqchip_in_kernel(struct kvm *kvm)
return mode != KVM_IRQCHIP_NONE;
}

bool kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args);
void kvm_inject_pending_timer_irqs(struct kvm_vcpu *vcpu);
void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu);
void kvm_apic_nmi_wd_deliver(struct kvm_vcpu *vcpu);
10 changes: 3 additions & 7 deletions arch/x86/kvm/pmu.c
@@ -283,23 +283,19 @@ int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
bool fast_mode = idx & (1u << 31);
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
struct kvm_pmc *pmc;
u64 ctr_val;
u64 mask = fast_mode ? ~0u : ~0ull;

if (!pmu->version)
return 1;

if (is_vmware_backdoor_pmc(idx))
return kvm_pmu_rdpmc_vmware(vcpu, idx, data);

pmc = kvm_x86_ops->pmu_ops->msr_idx_to_pmc(vcpu, idx);
pmc = kvm_x86_ops->pmu_ops->msr_idx_to_pmc(vcpu, idx, &mask);
if (!pmc)
return 1;

ctr_val = pmc_read_counter(pmc);
if (fast_mode)
ctr_val = (u32)ctr_val;

*data = ctr_val;
*data = pmc_read_counter(pmc) & mask;
return 0;
}
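
With the mask now coming from msr_idx_to_pmc(), a "fast" RDPMC (ECX bit 31 set) reads back only the low 32 bits, and normal reads are truncated to the counter width the vendor code reports. A guest-side illustration (hypothetical helper, not from this patch):

/* Sketch (guest side): RDPMC with ECX bit 31 set is "fast" mode, which the
 * host code above now truncates to 32 bits via the ~0u mask. */
static unsigned long long guest_rdpmc(unsigned int counter, int fast)
{
    unsigned int lo, hi;

    if (fast)
        counter |= 1u << 31;
    asm volatile("rdpmc" : "=a" (lo), "=d" (hi) : "c" (counter));
    return ((unsigned long long)hi << 32) | lo;
}
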

3 changes: 2 additions & 1 deletion arch/x86/kvm/pmu.h
@@ -25,7 +25,8 @@ struct kvm_pmu_ops {
unsigned (*find_fixed_event)(int idx);
bool (*pmc_is_enabled)(struct kvm_pmc *pmc);
struct kvm_pmc *(*pmc_idx_to_pmc)(struct kvm_pmu *pmu, int pmc_idx);
struct kvm_pmc *(*msr_idx_to_pmc)(struct kvm_vcpu *vcpu, unsigned idx);
struct kvm_pmc *(*msr_idx_to_pmc)(struct kvm_vcpu *vcpu, unsigned idx,
u64 *mask);
int (*is_valid_msr_idx)(struct kvm_vcpu *vcpu, unsigned idx);
bool (*is_valid_msr)(struct kvm_vcpu *vcpu, u32 msr);
int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr, u64 *data);
4 changes: 2 additions & 2 deletions arch/x86/kvm/pmu_amd.c
@@ -186,7 +186,7 @@ static int amd_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned idx)
}

/* idx is the ECX register of RDPMC instruction */
static struct kvm_pmc *amd_msr_idx_to_pmc(struct kvm_vcpu *vcpu, unsigned idx)
static struct kvm_pmc *amd_msr_idx_to_pmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *mask)
{
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
struct kvm_pmc *counters;
@@ -269,10 +269,10 @@ static void amd_pmu_refresh(struct kvm_vcpu *vcpu)

pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << 48) - 1;
pmu->reserved_bits = 0xffffffff00200000ull;
pmu->version = 1;
/* not applicable to AMD; but clean them to prevent any fall out */
pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
pmu->nr_arch_fixed_counters = 0;
pmu->version = 0;
pmu->global_status = 0;
}

15 changes: 13 additions & 2 deletions arch/x86/kvm/svm.c
@@ -379,6 +379,9 @@ module_param(vgif, int, 0444);
static int sev = IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT);
module_param(sev, int, 0444);

static bool __read_mostly dump_invalid_vmcb = 0;
module_param(dump_invalid_vmcb, bool, 0644);

static u8 rsm_ins_bytes[] = "\x0f\xaa";

static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
@@ -2024,7 +2027,11 @@ static void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
if (!kvm_vcpu_apicv_active(vcpu))
return;

if (WARN_ON(h_physical_id >= AVIC_MAX_PHYSICAL_ID_COUNT))
/*
* Since the host physical APIC id is 8 bits,
* we can support host APIC ID upto 255.
*/
if (WARN_ON(h_physical_id > AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK))
return;

entry = READ_ONCE(*(svm->avic_physical_id_cache));
@@ -4824,6 +4831,11 @@ static void dump_vmcb(struct kvm_vcpu *vcpu)
struct vmcb_control_area *control = &svm->vmcb->control;
struct vmcb_save_area *save = &svm->vmcb->save;

if (!dump_invalid_vmcb) {
pr_warn_ratelimited("set kvm_amd.dump_invalid_vmcb=1 to dump internal KVM state.\n");
return;
}

pr_err("VMCB Control Area:\n");
pr_err("%-20s%04x\n", "cr_read:", control->intercept_cr & 0xffff);
pr_err("%-20s%04x\n", "cr_write:", control->intercept_cr >> 16);
@@ -4982,7 +4994,6 @@ static int handle_exit(struct kvm_vcpu *vcpu)
kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
kvm_run->fail_entry.hardware_entry_failure_reason
= svm->vmcb->control.exit_code;
pr_err("KVM: FAILED VMRUN WITH VMCB:\n");
dump_vmcb(vcpu);
return 0;
}
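
Because the new parameter is 0644, it can also be flipped at runtime via sysfs; a small sketch of opting back in to the dumps (the path follows the usual module-parameter layout for kvm-amd, stated here as an assumption):

#include <stdio.h>

/* Sketch: re-enable VMCB dumps, which the change above made opt-in. */
static int enable_invalid_vmcb_dumps(void)
{
    FILE *f = fopen("/sys/module/kvm_amd/parameters/dump_invalid_vmcb", "w");

    if (!f)
        return -1;
    fputs("1\n", f);
    return fclose(f);
}
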