Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull more KVM updates from Paolo Bonzini:
 "Mostly bugfixes, but also:

   - s390 support for KVM selftests

   - LAPIC timer offloading to housekeeping CPUs

   - Extend an s390 optimization for overcommitted hosts to all
     architectures

   - Debugging cleanups and improvements"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (25 commits)
  KVM: x86: Add fixed counters to PMU filter
  KVM: nVMX: do not use dangling shadow VMCS after guest reset
  KVM: VMX: dump VMCS on failed entry
  KVM: x86/vPMU: refine kvm_pmu err msg when event creation failed
  KVM: s390: Use kvm_vcpu_wake_up in kvm_s390_vcpu_wakeup
  KVM: Boost vCPUs that are delivering interrupts
  KVM: selftests: Remove superfluous define from vmx.c
  KVM: SVM: Fix detection of AMD Errata 1096
  KVM: LAPIC: Inject timer interrupt via posted interrupt
  KVM: LAPIC: Make lapic timer unpinned
  KVM: x86/vPMU: reset pmc->counter to 0 for pmu fixed_counters
  KVM: nVMX: Ignore segment base for VMX memory operand when segment not FS or GS
  kvm: x86: ioapic and apic debug macros cleanup
  kvm: x86: some tsc debug cleanup
  kvm: vmx: fix coccinelle warnings
  x86: kvm: avoid constant-conversion warning
  x86: kvm: avoid -Wsometimes-uninitized warning
  KVM: x86: expose AVX512_BF16 feature to guest
  KVM: selftests: enable pgste option for the linker on s390
  KVM: selftests: Move kvm_create_max_vcpus test to generic code
  ...
torvalds committed Jul 20, 2019
2 parents f65420d + 30cd860 commit 07ab9d5
Showing 31 changed files with 723 additions and 232 deletions.
15 changes: 10 additions & 5 deletions Documentation/virtual/kvm/api.txt
@@ -4090,17 +4090,22 @@ Parameters: struct kvm_pmu_event_filter (in)
Returns: 0 on success, -1 on error

struct kvm_pmu_event_filter {
        __u32 action;
        __u32 nevents;
        __u64 events[0];
        __u32 action;
        __u32 nevents;
        __u32 fixed_counter_bitmap;
        __u32 flags;
        __u32 pad[4];
        __u64 events[0];
};

This ioctl restricts the set of PMU events that the guest can program.
The argument holds a list of events which will be allowed or denied.
The eventsel+umask of each event the guest attempts to program is compared
against the events field to determine whether the guest should have access.
This only affects general purpose counters; fixed purpose counters can
be disabled by changing the perfmon CPUID leaf.
The events field only controls general purpose counters; fixed purpose
counters are controlled by the fixed_counter_bitmap.

No flags are defined yet, the field must be zero.

Valid values for 'action':
#define KVM_PMU_EVENT_ALLOW 0
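
For illustration only, a minimal userspace sketch of programming this filter on a VM file descriptor might look like the following. It assumes the KVM_SET_PMU_EVENT_FILTER ioctl and the companion KVM_PMU_EVENT_DENY action from <linux/kvm.h> on a kernel with this change; the two eventsel+umask values are hypothetical placeholders.

/* Sketch: deny-list two example events on an existing VM fd. */
#include <linux/kvm.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>

static int deny_example_events(int vm_fd)
{
        const __u64 events[] = { 0x003c, 0x00c0 };      /* hypothetical eventsel+umask pairs */
        const __u32 nevents = sizeof(events) / sizeof(events[0]);
        struct kvm_pmu_event_filter *filter;
        int ret;

        filter = calloc(1, sizeof(*filter) + sizeof(events));
        if (!filter)
                return -1;

        filter->action = KVM_PMU_EVENT_DENY;    /* block the listed events */
        filter->nevents = nevents;
        filter->fixed_counter_bitmap = 0;       /* fixed counters are governed by this bitmap */
        filter->flags = 0;                      /* no flags are defined yet */
        memcpy(filter->events, events, sizeof(events));

        ret = ioctl(vm_fd, KVM_SET_PMU_EVENT_FILTER, filter);
        free(filter);
        return ret;
}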
2 changes: 2 additions & 0 deletions MAINTAINERS
@@ -8878,6 +8878,8 @@ F: arch/s390/include/asm/gmap.h
F: arch/s390/include/asm/kvm*
F: arch/s390/kvm/
F: arch/s390/mm/gmap.c
F: tools/testing/selftests/kvm/s390x/
F: tools/testing/selftests/kvm/*/s390x/

KERNEL VIRTUAL MACHINE FOR X86 (KVM/x86)
M: Paolo Bonzini <[email protected]>
23 changes: 3 additions & 20 deletions arch/s390/kvm/interrupt.c
@@ -1224,28 +1224,11 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)

void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu)
{
        /*
         * We cannot move this into the if, as the CPU might be already
         * in kvm_vcpu_block without having the waitqueue set (polling)
         */
        vcpu->valid_wakeup = true;
        kvm_vcpu_wake_up(vcpu);

        /*
         * This is mostly to document, that the read in swait_active could
         * be moved before other stores, leading to subtle races.
         * All current users do not store or use an atomic like update
         */
        smp_mb__after_atomic();
        if (swait_active(&vcpu->wq)) {
                /*
                 * The vcpu gave up the cpu voluntarily, mark it as a good
                 * yield-candidate.
                 */
                vcpu->preempted = true;
                swake_up_one(&vcpu->wq);
                vcpu->stat.halt_wakeup++;
        }
        /*
         * The VCPU might not be sleeping but is executing the VSIE. Let's
         * The VCPU might not be sleeping but rather executing VSIE. Let's
         * kick it, so it leaves the SIE to process the request.
         */
        kvm_s390_vsie_kick(vcpu);
9 changes: 6 additions & 3 deletions arch/x86/include/uapi/asm/kvm.h
@@ -435,9 +435,12 @@ struct kvm_nested_state {

/* for KVM_CAP_PMU_EVENT_FILTER */
struct kvm_pmu_event_filter {
        __u32 action;
        __u32 nevents;
        __u64 events[0];
        __u32 action;
        __u32 nevents;
        __u32 fixed_counter_bitmap;
        __u32 flags;
        __u32 pad[4];
        __u64 events[0];
};

#define KVM_PMU_EVENT_ALLOW 0
12 changes: 11 additions & 1 deletion arch/x86/kvm/cpuid.c
@@ -368,9 +368,13 @@ static inline void do_cpuid_7_mask(struct kvm_cpuid_entry2 *entry, int index)
                F(SPEC_CTRL_SSBD) | F(ARCH_CAPABILITIES) | F(INTEL_STIBP) |
                F(MD_CLEAR);

        /* cpuid 7.1.eax */
        const u32 kvm_cpuid_7_1_eax_x86_features =
                F(AVX512_BF16);

        switch (index) {
        case 0:
                entry->eax = 0;
                entry->eax = min(entry->eax, 1u);
                entry->ebx &= kvm_cpuid_7_0_ebx_x86_features;
                cpuid_mask(&entry->ebx, CPUID_7_0_EBX);
                /* TSC_ADJUST is emulated */
@@ -394,6 +398,12 @@ static inline void do_cpuid_7_mask(struct kvm_cpuid_entry2 *entry, int index)
                 */
                entry->edx |= F(ARCH_CAPABILITIES);
                break;
        case 1:
                entry->eax &= kvm_cpuid_7_1_eax_x86_features;
                entry->ebx = 0;
                entry->ecx = 0;
                entry->edx = 0;
                break;
        default:
                WARN_ON_ONCE(1);
                entry->eax = 0;
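
As an aside, a guest can confirm the newly enumerated feature by querying CPUID leaf 7, subleaf 1. A minimal sketch, assuming the conventional AVX512_BF16 encoding (EAX bit 5 of that subleaf) and the __get_cpuid_count() helper from GCC/Clang's <cpuid.h>:

/* Guest-side check, illustrative only. */
#include <cpuid.h>
#include <stdio.h>

int main(void)
{
        unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;

        /* Leaf 7, subleaf 1; AVX512_BF16 is assumed to be EAX bit 5. */
        if (__get_cpuid_count(7, 1, &eax, &ebx, &ecx, &edx) && (eax & (1u << 5)))
                printf("AVX512_BF16 available\n");
        else
                printf("AVX512_BF16 not available\n");
        return 0;
}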
20 changes: 9 additions & 11 deletions arch/x86/kvm/hyperv.c
@@ -1594,7 +1594,7 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
{
        u64 param, ingpa, outgpa, ret = HV_STATUS_SUCCESS;
        uint16_t code, rep_idx, rep_cnt;
        bool fast, longmode, rep;
        bool fast, rep;

        /*
         * hypercall generates UD from non zero cpl and real mode
@@ -1605,23 +1605,21 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
                return 1;
        }

        longmode = is_64_bit_mode(vcpu);

        if (!longmode) {
#ifdef CONFIG_X86_64
        if (is_64_bit_mode(vcpu)) {
                param = kvm_rcx_read(vcpu);
                ingpa = kvm_rdx_read(vcpu);
                outgpa = kvm_r8_read(vcpu);
        } else
#endif
        {
                param = ((u64)kvm_rdx_read(vcpu) << 32) |
                        (kvm_rax_read(vcpu) & 0xffffffff);
                ingpa = ((u64)kvm_rbx_read(vcpu) << 32) |
                        (kvm_rcx_read(vcpu) & 0xffffffff);
                outgpa = ((u64)kvm_rdi_read(vcpu) << 32) |
                        (kvm_rsi_read(vcpu) & 0xffffffff);
        }
#ifdef CONFIG_X86_64
        else {
                param = kvm_rcx_read(vcpu);
                ingpa = kvm_rdx_read(vcpu);
                outgpa = kvm_r8_read(vcpu);
        }
#endif

        code = param & 0xffff;
        fast = !!(param & HV_HYPERCALL_FAST_BIT);
15 changes: 0 additions & 15 deletions arch/x86/kvm/ioapic.c
@@ -45,11 +45,6 @@
#include "lapic.h"
#include "irq.h"

#if 0
#define ioapic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg)
#else
#define ioapic_debug(fmt, arg...)
#endif
static int ioapic_service(struct kvm_ioapic *vioapic, int irq,
                          bool line_status);

@@ -294,7 +289,6 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
        default:
                index = (ioapic->ioregsel - 0x10) >> 1;

                ioapic_debug("change redir index %x val %x\n", index, val);
                if (index >= IOAPIC_NUM_PINS)
                        return;
                e = &ioapic->redirtbl[index];
@@ -343,12 +337,6 @@ static int ioapic_service(struct kvm_ioapic *ioapic, int irq, bool line_status)
            entry->fields.remote_irr))
                return -1;

        ioapic_debug("dest=%x dest_mode=%x delivery_mode=%x "
                     "vector=%x trig_mode=%x\n",
                     entry->fields.dest_id, entry->fields.dest_mode,
                     entry->fields.delivery_mode, entry->fields.vector,
                     entry->fields.trig_mode);

        irqe.dest_id = entry->fields.dest_id;
        irqe.vector = entry->fields.vector;
        irqe.dest_mode = entry->fields.dest_mode;
@@ -515,7 +503,6 @@ static int ioapic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
        if (!ioapic_in_range(ioapic, addr))
                return -EOPNOTSUPP;

        ioapic_debug("addr %lx\n", (unsigned long)addr);
        ASSERT(!(addr & 0xf)); /* check alignment */

        addr &= 0xff;
@@ -558,8 +545,6 @@ static int ioapic_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
        if (!ioapic_in_range(ioapic, addr))
                return -EOPNOTSUPP;

        ioapic_debug("ioapic_mmio_write addr=%p len=%d val=%p\n",
                     (void*)addr, len, val);
        ASSERT(!(addr & 0xf)); /* check alignment */

        switch (len) {