Merge tag 'kvm-arm-for-4-7-take2' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into kvm-next

KVM/ARM Changes for v4.7 take 2

"The GIC is dead; Long live the GIC"

This set of changes includes the new vgic, a reimplementation of our
horribly broken legacy vgic implementation.  The two implementations
will live side-by-side (with the new one as the configured default) for
one kernel release, and then we'll remove the legacy one.

Also fixes a non-critical issue with virtual abort injection to guests.
bonzini committed May 24, 2016
2 parents 9842df6 + 35a2d58 commit 44bcc92
Showing 34 changed files with 4,404 additions and 156 deletions.
6 changes: 6 additions & 0 deletions arch/arm/include/asm/kvm_host.h
@@ -41,6 +41,8 @@

#define KVM_MAX_VCPUS VGIC_V2_MAX_CPUS

#define KVM_REQ_VCPU_EXIT 8

u32 *kvm_vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num, u32 mode);
int __attribute_const__ kvm_target_cpu(void);
int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
@@ -226,6 +228,10 @@ static inline void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,

struct kvm_vcpu *kvm_arm_get_running_vcpu(void);
struct kvm_vcpu __percpu **kvm_get_running_vcpus(void);
void kvm_arm_halt_guest(struct kvm *kvm);
void kvm_arm_resume_guest(struct kvm *kvm);
void kvm_arm_halt_vcpu(struct kvm_vcpu *vcpu);
void kvm_arm_resume_vcpu(struct kvm_vcpu *vcpu);

int kvm_arm_copy_coproc_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
unsigned long kvm_arm_num_coproc_regs(struct kvm_vcpu *vcpu);
3 changes: 3 additions & 0 deletions arch/arm/include/asm/kvm_mmio.h
@@ -28,6 +28,9 @@ struct kvm_decode {
bool sign_extend;
};

void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data);
unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len);

int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
phys_addr_t fault_ipa);
7 changes: 7 additions & 0 deletions arch/arm/kvm/Kconfig
@@ -46,6 +46,13 @@ config KVM_ARM_HOST
---help---
Provides host support for ARM processors.

config KVM_NEW_VGIC
bool "New VGIC implementation"
depends on KVM
default y
---help---
uses the new VGIC implementation

source drivers/vhost/Kconfig

endif # VIRTUALIZATION
11 changes: 11 additions & 0 deletions arch/arm/kvm/Makefile
@@ -21,7 +21,18 @@ obj-$(CONFIG_KVM_ARM_HOST) += hyp/
obj-y += kvm-arm.o init.o interrupts.o
obj-y += arm.o handle_exit.o guest.o mmu.o emulate.o reset.o
obj-y += coproc.o coproc_a15.o coproc_a7.o mmio.o psci.o perf.o

ifeq ($(CONFIG_KVM_NEW_VGIC),y)
obj-y += $(KVM)/arm/vgic/vgic.o
obj-y += $(KVM)/arm/vgic/vgic-init.o
obj-y += $(KVM)/arm/vgic/vgic-irqfd.o
obj-y += $(KVM)/arm/vgic/vgic-v2.o
obj-y += $(KVM)/arm/vgic/vgic-mmio.o
obj-y += $(KVM)/arm/vgic/vgic-mmio-v2.o
obj-y += $(KVM)/arm/vgic/vgic-kvm-device.o
else
obj-y += $(KVM)/arm/vgic.o
obj-y += $(KVM)/arm/vgic-v2.o
obj-y += $(KVM)/arm/vgic-v2-emul.o
endif
obj-y += $(KVM)/arm/arch_timer.o
37 changes: 22 additions & 15 deletions arch/arm/kvm/arm.c
@@ -459,7 +459,7 @@ static void update_vttbr(struct kvm *kvm)
static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
{
struct kvm *kvm = vcpu->kvm;
int ret;
int ret = 0;

if (likely(vcpu->arch.has_run_once))
return 0;
@@ -482,40 +482,47 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
* interrupts from the virtual timer with a userspace gic.
*/
if (irqchip_in_kernel(kvm) && vgic_initialized(kvm))
kvm_timer_enable(kvm);
ret = kvm_timer_enable(vcpu);

return 0;
return ret;
}

bool kvm_arch_intc_initialized(struct kvm *kvm)
{
return vgic_initialized(kvm);
}

static void kvm_arm_halt_guest(struct kvm *kvm) __maybe_unused;
static void kvm_arm_resume_guest(struct kvm *kvm) __maybe_unused;

static void kvm_arm_halt_guest(struct kvm *kvm)
void kvm_arm_halt_guest(struct kvm *kvm)
{
int i;
struct kvm_vcpu *vcpu;

kvm_for_each_vcpu(i, vcpu, kvm)
vcpu->arch.pause = true;
force_vm_exit(cpu_all_mask);
kvm_make_all_cpus_request(kvm, KVM_REQ_VCPU_EXIT);
}

void kvm_arm_halt_vcpu(struct kvm_vcpu *vcpu)
{
vcpu->arch.pause = true;
kvm_vcpu_kick(vcpu);
}

static void kvm_arm_resume_guest(struct kvm *kvm)
void kvm_arm_resume_vcpu(struct kvm_vcpu *vcpu)
{
struct swait_queue_head *wq = kvm_arch_vcpu_wq(vcpu);

vcpu->arch.pause = false;
swake_up(wq);
}

void kvm_arm_resume_guest(struct kvm *kvm)
{
int i;
struct kvm_vcpu *vcpu;

kvm_for_each_vcpu(i, vcpu, kvm) {
struct swait_queue_head *wq = kvm_arch_vcpu_wq(vcpu);

vcpu->arch.pause = false;
swake_up(wq);
}
kvm_for_each_vcpu(i, vcpu, kvm)
kvm_arm_resume_vcpu(vcpu);
}

static void vcpu_sleep(struct kvm_vcpu *vcpu)
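The arm.c hunk above turns the guest halt/resume helpers into exported primitives and adds per-VCPU variants: halting sets vcpu->arch.pause and kicks the vCPU out of the guest, resuming clears the flag and wakes the vCPU's wait queue. The fragment below is a minimal userspace analogue of that pause/wake pattern, using pthreads purely for illustration; the names and the pthread machinery are assumptions of this sketch, not the kernel implementation (which uses swait queues, kvm_vcpu_kick() and the new KVM_REQ_VCPU_EXIT request).

/*
 * Userspace analogue of kvm_arm_halt_vcpu()/kvm_arm_resume_vcpu():
 * "halt" sets a pause flag, "resume" clears it and wakes the worker.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

struct worker {
	pthread_mutex_t lock;
	pthread_cond_t wake;
	bool pause;			/* analogue of vcpu->arch.pause */
	bool stop;
};

static void *worker_run(void *arg)
{
	struct worker *w = arg;

	for (;;) {
		pthread_mutex_lock(&w->lock);
		while (w->pause && !w->stop)	/* analogue of vcpu_sleep() */
			pthread_cond_wait(&w->wake, &w->lock);
		bool stop = w->stop;
		pthread_mutex_unlock(&w->lock);
		if (stop)
			return NULL;
		usleep(1000);			/* stand-in for running the guest */
	}
}

static void halt_worker(struct worker *w)	/* ~ kvm_arm_halt_vcpu() */
{
	pthread_mutex_lock(&w->lock);
	w->pause = true;			/* the kernel also kicks the vCPU */
	pthread_mutex_unlock(&w->lock);
}

static void resume_worker(struct worker *w)	/* ~ kvm_arm_resume_vcpu() */
{
	pthread_mutex_lock(&w->lock);
	w->pause = false;
	pthread_cond_signal(&w->wake);		/* analogue of swake_up() */
	pthread_mutex_unlock(&w->lock);
}

int main(void)
{
	static struct worker w = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.wake = PTHREAD_COND_INITIALIZER,
	};
	pthread_t tid;

	pthread_create(&tid, NULL, worker_run, &w);
	halt_worker(&w);
	puts("pause requested");
	resume_worker(&w);
	puts("resume requested");

	pthread_mutex_lock(&w.lock);
	w.stop = true;
	pthread_cond_signal(&w.wake);
	pthread_mutex_unlock(&w.lock);
	pthread_join(tid, NULL);
	return 0;
}

In the kernel code above, kvm_arm_resume_guest() is just a loop applying the per-vCPU helper, while kvm_arm_halt_guest() sets the flag for every vCPU and then issues a single KVM_REQ_VCPU_EXIT request to all of them.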
24 changes: 12 additions & 12 deletions arch/arm/kvm/mmio.c
@@ -23,7 +23,7 @@

#include "trace.h"

static void mmio_write_buf(char *buf, unsigned int len, unsigned long data)
void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data)
{
void *datap = NULL;
union {
@@ -55,7 +55,7 @@ static void mmio_write_buf(char *buf, unsigned int len, unsigned long data)
memcpy(buf, datap, len);
}

static unsigned long mmio_read_buf(char *buf, unsigned int len)
unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len)
{
unsigned long data = 0;
union {
@@ -66,7 +66,7 @@ static unsigned long mmio_read_buf(char *buf, unsigned int len)

switch (len) {
case 1:
data = buf[0];
data = *(u8 *)buf;
break;
case 2:
memcpy(&tmp.hword, buf, len);
@@ -87,11 +87,10 @@ static unsigned long mmio_read_buf(char *buf, unsigned int len)

/**
* kvm_handle_mmio_return -- Handle MMIO loads after user space emulation
* or in-kernel IO emulation
*
* @vcpu: The VCPU pointer
* @run: The VCPU run struct containing the mmio data
*
* This should only be called after returning from userspace for MMIO load
* emulation.
*/
int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
@@ -104,7 +103,7 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
if (len > sizeof(unsigned long))
return -EINVAL;

data = mmio_read_buf(run->mmio.data, len);
data = kvm_mmio_read_buf(run->mmio.data, len);

if (vcpu->arch.mmio_decode.sign_extend &&
len < sizeof(unsigned long)) {
@@ -190,7 +189,7 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
len);

trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, len, fault_ipa, data);
mmio_write_buf(data_buf, len, data);
kvm_mmio_write_buf(data_buf, len, data);

ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, fault_ipa, len,
data_buf);
@@ -206,18 +205,19 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
run->mmio.is_write = is_write;
run->mmio.phys_addr = fault_ipa;
run->mmio.len = len;
if (is_write)
memcpy(run->mmio.data, data_buf, len);

if (!ret) {
/* We handled the access successfully in the kernel. */
if (!is_write)
memcpy(run->mmio.data, data_buf, len);
vcpu->stat.mmio_exit_kernel++;
kvm_handle_mmio_return(vcpu, run);
return 1;
} else {
vcpu->stat.mmio_exit_user++;
}

if (is_write)
memcpy(run->mmio.data, data_buf, len);
vcpu->stat.mmio_exit_user++;
run->exit_reason = KVM_EXIT_MMIO;
return 0;
}
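kvm_mmio_read_buf() and kvm_mmio_write_buf(), renamed and exported here so code outside mmio.c (notably the new vgic's MMIO handling) can reuse them, marshal a 1-, 2-, 4- or 8-byte access between the kvm_run mmio data buffer and an unsigned long. The snippet below is a small userspace sketch of the same packing idea; the fixed-width temporaries and the round-trip test in main() are assumptions of this sketch, while the kernel functions go through a union of byte/hword/word fields as shown in the hunk above.

/*
 * Userspace sketch of the buffer packing behind kvm_mmio_write_buf()
 * and kvm_mmio_read_buf(): an access of len bytes is copied through a
 * fixed-width integer of that size, so only the low len bytes of the
 * value survive the round trip.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void mmio_write_buf(void *buf, unsigned int len, unsigned long data)
{
	switch (len) {
	case 1: { uint8_t  v = (uint8_t)data;  memcpy(buf, &v, 1); break; }
	case 2: { uint16_t v = (uint16_t)data; memcpy(buf, &v, 2); break; }
	case 4: { uint32_t v = (uint32_t)data; memcpy(buf, &v, 4); break; }
	case 8: { uint64_t v = (uint64_t)data; memcpy(buf, &v, 8); break; }
	}
}

static unsigned long mmio_read_buf(const void *buf, unsigned int len)
{
	switch (len) {
	case 1: { uint8_t  v; memcpy(&v, buf, 1); return v; }
	case 2: { uint16_t v; memcpy(&v, buf, 2); return v; }
	case 4: { uint32_t v; memcpy(&v, buf, 4); return v; }
	case 8: { uint64_t v; memcpy(&v, buf, 8); return (unsigned long)v; }
	}
	return 0;
}

int main(void)
{
	/* assumes a 64-bit unsigned long, as on arm64 */
	unsigned char buf[8];
	unsigned int lens[] = { 1, 2, 4, 8 };

	for (unsigned int i = 0; i < 4; i++) {
		mmio_write_buf(buf, lens[i], 0x1122334455667788UL);
		printf("len %u -> 0x%lx\n", lens[i], mmio_read_buf(buf, lens[i]));
	}
	return 0;
}

The value printed for each length is simply the original data truncated to that access size.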
6 changes: 6 additions & 0 deletions arch/arm64/include/asm/kvm_host.h
@@ -43,6 +43,8 @@

#define KVM_VCPU_MAX_FEATURES 4

#define KVM_REQ_VCPU_EXIT 8

int __attribute_const__ kvm_target_cpu(void);
int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
int kvm_arch_dev_ioctl_check_extension(long ext);
@@ -325,6 +327,10 @@ static inline void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,

struct kvm_vcpu *kvm_arm_get_running_vcpu(void);
struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void);
void kvm_arm_halt_guest(struct kvm *kvm);
void kvm_arm_resume_guest(struct kvm *kvm);
void kvm_arm_halt_vcpu(struct kvm_vcpu *vcpu);
void kvm_arm_resume_vcpu(struct kvm_vcpu *vcpu);

u64 __kvm_call_hyp(void *hypfn, ...);
#define kvm_call_hyp(f, ...) __kvm_call_hyp(kvm_ksym_ref(f), ##__VA_ARGS__)
3 changes: 3 additions & 0 deletions arch/arm64/include/asm/kvm_mmio.h
@@ -30,6 +30,9 @@ struct kvm_decode {
bool sign_extend;
};

void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data);
unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len);

int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
phys_addr_t fault_ipa);
7 changes: 7 additions & 0 deletions arch/arm64/kvm/Kconfig
@@ -54,6 +54,13 @@ config KVM_ARM_PMU
Adds support for a virtual Performance Monitoring Unit (PMU) in
virtual machines.

config KVM_NEW_VGIC
bool "New VGIC implementation"
depends on KVM
default y
---help---
uses the new VGIC implementation

source drivers/vhost/Kconfig

endif # VIRTUALIZATION
12 changes: 12 additions & 0 deletions arch/arm64/kvm/Makefile
@@ -20,10 +20,22 @@ kvm-$(CONFIG_KVM_ARM_HOST) += emulate.o inject_fault.o regmap.o
kvm-$(CONFIG_KVM_ARM_HOST) += hyp.o hyp-init.o handle_exit.o
kvm-$(CONFIG_KVM_ARM_HOST) += guest.o debug.o reset.o sys_regs.o sys_regs_generic_v8.o

ifeq ($(CONFIG_KVM_NEW_VGIC),y)
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-init.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-irqfd.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-v2.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-v3.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-mmio.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-mmio-v2.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-mmio-v3.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-kvm-device.o
else
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v2.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v2-emul.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v3.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v3-emul.o
endif
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/arch_timer.o
kvm-$(CONFIG_KVM_ARM_PMU) += $(KVM)/arm/pmu.o
2 changes: 1 addition & 1 deletion arch/arm64/kvm/inject_fault.c
@@ -162,7 +162,7 @@ static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr
esr |= (ESR_ELx_EC_IABT_CUR << ESR_ELx_EC_SHIFT);

if (!is_iabt)
esr |= ESR_ELx_EC_DABT_LOW;
esr |= ESR_ELx_EC_DABT_LOW << ESR_ELx_EC_SHIFT;

vcpu_sys_reg(vcpu, ESR_EL1) = esr | ESR_ELx_FSC_EXTABT;
}
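The inject_fault.c hunk is the virtual-abort fix mentioned in the commit message. inject_abt64() first encodes an instruction-abort exception class (the ESR_ELx_EC_IABT_CUR line shown as context above) and then, for data aborts, ORs in ESR_ELx_EC_DABT_LOW to turn it into the data-abort class; without the shift, that value landed in the low ISS/fault-status bits of ESR_EL1 instead of the EC field at bits [31:26]. The standalone sketch below contrasts the two encodings; the constants are copied from arch/arm64/include/asm/esr.h (with UL suffixes added so the shift is well defined here), and the other ESR bits the real function sets are ignored; both simplifications are assumptions of this sketch.

/*
 * Sketch of the ESR_EL1 value built with and without the EC shift.
 * Constants as in arch/arm64/include/asm/esr.h (UL added for the shift).
 */
#include <stdio.h>

#define ESR_ELx_EC_SHIFT	26
#define ESR_ELx_EC_IABT_CUR	0x21UL
#define ESR_ELx_EC_DABT_LOW	0x24UL
#define ESR_ELx_FSC_EXTABT	0x10UL

int main(void)
{
	unsigned long esr = ESR_ELx_EC_IABT_CUR << ESR_ELx_EC_SHIFT;

	/* old code: EC stays 0x21 (instruction abort), FSC bits become 0x34 */
	unsigned long buggy = (esr | ESR_ELx_EC_DABT_LOW) | ESR_ELx_FSC_EXTABT;

	/* fixed code: EC becomes 0x25 (data abort), FSC stays 0x10 (external abort) */
	unsigned long fixed = (esr | (ESR_ELx_EC_DABT_LOW << ESR_ELx_EC_SHIFT)) |
			      ESR_ELx_FSC_EXTABT;

	printf("buggy ESR_EL1 = 0x%08lx, EC = 0x%02lx\n", buggy, buggy >> ESR_ELx_EC_SHIFT);
	printf("fixed ESR_EL1 = 0x%08lx, EC = 0x%02lx\n", fixed, fixed >> ESR_ELx_EC_SHIFT);
	return 0;
}

Before the fix, an injected data abort was reported to the guest with an instruction-abort class and a corrupted fault status; with the fix, the guest sees a data abort with FSC 0x10, a synchronous external abort.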
11 changes: 4 additions & 7 deletions include/kvm/arm_arch_timer.h
@@ -24,9 +24,6 @@
#include <linux/workqueue.h>

struct arch_timer_kvm {
/* Is the timer enabled */
bool enabled;

/* Virtual offset */
cycle_t cntvoff;
};
@@ -53,15 +50,15 @@ struct arch_timer_cpu {
/* Timer IRQ */
struct kvm_irq_level irq;

/* VGIC mapping */
struct irq_phys_map *map;

/* Active IRQ state caching */
bool active_cleared_last;

/* Is the timer enabled */
bool enabled;
};

int kvm_timer_hyp_init(void);
void kvm_timer_enable(struct kvm *kvm);
int kvm_timer_enable(struct kvm_vcpu *vcpu);
void kvm_timer_init(struct kvm *kvm);
int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu,
const struct kvm_irq_level *irq);
20 changes: 11 additions & 9 deletions include/kvm/arm_vgic.h
@@ -19,6 +19,10 @@
#ifndef __ASM_ARM_KVM_VGIC_H
#define __ASM_ARM_KVM_VGIC_H

#ifdef CONFIG_KVM_NEW_VGIC
#include <kvm/vgic/vgic.h>
#else

#include <linux/kernel.h>
#include <linux/kvm.h>
#include <linux/irqreturn.h>
@@ -158,7 +162,6 @@ struct vgic_io_device {
struct irq_phys_map {
u32 virt_irq;
u32 phys_irq;
u32 irq;
};

struct irq_phys_map_entry {
@@ -305,9 +308,6 @@ struct vgic_cpu {
unsigned long *active_shared;
unsigned long *pend_act_shared;

/* Number of list registers on this CPU */
int nr_lr;

/* CPU vif control registers for world switch */
union {
struct vgic_v2_cpu_if vgic_v2;
@@ -342,17 +342,18 @@ void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu);
int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num,
bool level);
int kvm_vgic_inject_mapped_irq(struct kvm *kvm, int cpuid,
struct irq_phys_map *map, bool level);
unsigned int virt_irq, bool level);
void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg);
int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu);
struct irq_phys_map *kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu,
int virt_irq, int irq);
int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, struct irq_phys_map *map);
bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, struct irq_phys_map *map);
int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, int virt_irq, int phys_irq);
int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int virt_irq);
bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int virt_irq);

#define irqchip_in_kernel(k) (!!((k)->arch.vgic.in_kernel))
#define vgic_initialized(k) (!!((k)->arch.vgic.nr_cpus))
#define vgic_ready(k) ((k)->arch.vgic.ready)
#define vgic_valid_spi(k, i) (((i) >= VGIC_NR_PRIVATE_IRQS) && \
((i) < (k)->arch.vgic.nr_irqs))

int vgic_v2_probe(const struct gic_kvm_info *gic_kvm_info,
const struct vgic_ops **ops,
@@ -370,4 +371,5 @@ static inline int vgic_v3_probe(const struct gic_kvm_info *gic_kvm_info,
}
#endif

#endif /* old VGIC include */
#endif
Diffs for the remaining changed files are not shown.
