Merge tag 'kvmarm-fixes-5.11-1' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into HEAD

KVM/arm64 fixes for 5.11, take #1

- VM init cleanups
- PSCI relay cleanups
- Kill CONFIG_KVM_ARM_PMU
- Fixup __init annotations
- Fixup reg_to_encoding()
- Fix spurious PMCR_EL0 access
bonzini committed Jan 8, 2021
2 parents 647daca + 45ba7b1 commit 774206b
Showing 19 changed files with 122 additions and 118 deletions.
9 changes: 7 additions & 2 deletions Documentation/virt/kvm/api.rst
@@ -392,9 +392,14 @@ This ioctl is obsolete and has been removed.
 
 Errors:
 
-  =====      =============================
+  =======    ==============================================================
   EINTR      an unmasked signal is pending
-  =====      =============================
+  ENOEXEC    the vcpu hasn't been initialized or the guest tried to execute
+             instructions from device memory (arm64)
+  ENOSYS     data abort outside memslots with no syndrome info and
+             KVM_CAP_ARM_NISV_TO_USER not enabled (arm64)
+  EPERM      SVE feature set but not finalized (arm64)
+  =======    ==============================================================
 
 This ioctl is used to run a guest virtual cpu. While there are no
 explicit parameters, there is an implicit parameter block that can be
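For context, a minimal user-space KVM_RUN loop that distinguishes the errors documented above could look like the sketch below. This is illustrative, not part of the patch: run_vcpu() is a hypothetical helper, and vcpu_fd is assumed to come from a KVM_CREATE_VCPU ioctl.

    #include <errno.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    static int run_vcpu(int vcpu_fd)
    {
            for (;;) {
                    if (ioctl(vcpu_fd, KVM_RUN, NULL) < 0) {
                            if (errno == EINTR)
                                    continue;       /* unmasked signal pending: just retry */
                            return -errno;          /* e.g. ENOEXEC/ENOSYS/EPERM on arm64 */
                    }
                    /* ... inspect the mmap()ed struct kvm_run and handle the exit ... */
            }
    }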
23 changes: 23 additions & 0 deletions arch/arm64/include/asm/kvm_host.h
@@ -17,6 +17,7 @@
 #include <linux/jump_label.h>
 #include <linux/kvm_types.h>
 #include <linux/percpu.h>
+#include <linux/psci.h>
 #include <asm/arch_gicv3.h>
 #include <asm/barrier.h>
 #include <asm/cpufeature.h>
@@ -240,6 +241,28 @@ struct kvm_host_data {
 	struct kvm_pmu_events pmu_events;
 };
 
+struct kvm_host_psci_config {
+	/* PSCI version used by host. */
+	u32 version;
+
+	/* Function IDs used by host if version is v0.1. */
+	struct psci_0_1_function_ids function_ids_0_1;
+
+	bool psci_0_1_cpu_suspend_implemented;
+	bool psci_0_1_cpu_on_implemented;
+	bool psci_0_1_cpu_off_implemented;
+	bool psci_0_1_migrate_implemented;
+};
+
+extern struct kvm_host_psci_config kvm_nvhe_sym(kvm_host_psci_config);
+#define kvm_host_psci_config CHOOSE_NVHE_SYM(kvm_host_psci_config)
+
+extern s64 kvm_nvhe_sym(hyp_physvirt_offset);
+#define hyp_physvirt_offset CHOOSE_NVHE_SYM(hyp_physvirt_offset)
+
+extern u64 kvm_nvhe_sym(hyp_cpu_logical_map)[NR_CPUS];
+#define hyp_cpu_logical_map CHOOSE_NVHE_SYM(hyp_cpu_logical_map)
+
 struct vcpu_reset_state {
 	unsigned long pc;
 	unsigned long r0;
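The extern/#define pairs added above follow the nVHE symbol-aliasing idiom: objects linked into the nVHE hyp image get a prefixed symbol name, and the host refers to them through that prefix. A simplified sketch of the machinery (the real definitions live in asm/kvm_asm.h and also cover per-CPU and VHE symbols):

    /* Every symbol in the nVHE hyp object is prefixed at build time. */
    #define kvm_nvhe_sym(sym)	__kvm_nvhe_##sym

    /* For an nVHE-only symbol, CHOOSE_NVHE_SYM() picks the prefixed name,
     * so after the #define above a plain use of
     *
     *     hyp_physvirt_offset
     *
     * resolves to the hyp image's __kvm_nvhe_hyp_physvirt_offset, while the
     * rest of the kernel keeps the natural, unprefixed spelling. */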
2 changes: 1 addition & 1 deletion arch/arm64/kernel/cpufeature.c
@@ -2558,7 +2558,7 @@ static void verify_hyp_capabilities(void)
 	int parange, ipa_max;
 	unsigned int safe_vmid_bits, vmid_bits;
 
-	if (!IS_ENABLED(CONFIG_KVM) || !IS_ENABLED(CONFIG_KVM_ARM_HOST))
+	if (!IS_ENABLED(CONFIG_KVM))
 		return;
 
 	safe_mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
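The dropped CONFIG_KVM_ARM_HOST test was dead code: that option no longer exists in Kconfig, and IS_ENABLED() evaluates to 0 for an absent option, so the early return always fired and the capability checks below it never ran. Illustrative semantics (standard kconfig.h behaviour):

    /* IS_ENABLED(CONFIG_FOO) is 1 iff CONFIG_FOO=y or CONFIG_FOO=m, else 0.
     * Hence the old check
     *
     *     if (!IS_ENABLED(CONFIG_KVM) || !IS_ENABLED(CONFIG_KVM_ARM_HOST))
     *             return;
     *
     * reduced to an unconditional return once KVM_ARM_HOST was removed. */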
2 changes: 1 addition & 1 deletion arch/arm64/kernel/smp.c
@@ -434,7 +434,7 @@ static void __init hyp_mode_check(void)
 			   "CPU: CPUs started in inconsistent modes");
 	else
 		pr_info("CPU: All CPU(s) started at EL1\n");
-	if (IS_ENABLED(CONFIG_KVM))
+	if (IS_ENABLED(CONFIG_KVM) && !is_kernel_in_hyp_mode())
 		kvm_compute_layout();
 }
 
8 changes: 0 additions & 8 deletions arch/arm64/kvm/Kconfig
@@ -49,14 +49,6 @@ if KVM
 
 source "virt/kvm/Kconfig"
 
-config KVM_ARM_PMU
-	bool "Virtual Performance Monitoring Unit (PMU) support"
-	depends on HW_PERF_EVENTS
-	default y
-	help
-	  Adds support for a virtual Performance Monitoring Unit (PMU) in
-	  virtual machines.
-
 endif # KVM
 
 endif # VIRTUALIZATION
2 changes: 1 addition & 1 deletion arch/arm64/kvm/Makefile
@@ -24,4 +24,4 @@ kvm-y := $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o $(KVM)/eventfd.o \
 	 vgic/vgic-mmio-v3.o vgic/vgic-kvm-device.o \
 	 vgic/vgic-its.o vgic/vgic-debug.o
 
-kvm-$(CONFIG_KVM_ARM_PMU) += pmu-emul.o
+kvm-$(CONFIG_HW_PERF_EVENTS) += pmu-emul.o
7 changes: 4 additions & 3 deletions arch/arm64/kvm/arch_timer.c
@@ -1129,9 +1129,10 @@ int kvm_timer_enable(struct kvm_vcpu *vcpu)
 	if (!irqchip_in_kernel(vcpu->kvm))
 		goto no_vgic;
 
-	if (!vgic_initialized(vcpu->kvm))
-		return -ENODEV;
-
+	/*
+	 * At this stage, we have the guarantee that the vgic is both
+	 * available and initialized.
+	 */
 	if (!timer_irqs_are_valid(vcpu)) {
 		kvm_debug("incorrectly configured timer irqs\n");
 		return -EINVAL;
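Dropping the -ENODEV check is safe because kvm_vcpu_first_run_init() now calls kvm_vgic_map_resources() unconditionally (see the arm.c hunk below), and that function performs its own check-then-initialize under the kvm lock. A sketch of the pattern with simplified names -- vgic_init_and_map() is a hypothetical helper, the real code lives in vgic/vgic-init.c:

    static int map_resources_sketch(struct kvm *kvm)
    {
            int ret = 0;

            if (likely(vgic_ready(kvm)))    /* unlocked fast path */
                    return 0;

            mutex_lock(&kvm->lock);
            if (!vgic_ready(kvm))           /* re-check under the lock */
                    ret = vgic_init_and_map(kvm);   /* hypothetical helper */
            mutex_unlock(&kvm->lock);

            return ret;
    }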
32 changes: 18 additions & 14 deletions arch/arm64/kvm/arm.c
@@ -65,10 +65,6 @@ static bool vgic_present;
 static DEFINE_PER_CPU(unsigned char, kvm_arm_hardware_enabled);
 DEFINE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
 
-extern u64 kvm_nvhe_sym(__cpu_logical_map)[NR_CPUS];
-extern u32 kvm_nvhe_sym(kvm_host_psci_version);
-extern struct psci_0_1_function_ids kvm_nvhe_sym(kvm_host_psci_0_1_function_ids);
-
 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
 {
 	return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
@@ -584,11 +580,9 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
 		 * Map the VGIC hardware resources before running a vcpu the
 		 * first time on this VM.
 		 */
-		if (unlikely(!vgic_ready(kvm))) {
-			ret = kvm_vgic_map_resources(kvm);
-			if (ret)
-				return ret;
-		}
+		ret = kvm_vgic_map_resources(kvm);
+		if (ret)
+			return ret;
 	} else {
 		/*
 		 * Tell the rest of the code that there are userspace irqchip
@@ -1574,12 +1568,12 @@ static struct notifier_block hyp_init_cpu_pm_nb = {
 	.notifier_call = hyp_init_cpu_pm_notifier,
 };
 
-static void __init hyp_cpu_pm_init(void)
+static void hyp_cpu_pm_init(void)
 {
 	if (!is_protected_kvm_enabled())
 		cpu_pm_register_notifier(&hyp_init_cpu_pm_nb);
 }
-static void __init hyp_cpu_pm_exit(void)
+static void hyp_cpu_pm_exit(void)
 {
 	if (!is_protected_kvm_enabled())
 		cpu_pm_unregister_notifier(&hyp_init_cpu_pm_nb);
@@ -1604,9 +1598,12 @@ static void init_cpu_logical_map(void)
 	 * allow any other CPUs from the `possible` set to boot.
 	 */
 	for_each_online_cpu(cpu)
-		kvm_nvhe_sym(__cpu_logical_map)[cpu] = cpu_logical_map(cpu);
+		hyp_cpu_logical_map[cpu] = cpu_logical_map(cpu);
 }
 
+#define init_psci_0_1_impl_state(config, what)	\
+	config.psci_0_1_ ## what ## _implemented = psci_ops.what
+
 static bool init_psci_relay(void)
 {
 	/*
@@ -1618,8 +1615,15 @@ static bool init_psci_relay(void)
 		return false;
 	}
 
-	kvm_nvhe_sym(kvm_host_psci_version) = psci_ops.get_version();
-	kvm_nvhe_sym(kvm_host_psci_0_1_function_ids) = get_psci_0_1_function_ids();
+	kvm_host_psci_config.version = psci_ops.get_version();
+
+	if (kvm_host_psci_config.version == PSCI_VERSION(0, 1)) {
+		kvm_host_psci_config.function_ids_0_1 = get_psci_0_1_function_ids();
+		init_psci_0_1_impl_state(kvm_host_psci_config, cpu_suspend);
+		init_psci_0_1_impl_state(kvm_host_psci_config, cpu_on);
+		init_psci_0_1_impl_state(kvm_host_psci_config, cpu_off);
+		init_psci_0_1_impl_state(kvm_host_psci_config, migrate);
+	}
 	return true;
 }
 
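The ## token pasting in init_psci_0_1_impl_state() lets a single macro populate all four v0.1 capability flags. For example:

    init_psci_0_1_impl_state(kvm_host_psci_config, cpu_on);

    /* expands to: */

    kvm_host_psci_config.psci_0_1_cpu_on_implemented = psci_ops.cpu_on;

Assigning the psci_ops function pointer to a bool records whether the callback is non-NULL, i.e. whether the firmware actually implements that function.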
9 changes: 9 additions & 0 deletions arch/arm64/kvm/hyp/include/hyp/adjust_pc.h
@@ -59,4 +59,13 @@ static inline void __adjust_pc(struct kvm_vcpu *vcpu)
 	}
 }
 
+/*
+ * Skip an instruction while host sysregs are live.
+ * Assumes host is always 64-bit.
+ */
+static inline void kvm_skip_host_instr(void)
+{
+	write_sysreg_el2(read_sysreg_el2(SYS_ELR) + 4, SYS_ELR);
+}
+
 #endif
12 changes: 2 additions & 10 deletions arch/arm64/kvm/hyp/nvhe/hyp-main.c
@@ -157,11 +157,6 @@ static void default_host_smc_handler(struct kvm_cpu_context *host_ctxt)
 	__kvm_hyp_host_forward_smc(host_ctxt);
 }
 
-static void skip_host_instruction(void)
-{
-	write_sysreg_el2(read_sysreg_el2(SYS_ELR) + 4, SYS_ELR);
-}
-
 static void handle_host_smc(struct kvm_cpu_context *host_ctxt)
 {
 	bool handled;
@@ -170,11 +165,8 @@ static void handle_host_smc(struct kvm_cpu_context *host_ctxt)
 	if (!handled)
 		default_host_smc_handler(host_ctxt);
 
-	/*
-	 * Unlike HVC, the return address of an SMC is the instruction's PC.
-	 * Move the return address past the instruction.
-	 */
-	skip_host_instruction();
+	/* SMC was trapped, move ELR past the current PC. */
+	kvm_skip_host_instr();
 }
 
 void handle_trap(struct kvm_cpu_context *host_ctxt)
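The terser comment still rests on an architectural asymmetry worth spelling out (background, not part of the patch):

    /*
     * For an HVC from EL1, ELR_EL2 already points at the instruction
     * following the HVC. For an SMC trapped to EL2 (HCR_EL2.TSC), ELR_EL2
     * points at the SMC instruction itself, so the handler must advance it
     * by one instruction (+4 in AArch64); otherwise ERET would re-execute
     * the SMC and trap again, forever.
     */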
6 changes: 3 additions & 3 deletions arch/arm64/kvm/hyp/nvhe/hyp-smp.c
@@ -14,14 +14,14 @@
  * Other CPUs should not be allowed to boot because their features were
  * not checked against the finalized system capabilities.
  */
-u64 __ro_after_init __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = INVALID_HWID };
+u64 __ro_after_init hyp_cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = INVALID_HWID };
 
 u64 cpu_logical_map(unsigned int cpu)
 {
-	if (cpu >= ARRAY_SIZE(__cpu_logical_map))
+	if (cpu >= ARRAY_SIZE(hyp_cpu_logical_map))
 		hyp_panic();
 
-	return __cpu_logical_map[cpu];
+	return hyp_cpu_logical_map[cpu];
 }
 
 unsigned long __hyp_per_cpu_offset(unsigned int cpu)
59 changes: 22 additions & 37 deletions arch/arm64/kvm/hyp/nvhe/psci-relay.c
@@ -7,11 +7,8 @@
 #include <asm/kvm_asm.h>
 #include <asm/kvm_hyp.h>
 #include <asm/kvm_mmu.h>
-#include <kvm/arm_hypercalls.h>
 #include <linux/arm-smccc.h>
-#include <linux/kvm_host.h>
 #include <linux/psci.h>
-#include <kvm/arm_psci.h>
 #include <uapi/linux/psci.h>
 
 #include <nvhe/trap_handler.h>
@@ -22,9 +19,8 @@ void kvm_hyp_cpu_resume(unsigned long r0);
 void __noreturn __host_enter(struct kvm_cpu_context *host_ctxt);
 
 /* Config options set by the host. */
-__ro_after_init u32 kvm_host_psci_version;
-__ro_after_init struct psci_0_1_function_ids kvm_host_psci_0_1_function_ids;
-__ro_after_init s64 hyp_physvirt_offset;
+struct kvm_host_psci_config __ro_after_init kvm_host_psci_config;
+s64 __ro_after_init hyp_physvirt_offset;
 
 #define __hyp_pa(x) ((phys_addr_t)((x)) + hyp_physvirt_offset)
 
@@ -47,19 +43,16 @@ struct psci_boot_args {
 static DEFINE_PER_CPU(struct psci_boot_args, cpu_on_args) = PSCI_BOOT_ARGS_INIT;
 static DEFINE_PER_CPU(struct psci_boot_args, suspend_args) = PSCI_BOOT_ARGS_INIT;
 
-static u64 get_psci_func_id(struct kvm_cpu_context *host_ctxt)
-{
-	DECLARE_REG(u64, func_id, host_ctxt, 0);
-
-	return func_id;
-}
+#define is_psci_0_1(what, func_id) \
+	(kvm_host_psci_config.psci_0_1_ ## what ## _implemented && \
+	 (func_id) == kvm_host_psci_config.function_ids_0_1.what)
 
 static bool is_psci_0_1_call(u64 func_id)
 {
-	return (func_id == kvm_host_psci_0_1_function_ids.cpu_suspend) ||
-	       (func_id == kvm_host_psci_0_1_function_ids.cpu_on) ||
-	       (func_id == kvm_host_psci_0_1_function_ids.cpu_off) ||
-	       (func_id == kvm_host_psci_0_1_function_ids.migrate);
+	return (is_psci_0_1(cpu_suspend, func_id) ||
+		is_psci_0_1(cpu_on, func_id) ||
+		is_psci_0_1(cpu_off, func_id) ||
+		is_psci_0_1(migrate, func_id));
 }
 
 static bool is_psci_0_2_call(u64 func_id)
@@ -69,16 +62,6 @@ static bool is_psci_0_2_call(u64 func_id)
 	return (PSCI_0_2_FN(0) <= func_id && func_id <= PSCI_0_2_FN(31)) ||
 	       (PSCI_0_2_FN64(0) <= func_id && func_id <= PSCI_0_2_FN64(31));
 
-static bool is_psci_call(u64 func_id)
-{
-	switch (kvm_host_psci_version) {
-	case PSCI_VERSION(0, 1):
-		return is_psci_0_1_call(func_id);
-	default:
-		return is_psci_0_2_call(func_id);
-	}
-}
-
 static unsigned long psci_call(unsigned long fn, unsigned long arg0,
 			       unsigned long arg1, unsigned long arg2)
 {
@@ -248,15 +231,14 @@ asmlinkage void __noreturn kvm_host_psci_cpu_entry(bool is_cpu_on)
 
 static unsigned long psci_0_1_handler(u64 func_id, struct kvm_cpu_context *host_ctxt)
 {
-	if ((func_id == kvm_host_psci_0_1_function_ids.cpu_off) ||
-	    (func_id == kvm_host_psci_0_1_function_ids.migrate))
+	if (is_psci_0_1(cpu_off, func_id) || is_psci_0_1(migrate, func_id))
 		return psci_forward(host_ctxt);
-	else if (func_id == kvm_host_psci_0_1_function_ids.cpu_on)
+	if (is_psci_0_1(cpu_on, func_id))
 		return psci_cpu_on(func_id, host_ctxt);
-	else if (func_id == kvm_host_psci_0_1_function_ids.cpu_suspend)
+	if (is_psci_0_1(cpu_suspend, func_id))
 		return psci_cpu_suspend(func_id, host_ctxt);
-	else
-		return PSCI_RET_NOT_SUPPORTED;
+
+	return PSCI_RET_NOT_SUPPORTED;
 }
 
 static unsigned long psci_0_2_handler(u64 func_id, struct kvm_cpu_context *host_ctxt)
@@ -298,20 +280,23 @@ static unsigned long psci_1_0_handler(u64 func_id, struct kvm_cpu_context *host_ctxt)
 
 bool kvm_host_psci_handler(struct kvm_cpu_context *host_ctxt)
 {
-	u64 func_id = get_psci_func_id(host_ctxt);
+	DECLARE_REG(u64, func_id, host_ctxt, 0);
 	unsigned long ret;
 
-	if (!is_psci_call(func_id))
-		return false;
-
-	switch (kvm_host_psci_version) {
+	switch (kvm_host_psci_config.version) {
 	case PSCI_VERSION(0, 1):
+		if (!is_psci_0_1_call(func_id))
+			return false;
 		ret = psci_0_1_handler(func_id, host_ctxt);
 		break;
 	case PSCI_VERSION(0, 2):
+		if (!is_psci_0_2_call(func_id))
+			return false;
 		ret = psci_0_2_handler(func_id, host_ctxt);
 		break;
 	default:
+		if (!is_psci_0_2_call(func_id))
+			return false;
 		ret = psci_1_0_handler(func_id, host_ctxt);
 		break;
 	}
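The is_psci_0_1() macro pairs the function-ID comparison with the *_implemented flag the host recorded in init_psci_relay(), which guards against an unimplemented v0.1 function (whose ID field is left at its zero default) matching an unrelated SMC. For example:

    is_psci_0_1(cpu_on, func_id)

    /* expands to: */

    (kvm_host_psci_config.psci_0_1_cpu_on_implemented &&
     (func_id) == kvm_host_psci_config.function_ids_0_1.cpu_on)

With is_psci_call() gone, each branch of the version switch in kvm_host_psci_handler() now performs its own validity check before dispatching.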
2 changes: 0 additions & 2 deletions arch/arm64/kvm/pmu-emul.c
@@ -850,8 +850,6 @@ int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
 		return -EINVAL;
 	}
 
-	kvm_pmu_vcpu_reset(vcpu);
-
 	return 0;
 }
 
6 changes: 5 additions & 1 deletion arch/arm64/kvm/sys_regs.c
@@ -594,6 +594,10 @@ static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
 {
 	u64 pmcr, val;
 
+	/* No PMU available, PMCR_EL0 may UNDEF... */
+	if (!kvm_arm_support_pmu_v3())
+		return;
+
 	pmcr = read_sysreg(pmcr_el0);
 	/*
 	 * Writable bits of PMCR_EL0 (ARMV8_PMU_PMCR_MASK) are reset to UNKNOWN
@@ -919,7 +923,7 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 
 #define reg_to_encoding(x)						\
 	sys_reg((u32)(x)->Op0, (u32)(x)->Op1,				\
-		(u32)(x)->CRn, (u32)(x)->CRm, (u32)(x)->Op2);
+		(u32)(x)->CRn, (u32)(x)->CRm, (u32)(x)->Op2)
 
 /* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
 #define DBG_BCR_BVR_WCR_WVR_EL1(n)					\
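The dropped semicolon is the reg_to_encoding() fixup from the merge description. With the trailing ';' the macro only worked where a full statement was legal; illustrative, not from the patch:

    /* Statement context worked despite the stray ';':             */
    u32 enc = reg_to_encoding(r);   /* -> "... (u32)(r)->Op2);;"   */

    /* Expression context did not: the pasted ';' terminates the
     * condition early and the line no longer compiles:            */
    if (reg_to_encoding(a) == reg_to_encoding(b))
            return true;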