Merge tag 'kvm-arm-for-v4.17' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm

KVM/ARM updates for v4.17

- VHE optimizations
- EL2 address space randomization
- Variant 3a mitigation for Cortex-A57 and A72
- The usual vgic fixes
- Various minor tidying-up
rkrcmar committed Mar 28, 2018
2 parents d32ef54 + dc6ed61 commit abe7a45
Showing 67 changed files with 2,435 additions and 1,045 deletions.
9 changes: 6 additions & 3 deletions Documentation/arm64/memory.txt
@@ -86,9 +86,12 @@ Translation table lookup with 64KB pages:
 +-------------------------------------------------> [63] TTBR0/1


-When using KVM without the Virtualization Host Extensions, the hypervisor
-maps kernel pages in EL2 at a fixed offset from the kernel VA. See the
-kern_hyp_va macro for more details.
+When using KVM without the Virtualization Host Extensions, the
+hypervisor maps kernel pages in EL2 at a fixed (and potentially
+random) offset from the linear mapping. See the kern_hyp_va macro and
+kvm_update_va_mask function for more details. MMIO devices such as
+GICv2 get mapped next to the HYP idmap page, as do vectors when
+ARM64_HARDEN_EL2_VECTORS is selected for particular CPUs.

 When using KVM with the Virtualization Host Extensions, no additional
 mappings are created, since the host kernel runs directly in EL2.
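Note: a hedged sketch of the masking scheme the updated text describes (illustrative only; the real kern_hyp_va is patched at boot by kvm_update_va_mask, and these variable names are not the kernel's):

/* Sketch: convert a kernel linear-map address to its EL2 alias by
 * clearing the high VA bits and inserting a fixed-plus-random tag. */
static unsigned long hyp_va_mask;	/* preserves the low VA bits */
static unsigned long hyp_va_tag;	/* boot-time chosen high bits */

static inline unsigned long kern_hyp_va_sketch(unsigned long kva)
{
	return (kva & hyp_va_mask) | hyp_va_tag;
}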
5 changes: 4 additions & 1 deletion arch/arm/include/asm/kvm_asm.h
@@ -70,7 +70,10 @@ extern void __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu);

 extern void __kvm_timer_set_cntvoff(u32 cntvoff_low, u32 cntvoff_high);

-extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
+/* no VHE on 32-bit :( */
+static inline int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu) { BUG(); return 0; }
+
+extern int __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu);

 extern void __init_stage2_translation(void);
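Note: this hunk splits the single __kvm_vcpu_run entry point into VHE and non-VHE variants. A hedged sketch of the dispatch (modeled on the arm64 side of this series; the function name is illustrative, not from this hunk):

/* has_vhe() is constant-false on 32-bit ARM, so the BUG() stub above
 * is never reached and only __kvm_vcpu_run_nvhe is used. */
static int vcpu_enter_guest_sketch(struct kvm_vcpu *vcpu)
{
	if (has_vhe())
		return kvm_vcpu_run_vhe(vcpu);
	return kvm_call_hyp(__kvm_vcpu_run_nvhe, vcpu);
}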

21 changes: 13 additions & 8 deletions arch/arm/include/asm/kvm_emulate.h
@@ -41,7 +41,17 @@ static inline unsigned long *vcpu_reg32(struct kvm_vcpu *vcpu, u8 reg_num)
 	return vcpu_reg(vcpu, reg_num);
 }

-unsigned long *vcpu_spsr(struct kvm_vcpu *vcpu);
+unsigned long *__vcpu_spsr(struct kvm_vcpu *vcpu);
+
+static inline unsigned long vcpu_read_spsr(struct kvm_vcpu *vcpu)
+{
+	return *__vcpu_spsr(vcpu);
+}
+
+static inline void vcpu_write_spsr(struct kvm_vcpu *vcpu, unsigned long v)
+{
+	*__vcpu_spsr(vcpu) = v;
+}

 static inline unsigned long vcpu_get_reg(struct kvm_vcpu *vcpu,
 					 u8 reg_num)
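Note: with the banked SPSR now behind read/write accessors, callers work on the value rather than the raw pointer. A hedged usage sketch (the flag bit is just an example, not from this diff):

/* e.g. set the IRQ mask bit in the guest's banked SPSR */
vcpu_write_spsr(vcpu, vcpu_read_spsr(vcpu) | PSR_I_BIT);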
@@ -92,14 +102,9 @@ static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
 	vcpu->arch.hcr = HCR_GUEST_MASK;
 }

-static inline unsigned long vcpu_get_hcr(const struct kvm_vcpu *vcpu)
-{
-	return vcpu->arch.hcr;
-}
-
-static inline void vcpu_set_hcr(struct kvm_vcpu *vcpu, unsigned long hcr)
+static inline unsigned long *vcpu_hcr(const struct kvm_vcpu *vcpu)
 {
-	vcpu->arch.hcr = hcr;
+	return (unsigned long *)&vcpu->arch.hcr;
 }

 static inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
6 changes: 3 additions & 3 deletions arch/arm/include/asm/kvm_host.h
@@ -155,9 +155,6 @@ struct kvm_vcpu_arch {
 	/* HYP trapping configuration */
 	u32 hcr;

-	/* Interrupt related fields */
-	u32 irq_lines;		/* IRQ and FIQ levels */
-
 	/* Exception Information */
 	struct kvm_vcpu_fault_info fault;

@@ -315,4 +312,7 @@ static inline bool kvm_arm_harden_branch_predictor(void)
 	return false;
 }

+static inline void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu) {}
+static inline void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu) {}
+
 #endif /* __ARM_KVM_HOST_H__ */
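Note: these stubs let the common code call the sysreg load/put hooks unconditionally; on arm64 with VHE they defer EL1 system register save/restore to vcpu_load/vcpu_put. A hedged sketch of the expected call sites (illustrative, not from this hunk):

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	/* ... existing load work ... */
	kvm_vcpu_load_sysregs(vcpu);	/* no-op on 32-bit ARM */
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvm_vcpu_put_sysregs(vcpu);	/* no-op on 32-bit ARM */
	/* ... existing put work ... */
}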
4 changes: 4 additions & 0 deletions arch/arm/include/asm/kvm_hyp.h
@@ -110,6 +110,10 @@ void __sysreg_restore_state(struct kvm_cpu_context *ctxt);

 void __vgic_v3_save_state(struct kvm_vcpu *vcpu);
 void __vgic_v3_restore_state(struct kvm_vcpu *vcpu);
+void __vgic_v3_activate_traps(struct kvm_vcpu *vcpu);
+void __vgic_v3_deactivate_traps(struct kvm_vcpu *vcpu);
+void __vgic_v3_save_aprs(struct kvm_vcpu *vcpu);
+void __vgic_v3_restore_aprs(struct kvm_vcpu *vcpu);

 asmlinkage void __vfp_save_state(struct vfp_hard_struct *vfp);
 asmlinkage void __vfp_restore_state(struct vfp_hard_struct *vfp);
16 changes: 15 additions & 1 deletion arch/arm/include/asm/kvm_mmu.h
@@ -28,6 +28,13 @@
  */
 #define kern_hyp_va(kva)	(kva)

+/* Contrary to arm64, there is no need to generate a PC-relative address */
+#define hyp_symbol_addr(s)						\
+	({								\
+		typeof(s) *addr = &(s);					\
+		addr;							\
+	})
+
 /*
  * KVM_MMU_CACHE_MIN_PAGES is the number of stage2 page table translation levels.
  */
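Note: for contrast with the comment above, the arm64 counterpart must compute the address PC-relatively so the result is valid in both the kernel and HYP mappings; roughly (a hedged reconstruction of the arm64 macro from this series, not part of this hunk):

#define hyp_symbol_addr(s)						\
	({								\
		typeof(s) *addr;					\
		asm("adrp	%0, %1\n"				\
		    "add	%0, %0, :lo12:%1\n"			\
		    : "=r" (addr) : "S" (&s));				\
		addr;							\
	})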
@@ -42,8 +49,15 @@
 #include <asm/pgalloc.h>
 #include <asm/stage2_pgtable.h>

+/* Ensure compatibility with arm64 */
+#define VA_BITS			32
+
 int create_hyp_mappings(void *from, void *to, pgprot_t prot);
-int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
+int create_hyp_io_mappings(phys_addr_t phys_addr, size_t size,
+			   void __iomem **kaddr,
+			   void __iomem **haddr);
+int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size,
+			     void **haddr);
 void free_hyp_pgds(void);

 void stage2_unmap_vm(struct kvm *kvm);
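Note: the reworked create_hyp_io_mappings() now takes a physical address and size and hands back both the kernel iomap address and its HYP alias. A hedged usage sketch (the device and names are illustrative; the GICv2 vCPU interface is the motivating user):

static int map_vcpu_if_sketch(phys_addr_t vcpu_base_phys)
{
	void __iomem *kaddr;
	void __iomem *haddr;

	/* callers would stash kaddr/haddr; dropped here for brevity */
	return create_hyp_io_mappings(vcpu_base_phys, PAGE_SIZE,
				      &kaddr, &haddr);
}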
9 changes: 9 additions & 0 deletions arch/arm/include/uapi/asm/kvm.h
@@ -135,6 +135,15 @@ struct kvm_arch_memory_slot {
 #define KVM_REG_ARM_CRM_SHIFT		7
 #define KVM_REG_ARM_32_CRN_MASK		0x0000000000007800
 #define KVM_REG_ARM_32_CRN_SHIFT	11
+/*
+ * For KVM currently all guest registers are nonsecure, but we reserve a bit
+ * in the encoding to distinguish secure from nonsecure for AArch32 system
+ * registers that are banked by security. This is 1 for the secure banked
+ * register, and 0 for the nonsecure banked register or if the register is
+ * not banked by security.
+ */
+#define KVM_REG_ARM_SECURE_MASK	0x0000000010000000
+#define KVM_REG_ARM_SECURE_SHIFT	28

 #define ARM_CP15_REG_SHIFT_MASK(x,n) \
 	(((x) << KVM_REG_ARM_ ## n ## _SHIFT) & KVM_REG_ARM_ ## n ## _MASK)
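Note: a hedged illustration of the new bit; all currently exposed registers keep it clear, and the secure bank of a security-banked register would differ only in bit 28:

/* illustrative only: nonsecure_id stands for any existing banked
 * cp15 register ID */
u64 secure_id = nonsecure_id | KVM_REG_ARM_SECURE_MASK;
/* equivalently: nonsecure_id | (1ULL << KVM_REG_ARM_SECURE_SHIFT) */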
61 changes: 61 additions & 0 deletions arch/arm/kvm/coproc.c
@@ -270,6 +270,60 @@ static bool access_gic_sre(struct kvm_vcpu *vcpu,
 	return true;
 }

+static bool access_cntp_tval(struct kvm_vcpu *vcpu,
+			     const struct coproc_params *p,
+			     const struct coproc_reg *r)
+{
+	u64 now = kvm_phys_timer_read();
+	u64 val;
+
+	if (p->is_write) {
+		val = *vcpu_reg(vcpu, p->Rt1);
+		kvm_arm_timer_set_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL, val + now);
+	} else {
+		val = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL);
+		*vcpu_reg(vcpu, p->Rt1) = val - now;
+	}
+
+	return true;
+}
+
+static bool access_cntp_ctl(struct kvm_vcpu *vcpu,
+			    const struct coproc_params *p,
+			    const struct coproc_reg *r)
+{
+	u32 val;
+
+	if (p->is_write) {
+		val = *vcpu_reg(vcpu, p->Rt1);
+		kvm_arm_timer_set_reg(vcpu, KVM_REG_ARM_PTIMER_CTL, val);
+	} else {
+		val = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_PTIMER_CTL);
+		*vcpu_reg(vcpu, p->Rt1) = val;
+	}
+
+	return true;
+}
+
+static bool access_cntp_cval(struct kvm_vcpu *vcpu,
+			     const struct coproc_params *p,
+			     const struct coproc_reg *r)
+{
+	u64 val;
+
+	if (p->is_write) {
+		val = (u64)*vcpu_reg(vcpu, p->Rt2) << 32;
+		val |= *vcpu_reg(vcpu, p->Rt1);
+		kvm_arm_timer_set_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL, val);
+	} else {
+		val = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL);
+		*vcpu_reg(vcpu, p->Rt1) = val;
+		*vcpu_reg(vcpu, p->Rt2) = val >> 32;
+	}
+
+	return true;
+}
+
 /*
  * We could trap ID_DFR0 and tell the guest we don't support performance
  * monitoring. Unfortunately the patch to make the kernel check ID_DFR0 was
@@ -423,10 +477,17 @@ static const struct coproc_reg cp15_regs[] = {
 	{ CRn(13), CRm( 0), Op1( 0), Op2( 4), is32,
 			NULL, reset_unknown, c13_TID_PRIV },

+	/* CNTP */
+	{ CRm64(14), Op1( 2), is64, access_cntp_cval},
+
 	/* CNTKCTL: swapped by interrupt.S. */
 	{ CRn(14), CRm( 1), Op1( 0), Op2( 0), is32,
 			NULL, reset_val, c14_CNTKCTL, 0x00000000 },

+	/* CNTP */
+	{ CRn(14), CRm( 2), Op1( 0), Op2( 0), is32, access_cntp_tval },
+	{ CRn(14), CRm( 2), Op1( 0), Op2( 1), is32, access_cntp_ctl },
+
 	/* The Configuration Base Address Register. */
 	{ CRn(15), CRm( 0), Op1( 4), Op2( 0), is32, access_cbar},
 };
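Note: the TVAL accessor above relies on the architectural identity CNTP_TVAL = CNTP_CVAL - CNTPCT, so only CVAL needs to be stored. A hedged standalone illustration of that arithmetic (not from the patch):

/* emulating TVAL purely in terms of a stored CVAL, mirroring
 * access_cntp_tval() above */
static u64 cval;			/* emulated CNTP_CVAL */

static void write_tval_sketch(u64 now, s32 val)
{
	cval = now + val;		/* timer fires when CNTPCT >= CVAL */
}

static s32 read_tval_sketch(u64 now)
{
	return (s32)(cval - now);	/* negative once the timer has fired */
}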
4 changes: 2 additions & 2 deletions arch/arm/kvm/emulate.c
@@ -142,7 +142,7 @@ unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num)
 /*
  * Return the SPSR for the current mode of the virtual CPU.
  */
-unsigned long *vcpu_spsr(struct kvm_vcpu *vcpu)
+unsigned long *__vcpu_spsr(struct kvm_vcpu *vcpu)
 {
 	unsigned long mode = *vcpu_cpsr(vcpu) & MODE_MASK;
 	switch (mode) {
@@ -174,5 +174,5 @@ unsigned long *vcpu_spsr(struct kvm_vcpu *vcpu)
  */
 void kvm_inject_vabt(struct kvm_vcpu *vcpu)
 {
-	vcpu_set_hcr(vcpu, vcpu_get_hcr(vcpu) | HCR_VA);
+	*vcpu_hcr(vcpu) |= HCR_VA;
 }
1 change: 0 additions & 1 deletion arch/arm/kvm/hyp/Makefile
@@ -9,7 +9,6 @@ KVM=../../../../virt/kvm

 CFLAGS_ARMV7VE		   :=$(call cc-option, -march=armv7ve)

-obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v2-sr.o
 obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v3-sr.o
 obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/timer-sr.o

16 changes: 8 additions & 8 deletions arch/arm/kvm/hyp/switch.c
@@ -44,7 +44,7 @@ static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu, u32 *fpexc_host)
 		isb();
 	}

-	write_sysreg(vcpu->arch.hcr | vcpu->arch.irq_lines, HCR);
+	write_sysreg(vcpu->arch.hcr, HCR);
 	/* Trap on AArch32 cp15 c15 accesses (EL1 or EL0) */
 	write_sysreg(HSTR_T(15), HSTR);
 	write_sysreg(HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11), HCPTR);
@@ -90,18 +90,18 @@ static void __hyp_text __deactivate_vm(struct kvm_vcpu *vcpu)

 static void __hyp_text __vgic_save_state(struct kvm_vcpu *vcpu)
 {
-	if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
+	if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) {
 		__vgic_v3_save_state(vcpu);
-	else
-		__vgic_v2_save_state(vcpu);
+		__vgic_v3_deactivate_traps(vcpu);
+	}
 }

 static void __hyp_text __vgic_restore_state(struct kvm_vcpu *vcpu)
 {
-	if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
+	if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) {
+		__vgic_v3_activate_traps(vcpu);
 		__vgic_v3_restore_state(vcpu);
-	else
-		__vgic_v2_restore_state(vcpu);
+	}
 }

 static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu)
@@ -154,7 +154,7 @@ static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu)
 	return true;
 }

-int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu)
+int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu)
 {
 	struct kvm_cpu_context *host_ctxt;
 	struct kvm_cpu_context *guest_ctxt;
16 changes: 16 additions & 0 deletions arch/arm64/Kconfig
@@ -904,6 +904,22 @@ config HARDEN_BRANCH_PREDICTOR

 	  If unsure, say Y.

+config HARDEN_EL2_VECTORS
+	bool "Harden EL2 vector mapping against system register leak" if EXPERT
+	default y
+	help
+	  Speculation attacks against some high-performance processors can
+	  be used to leak privileged information such as the vector base
+	  register, resulting in a potential defeat of the EL2 layout
+	  randomization.
+
+	  This config option will map the vectors to a fixed location,
+	  independent of the EL2 code mapping, so that revealing VBAR_EL2
+	  to an attacker does not give away any extra information. This
+	  only gets enabled on affected CPUs.
+
+	  If unsure, say Y.
+
 menuconfig ARMV8_DEPRECATED
 	bool "Emulate deprecated/obsolete ARMv8 instructions"
 	depends on COMPAT
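Note: a hedged sketch of how the option might take effect at runtime; select_hyp_vectors() is an illustrative helper (not from this diff), while the cpucap name comes from the memory.txt hunk above:

/* pick the hardened, fixed-location vectors only on affected CPUs */
static inline void *select_hyp_vectors(void *regular, void *hardened)
{
	if (IS_ENABLED(CONFIG_HARDEN_EL2_VECTORS) &&
	    cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS))
		return hardened;	/* mapped away from the EL2 code */
	return regular;
}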