Merge tag 'kvmarm-fixes-5.8-1' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into HEAD

KVM/arm64 fixes for Linux 5.8, take #1

* 32bit VM fixes:
  - Fix embarrassing mapping issue between AArch32 CSSELR and AArch64
    ACTLR
  - Add ACTLR2 support for AArch32
  - Get rid of the useless ACTLR_EL1 save/restore
  - Fix CP14/15 accesses for AArch32 guests on BE hosts
  - Ensure that we don't lose any state when injecting a 32bit
    exception when running on a VHE host

* 64bit VM fixes:
  - Fix PtrAuth host saving happening in preemptible contexts
  - Optimize PtrAuth lazy enable
  - Drop vcpu to cpu context pointer
  - Fix sparse warnings for HYP per-CPU accesses
bonzini committed Jun 11, 2020
2 parents e0135a1 + 15c9981 commit 49b3dea
Showing 13 changed files with 171 additions and 97 deletions.
33 changes: 30 additions & 3 deletions arch/arm64/include/asm/kvm_asm.h
@@ -81,12 +81,39 @@ extern u32 __kvm_get_mdcr_el2(void);

extern char __smccc_workaround_1_smc[__SMCCC_WORKAROUND_1_SMC_SZ];

-/* Home-grown __this_cpu_{ptr,read} variants that always work at HYP */
+/*
+ * Obtain the PC-relative address of a kernel symbol
+ * s: symbol
+ *
+ * The goal of this macro is to return a symbol's address based on a
+ * PC-relative computation, as opposed to loading the VA from a
+ * constant pool or something similar. This works well for HYP, as an
+ * absolute VA is guaranteed to be wrong. Only use this if trying to
+ * obtain the address of a symbol (i.e. not something you obtained by
+ * following a pointer).
+ */
+#define hyp_symbol_addr(s) \
+({ \
+	typeof(s) *addr; \
+	asm("adrp %0, %1\n" \
+	    "add %0, %0, :lo12:%1\n" \
+	    : "=r" (addr) : "S" (&s)); \
+	addr; \
+})

+/*
+ * Home-grown __this_cpu_{ptr,read} variants that always work at HYP,
+ * provided that sym is really a *symbol* and not a pointer obtained from
+ * a data structure. As for SHIFT_PERCPU_PTR(), the creative casting keeps
+ * sparse quiet.
+ */
#define __hyp_this_cpu_ptr(sym) \
({ \
-	void *__ptr = hyp_symbol_addr(sym); \
+	void *__ptr; \
+	__verify_pcpu_ptr(&sym); \
+	__ptr = hyp_symbol_addr(sym); \
	__ptr += read_sysreg(tpidr_el2); \
-	(typeof(&sym))__ptr; \
+	(typeof(sym) __kernel __force *)__ptr; \
})

#define __hyp_this_cpu_read(sym) \
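A minimal usage sketch tying the two macros together (hypothetical per-CPU variable, not part of this diff): __hyp_this_cpu_ptr() only works because hyp_symbol_addr() forms the address PC-relatively with adrp/add, so the same instructions resolve correctly under the HYP mapping. Note that the "S" constraint only accepts a link-time-constant symbol address, which is what rules out pointers fished out of data structures.

	/* Hypothetical example, not from this commit. */
	DEFINE_PER_CPU(unsigned long, hyp_exit_count);	/* assumed per-CPU symbol */

	static void __hyp_text count_exit(void)
	{
		/* OK: hyp_exit_count is a symbol, so adrp/add can reach it. */
		unsigned long *cnt = __hyp_this_cpu_ptr(hyp_exit_count);

		(*cnt)++;
	}

This is exactly the pattern the later hunks adopt for the host context: instead of chasing the vcpu->arch.host_cpu_context pointer, they use __hyp_this_cpu_ptr(kvm_host_data).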
6 changes: 0 additions & 6 deletions arch/arm64/include/asm/kvm_emulate.h
@@ -112,12 +112,6 @@ static inline void vcpu_ptrauth_disable(struct kvm_vcpu *vcpu)
vcpu->arch.hcr_el2 &= ~(HCR_API | HCR_APK);
}

-static inline void vcpu_ptrauth_setup_lazy(struct kvm_vcpu *vcpu)
-{
-	if (vcpu_has_ptrauth(vcpu))
-		vcpu_ptrauth_disable(vcpu);
-}

static inline unsigned long vcpu_get_vsesr(struct kvm_vcpu *vcpu)
{
return vcpu->arch.vsesr_el2;
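For context, the helpers that survive in this header just toggle the ptrauth traps in HCR_EL2; a paraphrase of their shape from the kvm_emulate.h of this era (shown for orientation, not part of the diff):

	static inline void vcpu_ptrauth_enable(struct kvm_vcpu *vcpu)
	{
		vcpu->arch.hcr_el2 |= (HCR_API | HCR_APK);
	}

With vcpu_ptrauth_setup_lazy() gone, the enable side moves into __hyp_handle_ptrauth() in switch.c and the disable side is inlined into kvm_arch_vcpu_load() in arm.c, both later in this diff.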
9 changes: 4 additions & 5 deletions arch/arm64/include/asm/kvm_host.h
@@ -284,9 +284,6 @@ struct kvm_vcpu_arch {
struct kvm_guest_debug_arch vcpu_debug_state;
struct kvm_guest_debug_arch external_debug_state;

-	/* Pointer to host CPU context */
-	struct kvm_cpu_context *host_cpu_context;

struct thread_info *host_thread_info; /* hyp VA */
struct user_fpsimd_state *host_fpsimd_state; /* hyp VA */

@@ -404,8 +401,10 @@ void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg);
* CP14 and CP15 live in the same array, as they are backed by the
* same system registers.
*/
-#define vcpu_cp14(v,r)	((v)->arch.ctxt.copro[(r)])
-#define vcpu_cp15(v,r)	((v)->arch.ctxt.copro[(r)])
+#define CPx_BIAS	IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)
+
+#define vcpu_cp14(v,r)	((v)->arch.ctxt.copro[(r) ^ CPx_BIAS])
+#define vcpu_cp15(v,r)	((v)->arch.ctxt.copro[(r) ^ CPx_BIAS])

struct kvm_vm_stat {
ulong remote_tlb_flush;
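Why the XOR works: copro[] is a 32-bit view aliasing the same storage as the 64-bit sys_regs, and on a big-endian host the two 32-bit halves of each 64-bit slot sit at swapped offsets, so the index has to be flipped with ^ 1. A standalone illustration of the aliasing (plain userspace C, not kernel code):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		union {
			uint64_t sysreg;	/* the 64-bit AArch64 view */
			uint32_t copro[2];	/* the 32-bit AArch32 view */
		} u = { .sysreg = 0xaaaaaaaa55555555ULL };

		/*
		 * LE host: copro[0] == 0x55555555 (low half at offset 0).
		 * BE host: copro[0] == 0xaaaaaaaa (high half at offset 0).
		 * XORing the index with CPx_BIAS (1 on BE) restores the
		 * "even index == low half" layout the CP14/15 code expects.
		 */
		printf("copro[0] = %#x\n", u.copro[0]);
		return 0;
	}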
20 changes: 0 additions & 20 deletions arch/arm64/include/asm/kvm_mmu.h
@@ -107,26 +107,6 @@ static __always_inline unsigned long __kern_hyp_va(unsigned long v)

#define kern_hyp_va(v) ((typeof(v))(__kern_hyp_va((unsigned long)(v))))

-/*
- * Obtain the PC-relative address of a kernel symbol
- * s: symbol
- *
- * The goal of this macro is to return a symbol's address based on a
- * PC-relative computation, as opposed to a loading the VA from a
- * constant pool or something similar. This works well for HYP, as an
- * absolute VA is guaranteed to be wrong. Only use this if trying to
- * obtain the address of a symbol (i.e. not something you obtained by
- * following a pointer).
- */
-#define hyp_symbol_addr(s) \
-({ \
-	typeof(s) *addr; \
-	asm("adrp %0, %1\n" \
-	    "add %0, %0, :lo12:%1\n" \
-	    : "=r" (addr) : "S" (&s)); \
-	addr; \
-})

/*
* We currently support using a VM-specified IPA size. For backward
* compatibility, the default IPA size is fixed to 40bits.
28 changes: 28 additions & 0 deletions arch/arm64/kvm/aarch32.c
@@ -33,6 +33,26 @@ static const u8 return_offsets[8][2] = {
[7] = { 4, 4 }, /* FIQ, unused */
};

+static bool pre_fault_synchronize(struct kvm_vcpu *vcpu)
+{
+	preempt_disable();
+	if (vcpu->arch.sysregs_loaded_on_cpu) {
+		kvm_arch_vcpu_put(vcpu);
+		return true;
+	}
+
+	preempt_enable();
+	return false;
+}
+
+static void post_fault_synchronize(struct kvm_vcpu *vcpu, bool loaded)
+{
+	if (loaded) {
+		kvm_arch_vcpu_load(vcpu, smp_processor_id());
+		preempt_enable();
+	}
+}

/*
* When an exception is taken, most CPSR fields are left unchanged in the
* handler. However, some are explicitly overridden (e.g. M[4:0]).
@@ -155,7 +175,10 @@ static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)

void kvm_inject_undef32(struct kvm_vcpu *vcpu)
{
+	bool loaded = pre_fault_synchronize(vcpu);
+
	prepare_fault32(vcpu, PSR_AA32_MODE_UND, 4);
+	post_fault_synchronize(vcpu, loaded);
}

/*
@@ -168,6 +191,9 @@ static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt,
u32 vect_offset;
u32 *far, *fsr;
bool is_lpae;
+	bool loaded;
+
+	loaded = pre_fault_synchronize(vcpu);

if (is_pabt) {
vect_offset = 12;
@@ -191,6 +217,8 @@ static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt,
/* no need to shuffle FS[4] into DFSR[10] as its 0 */
*fsr = DFSR_FSC_EXTABT_nLPAE;
}

+	post_fault_synchronize(vcpu, loaded);
}

void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr)
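Why the put/load bracket matters: on a VHE host the vcpu's EL1 system registers can be live in hardware (sysregs_loaded_on_cpu) while the host injects the fault, so writes to the in-memory context would be silently overwritten at the next sync; this is the "lose any state" item from the tag message. A sketch of the invariant the helpers establish (hypothetical assertion, not part of the diff):

	bool loaded = pre_fault_synchronize(vcpu);	/* HW -> memory; stays preempt-off if it was loaded */

	/* From here on, the in-memory context is authoritative. */
	WARN_ON(vcpu->arch.sysregs_loaded_on_cpu);
	prepare_fault32(vcpu, PSR_AA32_MODE_UND, 4);

	post_fault_synchronize(vcpu, loaded);		/* memory -> HW; re-enables preemption */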
20 changes: 12 additions & 8 deletions arch/arm64/kvm/arm.c
@@ -335,10 +335,8 @@ void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
int *last_ran;
-	kvm_host_data_t *cpu_data;

last_ran = this_cpu_ptr(vcpu->kvm->arch.last_vcpu_ran);
-	cpu_data = this_cpu_ptr(&kvm_host_data);

/*
* We might get preempted before the vCPU actually runs, but
@@ -350,7 +348,6 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
}

vcpu->cpu = cpu;
-	vcpu->arch.host_cpu_context = &cpu_data->host_ctxt;

kvm_vgic_load(vcpu);
kvm_timer_vcpu_load(vcpu);
@@ -365,7 +362,8 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
else
vcpu_set_wfx_traps(vcpu);

-	vcpu_ptrauth_setup_lazy(vcpu);
+	if (vcpu_has_ptrauth(vcpu))
+		vcpu_ptrauth_disable(vcpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
@@ -985,11 +983,17 @@ static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
* Ensure a rebooted VM will fault in RAM pages and detect if the
* guest MMU is turned off and flush the caches as needed.
*
-	 * S2FWB enforces all memory accesses to RAM being cacheable, we
-	 * ensure that the cache is always coherent.
+	 * S2FWB enforces all memory accesses to RAM being cacheable,
+	 * ensuring that the data side is always coherent. We still
+	 * need to invalidate the I-cache though, as FWB does *not*
+	 * imply CTR_EL0.DIC.
	 */
-	if (vcpu->arch.has_run_once && !cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
-		stage2_unmap_vm(vcpu->kvm);
+	if (vcpu->arch.has_run_once) {
+		if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
+			stage2_unmap_vm(vcpu->kvm);
+		else
+			__flush_icache_all();
+	}

vcpu_reset_hcr(vcpu);

32 changes: 3 additions & 29 deletions arch/arm64/kvm/handle_exit.c
@@ -162,40 +162,14 @@ static int handle_sve(struct kvm_vcpu *vcpu, struct kvm_run *run)
return 1;
}

-#define __ptrauth_save_key(regs, key) \
-({ \
-	regs[key ## KEYLO_EL1] = read_sysreg_s(SYS_ ## key ## KEYLO_EL1); \
-	regs[key ## KEYHI_EL1] = read_sysreg_s(SYS_ ## key ## KEYHI_EL1); \
-})
-
-/*
- * Handle the guest trying to use a ptrauth instruction, or trying to access a
- * ptrauth register.
- */
-void kvm_arm_vcpu_ptrauth_trap(struct kvm_vcpu *vcpu)
-{
-	struct kvm_cpu_context *ctxt;
-
-	if (vcpu_has_ptrauth(vcpu)) {
-		vcpu_ptrauth_enable(vcpu);
-		ctxt = vcpu->arch.host_cpu_context;
-		__ptrauth_save_key(ctxt->sys_regs, APIA);
-		__ptrauth_save_key(ctxt->sys_regs, APIB);
-		__ptrauth_save_key(ctxt->sys_regs, APDA);
-		__ptrauth_save_key(ctxt->sys_regs, APDB);
-		__ptrauth_save_key(ctxt->sys_regs, APGA);
-	} else {
-		kvm_inject_undefined(vcpu);
-	}
-}

/*
* Guest usage of a ptrauth instruction (which the guest EL1 did not turn into
- * a NOP).
+ * a NOP). If we get here, it means we didn't fix up ptrauth on exit, and all
+ * we can do is give the guest an UNDEF.
*/
static int kvm_handle_ptrauth(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
-	kvm_arm_vcpu_ptrauth_trap(vcpu);
+	kvm_inject_undefined(vcpu);
return 1;
}

4 changes: 2 additions & 2 deletions arch/arm64/kvm/hyp/debug-sr.c
@@ -185,7 +185,7 @@ void __hyp_text __debug_switch_to_guest(struct kvm_vcpu *vcpu)
if (!(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY))
return;

-	host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
+	host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
guest_ctxt = &vcpu->arch.ctxt;
host_dbg = &vcpu->arch.host_debug_state.regs;
guest_dbg = kern_hyp_va(vcpu->arch.debug_ptr);
@@ -207,7 +207,7 @@ void __hyp_text __debug_switch_to_host(struct kvm_vcpu *vcpu)
if (!(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY))
return;

-	host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
+	host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
guest_ctxt = &vcpu->arch.ctxt;
host_dbg = &vcpu->arch.host_debug_state.regs;
guest_dbg = kern_hyp_va(vcpu->arch.debug_ptr);
65 changes: 63 additions & 2 deletions arch/arm64/kvm/hyp/switch.c
@@ -490,6 +490,64 @@ static bool __hyp_text handle_tx2_tvm(struct kvm_vcpu *vcpu)
return true;
}

+static bool __hyp_text esr_is_ptrauth_trap(u32 esr)
+{
+	u32 ec = ESR_ELx_EC(esr);
+
+	if (ec == ESR_ELx_EC_PAC)
+		return true;
+
+	if (ec != ESR_ELx_EC_SYS64)
+		return false;
+
+	switch (esr_sys64_to_sysreg(esr)) {
+	case SYS_APIAKEYLO_EL1:
+	case SYS_APIAKEYHI_EL1:
+	case SYS_APIBKEYLO_EL1:
+	case SYS_APIBKEYHI_EL1:
+	case SYS_APDAKEYLO_EL1:
+	case SYS_APDAKEYHI_EL1:
+	case SYS_APDBKEYLO_EL1:
+	case SYS_APDBKEYHI_EL1:
+	case SYS_APGAKEYLO_EL1:
+	case SYS_APGAKEYHI_EL1:
+		return true;
+	}
+
+	return false;
+}
+
+#define __ptrauth_save_key(regs, key) \
+({ \
+	regs[key ## KEYLO_EL1] = read_sysreg_s(SYS_ ## key ## KEYLO_EL1); \
+	regs[key ## KEYHI_EL1] = read_sysreg_s(SYS_ ## key ## KEYHI_EL1); \
+})
+
+static bool __hyp_text __hyp_handle_ptrauth(struct kvm_vcpu *vcpu)
+{
+	struct kvm_cpu_context *ctxt;
+	u64 val;
+
+	if (!vcpu_has_ptrauth(vcpu) ||
+	    !esr_is_ptrauth_trap(kvm_vcpu_get_hsr(vcpu)))
+		return false;
+
+	ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
+	__ptrauth_save_key(ctxt->sys_regs, APIA);
+	__ptrauth_save_key(ctxt->sys_regs, APIB);
+	__ptrauth_save_key(ctxt->sys_regs, APDA);
+	__ptrauth_save_key(ctxt->sys_regs, APDB);
+	__ptrauth_save_key(ctxt->sys_regs, APGA);
+
+	vcpu_ptrauth_enable(vcpu);
+
+	val = read_sysreg(hcr_el2);
+	val |= (HCR_API | HCR_APK);
+	write_sysreg(val, hcr_el2);
+
+	return true;
+}

/*
* Return true when we were able to fixup the guest exit and should return to
* the guest, false when we should restore the host state and return to the
@@ -524,6 +582,9 @@ static bool __hyp_text fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
if (__hyp_handle_fpsimd(vcpu))
return true;

+	if (__hyp_handle_ptrauth(vcpu))
+		return true;

if (!__populate_fault_info(vcpu))
return true;

@@ -642,7 +703,7 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
struct kvm_cpu_context *guest_ctxt;
u64 exit_code;

-	host_ctxt = vcpu->arch.host_cpu_context;
+	host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
host_ctxt->__hyp_running_vcpu = vcpu;
guest_ctxt = &vcpu->arch.ctxt;

@@ -747,7 +808,7 @@ int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu)

vcpu = kern_hyp_va(vcpu);

-	host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
+	host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
host_ctxt->__hyp_running_vcpu = vcpu;
guest_ctxt = &vcpu->arch.ctxt;

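For reference, each __ptrauth_save_key() call above expands to a pair of system-register reads into the host context; e.g. __ptrauth_save_key(ctxt->sys_regs, APIA) becomes:

	ctxt->sys_regs[APIAKEYLO_EL1] = read_sysreg_s(SYS_APIAKEYLO_EL1);
	ctxt->sys_regs[APIAKEYHI_EL1] = read_sysreg_s(SYS_APIAKEYHI_EL1);

Doing this at EL2 in the exit path, where preemption is impossible, is what fixes the "PtrAuth host saving happening in preemptible contexts" item from the tag message: the old kvm_arm_vcpu_ptrauth_trap() ran from the preemptible exit handler, where the task could be preempted or migrated while copying the keys into a per-CPU host context.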
