Merge tag 'kvm-ppc-next-4.16-2' of git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc

Second PPC KVM update for 4.16

Seven fixes that are either trivial or that address bugs that people
are actually hitting.  The main ones are:

- Drop spinlocks before reading guest memory

- Fix a bug causing corruption of VCPU state in PR KVM with preemption
  enabled

- Make HPT resizing work on POWER9

- Add MMIO emulation for vector loads and stores, because guests now
  use these instructions in memcpy and similar routines (see the sketch
  below).
rkrcmar committed Feb 9, 2018
2 parents 80132f4 + 09f9849 commit 1ab03c0
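
The last item is where most of the new code lands: when a guest lvx/stvx faults on emulated MMIO, the 128-bit access is completed as two 64-bit MMIO operations (the new kvmppc_handle_load128_by2x64()/kvmppc_handle_store128_by2x64() helpers, driven by mmio_vmx_copy_nums = 2 in the emulate_loadstore.c hunk further down). The standalone C sketch below only illustrates that split; the device buffer, helper names and byte ordering are illustrative assumptions, not the kernel's implementation.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for one 64-bit MMIO read from the emulated device. */
static uint64_t mmio_read64(const uint8_t *dev, uint64_t off)
{
        uint64_t v;

        memcpy(&v, dev + off, sizeof(v));       /* device backing store, host-endian */
        return v;
}

/*
 * The "128 bits as 2 x 64 bits" idea: a vector load that faults on MMIO is
 * completed by two 64-bit reads whose halves are stitched back into the
 * 16-byte register image.  gpa is treated as 16-byte aligned, mirroring the
 * "&= ~0xFULL" masking in the patch (lvx ignores the low 4 address bits).
 */
static void load128_by2x64(uint8_t vr[16], const uint8_t *dev, uint64_t gpa)
{
        uint64_t lo = mmio_read64(dev, gpa);            /* first 8 bytes  */
        uint64_t hi = mmio_read64(dev, gpa + 8);        /* second 8 bytes */

        memcpy(vr, &lo, 8);
        memcpy(vr + 8, &hi, 8);
}

int main(void)
{
        uint8_t device[32], vreg[16];
        int i;

        for (i = 0; i < 32; i++)
                device[i] = (uint8_t)i;                 /* fake MMIO contents */

        load128_by2x64(vreg, device, 16);               /* "lvx" from offset 16 */

        for (i = 0; i < 16; i++)
                printf("%02x ", vreg[i]);
        printf("\n");
        return 0;
}
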
Showing 12 changed files with 251 additions and 39 deletions.
6 changes: 2 additions & 4 deletions arch/powerpc/include/asm/kvm_book3s.h
@@ -249,10 +249,8 @@ extern int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd);
extern void kvmppc_pr_init_default_hcalls(struct kvm *kvm);
extern int kvmppc_hcall_impl_pr(unsigned long cmd);
extern int kvmppc_hcall_impl_hv_realmode(unsigned long cmd);
extern void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu,
struct kvm_vcpu *vcpu);
extern void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
struct kvmppc_book3s_shadow_vcpu *svcpu);
extern void kvmppc_copy_to_svcpu(struct kvm_vcpu *vcpu);
extern void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu);
extern int kvm_irq_bypass;

static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)
2 changes: 2 additions & 0 deletions arch/powerpc/include/asm/kvm_host.h
@@ -690,6 +690,7 @@ struct kvm_vcpu_arch {
u8 mmio_vsx_offset;
u8 mmio_vsx_copy_type;
u8 mmio_vsx_tx_sx_enabled;
u8 mmio_vmx_copy_nums;
u8 osi_needed;
u8 osi_enabled;
u8 papr_enabled;
@@ -804,6 +805,7 @@ struct kvm_vcpu_arch {
#define KVM_MMIO_REG_QPR 0x0040
#define KVM_MMIO_REG_FQPR 0x0060
#define KVM_MMIO_REG_VSX 0x0080
#define KVM_MMIO_REG_VMX 0x00c0

#define __KVM_HAVE_ARCH_WQP
#define __KVM_HAVE_CREATE_DEVICE
4 changes: 4 additions & 0 deletions arch/powerpc/include/asm/kvm_ppc.h
@@ -81,6 +81,10 @@ extern int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
extern int kvmppc_handle_vsx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
unsigned int rt, unsigned int bytes,
int is_default_endian, int mmio_sign_extend);
extern int kvmppc_handle_load128_by2x64(struct kvm_run *run,
struct kvm_vcpu *vcpu, unsigned int rt, int is_default_endian);
extern int kvmppc_handle_store128_by2x64(struct kvm_run *run,
struct kvm_vcpu *vcpu, unsigned int rs, int is_default_endian);
extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
u64 val, unsigned int bytes,
int is_default_endian);
6 changes: 6 additions & 0 deletions arch/powerpc/include/asm/ppc-opcode.h
@@ -156,6 +156,12 @@
#define OP_31_XOP_LFDX 599
#define OP_31_XOP_LFDUX 631

/* VMX Vector Load Instructions */
#define OP_31_XOP_LVX 103

/* VMX Vector Store Instructions */
#define OP_31_XOP_STVX 231

#define OP_LWZ 32
#define OP_STFS 52
#define OP_STFSU 53
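
The two new constants are X-form extended opcodes: lvx and stvx both have primary opcode 31, with the extended opcode in bits 21-30 of the instruction word, which is what the emulation code further down switches on via its get_op()/get_xop() helpers. A small standalone decoder as a sketch (the example encoding is an assumption for illustration):

#include <stdint.h>
#include <stdio.h>

/* Same bit slicing as the kernel's get_op()/get_xop() helpers: primary
 * opcode in the top 6 bits, X-form extended opcode in bits 21-30. */
static unsigned int get_op(uint32_t inst)  { return inst >> 26; }
static unsigned int get_xop(uint32_t inst) { return (inst >> 1) & 0x3ff; }

int main(void)
{
        uint32_t inst = 0x7c0018ce;     /* assumed encoding of "lvx v0,0,r3" */

        /* Expected output: op=31 xop=103, i.e. OP_31_XOP_LVX above. */
        printf("op=%u xop=%u\n", get_op(inst), get_xop(inst));
        return 0;
}
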
2 changes: 1 addition & 1 deletion arch/powerpc/kvm/Kconfig
@@ -69,7 +69,7 @@ config KVM_BOOK3S_64
select KVM_BOOK3S_64_HANDLER
select KVM
select KVM_BOOK3S_PR_POSSIBLE if !KVM_BOOK3S_HV_POSSIBLE
select SPAPR_TCE_IOMMU if IOMMU_SUPPORT && (PPC_SERIES || PPC_POWERNV)
select SPAPR_TCE_IOMMU if IOMMU_SUPPORT && (PPC_PSERIES || PPC_POWERNV)
---help---
Support running unmodified book3s_64 and book3s_32 guest kernels
in virtual machines on book3s_64 host processors.
38 changes: 25 additions & 13 deletions arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -1269,6 +1269,11 @@ static unsigned long resize_hpt_rehash_hpte(struct kvm_resize_hpt *resize,
/* Nothing to do */
goto out;

if (cpu_has_feature(CPU_FTR_ARCH_300)) {
rpte = be64_to_cpu(hptep[1]);
vpte = hpte_new_to_old_v(vpte, rpte);
}

/* Unmap */
rev = &old->rev[idx];
guest_rpte = rev->guest_rpte;
@@ -1298,7 +1303,6 @@ static unsigned long resize_hpt_rehash_hpte(struct kvm_resize_hpt *resize,

/* Reload PTE after unmap */
vpte = be64_to_cpu(hptep[0]);

BUG_ON(vpte & HPTE_V_VALID);
BUG_ON(!(vpte & HPTE_V_ABSENT));

@@ -1307,6 +1311,12 @@ static unsigned long resize_hpt_rehash_hpte(struct kvm_resize_hpt *resize,
goto out;

rpte = be64_to_cpu(hptep[1]);

if (cpu_has_feature(CPU_FTR_ARCH_300)) {
vpte = hpte_new_to_old_v(vpte, rpte);
rpte = hpte_new_to_old_r(rpte);
}

pshift = kvmppc_hpte_base_page_shift(vpte, rpte);
avpn = HPTE_V_AVPN_VAL(vpte) & ~(((1ul << pshift) - 1) >> 23);
pteg = idx / HPTES_PER_GROUP;
@@ -1337,17 +1347,17 @@ static unsigned long resize_hpt_rehash_hpte(struct kvm_resize_hpt *resize,
}

new_pteg = hash & new_hash_mask;
if (vpte & HPTE_V_SECONDARY) {
BUG_ON(~pteg != (hash & old_hash_mask));
new_pteg = ~new_pteg;
} else {
BUG_ON(pteg != (hash & old_hash_mask));
}
if (vpte & HPTE_V_SECONDARY)
new_pteg = ~hash & new_hash_mask;

new_idx = new_pteg * HPTES_PER_GROUP + (idx % HPTES_PER_GROUP);
new_hptep = (__be64 *)(new->virt + (new_idx << 4));

replace_vpte = be64_to_cpu(new_hptep[0]);
if (cpu_has_feature(CPU_FTR_ARCH_300)) {
unsigned long replace_rpte = be64_to_cpu(new_hptep[1]);
replace_vpte = hpte_new_to_old_v(replace_vpte, replace_rpte);
}

if (replace_vpte & (HPTE_V_VALID | HPTE_V_ABSENT)) {
BUG_ON(new->order >= old->order);
@@ -1363,6 +1373,11 @@ static unsigned long resize_hpt_rehash_hpte(struct kvm_resize_hpt *resize,
/* Discard the previous HPTE */
}

if (cpu_has_feature(CPU_FTR_ARCH_300)) {
rpte = hpte_old_to_new_r(vpte, rpte);
vpte = hpte_old_to_new_v(vpte);
}

new_hptep[1] = cpu_to_be64(rpte);
new->rev[new_idx].guest_rpte = guest_rpte;
/* No need for a barrier, since new HPT isn't active */
@@ -1380,12 +1395,6 @@ static int resize_hpt_rehash(struct kvm_resize_hpt *resize)
unsigned long i;
int rc;

/*
* resize_hpt_rehash_hpte() doesn't handle the new-format HPTEs
* that POWER9 uses, and could well hit a BUG_ON on POWER9.
*/
if (cpu_has_feature(CPU_FTR_ARCH_300))
return -EIO;
for (i = 0; i < kvmppc_hpt_npte(&kvm->arch.hpt); i++) {
rc = resize_hpt_rehash_hpte(resize, i);
if (rc != 0)
@@ -1416,6 +1425,9 @@ static void resize_hpt_pivot(struct kvm_resize_hpt *resize)

synchronize_srcu_expedited(&kvm->srcu);

if (cpu_has_feature(CPU_FTR_ARCH_300))
kvmppc_setup_partition_table(kvm);

resize_hpt_debug(resize, "resize_hpt_pivot() done\n");
}

16 changes: 12 additions & 4 deletions arch/powerpc/kvm/book3s_hv.c
@@ -1008,8 +1008,6 @@ static int kvmppc_emulate_doorbell_instr(struct kvm_vcpu *vcpu)
struct kvm *kvm = vcpu->kvm;
struct kvm_vcpu *tvcpu;

if (!cpu_has_feature(CPU_FTR_ARCH_300))
return EMULATE_FAIL;
if (kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst) != EMULATE_DONE)
return RESUME_GUEST;
if (get_op(inst) != 31)
@@ -1059,6 +1057,7 @@ static int kvmppc_emulate_doorbell_instr(struct kvm_vcpu *vcpu)
return RESUME_GUEST;
}

/* Called with vcpu->arch.vcore->lock held */
static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
struct task_struct *tsk)
{
@@ -1179,7 +1178,10 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
swab32(vcpu->arch.emul_inst) :
vcpu->arch.emul_inst;
if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) {
/* Need vcore unlocked to call kvmppc_get_last_inst */
spin_unlock(&vcpu->arch.vcore->lock);
r = kvmppc_emulate_debug_inst(run, vcpu);
spin_lock(&vcpu->arch.vcore->lock);
} else {
kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
r = RESUME_GUEST;
@@ -1194,8 +1196,13 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
*/
case BOOK3S_INTERRUPT_H_FAC_UNAVAIL:
r = EMULATE_FAIL;
if ((vcpu->arch.hfscr >> 56) == FSCR_MSGP_LG)
if (((vcpu->arch.hfscr >> 56) == FSCR_MSGP_LG) &&
cpu_has_feature(CPU_FTR_ARCH_300)) {
/* Need vcore unlocked to call kvmppc_get_last_inst */
spin_unlock(&vcpu->arch.vcore->lock);
r = kvmppc_emulate_doorbell_instr(vcpu);
spin_lock(&vcpu->arch.vcore->lock);
}
if (r == EMULATE_FAIL) {
kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
r = RESUME_GUEST;
@@ -2946,13 +2953,14 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
/* make sure updates to secondary vcpu structs are visible now */
smp_mb();

preempt_enable();

for (sub = 0; sub < core_info.n_subcores; ++sub) {
pvc = core_info.vc[sub];
post_guest_process(pvc, pvc == vc);
}

spin_lock(&vc->lock);
preempt_enable();

out:
vc->vcore_state = VCORE_INACTIVE;
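
These hunks implement the first item in the commit message: kvmppc_get_last_inst() may have to fault in guest memory and therefore sleep, so the vcore spinlock is dropped around the call, and kvmppc_run_core() re-enables preemption before post_guest_process(). A generic, non-KVM illustration of the unlock/call/relock pattern, using plain pthreads as an assumed stand-in:

#include <pthread.h>
#include <stdio.h>

static pthread_spinlock_t lock;

/* Stand-in for kvmppc_get_last_inst(): may fault in guest memory, so it can
 * sleep and must not run with a spinlock held. */
static int read_guest_instruction(unsigned int *inst)
{
        *inst = 0x7c0018ce;     /* pretend an instruction was fetched */
        return 0;
}

static void handle_exit(void)
{
        unsigned int inst;

        pthread_spin_lock(&lock);
        /* ... work that genuinely needs the lock ... */

        /* Same shape as the fix: drop the lock around the sleeping call,
         * then re-take it before continuing. */
        pthread_spin_unlock(&lock);
        read_guest_instruction(&inst);
        pthread_spin_lock(&lock);

        /* ... more work under the lock ... */
        pthread_spin_unlock(&lock);
        printf("emulated inst %#x\n", inst);
}

int main(void)
{
        pthread_spin_init(&lock, PTHREAD_PROCESS_PRIVATE);
        handle_exit();
        pthread_spin_destroy(&lock);
        return 0;
}
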
3 changes: 2 additions & 1 deletion arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -413,10 +413,11 @@ FTR_SECTION_ELSE
/* On P9 we use the split_info for coordinating LPCR changes */
lwz r4, KVM_SPLIT_DO_SET(r6)
cmpwi r4, 0
beq 63f
beq 1f
mr r3, r6
bl kvmhv_p9_set_lpcr
nop
1:
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
63:
/* Order load of vcpu after load of vcore */
4 changes: 1 addition & 3 deletions arch/powerpc/kvm/book3s_interrupts.S
@@ -96,7 +96,7 @@ kvm_start_entry:

kvm_start_lightweight:
/* Copy registers into shadow vcpu so we can access them in real mode */
GET_SHADOW_VCPU(r3)
mr r3, r4
bl FUNC(kvmppc_copy_to_svcpu)
nop
REST_GPR(4, r1)
@@ -165,9 +165,7 @@ after_sprg3_load:
stw r12, VCPU_TRAP(r3)

/* Transfer reg values from shadow vcpu back to vcpu struct */
/* On 64-bit, interrupts are still off at this point */

GET_SHADOW_VCPU(r4)
bl FUNC(kvmppc_copy_from_svcpu)
nop

20 changes: 9 additions & 11 deletions arch/powerpc/kvm/book3s_pr.c
@@ -121,7 +121,7 @@ static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu)
#ifdef CONFIG_PPC_BOOK3S_64
struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
if (svcpu->in_use) {
kvmppc_copy_from_svcpu(vcpu, svcpu);
kvmppc_copy_from_svcpu(vcpu);
}
memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb));
to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max;
@@ -143,9 +143,10 @@ static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu)
}

/* Copy data needed by real-mode code from vcpu to shadow vcpu */
void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu,
struct kvm_vcpu *vcpu)
void kvmppc_copy_to_svcpu(struct kvm_vcpu *vcpu)
{
struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);

svcpu->gpr[0] = vcpu->arch.gpr[0];
svcpu->gpr[1] = vcpu->arch.gpr[1];
svcpu->gpr[2] = vcpu->arch.gpr[2];
@@ -177,17 +178,14 @@ void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu,
if (cpu_has_feature(CPU_FTR_ARCH_207S))
vcpu->arch.entry_ic = mfspr(SPRN_IC);
svcpu->in_use = true;

svcpu_put(svcpu);
}

/* Copy data touched by real-mode code from shadow vcpu back to vcpu */
void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
struct kvmppc_book3s_shadow_vcpu *svcpu)
void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu)
{
/*
* vcpu_put would just call us again because in_use hasn't
* been updated yet.
*/
preempt_disable();
struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);

/*
* Maybe we were already preempted and synced the svcpu from
@@ -233,7 +231,7 @@ void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
svcpu->in_use = false;

out:
preempt_enable();
svcpu_put(svcpu);
}

static int kvmppc_core_check_requests_pr(struct kvm_vcpu *vcpu)
36 changes: 36 additions & 0 deletions arch/powerpc/kvm/emulate_loadstore.c
@@ -58,6 +58,18 @@ static bool kvmppc_check_vsx_disabled(struct kvm_vcpu *vcpu)
}
#endif /* CONFIG_VSX */

#ifdef CONFIG_ALTIVEC
static bool kvmppc_check_altivec_disabled(struct kvm_vcpu *vcpu)
{
if (!(kvmppc_get_msr(vcpu) & MSR_VEC)) {
kvmppc_core_queue_vec_unavail(vcpu);
return true;
}

return false;
}
#endif /* CONFIG_ALTIVEC */

/*
* XXX to do:
* lfiwax, lfiwzx
@@ -98,6 +110,7 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_NONE;
vcpu->arch.mmio_sp64_extend = 0;
vcpu->arch.mmio_sign_extend = 0;
vcpu->arch.mmio_vmx_copy_nums = 0;

switch (get_op(inst)) {
case 31:
@@ -459,6 +472,29 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
rs, 4, 1);
break;
#endif /* CONFIG_VSX */

#ifdef CONFIG_ALTIVEC
case OP_31_XOP_LVX:
if (kvmppc_check_altivec_disabled(vcpu))
return EMULATE_DONE;
vcpu->arch.vaddr_accessed &= ~0xFULL;
vcpu->arch.paddr_accessed &= ~0xFULL;
vcpu->arch.mmio_vmx_copy_nums = 2;
emulated = kvmppc_handle_load128_by2x64(run, vcpu,
KVM_MMIO_REG_VMX|rt, 1);
break;

case OP_31_XOP_STVX:
if (kvmppc_check_altivec_disabled(vcpu))
return EMULATE_DONE;
vcpu->arch.vaddr_accessed &= ~0xFULL;
vcpu->arch.paddr_accessed &= ~0xFULL;
vcpu->arch.mmio_vmx_copy_nums = 2;
emulated = kvmppc_handle_store128_by2x64(run, vcpu,
rs, 1);
break;
#endif /* CONFIG_ALTIVEC */

default:
emulated = EMULATE_FAIL;
break;