Merge branch 'topic/ppc-kvm' into next
Merge some KVM patches we are keeping in a topic branch in case there
are any merge conflicts that need resolving.
mpe committed Aug 26, 2021
2 parents 806c0e6 + 0c8fb65 commit 465e333
Showing 10 changed files with 180 additions and 139 deletions.
1 change: 1 addition & 0 deletions arch/powerpc/include/asm/kvm_book3s_64.h
@@ -39,6 +39,7 @@ struct kvm_nested_guest {
 	pgd_t *shadow_pgtable;	/* our page table for this guest */
 	u64 l1_gr_to_hr;	/* L1's addr of part'n-scoped table */
 	u64 process_table;	/* process table entry for this guest */
+	u64 hfscr;		/* HFSCR that the L1 requested for this nested guest */
 	long refcnt;		/* number of pointers to this struct */
 	struct mutex tlb_lock;	/* serialize page faults and tlbies */
 	struct kvm_nested_guest *next;
2 changes: 2 additions & 0 deletions arch/powerpc/include/asm/kvm_host.h
@@ -811,6 +811,8 @@ struct kvm_vcpu_arch {

 	u32 online;

+	u64 hfscr_permitted;	/* A mask of permitted HFSCR facilities */
+
 	/* For support of nested guests */
 	struct kvm_nested_guest *nested;
 	u32 nested_vcpu_id;
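Taken together, the two additions above give the L0 hypervisor what it needs to sanitise nested-guest HFSCR values: hfscr_permitted is snapshotted at vcpu creation time (see the book3s_hv.c hunk below), and the value the L1 actually requested is remembered per nested guest so that facility-unavailable interrupts can be routed later. A minimal sketch of the intended masking, assuming a hypothetical l2_hfscr value passed in via H_ENTER_NESTED and a kvm_nested_guest pointer gp (illustrative names, not the literal code in this merge):

	/* Illustrative only: remember what the L1 asked for, but run
	 * the L2 with a value clamped to what the L0 permits. */
	gp->hfscr = l2_hfscr;
	vcpu->arch.hfscr = l2_hfscr & vcpu->arch.hfscr_permitted;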
7 changes: 7 additions & 0 deletions arch/powerpc/include/asm/pmc.h
@@ -34,6 +34,13 @@ static inline void ppc_set_pmu_inuse(int inuse)
 #endif
 }

+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+static inline int ppc_get_pmu_inuse(void)
+{
+	return get_paca()->pmcregs_in_use;
+}
+#endif
+
 extern void power4_enable_pmcs(void);

 #else /* CONFIG_PPC64 */
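ppc_get_pmu_inuse() is the read-side counterpart of the existing ppc_set_pmu_inuse(): it reads back the pmcregs_in_use flag that the setter records in this CPU's PACA, and neither helper touches the PMU hardware itself. A hedged usage sketch, assuming CONFIG_KVM_BOOK3S_HV_POSSIBLE is set:

	/* Illustrative pairing with the existing setter: the new
	 * helper just reads back get_paca()->pmcregs_in_use. */
	ppc_set_pmu_inuse(1);		/* mark PMU registers as live */
	WARN_ON(!ppc_get_pmu_inuse());	/* now reports 1 */
	ppc_set_pmu_inuse(0);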
3 changes: 2 additions & 1 deletion arch/powerpc/include/asm/reg.h
@@ -415,6 +415,7 @@
 #define FSCR_TAR	__MASK(FSCR_TAR_LG)
 #define FSCR_EBB	__MASK(FSCR_EBB_LG)
 #define FSCR_DSCR	__MASK(FSCR_DSCR_LG)
+#define FSCR_INTR_CAUSE	(ASM_CONST(0xFF) << 56)	/* interrupt cause */
 #define SPRN_HFSCR	0xbe	/* HV=1 Facility Status & Control Register */
 #define HFSCR_PREFIX	__MASK(FSCR_PREFIX_LG)
 #define HFSCR_MSGP	__MASK(FSCR_MSGP_LG)
@@ -426,7 +427,7 @@
 #define HFSCR_DSCR	__MASK(FSCR_DSCR_LG)
 #define HFSCR_VECVSX	__MASK(FSCR_VECVSX_LG)
 #define HFSCR_FP	__MASK(FSCR_FP_LG)
-#define HFSCR_INTR_CAUSE (ASM_CONST(0xFF) << 56)	/* interrupt cause */
+#define HFSCR_INTR_CAUSE FSCR_INTR_CAUSE
 #define SPRN_TAR	0x32f	/* Target Address Register */
 #define SPRN_LPCR	0x13E	/* LPAR Control Register */
 #define LPCR_VPM0	ASM_CONST(0x8000000000000000)
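Both FSCR and HFSCR keep the cause of a facility-unavailable interrupt in their top byte: the interrupt-cause field holds the LG bit number of the facility that trapped, which is why a single mask can now serve both registers. A short sketch of how a handler recovers and tests the facility, mirroring the arithmetic used in the book3s_hv.c hunk below:

	/* Illustrative: extract the facility number from the top byte
	 * of the HFSCR after an HFU interrupt. */
	u64 hfscr = mfspr(SPRN_HFSCR);
	u64 cause = (hfscr & HFSCR_INTR_CAUSE) >> 56;	/* facility's LG bit number */
	bool enabled = hfscr & (1UL << cause);		/* was that facility enabled? */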
12 changes: 7 additions & 5 deletions arch/powerpc/kvm/book3s_64_mmu_radix.c
@@ -44,6 +44,9 @@ unsigned long __kvmhv_copy_tofrom_guest_radix(int lpid, int pid,
 					  (to != NULL) ? __pa(to): 0,
 					  (from != NULL) ? __pa(from): 0, n);

+	if (eaddr & (0xFFFUL << 52))
+		return ret;
+
 	quadrant = 1;
 	if (!pid)
 		quadrant = 2;
@@ -65,10 +68,12 @@ unsigned long __kvmhv_copy_tofrom_guest_radix(int lpid, int pid,
 	}
 	isync();

+	pagefault_disable();
 	if (is_load)
-		ret = copy_from_user_nofault(to, (const void __user *)from, n);
+		ret = __copy_from_user_inatomic(to, (const void __user *)from, n);
 	else
-		ret = copy_to_user_nofault((void __user *)to, from, n);
+		ret = __copy_to_user_inatomic((void __user *)to, from, n);
+	pagefault_enable();

 	/* switch the pid first to avoid running host with unallocated pid */
 	if (quadrant == 1 && pid != old_pid)
@@ -81,7 +86,6 @@ unsigned long __kvmhv_copy_tofrom_guest_radix(int lpid, int pid,

 	return ret;
 }
-EXPORT_SYMBOL_GPL(__kvmhv_copy_tofrom_guest_radix);

 static long kvmhv_copy_tofrom_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr,
 					  void *to, void *from, unsigned long n)
@@ -117,14 +121,12 @@ long kvmhv_copy_from_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr, void *to,

 	return ret;
 }
-EXPORT_SYMBOL_GPL(kvmhv_copy_from_guest_radix);

 long kvmhv_copy_to_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr, void *from,
 			       unsigned long n)
 {
 	return kvmhv_copy_tofrom_guest_radix(vcpu, eaddr, NULL, from, n);
 }
-EXPORT_SYMBOL_GPL(kvmhv_copy_to_guest_radix);

 int kvmppc_mmu_walk_radix_tree(struct kvm_vcpu *vcpu, gva_t eaddr,
 			       struct kvmppc_pte *gpte, u64 root,
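Two behavioural points in the hunks above deserve a gloss: a radix guest effective address fits in 52 bits, so any of eaddr's top twelve bits being set means the address cannot be remapped through quadrant 1/2 and the copy now fails early (ret still holds n, following the bytes-not-copied convention); and the _nofault copy helpers are swapped for raw _inatomic copies bracketed by pagefault_disable()/pagefault_enable(), presumably because the quadrant addresses used here are not ordinary user addresses and the _nofault wrappers' access_ok() filtering would reject them. The EXPORT_SYMBOL_GPL() removals simply stop exporting these helpers to modules. A condensed sketch of the resulting path:

	/* Sketch: early EA sanity check plus non-faulting copy; the
	 * MMU quadrant setup and PID switching are elided. */
	if (eaddr & (0xFFFUL << 52))	/* EA bits 52..63 must be clear */
		return ret;		/* ret == n: nothing copied */

	pagefault_disable();		/* a fault here must not sleep */
	if (is_load)
		ret = __copy_from_user_inatomic(to, (const void __user *)from, n);
	else
		ret = __copy_to_user_inatomic((void __user *)to, from, n);
	pagefault_enable();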
88 changes: 72 additions & 16 deletions arch/powerpc/kvm/book3s_hv.c
@@ -59,6 +59,7 @@
 #include <asm/kvm_book3s.h>
 #include <asm/mmu_context.h>
 #include <asm/lppaca.h>
+#include <asm/pmc.h>
 #include <asm/processor.h>
 #include <asm/cputhreads.h>
 #include <asm/page.h>
@@ -1679,6 +1680,21 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
 			r = RESUME_GUEST;
 		}
 		break;
+
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+	case BOOK3S_INTERRUPT_HV_SOFTPATCH:
+		/*
+		 * This occurs for various TM-related instructions that
+		 * we need to emulate on POWER9 DD2.2.  We have already
+		 * handled the cases where the guest was in real-suspend
+		 * mode and was transitioning to transactional state.
+		 */
+		r = kvmhv_p9_tm_emulation(vcpu);
+		if (r != -1)
+			break;
+		fallthrough; /* go to facility unavailable handler */
+#endif
+
 	/*
 	 * This occurs if the guest (kernel or userspace), does something that
 	 * is prohibited by HFSCR.
@@ -1697,18 +1713,6 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
 		}
 		break;

-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-	case BOOK3S_INTERRUPT_HV_SOFTPATCH:
-		/*
-		 * This occurs for various TM-related instructions that
-		 * we need to emulate on POWER9 DD2.2.  We have already
-		 * handled the cases where the guest was in real-suspend
-		 * mode and was transitioning to transactional state.
-		 */
-		r = kvmhv_p9_tm_emulation(vcpu);
-		break;
-#endif
-
 	case BOOK3S_INTERRUPT_HV_RM_HARD:
 		r = RESUME_PASSTHROUGH;
 		break;
@@ -1727,6 +1731,7 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,

 static int kvmppc_handle_nested_exit(struct kvm_vcpu *vcpu)
 {
+	struct kvm_nested_guest *nested = vcpu->arch.nested;
 	int r;
 	int srcu_idx;

@@ -1811,9 +1816,41 @@ static int kvmppc_handle_nested_exit(struct kvm_vcpu *vcpu)
 		 * mode and was transitioning to transactional state.
 		 */
 		r = kvmhv_p9_tm_emulation(vcpu);
-		break;
+		if (r != -1)
+			break;
+		fallthrough; /* go to facility unavailable handler */
 #endif

+	case BOOK3S_INTERRUPT_H_FAC_UNAVAIL: {
+		u64 cause = vcpu->arch.hfscr >> 56;
+
+		/*
+		 * Only pass HFU interrupts to the L1 if the facility is
+		 * permitted but disabled by the L1's HFSCR, otherwise
+		 * the interrupt does not make sense to the L1 so turn
+		 * it into a HEAI.
+		 */
+		if (!(vcpu->arch.hfscr_permitted & (1UL << cause)) ||
+		    (nested->hfscr & (1UL << cause))) {
+			vcpu->arch.trap = BOOK3S_INTERRUPT_H_EMUL_ASSIST;
+
+			/*
+			 * If the fetch failed, return to guest and
+			 * try executing it again.
+			 */
+			r = kvmppc_get_last_inst(vcpu, INST_GENERIC,
+						 &vcpu->arch.emul_inst);
+			if (r != EMULATE_DONE)
+				r = RESUME_GUEST;
+			else
+				r = RESUME_HOST;
+		} else {
+			r = RESUME_HOST;
+		}
+
+		break;
+	}
+
 	case BOOK3S_INTERRUPT_HV_RM_HARD:
 		vcpu->arch.trap = 0;
 		r = RESUME_GUEST;
@@ -2684,6 +2721,7 @@ static int kvmppc_core_vcpu_create_hv(struct kvm_vcpu *vcpu)
 	spin_lock_init(&vcpu->arch.vpa_update_lock);
 	spin_lock_init(&vcpu->arch.tbacct_lock);
 	vcpu->arch.busy_preempt = TB_NIL;
+	vcpu->arch.shregs.msr = MSR_ME;
 	vcpu->arch.intr_msr = MSR_SF | MSR_ME;

 	/*
@@ -2705,6 +2743,8 @@ static int kvmppc_core_vcpu_create_hv(struct kvm_vcpu *vcpu)
 	if (cpu_has_feature(CPU_FTR_TM_COMP))
 		vcpu->arch.hfscr |= HFSCR_TM;

+	vcpu->arch.hfscr_permitted = vcpu->arch.hfscr;
+
 	kvmppc_mmu_book3s_hv_init(vcpu);

 	vcpu->arch.state = KVMPPC_VCPU_NOTREADY;
@@ -3727,7 +3767,6 @@ static void load_spr_state(struct kvm_vcpu *vcpu)
 	mtspr(SPRN_EBBHR, vcpu->arch.ebbhr);
 	mtspr(SPRN_EBBRR, vcpu->arch.ebbrr);
 	mtspr(SPRN_BESCR, vcpu->arch.bescr);
-	mtspr(SPRN_WORT, vcpu->arch.wort);
 	mtspr(SPRN_TIDR, vcpu->arch.tid);
 	mtspr(SPRN_AMR, vcpu->arch.amr);
 	mtspr(SPRN_UAMOR, vcpu->arch.uamor);
@@ -3754,7 +3793,6 @@ static void store_spr_state(struct kvm_vcpu *vcpu)
 	vcpu->arch.ebbhr = mfspr(SPRN_EBBHR);
 	vcpu->arch.ebbrr = mfspr(SPRN_EBBRR);
 	vcpu->arch.bescr = mfspr(SPRN_BESCR);
-	vcpu->arch.wort = mfspr(SPRN_WORT);
 	vcpu->arch.tid = mfspr(SPRN_TIDR);
 	vcpu->arch.amr = mfspr(SPRN_AMR);
 	vcpu->arch.uamor = mfspr(SPRN_UAMOR);
Expand Down Expand Up @@ -3786,7 +3824,6 @@ static void restore_p9_host_os_sprs(struct kvm_vcpu *vcpu,
struct p9_host_os_sprs *host_os_sprs)
{
mtspr(SPRN_PSPB, 0);
mtspr(SPRN_WORT, 0);
mtspr(SPRN_UAMOR, 0);

mtspr(SPRN_DSCR, host_os_sprs->dscr);
@@ -3852,6 +3889,18 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
 	    cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST))
 		kvmppc_restore_tm_hv(vcpu, vcpu->arch.shregs.msr, true);

+#ifdef CONFIG_PPC_PSERIES
+	if (kvmhv_on_pseries()) {
+		barrier();
+		if (vcpu->arch.vpa.pinned_addr) {
+			struct lppaca *lp = vcpu->arch.vpa.pinned_addr;
+			get_lppaca()->pmcregs_in_use = lp->pmcregs_in_use;
+		} else {
+			get_lppaca()->pmcregs_in_use = 1;
+		}
+		barrier();
+	}
+#endif
 	kvmhv_load_guest_pmu(vcpu);

 	msr_check_and_set(MSR_FP | MSR_VEC | MSR_VSX);
@@ -3986,6 +4035,13 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
 	save_pmu |= nesting_enabled(vcpu->kvm);

 	kvmhv_save_guest_pmu(vcpu, save_pmu);
+#ifdef CONFIG_PPC_PSERIES
+	if (kvmhv_on_pseries()) {
+		barrier();
+		get_lppaca()->pmcregs_in_use = ppc_get_pmu_inuse();
+		barrier();
+	}
+#endif

 	vc->entry_exit_map = 0x101;
 	vc->in_guest = 0;
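Two themes run through the book3s_hv.c changes. The PMU hunks mirror the guest's pmcregs_in_use flag into the L0's own lppaca around guest entry and exit, so that when the L0 itself runs under a hypervisor (kvmhv_on_pseries()) the layer below knows whether PMU registers must be saved; on exit the host value is restored via the new ppc_get_pmu_inuse(). The nested HFU handling, meanwhile, reduces to a three-way decision on the facility bit. That decision, restated as a hedged helper (hfu_belongs_to_l1 is a hypothetical name; the tests match the diff):

	/* Illustrative decision table for a nested HFU interrupt.
	 * 'cause' is the facility bit number taken from the top byte
	 * of the HFSCR. */
	static bool hfu_belongs_to_l1(struct kvm_vcpu *vcpu,
				      struct kvm_nested_guest *nested, u64 cause)
	{
		/* L0 never permitted the facility: the L1 cannot act on
		 * an HFU for it, so it is converted to an emulation
		 * assist interrupt (HEAI) instead. */
		if (!(vcpu->arch.hfscr_permitted & (1UL << cause)))
			return false;
		/* L1 had the facility enabled, yet it trapped: the
		 * interrupt makes no sense to the L1, so again HEAI. */
		if (nested->hfscr & (1UL << cause))
			return false;
		/* Permitted by L0 but disabled by the L1's HFSCR: a
		 * genuine HFU the L1 asked for; reflect it to the L1. */
		return true;
	}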
