Merge branch 'topic/ppc-kvm' into next
Pull in some more ppc KVM patches we are keeping in our topic branch.

In particular this brings in the series to add H_RPT_INVALIDATE.
mpe committed Jun 22, 2021
2 parents 4a21192 + 51696f3 commit a736143
Showing 16 changed files with 492 additions and 22 deletions.
18 changes: 18 additions & 0 deletions Documentation/virt/kvm/api.rst
@@ -6362,6 +6362,24 @@ default.

See Documentation/x86/sgx/2.Kernel-internals.rst for more details.

7.26 KVM_CAP_PPC_RPT_INVALIDATE
-------------------------------

:Capability: KVM_CAP_PPC_RPT_INVALIDATE
:Architectures: ppc
:Type: vm

This capability indicates that the kernel is capable of handling
the H_RPT_INVALIDATE hcall.

In order to enable the use of H_RPT_INVALIDATE in the guest,
user space might have to advertise it to the guest. For example,
an IBM pSeries (sPAPR) guest starts using it if "hcall-rpt-invalidate" is
present in the "ibm,hypertas-functions" device-tree property.

This capability is enabled for hypervisors on platforms like POWER9
that support the radix MMU.

8. Other capabilities.
======================

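As a quick illustration of the new capability from the userspace side, here is a minimal sketch (not part of this commit) that probes KVM_CAP_PPC_RPT_INVALIDATE with the standard KVM_CHECK_EXTENSION ioctl, the step a VMM would take before advertising "hcall-rpt-invalidate" to the guest; error handling is reduced to the essentials:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>	/* KVM_CAP_PPC_RPT_INVALIDATE needs headers that include this series */

int main(void)
{
	/* Open the KVM subsystem and create a VM; the capability is
	 * advertised per-VM, so it is queried on the VM fd. */
	int kvm = open("/dev/kvm", O_RDWR);
	if (kvm < 0) {
		perror("open /dev/kvm");
		return 1;
	}

	int vm = ioctl(kvm, KVM_CREATE_VM, 0);
	if (vm < 0) {
		perror("KVM_CREATE_VM");
		return 1;
	}

	/* KVM_CHECK_EXTENSION returns > 0 when the kernel can handle the hcall. */
	int has_rpt = ioctl(vm, KVM_CHECK_EXTENSION, KVM_CAP_PPC_RPT_INVALIDATE);
	printf("H_RPT_INVALIDATE handling %savailable\n", has_rpt > 0 ? "" : "not ");
	return 0;
}

A VMM such as QEMU would perform an equivalent check before adding "hcall-rpt-invalidate" to the "ibm,hypertas-functions" property it exposes to the guest.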
1 change: 1 addition & 0 deletions arch/powerpc/include/asm/book3s/64/mmu.h
@@ -19,6 +19,7 @@ struct mmu_psize_def {
int penc[MMU_PAGE_COUNT]; /* HPTE encoding */
unsigned int tlbiel; /* tlbiel supported for that page size */
unsigned long avpnm; /* bits to mask out in AVPN in the HPTE */
unsigned long h_rpt_pgsize; /* H_RPT_INVALIDATE page size encoding */
union {
unsigned long sllp; /* SLB L||LP (exact mask to use in slbmte) */
unsigned long ap; /* Ap encoding used by PowerISA 3.0 */
4 changes: 4 additions & 0 deletions arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
@@ -4,6 +4,10 @@

#include <asm/hvcall.h>

#define RIC_FLUSH_TLB 0
#define RIC_FLUSH_PWC 1
#define RIC_FLUSH_ALL 2

struct vm_area_struct;
struct mm_struct;
struct mmu_gather;
30 changes: 30 additions & 0 deletions arch/powerpc/include/asm/cputhreads.h
@@ -98,6 +98,36 @@ static inline int cpu_last_thread_sibling(int cpu)
return cpu | (threads_per_core - 1);
}

/*
* tlb_thread_siblings are siblings which share a TLB. This is not
* architected, is not something a hypervisor could emulate and a future
* CPU may change behaviour even in compat mode, so this should only be
* used on PowerNV, and only with care.
*/
static inline int cpu_first_tlb_thread_sibling(int cpu)
{
if (cpu_has_feature(CPU_FTR_ARCH_300) && (threads_per_core == 8))
return cpu & ~0x6; /* Big Core */
else
return cpu_first_thread_sibling(cpu);
}

static inline int cpu_last_tlb_thread_sibling(int cpu)
{
if (cpu_has_feature(CPU_FTR_ARCH_300) && (threads_per_core == 8))
return cpu | 0x6; /* Big Core */
else
return cpu_last_thread_sibling(cpu);
}

static inline int cpu_tlb_thread_sibling_step(void)
{
if (cpu_has_feature(CPU_FTR_ARCH_300) && (threads_per_core == 8))
return 2; /* Big Core */
else
return 1;
}

static inline u32 get_tensr(void)
{
#ifdef CONFIG_BOOKE
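To make the big-core grouping above concrete, the following standalone sketch (plain userspace C, not kernel code; it assumes the CPU_FTR_ARCH_300 case with threads_per_core == 8) mirrors the masking in the new helpers and prints which hardware threads of a core share a TLB:

#include <stdio.h>

/* Mirrors cpu_first/last_tlb_thread_sibling() and the iteration step for a
 * POWER9 "big core" (8 threads fused from two 4-thread small cores). */
static int first_tlb_sibling(int cpu) { return cpu & ~0x6; }
static int last_tlb_sibling(int cpu)  { return cpu | 0x6; }
static int tlb_sibling_step(void)     { return 2; }

int main(void)
{
	for (int cpu = 0; cpu < 8; cpu++) {
		printf("thread %d shares a TLB with:", cpu);
		for (int i = first_tlb_sibling(cpu); i <= last_tlb_sibling(cpu);
		     i += tlb_sibling_step())
			printf(" %d", i);
		printf("\n");
	}
	return 0;
}

With this masking, even-numbered threads {0, 2, 4, 6} form one TLB group and odd-numbered threads {1, 3, 5, 7} the other, which is why cpu_tlb_thread_sibling_step() returns 2 in the big-core case.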
4 changes: 2 additions & 2 deletions arch/powerpc/include/asm/hvcall.h
@@ -423,9 +423,9 @@
#define H_RPTI_TYPE_NESTED 0x0001 /* Invalidate nested guest partition-scope */
#define H_RPTI_TYPE_TLB 0x0002 /* Invalidate TLB */
#define H_RPTI_TYPE_PWC 0x0004 /* Invalidate Page Walk Cache */
-/* Invalidate Process Table Entries if H_RPTI_TYPE_NESTED is clear */
+/* Invalidate caching of Process Table Entries if H_RPTI_TYPE_NESTED is clear */
#define H_RPTI_TYPE_PRT 0x0008
-/* Invalidate Partition Table Entries if H_RPTI_TYPE_NESTED is set */
+/* Invalidate caching of Partition Table Entries if H_RPTI_TYPE_NESTED is set */
#define H_RPTI_TYPE_PAT 0x0008
#define H_RPTI_TYPE_ALL (H_RPTI_TYPE_TLB | H_RPTI_TYPE_PWC | \
H_RPTI_TYPE_PRT)
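Since H_RPTI_TYPE_PRT and H_RPTI_TYPE_PAT share the bit value 0x0008, the reworded comments make explicit that H_RPTI_TYPE_NESTED decides whether that bit requests invalidation of process-table or partition-table caching. A small standalone illustration (a sketch, not code from this commit; constants copied from the hunk above):

#include <stdio.h>

#define H_RPTI_TYPE_NESTED	0x0001	/* partition-scoped (nested guest) request */
#define H_RPTI_TYPE_TLB		0x0002
#define H_RPTI_TYPE_PWC		0x0004
#define H_RPTI_TYPE_PRT		0x0008	/* process-table caching, NESTED clear */
#define H_RPTI_TYPE_PAT		0x0008	/* partition-table caching, NESTED set */

int main(void)
{
	/* Process-scoped request from an L1 guest: NESTED clear, so 0x0008 means PRT. */
	unsigned long l1_type = H_RPTI_TYPE_TLB | H_RPTI_TYPE_PWC | H_RPTI_TYPE_PRT;

	/* Partition-scoped request on behalf of a nested guest: NESTED set,
	 * so the same 0x0008 bit now means PAT. */
	unsigned long nested_type = H_RPTI_TYPE_NESTED | H_RPTI_TYPE_TLB |
				    H_RPTI_TYPE_PWC | H_RPTI_TYPE_PAT;

	printf("L1 type = 0x%lx, nested type = 0x%lx\n", l1_type, nested_type);
	return 0;
}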
3 changes: 3 additions & 0 deletions arch/powerpc/include/asm/kvm_book3s.h
@@ -307,6 +307,9 @@ void kvmhv_set_ptbl_entry(unsigned int lpid, u64 dw0, u64 dw1);
void kvmhv_release_all_nested(struct kvm *kvm);
long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu);
long kvmhv_do_nested_tlbie(struct kvm_vcpu *vcpu);
long do_h_rpt_invalidate_pat(struct kvm_vcpu *vcpu, unsigned long lpid,
unsigned long type, unsigned long pg_sizes,
unsigned long start, unsigned long end);
int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu,
u64 time_limit, unsigned long lpcr);
void kvmhv_save_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr);
12 changes: 12 additions & 0 deletions arch/powerpc/include/asm/mmu_context.h
@@ -215,6 +215,18 @@ static inline void mm_context_add_copro(struct mm_struct *mm) { }
static inline void mm_context_remove_copro(struct mm_struct *mm) { }
#endif

#if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE) && defined(CONFIG_PPC_RADIX_MMU)
void do_h_rpt_invalidate_prt(unsigned long pid, unsigned long lpid,
unsigned long type, unsigned long pg_sizes,
unsigned long start, unsigned long end);
#else
static inline void do_h_rpt_invalidate_prt(unsigned long pid,
unsigned long lpid,
unsigned long type,
unsigned long pg_sizes,
unsigned long start,
unsigned long end) { }
#endif

extern void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk);
27 changes: 22 additions & 5 deletions arch/powerpc/kvm/book3s_64_mmu_radix.c
@@ -21,6 +21,7 @@
#include <asm/pte-walk.h>
#include <asm/ultravisor.h>
#include <asm/kvm_book3s_uvmem.h>
#include <asm/plpar_wrappers.h>

/*
* Supported radix tree geometry.
@@ -318,9 +319,19 @@ void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr,
}

psi = shift_to_mmu_psize(pshift);
- rb = addr | (mmu_get_ap(psi) << PPC_BITLSHIFT(58));
- rc = plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(0, 0, 1),
- lpid, rb);
+
+ if (!firmware_has_feature(FW_FEATURE_RPT_INVALIDATE)) {
+ rb = addr | (mmu_get_ap(psi) << PPC_BITLSHIFT(58));
+ rc = plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(0, 0, 1),
+ lpid, rb);
+ } else {
+ rc = pseries_rpt_invalidate(lpid, H_RPTI_TARGET_CMMU,
+ H_RPTI_TYPE_NESTED |
+ H_RPTI_TYPE_TLB,
+ psize_to_rpti_pgsize(psi),
+ addr, addr + psize);
+ }
+
if (rc)
pr_err("KVM: TLB page invalidation hcall failed, rc=%ld\n", rc);
}
@@ -334,8 +345,14 @@ static void kvmppc_radix_flush_pwc(struct kvm *kvm, unsigned int lpid)
return;
}

- rc = plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(1, 0, 1),
- lpid, TLBIEL_INVAL_SET_LPID);
+ if (!firmware_has_feature(FW_FEATURE_RPT_INVALIDATE))
+ rc = plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(1, 0, 1),
+ lpid, TLBIEL_INVAL_SET_LPID);
+ else
+ rc = pseries_rpt_invalidate(lpid, H_RPTI_TARGET_CMMU,
+ H_RPTI_TYPE_NESTED |
+ H_RPTI_TYPE_PWC, H_RPTI_PAGE_ALL,
+ 0, -1UL);
if (rc)
pr_err("KVM: TLB PWC invalidation hcall failed, rc=%ld\n", rc);
}
102 changes: 96 additions & 6 deletions arch/powerpc/kvm/book3s_hv.c
@@ -76,6 +76,7 @@
#include <asm/kvm_book3s_uvmem.h>
#include <asm/ultravisor.h>
#include <asm/dtl.h>
#include <asm/plpar_wrappers.h>

#include "book3s.h"

@@ -922,6 +923,68 @@ static int kvmppc_get_yield_count(struct kvm_vcpu *vcpu)
return yield_count;
}

/*
* H_RPT_INVALIDATE hcall handler for nested guests.
*
* Handles only nested process-scoped invalidation requests in L0.
*/
static int kvmppc_nested_h_rpt_invalidate(struct kvm_vcpu *vcpu)
{
unsigned long type = kvmppc_get_gpr(vcpu, 6);
unsigned long pid, pg_sizes, start, end;

/*
* The partition-scoped invalidations aren't handled here in L0.
*/
if (type & H_RPTI_TYPE_NESTED)
return RESUME_HOST;

pid = kvmppc_get_gpr(vcpu, 4);
pg_sizes = kvmppc_get_gpr(vcpu, 7);
start = kvmppc_get_gpr(vcpu, 8);
end = kvmppc_get_gpr(vcpu, 9);

do_h_rpt_invalidate_prt(pid, vcpu->arch.nested->shadow_lpid,
type, pg_sizes, start, end);

kvmppc_set_gpr(vcpu, 3, H_SUCCESS);
return RESUME_GUEST;
}

static long kvmppc_h_rpt_invalidate(struct kvm_vcpu *vcpu,
unsigned long id, unsigned long target,
unsigned long type, unsigned long pg_sizes,
unsigned long start, unsigned long end)
{
if (!kvm_is_radix(vcpu->kvm))
return H_UNSUPPORTED;

if (end < start)
return H_P5;

/*
* Partition-scoped invalidation for nested guests.
*/
if (type & H_RPTI_TYPE_NESTED) {
if (!nesting_enabled(vcpu->kvm))
return H_FUNCTION;

/* Support only cores as target */
if (target != H_RPTI_TARGET_CMMU)
return H_P2;

return do_h_rpt_invalidate_pat(vcpu, id, type, pg_sizes,
start, end);
}

/*
* Process-scoped invalidation for L1 guests.
*/
do_h_rpt_invalidate_prt(id, vcpu->kvm->arch.lpid,
type, pg_sizes, start, end);
return H_SUCCESS;
}

int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
{
struct kvm *kvm = vcpu->kvm;
@@ -1105,6 +1168,14 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
if (!powernv_get_random_long(&vcpu->arch.regs.gpr[4]))
ret = H_HARDWARE;
break;
case H_RPT_INVALIDATE:
ret = kvmppc_h_rpt_invalidate(vcpu, kvmppc_get_gpr(vcpu, 4),
kvmppc_get_gpr(vcpu, 5),
kvmppc_get_gpr(vcpu, 6),
kvmppc_get_gpr(vcpu, 7),
kvmppc_get_gpr(vcpu, 8),
kvmppc_get_gpr(vcpu, 9));
break;

case H_SET_PARTITION_TABLE:
ret = H_FUNCTION;
@@ -1225,6 +1296,7 @@ static int kvmppc_hcall_impl_hv(unsigned long cmd)
case H_XIRR_X:
#endif
case H_PAGE_INIT:
case H_RPT_INVALIDATE:
return 1;
}

@@ -1748,6 +1820,23 @@ static int kvmppc_handle_nested_exit(struct kvm_vcpu *vcpu)
if (!xics_on_xive())
kvmppc_xics_rm_complete(vcpu, 0);
break;
case BOOK3S_INTERRUPT_SYSCALL:
{
unsigned long req = kvmppc_get_gpr(vcpu, 3);

/*
* The H_RPT_INVALIDATE hcalls issued by nested
* guests for process-scoped invalidations when
* GTSE=0, are handled here in L0.
*/
if (req == H_RPT_INVALIDATE) {
r = kvmppc_nested_h_rpt_invalidate(vcpu);
break;
}

r = RESUME_HOST;
break;
}
default:
r = RESUME_HOST;
break;
@@ -2820,7 +2909,7 @@ static void radix_flush_cpu(struct kvm *kvm, int cpu, struct kvm_vcpu *vcpu)
cpumask_t *cpu_in_guest;
int i;

- cpu = cpu_first_thread_sibling(cpu);
+ cpu = cpu_first_tlb_thread_sibling(cpu);
if (nested) {
cpumask_set_cpu(cpu, &nested->need_tlb_flush);
cpu_in_guest = &nested->cpu_in_guest;
@@ -2834,9 +2923,10 @@ static void radix_flush_cpu(struct kvm *kvm, int cpu, struct kvm_vcpu *vcpu)
* the other side is the first smp_mb() in kvmppc_run_core().
*/
smp_mb();
- for (i = 0; i < threads_per_core; ++i)
- if (cpumask_test_cpu(cpu + i, cpu_in_guest))
- smp_call_function_single(cpu + i, do_nothing, NULL, 1);
+ for (i = cpu; i <= cpu_last_tlb_thread_sibling(cpu);
+ i += cpu_tlb_thread_sibling_step())
+ if (cpumask_test_cpu(i, cpu_in_guest))
+ smp_call_function_single(i, do_nothing, NULL, 1);
}

static void kvmppc_prepare_radix_vcpu(struct kvm_vcpu *vcpu, int pcpu)
@@ -2867,8 +2957,8 @@ static void kvmppc_prepare_radix_vcpu(struct kvm_vcpu *vcpu, int pcpu)
*/
if (prev_cpu != pcpu) {
if (prev_cpu >= 0 &&
- cpu_first_thread_sibling(prev_cpu) !=
- cpu_first_thread_sibling(pcpu))
+ cpu_first_tlb_thread_sibling(prev_cpu) !=
+ cpu_first_tlb_thread_sibling(pcpu))
radix_flush_cpu(kvm, prev_cpu, vcpu);
if (nested)
nested->prev_cpu[vcpu->arch.nested_vcpu_id] = pcpu;
2 changes: 1 addition & 1 deletion arch/powerpc/kvm/book3s_hv_builtin.c
@@ -721,7 +721,7 @@ void kvmppc_check_need_tlb_flush(struct kvm *kvm, int pcpu,
* Thus we make all 4 threads use the same bit.
*/
if (cpu_has_feature(CPU_FTR_ARCH_300))
- pcpu = cpu_first_thread_sibling(pcpu);
+ pcpu = cpu_first_tlb_thread_sibling(pcpu);

if (nested)
need_tlb_flush = &nested->need_tlb_flush;