KVM: PPC: Book3S HV P9: Remove most of the vcore logic
The P9 path always uses one vcpu per vcore, so none of the vcore machinery (locks, stolen time accounting, blocking logic, the shared waitq, and so on) is required.

Remove most of it.

Signed-off-by: Nicholas Piggin <[email protected]>
Signed-off-by: Michael Ellerman <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
npiggin authored and mpe committed Nov 24, 2021
1 parent 434398a commit ecb6a72
Showing 1 changed file with 85 additions and 62 deletions.
arch/powerpc/kvm/book3s_hv.c: 147 changes (85 additions, 62 deletions)
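The diff applies one pattern throughout: helpers that common code still calls become early-return no-ops on the P9 path (ISA v3.0, CPU_FTR_ARCH_300), while vcore-only helpers that should no longer be reachable there get a WARN_ON_ONCE(). Below is a minimal standalone sketch of that split. It is plain userspace C with a mocked feature flag and assert() standing in for the kernel's cpu_has_feature() and WARN_ON_ONCE(); the helper names are illustrative, not the kernel's.

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/* Mock of cpu_has_feature(CPU_FTR_ARCH_300): true on a Power9 host. */
static bool cpu_has_arch_300 = true;

/* Still called from common code: silently a no-op on the P9 path. */
static void vcpu_load_accounting(void)
{
	if (cpu_has_arch_300)
		return;	/* one vcpu per vcore: no stolen-time bookkeeping */
	printf("old path: vcore stolen-time accounting\n");
}

/* Only reachable from the old (P8 and earlier) path: assert that. */
static void vcore_preempt(void)
{
	assert(!cpu_has_arch_300);	/* kernel uses WARN_ON_ONCE() */
	printf("old path: add vcore to preempted list\n");
}

int main(void)
{
	vcpu_load_accounting();		/* P9: skipped */

	cpu_has_arch_300 = false;	/* pretend P8 */
	vcpu_load_accounting();		/* runs the accounting */
	vcore_preempt();		/* legal on the old path */
	return 0;
}

The early return keeps shared callers unconditional, while the assertion documents that the remaining vcore code now belongs to the P8-and-earlier path only.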
@@ -276,6 +276,8 @@ static void kvmppc_core_start_stolen(struct kvmppc_vcore *vc, u64 tb)
 {
 	unsigned long flags;
 
+	WARN_ON_ONCE(cpu_has_feature(CPU_FTR_ARCH_300));
+
 	spin_lock_irqsave(&vc->stoltb_lock, flags);
 	vc->preempt_tb = tb;
 	spin_unlock_irqrestore(&vc->stoltb_lock, flags);
@@ -285,6 +287,8 @@ static void kvmppc_core_end_stolen(struct kvmppc_vcore *vc, u64 tb)
 {
 	unsigned long flags;
 
+	WARN_ON_ONCE(cpu_has_feature(CPU_FTR_ARCH_300));
+
 	spin_lock_irqsave(&vc->stoltb_lock, flags);
 	if (vc->preempt_tb != TB_NIL) {
 		vc->stolen_tb += tb - vc->preempt_tb;
@@ -297,7 +301,12 @@ static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu)
 {
 	struct kvmppc_vcore *vc = vcpu->arch.vcore;
 	unsigned long flags;
-	u64 now = mftb();
+	u64 now;
+
+	if (cpu_has_feature(CPU_FTR_ARCH_300))
+		return;
+
+	now = mftb();
 
 	/*
 	 * We can test vc->runner without taking the vcore lock,
@@ -321,7 +330,12 @@ static void kvmppc_core_vcpu_put_hv(struct kvm_vcpu *vcpu)
 {
 	struct kvmppc_vcore *vc = vcpu->arch.vcore;
 	unsigned long flags;
-	u64 now = mftb();
+	u64 now;
+
+	if (cpu_has_feature(CPU_FTR_ARCH_300))
+		return;
+
+	now = mftb();
 
 	if (vc->runner == vcpu && vc->vcore_state >= VCORE_SLEEPING)
 		kvmppc_core_start_stolen(vc, now);
@@ -673,6 +687,8 @@ static u64 vcore_stolen_time(struct kvmppc_vcore *vc, u64 now)
 	u64 p;
 	unsigned long flags;
 
+	WARN_ON_ONCE(cpu_has_feature(CPU_FTR_ARCH_300));
+
 	spin_lock_irqsave(&vc->stoltb_lock, flags);
 	p = vc->stolen_tb;
 	if (vc->vcore_state != VCORE_INACTIVE &&
@@ -695,13 +711,19 @@ static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu,
 	dt = vcpu->arch.dtl_ptr;
 	vpa = vcpu->arch.vpa.pinned_addr;
 	now = tb;
-	core_stolen = vcore_stolen_time(vc, now);
-	stolen = core_stolen - vcpu->arch.stolen_logged;
-	vcpu->arch.stolen_logged = core_stolen;
-	spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
-	stolen += vcpu->arch.busy_stolen;
-	vcpu->arch.busy_stolen = 0;
-	spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
+
+	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+		stolen = 0;
+	} else {
+		core_stolen = vcore_stolen_time(vc, now);
+		stolen = core_stolen - vcpu->arch.stolen_logged;
+		vcpu->arch.stolen_logged = core_stolen;
+		spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
+		stolen += vcpu->arch.busy_stolen;
+		vcpu->arch.busy_stolen = 0;
+		spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
+	}
+
 	if (!dt || !vpa)
 		return;
 	memset(dt, 0, sizeof(struct dtl_entry));
@@ -898,13 +920,14 @@ static int kvm_arch_vcpu_yield_to(struct kvm_vcpu *target)
 	 * mode handler is not called but no other threads are in the
 	 * source vcore.
 	 */
-
-	spin_lock(&vcore->lock);
-	if (target->arch.state == KVMPPC_VCPU_RUNNABLE &&
-	    vcore->vcore_state != VCORE_INACTIVE &&
-	    vcore->runner)
-		target = vcore->runner;
-	spin_unlock(&vcore->lock);
+	if (!cpu_has_feature(CPU_FTR_ARCH_300)) {
+		spin_lock(&vcore->lock);
+		if (target->arch.state == KVMPPC_VCPU_RUNNABLE &&
+		    vcore->vcore_state != VCORE_INACTIVE &&
+		    vcore->runner)
+			target = vcore->runner;
+		spin_unlock(&vcore->lock);
+	}
 
 	return kvm_vcpu_yield_to(target);
 }
@@ -3131,13 +3154,6 @@ static void kvmppc_start_thread(struct kvm_vcpu *vcpu, struct kvmppc_vcore *vc)
 		kvmppc_ipi_thread(cpu);
 }
 
-/* Old path does this in asm */
-static void kvmppc_stop_thread(struct kvm_vcpu *vcpu)
-{
-	vcpu->cpu = -1;
-	vcpu->arch.thread_cpu = -1;
-}
-
 static void kvmppc_wait_for_nap(int n_threads)
 {
 	int cpu = smp_processor_id();
@@ -3226,6 +3242,8 @@ static void kvmppc_vcore_preempt(struct kvmppc_vcore *vc)
 {
 	struct preempted_vcore_list *lp = this_cpu_ptr(&preempted_vcores);
 
+	WARN_ON_ONCE(cpu_has_feature(CPU_FTR_ARCH_300));
+
 	vc->vcore_state = VCORE_PREEMPT;
 	vc->pcpu = smp_processor_id();
 	if (vc->num_threads < threads_per_vcore(vc->kvm)) {
@@ -3242,6 +3260,8 @@ static void kvmppc_vcore_end_preempt(struct kvmppc_vcore *vc)
 {
 	struct preempted_vcore_list *lp;
 
+	WARN_ON_ONCE(cpu_has_feature(CPU_FTR_ARCH_300));
+
 	kvmppc_core_end_stolen(vc, mftb());
 	if (!list_empty(&vc->preempt_list)) {
 		lp = &per_cpu(preempted_vcores, vc->pcpu);
@@ -3983,7 +4003,6 @@ static int kvmhv_vcpu_entry_p9_nested(struct kvm_vcpu *vcpu, u64 time_limit, uns
 static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
 				unsigned long lpcr, u64 *tb)
 {
-	struct kvmppc_vcore *vc = vcpu->arch.vcore;
 	u64 next_timer;
 	int trap;
 
@@ -3999,9 +4018,6 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
 
 	kvmppc_subcore_enter_guest();
 
-	vc->entry_exit_map = 1;
-	vc->in_guest = 1;
-
 	vcpu_vpa_increment_dispatch(vcpu);
 
 	if (kvmhv_on_pseries()) {
@@ -4054,9 +4070,6 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
 
 	vcpu_vpa_increment_dispatch(vcpu);
 
-	vc->entry_exit_map = 0x101;
-	vc->in_guest = 0;
-
 	kvmppc_subcore_exit_guest();
 
 	return trap;
@@ -4122,6 +4135,13 @@ static bool kvmppc_vcpu_woken(struct kvm_vcpu *vcpu)
 	return false;
 }
 
+static bool kvmppc_vcpu_check_block(struct kvm_vcpu *vcpu)
+{
+	if (!vcpu->arch.ceded || kvmppc_vcpu_woken(vcpu))
+		return true;
+	return false;
+}
+
 /*
  * Check to see if any of the runnable vcpus on the vcore have pending
  * exceptions or are no longer ceded
@@ -4132,7 +4152,7 @@ static int kvmppc_vcore_check_block(struct kvmppc_vcore *vc)
 	int i;
 
 	for_each_runnable_thread(i, vcpu, vc) {
-		if (!vcpu->arch.ceded || kvmppc_vcpu_woken(vcpu))
+		if (kvmppc_vcpu_check_block(vcpu))
 			return 1;
 	}
 
@@ -4149,6 +4169,8 @@ static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
 	int do_sleep = 1;
 	u64 block_ns;
 
+	WARN_ON_ONCE(cpu_has_feature(CPU_FTR_ARCH_300));
+
 	/* Poll for pending exceptions and ceded state */
 	cur = start_poll = ktime_get();
 	if (vc->halt_poll_ns) {
@@ -4426,11 +4448,7 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
 	vcpu->arch.ceded = 0;
 	vcpu->arch.run_task = current;
 	vcpu->arch.state = KVMPPC_VCPU_RUNNABLE;
-	vcpu->arch.busy_preempt = TB_NIL;
 	vcpu->arch.last_inst = KVM_INST_FETCH_FAILED;
-	vc->runnable_threads[0] = vcpu;
-	vc->n_runnable = 1;
-	vc->runner = vcpu;
 
 	/* See if the MMU is ready to go */
 	if (unlikely(!kvm->arch.mmu_ready)) {
@@ -4448,11 +4466,8 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
 
 	kvmppc_update_vpas(vcpu);
 
-	init_vcore_to_run(vc);
-
 	preempt_disable();
 	pcpu = smp_processor_id();
-	vc->pcpu = pcpu;
 	if (kvm_is_radix(kvm))
 		kvmppc_prepare_radix_vcpu(vcpu, pcpu);
 
@@ -4481,21 +4496,23 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
 		goto out;
 	}
 
-	tb = mftb();
+	if (vcpu->arch.timer_running) {
+		hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
+		vcpu->arch.timer_running = 0;
+	}
 
-	vcpu->arch.stolen_logged = vcore_stolen_time(vc, tb);
-	vc->preempt_tb = TB_NIL;
+	tb = mftb();
 
-	kvmppc_clear_host_core(pcpu);
+	vcpu->cpu = pcpu;
+	vcpu->arch.thread_cpu = pcpu;
+	vc->pcpu = pcpu;
+	local_paca->kvm_hstate.kvm_vcpu = vcpu;
+	local_paca->kvm_hstate.ptid = 0;
+	local_paca->kvm_hstate.fake_suspend = 0;
 
-	local_paca->kvm_hstate.napping = 0;
-	local_paca->kvm_hstate.kvm_split_mode = NULL;
-	kvmppc_start_thread(vcpu, vc);
 	kvmppc_create_dtl_entry(vcpu, vc, tb);
-	trace_kvm_guest_enter(vcpu);
 
-	vc->vcore_state = VCORE_RUNNING;
-	trace_kvmppc_run_core(vc, 0);
+	trace_kvm_guest_enter(vcpu);
 
 	guest_enter_irqoff();
 
@@ -4517,8 +4534,6 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
 
 	set_irq_happened(trap);
 
-	kvmppc_set_host_core(pcpu);
-
 	context_tracking_guest_exit();
 	if (!vtime_accounting_enabled_this_cpu()) {
 		local_irq_enable();
@@ -4534,7 +4549,8 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
 	}
 	vtime_account_guest_exit();
 
-	kvmppc_stop_thread(vcpu);
+	vcpu->cpu = -1;
+	vcpu->arch.thread_cpu = -1;
 
 	powerpc_local_irq_pmu_restore(flags);
 
@@ -4561,28 +4577,31 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
 	}
 	vcpu->arch.ret = r;
 
-	if (is_kvmppc_resume_guest(r) && vcpu->arch.ceded &&
-	    !kvmppc_vcpu_woken(vcpu)) {
+	if (is_kvmppc_resume_guest(r) && !kvmppc_vcpu_check_block(vcpu)) {
 		kvmppc_set_timer(vcpu);
-		while (vcpu->arch.ceded && !kvmppc_vcpu_woken(vcpu)) {
+
+		prepare_to_rcuwait(&vcpu->wait);
+		for (;;) {
+			set_current_state(TASK_INTERRUPTIBLE);
 			if (signal_pending(current)) {
 				vcpu->stat.signal_exits++;
 				run->exit_reason = KVM_EXIT_INTR;
 				vcpu->arch.ret = -EINTR;
 				break;
 			}
-			spin_lock(&vc->lock);
-			kvmppc_vcore_blocked(vc);
-			spin_unlock(&vc->lock);
+
+			if (kvmppc_vcpu_check_block(vcpu))
+				break;
+
+			trace_kvmppc_vcore_blocked(vc, 0);
+			schedule();
+			trace_kvmppc_vcore_blocked(vc, 1);
 		}
+		finish_rcuwait(&vcpu->wait);
 	}
 	vcpu->arch.ceded = 0;
 
-	vc->vcore_state = VCORE_INACTIVE;
-	trace_kvmppc_run_core(vc, 1);
-
  done:
 	kvmppc_remove_runnable(vc, vcpu, tb);
 	trace_kvmppc_run_vcpu_exit(vcpu);
-
 	return vcpu->arch.ret;
@@ -4664,7 +4683,8 @@ static int kvmppc_vcpu_run_hv(struct kvm_vcpu *vcpu)
 
 	kvmppc_save_current_sprs();
 
-	vcpu->arch.waitp = &vcpu->arch.vcore->wait;
+	if (!cpu_has_feature(CPU_FTR_ARCH_300))
+		vcpu->arch.waitp = &vcpu->arch.vcore->wait;
 	vcpu->arch.pgdir = kvm->mm->pgd;
 	vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
 
Expand Down Expand Up @@ -5126,6 +5146,9 @@ void kvmppc_alloc_host_rm_ops(void)
int cpu, core;
int size;

if (cpu_has_feature(CPU_FTR_ARCH_300))
return;

/* Not the first time here ? */
if (kvmppc_host_rm_ops_hv != NULL)
return;