KVM: arm64: pmu: Don't mark a counter as chained if the odd one is disabled

At the moment we update the chain bitmap when the counter type is set.
This does not take the enable state of the odd register into account.

Let's make sure a counter is never considered chained if the high
counter is disabled.

We now recompute the chain state on enable/disable as well as on type
changes.

Also make kvm_pmu_create_perf_event() use the chain bitmap rather than
kvm_pmu_idx_has_chain_evtype().

Suggested-by: Marc Zyngier <[email protected]>
Signed-off-by: Eric Auger <[email protected]>
Signed-off-by: Marc Zyngier <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
eauger authored and Marc Zyngier committed Jan 28, 2020
1 parent 3837407 commit 76c9fc5
Showing 1 changed file with 33 additions and 29 deletions.
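In effect, the chained state now depends on both the odd register's event type and its enable bit. A condensed sketch of the recomputed predicate, reusing helpers that already exist in virt/kvm/arm/pmu.c (an illustration, not a literal excerpt from the patch):

	/*
	 * A pair is chained iff the odd (high) counter is programmed with
	 * the CHAIN event type *and* is currently enabled. This predicate
	 * is recomputed on enable/disable as well as on event-type writes.
	 */
	bool chained = kvm_pmu_idx_has_chain_evtype(vcpu, pmc->idx) &&
		       kvm_pmu_counter_is_enabled(vcpu, pmc->idx | 0x1);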
virt/kvm/arm/pmu.c
@@ -15,6 +15,8 @@
 #include <kvm/arm_vgic.h>

 static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx);
+static void kvm_pmu_update_pmc_chained(struct kvm_vcpu *vcpu, u64 select_idx);
+static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc);

 #define PERF_ATTR_CFG1_KVM_PMU_CHAINED 0x1

@@ -75,6 +77,13 @@ static struct kvm_pmc *kvm_pmu_get_canonical_pmc(struct kvm_pmc *pmc)

 	return pmc;
 }
+static struct kvm_pmc *kvm_pmu_get_alternate_pmc(struct kvm_pmc *pmc)
+{
+	if (kvm_pmu_idx_is_high_counter(pmc->idx))
+		return pmc - 1;
+	else
+		return pmc + 1;
+}

 /**
  * kvm_pmu_idx_has_chain_evtype - determine if the event type is chain
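kvm_pmu_get_alternate_pmc() is the counterpart of the existing kvm_pmu_get_canonical_pmc(): given either half of an even/odd counter pair it returns the other half. A hypothetical use, with the indices assumed purely for illustration:

	struct kvm_pmc *high = &pmu->pmc[3];                    /* odd half of the pair */
	struct kvm_pmc *low = kvm_pmu_get_alternate_pmc(high); /* yields &pmu->pmc[2] */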
@@ -294,15 +303,9 @@ void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val)

 		pmc = &pmu->pmc[i];

-		/*
-		 * For high counters of chained events we must recreate the
-		 * perf event with the long (64bit) attribute set.
-		 */
-		if (kvm_pmu_pmc_is_chained(pmc) &&
-		    kvm_pmu_idx_is_high_counter(i)) {
-			kvm_pmu_create_perf_event(vcpu, i);
-			continue;
-		}
+		/* A change in the enable state may affect the chain state */
+		kvm_pmu_update_pmc_chained(vcpu, i);
+		kvm_pmu_create_perf_event(vcpu, i);

 		/* At this point, pmc must be the canonical */
 		if (pmc->perf_event) {
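Both the enable and the disable path now funnel through the same two steps: kvm_pmu_update_pmc_chained() first reconciles the chain bitmap with the counter's new enable state, then kvm_pmu_create_perf_event() recreates the backing perf event with or without the 64-bit chained attribute, as the updated bitmap dictates. This replaces the old special case that only recreated the event for the high half of an already-chained pair.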
@@ -335,15 +338,9 @@ void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val)

 		pmc = &pmu->pmc[i];

-		/*
-		 * For high counters of chained events we must recreate the
-		 * perf event with the long (64bit) attribute unset.
-		 */
-		if (kvm_pmu_pmc_is_chained(pmc) &&
-		    kvm_pmu_idx_is_high_counter(i)) {
-			kvm_pmu_create_perf_event(vcpu, i);
-			continue;
-		}
+		/* A change in the enable state may affect the chain state */
+		kvm_pmu_update_pmc_chained(vcpu, i);
+		kvm_pmu_create_perf_event(vcpu, i);

 		/* At this point, pmc must be the canonical */
 		if (pmc->perf_event)
@@ -585,15 +582,14 @@ static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx)

 	counter = kvm_pmu_get_pair_counter_value(vcpu, pmc);

-	if (kvm_pmu_idx_has_chain_evtype(vcpu, pmc->idx)) {
+	if (kvm_pmu_pmc_is_chained(pmc)) {
 		/**
 		 * The initial sample period (overflow count) of an event. For
 		 * chained counters we only support overflow interrupts on the
 		 * high counter.
 		 */
 		attr.sample_period = (-counter) & GENMASK(63, 0);
-		if (kvm_pmu_counter_is_enabled(vcpu, pmc->idx + 1))
-			attr.config1 |= PERF_ATTR_CFG1_KVM_PMU_CHAINED;
+		attr.config1 |= PERF_ATTR_CFG1_KVM_PMU_CHAINED;

 		event = perf_event_create_kernel_counter(&attr, -1, current,
 							 kvm_pmu_perf_overflow,
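With the enable state of the odd counter already folded into the bitmap by kvm_pmu_update_pmc_chained(), kvm_pmu_create_perf_event() can test kvm_pmu_pmc_is_chained() directly and set the chained attribute unconditionally inside that branch; the separate kvm_pmu_counter_is_enabled() check on the high counter becomes redundant. The sample period remains the distance to overflow of the 64-bit pair, i.e. (2^64 - counter) modulo 2^64.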
@@ -624,25 +620,33 @@ static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx)
  * @select_idx: The number of selected counter
  *
  * Update the chained bitmap based on the event type written in the
- * typer register.
+ * typer register and the enable state of the odd register.
  */
 static void kvm_pmu_update_pmc_chained(struct kvm_vcpu *vcpu, u64 select_idx)
 {
 	struct kvm_pmu *pmu = &vcpu->arch.pmu;
-	struct kvm_pmc *pmc = &pmu->pmc[select_idx];
+	struct kvm_pmc *pmc = &pmu->pmc[select_idx], *canonical_pmc;
+	bool new_state, old_state;

-	if (kvm_pmu_idx_has_chain_evtype(vcpu, pmc->idx)) {
+	old_state = kvm_pmu_pmc_is_chained(pmc);
+	new_state = kvm_pmu_idx_has_chain_evtype(vcpu, pmc->idx) &&
+		    kvm_pmu_counter_is_enabled(vcpu, pmc->idx | 0x1);
+
+	if (old_state == new_state)
+		return;
+
+	canonical_pmc = kvm_pmu_get_canonical_pmc(pmc);
+	kvm_pmu_stop_counter(vcpu, canonical_pmc);
+	if (new_state) {
 		/*
 		 * During promotion from !chained to chained we must ensure
 		 * the adjacent counter is stopped and its event destroyed
 		 */
-		if (!kvm_pmu_pmc_is_chained(pmc))
-			kvm_pmu_stop_counter(vcpu, pmc);
-
+		kvm_pmu_stop_counter(vcpu, kvm_pmu_get_alternate_pmc(pmc));
 		set_bit(pmc->idx >> 1, vcpu->arch.pmu.chained);
-	} else {
-		clear_bit(pmc->idx >> 1, vcpu->arch.pmu.chained);
+		return;
 	}
+	clear_bit(pmc->idx >> 1, vcpu->arch.pmu.chained);
 }
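Concretely, consider a guest that programs the CHAIN event on an odd counter but enables only the even half of the pair. A hypothetical guest-side sequence for counters 2 and 3, for illustration only:

	write_sysreg(ARMV8_PMUV3_PERFCTR_CHAIN, pmevtyper3_el0); /* chain event on counter 3 */
	write_sysreg(BIT(2), pmcntenset_el0);                    /* enable counter 2 only */

Before this patch, the event-type write alone marked the pair as chained in KVM's bitmap even though counter 3 could never count; with the recomputed state, the pair is considered chained only once counter 3 is enabled as well.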
