arm64: perf: Move PMU register related defines to asm/perf_event.h
To use the ARMv8 PMU-related register defines from the KVM code, we move
the relevant definitions to the asm/perf_event.h header file and rename them
with the ARMV8_PMU_ prefix. This allows us to get rid of kvm_perf_event.h.

Signed-off-by: Anup Patel <[email protected]>
Signed-off-by: Shannon Zhao <[email protected]>
Acked-by: Marc Zyngier <[email protected]>
Reviewed-by: Andrew Jones <[email protected]>
Signed-off-by: Marc Zyngier <[email protected]>
Signed-off-by: Will Deacon <[email protected]>
shannonz88 authored and wildea01 committed Mar 29, 2016
1 parent a6002ec commit b8cfadf
Showing 5 changed files with 66 additions and 123 deletions.
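
The commit message above gives the motivation: once the definitions live in asm/perf_event.h with the ARMV8_PMU_ prefix, KVM-side code can use them directly. As a hedged illustration only (the helper name below is hypothetical and not part of this commit), such code could derive the number of implemented event counters from a PMCR_EL0 value:

#include <linux/types.h>
#include <asm/perf_event.h>

/* Hypothetical helper: extract PMCR_EL0.N, the number of event counters. */
static u32 example_pmu_num_event_counters(u64 pmcr)
{
	return (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK;
}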
arch/arm64/include/asm/kvm_host.h (1 change: 0 additions & 1 deletion)
@@ -27,7 +27,6 @@
#include <asm/kvm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmio.h>
-#include <asm/kvm_perf_event.h>

#define __KVM_HAVE_ARCH_INTC_INITIALIZED

arch/arm64/include/asm/kvm_hyp.h (1 change: 0 additions & 1 deletion)
@@ -21,7 +21,6 @@
#include <linux/compiler.h>
#include <linux/kvm_host.h>
#include <asm/kvm_mmu.h>
-#include <asm/kvm_perf_event.h>
#include <asm/sysreg.h>

#define __hyp_text __section(.hyp.text) notrace
arch/arm64/include/asm/kvm_perf_event.h (68 changes: 0 additions & 68 deletions)

This file was deleted.

arch/arm64/include/asm/perf_event.h (47 changes: 47 additions & 0 deletions)
@@ -17,6 +17,53 @@
#ifndef __ASM_PERF_EVENT_H
#define __ASM_PERF_EVENT_H

+#define ARMV8_PMU_MAX_COUNTERS 32
+#define ARMV8_PMU_COUNTER_MASK (ARMV8_PMU_MAX_COUNTERS - 1)
+
+/*
+ * Per-CPU PMCR: config reg
+ */
+#define ARMV8_PMU_PMCR_E (1 << 0) /* Enable all counters */
+#define ARMV8_PMU_PMCR_P (1 << 1) /* Reset all counters */
+#define ARMV8_PMU_PMCR_C (1 << 2) /* Cycle counter reset */
+#define ARMV8_PMU_PMCR_D (1 << 3) /* CCNT counts every 64th cpu cycle */
+#define ARMV8_PMU_PMCR_X (1 << 4) /* Export to ETM */
+#define ARMV8_PMU_PMCR_DP (1 << 5) /* Disable CCNT if non-invasive debug*/
+#define ARMV8_PMU_PMCR_LC (1 << 6) /* Overflow on 64 bit cycle counter */
+#define ARMV8_PMU_PMCR_N_SHIFT 11 /* Number of counters supported */
+#define ARMV8_PMU_PMCR_N_MASK 0x1f
+#define ARMV8_PMU_PMCR_MASK 0x7f /* Mask for writable bits */
+
+/*
+ * PMOVSR: counters overflow flag status reg
+ */
+#define ARMV8_PMU_OVSR_MASK 0xffffffff /* Mask for writable bits */
+#define ARMV8_PMU_OVERFLOWED_MASK ARMV8_PMU_OVSR_MASK
+
+/*
+ * PMXEVTYPER: Event selection reg
+ */
+#define ARMV8_PMU_EVTYPE_MASK 0xc800ffff /* Mask for writable bits */
+#define ARMV8_PMU_EVTYPE_EVENT 0xffff /* Mask for EVENT bits */
+
+#define ARMV8_PMU_EVTYPE_EVENT_SW_INCR 0 /* Software increment event */
+
+/*
+ * Event filters for PMUv3
+ */
+#define ARMV8_PMU_EXCLUDE_EL1 (1 << 31)
+#define ARMV8_PMU_EXCLUDE_EL0 (1 << 30)
+#define ARMV8_PMU_INCLUDE_EL2 (1 << 27)
+
+/*
+ * PMUSERENR: user enable reg
+ */
+#define ARMV8_PMU_USERENR_MASK 0xf /* Mask for writable bits */
+#define ARMV8_PMU_USERENR_EN (1 << 0) /* PMU regs can be accessed at EL0 */
+#define ARMV8_PMU_USERENR_SW (1 << 1) /* PMSWINC can be written at EL0 */
+#define ARMV8_PMU_USERENR_CR (1 << 2) /* Cycle counter can be read at EL0 */
+#define ARMV8_PMU_USERENR_ER (1 << 3) /* Event counter can be read at EL0 */
+
#ifdef CONFIG_PERF_EVENTS
struct pt_regs;
extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
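
As a usage sketch for the event-type and filter macros added above (illustrative only; the helper below is hypothetical and does not appear in the kernel), a PMXEVTYPER_EL0 value can be composed by masking the event number, OR-ing in the exclude bits, and trimming to the writable bits:

#include <linux/types.h>
#include <asm/perf_event.h>

/* Hypothetical helper: build a PMXEVTYPER_EL0 value for one event counter. */
static u32 example_build_evtyper(u32 event, bool exclude_el0, bool exclude_el1)
{
	u32 val = event & ARMV8_PMU_EVTYPE_EVENT;	/* event number field */

	if (exclude_el0)
		val |= ARMV8_PMU_EXCLUDE_EL0;	/* do not count at EL0 */
	if (exclude_el1)
		val |= ARMV8_PMU_EXCLUDE_EL1;	/* do not count at EL1 */

	return val & ARMV8_PMU_EVTYPE_MASK;	/* keep only writable bits */
}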
arch/arm64/kernel/perf_event.c (72 changes: 19 additions & 53 deletions)
@@ -20,6 +20,7 @@
*/

#include <asm/irq_regs.h>
+#include <asm/perf_event.h>
#include <asm/virt.h>

#include <linux/of.h>
@@ -384,9 +385,6 @@ static const struct attribute_group *armv8_pmuv3_attr_groups[] = {
#define ARMV8_IDX_COUNTER_LAST(cpu_pmu) \
(ARMV8_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1)

-#define ARMV8_MAX_COUNTERS 32
-#define ARMV8_COUNTER_MASK (ARMV8_MAX_COUNTERS - 1)
-
/*
* ARMv8 low level PMU access
*/
@@ -395,40 +393,7 @@ static const struct attribute_group *armv8_pmuv3_attr_groups[] = {
* Perf Event to low level counters mapping
*/
#define ARMV8_IDX_TO_COUNTER(x) \
-(((x) - ARMV8_IDX_COUNTER0) & ARMV8_COUNTER_MASK)
-
-/*
- * Per-CPU PMCR: config reg
- */
-#define ARMV8_PMCR_E (1 << 0) /* Enable all counters */
-#define ARMV8_PMCR_P (1 << 1) /* Reset all counters */
-#define ARMV8_PMCR_C (1 << 2) /* Cycle counter reset */
-#define ARMV8_PMCR_D (1 << 3) /* CCNT counts every 64th cpu cycle */
-#define ARMV8_PMCR_X (1 << 4) /* Export to ETM */
-#define ARMV8_PMCR_DP (1 << 5) /* Disable CCNT if non-invasive debug*/
-#define ARMV8_PMCR_LC (1 << 6) /* Overflow on 64 bit cycle counter */
-#define ARMV8_PMCR_N_SHIFT 11 /* Number of counters supported */
-#define ARMV8_PMCR_N_MASK 0x1f
-#define ARMV8_PMCR_MASK 0x7f /* Mask for writable bits */
-
-/*
- * PMOVSR: counters overflow flag status reg
- */
-#define ARMV8_OVSR_MASK 0xffffffff /* Mask for writable bits */
-#define ARMV8_OVERFLOWED_MASK ARMV8_OVSR_MASK
-
-/*
- * PMXEVTYPER: Event selection reg
- */
-#define ARMV8_EVTYPE_MASK 0xc800ffff /* Mask for writable bits */
-#define ARMV8_EVTYPE_EVENT 0xffff /* Mask for EVENT bits */
-
-/*
- * Event filters for PMUv3
- */
-#define ARMV8_EXCLUDE_EL1 (1 << 31)
-#define ARMV8_EXCLUDE_EL0 (1 << 30)
-#define ARMV8_INCLUDE_EL2 (1 << 27)
+(((x) - ARMV8_IDX_COUNTER0) & ARMV8_PMU_COUNTER_MASK)

static inline u32 armv8pmu_pmcr_read(void)
{
@@ -439,14 +404,14 @@ static inline u32 armv8pmu_pmcr_read(void)

static inline void armv8pmu_pmcr_write(u32 val)
{
-val &= ARMV8_PMCR_MASK;
+val &= ARMV8_PMU_PMCR_MASK;
isb();
asm volatile("msr pmcr_el0, %0" :: "r" (val));
}

static inline int armv8pmu_has_overflowed(u32 pmovsr)
{
-return pmovsr & ARMV8_OVERFLOWED_MASK;
+return pmovsr & ARMV8_PMU_OVERFLOWED_MASK;
}

static inline int armv8pmu_counter_valid(struct arm_pmu *cpu_pmu, int idx)
@@ -512,7 +477,7 @@ static inline void armv8pmu_write_counter(struct perf_event *event, u32 value)
static inline void armv8pmu_write_evtype(int idx, u32 val)
{
if (armv8pmu_select_counter(idx) == idx) {
-val &= ARMV8_EVTYPE_MASK;
+val &= ARMV8_PMU_EVTYPE_MASK;
asm volatile("msr pmxevtyper_el0, %0" :: "r" (val));
}
}
@@ -558,7 +523,7 @@ static inline u32 armv8pmu_getreset_flags(void)
asm volatile("mrs %0, pmovsclr_el0" : "=r" (value));

/* Write to clear flags */
-value &= ARMV8_OVSR_MASK;
+value &= ARMV8_PMU_OVSR_MASK;
asm volatile("msr pmovsclr_el0, %0" :: "r" (value));

return value;
@@ -696,7 +661,7 @@ static void armv8pmu_start(struct arm_pmu *cpu_pmu)

raw_spin_lock_irqsave(&events->pmu_lock, flags);
/* Enable all counters */
-armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMCR_E);
+armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMU_PMCR_E);
raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

@@ -707,7 +672,7 @@ static void armv8pmu_stop(struct arm_pmu *cpu_pmu)

raw_spin_lock_irqsave(&events->pmu_lock, flags);
/* Disable all counters */
-armv8pmu_pmcr_write(armv8pmu_pmcr_read() & ~ARMV8_PMCR_E);
+armv8pmu_pmcr_write(armv8pmu_pmcr_read() & ~ARMV8_PMU_PMCR_E);
raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

@@ -717,7 +682,7 @@ static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc,
int idx;
struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
-unsigned long evtype = hwc->config_base & ARMV8_EVTYPE_EVENT;
+unsigned long evtype = hwc->config_base & ARMV8_PMU_EVTYPE_EVENT;

/* Always place a cycle counter into the cycle counter. */
if (evtype == ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES) {
@@ -754,11 +719,11 @@ static int armv8pmu_set_event_filter(struct hw_perf_event *event,
attr->exclude_kernel != attr->exclude_hv)
return -EINVAL;
if (attr->exclude_user)
-config_base |= ARMV8_EXCLUDE_EL0;
+config_base |= ARMV8_PMU_EXCLUDE_EL0;
if (!is_kernel_in_hyp_mode() && attr->exclude_kernel)
-config_base |= ARMV8_EXCLUDE_EL1;
+config_base |= ARMV8_PMU_EXCLUDE_EL1;
if (!attr->exclude_hv)
-config_base |= ARMV8_INCLUDE_EL2;
+config_base |= ARMV8_PMU_INCLUDE_EL2;

/*
* Install the filter into config_base as this is used to
@@ -784,43 +749,44 @@ static void armv8pmu_reset(void *info)
* Initialize & Reset PMNC. Request overflow interrupt for
* 64 bit cycle counter but cheat in armv8pmu_write_counter().
*/
-armv8pmu_pmcr_write(ARMV8_PMCR_P | ARMV8_PMCR_C | ARMV8_PMCR_LC);
+armv8pmu_pmcr_write(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C |
+ARMV8_PMU_PMCR_LC);
}

static int armv8_pmuv3_map_event(struct perf_event *event)
{
return armpmu_map_event(event, &armv8_pmuv3_perf_map,
&armv8_pmuv3_perf_cache_map,
-ARMV8_EVTYPE_EVENT);
+ARMV8_PMU_EVTYPE_EVENT);
}

static int armv8_a53_map_event(struct perf_event *event)
{
return armpmu_map_event(event, &armv8_a53_perf_map,
&armv8_a53_perf_cache_map,
-ARMV8_EVTYPE_EVENT);
+ARMV8_PMU_EVTYPE_EVENT);
}

static int armv8_a57_map_event(struct perf_event *event)
{
return armpmu_map_event(event, &armv8_a57_perf_map,
&armv8_a57_perf_cache_map,
-ARMV8_EVTYPE_EVENT);
+ARMV8_PMU_EVTYPE_EVENT);
}

static int armv8_thunder_map_event(struct perf_event *event)
{
return armpmu_map_event(event, &armv8_thunder_perf_map,
&armv8_thunder_perf_cache_map,
-ARMV8_EVTYPE_EVENT);
+ARMV8_PMU_EVTYPE_EVENT);
}

static void armv8pmu_read_num_pmnc_events(void *info)
{
int *nb_cnt = info;

/* Read the nb of CNTx counters supported from PMNC */
-*nb_cnt = (armv8pmu_pmcr_read() >> ARMV8_PMCR_N_SHIFT) & ARMV8_PMCR_N_MASK;
+*nb_cnt = (armv8pmu_pmcr_read() >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK;

/* Add the CPU cycles counter */
*nb_cnt += 1;
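
Finally, a hedged sketch of how the new ARMV8_PMU_USERENR_* bits might be used to let EL0 read the cycle counter. The function below is hypothetical (it is not part of this commit) and simply mirrors the msr/isb idiom already used in this file:

#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/perf_event.h>

/* Hypothetical helper: grant userspace (EL0) read access to PMCCNTR_EL0. */
static inline void example_enable_el0_cycle_counter_read(void)
{
	u32 val = ARMV8_PMU_USERENR_EN | ARMV8_PMU_USERENR_CR;

	val &= ARMV8_PMU_USERENR_MASK;	/* keep only writable bits */
	asm volatile("msr pmuserenr_el0, %0" :: "r" (val));
	isb();
}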
