Skip to content

Commit

Permalink
perf cpumap: Give CPUs their own type
Browse files Browse the repository at this point in the history
A common problem is confusing CPU map indices with the CPU number; by
wrapping the CPU number in a struct, this confusion is avoided. The
approach is similar to atomic_t.

Committer notes:

To make it build with BUILD_BPF_SKEL=1 these files needed the
conversions to 'struct perf_cpu' usage:

  tools/perf/util/bpf_counter.c
  tools/perf/util/bpf_counter_cgroup.c
  tools/perf/util/bpf_ftrace.c

Also perf_env__get_cpu() was removed back in "perf cpumap: Switch
cpu_map__build_map to cpu function".

Additionally these needed to be fixed for the ARM builds to complete:

  tools/perf/arch/arm/util/cs-etm.c
  tools/perf/arch/arm64/util/pmu.c

Suggested-by: John Garry <[email protected]>
Signed-off-by: Ian Rogers <[email protected]>
Cc: Alexander Shishkin <[email protected]>
Cc: Andi Kleen <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: James Clark <[email protected]>
Cc: Jiri Olsa <[email protected]>
Cc: Kajol Jain <[email protected]>
Cc: Kan Liang <[email protected]>
Cc: Leo Yan <[email protected]>
Cc: Mark Rutland <[email protected]>
Cc: Mathieu Poirier <[email protected]>
Cc: Mike Leach <[email protected]>
Cc: Namhyung Kim <[email protected]>
Cc: Paul Clarke <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Riccardo Mancini <[email protected]>
Cc: Stephane Eranian <[email protected]>
Cc: Suzuki Poulouse <[email protected]>
Cc: Vineet Singh <[email protected]>
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Arnaldo Carvalho de Melo <[email protected]>
  • Loading branch information
captain5050 authored and acmel committed Jan 12, 2022
1 parent ce37ab3 commit 6d18804
Show file tree
Hide file tree
Showing 64 changed files with 431 additions and 356 deletions.
103 changes: 59 additions & 44 deletions tools/lib/perf/cpumap.c
Original file line number Diff line number Diff line change
Expand Up @@ -10,15 +10,24 @@
#include <ctype.h>
#include <limits.h>

struct perf_cpu_map *perf_cpu_map__dummy_new(void)
static struct perf_cpu_map *perf_cpu_map__alloc(int nr_cpus)
{
struct perf_cpu_map *cpus = malloc(sizeof(*cpus) + sizeof(int));
struct perf_cpu_map *cpus = malloc(sizeof(*cpus) + sizeof(struct perf_cpu) * nr_cpus);

if (cpus != NULL) {
cpus->nr = 1;
cpus->map[0] = -1;
cpus->nr = nr_cpus;
refcount_set(&cpus->refcnt, 1);

}
return cpus;
}

/*
 * Create a single-entry map whose CPU number is -1. A map whose first
 * entry is -1 is treated as "empty" by perf_cpu_map__empty().
 * Returns NULL on allocation failure.
 */
struct perf_cpu_map *perf_cpu_map__dummy_new(void)
{
	struct perf_cpu_map *cpus = perf_cpu_map__alloc(1);

	if (cpus)
		cpus->map[0].cpu = -1;

	return cpus;
}
Expand Down Expand Up @@ -54,15 +63,12 @@ static struct perf_cpu_map *cpu_map__default_new(void)
if (nr_cpus < 0)
return NULL;

cpus = malloc(sizeof(*cpus) + nr_cpus * sizeof(int));
cpus = perf_cpu_map__alloc(nr_cpus);
if (cpus != NULL) {
int i;

for (i = 0; i < nr_cpus; ++i)
cpus->map[i] = i;

cpus->nr = nr_cpus;
refcount_set(&cpus->refcnt, 1);
cpus->map[i].cpu = i;
}

return cpus;
Expand All @@ -73,39 +79,40 @@ struct perf_cpu_map *perf_cpu_map__default_new(void)
return cpu_map__default_new();
}

static int cmp_int(const void *a, const void *b)

static int cmp_cpu(const void *a, const void *b)
{
return *(const int *)a - *(const int*)b;
const struct perf_cpu *cpu_a = a, *cpu_b = b;

return cpu_a->cpu - cpu_b->cpu;
}

static struct perf_cpu_map *cpu_map__trim_new(int nr_cpus, int *tmp_cpus)
static struct perf_cpu_map *cpu_map__trim_new(int nr_cpus, const struct perf_cpu *tmp_cpus)
{
size_t payload_size = nr_cpus * sizeof(int);
struct perf_cpu_map *cpus = malloc(sizeof(*cpus) + payload_size);
size_t payload_size = nr_cpus * sizeof(struct perf_cpu);
struct perf_cpu_map *cpus = perf_cpu_map__alloc(nr_cpus);
int i, j;

if (cpus != NULL) {
memcpy(cpus->map, tmp_cpus, payload_size);
qsort(cpus->map, nr_cpus, sizeof(int), cmp_int);
qsort(cpus->map, nr_cpus, sizeof(struct perf_cpu), cmp_cpu);
/* Remove dups */
j = 0;
for (i = 0; i < nr_cpus; i++) {
if (i == 0 || cpus->map[i] != cpus->map[i - 1])
cpus->map[j++] = cpus->map[i];
if (i == 0 || cpus->map[i].cpu != cpus->map[i - 1].cpu)
cpus->map[j++].cpu = cpus->map[i].cpu;
}
cpus->nr = j;
assert(j <= nr_cpus);
refcount_set(&cpus->refcnt, 1);
}

return cpus;
}

struct perf_cpu_map *perf_cpu_map__read(FILE *file)
{
struct perf_cpu_map *cpus = NULL;
int nr_cpus = 0;
int *tmp_cpus = NULL, *tmp;
struct perf_cpu *tmp_cpus = NULL, *tmp;
int max_entries = 0;
int n, cpu, prev;
char sep;
Expand All @@ -124,24 +131,24 @@ struct perf_cpu_map *perf_cpu_map__read(FILE *file)

if (new_max >= max_entries) {
max_entries = new_max + MAX_NR_CPUS / 2;
tmp = realloc(tmp_cpus, max_entries * sizeof(int));
tmp = realloc(tmp_cpus, max_entries * sizeof(struct perf_cpu));
if (tmp == NULL)
goto out_free_tmp;
tmp_cpus = tmp;
}

while (++prev < cpu)
tmp_cpus[nr_cpus++] = prev;
tmp_cpus[nr_cpus++].cpu = prev;
}
if (nr_cpus == max_entries) {
max_entries += MAX_NR_CPUS;
tmp = realloc(tmp_cpus, max_entries * sizeof(int));
tmp = realloc(tmp_cpus, max_entries * sizeof(struct perf_cpu));
if (tmp == NULL)
goto out_free_tmp;
tmp_cpus = tmp;
}

tmp_cpus[nr_cpus++] = cpu;
tmp_cpus[nr_cpus++].cpu = cpu;
if (n == 2 && sep == '-')
prev = cpu;
else
Expand Down Expand Up @@ -179,7 +186,7 @@ struct perf_cpu_map *perf_cpu_map__new(const char *cpu_list)
unsigned long start_cpu, end_cpu = 0;
char *p = NULL;
int i, nr_cpus = 0;
int *tmp_cpus = NULL, *tmp;
struct perf_cpu *tmp_cpus = NULL, *tmp;
int max_entries = 0;

if (!cpu_list)
Expand Down Expand Up @@ -220,17 +227,17 @@ struct perf_cpu_map *perf_cpu_map__new(const char *cpu_list)
for (; start_cpu <= end_cpu; start_cpu++) {
/* check for duplicates */
for (i = 0; i < nr_cpus; i++)
if (tmp_cpus[i] == (int)start_cpu)
if (tmp_cpus[i].cpu == (int)start_cpu)
goto invalid;

if (nr_cpus == max_entries) {
max_entries += MAX_NR_CPUS;
tmp = realloc(tmp_cpus, max_entries * sizeof(int));
tmp = realloc(tmp_cpus, max_entries * sizeof(struct perf_cpu));
if (tmp == NULL)
goto invalid;
tmp_cpus = tmp;
}
tmp_cpus[nr_cpus++] = (int)start_cpu;
tmp_cpus[nr_cpus++].cpu = (int)start_cpu;
}
if (*p)
++p;
Expand All @@ -250,12 +257,16 @@ struct perf_cpu_map *perf_cpu_map__new(const char *cpu_list)
return cpus;
}

int perf_cpu_map__cpu(const struct perf_cpu_map *cpus, int idx)
struct perf_cpu perf_cpu_map__cpu(const struct perf_cpu_map *cpus, int idx)
{
struct perf_cpu result = {
.cpu = -1
};

if (cpus && idx < cpus->nr)
return cpus->map[idx];

return -1;
return result;
}

int perf_cpu_map__nr(const struct perf_cpu_map *cpus)
Expand All @@ -265,10 +276,10 @@ int perf_cpu_map__nr(const struct perf_cpu_map *cpus)

/* A NULL map, or one whose first CPU is the -1 sentinel, counts as empty. */
bool perf_cpu_map__empty(const struct perf_cpu_map *map)
{
	return map ? map->map[0].cpu == -1 : true;
}

int perf_cpu_map__idx(const struct perf_cpu_map *cpus, int cpu)
int perf_cpu_map__idx(const struct perf_cpu_map *cpus, struct perf_cpu cpu)
{
int low, high;

Expand All @@ -278,13 +289,13 @@ int perf_cpu_map__idx(const struct perf_cpu_map *cpus, int cpu)
low = 0;
high = cpus->nr;
while (low < high) {
int idx = (low + high) / 2,
cpu_at_idx = cpus->map[idx];
int idx = (low + high) / 2;
struct perf_cpu cpu_at_idx = cpus->map[idx];

if (cpu_at_idx == cpu)
if (cpu_at_idx.cpu == cpu.cpu)
return idx;

if (cpu_at_idx > cpu)
if (cpu_at_idx.cpu > cpu.cpu)
high = idx;
else
low = idx + 1;
Expand All @@ -293,15 +304,19 @@ int perf_cpu_map__idx(const struct perf_cpu_map *cpus, int cpu)
return -1;
}

bool perf_cpu_map__has(const struct perf_cpu_map *cpus, int cpu)
bool perf_cpu_map__has(const struct perf_cpu_map *cpus, struct perf_cpu cpu)
{
return perf_cpu_map__idx(cpus, cpu) != -1;
}

int perf_cpu_map__max(struct perf_cpu_map *map)
struct perf_cpu perf_cpu_map__max(struct perf_cpu_map *map)
{
struct perf_cpu result = {
.cpu = -1
};

// cpu_map__trim_new() qsort()s it, cpu_map__default_new() sorts it as well.
return map->nr > 0 ? map->map[map->nr - 1] : -1;
return map->nr > 0 ? map->map[map->nr - 1] : result;
}

/*
Expand All @@ -315,7 +330,7 @@ int perf_cpu_map__max(struct perf_cpu_map *map)
struct perf_cpu_map *perf_cpu_map__merge(struct perf_cpu_map *orig,
struct perf_cpu_map *other)
{
int *tmp_cpus;
struct perf_cpu *tmp_cpus;
int tmp_len;
int i, j, k;
struct perf_cpu_map *merged;
Expand All @@ -329,19 +344,19 @@ struct perf_cpu_map *perf_cpu_map__merge(struct perf_cpu_map *orig,
if (!other)
return orig;
if (orig->nr == other->nr &&
!memcmp(orig->map, other->map, orig->nr * sizeof(int)))
!memcmp(orig->map, other->map, orig->nr * sizeof(struct perf_cpu)))
return orig;

tmp_len = orig->nr + other->nr;
tmp_cpus = malloc(tmp_len * sizeof(int));
tmp_cpus = malloc(tmp_len * sizeof(struct perf_cpu));
if (!tmp_cpus)
return NULL;

/* Standard merge algorithm from wikipedia */
i = j = k = 0;
while (i < orig->nr && j < other->nr) {
if (orig->map[i] <= other->map[j]) {
if (orig->map[i] == other->map[j])
if (orig->map[i].cpu <= other->map[j].cpu) {
if (orig->map[i].cpu == other->map[j].cpu)
j++;
tmp_cpus[k++] = orig->map[i++];
} else
Expand Down
4 changes: 2 additions & 2 deletions tools/lib/perf/evlist.c
Original file line number Diff line number Diff line change
Expand Up @@ -407,7 +407,7 @@ perf_evlist__mmap_cb_get(struct perf_evlist *evlist, bool overwrite, int idx)

static int
perf_evlist__mmap_cb_mmap(struct perf_mmap *map, struct perf_mmap_param *mp,
int output, int cpu)
int output, struct perf_cpu cpu)
{
return perf_mmap__mmap(map, mp, output, cpu);
}
Expand All @@ -426,7 +426,7 @@ mmap_per_evsel(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
int idx, struct perf_mmap_param *mp, int cpu_idx,
int thread, int *_output, int *_output_overwrite)
{
int evlist_cpu = perf_cpu_map__cpu(evlist->cpus, cpu_idx);
struct perf_cpu evlist_cpu = perf_cpu_map__cpu(evlist->cpus, cpu_idx);
struct perf_evsel *evsel;
int revent;

Expand Down
9 changes: 5 additions & 4 deletions tools/lib/perf/evsel.c
Original file line number Diff line number Diff line change
Expand Up @@ -78,10 +78,10 @@ static int perf_evsel__alloc_mmap(struct perf_evsel *evsel, int ncpus, int nthre

/*
 * Thin wrapper around the perf_event_open(2) syscall; unwraps the
 * struct perf_cpu to the raw CPU number the kernel expects.
 */
static int
sys_perf_event_open(struct perf_event_attr *attr,
		    pid_t pid, struct perf_cpu cpu, int group_fd,
		    unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu.cpu, group_fd, flags);
}

static int get_group_fd(struct perf_evsel *evsel, int cpu_map_idx, int thread, int *group_fd)
Expand Down Expand Up @@ -113,7 +113,8 @@ static int get_group_fd(struct perf_evsel *evsel, int cpu_map_idx, int thread, i
int perf_evsel__open(struct perf_evsel *evsel, struct perf_cpu_map *cpus,
struct perf_thread_map *threads)
{
int cpu, idx, thread, err = 0;
struct perf_cpu cpu;
int idx, thread, err = 0;

if (cpus == NULL) {
static struct perf_cpu_map *empty_cpu_map;
Expand Down Expand Up @@ -252,7 +253,7 @@ int perf_evsel__mmap(struct perf_evsel *evsel, int pages)
for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) {
int *fd = FD(evsel, idx, thread);
struct perf_mmap *map;
int cpu = perf_cpu_map__cpu(evsel->cpus, idx);
struct perf_cpu cpu = perf_cpu_map__cpu(evsel->cpus, idx);

if (fd == NULL || *fd < 0)
continue;
Expand Down
9 changes: 7 additions & 2 deletions tools/lib/perf/include/internal/cpumap.h
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,11 @@

#include <linux/refcount.h>

/** A wrapper around a CPU to avoid confusion with the perf_cpu_map's map's indices. */
struct perf_cpu {
	int cpu;	/* CPU number; -1 is used as a sentinel (e.g. by perf_cpu_map__dummy_new()). */
};

/**
* A sized, reference counted, sorted array of integers representing CPU
* numbers. This is commonly used to capture which CPUs a PMU is associated
Expand All @@ -16,13 +21,13 @@ struct perf_cpu_map {
/** Length of the map array. */
int nr;
/** The CPU values. */
int map[];
struct perf_cpu map[];
};

#ifndef MAX_NR_CPUS
#define MAX_NR_CPUS 2048
#endif

int perf_cpu_map__idx(const struct perf_cpu_map *cpus, int cpu);
int perf_cpu_map__idx(const struct perf_cpu_map *cpus, struct perf_cpu cpu);

#endif /* __LIBPERF_INTERNAL_CPUMAP_H */
3 changes: 2 additions & 1 deletion tools/lib/perf/include/internal/evlist.h
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@

#include <linux/list.h>
#include <api/fd/array.h>
#include <internal/cpumap.h>
#include <internal/evsel.h>

#define PERF_EVLIST__HLIST_BITS 8
Expand Down Expand Up @@ -36,7 +37,7 @@ typedef void
typedef struct perf_mmap*
(*perf_evlist_mmap__cb_get_t)(struct perf_evlist*, bool, int);
typedef int
(*perf_evlist_mmap__cb_mmap_t)(struct perf_mmap*, struct perf_mmap_param*, int, int);
(*perf_evlist_mmap__cb_mmap_t)(struct perf_mmap*, struct perf_mmap_param*, int, struct perf_cpu);

struct perf_evlist_mmap_ops {
perf_evlist_mmap__cb_idx_t idx;
Expand Down
4 changes: 2 additions & 2 deletions tools/lib/perf/include/internal/evsel.h
Original file line number Diff line number Diff line change
Expand Up @@ -6,8 +6,8 @@
#include <linux/perf_event.h>
#include <stdbool.h>
#include <sys/types.h>
#include <internal/cpumap.h>

struct perf_cpu_map;
struct perf_thread_map;
struct xyarray;

Expand All @@ -27,7 +27,7 @@ struct perf_sample_id {
* queue number.
*/
int idx;
int cpu;
struct perf_cpu cpu;
pid_t tid;

/* Holds total ID period value for PERF_SAMPLE_READ processing. */
Expand Down
Loading

0 comments on commit 6d18804

Please sign in to comment.