samples: bpf: Refactor BPF map performance test with libbpf
Previously, in order to set the numa_node attribute at map creation time with
libbpf, it was necessary to call bpf_create_map_node() directly (the bpf_load
approach) instead of calling bpf_object__load(), which handles everything on
its own, including map creation. Because of this limitation, this sample was
difficult to refactor from bpf_load to libbpf.
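
For context, a minimal sketch (not code from this sample) of the workaround
described above: creating the NUMA-pinned map by hand with
bpf_create_map_node() instead of letting bpf_object__load() create it. The
map name, sizes and entry count here are illustrative.

	#include <linux/types.h>
	#include <bpf/bpf.h>

	/* Illustrative only: create an LRU hash directly so the NUMA node
	 * can be passed at map creation time (the old bpf_load-era path). */
	static int create_inner_lru_on_node(int node)
	{
		return bpf_create_map_node(BPF_MAP_TYPE_LRU_HASH,
					   "inner_lru_hash_map",
					   sizeof(__u32), sizeof(long),
					   1000 /* max_entries */,
					   BPF_F_NUMA_NODE, node);
	}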

However, commit 1bdb6c9 ("libbpf: Add a bunch of attribute
getters/setters for map definitions") added a numa_node setter to libbpf and
allowed the attribute to be set on a map.
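
A minimal sketch, assuming a user-space loader along the lines of
map_perf_test_user.c, of how the new setter keeps the normal open/load flow
while still choosing the NUMA node at map creation; the object path and map
name below are placeholders.

	#include <bpf/libbpf.h>

	/* Hypothetical helper: override one map's NUMA node before load. */
	static int load_with_numa_node(const char *obj_path, int node)
	{
		struct bpf_object *obj;
		struct bpf_map *map;

		obj = bpf_object__open_file(obj_path, NULL);
		if (libbpf_get_error(obj))
			return -1;

		map = bpf_object__find_map_by_name(obj, "inner_lru_hash_map");
		if (!map)
			return -1;

		/* Only meaningful because the map definition sets BPF_F_NUMA_NODE. */
		if (bpf_map__set_numa_node(map, node))
			return -1;

		return bpf_object__load(obj); /* maps are created here, on 'node' */
	}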

By using libbpf instead of bpf_load, the inner map definition is now
explicitly declared in the BTF-defined format, and the first element of the
ARRAY_OF_MAPS is statically specified in BTF format as well. For this reason,
some of the logic in fixup_map() is no longer needed and has been changed or
removed.
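
Since only the first slot of array_of_lru_hashs is filled statically (see the
BTF-defined maps in the diff below), the remaining slots can still be
populated from user space at run time. A hedged sketch of that pattern,
assuming per-index inner maps are created with bpf_create_map_node() and
inserted with bpf_map_update_elem(); the sizes and names are illustrative,
not taken from map_perf_test_user.c.

	#include <linux/types.h>
	#include <bpf/bpf.h>

	/* Illustrative only: create one more inner LRU map and store its fd in
	 * slot 'idx' of the outer ARRAY_OF_MAPS ('array_fd' is its map fd). */
	static int add_inner_map(int array_fd, __u32 idx, int node)
	{
		int inner_fd;

		inner_fd = bpf_create_map_node(BPF_MAP_TYPE_LRU_HASH, NULL,
					       sizeof(__u32), sizeof(long),
					       1000, BPF_F_NUMA_NODE, node);
		if (inner_fd < 0)
			return -1;

		/* Values of an ARRAY_OF_MAPS are map fds at update time. */
		return bpf_map_update_elem(array_fd, &idx, &inner_fd, BPF_ANY);
	}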

Signed-off-by: Daniel T. Lee <[email protected]>
Signed-off-by: Daniel Borkmann <[email protected]>
Acked-by: Andrii Nakryiko <[email protected]>
Link: https://lore.kernel.org/bpf/[email protected]
DanielTimLee authored and borkmann committed Jul 7, 2020
1 parent 88795b4 commit cc7f641
Showing 2 changed files with 196 additions and 147 deletions.
179 changes: 91 additions & 88 deletions samples/bpf/map_perf_test_kern.c
@@ -9,97 +9,100 @@
#include <linux/version.h>
#include <uapi/linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_legacy.h"
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h>
#include "trace_common.h"

#define MAX_ENTRIES 1000
#define MAX_NR_CPUS 1024

struct bpf_map_def_legacy SEC("maps") hash_map = {
.type = BPF_MAP_TYPE_HASH,
.key_size = sizeof(u32),
.value_size = sizeof(long),
.max_entries = MAX_ENTRIES,
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__type(key, u32);
__type(value, long);
__uint(max_entries, MAX_ENTRIES);
} hash_map SEC(".maps");

struct {
__uint(type, BPF_MAP_TYPE_LRU_HASH);
__type(key, u32);
__type(value, long);
__uint(max_entries, 10000);
} lru_hash_map SEC(".maps");

struct {
__uint(type, BPF_MAP_TYPE_LRU_HASH);
__type(key, u32);
__type(value, long);
__uint(max_entries, 10000);
__uint(map_flags, BPF_F_NO_COMMON_LRU);
} nocommon_lru_hash_map SEC(".maps");

struct inner_lru {
__uint(type, BPF_MAP_TYPE_LRU_HASH);
__type(key, u32);
__type(value, long);
__uint(max_entries, MAX_ENTRIES);
__uint(map_flags, BPF_F_NUMA_NODE);
__uint(numa_node, 0);
} inner_lru_hash_map SEC(".maps");

struct {
__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
__uint(max_entries, MAX_NR_CPUS);
__uint(key_size, sizeof(u32));
__array(values, struct inner_lru); /* use inner_lru as inner map */
} array_of_lru_hashs SEC(".maps") = {
/* statically initialize the first element */
.values = { &inner_lru_hash_map },
};

struct bpf_map_def_legacy SEC("maps") lru_hash_map = {
.type = BPF_MAP_TYPE_LRU_HASH,
.key_size = sizeof(u32),
.value_size = sizeof(long),
.max_entries = 10000,
};

struct bpf_map_def_legacy SEC("maps") nocommon_lru_hash_map = {
.type = BPF_MAP_TYPE_LRU_HASH,
.key_size = sizeof(u32),
.value_size = sizeof(long),
.max_entries = 10000,
.map_flags = BPF_F_NO_COMMON_LRU,
};

struct bpf_map_def_legacy SEC("maps") inner_lru_hash_map = {
.type = BPF_MAP_TYPE_LRU_HASH,
.key_size = sizeof(u32),
.value_size = sizeof(long),
.max_entries = MAX_ENTRIES,
.map_flags = BPF_F_NUMA_NODE,
.numa_node = 0,
};

struct bpf_map_def_legacy SEC("maps") array_of_lru_hashs = {
.type = BPF_MAP_TYPE_ARRAY_OF_MAPS,
.key_size = sizeof(u32),
.max_entries = MAX_NR_CPUS,
};

struct bpf_map_def_legacy SEC("maps") percpu_hash_map = {
.type = BPF_MAP_TYPE_PERCPU_HASH,
.key_size = sizeof(u32),
.value_size = sizeof(long),
.max_entries = MAX_ENTRIES,
};

struct bpf_map_def_legacy SEC("maps") hash_map_alloc = {
.type = BPF_MAP_TYPE_HASH,
.key_size = sizeof(u32),
.value_size = sizeof(long),
.max_entries = MAX_ENTRIES,
.map_flags = BPF_F_NO_PREALLOC,
};

struct bpf_map_def_legacy SEC("maps") percpu_hash_map_alloc = {
.type = BPF_MAP_TYPE_PERCPU_HASH,
.key_size = sizeof(u32),
.value_size = sizeof(long),
.max_entries = MAX_ENTRIES,
.map_flags = BPF_F_NO_PREALLOC,
};

struct bpf_map_def_legacy SEC("maps") lpm_trie_map_alloc = {
.type = BPF_MAP_TYPE_LPM_TRIE,
.key_size = 8,
.value_size = sizeof(long),
.max_entries = 10000,
.map_flags = BPF_F_NO_PREALLOC,
};

struct bpf_map_def_legacy SEC("maps") array_map = {
.type = BPF_MAP_TYPE_ARRAY,
.key_size = sizeof(u32),
.value_size = sizeof(long),
.max_entries = MAX_ENTRIES,
};

struct bpf_map_def_legacy SEC("maps") lru_hash_lookup_map = {
.type = BPF_MAP_TYPE_LRU_HASH,
.key_size = sizeof(u32),
.value_size = sizeof(long),
.max_entries = MAX_ENTRIES,
};

SEC("kprobe/sys_getuid")
struct {
__uint(type, BPF_MAP_TYPE_PERCPU_HASH);
__uint(key_size, sizeof(u32));
__uint(value_size, sizeof(long));
__uint(max_entries, MAX_ENTRIES);
} percpu_hash_map SEC(".maps");

struct {
__uint(type, BPF_MAP_TYPE_HASH);
__type(key, u32);
__type(value, long);
__uint(max_entries, MAX_ENTRIES);
__uint(map_flags, BPF_F_NO_PREALLOC);
} hash_map_alloc SEC(".maps");

struct {
__uint(type, BPF_MAP_TYPE_PERCPU_HASH);
__uint(key_size, sizeof(u32));
__uint(value_size, sizeof(long));
__uint(max_entries, MAX_ENTRIES);
__uint(map_flags, BPF_F_NO_PREALLOC);
} percpu_hash_map_alloc SEC(".maps");

struct {
__uint(type, BPF_MAP_TYPE_LPM_TRIE);
__uint(key_size, 8);
__uint(value_size, sizeof(long));
__uint(max_entries, 10000);
__uint(map_flags, BPF_F_NO_PREALLOC);
} lpm_trie_map_alloc SEC(".maps");

struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__type(key, u32);
__type(value, long);
__uint(max_entries, MAX_ENTRIES);
} array_map SEC(".maps");

struct {
__uint(type, BPF_MAP_TYPE_LRU_HASH);
__type(key, u32);
__type(value, long);
__uint(max_entries, MAX_ENTRIES);
} lru_hash_lookup_map SEC(".maps");

SEC("kprobe/" SYSCALL(sys_getuid))
int stress_hmap(struct pt_regs *ctx)
{
u32 key = bpf_get_current_pid_tgid();
@@ -114,7 +117,7 @@ int stress_hmap(struct pt_regs *ctx)
return 0;
}

SEC("kprobe/sys_geteuid")
SEC("kprobe/" SYSCALL(sys_geteuid))
int stress_percpu_hmap(struct pt_regs *ctx)
{
u32 key = bpf_get_current_pid_tgid();
@@ -128,7 +131,7 @@ int stress_percpu_hmap(struct pt_regs *ctx)
return 0;
}

SEC("kprobe/sys_getgid")
SEC("kprobe/" SYSCALL(sys_getgid))
int stress_hmap_alloc(struct pt_regs *ctx)
{
u32 key = bpf_get_current_pid_tgid();
@@ -142,7 +145,7 @@ int stress_hmap_alloc(struct pt_regs *ctx)
return 0;
}

SEC("kprobe/sys_getegid")
SEC("kprobe/" SYSCALL(sys_getegid))
int stress_percpu_hmap_alloc(struct pt_regs *ctx)
{
u32 key = bpf_get_current_pid_tgid();
@@ -236,7 +239,7 @@ int stress_lru_hmap_alloc(struct pt_regs *ctx)
return 0;
}

SEC("kprobe/sys_gettid")
SEC("kprobe/" SYSCALL(sys_gettid))
int stress_lpm_trie_map_alloc(struct pt_regs *ctx)
{
union {
@@ -258,7 +261,7 @@ int stress_lpm_trie_map_alloc(struct pt_regs *ctx)
return 0;
}

SEC("kprobe/sys_getpgid")
SEC("kprobe/" SYSCALL(sys_getpgid))
int stress_hash_map_lookup(struct pt_regs *ctx)
{
u32 key = 1, i;
@@ -271,7 +274,7 @@ int stress_hash_map_lookup(struct pt_regs *ctx)
return 0;
}

SEC("kprobe/sys_getppid")
SEC("kprobe/" SYSCALL(sys_getppid))
int stress_array_map_lookup(struct pt_regs *ctx)
{
u32 key = 1, i;
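
The SEC("kprobe/sys_*") to SEC("kprobe/" SYSCALL(sys_*)) changes above rely
on the SYSCALL() helper pulled in via trace_common.h, which expands a syscall
name to the architecture-specific symbol the kprobe actually has to attach
to. Roughly, as a sketch rather than the header's exact contents:

	/* Sketch of what trace_common.h provides (x86_64 case): syscall entry
	 * points are named __x64_sys_*, so the kprobe section needs that prefix. */
	#ifdef __x86_64__
	#define SYSCALL(SYS) "__x64_" __stringify(SYS)
	#else
	#define SYSCALL(SYS) __stringify(SYS)
	#endif

	/* SEC("kprobe/" SYSCALL(sys_getuid)) thus becomes SEC("kprobe/__x64_sys_getuid"). */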
samples/bpf/map_perf_test_user.c (second changed file; diff not loaded)
