Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf
Alexei Starovoitov says:

====================
pull-request: bpf 2020-11-06

1) Pre-allocated per-cpu hashmap needs to zero-fill reused element, from David.

2) Tighten bpf_lsm function check, from KP.

3) Fix bpftool attaching to flow dissector, from Lorenz.

4) Use -fno-gcse for the whole kernel/bpf/core.c instead of function attribute, from Ard.

* git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf:
  bpf: Update verification logic for LSM programs
  bpf: Zero-fill re-used per-cpu map element
  bpf: BPF_PRELOAD depends on BPF_SYSCALL
  tools/bpftool: Fix attaching flow dissector
  libbpf: Fix possible use after free in xsk_socket__delete
  libbpf: Fix null dereference in xsk_socket__delete
  libbpf, hashmap: Fix undefined behavior in hash_bits
  bpf: Don't rely on GCC __attribute__((optimize)) to disable GCSE
  tools, bpftool: Remove two unused variables.
  tools, bpftool: Avoid array index warnings.
  xsk: Fix possible memory leak at socket close
  bpf: Add struct bpf_redir_neigh forward declaration to BPF helper defs
  samples/bpf: Set rlimit for memlock to infinity in all samples
  bpf: Fix -Wshadow warnings
  selftest/bpf: Fix profiler test using CO-RE relocation for enums
====================

Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Jakub Kicinski <[email protected]>
kuba-moo committed Nov 7, 2020
2 parents bf3e762 + 6f64e47 commit 86bbf01
Showing 25 changed files with 346 additions and 49 deletions.
2 changes: 0 additions & 2 deletions include/linux/compiler-gcc.h
@@ -175,5 +175,3 @@
 #else
 #define __diag_GCC_8(s)
 #endif
-
-#define __no_fgcse __attribute__((optimize("-fno-gcse")))
4 changes: 0 additions & 4 deletions include/linux/compiler_types.h
@@ -247,10 +247,6 @@ struct ftrace_likely_data {
 #define asm_inline asm
 #endif
 
-#ifndef __no_fgcse
-# define __no_fgcse
-#endif
-
 /* Are two types/vars the same type (ignoring qualifiers)? */
 #define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
 
22 changes: 11 additions & 11 deletions include/linux/filter.h
@@ -558,21 +558,21 @@ struct sk_filter {
 DECLARE_STATIC_KEY_FALSE(bpf_stats_enabled_key);
 
 #define __BPF_PROG_RUN(prog, ctx, dfunc) ({ \
-	u32 ret; \
+	u32 __ret; \
 	cant_migrate(); \
 	if (static_branch_unlikely(&bpf_stats_enabled_key)) { \
-		struct bpf_prog_stats *stats; \
-		u64 start = sched_clock(); \
-		ret = dfunc(ctx, (prog)->insnsi, (prog)->bpf_func); \
-		stats = this_cpu_ptr(prog->aux->stats); \
-		u64_stats_update_begin(&stats->syncp); \
-		stats->cnt++; \
-		stats->nsecs += sched_clock() - start; \
-		u64_stats_update_end(&stats->syncp); \
+		struct bpf_prog_stats *__stats; \
+		u64 __start = sched_clock(); \
+		__ret = dfunc(ctx, (prog)->insnsi, (prog)->bpf_func); \
+		__stats = this_cpu_ptr(prog->aux->stats); \
+		u64_stats_update_begin(&__stats->syncp); \
+		__stats->cnt++; \
+		__stats->nsecs += sched_clock() - __start; \
+		u64_stats_update_end(&__stats->syncp); \
 	} else { \
-		ret = dfunc(ctx, (prog)->insnsi, (prog)->bpf_func); \
+		__ret = dfunc(ctx, (prog)->insnsi, (prog)->bpf_func); \
 	} \
-	ret; })
+	__ret; })
 
 #define BPF_PROG_RUN(prog, ctx) \
 	__BPF_PROG_RUN(prog, ctx, bpf_dispatcher_nop_func)
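
For context, the rename above comes from the "bpf: Fix -Wshadow warnings" patch in this series: __BPF_PROG_RUN() is a statement-expression macro, so any plain-named local it declares can shadow an identically named variable at the call site. A minimal standalone sketch of the effect, with invented macro names rather than the kernel call sites:

#include <stdio.h>

/* Old style: the macro-local is named "ret". */
#define RUN_OLD(x)   ({ int ret = (x) * 2; ret; })
/* New style: a "__" prefix keeps macro locals out of the caller's namespace. */
#define RUN_FIXED(x) ({ int __ret = (x) * 2; __ret; })

int main(void)
{
	int ret;

	/* Building with -Wshadow warns here: the macro's "ret" shadows the
	 * caller's "ret". Worse, passing "ret" itself as the argument would
	 * silently read the uninitialized inner variable instead. */
	ret = RUN_OLD(5);
	printf("old: %d\n", ret);

	ret = RUN_FIXED(5);
	printf("fixed: %d\n", ret);
	return 0;
}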
2 changes: 1 addition & 1 deletion include/net/xsk_buff_pool.h
@@ -86,7 +86,7 @@ int xp_assign_dev_shared(struct xsk_buff_pool *pool, struct xdp_umem *umem,
 void xp_destroy(struct xsk_buff_pool *pool);
 void xp_release(struct xdp_buff_xsk *xskb);
 void xp_get_pool(struct xsk_buff_pool *pool);
-void xp_put_pool(struct xsk_buff_pool *pool);
+bool xp_put_pool(struct xsk_buff_pool *pool);
 void xp_clear_dev(struct xsk_buff_pool *pool);
 void xp_add_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs);
 void xp_del_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs);
6 changes: 5 additions & 1 deletion kernel/bpf/Makefile
@@ -1,6 +1,10 @@
 # SPDX-License-Identifier: GPL-2.0
 obj-y := core.o
-CFLAGS_core.o += $(call cc-disable-warning, override-init)
+ifneq ($(CONFIG_BPF_JIT_ALWAYS_ON),y)
+# ___bpf_prog_run() needs GCSE disabled on x86; see 3193c0836f203 for details
+cflags-nogcse-$(CONFIG_X86)$(CONFIG_CC_IS_GCC) := -fno-gcse
+endif
+CFLAGS_core.o += $(call cc-disable-warning, override-init) $(cflags-nogcse-yy)
 
 obj-$(CONFIG_BPF_SYSCALL) += syscall.o verifier.o inode.o helpers.o tnum.o bpf_iter.o map_iter.o task_iter.o prog_iter.o
 obj-$(CONFIG_BPF_SYSCALL) += hashtab.o arraymap.o percpu_freelist.o bpf_lru_list.o lpm_trie.o map_in_map.o
10 changes: 7 additions & 3 deletions kernel/bpf/bpf_lsm.c
@@ -13,6 +13,7 @@
 #include <linux/bpf_verifier.h>
 #include <net/bpf_sk_storage.h>
 #include <linux/bpf_local_storage.h>
+#include <linux/btf_ids.h>
 
 /* For every LSM hook that allows attachment of BPF programs, declare a nop
  * function where a BPF program can be attached.
@@ -26,7 +27,11 @@ noinline RET bpf_lsm_##NAME(__VA_ARGS__) \
 #include <linux/lsm_hook_defs.h>
 #undef LSM_HOOK
 
-#define BPF_LSM_SYM_PREFX "bpf_lsm_"
+#define LSM_HOOK(RET, DEFAULT, NAME, ...) BTF_ID(func, bpf_lsm_##NAME)
+BTF_SET_START(bpf_lsm_hooks)
+#include <linux/lsm_hook_defs.h>
+#undef LSM_HOOK
+BTF_SET_END(bpf_lsm_hooks)
 
 int bpf_lsm_verify_prog(struct bpf_verifier_log *vlog,
 			const struct bpf_prog *prog)
@@ -37,8 +42,7 @@ int bpf_lsm_verify_prog(struct bpf_verifier_log *vlog,
 		return -EINVAL;
 	}
 
-	if (strncmp(BPF_LSM_SYM_PREFX, prog->aux->attach_func_name,
-		    sizeof(BPF_LSM_SYM_PREFX) - 1)) {
+	if (!btf_id_set_contains(&bpf_lsm_hooks, prog->aux->attach_btf_id)) {
 		bpf_log(vlog, "attach_btf_id %u points to wrong type name %s\n",
 			prog->aux->attach_btf_id, prog->aux->attach_func_name);
 		return -EINVAL;
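
A rough sketch of the mechanism the new check relies on (this is illustrative, not the kernel's btf_ids.h implementation): the BTF_SET_START/BTF_ID/BTF_SET_END block above describes a sorted array of BTF IDs, one per bpf_lsm_* hook, and btf_id_set_contains() amounts to a binary search over that array. Struct and function names below are invented for the demo:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for a BTF ID set: a count plus sorted IDs. */
struct id_set {
	unsigned int cnt;
	const unsigned int *ids;
};

static bool id_set_contains(const struct id_set *set, unsigned int id)
{
	unsigned int lo = 0, hi = set->cnt;

	while (lo < hi) {
		unsigned int mid = lo + (hi - lo) / 2;

		if (set->ids[mid] == id)
			return true;
		if (set->ids[mid] < id)
			lo = mid + 1;
		else
			hi = mid;
	}
	return false;
}

int main(void)
{
	/* Pretend these are the BTF IDs of the bpf_lsm_* hook functions. */
	static const unsigned int hook_ids[] = { 101, 205, 377, 512 };
	struct id_set hooks = { 4, hook_ids };

	printf("%d %d\n", id_set_contains(&hooks, 205), id_set_contains(&hooks, 300));
	return 0;
}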
2 changes: 1 addition & 1 deletion kernel/bpf/core.c
@@ -1369,7 +1369,7 @@ u64 __weak bpf_probe_read_kernel(void *dst, u32 size, const void *unsafe_ptr)
  *
  * Decode and execute eBPF instructions.
  */
-static u64 __no_fgcse ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack)
+static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack)
 {
 #define BPF_INSN_2_LBL(x, y)	[BPF_##x | BPF_##y] = &&x##_##y
 #define BPF_INSN_3_LBL(x, y, z)	[BPF_##x | BPF_##y | BPF_##z] = &&x##_##y##_##z
30 changes: 28 additions & 2 deletions kernel/bpf/hashtab.c
@@ -821,6 +821,32 @@ static void pcpu_copy_value(struct bpf_htab *htab, void __percpu *pptr,
 	}
 }
 
+static void pcpu_init_value(struct bpf_htab *htab, void __percpu *pptr,
+			    void *value, bool onallcpus)
+{
+	/* When using prealloc and not setting the initial value on all cpus,
+	 * zero-fill element values for other cpus (just as what happens when
+	 * not using prealloc). Otherwise, bpf program has no way to ensure
+	 * known initial values for cpus other than current one
+	 * (onallcpus=false always when coming from bpf prog).
+	 */
+	if (htab_is_prealloc(htab) && !onallcpus) {
+		u32 size = round_up(htab->map.value_size, 8);
+		int current_cpu = raw_smp_processor_id();
+		int cpu;
+
+		for_each_possible_cpu(cpu) {
+			if (cpu == current_cpu)
+				bpf_long_memcpy(per_cpu_ptr(pptr, cpu), value,
+						size);
+			else
+				memset(per_cpu_ptr(pptr, cpu), 0, size);
+		}
+	} else {
+		pcpu_copy_value(htab, pptr, value, onallcpus);
+	}
+}
+
 static bool fd_htab_map_needs_adjust(const struct bpf_htab *htab)
 {
 	return htab->map.map_type == BPF_MAP_TYPE_HASH_OF_MAPS &&
@@ -891,7 +917,7 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
 			}
 		}
 
-		pcpu_copy_value(htab, pptr, value, onallcpus);
+		pcpu_init_value(htab, pptr, value, onallcpus);
 
 		if (!prealloc)
 			htab_elem_set_ptr(l_new, key_size, pptr);
@@ -1183,7 +1209,7 @@ static int __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
 		pcpu_copy_value(htab, htab_elem_get_ptr(l_old, key_size),
 				value, onallcpus);
 	} else {
-		pcpu_copy_value(htab, htab_elem_get_ptr(l_new, key_size),
+		pcpu_init_value(htab, htab_elem_get_ptr(l_new, key_size),
 				value, onallcpus);
 		hlist_nulls_add_head_rcu(&l_new->hash_node, head);
 		l_new = NULL;
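
The gap pcpu_init_value() closes is easiest to see from the BPF program side: an update from a program only supplies the value for the current CPU, so when a pre-allocated element is re-used the other CPUs' slots must be zero-filled or user space will read stale data left over from the element's previous owner. A hedged sketch of that usage pattern; the map layout, section name, and counter logic are illustrative, not taken from this series:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_HASH);
	__uint(max_entries, 64);
	__type(key, __u32);
	__type(value, __u64);
} counters SEC(".maps");

SEC("tracepoint/syscalls/sys_enter_getpid")
int count_getpid(void *ctx)
{
	__u32 key = 0;
	__u64 init = 0, *val;

	val = bpf_map_lookup_elem(&counters, &key);
	if (!val) {
		/* Creates the element with "init" on the current CPU only;
		 * with the fix above, a re-used preallocated element has the
		 * other CPUs' slots zero-filled instead of keeping stale data. */
		bpf_map_update_elem(&counters, &key, &init, BPF_NOEXIST);
		val = bpf_map_lookup_elem(&counters, &key);
		if (!val)
			return 0;
	}
	__sync_fetch_and_add(val, 1);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";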
1 change: 1 addition & 0 deletions kernel/bpf/preload/Kconfig
@@ -6,6 +6,7 @@ config USERMODE_DRIVER
 menuconfig BPF_PRELOAD
 	bool "Preload BPF file system with kernel specific program and map iterators"
 	depends on BPF
+	depends on BPF_SYSCALL
 	# The dependency on !COMPILE_TEST prevents it from being enabled
 	# in allmodconfig or allyesconfig configurations
 	depends on !COMPILE_TEST
3 changes: 2 additions & 1 deletion net/xdp/xsk.c
@@ -1146,7 +1146,8 @@ static void xsk_destruct(struct sock *sk)
 	if (!sock_flag(sk, SOCK_DEAD))
 		return;
 
-	xp_put_pool(xs->pool);
+	if (!xp_put_pool(xs->pool))
+		xdp_put_umem(xs->umem);
 
 	sk_refcnt_debug_dec(sk);
 }
7 changes: 5 additions & 2 deletions net/xdp/xsk_buff_pool.c
@@ -251,15 +251,18 @@ void xp_get_pool(struct xsk_buff_pool *pool)
 	refcount_inc(&pool->users);
 }
 
-void xp_put_pool(struct xsk_buff_pool *pool)
+bool xp_put_pool(struct xsk_buff_pool *pool)
 {
 	if (!pool)
-		return;
+		return false;
 
 	if (refcount_dec_and_test(&pool->users)) {
 		INIT_WORK(&pool->work, xp_release_deferred);
 		schedule_work(&pool->work);
+		return true;
 	}
+
+	return false;
 }
 
 static struct xsk_dma_map *xp_find_dma_map(struct xsk_buff_pool *pool)
2 changes: 1 addition & 1 deletion samples/bpf/task_fd_query_user.c
@@ -290,7 +290,7 @@ static int test_debug_fs_uprobe(char *binary_path, long offset, bool is_return)
 
 int main(int argc, char **argv)
 {
-	struct rlimit r = {1024*1024, RLIM_INFINITY};
+	struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
 	extern char __executable_start;
 	char filename[256], buf[256];
 	__u64 uprobe_file_offset;
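
For context, the rlimit struct in these samples feeds a setrlimit(RLIMIT_MEMLOCK, ...) call a few lines further down; on kernels of this era BPF maps and programs are charged against RLIMIT_MEMLOCK, so the samples now lift the limit entirely instead of guessing a size. A minimal sketch of that pattern, with the error handling invented for the example:

#include <stdio.h>
#include <sys/resource.h>

/* Raise the memlock limit so BPF object loading is not capped by it. */
static int bump_memlock_rlimit(void)
{
	struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};

	if (setrlimit(RLIMIT_MEMLOCK, &r)) {
		perror("setrlimit(RLIMIT_MEMLOCK)");
		return -1;
	}
	return 0;
}

int main(void)
{
	return bump_memlock_rlimit() ? 1 : 0;
}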
2 changes: 1 addition & 1 deletion samples/bpf/tracex2_user.c
@@ -116,7 +116,7 @@ static void int_exit(int sig)
 
 int main(int ac, char **argv)
 {
-	struct rlimit r = {1024*1024, RLIM_INFINITY};
+	struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
 	long key, next_key, value;
 	struct bpf_link *links[2];
 	struct bpf_program *prog;
2 changes: 1 addition & 1 deletion samples/bpf/tracex3_user.c
@@ -107,7 +107,7 @@ static void print_hist(int fd)
 
 int main(int ac, char **argv)
 {
-	struct rlimit r = {1024*1024, RLIM_INFINITY};
+	struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
 	struct bpf_link *links[2];
 	struct bpf_program *prog;
 	struct bpf_object *obj;
2 changes: 1 addition & 1 deletion samples/bpf/xdp_redirect_cpu_user.c
@@ -765,7 +765,7 @@ static int load_cpumap_prog(char *file_name, char *prog_name,
 
 int main(int argc, char **argv)
 {
-	struct rlimit r = {10 * 1024 * 1024, RLIM_INFINITY};
+	struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
 	char *prog_name = "xdp_cpu_map5_lb_hash_ip_pairs";
 	char *mprog_filename = "xdp_redirect_kern.o";
 	char *redir_interface = NULL, *redir_map = NULL;
2 changes: 1 addition & 1 deletion samples/bpf/xdp_rxq_info_user.c
@@ -450,7 +450,7 @@ static void stats_poll(int interval, int action, __u32 cfg_opt)
 int main(int argc, char **argv)
 {
 	__u32 cfg_options= NO_TOUCH ; /* Default: Don't touch packet memory */
-	struct rlimit r = {10 * 1024 * 1024, RLIM_INFINITY};
+	struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
 	struct bpf_prog_load_attr prog_load_attr = {
 		.prog_type	= BPF_PROG_TYPE_XDP,
 	};
1 change: 1 addition & 0 deletions scripts/bpf_helpers_doc.py
@@ -408,6 +408,7 @@ class PrinterHelpers(Printer):
             'struct bpf_perf_event_data',
             'struct bpf_perf_event_value',
             'struct bpf_pidns_info',
+            'struct bpf_redir_neigh',
             'struct bpf_sock',
             'struct bpf_sock_addr',
             'struct bpf_sock_ops',
7 changes: 6 additions & 1 deletion tools/bpf/bpftool/feature.c
@@ -843,9 +843,14 @@ static int handle_perms(void)
 	else
 		p_err("missing %s%s%s%s%s%s%s%srequired for full feature probing; run as root or use 'unprivileged'",
 		      capability_msg(bpf_caps, 0),
+#ifdef CAP_BPF
 		      capability_msg(bpf_caps, 1),
 		      capability_msg(bpf_caps, 2),
-		      capability_msg(bpf_caps, 3));
+		      capability_msg(bpf_caps, 3)
+#else
+			"", "", "", "", "", ""
+#endif /* CAP_BPF */
+			);
 	goto exit_free;
 }
 
2 changes: 1 addition & 1 deletion tools/bpf/bpftool/prog.c
@@ -940,7 +940,7 @@ static int parse_attach_detach_args(int argc, char **argv, int *progfd,
 	}
 
 	if (*attach_type == BPF_FLOW_DISSECTOR) {
-		*mapfd = -1;
+		*mapfd = 0;
 		return 0;
 	}
 
4 changes: 2 additions & 2 deletions tools/bpf/bpftool/skeleton/profiler.bpf.c
@@ -70,7 +70,7 @@ int BPF_PROG(fentry_XXX)
 static inline void
 fexit_update_maps(u32 id, struct bpf_perf_event_value *after)
 {
-	struct bpf_perf_event_value *before, diff, *accum;
+	struct bpf_perf_event_value *before, diff;
 
 	before = bpf_map_lookup_elem(&fentry_readings, &id);
 	/* only account samples with a valid fentry_reading */
@@ -95,7 +95,7 @@ int BPF_PROG(fexit_XXX)
 {
 	struct bpf_perf_event_value readings[MAX_NUM_MATRICS];
 	u32 cpu = bpf_get_smp_processor_id();
-	u32 i, one = 1, zero = 0;
+	u32 i, zero = 0;
 	int err;
 	u64 *count;
 
15 changes: 9 additions & 6 deletions tools/lib/bpf/hashmap.h
@@ -15,6 +15,9 @@
 static inline size_t hash_bits(size_t h, int bits)
 {
 	/* shuffle bits and return requested number of upper bits */
+	if (bits == 0)
+		return 0;
+
 #if (__SIZEOF_SIZE_T__ == __SIZEOF_LONG_LONG__)
 	/* LP64 case */
 	return (h * 11400714819323198485llu) >> (__SIZEOF_LONG_LONG__ * 8 - bits);
@@ -174,17 +177,17 @@ bool hashmap__find(const struct hashmap *map, const void *key, void **value);
  * @key: key to iterate entries for
  */
 #define hashmap__for_each_key_entry(map, cur, _key) \
-	for (cur = ({ size_t bkt = hash_bits(map->hash_fn((_key), map->ctx), \
-					     map->cap_bits); \
-		      map->buckets ? map->buckets[bkt] : NULL; }); \
+	for (cur = map->buckets \
+		     ? map->buckets[hash_bits(map->hash_fn((_key), map->ctx), map->cap_bits)] \
+		     : NULL; \
 	     cur; \
 	     cur = cur->next) \
 		if (map->equal_fn(cur->key, (_key), map->ctx))
 
 #define hashmap__for_each_key_entry_safe(map, cur, tmp, _key) \
-	for (cur = ({ size_t bkt = hash_bits(map->hash_fn((_key), map->ctx), \
-					     map->cap_bits); \
-		      cur = map->buckets ? map->buckets[bkt] : NULL; }); \
+	for (cur = map->buckets \
+		     ? map->buckets[hash_bits(map->hash_fn((_key), map->ctx), map->cap_bits)] \
+		     : NULL; \
 	     cur && ({ tmp = cur->next; true; }); \
 	     cur = tmp) \
 		if (map->equal_fn(cur->key, (_key), map->ctx))
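
The bits == 0 guard added above matters because a freshly created libbpf hashmap has cap_bits == 0, and the old code then shifted the 64-bit product right by (64 - 0) bits; shifting by the full width of the type is undefined behaviour in C. A small standalone illustration of the guarded helper, not libbpf code itself:

#include <stdint.h>
#include <stdio.h>

/* Fibonacci-hash style bucket selection, guarded for the empty-map case. */
static uint64_t hash_bits_guarded(uint64_t h, int bits)
{
	if (bits == 0)
		return 0;	/* shifting a 64-bit value by 64 would be UB */
	return (h * 11400714819323198485llu) >> (64 - bits);
}

int main(void)
{
	/* With the unguarded version, bits == 0 meant "h * K >> 64": undefined. */
	printf("%llu\n", (unsigned long long)hash_bits_guarded(12345, 0));
	printf("%llu\n", (unsigned long long)hash_bits_guarded(12345, 8));
	return 0;
}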
9 changes: 6 additions & 3 deletions tools/lib/bpf/xsk.c
@@ -891,13 +891,16 @@ int xsk_umem__delete(struct xsk_umem *umem)
 void xsk_socket__delete(struct xsk_socket *xsk)
 {
 	size_t desc_sz = sizeof(struct xdp_desc);
-	struct xsk_ctx *ctx = xsk->ctx;
 	struct xdp_mmap_offsets off;
+	struct xsk_umem *umem;
+	struct xsk_ctx *ctx;
 	int err;
 
 	if (!xsk)
 		return;
 
+	ctx = xsk->ctx;
+	umem = ctx->umem;
 	if (ctx->prog_fd != -1) {
 		xsk_delete_bpf_maps(xsk);
 		close(ctx->prog_fd);
@@ -917,11 +920,11 @@ void xsk_socket__delete(struct xsk_socket *xsk)
 
 	xsk_put_ctx(ctx);
 
-	ctx->umem->refcount--;
+	umem->refcount--;
 	/* Do not close an fd that also has an associated umem connected
 	 * to it.
 	 */
-	if (xsk->fd != ctx->umem->fd)
+	if (xsk->fd != umem->fd)
 		close(xsk->fd);
 	free(xsk);
 }