Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next
Alexei Starovoitov says:

====================
pull-request: bpf-next 2020-01-22

The following pull-request contains BPF updates for your *net-next* tree.

We've added 92 non-merge commits during the last 16 day(s) which contain
a total of 320 files changed, 7532 insertions(+), 1448 deletions(-).

The main changes are:

1) function by function verification and program extensions from Alexei.

2) massive cleanup of selftests/bpf from Toke and Andrii.

3) batched bpf map operations from Brian and Yonghong.

4) tcp congestion control in bpf from Martin.

5) bulking for non-map xdp_redirect from Toke.

6) bpf_send_signal_thread helper from Yonghong.
====================

Signed-off-by: David S. Miller <[email protected]>
davem330 committed Jan 23, 2020
2 parents c5d19a6 + 85cc12f commit 954b3c4
Showing 320 changed files with 7,532 additions and 1,448 deletions.
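
Of the six highlights above, only the bpf_send_signal_thread() helper (6) has no hunk in the excerpt below. A minimal sketch of how a tracing program might use it — the kprobe target, signal number, and header conventions are illustrative assumptions, not taken from this commit:

// SPDX-License-Identifier: GPL-2.0
/* Hedged sketch: bpf_send_signal_thread() works like bpf_send_signal()
 * but delivers the signal to the current thread rather than to the
 * thread-group leader.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("kprobe/__x64_sys_nanosleep")
int sig_current_thread(void *ctx)
{
	bpf_send_signal_thread(10 /* SIGUSR1 on x86 */);
	return 0;
}

char _license[] SEC("license") = "GPL";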
18 changes: 8 additions & 10 deletions arch/x86/net/bpf_jit_comp.c
@@ -1328,7 +1328,7 @@ xadd: if (is_imm8(insn->off))
return proglen;
}

-static void save_regs(struct btf_func_model *m, u8 **prog, int nr_args,
+static void save_regs(const struct btf_func_model *m, u8 **prog, int nr_args,
int stack_size)
{
int i;
@@ -1344,7 +1344,7 @@ static void save_regs(struct btf_func_model *m, u8 **prog, int nr_args,
-(stack_size - i * 8));
}

-static void restore_regs(struct btf_func_model *m, u8 **prog, int nr_args,
+static void restore_regs(const struct btf_func_model *m, u8 **prog, int nr_args,
int stack_size)
{
int i;
@@ -1361,7 +1361,7 @@ static void restore_regs(struct btf_func_model *m, u8 **prog, int nr_args,
-(stack_size - i * 8));
}

-static int invoke_bpf(struct btf_func_model *m, u8 **pprog,
+static int invoke_bpf(const struct btf_func_model *m, u8 **pprog,
struct bpf_prog **progs, int prog_cnt, int stack_size)
{
u8 *prog = *pprog;
@@ -1456,7 +1456,8 @@ static int invoke_bpf(struct btf_func_model *m, u8 **pprog,
* add rsp, 8 // skip eth_type_trans's frame
* ret // return to its caller
*/
-int arch_prepare_bpf_trampoline(void *image, struct btf_func_model *m, u32 flags,
+int arch_prepare_bpf_trampoline(void *image, void *image_end,
+const struct btf_func_model *m, u32 flags,
struct bpf_prog **fentry_progs, int fentry_cnt,
struct bpf_prog **fexit_progs, int fexit_cnt,
void *orig_call)
@@ -1523,13 +1524,10 @@ int arch_prepare_bpf_trampoline(void *image, struct btf_func_model *m, u32 flags
/* skip our return address and return to parent */
EMIT4(0x48, 0x83, 0xC4, 8); /* add rsp, 8 */
EMIT1(0xC3); /* ret */
-/* One half of the page has active running trampoline.
- * Another half is an area for next trampoline.
- * Make sure the trampoline generation logic doesn't overflow.
- */
-if (WARN_ON_ONCE(prog - (u8 *)image > PAGE_SIZE / 2 - BPF_INSN_SAFETY))
+/* Make sure the trampoline generation logic doesn't overflow */
+if (WARN_ON_ONCE(prog > (u8 *)image_end - BPF_INSN_SAFETY))
return -EFAULT;
-return 0;
+return prog - (u8 *)image;
}

static int emit_cond_near_jump(u8 **pprog, void *func, void *ip, u8 jmp_cond)
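The bpf_jit_comp.c change above moves the trampoline's size policy to the caller: arch_prepare_bpf_trampoline() now receives an explicit image_end bound and, on success, returns the number of bytes emitted instead of 0. A hedged caller sketch, loosely modeled on kernel/bpf/trampoline.c (locking and error paths elided, names abbreviated):

/* One half of the trampoline page is live, the other half is scratch
 * for the next generation; the caller, not the JIT, now encodes that
 * split via image_end, and the return value reports bytes used. */
void *new_image = tr->image + ((tr->selector + 1) & 1) * PAGE_SIZE / 2;
int used;

used = arch_prepare_bpf_trampoline(new_image, new_image + PAGE_SIZE / 2,
				   &tr->func.model, flags,
				   fentry_progs, fentry_cnt,
				   fexit_progs, fexit_cnt,
				   tr->func.addr);
if (used < 0)
	return used;	/* e.g. -EFAULT if the image would overflow */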
4 changes: 2 additions & 2 deletions drivers/net/tun.c
@@ -1718,7 +1718,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
if (err < 0)
goto err_xdp;
if (err == XDP_REDIRECT)
-xdp_do_flush_map();
+xdp_do_flush();
if (err != XDP_PASS)
goto out;

@@ -2549,7 +2549,7 @@ static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
}

if (flush)
-xdp_do_flush_map();
+xdp_do_flush();

rcu_read_unlock();
local_bh_enable();
2 changes: 1 addition & 1 deletion drivers/net/veth.c
@@ -769,7 +769,7 @@ static int veth_poll(struct napi_struct *napi, int budget)
if (xdp_xmit & VETH_XDP_TX)
veth_xdp_flush(rq->dev, &bq);
if (xdp_xmit & VETH_XDP_REDIR)
-xdp_do_flush_map();
+xdp_do_flush();
xdp_clear_return_frame_no_direct();

return done;
2 changes: 1 addition & 1 deletion drivers/net/virtio_net.c
@@ -1432,7 +1432,7 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
virtqueue_napi_complete(napi, rq->vq, received);

if (xdp_xmit & VIRTIO_XDP_REDIR)
-xdp_do_flush_map();
+xdp_do_flush();

if (xdp_xmit & VIRTIO_XDP_TX) {
sq = virtnet_xdp_sq(vi);
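The tun, veth, and virtio_net hunks above are one mechanical rename: xdp_do_flush_map() becomes xdp_do_flush(), since with this series a redirect can be bulked through dev_xdp_enqueue() with no map involved. The driver idiom is unchanged — flush once per NAPI poll after the last XDP_REDIRECT verdict. A hedged sketch of that pattern (every name except xdp_do_flush() is illustrative):

static int example_napi_poll(struct napi_struct *napi, int budget)
{
	bool redirected = false;
	int done;

	/* rx loop: each XDP_REDIRECT verdict queues frames into
	 * per-CPU bulk queues via xdp_do_redirect() */
	done = example_rx_loop(napi, budget, &redirected);

	/* one flush drains every bulk queue filled during the loop */
	if (redirected)
		xdp_do_flush();

	return done;
}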
128 changes: 122 additions & 6 deletions include/linux/bpf.h
@@ -17,6 +17,7 @@
#include <linux/u64_stats_sync.h>
#include <linux/refcount.h>
#include <linux/mutex.h>
+#include <linux/module.h>

struct bpf_verifier_env;
struct bpf_verifier_log;
@@ -43,6 +44,15 @@ struct bpf_map_ops {
int (*map_get_next_key)(struct bpf_map *map, void *key, void *next_key);
void (*map_release_uref)(struct bpf_map *map);
void *(*map_lookup_elem_sys_only)(struct bpf_map *map, void *key);
+int (*map_lookup_batch)(struct bpf_map *map, const union bpf_attr *attr,
+union bpf_attr __user *uattr);
+int (*map_lookup_and_delete_batch)(struct bpf_map *map,
+const union bpf_attr *attr,
+union bpf_attr __user *uattr);
+int (*map_update_batch)(struct bpf_map *map, const union bpf_attr *attr,
+union bpf_attr __user *uattr);
+int (*map_delete_batch)(struct bpf_map *map, const union bpf_attr *attr,
+union bpf_attr __user *uattr);

/* funcs callable from userspace and from eBPF programs */
void *(*map_lookup_elem)(struct bpf_map *map, void *key);
@@ -106,6 +116,7 @@ struct bpf_map {
struct btf *btf;
struct bpf_map_memory memory;
char name[BPF_OBJ_NAME_LEN];
+u32 btf_vmlinux_value_type_id;
bool unpriv_array;
bool frozen; /* write-once; write-protected by freeze_mutex */
/* 22 bytes hole */
@@ -183,7 +194,8 @@ static inline bool bpf_map_offload_neutral(const struct bpf_map *map)

static inline bool bpf_map_support_seq_show(const struct bpf_map *map)
{
-return map->btf && map->ops->map_seq_show_elem;
+return (map->btf_value_type_id || map->btf_vmlinux_value_type_id) &&
+map->ops->map_seq_show_elem;
}

int map_check_no_btf(const struct bpf_map *map,
@@ -349,6 +361,10 @@ struct bpf_verifier_ops {
const struct bpf_insn *src,
struct bpf_insn *dst,
struct bpf_prog *prog, u32 *target_size);
+int (*btf_struct_access)(struct bpf_verifier_log *log,
+const struct btf_type *t, int off, int size,
+enum bpf_access_type atype,
+u32 *next_btf_id);
};

struct bpf_prog_offload_ops {
@@ -437,7 +453,8 @@ struct btf_func_model {
* fentry = a set of program to run before calling original function
* fexit = a set of program to run after original function
*/
-int arch_prepare_bpf_trampoline(void *image, struct btf_func_model *m, u32 flags,
+int arch_prepare_bpf_trampoline(void *image, void *image_end,
+const struct btf_func_model *m, u32 flags,
struct bpf_prog **fentry_progs, int fentry_cnt,
struct bpf_prog **fexit_progs, int fexit_cnt,
void *orig_call);
@@ -448,7 +465,8 @@ void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start);
enum bpf_tramp_prog_type {
BPF_TRAMP_FENTRY,
BPF_TRAMP_FEXIT,
-BPF_TRAMP_MAX
+BPF_TRAMP_MAX,
+BPF_TRAMP_REPLACE, /* more than MAX */
};

struct bpf_trampoline {
@@ -463,6 +481,11 @@ struct bpf_trampoline {
void *addr;
bool ftrace_managed;
} func;
+/* if !NULL this is BPF_PROG_TYPE_EXT program that extends another BPF
+ * program by replacing one of its functions. func.addr is the address
+ * of the function it replaced.
+ */
+struct bpf_prog *extension_prog;
/* list of BPF programs using this trampoline */
struct hlist_head progs_hlist[BPF_TRAMP_MAX];
/* Number of attached programs. A counter per kind. */
@@ -558,6 +581,7 @@ static inline void bpf_dispatcher_change_prog(struct bpf_dispatcher *d,
#endif

struct bpf_func_info_aux {
+u16 linkage;
bool unreliable;
};

@@ -668,6 +692,73 @@ struct bpf_array_aux {
struct work_struct work;
};

+struct bpf_struct_ops_value;
+struct btf_type;
+struct btf_member;
+
+#define BPF_STRUCT_OPS_MAX_NR_MEMBERS 64
+struct bpf_struct_ops {
+const struct bpf_verifier_ops *verifier_ops;
+int (*init)(struct btf *btf);
+int (*check_member)(const struct btf_type *t,
+const struct btf_member *member);
+int (*init_member)(const struct btf_type *t,
+const struct btf_member *member,
+void *kdata, const void *udata);
+int (*reg)(void *kdata);
+void (*unreg)(void *kdata);
+const struct btf_type *type;
+const struct btf_type *value_type;
+const char *name;
+struct btf_func_model func_models[BPF_STRUCT_OPS_MAX_NR_MEMBERS];
+u32 type_id;
+u32 value_id;
+};
+
+#if defined(CONFIG_BPF_JIT) && defined(CONFIG_BPF_SYSCALL)
+#define BPF_MODULE_OWNER ((void *)((0xeB9FUL << 2) + POISON_POINTER_DELTA))
+const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id);
+void bpf_struct_ops_init(struct btf *btf);
+bool bpf_struct_ops_get(const void *kdata);
+void bpf_struct_ops_put(const void *kdata);
+int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key,
+void *value);
+static inline bool bpf_try_module_get(const void *data, struct module *owner)
+{
+if (owner == BPF_MODULE_OWNER)
+return bpf_struct_ops_get(data);
+else
+return try_module_get(owner);
+}
+static inline void bpf_module_put(const void *data, struct module *owner)
+{
+if (owner == BPF_MODULE_OWNER)
+bpf_struct_ops_put(data);
+else
+module_put(owner);
+}
+#else
+static inline const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id)
+{
+return NULL;
+}
+static inline void bpf_struct_ops_init(struct btf *btf) { }
+static inline bool bpf_try_module_get(const void *data, struct module *owner)
+{
+return try_module_get(owner);
+}
+static inline void bpf_module_put(const void *data, struct module *owner)
+{
+module_put(owner);
+}
+static inline int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map,
+void *key,
+void *value)
+{
+return -EINVAL;
+}
+#endif
+
struct bpf_array {
struct bpf_map map;
u32 elem_size;
@@ -906,6 +997,15 @@ void *bpf_map_area_alloc(u64 size, int numa_node);
void *bpf_map_area_mmapable_alloc(u64 size, int numa_node);
void bpf_map_area_free(void *base);
void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr);
+int generic_map_lookup_batch(struct bpf_map *map,
+const union bpf_attr *attr,
+union bpf_attr __user *uattr);
+int generic_map_update_batch(struct bpf_map *map,
+const union bpf_attr *attr,
+union bpf_attr __user *uattr);
+int generic_map_delete_batch(struct bpf_map *map,
+const union bpf_attr *attr,
+union bpf_attr __user *uattr);

extern int sysctl_unprivileged_bpf_disabled;

@@ -962,7 +1062,9 @@ struct sk_buff;

struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map, u32 key);
struct bpf_dtab_netdev *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key);
-void __dev_map_flush(void);
+void __dev_flush(void);
+int dev_xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
+struct net_device *dev_rx);
int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
struct net_device *dev_rx);
int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
@@ -1006,7 +1108,13 @@ int btf_distill_func_proto(struct bpf_verifier_log *log,
const char *func_name,
struct btf_func_model *m);

-int btf_check_func_arg_match(struct bpf_verifier_env *env, int subprog);
+struct bpf_reg_state;
+int btf_check_func_arg_match(struct bpf_verifier_env *env, int subprog,
+struct bpf_reg_state *regs);
+int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog,
+struct bpf_reg_state *reg);
+int btf_check_type_match(struct bpf_verifier_env *env, struct bpf_prog *prog,
+struct btf *btf, const struct btf_type *t);

struct bpf_prog *bpf_prog_by_id(u32 id);

@@ -1071,13 +1179,20 @@ static inline struct net_device *__dev_map_hash_lookup_elem(struct bpf_map *map
return NULL;
}

-static inline void __dev_map_flush(void)
+static inline void __dev_flush(void)
{
}

struct xdp_buff;
struct bpf_dtab_netdev;

+static inline
+int dev_xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
+struct net_device *dev_rx)
+{
+return 0;
+}
+
static inline
int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
struct net_device *dev_rx)
@@ -1299,6 +1414,7 @@ extern const struct bpf_func_proto bpf_get_local_storage_proto;
extern const struct bpf_func_proto bpf_strtol_proto;
extern const struct bpf_func_proto bpf_strtoul_proto;
extern const struct bpf_func_proto bpf_tcp_sock_proto;
+extern const struct bpf_func_proto bpf_jiffies64_proto;

/* Shared helpers among cBPF and eBPF. */
void bpf_user_rnd_init_once(void);
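The map_*_batch ops and generic_map_*_batch() helpers above back the new BPF_MAP_LOOKUP_BATCH family of syscall commands. Userspace reaches them through the libbpf wrappers added in the same series; a hedged sketch of a batched dump, assuming a map with 4-byte keys and values (the chunk size and error-handling style follow the selftests, but this is not code from the commit):

#include <errno.h>
#include <bpf/bpf.h>

static int dump_map(int map_fd)
{
	__u32 keys[64], values[64];
	__u32 batch, count;
	void *in_batch = NULL;	/* NULL token = start of the map */
	int err;

	DECLARE_LIBBPF_OPTS(bpf_map_batch_opts, opts);

	do {
		count = 64;
		err = bpf_map_lookup_batch(map_fd, in_batch, &batch,
					   keys, values, &count, &opts);
		if (err && errno != ENOENT)
			return -errno;	/* real failure */
		/* ... consume `count` key/value pairs here ... */
		in_batch = &batch;	/* resume where this chunk ended */
	} while (!err);			/* ENOENT marks end of iteration */

	return 0;
}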
9 changes: 9 additions & 0 deletions include/linux/bpf_types.h
@@ -65,6 +65,12 @@ BPF_PROG_TYPE(BPF_PROG_TYPE_LIRC_MODE2, lirc_mode2,
BPF_PROG_TYPE(BPF_PROG_TYPE_SK_REUSEPORT, sk_reuseport,
struct sk_reuseport_md, struct sk_reuseport_kern)
#endif
+#if defined(CONFIG_BPF_JIT)
+BPF_PROG_TYPE(BPF_PROG_TYPE_STRUCT_OPS, bpf_struct_ops,
+void *, void *)
+BPF_PROG_TYPE(BPF_PROG_TYPE_EXT, bpf_extension,
+void *, void *)
+#endif

BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY, array_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_PERCPU_ARRAY, percpu_array_map_ops)
@@ -105,3 +111,6 @@ BPF_MAP_TYPE(BPF_MAP_TYPE_REUSEPORT_SOCKARRAY, reuseport_array_ops)
#endif
BPF_MAP_TYPE(BPF_MAP_TYPE_QUEUE, queue_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_STACK, stack_map_ops)
+#if defined(CONFIG_BPF_JIT)
+BPF_MAP_TYPE(BPF_MAP_TYPE_STRUCT_OPS, bpf_struct_ops_map_ops)
+#endif
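
BPF_PROG_TYPE_STRUCT_OPS and BPF_MAP_TYPE_STRUCT_OPS are the plumbing that lets BPF implement a kernel ops structure — in this series, struct tcp_congestion_ops (highlight 4). BPF_PROG_TYPE_EXT (highlight 1) rides the same trampoline machinery to replace a function of an already-loaded program. A hedged sketch of the BPF-side shape of a congestion-control module, modeled on the bpf_dctcp selftest added in this series; callback bodies are placeholders, and the headers providing BPF_PROG and the kernel types are assumptions (the selftests use local helper headers):

#include "vmlinux.h"		/* assumed: BTF-derived kernel types */
#include <bpf/bpf_helpers.h>
#include "bpf_trace_helpers.h"	/* assumed: provides BPF_PROG */

char _license[] SEC("license") = "GPL";

SEC("struct_ops/sample_init")
void BPF_PROG(sample_init, struct sock *sk)
{
	/* set up per-socket congestion-control state */
}

SEC("struct_ops/sample_ssthresh")
__u32 BPF_PROG(sample_ssthresh, struct sock *sk)
{
	const struct tcp_sock *tp = (const struct tcp_sock *)sk;

	return tp->snd_ssthresh;	/* placeholder policy */
}

SEC("struct_ops/sample_undo_cwnd")
__u32 BPF_PROG(sample_undo_cwnd, struct sock *sk)
{
	const struct tcp_sock *tp = (const struct tcp_sock *)sk;

	return tp->snd_cwnd;		/* placeholder policy */
}

SEC("struct_ops/sample_cong_avoid")
void BPF_PROG(sample_cong_avoid, struct sock *sk, __u32 ack, __u32 acked)
{
	/* grow cwnd here; left empty in this sketch */
}

SEC(".struct_ops")
struct tcp_congestion_ops sample = {
	.init		= (void *)sample_init,
	.ssthresh	= (void *)sample_ssthresh,
	.undo_cwnd	= (void *)sample_undo_cwnd,
	.cong_avoid	= (void *)sample_cong_avoid,
	.name		= "bpf_sample",
};

Loading such an object creates a struct_ops map; attaching it (libbpf's bpf_map__attach_struct_ops() from this cycle) is what registers "bpf_sample" with the TCP stack.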
10 changes: 8 additions & 2 deletions include/linux/bpf_verifier.h
@@ -304,11 +304,13 @@ struct bpf_insn_aux_data {
u64 map_key_state; /* constant (32 bit) key tracking for maps */
int ctx_field_size; /* the ctx field size for load insn, maybe 0 */
int sanitize_stack_off; /* stack slot to be cleared */
-bool seen; /* this insn was processed by the verifier */
+u32 seen; /* this insn was processed by the verifier at env->pass_cnt */
bool zext_dst; /* this insn zero extends dst reg */
u8 alu_state; /* used in combination with alu_limit */
-bool prune_point;
+
+/* below fields are initialized once */
+unsigned int orig_idx; /* original instruction index */
+bool prune_point;
};

#define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */
@@ -379,6 +381,7 @@ struct bpf_verifier_env {
int *insn_stack;
int cur_stack;
} cfg;
+u32 pass_cnt; /* number of times do_check() was called */
u32 subprog_cnt;
/* number of instructions analyzed by the verifier */
u32 prev_insn_processed, insn_processed;
@@ -428,4 +431,7 @@ bpf_prog_offload_replace_insn(struct bpf_verifier_env *env, u32 off,
void
bpf_prog_offload_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt);

+int check_ctx_reg(struct bpf_verifier_env *env,
+const struct bpf_reg_state *reg, int regno);
+
#endif /* _LINUX_BPF_VERIFIER_H */
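
The bpf_verifier.h changes tie into the function-by-function verification of highlight 1: do_check() can now run more than once, env->pass_cnt counts those runs, and insn_aux_data.seen widens from bool to u32 so it can record *which* pass last visited an instruction. A hedged illustration of the idea (not the kernel's exact code):

/* An insn counts as live for dead-code elimination only if it was
 * reached on the current verification pass, not merely on some
 * earlier one. */
static bool insn_seen_this_pass(struct bpf_verifier_env *env, int i)
{
	return env->insn_aux_data[i].seen == env->pass_cnt;
}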