Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf
Daniel Borkmann says:

====================
pull-request: bpf 2018-02-20

The following pull-request contains BPF updates for your *net* tree.

The main changes are:

1) Fix a memory leak in the LPM trie's map_free() callback, where the
   trie structure itself had never been freed since the initial
   implementation. A synchronize_rcu() is also needed there in order to
   wait for outstanding programs accessing the trie to complete, from
   Yonghong.

2) Fix sock_map_alloc()'s error path so that it correctly propagates
   -EINVAL in case of too large allocation requests. The bug was
   introduced recently while fixing close hooks via the ULP layer,
   fix from Eric.

3) Do not use GFP_ATOMIC in __cpu_map_entry_alloc(). It no longer works
   after the recent conversion of __ptr_ring_init_queue_alloc() to
   kvmalloc_array(), since that GFP flag is invalid whenever the
   allocation falls back to vmalloc(), from Jason.

4) Fix two recent syzkaller warnings: i) fix bpf_prog_array_copy_to_user()
   so that a prog query with a large number of ids no longer triggers a
   warning from the allocator side, ii) fix a missing mlock precharge on
   arraymaps, from Daniel.

5) Two fixes for bpftool in order to avoid breaking JSON output when used
   in batch mode, from Quentin.

6) Move a pr_debug() in libbpf in order to avoid using an otherwise
   uninitialized variable in bpf_program__reloc_text(), from Jeremy.
====================

Signed-off-by: David S. Miller <[email protected]>
davem330 committed Feb 21, 2018
2 parents 6c4df17 + b1a2ce8 commit bf006d1
Showing 9 changed files with 36 additions and 22 deletions.
28 changes: 16 additions & 12 deletions kernel/bpf/arraymap.c
@@ -73,11 +73,11 @@ static int array_map_alloc_check(union bpf_attr *attr)
 static struct bpf_map *array_map_alloc(union bpf_attr *attr)
 {
 	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
-	int numa_node = bpf_map_attr_numa_node(attr);
+	int ret, numa_node = bpf_map_attr_numa_node(attr);
 	u32 elem_size, index_mask, max_entries;
 	bool unpriv = !capable(CAP_SYS_ADMIN);
+	u64 cost, array_size, mask64;
 	struct bpf_array *array;
-	u64 array_size, mask64;
 
 	elem_size = round_up(attr->value_size, 8);
 
@@ -109,8 +109,19 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
 	array_size += (u64) max_entries * elem_size;
 
 	/* make sure there is no u32 overflow later in round_up() */
-	if (array_size >= U32_MAX - PAGE_SIZE)
+	cost = array_size;
+	if (cost >= U32_MAX - PAGE_SIZE)
 		return ERR_PTR(-ENOMEM);
+	if (percpu) {
+		cost += (u64)attr->max_entries * elem_size * num_possible_cpus();
+		if (cost >= U32_MAX - PAGE_SIZE)
+			return ERR_PTR(-ENOMEM);
+	}
+	cost = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
+
+	ret = bpf_map_precharge_memlock(cost);
+	if (ret < 0)
+		return ERR_PTR(ret);
 
 	/* allocate all map elements and zero-initialize them */
 	array = bpf_map_area_alloc(array_size, numa_node);
@@ -121,20 +132,13 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
 
 	/* copy mandatory map attributes */
 	bpf_map_init_from_attr(&array->map, attr);
+	array->map.pages = cost;
 	array->elem_size = elem_size;
 
-	if (!percpu)
-		goto out;
-
-	array_size += (u64) attr->max_entries * elem_size * num_possible_cpus();
-
-	if (array_size >= U32_MAX - PAGE_SIZE ||
-	    bpf_array_alloc_percpu(array)) {
+	if (percpu && bpf_array_alloc_percpu(array)) {
 		bpf_map_area_free(array);
 		return ERR_PTR(-ENOMEM);
 	}
-out:
-	array->map.pages = round_up(array_size, PAGE_SIZE) >> PAGE_SHIFT;
 
 	return &array->map;
 }
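The arraymap fix above boils down to: compute the full memlock cost, including the per-CPU portion, and charge it before allocating anything. A minimal userspace C sketch of the same overflow-checked flow; precharge() here is a hypothetical stand-in for the kernel-internal bpf_map_precharge_memlock():

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE  4096ULL
#define PAGE_SHIFT 12

/* Hypothetical stand-in for the kernel's bpf_map_precharge_memlock(). */
static int precharge(uint64_t pages)
{
	printf("precharging %llu pages\n", (unsigned long long)pages);
	return 0;
}

/* Mirror of the fixed flow: account the per-CPU portion in 'cost' up
 * front, reject anything that could overflow a u32 later in the page
 * rounding, and precharge before allocating a single byte. */
static int charge_array_map(uint32_t max_entries, uint32_t elem_size,
			    int percpu, unsigned int nr_cpus)
{
	uint64_t cost = (uint64_t)max_entries * elem_size;

	if (cost >= UINT32_MAX - PAGE_SIZE)
		return -1;
	if (percpu) {
		cost += (uint64_t)max_entries * elem_size * nr_cpus;
		if (cost >= UINT32_MAX - PAGE_SIZE)
			return -1;
	}
	cost = (cost + PAGE_SIZE - 1) >> PAGE_SHIFT; /* round up to pages */

	return precharge(cost);
}

int main(void)
{
	return charge_array_map(1 << 20, 64, 1, 8) ? 1 : 0;
}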
2 changes: 1 addition & 1 deletion kernel/bpf/core.c
@@ -1590,7 +1590,7 @@ int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs,
 	 * so always copy 'cnt' prog_ids to the user.
 	 * In a rare race the user will see zero prog_ids
 	 */
-	ids = kcalloc(cnt, sizeof(u32), GFP_USER);
+	ids = kcalloc(cnt, sizeof(u32), GFP_USER | __GFP_NOWARN);
 	if (!ids)
 		return -ENOMEM;
 	rcu_read_lock();
2 changes: 1 addition & 1 deletion kernel/bpf/cpumap.c
@@ -334,7 +334,7 @@ static int cpu_map_kthread_run(void *data)
 static struct bpf_cpu_map_entry *__cpu_map_entry_alloc(u32 qsize, u32 cpu,
 						       int map_id)
 {
-	gfp_t gfp = GFP_ATOMIC|__GFP_NOWARN;
+	gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
 	struct bpf_cpu_map_entry *rcpu;
 	int numa, err;
 
11 changes: 7 additions & 4 deletions kernel/bpf/lpm_trie.c
@@ -555,7 +555,10 @@ static void trie_free(struct bpf_map *map)
 	struct lpm_trie_node __rcu **slot;
 	struct lpm_trie_node *node;
 
-	raw_spin_lock(&trie->lock);
+	/* Wait for outstanding programs to complete
+	 * update/lookup/delete/get_next_key and free the trie.
+	 */
+	synchronize_rcu();
 
 	/* Always start at the root and walk down to a node that has no
 	 * children. Then free that node, nullify its reference in the parent
@@ -569,7 +572,7 @@ static void trie_free(struct bpf_map *map)
 			node = rcu_dereference_protected(*slot,
 					lockdep_is_held(&trie->lock));
 			if (!node)
-				goto unlock;
+				goto out;
 
 			if (rcu_access_pointer(node->child[0])) {
 				slot = &node->child[0];
@@ -587,8 +590,8 @@ static void trie_free(struct bpf_map *map)
 		}
 	}
 
-unlock:
-	raw_spin_unlock(&trie->lock);
+out:
+	kfree(trie);
 }
 
 static int trie_get_next_key(struct bpf_map *map, void *_key, void *_next_key)
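The trie_free() change replaces the spinlock with the classic RCU retirement pattern: unpublish the object, wait for pre-existing readers, then free it. A minimal sketch of the same ordering in userspace C, assuming the userspace RCU library (liburcu) is installed (build with -lurcu); the single-threaded setup only demonstrates the sequence, not real concurrency:

#include <stdio.h>
#include <stdlib.h>
#include <urcu.h>	/* userspace RCU, link with -lurcu */

struct node { int value; };

static struct node *shared;

int main(void)
{
	struct node *old;

	rcu_register_thread();

	shared = malloc(sizeof(*shared));
	shared->value = 42;

	/* Retire the object: unpublish it first... */
	old = shared;
	rcu_assign_pointer(shared, NULL);

	/* ...then wait for all pre-existing readers to finish before
	 * freeing, exactly as trie_free() now does with
	 * synchronize_rcu() before kfree(). */
	synchronize_rcu();
	free(old);

	rcu_unregister_thread();
	return 0;
}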
3 changes: 2 additions & 1 deletion kernel/bpf/sockmap.c
@@ -521,8 +521,8 @@ static struct smap_psock *smap_init_psock(struct sock *sock,
 static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
 {
 	struct bpf_stab *stab;
-	int err = -EINVAL;
 	u64 cost;
+	int err;
 
 	if (!capable(CAP_NET_ADMIN))
 		return ERR_PTR(-EPERM);
@@ -547,6 +547,7 @@ static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
 
 	/* make sure page count doesn't overflow */
 	cost = (u64) stab->map.max_entries * sizeof(struct sock *);
+	err = -EINVAL;
 	if (cost >= U32_MAX - PAGE_SIZE)
 		goto free_stab;
 
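The sockmap fix illustrates a general error-path idiom: assign the error code immediately before the check it belongs to, so a shared cleanup label always propagates whichever failure actually occurred. A small self-contained sketch of the idiom (map_alloc() and its bounds are hypothetical, not the kernel function):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical allocator mirroring the fixed sock_map_alloc() shape:
 * 'err' is set right before each failure check, so the 'fail' path
 * returns the error of the check that actually tripped. */
static int map_alloc(unsigned int max_entries, void **out)
{
	void *table = NULL;
	int err;

	err = -EINVAL;
	if (max_entries == 0 || max_entries > (1u << 20))
		goto fail;

	err = -ENOMEM;
	table = calloc(max_entries, sizeof(void *));
	if (!table)
		goto fail;

	*out = table;
	return 0;
fail:
	free(table);
	return err;
}

int main(void)
{
	void *map;

	/* Prints -22 (-EINVAL) on Linux, not a stale or missing code. */
	printf("map_alloc: %d\n", map_alloc(0, &map));
	return 0;
}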
2 changes: 2 additions & 0 deletions kernel/trace/bpf_trace.c
@@ -872,6 +872,8 @@ int perf_event_query_prog_array(struct perf_event *event, void __user *info)
 		return -EINVAL;
 	if (copy_from_user(&query, uquery, sizeof(query)))
 		return -EFAULT;
+	if (query.ids_len > BPF_TRACE_MAX_PROGS)
+		return -E2BIG;
 
 	mutex_lock(&bpf_event_mutex);
 	ret = bpf_prog_array_copy_info(event->tp_event->prog_array,
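The pattern here: bound a user-supplied count with -E2BIG before it ever reaches the allocator, instead of letting a huge allocation attempt fail (or warn) deep inside the allocation path. A userspace sketch under stated assumptions (MAX_PROGS and query_ids() are made up for illustration; the kernel-side cap is BPF_TRACE_MAX_PROGS):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_PROGS 64	/* hypothetical cap, analogous to BPF_TRACE_MAX_PROGS */

/* Reject oversized user-controlled counts up front with E2BIG. */
static int query_ids(uint32_t ids_len, uint32_t **ids)
{
	if (ids_len > MAX_PROGS)
		return -E2BIG;

	*ids = calloc(ids_len, sizeof(uint32_t));
	return *ids ? 0 : -ENOMEM;
}

int main(void)
{
	uint32_t *ids;

	printf("query_ids: %d\n", query_ids(1u << 30, &ids)); /* -E2BIG */
	return 0;
}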
2 changes: 1 addition & 1 deletion tools/bpf/bpftool/main.c
@@ -244,7 +244,7 @@ static int do_batch(int argc, char **argv)
 	}
 
 	if (errno && errno != ENOENT) {
-		perror("reading batch file failed");
+		p_err("reading batch file failed: %s", strerror(errno));
 		err = -1;
 	} else {
 		p_info("processed %d lines", lines);
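The do_batch() fix swaps perror(), which writes raw text, for p_err(), which knows about JSON mode. A rough sketch of why that matters; report_err() is hypothetical and skips JSON string escaping for brevity:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool json_output;

/* Hypothetical p_err()-style helper: in JSON mode, errors must be
 * emitted as a JSON value rather than raw perror() text, otherwise a
 * consumer parsing batch output sees invalid JSON. */
static void report_err(const char *msg, int err)
{
	if (json_output)
		printf("{\"error\":\"%s: %s\"}\n", msg, strerror(err));
	else
		fprintf(stderr, "Error: %s: %s\n", msg, strerror(err));
}

int main(void)
{
	json_output = true;
	report_err("reading batch file failed", EACCES);
	return 0;
}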
3 changes: 3 additions & 0 deletions tools/bpf/bpftool/prog.c
@@ -774,6 +774,9 @@ static int do_dump(int argc, char **argv)
 			       n < 0 ? strerror(errno) : "short write");
 			goto err_free;
 		}
+
+		if (json_output)
+			jsonw_null(json_wtr);
 	} else {
 		if (member_len == &info.jited_prog_len) {
 			const char *name = NULL;
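The do_dump() fix follows from the batch-mode contract: every command should contribute exactly one JSON value to the output stream. When the payload goes to a file, emitting a literal null (which is what jsonw_null() produces) keeps the stream parseable. A toy sketch with a hypothetical dump_to_file():

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical dump-style command: the binary payload goes to a file,
 * so in JSON mode we still print one value (null) to stdout so the
 * surrounding batch output stays well-formed JSON. */
static int dump_to_file(const char *path, bool json_output)
{
	FILE *f = fopen(path, "wb");

	if (!f)
		return -1;
	fputs("payload", f);
	fclose(f);

	if (json_output)
		puts("null");	/* placeholder value instead of silence */
	return 0;
}

int main(void)
{
	return dump_to_file("/tmp/prog.bin", true);
}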
5 changes: 3 additions & 2 deletions tools/lib/bpf/libbpf.c
@@ -1060,11 +1060,12 @@ bpf_program__reloc_text(struct bpf_program *prog, struct bpf_object *obj,
 		prog->insns = new_insn;
 		prog->main_prog_cnt = prog->insns_cnt;
 		prog->insns_cnt = new_cnt;
+		pr_debug("added %zd insn from %s to prog %s\n",
+			 text->insns_cnt, text->section_name,
+			 prog->section_name);
 	}
 	insn = &prog->insns[relo->insn_idx];
 	insn->imm += prog->main_prog_cnt - relo->insn_idx;
-	pr_debug("added %zd insn from %s to prog %s\n",
-		 text->insns_cnt, text->section_name, prog->section_name);
 	return 0;
 }
 
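The libbpf change is a definite-assignment fix: text is only set inside the branch, so logging it after the branch can read an uninitialized pointer on the path where the branch is skipped. A reduced sketch of the bug shape (reloc() and struct section are illustrative, not libbpf APIs):

#include <stdio.h>

struct section { const char *name; };

/* Reduced sketch of the bpf_program__reloc_text() issue: 'text' is
 * only assigned inside the branch, so any use after the branch may
 * read an uninitialized pointer when the branch is skipped. The fix
 * keeps every use of 'text' inside that branch. */
static void reloc(int first_call)
{
	struct section *text;	/* uninitialized on purpose */

	if (first_call) {
		static struct section s = { ".text" };

		text = &s;
		/* safe: 'text' is definitely assigned on this path */
		printf("added insns from %s\n", text->name);
	}
	/* the buggy version logged text->name here, after the branch */
}

int main(void)
{
	reloc(1);
	reloc(0);
	return 0;
}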
