bpf: Cleanup check_refcount_ok

[ Upstream commit b2d8ef1 ]

Discussion around a recently-submitted patch provided historical
context for check_refcount_ok [0]. Specifically, the function and its
helpers - may_be_acquire_function and arg_type_may_be_refcounted -
predate the OBJ_RELEASE type flag and the addition of many more helpers
with acquire/release semantics.

The purpose of check_refcount_ok is to ensure:
  1) A helper doesn't have multiple uses of the return reg's ref_obj_id
  2) A helper with release semantics has only one arg needing to be
  released, since that's tracked using meta->ref_obj_id

With the current verifier, it's safe to remove check_refcount_ok and its
helpers. Since the addition of the OBJ_RELEASE type flag, case 2) has
been handled by the arg_type_is_release check in check_func_arg. To
ensure case 1) can't result in the verifier silently prioritizing one
use of ref_obj_id, this patch adds a helper_multiple_ref_obj_use check
which fails loudly if a helper passes more than one test for use of
ref_obj_id.
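
The snippet below is a condensed sketch of the result, not the verifier
source verbatim: the branches in check_helper_call() that set the return
register's ref_obj_id form an if/else-if chain, so a helper matching two
of the predicates would silently take the first branch and drop the
other use; the new check turns that into a hard verifier error instead.

	if (helper_multiple_ref_obj_use(func_id, meta.map_ptr))
		return -EFAULT;		/* fail loudly rather than pick a winner */

	if (is_ptr_cast_function(func_id)) {
		/* casted pointer reuses the argument's reference */
		regs[BPF_REG_0].ref_obj_id = meta.ref_obj_id;
	} else if (is_acquire_function(func_id, meta.map_ptr)) {
		/* fresh reference: id comes from acquire_reference_state() */
		regs[BPF_REG_0].ref_obj_id = id;
	} else if (is_dynptr_acquire_function(func_id)) {
		/* data slice is tied to the dynptr argument's reference */
	}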

  [0]: lore.kernel.org/bpf/[email protected]

Signed-off-by: Dave Marchevsky <[email protected]>
Acked-by: Martin KaFai Lau <[email protected]>
Acked-by: Joanne Koong <[email protected]>
Acked-by: Kumar Kartikeya Dwivedi <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Alexei Starovoitov <[email protected]>
Stable-dep-of: 8837434 ("bpf: Fix ref_obj_id for dynptr data slices in verifier")
Signed-off-by: Sasha Levin <[email protected]>
davemarchevsky authored and gregkh committed Oct 24, 2022
1 parent 606cfa0 commit 707d7a7
Showing 1 changed file with 29 additions and 45 deletions.

--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -467,25 +467,11 @@ static bool type_is_rdonly_mem(u32 type)
 	return type & MEM_RDONLY;
 }
 
-static bool arg_type_may_be_refcounted(enum bpf_arg_type type)
-{
-	return type == ARG_PTR_TO_SOCK_COMMON;
-}
-
 static bool type_may_be_null(u32 type)
 {
 	return type & PTR_MAYBE_NULL;
 }
 
-static bool may_be_acquire_function(enum bpf_func_id func_id)
-{
-	return func_id == BPF_FUNC_sk_lookup_tcp ||
-		func_id == BPF_FUNC_sk_lookup_udp ||
-		func_id == BPF_FUNC_skc_lookup_tcp ||
-		func_id == BPF_FUNC_map_lookup_elem ||
-		func_id == BPF_FUNC_ringbuf_reserve;
-}
-
 static bool is_acquire_function(enum bpf_func_id func_id,
 				const struct bpf_map *map)
 {
@@ -518,6 +504,26 @@ static bool is_ptr_cast_function(enum bpf_func_id func_id)
 	       func_id == BPF_FUNC_skc_to_tcp_request_sock;
 }
 
+static bool is_dynptr_acquire_function(enum bpf_func_id func_id)
+{
+	return func_id == BPF_FUNC_dynptr_data;
+}
+
+static bool helper_multiple_ref_obj_use(enum bpf_func_id func_id,
+					const struct bpf_map *map)
+{
+	int ref_obj_uses = 0;
+
+	if (is_ptr_cast_function(func_id))
+		ref_obj_uses++;
+	if (is_acquire_function(func_id, map))
+		ref_obj_uses++;
+	if (is_dynptr_acquire_function(func_id))
+		ref_obj_uses++;
+
+	return ref_obj_uses > 1;
+}
+
 static bool is_cmpxchg_insn(const struct bpf_insn *insn)
 {
 	return BPF_CLASS(insn->code) == BPF_STX &&
@@ -6456,33 +6462,6 @@ static bool check_arg_pair_ok(const struct bpf_func_proto *fn)
 	return true;
 }
 
-static bool check_refcount_ok(const struct bpf_func_proto *fn, int func_id)
-{
-	int count = 0;
-
-	if (arg_type_may_be_refcounted(fn->arg1_type))
-		count++;
-	if (arg_type_may_be_refcounted(fn->arg2_type))
-		count++;
-	if (arg_type_may_be_refcounted(fn->arg3_type))
-		count++;
-	if (arg_type_may_be_refcounted(fn->arg4_type))
-		count++;
-	if (arg_type_may_be_refcounted(fn->arg5_type))
-		count++;
-
-	/* A reference acquiring function cannot acquire
-	 * another refcounted ptr.
-	 */
-	if (may_be_acquire_function(func_id) && count)
-		return false;
-
-	/* We only support one arg being unreferenced at the moment,
-	 * which is sufficient for the helper functions we have right now.
-	 */
-	return count <= 1;
-}
-
 static bool check_btf_id_ok(const struct bpf_func_proto *fn)
 {
 	int i;
@@ -6506,8 +6485,7 @@ static int check_func_proto(const struct bpf_func_proto *fn, int func_id,
 {
 	return check_raw_mode_ok(fn) &&
 	       check_arg_pair_ok(fn) &&
-	       check_btf_id_ok(fn) &&
-	       check_refcount_ok(fn, func_id) ? 0 : -EINVAL;
+	       check_btf_id_ok(fn) ? 0 : -EINVAL;
 }
 
 /* Packet data might have moved, any old PTR_TO_PACKET[_META,_END]
@@ -7410,6 +7388,12 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
 	if (type_may_be_null(regs[BPF_REG_0].type))
 		regs[BPF_REG_0].id = ++env->id_gen;
 
+	if (helper_multiple_ref_obj_use(func_id, meta.map_ptr)) {
+		verbose(env, "verifier internal error: func %s#%d sets ref_obj_id more than once\n",
+			func_id_name(func_id), func_id);
+		return -EFAULT;
+	}
+
 	if (is_ptr_cast_function(func_id)) {
 		/* For release_reference() */
 		regs[BPF_REG_0].ref_obj_id = meta.ref_obj_id;
@@ -7422,10 +7406,10 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
 		regs[BPF_REG_0].id = id;
 		/* For release_reference() */
 		regs[BPF_REG_0].ref_obj_id = id;
-	} else if (func_id == BPF_FUNC_dynptr_data) {
+	} else if (is_dynptr_acquire_function(func_id)) {
 		int dynptr_id = 0, i;
 
-		/* Find the id of the dynptr we're acquiring a reference to */
+		/* Find the id of the dynptr we're tracking the reference of */
 		for (i = 0; i < MAX_BPF_FUNC_REG_ARGS; i++) {
 			if (arg_type_is_dynptr(fn->arg_type[i])) {
 				if (dynptr_id) {
