Daniel Borkmann says:

====================
pull-request: bpf-next 2020-12-14

1) Expose bpf_sk_storage_*() helpers to iterator programs, from Florent Revest.

2) Add AF_XDP selftests based on veth devs to BPF selftests, from Weqaar Janjua.

3) Support for finding BTF based kernel attach targets through libbpf's
   bpf_program__set_attach_target() API, from Andrii Nakryiko.

4) Permit pointers on stack for helper calls in the verifier, from Yonghong Song.

5) Fix overflows in hash map elem size after rlimit removal, from Eric Dumazet.

6) Get rid of direct invocation of llc in BPF selftests, from Andrew Delgadillo.

7) Fix xsk_recvmsg() to reorder socket state check before access, from Björn Töpel.

8) Add new libbpf API helper to retrieve ring buffer epoll fd, from Brendan Jackman.

9) Batch of minor BPF selftest improvements all over the place, from Florian Lehner,
   KP Singh, Jiri Olsa and various others.

* https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next: (31 commits)
  selftests/bpf: Add a test for ptr_to_map_value on stack for helper access
  bpf: Permits pointers on stack for helper calls
  libbpf: Expose libbpf ring_buffer epoll_fd
  selftests/bpf: Add set_attach_target() API selftest for module target
  libbpf: Support modules in bpf_program__set_attach_target() API
  selftests/bpf: Silence ima_setup.sh when not running in verbose mode.
  selftests/bpf: Drop the need for LLVM's llc
  selftests/bpf: fix bpf_testmod.ko recompilation logic
  samples/bpf: Fix possible hang in xdpsock with multiple threads
  selftests/bpf: Make selftest compilation work on clang 11
  selftests/bpf: Xsk selftests - adding xdpxceiver to .gitignore
  selftests/bpf: Drop tcp-{client,server}.py from Makefile
  selftests/bpf: Xsk selftests - Bi-directional Sockets - SKB, DRV
  selftests/bpf: Xsk selftests - Socket Teardown - SKB, DRV
  selftests/bpf: Xsk selftests - DRV POLL, NOPOLL
  selftests/bpf: Xsk selftests - SKB POLL, NOPOLL
  selftests/bpf: Xsk selftests framework
  bpf: Only provide bpf_sock_from_file with CONFIG_NET
  bpf: Return -ENOTSUPP when attaching to non-kernel BTF
  xsk: Validate socket state in xsk_recvmsg, prior touching socket members
  ...
====================

Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Jakub Kicinski <[email protected]>
kuba-moo committed Dec 14, 2020
2 parents 13458ff + b4b638c commit a6b5e02
Showing 40 changed files with 2,063 additions and 114 deletions.
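One note on item 8 above before the diff: ring_buffer__epoll_fd() lets a caller drive a libbpf ring buffer from an epoll loop it already owns instead of blocking in ring_buffer__poll(). A minimal sketch under that assumption (not code from this commit; `rb` is assumed to come from ring_buffer__new() on a BPF_MAP_TYPE_RINGBUF map, and error handling is trimmed):

#include <sys/epoll.h>
#include <bpf/libbpf.h>

/* Register the ring buffer's fd alongside fds we already watch. */
static int poll_in_own_loop(struct ring_buffer *rb)
{
        struct epoll_event ev = { .events = EPOLLIN };
        int epfd = epoll_create1(0);

        if (epfd < 0 ||
            epoll_ctl(epfd, EPOLL_CTL_ADD, ring_buffer__epoll_fd(rb), &ev))
                return -1;

        for (;;) {
                if (epoll_wait(epfd, &ev, 1, -1) < 0)
                        return -1;
                /* Readable: drain all pending samples without blocking. */
                if (ring_buffer__consume(rb) < 0)
                        return -1;
        }
}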
3 changes: 1 addition & 2 deletions fs/eventpoll.c
@@ -416,12 +416,11 @@ static inline void ep_set_busy_poll_napi_id(struct epitem *epi)
 	unsigned int napi_id;
 	struct socket *sock;
 	struct sock *sk;
-	int err;
 
 	if (!net_busy_loop_on())
 		return;
 
-	sock = sock_from_file(epi->ffd.file, &err);
+	sock = sock_from_file(epi->ffd.file);
 	if (!sock)
 		return;
 
16 changes: 8 additions & 8 deletions fs/io_uring.c
@@ -4356,9 +4356,9 @@ static int io_sendmsg(struct io_kiocb *req, bool force_nonblock,
 	unsigned flags;
 	int ret;
 
-	sock = sock_from_file(req->file, &ret);
+	sock = sock_from_file(req->file);
 	if (unlikely(!sock))
-		return ret;
+		return -ENOTSOCK;
 
 	if (req->async_data) {
 		kmsg = req->async_data;
@@ -4405,9 +4405,9 @@ static int io_send(struct io_kiocb *req, bool force_nonblock,
 	unsigned flags;
 	int ret;
 
-	sock = sock_from_file(req->file, &ret);
+	sock = sock_from_file(req->file);
 	if (unlikely(!sock))
-		return ret;
+		return -ENOTSOCK;
 
 	ret = import_single_range(WRITE, sr->buf, sr->len, &iov, &msg.msg_iter);
 	if (unlikely(ret))
@@ -4585,9 +4585,9 @@ static int io_recvmsg(struct io_kiocb *req, bool force_nonblock,
 	unsigned flags;
 	int ret, cflags = 0;
 
-	sock = sock_from_file(req->file, &ret);
+	sock = sock_from_file(req->file);
 	if (unlikely(!sock))
-		return ret;
+		return -ENOTSOCK;
 
 	if (req->async_data) {
 		kmsg = req->async_data;
@@ -4648,9 +4648,9 @@ static int io_recv(struct io_kiocb *req, bool force_nonblock,
 	unsigned flags;
 	int ret, cflags = 0;
 
-	sock = sock_from_file(req->file, &ret);
+	sock = sock_from_file(req->file);
 	if (unlikely(!sock))
-		return ret;
+		return -ENOTSOCK;
 
 	if (req->flags & REQ_F_BUFFER_SELECT) {
 		kbuf = io_recv_buffer_select(req, !force_nonblock);
1 change: 1 addition & 0 deletions include/linux/bpf.h
@@ -1859,6 +1859,7 @@ extern const struct bpf_func_proto bpf_snprintf_btf_proto;
 extern const struct bpf_func_proto bpf_per_cpu_ptr_proto;
 extern const struct bpf_func_proto bpf_this_cpu_ptr_proto;
 extern const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto;
+extern const struct bpf_func_proto bpf_sock_from_file_proto;
 
 const struct bpf_func_proto *bpf_tracing_func_proto(
 	enum bpf_func_id func_id, const struct bpf_prog *prog);
2 changes: 1 addition & 1 deletion include/linux/net.h
@@ -240,7 +240,7 @@ int sock_sendmsg(struct socket *sock, struct msghdr *msg);
 int sock_recvmsg(struct socket *sock, struct msghdr *msg, int flags);
 struct file *sock_alloc_file(struct socket *sock, int flags, const char *dname);
 struct socket *sockfd_lookup(int fd, int *err);
-struct socket *sock_from_file(struct file *file, int *err);
+struct socket *sock_from_file(struct file *file);
 #define sockfd_put(sock) fput(sock->file)
 int net_ratelimit(void);
 
12 changes: 6 additions & 6 deletions include/trace/events/xdp.h
@@ -145,17 +145,17 @@ DEFINE_EVENT(xdp_redirect_template, xdp_redirect_err,
 	TP_ARGS(dev, xdp, tgt, err, map, index)
 );
 
-#define _trace_xdp_redirect(dev, xdp, to) \
-	trace_xdp_redirect(dev, xdp, NULL, 0, NULL, to);
+#define _trace_xdp_redirect(dev, xdp, to) \
+	trace_xdp_redirect(dev, xdp, NULL, 0, NULL, to)
 
-#define _trace_xdp_redirect_err(dev, xdp, to, err) \
-	trace_xdp_redirect_err(dev, xdp, NULL, err, NULL, to);
+#define _trace_xdp_redirect_err(dev, xdp, to, err) \
+	trace_xdp_redirect_err(dev, xdp, NULL, err, NULL, to)
 
 #define _trace_xdp_redirect_map(dev, xdp, to, map, index) \
-	trace_xdp_redirect(dev, xdp, to, 0, map, index);
+	trace_xdp_redirect(dev, xdp, to, 0, map, index)
 
 #define _trace_xdp_redirect_map_err(dev, xdp, to, map, index, err) \
-	trace_xdp_redirect_err(dev, xdp, to, err, map, index);
+	trace_xdp_redirect_err(dev, xdp, to, err, map, index)
 
 /* not used anymore, but kept around so as not to break old programs */
 DEFINE_EVENT(xdp_redirect_template, xdp_redirect_map,
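The semicolons dropped above matter for macro hygiene: a trailing ';' baked into a macro body makes every invocation expand to two statements, which breaks brace-less if/else. A stand-alone illustration (hypothetical macro and function names, not from this tree):

#define GOOD_TRACE(x) trace_stub(x)     /* caller supplies the ';' */
#define BAD_TRACE(x)  trace_stub(x);    /* ';' baked into the body */

void trace_stub(int x);

void example(int cond)
{
        if (cond)
                GOOD_TRACE(1);          /* expands to one statement: fine */
        else
                trace_stub(0);

        /*
         * The same shape with BAD_TRACE(1); expands to "trace_stub(1);;".
         * The second ';' is an empty statement that terminates the if,
         * so a following "else" has no if to bind to and the compiler
         * rejects it.
         */
}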
9 changes: 9 additions & 0 deletions include/uapi/linux/bpf.h
@@ -3822,6 +3822,14 @@ union bpf_attr {
  *		The **hash_algo** is returned on success,
  *		**-EOPNOTSUP** if IMA is disabled or **-EINVAL** if
  *		invalid arguments are passed.
+ *
+ * struct socket *bpf_sock_from_file(struct file *file)
+ *	Description
+ *		If the given file represents a socket, returns the associated
+ *		socket.
+ *	Return
+ *		A pointer to a struct socket on success or NULL if the file is
+ *		not a socket.
  */
 #define __BPF_FUNC_MAPPER(FN) \
 	FN(unspec), \
@@ -3986,6 +3994,7 @@ union bpf_attr {
 	FN(bprm_opts_set), \
 	FN(ktime_get_coarse_ns), \
 	FN(ima_inode_hash), \
+	FN(sock_from_file), \
 	/* */
 
 /* integer value in 'imm' field of BPF_CALL instruction selects which helper
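To make the new helper's intended use concrete: Florent Revest's series pairs it with task_file iterators so a program can pick the sockets out of a task's fd table. A minimal sketch, assuming the usual vmlinux.h/bpf_helpers.h setup and a CONFIG_NET kernel carrying this commit (illustrative code, not from this tree):

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

char LICENSE[] SEC("license") = "GPL";

SEC("iter/task_file")
int dump_task_sockets(struct bpf_iter__task_file *ctx)
{
        const char fmt[] = "task %d: fd %d is a socket\n";
        struct file *file = ctx->file;
        struct socket *sock;
        __u64 args[2];

        if (!file || !ctx->task)
                return 0;

        /* NULL unless the file is a socket; because the helper returns
         * RET_PTR_TO_BTF_ID_OR_NULL, the verifier insists on this NULL
         * check before `sock` may be dereferenced. */
        sock = bpf_sock_from_file(file);
        if (!sock)
                return 0;

        args[0] = ctx->task->tgid;
        args[1] = ctx->fd;
        bpf_seq_printf(ctx->meta->seq, fmt, sizeof(fmt), args, sizeof(args));
        return 0;
}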
6 changes: 3 additions & 3 deletions kernel/bpf/hashtab.c
@@ -224,7 +224,7 @@ static void *fd_htab_map_get_ptr(const struct bpf_map *map, struct htab_elem *l)
 
 static struct htab_elem *get_htab_elem(struct bpf_htab *htab, int i)
 {
-	return (struct htab_elem *) (htab->elems + i * htab->elem_size);
+	return (struct htab_elem *) (htab->elems + i * (u64)htab->elem_size);
 }
 
 static void htab_free_elems(struct bpf_htab *htab)
@@ -280,7 +280,7 @@ static int prealloc_init(struct bpf_htab *htab)
 	if (!htab_is_percpu(htab) && !htab_is_lru(htab))
 		num_entries += num_possible_cpus();
 
-	htab->elems = bpf_map_area_alloc(htab->elem_size * num_entries,
+	htab->elems = bpf_map_area_alloc((u64)htab->elem_size * num_entries,
 					 htab->map.numa_node);
 	if (!htab->elems)
 		return -ENOMEM;
@@ -1412,7 +1412,7 @@ __htab_map_lookup_and_delete_batch(struct bpf_map *map,
 	void *keys = NULL, *values = NULL, *value, *dst_key, *dst_val;
 	void __user *uvalues = u64_to_user_ptr(attr->batch.values);
 	void __user *ukeys = u64_to_user_ptr(attr->batch.keys);
-	void *ubatch = u64_to_user_ptr(attr->batch.in_batch);
+	void __user *ubatch = u64_to_user_ptr(attr->batch.in_batch);
 	u32 batch, max_count, size, bucket_size;
 	struct htab_elem *node_to_free = NULL;
 	u64 elem_map_flags, map_flags;
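The (u64) casts above are the substance of item 5: elem_size and num_entries are 32-bit, and with the rlimit-based size cap gone a large map can make their product exceed 2^32 and silently wrap before it reaches bpf_map_area_alloc(). A stand-alone illustration of the wrap, with hypothetical sizes:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t elem_size = 1U << 16;   /* 64 KiB per element */
        uint32_t num_entries = 1U << 16; /* 65536 elements */

        /* 32-bit multiply wraps to 0: an allocation of this size would
         * be far too small for the preallocated element array. */
        uint32_t wrapped = elem_size * num_entries;
        /* Widening one operand first, as the fix does, keeps all 2^32
         * bytes of the intended size. */
        uint64_t fixed = (uint64_t)elem_size * num_entries;

        printf("wrapped=%u fixed=%llu\n", wrapped,
               (unsigned long long)fixed);
        return 0;
}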
5 changes: 4 additions & 1 deletion kernel/bpf/syscall.c
@@ -2121,8 +2121,11 @@ static int bpf_prog_load(union bpf_attr *attr, union bpf_attr __user *uattr)
 			if (IS_ERR(attach_btf))
 				return -EINVAL;
 			if (!btf_is_kernel(attach_btf)) {
+				/* attaching through specifying bpf_prog's BTF
+				 * objects directly might be supported eventually
+				 */
 				btf_put(attach_btf);
-				return -EINVAL;
+				return -ENOTSUPP;
 			}
 		}
 	} else if (attr->attach_btf_id) {
3 changes: 2 additions & 1 deletion kernel/bpf/verifier.c
@@ -3767,7 +3767,8 @@ static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
 			goto mark;
 
 		if (state->stack[spi].slot_type[0] == STACK_SPILL &&
-		    state->stack[spi].spilled_ptr.type == SCALAR_VALUE) {
+		    (state->stack[spi].spilled_ptr.type == SCALAR_VALUE ||
+		     env->allow_ptr_leaks)) {
 			__mark_reg_unknown(env, &state->stack[spi].spilled_ptr);
 			for (j = 0; j < BPF_REG_SIZE; j++)
 				state->stack[spi].slot_type[j] = STACK_MISC;
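What the relaxed check permits (item 4 above): a privileged program (env->allow_ptr_leaks, i.e. CAP_PERFMON) may now pass a stack area to a helper even when one of its slots holds a spilled pointer; the verifier downgrades the slot to an unknown scalar instead of rejecting the call. A contrived, hedged sketch of the now-accepted pattern, with hypothetical map and section names:

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

char LICENSE[] SEC("license") = "GPL";

struct {
        __uint(type, BPF_MAP_TYPE_HASH);
        __uint(max_entries, 1);
        __type(key, __u64);
        __type(value, __u64);
} scratch_map SEC(".maps");

SEC("tracepoint/syscalls/sys_enter_getpid")
int spill_then_lookup(void *ctx)
{
        __u64 key = 0, slot;
        __u64 *val;

        val = bpf_map_lookup_elem(&scratch_map, &key);
        if (!val)
                return 0;

        /* The store spills a map-value pointer into a stack slot... */
        slot = (__u64)val;
        /* ...and that slot is then handed to a helper as its key.
         * check_stack_boundary() used to reject stack slots holding
         * spilled pointers; with allow_ptr_leaks it now marks the slot
         * as an unknown scalar and accepts the call. */
        bpf_map_lookup_elem(&scratch_map, &slot);
        return 0;
}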
2 changes: 2 additions & 0 deletions kernel/trace/bpf_trace.c
@@ -1758,6 +1758,8 @@ tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 		return &bpf_sk_storage_get_tracing_proto;
 	case BPF_FUNC_sk_storage_delete:
 		return &bpf_sk_storage_delete_tracing_proto;
+	case BPF_FUNC_sock_from_file:
+		return &bpf_sock_from_file_proto;
 #endif
 	case BPF_FUNC_seq_printf:
 		return prog->expected_attach_type == BPF_TRACE_ITER ?
1 change: 1 addition & 0 deletions net/core/bpf_sk_storage.c
@@ -394,6 +394,7 @@ static bool bpf_sk_storage_tracing_allowed(const struct bpf_prog *prog)
 	 * use the bpf_sk_storage_(get|delete) helper.
 	 */
 	switch (prog->expected_attach_type) {
+	case BPF_TRACE_ITER:
 	case BPF_TRACE_RAW_TP:
 		/* bpf_sk_storage has no trace point */
 		return true;
18 changes: 18 additions & 0 deletions net/core/filter.c
@@ -10413,6 +10413,24 @@ const struct bpf_func_proto bpf_skc_to_udp6_sock_proto = {
 	.ret_btf_id		= &btf_sock_ids[BTF_SOCK_TYPE_UDP6],
 };
 
+BPF_CALL_1(bpf_sock_from_file, struct file *, file)
+{
+	return (unsigned long)sock_from_file(file);
+}
+
+BTF_ID_LIST(bpf_sock_from_file_btf_ids)
+BTF_ID(struct, socket)
+BTF_ID(struct, file)
+
+const struct bpf_func_proto bpf_sock_from_file_proto = {
+	.func		= bpf_sock_from_file,
+	.gpl_only	= false,
+	.ret_type	= RET_PTR_TO_BTF_ID_OR_NULL,
+	.ret_btf_id	= &bpf_sock_from_file_btf_ids[0],
+	.arg1_type	= ARG_PTR_TO_BTF_ID,
+	.arg1_btf_id	= &bpf_sock_from_file_btf_ids[1],
+};
+
 static const struct bpf_func_proto *
 bpf_sk_base_func_proto(enum bpf_func_id func_id)
 {
3 changes: 1 addition & 2 deletions net/core/netclassid_cgroup.c
@@ -68,9 +68,8 @@ struct update_classid_context {
 
 static int update_classid_sock(const void *v, struct file *file, unsigned n)
 {
-	int err;
 	struct update_classid_context *ctx = (void *)v;
-	struct socket *sock = sock_from_file(file, &err);
+	struct socket *sock = sock_from_file(file);
 
 	if (sock) {
 		spin_lock(&cgroup_sk_update_lock);
3 changes: 1 addition & 2 deletions net/core/netprio_cgroup.c
@@ -220,8 +220,7 @@ static ssize_t write_priomap(struct kernfs_open_file *of,
 
 static int update_netprio(const void *v, struct file *file, unsigned n)
 {
-	int err;
-	struct socket *sock = sock_from_file(file, &err);
+	struct socket *sock = sock_from_file(file);
 	if (sock) {
 		spin_lock(&cgroup_sk_update_lock);
 		sock_cgroup_set_prioidx(&sock->sk->sk_cgrp_data,
8 changes: 1 addition & 7 deletions net/core/sock.c
@@ -2827,14 +2827,8 @@ EXPORT_SYMBOL(sock_no_mmap);
 void __receive_sock(struct file *file)
 {
 	struct socket *sock;
-	int error;
 
-	/*
-	 * The resulting value of "error" is ignored here since we only
-	 * need to take action when the file is a socket and testing
-	 * "sock" for NULL is sufficient.
-	 */
-	sock = sock_from_file(file, &error);
+	sock = sock_from_file(file);
 	if (sock) {
 		sock_update_netprioidx(&sock->sk->sk_cgrp_data);
 		sock_update_classid(&sock->sk->sk_cgrp_data);
27 changes: 16 additions & 11 deletions net/socket.c
@@ -445,17 +445,15 @@ static int sock_map_fd(struct socket *sock, int flags)
 /**
  *	sock_from_file - Return the &socket bounded to @file.
  *	@file: file
- *	@err: pointer to an error code return
  *
- *	On failure returns %NULL and assigns -ENOTSOCK to @err.
+ *	On failure returns %NULL.
  */
 
-struct socket *sock_from_file(struct file *file, int *err)
+struct socket *sock_from_file(struct file *file)
 {
 	if (file->f_op == &socket_file_ops)
 		return file->private_data;	/* set in sock_map_fd */
 
-	*err = -ENOTSOCK;
 	return NULL;
 }
 EXPORT_SYMBOL(sock_from_file);
@@ -484,9 +482,11 @@ struct socket *sockfd_lookup(int fd, int *err)
 		return NULL;
 	}
 
-	sock = sock_from_file(file, err);
-	if (!sock)
+	sock = sock_from_file(file);
+	if (!sock) {
+		*err = -ENOTSOCK;
 		fput(file);
+	}
 	return sock;
 }
 EXPORT_SYMBOL(sockfd_lookup);
@@ -498,11 +498,12 @@ static struct socket *sockfd_lookup_light(int fd, int *err, int *fput_needed)
 
 	*err = -EBADF;
 	if (f.file) {
-		sock = sock_from_file(f.file, err);
+		sock = sock_from_file(f.file);
 		if (likely(sock)) {
 			*fput_needed = f.flags & FDPUT_FPUT;
 			return sock;
 		}
+		*err = -ENOTSOCK;
 		fdput(f);
 	}
 	return NULL;
@@ -1693,9 +1694,11 @@ int __sys_accept4_file(struct file *file, unsigned file_flags,
 	if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
 		flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
 
-	sock = sock_from_file(file, &err);
-	if (!sock)
+	sock = sock_from_file(file);
+	if (!sock) {
+		err = -ENOTSOCK;
 		goto out;
+	}
 
 	err = -ENFILE;
 	newsock = sock_alloc();
@@ -1818,9 +1821,11 @@ int __sys_connect_file(struct file *file, struct sockaddr_storage *address,
 	struct socket *sock;
 	int err;
 
-	sock = sock_from_file(file, &err);
-	if (!sock)
+	sock = sock_from_file(file);
+	if (!sock) {
+		err = -ENOTSOCK;
 		goto out;
+	}
 
 	err =
 		security_socket_connect(sock, (struct sockaddr *)address, addrlen);
4 changes: 2 additions & 2 deletions net/xdp/xsk.c
@@ -564,12 +564,12 @@ static int xsk_recvmsg(struct socket *sock, struct msghdr *m, size_t len, int flags)
 	struct sock *sk = sock->sk;
 	struct xdp_sock *xs = xdp_sk(sk);
 
+	if (unlikely(!xsk_is_bound(xs)))
+		return -ENXIO;
 	if (unlikely(!(xs->dev->flags & IFF_UP)))
 		return -ENETDOWN;
 	if (unlikely(!xs->rx))
 		return -ENOBUFS;
-	if (unlikely(!xsk_is_bound(xs)))
-		return -ENXIO;
 	if (unlikely(need_wait))
 		return -EOPNOTSUPP;
 
2 changes: 2 additions & 0 deletions samples/bpf/xdpsock_user.c
@@ -1275,6 +1275,8 @@ static void tx_only(struct xsk_socket_info *xsk, u32 *frame_nb, int batch_size)
 	while (xsk_ring_prod__reserve(&xsk->tx, batch_size, &idx) <
 	       batch_size) {
 		complete_tx_only(xsk, batch_size);
+		if (benchmark_done)
+			return;
 	}
 
 	for (i = 0; i < batch_size; i++) {
4 changes: 4 additions & 0 deletions scripts/bpf_helpers_doc.py
@@ -437,6 +437,8 @@ class PrinterHelpers(Printer):
             'struct path',
             'struct btf_ptr',
             'struct inode',
+            'struct socket',
+            'struct file',
     ]
     known_types = {
             '...',
@@ -482,6 +484,8 @@ class PrinterHelpers(Printer):
             'struct path',
             'struct btf_ptr',
             'struct inode',
+            'struct socket',
+            'struct file',
     }
     mapped_types = {
             'u8': '__u8',
9 changes: 9 additions & 0 deletions tools/include/uapi/linux/bpf.h
@@ -3822,6 +3822,14 @@ union bpf_attr {
  *		The **hash_algo** is returned on success,
  *		**-EOPNOTSUP** if IMA is disabled or **-EINVAL** if
  *		invalid arguments are passed.
+ *
+ * struct socket *bpf_sock_from_file(struct file *file)
+ *	Description
+ *		If the given file represents a socket, returns the associated
+ *		socket.
+ *	Return
+ *		A pointer to a struct socket on success or NULL if the file is
+ *		not a socket.
  */
 #define __BPF_FUNC_MAPPER(FN) \
 	FN(unspec), \
@@ -3986,6 +3994,7 @@ union bpf_attr {
 	FN(bprm_opts_set), \
 	FN(ktime_get_coarse_ns), \
 	FN(ima_inode_hash), \
+	FN(sock_from_file), \
 	/* */
 
 /* integer value in 'imm' field of BPF_CALL instruction selects which helper