bpf: minor cleanups for dev_map
Some minor code cleanups. While going over the code I also noticed that
we're accounting the bitmap only for one CPU currently, so fix that up
as well.

Signed-off-by: Daniel Borkmann <[email protected]>
Acked-by: Alexei Starovoitov <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
borkmann authored and davem330 committed Aug 23, 2017
1 parent e4a8e81 commit af4d045
Showing 1 changed file with 41 additions and 59 deletions.
kernel/bpf/devmap.c
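The accounting fix called out in the commit message is easiest to see with numbers: before the patch, cost charged the flush bitmap once, even though __alloc_percpu() allocates one bitmap per possible CPU. A minimal standalone sketch of the corrected arithmetic (a userspace re-derivation, not kernel code; BITS_TO_LONGS is rebuilt locally, and the map size and CPU count are made-up example values):

#include <stdio.h>
#include <stdint.h>

#define BITS_PER_LONG	 (8 * sizeof(long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

int main(void)
{
	uint32_t max_entries = 4096;	/* example map size */
	unsigned int ncpus = 8;		/* stand-in for num_possible_cpus() */

	/* one struct bpf_dtab_netdev pointer per map slot */
	uint64_t ptrs = (uint64_t)max_entries * sizeof(void *);
	/* bytes in one per-CPU flush bitmap */
	uint64_t bitmap = (uint64_t)BITS_TO_LONGS(max_entries) *
			  sizeof(unsigned long);

	/* pre-patch: bitmap charged only once */
	printf("old cost: %llu bytes\n", (unsigned long long)(ptrs + bitmap));
	/* post-patch: charged once per possible CPU */
	printf("new cost: %llu bytes\n",
	       (unsigned long long)(ptrs + bitmap * ncpus));
	return 0;
}

On a 64-bit box that is 512 bytes of bitmap per CPU for 4096 entries, so the old accounting undercharged by bitmap * (ncpus - 1) bytes.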
@@ -48,30 +48,30 @@
  * calls will fail at this point.
  */
 #include <linux/bpf.h>
-#include <linux/jhash.h>
 #include <linux/filter.h>
-#include <linux/rculist_nulls.h>
-#include "percpu_freelist.h"
-#include "bpf_lru_list.h"
-#include "map_in_map.h"
 
 struct bpf_dtab_netdev {
 	struct net_device *dev;
-	int key;
-	struct rcu_head rcu;
 	struct bpf_dtab *dtab;
+	unsigned int bit;
+	struct rcu_head rcu;
 };
 
 struct bpf_dtab {
 	struct bpf_map map;
 	struct bpf_dtab_netdev **netdev_map;
-	unsigned long int __percpu *flush_needed;
+	unsigned long __percpu *flush_needed;
 	struct list_head list;
 };
 
 static DEFINE_SPINLOCK(dev_map_lock);
 static LIST_HEAD(dev_map_list);
 
+static u64 dev_map_bitmap_size(const union bpf_attr *attr)
+{
+	return BITS_TO_LONGS(attr->max_entries) * sizeof(unsigned long);
+}
+
 static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
 {
 	struct bpf_dtab *dtab;
@@ -95,11 +95,9 @@ static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
 	dtab->map.map_flags = attr->map_flags;
 	dtab->map.numa_node = bpf_map_attr_numa_node(attr);
 
-	err = -ENOMEM;
-
 	/* make sure page count doesn't overflow */
 	cost = (u64) dtab->map.max_entries * sizeof(struct bpf_dtab_netdev *);
-	cost += BITS_TO_LONGS(attr->max_entries) * sizeof(unsigned long);
+	cost += dev_map_bitmap_size(attr) * num_possible_cpus();
 	if (cost >= U32_MAX - PAGE_SIZE)
 		goto free_dtab;
 
@@ -110,12 +108,9 @@ static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
 	if (err)
 		goto free_dtab;
 
-	err = -ENOMEM;
 	/* A per cpu bitfield with a bit per possible net device */
-	dtab->flush_needed = __alloc_percpu(
-				BITS_TO_LONGS(attr->max_entries) *
-				sizeof(unsigned long),
-				__alignof__(unsigned long));
+	dtab->flush_needed = __alloc_percpu(dev_map_bitmap_size(attr),
+					    __alignof__(unsigned long));
 	if (!dtab->flush_needed)
 		goto free_dtab;
 
@@ -128,12 +123,12 @@ static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
 	spin_lock(&dev_map_lock);
 	list_add_tail_rcu(&dtab->list, &dev_map_list);
 	spin_unlock(&dev_map_lock);
-	return &dtab->map;
 
+	return &dtab->map;
 free_dtab:
 	free_percpu(dtab->flush_needed);
 	kfree(dtab);
-	return ERR_PTR(err);
+	return ERR_PTR(-ENOMEM);
 }
 
 static void dev_map_free(struct bpf_map *map)
@@ -178,9 +173,6 @@ static void dev_map_free(struct bpf_map *map)
 		kfree(dev);
 	}
 
-	/* At this point bpf program is detached and all pending operations
-	 * _must_ be complete
-	 */
 	free_percpu(dtab->flush_needed);
 	bpf_map_area_free(dtab->netdev_map);
 	kfree(dtab);
@@ -190,7 +182,7 @@ static int dev_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
 {
 	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
 	u32 index = key ? *(u32 *)key : U32_MAX;
-	u32 *next = (u32 *)next_key;
+	u32 *next = next_key;
 
 	if (index >= dtab->map.max_entries) {
 		*next = 0;
@@ -199,29 +191,16 @@ static int dev_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
 
 	if (index == dtab->map.max_entries - 1)
 		return -ENOENT;
-
 	*next = index + 1;
 	return 0;
 }
 
-void __dev_map_insert_ctx(struct bpf_map *map, u32 key)
+void __dev_map_insert_ctx(struct bpf_map *map, u32 bit)
 {
 	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
 	unsigned long *bitmap = this_cpu_ptr(dtab->flush_needed);
 
-	__set_bit(key, bitmap);
-}
-
-struct net_device *__dev_map_lookup_elem(struct bpf_map *map, u32 key)
-{
-	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
-	struct bpf_dtab_netdev *dev;
-
-	if (key >= map->max_entries)
-		return NULL;
-
-	dev = READ_ONCE(dtab->netdev_map[key]);
-	return dev ? dev->dev : NULL;
+	__set_bit(bit, bitmap);
 }
 
 /* __dev_map_flush is called from xdp_do_flush_map() which _must_ be signaled
@@ -248,7 +227,6 @@ void __dev_map_flush(struct bpf_map *map)
 			continue;
 
 		netdev = dev->dev;
-
 		__clear_bit(bit, bitmap);
 		if (unlikely(!netdev || !netdev->netdev_ops->ndo_xdp_flush))
 			continue;
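Taken together, __dev_map_insert_ctx() and the loop above implement a per-CPU deferred-flush protocol: the enqueue path cheaply marks a device as pending, and the end of the NAPI poll kicks each marked device once. A hedged outline of the two halves (simplified fragments, not the verbatim kernel source):

/* enqueue path, per redirected packet: */
__set_bit(bit, this_cpu_ptr(dtab->flush_needed));

/* xdp_do_flush_map() -> __dev_map_flush(), once per NAPI poll: */
for_each_set_bit(bit, bitmap, map->max_entries) {
	struct bpf_dtab_netdev *dev = READ_ONCE(dtab->netdev_map[bit]);

	__clear_bit(bit, bitmap);
	if (dev && dev->dev->netdev_ops->ndo_xdp_flush)
		dev->dev->netdev_ops->ndo_xdp_flush(dev->dev); /* kick TX */
}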
@@ -261,43 +239,49 @@ void __dev_map_flush(struct bpf_map *map)
  * update happens in parallel here a dev_put wont happen until after reading the
  * ifindex.
  */
-static void *dev_map_lookup_elem(struct bpf_map *map, void *key)
+struct net_device *__dev_map_lookup_elem(struct bpf_map *map, u32 key)
 {
 	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
 	struct bpf_dtab_netdev *dev;
-	u32 i = *(u32 *)key;
 
-	if (i >= map->max_entries)
+	if (key >= map->max_entries)
 		return NULL;
 
-	dev = READ_ONCE(dtab->netdev_map[i]);
-	return dev ? &dev->dev->ifindex : NULL;
+	dev = READ_ONCE(dtab->netdev_map[key]);
+	return dev ? dev->dev : NULL;
 }
 
-static void dev_map_flush_old(struct bpf_dtab_netdev *old_dev)
+static void *dev_map_lookup_elem(struct bpf_map *map, void *key)
+{
+	struct net_device *dev = __dev_map_lookup_elem(map, *(u32 *)key);
+
+	return dev ? &dev->ifindex : NULL;
+}
+
+static void dev_map_flush_old(struct bpf_dtab_netdev *dev)
 {
-	if (old_dev->dev->netdev_ops->ndo_xdp_flush) {
-		struct net_device *fl = old_dev->dev;
+	if (dev->dev->netdev_ops->ndo_xdp_flush) {
+		struct net_device *fl = dev->dev;
 		unsigned long *bitmap;
 		int cpu;
 
 		for_each_online_cpu(cpu) {
-			bitmap = per_cpu_ptr(old_dev->dtab->flush_needed, cpu);
-			__clear_bit(old_dev->key, bitmap);
+			bitmap = per_cpu_ptr(dev->dtab->flush_needed, cpu);
+			__clear_bit(dev->bit, bitmap);
 
-			fl->netdev_ops->ndo_xdp_flush(old_dev->dev);
+			fl->netdev_ops->ndo_xdp_flush(dev->dev);
 		}
 	}
 }
 
 static void __dev_map_entry_free(struct rcu_head *rcu)
 {
-	struct bpf_dtab_netdev *old_dev;
+	struct bpf_dtab_netdev *dev;
 
-	old_dev = container_of(rcu, struct bpf_dtab_netdev, rcu);
-	dev_map_flush_old(old_dev);
-	dev_put(old_dev->dev);
-	kfree(old_dev);
+	dev = container_of(rcu, struct bpf_dtab_netdev, rcu);
+	dev_map_flush_old(dev);
+	dev_put(dev->dev);
+	kfree(dev);
 }
 
 static int dev_map_delete_elem(struct bpf_map *map, void *key)
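For context on the two lookup flavors above: the double-underscore variant feeds the in-kernel XDP redirect path, while dev_map_lookup_elem() serves the syscall side and only ever exposes the ifindex. A hedged sketch of a BPF-side consumer, in the samples/bpf style of this kernel era (the map name, section names, and bpf_helpers.h include are illustrative assumptions, not part of this patch):

#include <linux/bpf.h>
#include "bpf_helpers.h"

/* devmap: values are ifindexes that userspace populates */
struct bpf_map_def SEC("maps") tx_port = {
	.type		= BPF_MAP_TYPE_DEVMAP,
	.key_size	= sizeof(int),
	.value_size	= sizeof(int),
	.max_entries	= 64,
};

SEC("xdp_redirect_map")
int xdp_redirect_map_prog(struct xdp_md *ctx)
{
	/* key 0 must hold a valid target device, else the packet drops;
	 * the kernel resolves it via __dev_map_lookup_elem() and marks
	 * the per-CPU flush bitmap via __dev_map_insert_ctx(). */
	return bpf_redirect_map(&tx_port, 0, 0);
}

char _license[] SEC("license") = "GPL";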
@@ -309,8 +293,8 @@ static int dev_map_delete_elem(struct bpf_map *map, void *key)
 	if (k >= map->max_entries)
 		return -EINVAL;
 
-	/* Use synchronize_rcu() here to ensure any rcu critical sections
-	 * have completed, but this does not guarantee a flush has happened
+	/* Use call_rcu() here to ensure any rcu critical sections have
+	 * completed, but this does not guarantee a flush has happened
 	 * yet. Because driver side rcu_read_lock/unlock only protects the
 	 * running XDP program. However, for pending flush operations the
 	 * dev and ctx are stored in another per cpu map. And additionally,
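The pattern the reworded comment refers to: the delete path unpublishes the entry with xchg() and defers teardown to RCU, so readers still inside rcu_read_lock() never see a freed pointer. A minimal sketch of that tail (the unchanged remainder of dev_map_delete_elem() is outside this hunk):

	old_dev = xchg(&dtab->netdev_map[k], NULL);
	if (old_dev)
		call_rcu(&old_dev->rcu, __dev_map_entry_free);
	return 0;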
@@ -334,10 +318,8 @@ static int dev_map_update_elem(struct bpf_map *map, void *key, void *value,
 
 	if (unlikely(map_flags > BPF_EXIST))
 		return -EINVAL;
-
 	if (unlikely(i >= dtab->map.max_entries))
 		return -E2BIG;
-
 	if (unlikely(map_flags == BPF_NOEXIST))
 		return -EEXIST;
 
@@ -355,7 +337,7 @@ static int dev_map_update_elem(struct bpf_map *map, void *key, void *value,
 			return -EINVAL;
 		}
 
-		dev->key = i;
+		dev->bit = i;
 		dev->dtab = dtab;
 	}
 
