Skip to content

Commit

Permalink
ovs-tc: offload datapath rules matching on internal ports
Browse files Browse the repository at this point in the history
Rules applied to OvS internal ports are not represented in TC datapaths.
However, it is possible to support rules matching on internal ports in TC.
The start_xmit ndo (net device operation) of OvS internal ports directs
packets back into the OvS kernel datapath, where they are rematched with
the ingress port now being that of the internal port. Because of this,
rules matching on an internal port can be added as TC filters to an
egress qdisc for these ports.

Allow rules applied to internal ports to be offloaded to TC as egress
filters. Rules redirecting to an internal port are also offloaded. These
are supported by the redirect ingress functionality applied in an earlier
patch.

Signed-off-by: John Hurley <[email protected]>
Reviewed-by: Roi Dayan <[email protected]>
Signed-off-by: Simon Horman <[email protected]>
  • Loading branch information
jahurley authored and shorman-netronome committed Apr 10, 2019
1 parent 9525501 commit 608ff46
Show file tree
Hide file tree
Showing 3 changed files with 35 additions and 20 deletions.
13 changes: 5 additions & 8 deletions lib/dpif.c
Original file line number Diff line number Diff line change
Expand Up @@ -101,12 +101,9 @@ static bool should_log_flow_message(const struct vlog_module *module,
struct seq *tnl_conf_seq;

static bool
dpif_is_internal_port(const char *type)
dpif_is_tap_port(const char *type)
{
/* For userspace datapath, tap devices are the equivalent
* of internal devices in the kernel datapath, so both
* these types are 'internal' devices. */
return !strcmp(type, "internal") || !strcmp(type, "tap");
return !strcmp(type, "tap");
}

static void
Expand Down Expand Up @@ -359,7 +356,7 @@ do_open(const char *name, const char *type, bool create, struct dpif **dpifp)
struct netdev *netdev;
int err;

if (dpif_is_internal_port(dpif_port.type)) {
if (dpif_is_tap_port(dpif_port.type)) {
continue;
}

Expand Down Expand Up @@ -434,7 +431,7 @@ dpif_remove_netdev_ports(struct dpif *dpif) {
struct dpif_port dpif_port;

DPIF_PORT_FOR_EACH (&dpif_port, &port_dump, dpif) {
if (!dpif_is_internal_port(dpif_port.type)) {
if (!dpif_is_tap_port(dpif_port.type)) {
netdev_ports_remove(dpif_port.port_no, dpif->dpif_class);
}
}
Expand Down Expand Up @@ -582,7 +579,7 @@ dpif_port_add(struct dpif *dpif, struct netdev *netdev, odp_port_t *port_nop)
VLOG_DBG_RL(&dpmsg_rl, "%s: added %s as port %"PRIu32,
dpif_name(dpif), netdev_name, port_no);

if (!dpif_is_internal_port(netdev_get_type(netdev))) {
if (!dpif_is_tap_port(netdev_get_type(netdev))) {

struct dpif_port dpif_port;

Expand Down
1 change: 1 addition & 0 deletions lib/netdev-linux.c
Original file line number Diff line number Diff line change
Expand Up @@ -3340,6 +3340,7 @@ const struct netdev_class netdev_tap_class = {

const struct netdev_class netdev_internal_class = {
NETDEV_LINUX_CLASS_COMMON,
LINUX_FLOW_OFFLOAD_API,
.type = "internal",
.construct = netdev_linux_construct,
.get_stats = netdev_internal_get_stats,
Expand Down
41 changes: 29 additions & 12 deletions lib/netdev-tc-offloads.c
Original file line number Diff line number Diff line change
Expand Up @@ -59,6 +59,12 @@ is_internal_port(const char *type)
return !strcmp(type, "internal");
}

/* Returns the TC qdisc hook at which flower filters for 'netdev' are
 * attached: egress for OvS internal ports (their start_xmit re-injects
 * packets into the datapath), ingress for every other port type. */
static enum tc_qdisc_hook
get_tc_qdisc_hook(struct netdev *netdev)
{
    if (is_internal_port(netdev_get_type(netdev))) {
        return TC_EGRESS;
    }

    return TC_INGRESS;
}

static struct netlink_field set_flower_map[][4] = {
[OVS_KEY_ATTR_IPV4] = {
{ offsetof(struct ovs_key_ipv4, ipv4_src),
Expand Down Expand Up @@ -185,11 +191,12 @@ del_ufid_tc_mapping(const ovs_u128 *ufid)
/* Wrapper function to delete filter and ufid tc mapping */
static int
del_filter_and_ufid_mapping(int ifindex, int prio, int handle,
uint32_t block_id, const ovs_u128 *ufid)
uint32_t block_id, const ovs_u128 *ufid,
enum tc_qdisc_hook hook)
{
int err;

err = tc_del_filter(ifindex, prio, handle, block_id, TC_INGRESS);
err = tc_del_filter(ifindex, prio, handle, block_id, hook);
del_ufid_tc_mapping(ufid);

return err;
Expand Down Expand Up @@ -346,6 +353,7 @@ get_block_id_from_netdev(struct netdev *netdev)
int
netdev_tc_flow_flush(struct netdev *netdev)
{
enum tc_qdisc_hook hook = get_tc_qdisc_hook(netdev);
int ifindex = netdev_get_ifindex(netdev);
uint32_t block_id = 0;

Expand All @@ -357,13 +365,14 @@ netdev_tc_flow_flush(struct netdev *netdev)

block_id = get_block_id_from_netdev(netdev);

return tc_flush(ifindex, block_id, TC_INGRESS);
return tc_flush(ifindex, block_id, hook);
}

int
netdev_tc_flow_dump_create(struct netdev *netdev,
struct netdev_flow_dump **dump_out)
{
enum tc_qdisc_hook hook = get_tc_qdisc_hook(netdev);
struct netdev_flow_dump *dump;
uint32_t block_id = 0;
int ifindex;
Expand All @@ -379,7 +388,7 @@ netdev_tc_flow_dump_create(struct netdev *netdev,
dump = xzalloc(sizeof *dump);
dump->nl_dump = xzalloc(sizeof *dump->nl_dump);
dump->netdev = netdev_ref(netdev);
tc_dump_flower_start(ifindex, dump->nl_dump, block_id, TC_INGRESS);
tc_dump_flower_start(ifindex, dump->nl_dump, block_id, hook);

*dump_out = dump;

Expand Down Expand Up @@ -1080,6 +1089,7 @@ netdev_tc_flow_put(struct netdev *netdev, struct match *match,
struct dpif_flow_stats *stats OVS_UNUSED)
{
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);
enum tc_qdisc_hook hook = get_tc_qdisc_hook(netdev);
struct tc_flower flower;
const struct flow *key = &match->flow;
struct flow *mask = &match->wc.masks;
Expand Down Expand Up @@ -1342,7 +1352,8 @@ netdev_tc_flow_put(struct netdev *netdev, struct match *match,
handle = get_ufid_tc_mapping(ufid, &prio, NULL);
if (handle && prio) {
VLOG_DBG_RL(&rl, "updating old handle: %d prio: %d", handle, prio);
del_filter_and_ufid_mapping(ifindex, prio, handle, block_id, ufid);
del_filter_and_ufid_mapping(ifindex, prio, handle, block_id, ufid,
hook);
}

if (!prio) {
Expand All @@ -1356,8 +1367,7 @@ netdev_tc_flow_put(struct netdev *netdev, struct match *match,
flower.act_cookie.data = ufid;
flower.act_cookie.len = sizeof *ufid;

err = tc_replace_flower(ifindex, prio, handle, &flower, block_id,
TC_INGRESS);
err = tc_replace_flower(ifindex, prio, handle, &flower, block_id, hook);
if (!err) {
add_ufid_tc_mapping(ufid, flower.prio, flower.handle, netdev, ifindex);
}
Expand All @@ -1377,6 +1387,7 @@ netdev_tc_flow_get(struct netdev *netdev OVS_UNUSED,
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);
struct netdev *dev;
struct tc_flower flower;
enum tc_qdisc_hook hook;
uint32_t block_id = 0;
odp_port_t in_port;
int prio = 0;
Expand All @@ -1389,6 +1400,8 @@ netdev_tc_flow_get(struct netdev *netdev OVS_UNUSED,
return ENOENT;
}

hook = get_tc_qdisc_hook(dev);

ifindex = netdev_get_ifindex(dev);
if (ifindex < 0) {
VLOG_ERR_RL(&error_rl, "flow_get: failed to get ifindex for %s: %s",
Expand All @@ -1400,7 +1413,7 @@ netdev_tc_flow_get(struct netdev *netdev OVS_UNUSED,
block_id = get_block_id_from_netdev(dev);
VLOG_DBG_RL(&rl, "flow get (dev %s prio %d handle %d block_id %d)",
netdev_get_name(dev), prio, handle, block_id);
err = tc_get_flower(ifindex, prio, handle, &flower, block_id, TC_INGRESS);
err = tc_get_flower(ifindex, prio, handle, &flower, block_id, hook);
netdev_close(dev);
if (err) {
VLOG_ERR_RL(&error_rl, "flow get failed (dev %s prio %d handle %d): %s",
Expand All @@ -1423,6 +1436,7 @@ netdev_tc_flow_del(struct netdev *netdev OVS_UNUSED,
struct dpif_flow_stats *stats)
{
struct tc_flower flower;
enum tc_qdisc_hook hook;
uint32_t block_id = 0;
struct netdev *dev;
int prio = 0;
Expand All @@ -1435,6 +1449,8 @@ netdev_tc_flow_del(struct netdev *netdev OVS_UNUSED,
return ENOENT;
}

hook = get_tc_qdisc_hook(dev);

ifindex = netdev_get_ifindex(dev);
if (ifindex < 0) {
VLOG_ERR_RL(&error_rl, "flow_del: failed to get ifindex for %s: %s",
Expand All @@ -1447,15 +1463,15 @@ netdev_tc_flow_del(struct netdev *netdev OVS_UNUSED,

if (stats) {
memset(stats, 0, sizeof *stats);
if (!tc_get_flower(ifindex, prio, handle, &flower, block_id,
TC_INGRESS)) {
if (!tc_get_flower(ifindex, prio, handle, &flower, block_id, hook)) {
stats->n_packets = get_32aligned_u64(&flower.stats.n_packets);
stats->n_bytes = get_32aligned_u64(&flower.stats.n_bytes);
stats->used = flower.lastused;
}
}

error = del_filter_and_ufid_mapping(ifindex, prio, handle, block_id, ufid);
error = del_filter_and_ufid_mapping(ifindex, prio, handle, block_id, ufid,
hook);

netdev_close(dev);

Expand Down Expand Up @@ -1539,6 +1555,7 @@ netdev_tc_init_flow_api(struct netdev *netdev)
{
static struct ovsthread_once multi_mask_once = OVSTHREAD_ONCE_INITIALIZER;
static struct ovsthread_once block_once = OVSTHREAD_ONCE_INITIALIZER;
enum tc_qdisc_hook hook = get_tc_qdisc_hook(netdev);
uint32_t block_id = 0;
int ifindex;
int error;
Expand All @@ -1564,7 +1581,7 @@ netdev_tc_init_flow_api(struct netdev *netdev)
}

block_id = get_block_id_from_netdev(netdev);
error = tc_add_del_qdisc(ifindex, true, block_id, TC_INGRESS);
error = tc_add_del_qdisc(ifindex, true, block_id, hook);

if (error && error != EEXIST) {
VLOG_ERR("failed adding ingress qdisc required for offloading: %s",
Expand Down

0 comments on commit 608ff46

Please sign in to comment.