Skip to content

Commit

Permalink
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
Browse files Browse the repository at this point in the history
Minor conflicts in net/mptcp/protocol.h and
tools/testing/selftests/net/Makefile.

In both cases code was added on both sides in the same place
so just keep both.

Signed-off-by: Jakub Kicinski <[email protected]>
  • Loading branch information
kuba-moo committed Oct 15, 2020
2 parents 346e320 + 2ecbc1f commit 2295cdd
Show file tree
Hide file tree
Showing 45 changed files with 1,209 additions and 140 deletions.
6 changes: 3 additions & 3 deletions Documentation/networking/scaling.rst
Original file line number Diff line number Diff line change
Expand Up @@ -465,9 +465,9 @@ XPS Configuration
-----------------

XPS is only available if the kconfig symbol CONFIG_XPS is enabled (on by
default for SMP). The functionality remains disabled until explicitly
configured. To enable XPS, the bitmap of CPUs/receive-queues that may
use a transmit queue is configured using the sysfs file entry:
default for SMP). If compiled in, it is driver dependent whether, and
how, XPS is configured at device init. The mapping of CPUs/receive-queues
to transmit queue can be inspected and configured using sysfs:

For selection based on CPUs map::

Expand Down
2 changes: 0 additions & 2 deletions drivers/net/can/m_can/m_can_platform.c
Original file line number Diff line number Diff line change
Expand Up @@ -144,8 +144,6 @@ static int __maybe_unused m_can_runtime_suspend(struct device *dev)
struct net_device *ndev = dev_get_drvdata(dev);
struct m_can_classdev *mcan_class = netdev_priv(ndev);

m_can_class_suspend(dev);

clk_disable_unprepare(mcan_class->cclk);
clk_disable_unprepare(mcan_class->hclk);

Expand Down
16 changes: 9 additions & 7 deletions drivers/net/dsa/microchip/ksz_common.c
Original file line number Diff line number Diff line change
Expand Up @@ -103,14 +103,8 @@ void ksz_init_mib_timer(struct ksz_device *dev)

INIT_DELAYED_WORK(&dev->mib_read, ksz_mib_read_work);

/* Read MIB counters every 30 seconds to avoid overflow. */
dev->mib_read_interval = msecs_to_jiffies(30000);

for (i = 0; i < dev->mib_port_cnt; i++)
dev->dev_ops->port_init_cnt(dev, i);

/* Start the timer 2 seconds later. */
schedule_delayed_work(&dev->mib_read, msecs_to_jiffies(2000));
}
EXPORT_SYMBOL_GPL(ksz_init_mib_timer);

Expand Down Expand Up @@ -143,7 +137,9 @@ void ksz_mac_link_down(struct dsa_switch *ds, int port, unsigned int mode,

/* Read all MIB counters when the link is going down. */
p->read = true;
schedule_delayed_work(&dev->mib_read, 0);
/* timer started */
if (dev->mib_read_interval)
schedule_delayed_work(&dev->mib_read, 0);
}
EXPORT_SYMBOL_GPL(ksz_mac_link_down);

Expand Down Expand Up @@ -451,6 +447,12 @@ int ksz_switch_register(struct ksz_device *dev,
return ret;
}

/* Read MIB counters every 30 seconds to avoid overflow. */
dev->mib_read_interval = msecs_to_jiffies(30000);

/* Start the MIB timer. */
schedule_delayed_work(&dev->mib_read, 0);

return 0;
}
EXPORT_SYMBOL(ksz_switch_register);
Expand Down
175 changes: 162 additions & 13 deletions drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
Original file line number Diff line number Diff line change
Expand Up @@ -60,6 +60,89 @@ static struct ch_tc_pedit_fields pedits[] = {
PEDIT_FIELDS(IP6_, DST_127_96, 4, nat_lip, 12),
};

/* Table of NAT mode configurations the hardware supports, keyed by the
 * set of 4-tuple fields (SIP/DIP/SPORT/DPORT) a flow wants rewritten and
 * the minimum chip generation. Looked up by flag equality in
 * cxgb4_action_natmode_tweak()/cxgb4_action_natmode_validate().
 */
static const struct cxgb4_natmode_config cxgb4_natmode_config_array[] = {
/* Default supported NAT modes */
{
.chip = CHELSIO_T5,
.flags = CXGB4_ACTION_NATMODE_NONE,
.natmode = NAT_MODE_NONE,
},
{
.chip = CHELSIO_T5,
.flags = CXGB4_ACTION_NATMODE_DIP,
.natmode = NAT_MODE_DIP,
},
{
.chip = CHELSIO_T5,
.flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_DPORT,
.natmode = NAT_MODE_DIP_DP,
},
{
.chip = CHELSIO_T5,
.flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_DPORT |
CXGB4_ACTION_NATMODE_SIP,
.natmode = NAT_MODE_DIP_DP_SIP,
},
{
.chip = CHELSIO_T5,
.flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_DPORT |
CXGB4_ACTION_NATMODE_SPORT,
.natmode = NAT_MODE_DIP_DP_SP,
},
{
.chip = CHELSIO_T5,
.flags = CXGB4_ACTION_NATMODE_SIP | CXGB4_ACTION_NATMODE_SPORT,
.natmode = NAT_MODE_SIP_SP,
},
{
.chip = CHELSIO_T5,
.flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_SIP |
CXGB4_ACTION_NATMODE_SPORT,
.natmode = NAT_MODE_DIP_SIP_SP,
},
{
.chip = CHELSIO_T5,
.flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_SIP |
CXGB4_ACTION_NATMODE_DPORT |
CXGB4_ACTION_NATMODE_SPORT,
.natmode = NAT_MODE_ALL,
},
/* T6+ can ignore L4 ports when they're disabled. */
/* These entries map IP-only flag sets to NAT modes that also cover
 * ports; on T6+ the disabled port fields are left untouched.
 */
{
.chip = CHELSIO_T6,
.flags = CXGB4_ACTION_NATMODE_SIP,
.natmode = NAT_MODE_SIP_SP,
},
{
.chip = CHELSIO_T6,
.flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_SPORT,
.natmode = NAT_MODE_DIP_DP_SP,
},
{
.chip = CHELSIO_T6,
.flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_SIP,
.natmode = NAT_MODE_ALL,
},
};

/* Map the set of NAT 4-tuple fields enabled in @natmode_flags onto one of
 * the hardware-supported NAT mode configurations and store it in
 * @fs->nat_mode. Picking a table entry whose flags match exactly ensures
 * the fields that were NOT enabled do not get overwritten to 0. If no
 * entry matches, @fs->nat_mode is left unchanged.
 */
static void cxgb4_action_natmode_tweak(struct ch_filter_specification *fs,
				       u8 natmode_flags)
{
	const struct cxgb4_natmode_config *entry = cxgb4_natmode_config_array;
	const struct cxgb4_natmode_config *end =
		entry + ARRAY_SIZE(cxgb4_natmode_config_array);

	for (; entry < end; entry++) {
		if (entry->flags == natmode_flags) {
			fs->nat_mode = entry->natmode;
			break;
		}
	}
}

static struct ch_tc_flower_entry *allocate_flower_entry(void)
{
struct ch_tc_flower_entry *new = kzalloc(sizeof(*new), GFP_KERNEL);
Expand Down Expand Up @@ -289,7 +372,8 @@ static void offload_pedit(struct ch_filter_specification *fs, u32 val, u32 mask,
}

static void process_pedit_field(struct ch_filter_specification *fs, u32 val,
u32 mask, u32 offset, u8 htype)
u32 mask, u32 offset, u8 htype,
u8 *natmode_flags)
{
switch (htype) {
case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
Expand All @@ -314,67 +398,102 @@ static void process_pedit_field(struct ch_filter_specification *fs, u32 val,
switch (offset) {
case PEDIT_IP4_SRC:
offload_pedit(fs, val, mask, IP4_SRC);
*natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
break;
case PEDIT_IP4_DST:
offload_pedit(fs, val, mask, IP4_DST);
*natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
}
fs->nat_mode = NAT_MODE_ALL;
break;
case FLOW_ACT_MANGLE_HDR_TYPE_IP6:
switch (offset) {
case PEDIT_IP6_SRC_31_0:
offload_pedit(fs, val, mask, IP6_SRC_31_0);
*natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
break;
case PEDIT_IP6_SRC_63_32:
offload_pedit(fs, val, mask, IP6_SRC_63_32);
*natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
break;
case PEDIT_IP6_SRC_95_64:
offload_pedit(fs, val, mask, IP6_SRC_95_64);
*natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
break;
case PEDIT_IP6_SRC_127_96:
offload_pedit(fs, val, mask, IP6_SRC_127_96);
*natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
break;
case PEDIT_IP6_DST_31_0:
offload_pedit(fs, val, mask, IP6_DST_31_0);
*natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
break;
case PEDIT_IP6_DST_63_32:
offload_pedit(fs, val, mask, IP6_DST_63_32);
*natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
break;
case PEDIT_IP6_DST_95_64:
offload_pedit(fs, val, mask, IP6_DST_95_64);
*natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
break;
case PEDIT_IP6_DST_127_96:
offload_pedit(fs, val, mask, IP6_DST_127_96);
*natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
}
fs->nat_mode = NAT_MODE_ALL;
break;
case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
switch (offset) {
case PEDIT_TCP_SPORT_DPORT:
if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
if (~mask & PEDIT_TCP_UDP_SPORT_MASK) {
fs->nat_fport = val;
else
*natmode_flags |= CXGB4_ACTION_NATMODE_SPORT;
} else {
fs->nat_lport = val >> 16;
*natmode_flags |= CXGB4_ACTION_NATMODE_DPORT;
}
}
fs->nat_mode = NAT_MODE_ALL;
break;
case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
switch (offset) {
case PEDIT_UDP_SPORT_DPORT:
if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
if (~mask & PEDIT_TCP_UDP_SPORT_MASK) {
fs->nat_fport = val;
else
*natmode_flags |= CXGB4_ACTION_NATMODE_SPORT;
} else {
fs->nat_lport = val >> 16;
*natmode_flags |= CXGB4_ACTION_NATMODE_DPORT;
}
}
fs->nat_mode = NAT_MODE_ALL;
break;
}
}

/* Check whether the NAT 4-tuple field combination in @natmode_flags is one
 * the adapter's chip generation can express as a hardware NAT mode.
 *
 * Returns 0 when a matching entry exists in cxgb4_natmode_config_array for
 * this chip (or an older generation), otherwise sets an extack message and
 * returns -EOPNOTSUPP.
 */
static int cxgb4_action_natmode_validate(struct adapter *adap, u8 natmode_flags,
					 struct netlink_ext_ack *extack)
{
	size_t i;

	/* The flag set must match a supported configuration exactly, so
	 * that disabled fields are never overwritten to 0 by the hardware.
	 */
	for (i = 0; i < ARRAY_SIZE(cxgb4_natmode_config_array); i++) {
		const struct cxgb4_natmode_config *entry =
			&cxgb4_natmode_config_array[i];

		if (natmode_flags == entry->flags &&
		    CHELSIO_CHIP_VERSION(adap->params.chip) >= entry->chip)
			return 0;
	}

	NL_SET_ERR_MSG_MOD(extack, "Unsupported NAT mode 4-tuple combination");
	return -EOPNOTSUPP;
}

void cxgb4_process_flow_actions(struct net_device *in,
struct flow_action *actions,
struct ch_filter_specification *fs)
{
struct flow_action_entry *act;
u8 natmode_flags = 0;
int i;

flow_action_for_each(i, act, actions) {
Expand Down Expand Up @@ -426,7 +545,8 @@ void cxgb4_process_flow_actions(struct net_device *in,
val = act->mangle.val;
offset = act->mangle.offset;

process_pedit_field(fs, val, mask, offset, htype);
process_pedit_field(fs, val, mask, offset, htype,
&natmode_flags);
}
break;
case FLOW_ACTION_QUEUE:
Expand All @@ -438,6 +558,9 @@ void cxgb4_process_flow_actions(struct net_device *in,
break;
}
}
if (natmode_flags)
cxgb4_action_natmode_tweak(fs, natmode_flags);

}

static bool valid_l4_mask(u32 mask)
Expand All @@ -454,7 +577,8 @@ static bool valid_l4_mask(u32 mask)
}

static bool valid_pedit_action(struct net_device *dev,
const struct flow_action_entry *act)
const struct flow_action_entry *act,
u8 *natmode_flags)
{
u32 mask, offset;
u8 htype;
Expand All @@ -479,7 +603,10 @@ static bool valid_pedit_action(struct net_device *dev,
case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
switch (offset) {
case PEDIT_IP4_SRC:
*natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
break;
case PEDIT_IP4_DST:
*natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
break;
default:
netdev_err(dev, "%s: Unsupported pedit field\n",
Expand All @@ -493,10 +620,13 @@ static bool valid_pedit_action(struct net_device *dev,
case PEDIT_IP6_SRC_63_32:
case PEDIT_IP6_SRC_95_64:
case PEDIT_IP6_SRC_127_96:
*natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
break;
case PEDIT_IP6_DST_31_0:
case PEDIT_IP6_DST_63_32:
case PEDIT_IP6_DST_95_64:
case PEDIT_IP6_DST_127_96:
*natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
break;
default:
netdev_err(dev, "%s: Unsupported pedit field\n",
Expand All @@ -512,6 +642,10 @@ static bool valid_pedit_action(struct net_device *dev,
__func__);
return false;
}
if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
*natmode_flags |= CXGB4_ACTION_NATMODE_SPORT;
else
*natmode_flags |= CXGB4_ACTION_NATMODE_DPORT;
break;
default:
netdev_err(dev, "%s: Unsupported pedit field\n",
Expand All @@ -527,6 +661,10 @@ static bool valid_pedit_action(struct net_device *dev,
__func__);
return false;
}
if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
*natmode_flags |= CXGB4_ACTION_NATMODE_SPORT;
else
*natmode_flags |= CXGB4_ACTION_NATMODE_DPORT;
break;
default:
netdev_err(dev, "%s: Unsupported pedit field\n",
Expand All @@ -546,10 +684,12 @@ int cxgb4_validate_flow_actions(struct net_device *dev,
struct netlink_ext_ack *extack,
u8 matchall_filter)
{
struct adapter *adap = netdev2adap(dev);
struct flow_action_entry *act;
bool act_redir = false;
bool act_pedit = false;
bool act_vlan = false;
u8 natmode_flags = 0;
int i;

if (!flow_action_basic_hw_stats_check(actions, extack))
Expand All @@ -563,7 +703,6 @@ int cxgb4_validate_flow_actions(struct net_device *dev,
break;
case FLOW_ACTION_MIRRED:
case FLOW_ACTION_REDIRECT: {
struct adapter *adap = netdev2adap(dev);
struct net_device *n_dev, *target_dev;
bool found = false;
unsigned int i;
Expand Down Expand Up @@ -620,7 +759,8 @@ int cxgb4_validate_flow_actions(struct net_device *dev,
}
break;
case FLOW_ACTION_MANGLE: {
bool pedit_valid = valid_pedit_action(dev, act);
bool pedit_valid = valid_pedit_action(dev, act,
&natmode_flags);

if (!pedit_valid)
return -EOPNOTSUPP;
Expand All @@ -642,6 +782,15 @@ int cxgb4_validate_flow_actions(struct net_device *dev,
return -EINVAL;
}

if (act_pedit) {
int ret;

ret = cxgb4_action_natmode_validate(adap, natmode_flags,
extack);
if (ret)
return ret;
}

return 0;
}

Expand Down
Loading

0 comments on commit 2295cdd

Please sign in to comment.