Commit b9a5cbf8 authored by David S. Miller

Merge branch 'sfc-tc-offload'

Edward Cree says:

====================
sfc: bare bones TC offload

This series begins the work of supporting TC flower offload on EF100 NICs.
This is the absolute minimum viable TC implementation to get traffic to
 VFs and allow them to be tested; it supports no match fields besides
 ingress port, no actions besides mirred and drop, and no stats.
More matches, actions, and counters will be added in subsequent patches.
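
For illustration, a rule of the only shape this series can offload might look
like the following (interface names are placeholders; the flower match carries
no keys beyond the implicit ingress device, and the action is either a drop or
a mirred redirect to a port on the same switch):

 tc qdisc add dev $PF ingress
 tc filter add dev $PF ingress protocol all flower skip_sw \
     action mirred egress redirect dev $VF_REP
 tc filter add dev $VF_REP ingress protocol all flower skip_sw action drop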

Changed in v2:
 - Add missing 'static' on declarations (kernel test robot, sparse)
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents c87e4ad1 d902e1a7
@@ -9,7 +9,7 @@ sfc-y += efx.o efx_common.o efx_channels.o nic.o \
ef100_ethtool.o ef100_rx.o ef100_tx.o
sfc-$(CONFIG_SFC_MTD) += mtd.o
sfc-$(CONFIG_SFC_SRIOV) += sriov.o ef10_sriov.o ef100_sriov.o ef100_rep.o \
mae.o tc.o tc_bindings.o
obj-$(CONFIG_SFC) += sfc.o
...
@@ -43,6 +43,8 @@ const struct ethtool_ops ef100_ethtool_ops = {
.get_pauseparam = efx_ethtool_get_pauseparam,
.set_pauseparam = efx_ethtool_set_pauseparam,
.get_sset_count = efx_ethtool_get_sset_count,
.get_priv_flags = efx_ethtool_get_priv_flags,
.set_priv_flags = efx_ethtool_set_priv_flags,
.self_test = efx_ethtool_self_test,
.get_strings = efx_ethtool_get_strings,
.get_link_ksettings = efx_ethtool_get_link_ksettings,
...
@@ -23,6 +23,7 @@
#include "mcdi_filters.h"
#include "rx_common.h"
#include "ef100_sriov.h"
#include "tc_bindings.h"
static void ef100_update_name(struct efx_nic *efx)
{
@@ -246,6 +247,9 @@ static const struct net_device_ops ef100_netdev_ops = {
#ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = efx_filter_rfs,
#endif
#ifdef CONFIG_SFC_SRIOV
.ndo_setup_tc = efx_tc_setup,
#endif
};
/* Netdev registration
...
@@ -1137,6 +1137,9 @@ int ef100_probe_netdev_pf(struct efx_nic *efx)
*/
netif_warn(efx, probe, net_dev, "Failed to probe MAE rc %d\n",
rc);
} else {
net_dev->features |= NETIF_F_HW_TC;
efx->fixed_features |= NETIF_F_HW_TC;
}
#endif
return 0;
...
@@ -14,6 +14,7 @@
#include "ef100_nic.h"
#include "mae.h"
#include "rx_common.h"
#include "tc_bindings.h"
#define EFX_EF100_REP_DRIVER "efx_ef100_rep"
@@ -107,6 +108,20 @@ static int efx_ef100_rep_get_phys_port_name(struct net_device *dev,
return 0;
}
static int efx_ef100_rep_setup_tc(struct net_device *net_dev,
enum tc_setup_type type, void *type_data)
{
struct efx_rep *efv = netdev_priv(net_dev);
struct efx_nic *efx = efv->parent;
if (type == TC_SETUP_CLSFLOWER)
return efx_tc_flower(efx, net_dev, type_data, efv);
if (type == TC_SETUP_BLOCK)
return efx_tc_setup_block(net_dev, efx, type_data, efv);
return -EOPNOTSUPP;
}
static void efx_ef100_rep_get_stats64(struct net_device *dev,
struct rtnl_link_stats64 *stats)
{
@@ -120,13 +135,14 @@ static void efx_ef100_rep_get_stats64(struct net_device *dev,
stats->tx_errors = atomic64_read(&efv->stats.tx_errors);
}
const struct net_device_ops efx_ef100_rep_netdev_ops = {
.ndo_open = efx_ef100_rep_open,
.ndo_stop = efx_ef100_rep_close,
.ndo_start_xmit = efx_ef100_rep_xmit,
.ndo_get_port_parent_id = efx_ef100_rep_get_port_parent_id,
.ndo_get_phys_port_name = efx_ef100_rep_get_phys_port_name,
.ndo_get_stats64 = efx_ef100_rep_get_stats64,
.ndo_setup_tc = efx_ef100_rep_setup_tc,
};
static void efx_ef100_rep_get_drvinfo(struct net_device *dev,
...
@@ -66,4 +66,5 @@ void efx_ef100_rep_rx_packet(struct efx_rep *efv, struct efx_rx_buffer *rx_buf);
* Caller must hold rcu_read_lock().
*/
struct efx_rep *efx_ef100_find_rep_by_mport(struct efx_nic *efx, u16 mport);
extern const struct net_device_ops efx_ef100_rep_netdev_ops;
#endif /* EF100_REP_H */
@@ -101,6 +101,14 @@ static const struct efx_sw_stat_desc efx_sw_stat_desc[] = {
#define EFX_ETHTOOL_SW_STAT_COUNT ARRAY_SIZE(efx_sw_stat_desc)
static const char efx_ethtool_priv_flags_strings[][ETH_GSTRING_LEN] = {
"log-tc-errors",
};
#define EFX_ETHTOOL_PRIV_FLAGS_LOG_TC_ERRS BIT(0)
#define EFX_ETHTOOL_PRIV_FLAGS_COUNT ARRAY_SIZE(efx_ethtool_priv_flags_strings)
void efx_ethtool_get_drvinfo(struct net_device *net_dev,
struct ethtool_drvinfo *info)
{
@@ -452,6 +460,8 @@ int efx_ethtool_get_sset_count(struct net_device *net_dev, int string_set)
efx_ptp_describe_stats(efx, NULL);
case ETH_SS_TEST:
return efx_ethtool_fill_self_tests(efx, NULL, NULL, NULL);
case ETH_SS_PRIV_FLAGS:
return EFX_ETHTOOL_PRIV_FLAGS_COUNT;
default:
return -EINVAL;
}
@@ -478,12 +488,39 @@ void efx_ethtool_get_strings(struct net_device *net_dev,
case ETH_SS_TEST:
efx_ethtool_fill_self_tests(efx, NULL, strings, NULL);
break;
case ETH_SS_PRIV_FLAGS:
for (i = 0; i < EFX_ETHTOOL_PRIV_FLAGS_COUNT; i++)
strscpy(strings + i * ETH_GSTRING_LEN,
efx_ethtool_priv_flags_strings[i],
ETH_GSTRING_LEN);
break;
default:
/* No other string sets */
break;
}
}
u32 efx_ethtool_get_priv_flags(struct net_device *net_dev)
{
struct efx_nic *efx = efx_netdev_priv(net_dev);
u32 ret_flags = 0;
if (efx->log_tc_errs)
ret_flags |= EFX_ETHTOOL_PRIV_FLAGS_LOG_TC_ERRS;
return ret_flags;
}
int efx_ethtool_set_priv_flags(struct net_device *net_dev, u32 flags)
{
struct efx_nic *efx = efx_netdev_priv(net_dev);
efx->log_tc_errs =
!!(flags & EFX_ETHTOOL_PRIV_FLAGS_LOG_TC_ERRS);
return 0;
}
void efx_ethtool_get_stats(struct net_device *net_dev,
struct ethtool_stats *stats,
u64 *data)
...
@@ -27,6 +27,8 @@ int efx_ethtool_fill_self_tests(struct efx_nic *efx,
int efx_ethtool_get_sset_count(struct net_device *net_dev, int string_set);
void efx_ethtool_get_strings(struct net_device *net_dev, u32 string_set,
u8 *strings);
u32 efx_ethtool_get_priv_flags(struct net_device *net_dev);
int efx_ethtool_set_priv_flags(struct net_device *net_dev, u32 flags);
void efx_ethtool_get_stats(struct net_device *net_dev,
struct ethtool_stats *stats __attribute__ ((unused)),
u64 *data);
...
@@ -112,6 +112,167 @@ int efx_mae_lookup_mport(struct efx_nic *efx, u32 selector, u32 *id)
return 0;
}
static int efx_mae_get_basic_caps(struct efx_nic *efx, struct mae_caps *caps)
{
MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_GET_CAPS_OUT_LEN);
size_t outlen;
int rc;
BUILD_BUG_ON(MC_CMD_MAE_GET_CAPS_IN_LEN);
rc = efx_mcdi_rpc(efx, MC_CMD_MAE_GET_CAPS, NULL, 0, outbuf,
sizeof(outbuf), &outlen);
if (rc)
return rc;
if (outlen < sizeof(outbuf))
return -EIO;
caps->match_field_count = MCDI_DWORD(outbuf, MAE_GET_CAPS_OUT_MATCH_FIELD_COUNT);
caps->action_prios = MCDI_DWORD(outbuf, MAE_GET_CAPS_OUT_ACTION_PRIOS);
return 0;
}
static int efx_mae_get_rule_fields(struct efx_nic *efx, u32 cmd,
u8 *field_support)
{
MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_GET_AR_CAPS_OUT_LEN(MAE_NUM_FIELDS));
MCDI_DECLARE_STRUCT_PTR(caps);
unsigned int count;
size_t outlen;
int rc, i;
BUILD_BUG_ON(MC_CMD_MAE_GET_AR_CAPS_IN_LEN);
rc = efx_mcdi_rpc(efx, cmd, NULL, 0, outbuf, sizeof(outbuf), &outlen);
if (rc)
return rc;
count = MCDI_DWORD(outbuf, MAE_GET_AR_CAPS_OUT_COUNT);
memset(field_support, MAE_FIELD_UNSUPPORTED, MAE_NUM_FIELDS);
caps = _MCDI_DWORD(outbuf, MAE_GET_AR_CAPS_OUT_FIELD_FLAGS);
/* We're only interested in the support status enum, not any other
* flags, so just extract that from each entry.
*/
for (i = 0; i < count; i++)
if (i * sizeof(*outbuf) + MC_CMD_MAE_GET_AR_CAPS_OUT_FIELD_FLAGS_OFST < outlen)
field_support[i] = EFX_DWORD_FIELD(caps[i], MAE_FIELD_FLAGS_SUPPORT_STATUS);
return 0;
}
int efx_mae_get_caps(struct efx_nic *efx, struct mae_caps *caps)
{
int rc;
rc = efx_mae_get_basic_caps(efx, caps);
if (rc)
return rc;
return efx_mae_get_rule_fields(efx, MC_CMD_MAE_GET_AR_CAPS,
caps->action_rule_fields);
}
/* Bit twiddling:
* Prefix: 1...110...0
* ~: 0...001...1
* + 1: 0...010...0 is power of two
* so (~x) & ((~x) + 1) == 0. Converse holds also.
*/
#define is_prefix_byte(_x) !(((_x) ^ 0xff) & (((_x) ^ 0xff) + 1))
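/* Worked example of the check above (illustrative): for 0xf0, which is a
 * prefix, 0xf0 ^ 0xff = 0x0f and 0x0f + 1 = 0x10, so the AND is 0 and the
 * macro is true; for 0xb0, which is not a prefix, 0xb0 ^ 0xff = 0x4f and
 * 0x4f + 1 = 0x50, so the AND is 0x40 and the macro is false.
 */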
enum mask_type { MASK_ONES, MASK_ZEROES, MASK_PREFIX, MASK_OTHER };
static const char *mask_type_name(enum mask_type typ)
{
switch (typ) {
case MASK_ONES:
return "all-1s";
case MASK_ZEROES:
return "all-0s";
case MASK_PREFIX:
return "prefix";
case MASK_OTHER:
return "arbitrary";
default: /* can't happen */
return "unknown";
}
}
/* Checks a (big-endian) bytestring is a bit prefix */
static enum mask_type classify_mask(const u8 *mask, size_t len)
{
bool zeroes = true; /* All bits seen so far are zeroes */
bool ones = true; /* All bits seen so far are ones */
bool prefix = true; /* Valid prefix so far */
size_t i;
for (i = 0; i < len; i++) {
if (ones) {
if (!is_prefix_byte(mask[i]))
prefix = false;
} else if (mask[i]) {
prefix = false;
}
if (mask[i] != 0xff)
ones = false;
if (mask[i])
zeroes = false;
}
if (ones)
return MASK_ONES;
if (zeroes)
return MASK_ZEROES;
if (prefix)
return MASK_PREFIX;
return MASK_OTHER;
}
static int efx_mae_match_check_cap_typ(u8 support, enum mask_type typ)
{
switch (support) {
case MAE_FIELD_UNSUPPORTED:
case MAE_FIELD_SUPPORTED_MATCH_NEVER:
if (typ == MASK_ZEROES)
return 0;
return -EOPNOTSUPP;
case MAE_FIELD_SUPPORTED_MATCH_OPTIONAL:
if (typ == MASK_ZEROES)
return 0;
fallthrough;
case MAE_FIELD_SUPPORTED_MATCH_ALWAYS:
if (typ == MASK_ONES)
return 0;
return -EINVAL;
case MAE_FIELD_SUPPORTED_MATCH_PREFIX:
if (typ == MASK_OTHER)
return -EOPNOTSUPP;
return 0;
case MAE_FIELD_SUPPORTED_MATCH_MASK:
return 0;
default:
return -EIO;
}
}
int efx_mae_match_check_caps(struct efx_nic *efx,
const struct efx_tc_match_fields *mask,
struct netlink_ext_ack *extack)
{
const u8 *supported_fields = efx->tc->caps->action_rule_fields;
__be32 ingress_port = cpu_to_be32(mask->ingress_port);
enum mask_type ingress_port_mask_type;
int rc;
/* Check for _PREFIX assumes big-endian, so we need to convert */
ingress_port_mask_type = classify_mask((const u8 *)&ingress_port,
sizeof(ingress_port));
rc = efx_mae_match_check_cap_typ(supported_fields[MAE_FIELD_INGRESS_PORT],
ingress_port_mask_type);
if (rc) {
efx_tc_err(efx, "No support for %s mask in field ingress_port\n",
mask_type_name(ingress_port_mask_type));
NL_SET_ERR_MSG_MOD(extack, "Unsupported mask type for ingress_port");
return rc;
}
return 0;
}
static bool efx_mae_asl_id(u32 id)
{
return !!(id & BIT(31));
@@ -279,6 +440,10 @@ static int efx_mae_populate_match_criteria(MCDI_DECLARE_STRUCT_PTR(match_crit),
}
MCDI_STRUCT_SET_DWORD(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_INGRESS_MPORT_SELECTOR_MASK,
match->mask.ingress_port);
MCDI_STRUCT_SET_BYTE(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_RECIRC_ID,
match->value.recirc_id);
MCDI_STRUCT_SET_BYTE(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_RECIRC_ID_MASK,
match->mask.recirc_id);
return 0;
}
...
@@ -27,6 +27,20 @@ void efx_mae_mport_mport(struct efx_nic *efx, u32 mport_id, u32 *out);
int efx_mae_lookup_mport(struct efx_nic *efx, u32 selector, u32 *id);
#define MAE_NUM_FIELDS (MAE_FIELD_ENC_VNET_ID + 1)
struct mae_caps {
u32 match_field_count;
u32 action_prios;
u8 action_rule_fields[MAE_NUM_FIELDS];
};
int efx_mae_get_caps(struct efx_nic *efx, struct mae_caps *caps);
int efx_mae_match_check_caps(struct efx_nic *efx,
const struct efx_tc_match_fields *mask,
struct netlink_ext_ack *extack);
int efx_mae_alloc_action_set(struct efx_nic *efx, struct efx_tc_action_set *act);
int efx_mae_free_action_set(struct efx_nic *efx, u32 fw_id);
...
@@ -201,6 +201,12 @@ void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev);
((u8 *)(_buf) + (_offset))
#define MCDI_PTR(_buf, _field) \
_MCDI_PTR(_buf, MC_CMD_ ## _field ## _OFST)
/* Use MCDI_STRUCT_ functions to access members of MCDI structuredefs.
* _buf should point to the start of the structure, typically obtained with
* MCDI_DECLARE_STRUCT_PTR(structure) = _MCDI_DWORD(mcdi_buf, FIELD_WHICH_IS_STRUCT);
*/
#define MCDI_STRUCT_PTR(_buf, _field) \
_MCDI_PTR(_buf, _field ## _OFST)
#define _MCDI_CHECK_ALIGN(_ofst, _align) \
((_ofst) + BUILD_BUG_ON_ZERO((_ofst) & (_align - 1)))
#define _MCDI_DWORD(_buf, _field) \
@@ -208,6 +214,10 @@ void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev);
#define _MCDI_STRUCT_DWORD(_buf, _field) \
((_buf) + (_MCDI_CHECK_ALIGN(_field ## _OFST, 4) >> 2))
#define MCDI_STRUCT_SET_BYTE(_buf, _field, _value) do { \
BUILD_BUG_ON(_field ## _LEN != 1); \
*(u8 *)MCDI_STRUCT_PTR(_buf, _field) = _value; \
} while (0)
#define MCDI_BYTE(_buf, _field) \
((void)BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 1), \
*MCDI_PTR(_buf, _field))
...
@@ -855,6 +855,7 @@ enum efx_xdp_tx_queues_mode {
* @timer_max_ns: Interrupt timer maximum value, in nanoseconds
* @irq_rx_adaptive: Adaptive IRQ moderation enabled for RX event queues
* @irqs_hooked: Channel interrupts are hooked
* @log_tc_errs: Error logging for TC filter insertion is enabled
* @irq_rx_mod_step_us: Step size for IRQ moderation for RX event queues
* @irq_rx_moderation_us: IRQ moderation time for RX event queues
* @msg_enable: Log message enable flags
@@ -1017,6 +1018,7 @@ struct efx_nic {
unsigned int timer_max_ns;
bool irq_rx_adaptive;
bool irqs_hooked;
bool log_tc_errs;
unsigned int irq_mod_step_us;
unsigned int irq_rx_moderation_us;
u32 msg_enable;
...
@@ -9,11 +9,60 @@
* by the Free Software Foundation, incorporated herein by reference.
*/
#include <net/pkt_cls.h>
#include "tc.h" #include "tc.h"
#include "tc_bindings.h"
#include "mae.h" #include "mae.h"
#include "ef100_rep.h" #include "ef100_rep.h"
#include "efx.h" #include "efx.h"
#define EFX_EFV_PF NULL
/* Look up the representor information (efv) for a device.
* May return NULL for the PF (us), or an error pointer for a device that
* isn't supported as a TC offload endpoint
*/
static struct efx_rep *efx_tc_flower_lookup_efv(struct efx_nic *efx,
struct net_device *dev)
{
struct efx_rep *efv;
if (!dev)
return ERR_PTR(-EOPNOTSUPP);
/* Is it us (the PF)? */
if (dev == efx->net_dev)
return EFX_EFV_PF;
/* Is it an efx vfrep at all? */
if (dev->netdev_ops != &efx_ef100_rep_netdev_ops)
return ERR_PTR(-EOPNOTSUPP);
/* Is it ours? We don't support TC rules that include another
* EF100's netdevices (not even on another port of the same NIC).
*/
efv = netdev_priv(dev);
if (efv->parent != efx)
return ERR_PTR(-EOPNOTSUPP);
return efv;
}
/* Convert a driver-internal vport ID into an external device (wire or VF) */
static s64 efx_tc_flower_external_mport(struct efx_nic *efx, struct efx_rep *efv)
{
u32 mport;
if (IS_ERR(efv))
return PTR_ERR(efv);
if (!efv) /* device is PF (us) */
efx_mae_mport_wire(efx, &mport);
else /* device is repr */
efx_mae_mport_mport(efx, efv->mport, &mport);
return mport;
}
static const struct rhashtable_params efx_tc_match_action_ht_params = {
.key_len = sizeof(unsigned long),
.key_offset = offsetof(struct efx_tc_flow_rule, cookie),
.head_offset = offsetof(struct efx_tc_flow_rule, linkage),
};
static void efx_tc_free_action_set(struct efx_nic *efx,
struct efx_tc_action_set *act, bool in_hw)
{
@@ -58,6 +107,333 @@ static void efx_tc_delete_rule(struct efx_nic *efx, struct efx_tc_flow_rule *rul
rule->fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL;
}
static void efx_tc_flow_free(void *ptr, void *arg)
{
struct efx_tc_flow_rule *rule = ptr;
struct efx_nic *efx = arg;
netif_err(efx, drv, efx->net_dev,
"tc rule %lx still present at teardown, removing\n",
rule->cookie);
efx_mae_delete_rule(efx, rule->fw_id);
/* Release entries in subsidiary tables */
efx_tc_free_action_set_list(efx, &rule->acts, true);
kfree(rule);
}
static int efx_tc_flower_parse_match(struct efx_nic *efx,
struct flow_rule *rule,
struct efx_tc_match *match,
struct netlink_ext_ack *extack)
{
struct flow_dissector *dissector = rule->match.dissector;
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
struct flow_match_control fm;
flow_rule_match_control(rule, &fm);
if (fm.mask->flags) {
efx_tc_err(efx, "Unsupported match on control.flags %#x\n",
fm.mask->flags);
NL_SET_ERR_MSG_MOD(extack, "Unsupported match on control.flags");
return -EOPNOTSUPP;
}
}
if (dissector->used_keys &
~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
BIT(FLOW_DISSECTOR_KEY_BASIC))) {
efx_tc_err(efx, "Unsupported flower keys %#x\n", dissector->used_keys);
NL_SET_ERR_MSG_MOD(extack, "Unsupported flower keys encountered");
return -EOPNOTSUPP;
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
struct flow_match_basic fm;
flow_rule_match_basic(rule, &fm);
if (fm.mask->n_proto) {
EFX_TC_ERR_MSG(efx, extack, "Unsupported eth_proto match\n");
return -EOPNOTSUPP;
}
if (fm.mask->ip_proto) {
EFX_TC_ERR_MSG(efx, extack, "Unsupported ip_proto match\n");
return -EOPNOTSUPP;
}
}
return 0;
}
static int efx_tc_flower_replace(struct efx_nic *efx,
struct net_device *net_dev,
struct flow_cls_offload *tc,
struct efx_rep *efv)
{
struct flow_rule *fr = flow_cls_offload_flow_rule(tc);
struct netlink_ext_ack *extack = tc->common.extack;
struct efx_tc_flow_rule *rule = NULL, *old;
struct efx_tc_action_set *act = NULL;
const struct flow_action_entry *fa;
struct efx_rep *from_efv, *to_efv;
struct efx_tc_match match;
s64 rc;
int i;
if (!tc_can_offload_extack(efx->net_dev, extack))
return -EOPNOTSUPP;
if (WARN_ON(!efx->tc))
return -ENETDOWN;
if (WARN_ON(!efx->tc->up))
return -ENETDOWN;
from_efv = efx_tc_flower_lookup_efv(efx, net_dev);
if (IS_ERR(from_efv)) {
/* Might be a tunnel decap rule from an indirect block.
* Support for those not implemented yet.
*/
return -EOPNOTSUPP;
}
if (efv != from_efv) {
/* can't happen */
efx_tc_err(efx, "for %s efv is %snull but from_efv is %snull\n",
netdev_name(net_dev), efv ? "non-" : "",
from_efv ? "non-" : "");
if (efv)
NL_SET_ERR_MSG_MOD(extack, "vfrep filter has PF net_dev (can't happen)");
else
NL_SET_ERR_MSG_MOD(extack, "PF filter has vfrep net_dev (can't happen)");
return -EINVAL;
}
/* Parse match */
memset(&match, 0, sizeof(match));
rc = efx_tc_flower_external_mport(efx, from_efv);
if (rc < 0) {
EFX_TC_ERR_MSG(efx, extack, "Failed to identify ingress m-port");
return rc;
}
match.value.ingress_port = rc;
match.mask.ingress_port = ~0;
rc = efx_tc_flower_parse_match(efx, fr, &match, extack);
if (rc)
return rc;
if (tc->common.chain_index) {
EFX_TC_ERR_MSG(efx, extack, "No support for nonzero chain_index");
return -EOPNOTSUPP;
}
match.mask.recirc_id = 0xff;
rc = efx_mae_match_check_caps(efx, &match.mask, extack);
if (rc)
return rc;
rule = kzalloc(sizeof(*rule), GFP_USER);
if (!rule)
return -ENOMEM;
INIT_LIST_HEAD(&rule->acts.list);
rule->cookie = tc->cookie;
old = rhashtable_lookup_get_insert_fast(&efx->tc->match_action_ht,
&rule->linkage,
efx_tc_match_action_ht_params);
if (old) {
netif_dbg(efx, drv, efx->net_dev,
"Already offloaded rule (cookie %lx)\n", tc->cookie);
rc = -EEXIST;
NL_SET_ERR_MSG_MOD(extack, "Rule already offloaded");
goto release;
}
/* Parse actions */
act = kzalloc(sizeof(*act), GFP_USER);
if (!act) {
rc = -ENOMEM;
goto release;
}
flow_action_for_each(i, fa, &fr->action) {
struct efx_tc_action_set save;
if (!act) {
/* more actions after a non-pipe action */
EFX_TC_ERR_MSG(efx, extack, "Action follows non-pipe action");
rc = -EINVAL;
goto release;
}
switch (fa->id) {
case FLOW_ACTION_DROP:
rc = efx_mae_alloc_action_set(efx, act);
if (rc) {
EFX_TC_ERR_MSG(efx, extack, "Failed to write action set to hw (drop)");
goto release;
}
list_add_tail(&act->list, &rule->acts.list);
act = NULL; /* end of the line */
break;
case FLOW_ACTION_REDIRECT:
case FLOW_ACTION_MIRRED:
save = *act;
to_efv = efx_tc_flower_lookup_efv(efx, fa->dev);
if (IS_ERR(to_efv)) {
EFX_TC_ERR_MSG(efx, extack, "Mirred egress device not on switch");
rc = PTR_ERR(to_efv);
goto release;
}
rc = efx_tc_flower_external_mport(efx, to_efv);
if (rc < 0) {
EFX_TC_ERR_MSG(efx, extack, "Failed to identify egress m-port");
goto release;
}
act->dest_mport = rc;
act->deliver = 1;
rc = efx_mae_alloc_action_set(efx, act);
if (rc) {
EFX_TC_ERR_MSG(efx, extack, "Failed to write action set to hw (mirred)");
goto release;
}
list_add_tail(&act->list, &rule->acts.list);
act = NULL;
if (fa->id == FLOW_ACTION_REDIRECT)
break; /* end of the line */
/* Mirror, so continue on with saved act */
act = kzalloc(sizeof(*act), GFP_USER);
if (!act) {
rc = -ENOMEM;
goto release;
}
*act = save;
break;
default:
efx_tc_err(efx, "Unhandled action %u\n", fa->id);
rc = -EOPNOTSUPP;
NL_SET_ERR_MSG_MOD(extack, "Unsupported action");
goto release;
}
}
if (act) {
/* Not shot/redirected, so deliver to default dest */
if (from_efv == EFX_EFV_PF)
/* Rule applies to traffic from the wire,
* and default dest is thus the PF
*/
efx_mae_mport_uplink(efx, &act->dest_mport);
else
/* Representor, so rule applies to traffic from
* representee, and default dest is thus the rep.
* All reps use the same mport for delivery
*/
efx_mae_mport_mport(efx, efx->tc->reps_mport_id,
&act->dest_mport);
act->deliver = 1;
rc = efx_mae_alloc_action_set(efx, act);
if (rc) {
EFX_TC_ERR_MSG(efx, extack, "Failed to write action set to hw (deliver)");
goto release;
}
list_add_tail(&act->list, &rule->acts.list);
act = NULL; /* Prevent double-free in error path */
}
netif_dbg(efx, drv, efx->net_dev,
"Successfully parsed filter (cookie %lx)\n",
tc->cookie);
rule->match = match;
rc = efx_mae_alloc_action_set_list(efx, &rule->acts);
if (rc) {
EFX_TC_ERR_MSG(efx, extack, "Failed to write action set list to hw");
goto release;
}
rc = efx_mae_insert_rule(efx, &rule->match, EFX_TC_PRIO_TC,
rule->acts.fw_id, &rule->fw_id);
if (rc) {
EFX_TC_ERR_MSG(efx, extack, "Failed to insert rule in hw");
goto release_acts;
}
return 0;
release_acts:
efx_mae_free_action_set_list(efx, &rule->acts);
release:
/* We failed to insert the rule, so free up any entries we created in
* subsidiary tables.
*/
if (act)
efx_tc_free_action_set(efx, act, false);
if (rule) {
rhashtable_remove_fast(&efx->tc->match_action_ht,
&rule->linkage,
efx_tc_match_action_ht_params);
efx_tc_free_action_set_list(efx, &rule->acts, false);
}
kfree(rule);
return rc;
}
static int efx_tc_flower_destroy(struct efx_nic *efx,
struct net_device *net_dev,
struct flow_cls_offload *tc)
{
struct netlink_ext_ack *extack = tc->common.extack;
struct efx_tc_flow_rule *rule;
rule = rhashtable_lookup_fast(&efx->tc->match_action_ht, &tc->cookie,
efx_tc_match_action_ht_params);
if (!rule) {
/* Only log a message if we're the ingress device. Otherwise
* it's a foreign filter and we might just not have been
* interested (e.g. we might not have been the egress device
* either).
*/
if (!IS_ERR(efx_tc_flower_lookup_efv(efx, net_dev)))
netif_warn(efx, drv, efx->net_dev,
"Filter %lx not found to remove\n", tc->cookie);
NL_SET_ERR_MSG_MOD(extack, "Flow cookie not found in offloaded rules");
return -ENOENT;
}
/* Remove it from HW */
efx_tc_delete_rule(efx, rule);
/* Delete it from SW */
rhashtable_remove_fast(&efx->tc->match_action_ht, &rule->linkage,
efx_tc_match_action_ht_params);
netif_dbg(efx, drv, efx->net_dev, "Removed filter %lx\n", rule->cookie);
kfree(rule);
return 0;
}
int efx_tc_flower(struct efx_nic *efx, struct net_device *net_dev,
struct flow_cls_offload *tc, struct efx_rep *efv)
{
int rc;
if (!efx->tc)
return -EOPNOTSUPP;
mutex_lock(&efx->tc->mutex);
switch (tc->command) {
case FLOW_CLS_REPLACE:
rc = efx_tc_flower_replace(efx, net_dev, tc, efv);
break;
case FLOW_CLS_DESTROY:
rc = efx_tc_flower_destroy(efx, net_dev, tc);
break;
default:
rc = -EOPNOTSUPP;
break;
}
mutex_unlock(&efx->tc->mutex);
return rc;
}
static int efx_tc_configure_default_rule(struct efx_nic *efx, u32 ing_port,
u32 eg_port, struct efx_tc_flow_rule *rule)
{
@@ -201,13 +577,37 @@ int efx_init_tc(struct efx_nic *efx)
{
int rc;
rc = efx_mae_get_caps(efx, efx->tc->caps);
if (rc)
return rc;
if (efx->tc->caps->match_field_count > MAE_NUM_FIELDS)
/* Firmware supports some match fields the driver doesn't know
* about. Not fatal, unless any of those fields are required
* (MAE_FIELD_SUPPORTED_MATCH_ALWAYS) but if so we don't know.
*/
netif_warn(efx, probe, efx->net_dev,
"FW reports additional match fields %u\n",
efx->tc->caps->match_field_count);
if (efx->tc->caps->action_prios < EFX_TC_PRIO__NUM) {
netif_err(efx, probe, efx->net_dev,
"Too few action prios supported (have %u, need %u)\n",
efx->tc->caps->action_prios, EFX_TC_PRIO__NUM);
return -EIO;
}
rc = efx_tc_configure_default_rule_pf(efx);
if (rc)
return rc;
rc = efx_tc_configure_default_rule_wire(efx);
if (rc)
return rc;
rc = efx_tc_configure_rep_mport(efx);
if (rc)
return rc;
efx->tc->up = true;
rc = flow_indr_dev_register(efx_tc_indr_setup_cb, efx);
if (rc)
return rc;
return 0;
}
void efx_fini_tc(struct efx_nic *efx)
@@ -215,20 +615,35 @@ void efx_fini_tc(struct efx_nic *efx)
/* We can get called even if efx_init_struct_tc() failed */
if (!efx->tc)
return;
if (efx->tc->up)
flow_indr_dev_unregister(efx_tc_indr_setup_cb, efx, efx_tc_block_unbind);
efx_tc_deconfigure_rep_mport(efx);
efx_tc_deconfigure_default_rule(efx, &efx->tc->dflt.pf);
efx_tc_deconfigure_default_rule(efx, &efx->tc->dflt.wire);
efx->tc->up = false;
}
int efx_init_struct_tc(struct efx_nic *efx)
{
int rc;
if (efx->type->is_vf)
return 0;
efx->tc = kzalloc(sizeof(*efx->tc), GFP_KERNEL);
if (!efx->tc)
return -ENOMEM;
efx->tc->caps = kzalloc(sizeof(struct mae_caps), GFP_KERNEL);
if (!efx->tc->caps) {
rc = -ENOMEM;
goto fail_alloc_caps;
}
INIT_LIST_HEAD(&efx->tc->block_list);
mutex_init(&efx->tc->mutex);
rc = rhashtable_init(&efx->tc->match_action_ht, &efx_tc_match_action_ht_params);
if (rc < 0)
goto fail_match_action_ht;
efx->tc->reps_filter_uc = -1;
efx->tc->reps_filter_mc = -1;
INIT_LIST_HEAD(&efx->tc->dflt.pf.acts.list);
@@ -236,6 +651,13 @@ int efx_init_struct_tc(struct efx_nic *efx)
INIT_LIST_HEAD(&efx->tc->dflt.wire.acts.list);
efx->tc->dflt.wire.fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL;
return 0;
fail_match_action_ht:
mutex_destroy(&efx->tc->mutex);
kfree(efx->tc->caps);
fail_alloc_caps:
kfree(efx->tc);
efx->tc = NULL;
return rc;
}
void efx_fini_struct_tc(struct efx_nic *efx)
@@ -243,10 +665,16 @@ void efx_fini_struct_tc(struct efx_nic *efx)
if (!efx->tc)
return;
mutex_lock(&efx->tc->mutex);
EFX_WARN_ON_PARANOID(efx->tc->dflt.pf.fw_id !=
MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL);
EFX_WARN_ON_PARANOID(efx->tc->dflt.wire.fw_id !=
MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL);
rhashtable_free_and_destroy(&efx->tc->match_action_ht, efx_tc_flow_free,
efx);
mutex_unlock(&efx->tc->mutex);
mutex_destroy(&efx->tc->mutex);
kfree(efx->tc->caps);
kfree(efx->tc);
efx->tc = NULL;
}
@@ -11,8 +11,28 @@
#ifndef EFX_TC_H
#define EFX_TC_H
#include <net/flow_offload.h>
#include <linux/rhashtable.h>
#include "net_driver.h" #include "net_driver.h"
/* Error reporting: convenience macros. For indicating why a given filter
* insertion is not supported; errors in internal operation or in the
* hardware should be netif_err()s instead.
*/
/* Used when error message is constant. */
#define EFX_TC_ERR_MSG(efx, extack, message) do { \
NL_SET_ERR_MSG_MOD(extack, message); \
if (efx->log_tc_errs) \
netif_info(efx, drv, efx->net_dev, "%s\n", message); \
} while (0)
/* Used when error message is not constant; caller should also supply a
* constant extack message with NL_SET_ERR_MSG_MOD().
*/
#define efx_tc_err(efx, fmt, args...) do { \
if (efx->log_tc_errs) \
netif_info(efx, drv, efx->net_dev, fmt, ##args);\
} while (0)
struct efx_tc_action_set {
u16 deliver:1;
u32 dest_mport;
@@ -23,6 +43,7 @@ struct efx_tc_action_set {
struct efx_tc_match_fields {
/* L1 */
u32 ingress_port;
u8 recirc_id;
};
struct efx_tc_match {
@@ -36,12 +57,15 @@ struct efx_tc_action_set_list {
};
struct efx_tc_flow_rule {
unsigned long cookie;
struct rhash_head linkage;
struct efx_tc_match match;
struct efx_tc_action_set_list acts;
u32 fw_id;
};
enum efx_tc_rule_prios {
EFX_TC_PRIO_TC, /* Rule inserted by TC */
EFX_TC_PRIO_DFLT, /* Default switch rule; one of efx_tc_default_rules */
EFX_TC_PRIO__NUM
};
@@ -49,6 +73,10 @@ enum efx_tc_rule_prios {
/**
* struct efx_tc_state - control plane data for TC offload
*
* @caps: MAE capabilities reported by MCDI
* @block_list: List of &struct efx_tc_block_binding
* @mutex: Used to serialise operations on TC hashtables
* @match_action_ht: Hashtable of TC match-action rules
* @reps_mport_id: MAE port allocated for representor RX
* @reps_filter_uc: VNIC filter for representor unicast RX (promisc)
* @reps_filter_mc: VNIC filter for representor multicast RX (allmulti)
@@ -57,14 +85,20 @@ enum efx_tc_rule_prios {
* %EFX_TC_PRIO_DFLT. Named by *ingress* port
* @dflt.pf: rule for traffic ingressing from PF (egresses to wire)
* @dflt.wire: rule for traffic ingressing from wire (egresses to PF)
* @up: have TC datastructures been set up?
*/
struct efx_tc_state {
struct mae_caps *caps;
struct list_head block_list;
struct mutex mutex;
struct rhashtable match_action_ht;
u32 reps_mport_id, reps_mport_vport_id;
s32 reps_filter_uc, reps_filter_mc;
struct {
struct efx_tc_flow_rule pf;
struct efx_tc_flow_rule wire;
} dflt;
bool up;
};
struct efx_rep;
@@ -72,6 +106,8 @@ struct efx_rep;
int efx_tc_configure_default_rule_rep(struct efx_rep *efv);
void efx_tc_deconfigure_default_rule(struct efx_nic *efx,
struct efx_tc_flow_rule *rule);
int efx_tc_flower(struct efx_nic *efx, struct net_device *net_dev,
struct flow_cls_offload *tc, struct efx_rep *efv);
int efx_tc_insert_rep_filters(struct efx_nic *efx);
void efx_tc_remove_rep_filters(struct efx_nic *efx);
...
// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
* Driver for Solarflare network controllers and boards
* Copyright 2022 Xilinx Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
#include "tc_bindings.h"
#include "tc.h"
struct efx_tc_block_binding {
struct list_head list;
struct efx_nic *efx;
struct efx_rep *efv;
struct net_device *otherdev; /* may actually be us */
struct flow_block *block;
};
static struct efx_tc_block_binding *efx_tc_find_binding(struct efx_nic *efx,
struct net_device *otherdev)
{
struct efx_tc_block_binding *binding;
ASSERT_RTNL();
list_for_each_entry(binding, &efx->tc->block_list, list)
if (binding->otherdev == otherdev)
return binding;
return NULL;
}
static int efx_tc_block_cb(enum tc_setup_type type, void *type_data,
void *cb_priv)
{
struct efx_tc_block_binding *binding = cb_priv;
struct flow_cls_offload *tcf = type_data;
switch (type) {
case TC_SETUP_CLSFLOWER:
return efx_tc_flower(binding->efx, binding->otherdev,
tcf, binding->efv);
default:
return -EOPNOTSUPP;
}
}
void efx_tc_block_unbind(void *cb_priv)
{
struct efx_tc_block_binding *binding = cb_priv;
list_del(&binding->list);
kfree(binding);
}
static struct efx_tc_block_binding *efx_tc_create_binding(
struct efx_nic *efx, struct efx_rep *efv,
struct net_device *otherdev, struct flow_block *block)
{
struct efx_tc_block_binding *binding = kmalloc(sizeof(*binding), GFP_KERNEL);
if (!binding)
return ERR_PTR(-ENOMEM);
binding->efx = efx;
binding->efv = efv;
binding->otherdev = otherdev;
binding->block = block;
list_add(&binding->list, &efx->tc->block_list);
return binding;
}
int efx_tc_setup_block(struct net_device *net_dev, struct efx_nic *efx,
struct flow_block_offload *tcb, struct efx_rep *efv)
{
struct efx_tc_block_binding *binding;
struct flow_block_cb *block_cb;
int rc;
if (tcb->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
return -EOPNOTSUPP;
if (WARN_ON(!efx->tc))
return -ENETDOWN;
switch (tcb->command) {
case FLOW_BLOCK_BIND:
binding = efx_tc_create_binding(efx, efv, net_dev, tcb->block);
if (IS_ERR(binding))
return PTR_ERR(binding);
block_cb = flow_block_cb_alloc(efx_tc_block_cb, binding,
binding, efx_tc_block_unbind);
rc = PTR_ERR_OR_ZERO(block_cb);
netif_dbg(efx, drv, efx->net_dev,
"bind %sdirect block for device %s, rc %d\n",
net_dev == efx->net_dev ? "" :
efv ? "semi" : "in",
net_dev ? net_dev->name : NULL, rc);
if (rc) {
list_del(&binding->list);
kfree(binding);
} else {
flow_block_cb_add(block_cb, tcb);
}
return rc;
case FLOW_BLOCK_UNBIND:
binding = efx_tc_find_binding(efx, net_dev);
if (binding) {
block_cb = flow_block_cb_lookup(tcb->block,
efx_tc_block_cb,
binding);
if (block_cb) {
flow_block_cb_remove(block_cb, tcb);
netif_dbg(efx, drv, efx->net_dev,
"unbound %sdirect block for device %s\n",
net_dev == efx->net_dev ? "" :
binding->efv ? "semi" : "in",
net_dev ? net_dev->name : NULL);
return 0;
}
}
/* If we're in driver teardown, then we expect to have
* already unbound all our blocks (we did it early while
* we still had MCDI to remove the filters), so getting
* unbind callbacks now isn't a problem.
*/
netif_cond_dbg(efx, drv, efx->net_dev,
!efx->tc->up, warn,
"%sdirect block unbind for device %s, was never bound\n",
net_dev == efx->net_dev ? "" : "in",
net_dev ? net_dev->name : NULL);
return -ENOENT;
default:
return -EOPNOTSUPP;
}
}
int efx_tc_indr_setup_cb(struct net_device *net_dev, struct Qdisc *sch,
void *cb_priv, enum tc_setup_type type,
void *type_data, void *data,
void (*cleanup)(struct flow_block_cb *block_cb))
{
struct flow_block_offload *tcb = type_data;
struct efx_tc_block_binding *binding;
struct flow_block_cb *block_cb;
struct efx_nic *efx = cb_priv;
bool is_ovs_int_port;
int rc;
if (!net_dev)
return -EOPNOTSUPP;
if (tcb->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
tcb->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
return -EOPNOTSUPP;
is_ovs_int_port = netif_is_ovs_master(net_dev);
if (tcb->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS &&
!is_ovs_int_port)
return -EOPNOTSUPP;
if (is_ovs_int_port)
return -EOPNOTSUPP;
switch (type) {
case TC_SETUP_BLOCK:
switch (tcb->command) {
case FLOW_BLOCK_BIND:
binding = efx_tc_create_binding(efx, NULL, net_dev, tcb->block);
if (IS_ERR(binding))
return PTR_ERR(binding);
block_cb = flow_indr_block_cb_alloc(efx_tc_block_cb, binding,
binding, efx_tc_block_unbind,
tcb, net_dev, sch, data, binding,
cleanup);
rc = PTR_ERR_OR_ZERO(block_cb);
netif_dbg(efx, drv, efx->net_dev,
"bind indr block for device %s, rc %d\n",
net_dev ? net_dev->name : NULL, rc);
if (rc) {
list_del(&binding->list);
kfree(binding);
} else {
flow_block_cb_add(block_cb, tcb);
}
return rc;
case FLOW_BLOCK_UNBIND:
binding = efx_tc_find_binding(efx, net_dev);
if (!binding)
return -ENOENT;
block_cb = flow_block_cb_lookup(tcb->block,
efx_tc_block_cb,
binding);
if (!block_cb)
return -ENOENT;
flow_indr_block_cb_remove(block_cb, tcb);
netif_dbg(efx, drv, efx->net_dev,
"unbind indr block for device %s\n",
net_dev ? net_dev->name : NULL);
return 0;
default:
return -EOPNOTSUPP;
}
default:
return -EOPNOTSUPP;
}
}
/* .ndo_setup_tc implementation
* Entry point for flower block and filter management.
*/
int efx_tc_setup(struct net_device *net_dev, enum tc_setup_type type,
void *type_data)
{
struct efx_nic *efx = efx_netdev_priv(net_dev);
if (efx->type->is_vf)
return -EOPNOTSUPP;
if (!efx->tc)
return -EOPNOTSUPP;
if (type == TC_SETUP_CLSFLOWER)
return efx_tc_flower(efx, net_dev, type_data, NULL);
if (type == TC_SETUP_BLOCK)
return efx_tc_setup_block(net_dev, efx, type_data, NULL);
return -EOPNOTSUPP;
}
/* SPDX-License-Identifier: GPL-2.0-only */
/****************************************************************************
* Driver for Solarflare network controllers and boards
* Copyright 2022 Xilinx Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
#ifndef EFX_TC_BINDINGS_H
#define EFX_TC_BINDINGS_H
#include "net_driver.h"
#include <net/sch_generic.h>
struct efx_rep;
void efx_tc_block_unbind(void *cb_priv);
int efx_tc_setup_block(struct net_device *net_dev, struct efx_nic *efx,
struct flow_block_offload *tcb, struct efx_rep *efv);
int efx_tc_setup(struct net_device *net_dev, enum tc_setup_type type,
void *type_data);
int efx_tc_indr_setup_cb(struct net_device *net_dev, struct Qdisc *sch,
void *cb_priv, enum tc_setup_type type,
void *type_data, void *data,
void (*cleanup)(struct flow_block_cb *block_cb));
#endif /* EFX_TC_BINDINGS_H */