Commit ae1ae5eb authored by David S. Miller

Merge branch 'sfc-conntrack-offload'

Edward Cree says:

====================
sfc: basic conntrack offload

Support offloading tracked connections and matching against them in
 TC chains on the PF and on representors.
Later patch series will add NAT and conntrack-on-tunnel-netdevs;
 keep it simple for now.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 1c2c8c35 01ad088f
......@@ -11,7 +11,7 @@ sfc-y += efx.o efx_common.o efx_channels.o nic.o \
sfc-$(CONFIG_SFC_MTD) += mtd.o
sfc-$(CONFIG_SFC_SRIOV) += sriov.o ef10_sriov.o ef100_sriov.o ef100_rep.o \
mae.o tc.o tc_bindings.o tc_counters.o \
tc_encap_actions.o
tc_encap_actions.o tc_conntrack.o
obj-$(CONFIG_SFC) += sfc.o
......
......@@ -26,6 +26,8 @@
/* Lowest bit numbers and widths */
#define EFX_DUMMY_FIELD_LBN 0
#define EFX_DUMMY_FIELD_WIDTH 0
#define EFX_BYTE_0_LBN 0
#define EFX_BYTE_0_WIDTH 8
#define EFX_WORD_0_LBN 0
#define EFX_WORD_0_WIDTH 16
#define EFX_WORD_1_LBN 16
......
......@@ -16,6 +16,7 @@
#include "mcdi_pcol.h"
#include "mcdi_pcol_mae.h"
#include "tc_encap_actions.h"
#include "tc_conntrack.h"
int efx_mae_allocate_mport(struct efx_nic *efx, u32 *id, u32 *label)
{
......@@ -227,6 +228,256 @@ void efx_mae_counters_grant_credits(struct work_struct *work)
rx_queue->granted_count += credits;
}
/* Fetch the descriptor (metadata plus key/response field layouts) for an
 * MAE table from the MC.
 *
 * Paginates through MC_CMD_TABLE_DESCRIPTOR: the first response carries the
 * table metadata and as many field descriptors as fit in our outbuf; further
 * requests (via the "more:" label) continue from the next field index.
 *
 * Returns 0 on success or a negative error code.  On failure, @desc is left
 * fully zeroed so that a later efx_mae_table_free_desc() (called from
 * efx_mae_free_tables()) does not double-free the key/resp arrays.
 */
static int efx_mae_table_get_desc(struct efx_nic *efx,
				  struct efx_tc_table_desc *desc,
				  u32 table_id)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_TABLE_DESCRIPTOR_OUT_LEN(16));
	MCDI_DECLARE_BUF(inbuf, MC_CMD_TABLE_DESCRIPTOR_IN_LEN);
	unsigned int offset = 0, i;
	size_t outlen;
	int rc;

	memset(desc, 0, sizeof(*desc));

	MCDI_SET_DWORD(inbuf, TABLE_DESCRIPTOR_IN_TABLE_ID, table_id);
more:
	MCDI_SET_DWORD(inbuf, TABLE_DESCRIPTOR_IN_FIRST_FIELDS_INDEX, offset);
	rc = efx_mcdi_rpc(efx, MC_CMD_TABLE_DESCRIPTOR, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		goto fail;
	if (outlen < MC_CMD_TABLE_DESCRIPTOR_OUT_LEN(1)) {
		rc = -EIO;
		goto fail;
	}
	if (!offset) { /* first iteration: get metadata */
		desc->type = MCDI_WORD(outbuf, TABLE_DESCRIPTOR_OUT_TYPE);
		desc->key_width = MCDI_WORD(outbuf, TABLE_DESCRIPTOR_OUT_KEY_WIDTH);
		desc->resp_width = MCDI_WORD(outbuf, TABLE_DESCRIPTOR_OUT_RESP_WIDTH);
		desc->n_keys = MCDI_WORD(outbuf, TABLE_DESCRIPTOR_OUT_N_KEY_FIELDS);
		desc->n_resps = MCDI_WORD(outbuf, TABLE_DESCRIPTOR_OUT_N_RESP_FIELDS);
		desc->n_prios = MCDI_WORD(outbuf, TABLE_DESCRIPTOR_OUT_N_PRIORITIES);
		desc->flags = MCDI_BYTE(outbuf, TABLE_DESCRIPTOR_OUT_FLAGS);
		rc = -EOPNOTSUPP;
		/* We don't support any table flags yet */
		if (desc->flags)
			goto fail;
		desc->scheme = MCDI_BYTE(outbuf, TABLE_DESCRIPTOR_OUT_SCHEME);
		/* Only scheme 0 is supported (see efx_mae_table_populate()) */
		if (desc->scheme)
			goto fail;
		rc = -ENOMEM;
		desc->keys = kcalloc(desc->n_keys,
				     sizeof(struct efx_tc_table_field_fmt),
				     GFP_KERNEL);
		if (!desc->keys)
			goto fail;
		desc->resps = kcalloc(desc->n_resps,
				      sizeof(struct efx_tc_table_field_fmt),
				      GFP_KERNEL);
		if (!desc->resps)
			goto fail;
	}
	/* FW could have returned more than the 16 field_descrs we
	 * made room for in our outbuf
	 */
	outlen = min(outlen, sizeof(outbuf));
	for (i = 0; i + offset < desc->n_keys + desc->n_resps; i++) {
		struct efx_tc_table_field_fmt *field;
		MCDI_DECLARE_STRUCT_PTR(fdesc);

		if (outlen < MC_CMD_TABLE_DESCRIPTOR_OUT_LEN(i + 1)) {
			/* This response is exhausted; fetch the next page */
			offset += i;
			goto more;
		}
		/* Key fields precede response fields in the FW's list */
		if (i + offset < desc->n_keys)
			field = desc->keys + i + offset;
		else
			field = desc->resps + (i + offset - desc->n_keys);
		fdesc = MCDI_ARRAY_STRUCT_PTR(outbuf,
					      TABLE_DESCRIPTOR_OUT_FIELDS, i);
		field->field_id = MCDI_STRUCT_WORD(fdesc,
						   TABLE_FIELD_DESCR_FIELD_ID);
		field->lbn = MCDI_STRUCT_WORD(fdesc, TABLE_FIELD_DESCR_LBN);
		field->width = MCDI_STRUCT_WORD(fdesc, TABLE_FIELD_DESCR_WIDTH);
		field->masking = MCDI_STRUCT_BYTE(fdesc, TABLE_FIELD_DESCR_MASK_TYPE);
		field->scheme = MCDI_STRUCT_BYTE(fdesc, TABLE_FIELD_DESCR_SCHEME);
	}
	return 0;

fail:
	kfree(desc->keys);
	kfree(desc->resps);
	/* Don't leave dangling pointers behind: efx_mae_free_tables() will
	 * call efx_mae_table_free_desc() on this desc later, which would
	 * otherwise double-free keys/resps.
	 */
	memset(desc, 0, sizeof(*desc));
	return rc;
}
/* Return the index of @field_id within @fields, or -EPROTO if the FW's
 * table layout does not contain that field at all.
 */
static int efx_mae_table_hook_find(u16 n_fields,
				   struct efx_tc_table_field_fmt *fields,
				   u16 field_id)
{
	unsigned int idx;

	for (idx = 0; idx < n_fields; idx++)
		if (fields[idx].field_id == field_id)
			return idx;
	return -EPROTO;
}
/* Look up a key / response field index by MCDI TABLE_FIELD_ID, using the
 * table descriptor previously fetched from the FW.
 */
#define TABLE_FIND_KEY(_desc, _id)	\
	efx_mae_table_hook_find((_desc)->n_keys, (_desc)->keys, _id)
#define TABLE_FIND_RESP(_desc, _id)	\
	efx_mae_table_hook_find((_desc)->n_resps, (_desc)->resps, _id)

/* Record the index of key field _mcdi_name in _meta->keys._name##_idx.
 * The index is stored in a u8, hence the U8_MAX check.  Evaluates to 0 on
 * success or a negative error (from TABLE_FIND_KEY, or -EOPNOTSUPP if the
 * index does not fit).  Relies on _meta being in scope in the caller.
 */
#define TABLE_HOOK_KEY(_meta, _name, _mcdi_name) ({	\
	int _rc = TABLE_FIND_KEY(&_meta->desc, TABLE_FIELD_ID_##_mcdi_name); \
	\
	if (_rc > U8_MAX)	\
		_rc = -EOPNOTSUPP;	\
	if (_rc >= 0) {	\
		_meta->keys._name##_idx = _rc;	\
		_rc = 0;	\
	}	\
	_rc;	\
})
/* As TABLE_HOOK_KEY, but for response fields (_meta->resps._name##_idx) */
#define TABLE_HOOK_RESP(_meta, _name, _mcdi_name) ({	\
	int _rc = TABLE_FIND_RESP(&_meta->desc, TABLE_FIELD_ID_##_mcdi_name); \
	\
	if (_rc > U8_MAX)	\
		_rc = -EOPNOTSUPP;	\
	if (_rc >= 0) {	\
		_meta->resps._name##_idx = _rc;	\
		_rc = 0;	\
	}	\
	_rc;	\
})
/* Locate and record every conntrack-table key and response field we need.
 * If any field is missing (or its index doesn't fit in a u8) the table is
 * unusable and the first error is returned; only on full success is
 * meta_ct->hooked set.
 */
static int efx_mae_table_hook_ct(struct efx_nic *efx,
				 struct efx_tc_table_ct *meta_ct)
{
	int rc;

	/* Key fields */
	rc = TABLE_HOOK_KEY(meta_ct, eth_proto, ETHER_TYPE);
	if (!rc)
		rc = TABLE_HOOK_KEY(meta_ct, ip_proto, IP_PROTO);
	if (!rc)
		rc = TABLE_HOOK_KEY(meta_ct, src_ip, SRC_IP);
	if (!rc)
		rc = TABLE_HOOK_KEY(meta_ct, dst_ip, DST_IP);
	if (!rc)
		rc = TABLE_HOOK_KEY(meta_ct, l4_sport, SRC_PORT);
	if (!rc)
		rc = TABLE_HOOK_KEY(meta_ct, l4_dport, DST_PORT);
	if (!rc)
		rc = TABLE_HOOK_KEY(meta_ct, zone, DOMAIN);
	/* Response fields */
	if (!rc)
		rc = TABLE_HOOK_RESP(meta_ct, dnat, NAT_DIR);
	if (!rc)
		rc = TABLE_HOOK_RESP(meta_ct, nat_ip, NAT_IP);
	if (!rc)
		rc = TABLE_HOOK_RESP(meta_ct, l4_natport, NAT_PORT);
	if (!rc)
		rc = TABLE_HOOK_RESP(meta_ct, mark, CT_MARK);
	if (!rc)
		rc = TABLE_HOOK_RESP(meta_ct, counter_id, COUNTER_ID);
	if (rc)
		return rc;
	meta_ct->hooked = true;
	return 0;
}
/* Release a table descriptor's field arrays and reset it to a clean state */
static void efx_mae_table_free_desc(struct efx_tc_table_desc *desc)
{
	kfree(desc->resps);
	kfree(desc->keys);
	memset(desc, 0, sizeof(*desc));
}
/* Scan the MC's table directory for table ID @tbl_req.
 *
 * Pages through MC_CMD_TABLE_LIST until either the requested ID is found
 * or all tbl_total IDs have been examined.  Returns true iff present.
 * Any MCDI failure is treated as "table not present".
 * Also clears the CT hook flag before probing.
 */
static bool efx_mae_check_table_exists(struct efx_nic *efx, u32 tbl_req)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_TABLE_LIST_OUT_LEN(16));
	MCDI_DECLARE_BUF(inbuf, MC_CMD_TABLE_LIST_IN_LEN);
	u32 tbl_id, tbl_total, tbl_cnt, pos = 0;
	size_t outlen, msg_max;
	bool ct_tbl = false;
	int rc, idx;

	msg_max = sizeof(outbuf);
	efx->tc->meta_ct.hooked = false;
more:
	/* Clear the whole response buffer between iterations.  The previous
	 * sizeof(*outbuf) only zeroed the first efx_dword_t (4 bytes).
	 */
	memset(outbuf, 0, sizeof(outbuf));
	MCDI_SET_DWORD(inbuf, TABLE_LIST_IN_FIRST_TABLE_ID_INDEX, pos);
	rc = efx_mcdi_rpc(efx, MC_CMD_TABLE_LIST, inbuf, sizeof(inbuf), outbuf,
			  msg_max, &outlen);
	if (rc)
		return false;
	if (outlen < MC_CMD_TABLE_LIST_OUT_LEN(1))
		return false;

	tbl_total = MCDI_DWORD(outbuf, TABLE_LIST_OUT_N_TABLES);
	/* Number of IDs actually carried in this response */
	tbl_cnt = MC_CMD_TABLE_LIST_OUT_TABLE_ID_NUM(min(outlen, msg_max));

	for (idx = 0; idx < tbl_cnt; idx++) {
		tbl_id = MCDI_ARRAY_DWORD(outbuf, TABLE_LIST_OUT_TABLE_ID, idx);
		if (tbl_id == tbl_req) {
			ct_tbl = true;
			break;
		}
	}

	pos += tbl_cnt;
	if (!ct_tbl && pos < tbl_total)
		goto more;

	return ct_tbl;
}
/* Probe the MC for the conntrack table and hook up its field layout.
 * Lack of FW support is not fatal: we log it and carry on without CT
 * offload (meta_ct.hooked stays false), so this always returns 0.
 */
int efx_mae_get_tables(struct efx_nic *efx)
{
	int rc;

	efx->tc->meta_ct.hooked = false;
	if (!efx_mae_check_table_exists(efx, TABLE_ID_CONNTRACK_TABLE)) {
		pci_info(efx->pci_dev,
			 "FW does not support conntrack table\n");
		return 0;
	}

	rc = efx_mae_table_get_desc(efx, &efx->tc->meta_ct.desc,
				    TABLE_ID_CONNTRACK_TABLE);
	if (rc) {
		pci_info(efx->pci_dev,
			 "FW does not support conntrack desc rc %d\n",
			 rc);
		return 0;
	}

	rc = efx_mae_table_hook_ct(efx, &efx->tc->meta_ct);
	if (rc) {
		pci_info(efx->pci_dev,
			 "FW does not support conntrack hook rc %d\n",
			 rc);
		return 0;
	}
	return 0;
}
/* Tear down conntrack table state set up by efx_mae_get_tables() */
void efx_mae_free_tables(struct efx_nic *efx)
{
	efx->tc->meta_ct.hooked = false;
	efx_mae_table_free_desc(&efx->tc->meta_ct.desc);
}
static int efx_mae_get_basic_caps(struct efx_nic *efx, struct mae_caps *caps)
{
MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_GET_CAPS_OUT_LEN);
......@@ -444,8 +695,13 @@ int efx_mae_match_check_caps(struct efx_nic *efx,
CHECK(L4_SPORT, l4_sport) ||
CHECK(L4_DPORT, l4_dport) ||
CHECK(TCP_FLAGS, tcp_flags) ||
CHECK_BIT(TCP_SYN_FIN_RST, tcp_syn_fin_rst) ||
CHECK_BIT(IS_IP_FRAG, ip_frag) ||
CHECK_BIT(IP_FIRST_FRAG, ip_firstfrag) ||
CHECK_BIT(DO_CT, ct_state_trk) ||
CHECK_BIT(CT_HIT, ct_state_est) ||
CHECK(CT_MARK, ct_mark) ||
CHECK(CT_DOMAIN, ct_zone) ||
CHECK(RECIRC_ID, recirc_id))
return rc;
/* Matches on outer fields are done in a separate hardware table,
......@@ -471,6 +727,90 @@ int efx_mae_match_check_caps(struct efx_nic *efx,
}
return 0;
}
/* Checks for match fields not supported in LHS Outer Rules.
 * These macros rely on locals @mask, @extack and @rc being in scope in the
 * calling function; they evaluate to the (possibly updated) @rc.
 */
#define UNSUPPORTED(_field) ({ \
	enum mask_type typ = classify_mask((const u8 *)&mask->_field, \
					   sizeof(mask->_field)); \
\
	if (typ != MASK_ZEROES) { \
		NL_SET_ERR_MSG_MOD(extack, "Unsupported match field " #_field);\
		rc = -EOPNOTSUPP; \
	} \
	rc; \
})
/* As UNSUPPORTED(), but for single-bit fields where a plain truth test
 * replaces the mask classification.
 */
#define UNSUPPORTED_BIT(_field) ({ \
	if (mask->_field) { \
		NL_SET_ERR_MSG_MOD(extack, "Unsupported match field " #_field);\
		rc = -EOPNOTSUPP; \
	} \
	rc; \
})
/* LHS rules are (normally) inserted in the Outer Rule table, which means
* they use ENC_ fields in hardware to match regular (not enc_) fields from
* &struct efx_tc_match_fields.
*/
int efx_mae_match_check_caps_lhs(struct efx_nic *efx,
const struct efx_tc_match_fields *mask,
struct netlink_ext_ack *extack)
{
const u8 *supported_fields = efx->tc->caps->outer_rule_fields;
__be32 ingress_port = cpu_to_be32(mask->ingress_port);
enum mask_type ingress_port_mask_type;
int rc;
/* Check for _PREFIX assumes big-endian, so we need to convert */
ingress_port_mask_type = classify_mask((const u8 *)&ingress_port,
sizeof(ingress_port));
rc = efx_mae_match_check_cap_typ(supported_fields[MAE_FIELD_INGRESS_PORT],
ingress_port_mask_type);
if (rc) {
NL_SET_ERR_MSG_FMT_MOD(extack, "No support for %s mask in field %s\n",
mask_type_name(ingress_port_mask_type),
"ingress_port");
return rc;
}
if (CHECK(ENC_ETHER_TYPE, eth_proto) ||
CHECK(ENC_VLAN0_TCI, vlan_tci[0]) ||
CHECK(ENC_VLAN0_PROTO, vlan_proto[0]) ||
CHECK(ENC_VLAN1_TCI, vlan_tci[1]) ||
CHECK(ENC_VLAN1_PROTO, vlan_proto[1]) ||
CHECK(ENC_ETH_SADDR, eth_saddr) ||
CHECK(ENC_ETH_DADDR, eth_daddr) ||
CHECK(ENC_IP_PROTO, ip_proto) ||
CHECK(ENC_IP_TOS, ip_tos) ||
CHECK(ENC_IP_TTL, ip_ttl) ||
CHECK_BIT(ENC_IP_FRAG, ip_frag) ||
UNSUPPORTED_BIT(ip_firstfrag) ||
CHECK(ENC_SRC_IP4, src_ip) ||
CHECK(ENC_DST_IP4, dst_ip) ||
#ifdef CONFIG_IPV6
CHECK(ENC_SRC_IP6, src_ip6) ||
CHECK(ENC_DST_IP6, dst_ip6) ||
#endif
CHECK(ENC_L4_SPORT, l4_sport) ||
CHECK(ENC_L4_DPORT, l4_dport) ||
UNSUPPORTED(tcp_flags) ||
CHECK_BIT(TCP_SYN_FIN_RST, tcp_syn_fin_rst))
return rc;
if (efx_tc_match_is_encap(mask)) {
/* can't happen; disallowed for local rules, translated
* for foreign rules.
*/
NL_SET_ERR_MSG_MOD(extack, "Unexpected encap match in LHS rule");
return -EOPNOTSUPP;
}
if (UNSUPPORTED(enc_keyid) ||
/* Can't filter on conntrack in LHS rules */
UNSUPPORTED_BIT(ct_state_trk) ||
UNSUPPORTED_BIT(ct_state_est) ||
UNSUPPORTED(ct_mark) ||
UNSUPPORTED(recirc_id))
return rc;
return 0;
}
#undef UNSUPPORTED
#undef CHECK_BIT
#undef CHECK
......@@ -1153,6 +1493,465 @@ int efx_mae_unregister_encap_match(struct efx_nic *efx,
return 0;
}
/* Fill in the MAE_ENC_FIELD_PAIRS match criteria for an Outer Rule from an
 * LHS match.  Since LHS rules live in the Outer Rule table, regular (not
 * enc_) match fields are written into the hardware's ENC_ slots; actual
 * enc-keys must already have been rejected or translated by the caller
 * (see the WARN_ON_ONCE checks at the end).
 *
 * Returns 0 on success or -EOPNOTSUPP for unsupportable matches.
 */
static int efx_mae_populate_lhs_match_criteria(MCDI_DECLARE_STRUCT_PTR(match_crit),
					       const struct efx_tc_match *match)
{
	if (match->mask.ingress_port) {
		/* Only an exact (all-ones) ingress_port mask is supported */
		if (~match->mask.ingress_port)
			return -EOPNOTSUPP;
		MCDI_STRUCT_SET_DWORD(match_crit,
				      MAE_ENC_FIELD_PAIRS_INGRESS_MPORT_SELECTOR,
				      match->value.ingress_port);
	}
	MCDI_STRUCT_SET_DWORD(match_crit, MAE_ENC_FIELD_PAIRS_INGRESS_MPORT_SELECTOR_MASK,
			      match->mask.ingress_port);
	/* Ethernet header fields */
	MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_ETHER_TYPE_BE,
				match->value.eth_proto);
	MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_ETHER_TYPE_BE_MASK,
				match->mask.eth_proto);
	MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_VLAN0_TCI_BE,
				match->value.vlan_tci[0]);
	MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_VLAN0_TCI_BE_MASK,
				match->mask.vlan_tci[0]);
	MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_VLAN0_PROTO_BE,
				match->value.vlan_proto[0]);
	MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_VLAN0_PROTO_BE_MASK,
				match->mask.vlan_proto[0]);
	MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_VLAN1_TCI_BE,
				match->value.vlan_tci[1]);
	MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_VLAN1_TCI_BE_MASK,
				match->mask.vlan_tci[1]);
	MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_VLAN1_PROTO_BE,
				match->value.vlan_proto[1]);
	MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_VLAN1_PROTO_BE_MASK,
				match->mask.vlan_proto[1]);
	memcpy(MCDI_STRUCT_PTR(match_crit, MAE_ENC_FIELD_PAIRS_ENC_ETH_SADDR_BE),
	       match->value.eth_saddr, ETH_ALEN);
	memcpy(MCDI_STRUCT_PTR(match_crit, MAE_ENC_FIELD_PAIRS_ENC_ETH_SADDR_BE_MASK),
	       match->mask.eth_saddr, ETH_ALEN);
	memcpy(MCDI_STRUCT_PTR(match_crit, MAE_ENC_FIELD_PAIRS_ENC_ETH_DADDR_BE),
	       match->value.eth_daddr, ETH_ALEN);
	memcpy(MCDI_STRUCT_PTR(match_crit, MAE_ENC_FIELD_PAIRS_ENC_ETH_DADDR_BE_MASK),
	       match->mask.eth_daddr, ETH_ALEN);
	/* IP header fields */
	MCDI_STRUCT_SET_BYTE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_IP_PROTO,
			     match->value.ip_proto);
	MCDI_STRUCT_SET_BYTE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_IP_PROTO_MASK,
			     match->mask.ip_proto);
	MCDI_STRUCT_SET_BYTE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_IP_TOS,
			     match->value.ip_tos);
	MCDI_STRUCT_SET_BYTE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_IP_TOS_MASK,
			     match->mask.ip_tos);
	MCDI_STRUCT_SET_BYTE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_IP_TTL,
			     match->value.ip_ttl);
	MCDI_STRUCT_SET_BYTE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_IP_TTL_MASK,
			     match->mask.ip_ttl);
	/* ip_frag is a single bit within the VLAN_FLAGS byte */
	MCDI_STRUCT_POPULATE_BYTE_1(match_crit,
				    MAE_ENC_FIELD_PAIRS_ENC_VLAN_FLAGS,
				    MAE_ENC_FIELD_PAIRS_ENC_IP_FRAG,
				    match->value.ip_frag);
	MCDI_STRUCT_POPULATE_BYTE_1(match_crit,
				    MAE_ENC_FIELD_PAIRS_ENC_VLAN_FLAGS_MASK,
				    MAE_ENC_FIELD_PAIRS_ENC_IP_FRAG_MASK,
				    match->mask.ip_frag);
	MCDI_STRUCT_SET_DWORD_BE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_SRC_IP4_BE,
				 match->value.src_ip);
	MCDI_STRUCT_SET_DWORD_BE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_SRC_IP4_BE_MASK,
				 match->mask.src_ip);
	MCDI_STRUCT_SET_DWORD_BE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_DST_IP4_BE,
				 match->value.dst_ip);
	MCDI_STRUCT_SET_DWORD_BE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_DST_IP4_BE_MASK,
				 match->mask.dst_ip);
#ifdef CONFIG_IPV6
	memcpy(MCDI_STRUCT_PTR(match_crit, MAE_ENC_FIELD_PAIRS_ENC_SRC_IP6_BE),
	       &match->value.src_ip6, sizeof(struct in6_addr));
	memcpy(MCDI_STRUCT_PTR(match_crit, MAE_ENC_FIELD_PAIRS_ENC_SRC_IP6_BE_MASK),
	       &match->mask.src_ip6, sizeof(struct in6_addr));
	memcpy(MCDI_STRUCT_PTR(match_crit, MAE_ENC_FIELD_PAIRS_ENC_DST_IP6_BE),
	       &match->value.dst_ip6, sizeof(struct in6_addr));
	memcpy(MCDI_STRUCT_PTR(match_crit, MAE_ENC_FIELD_PAIRS_ENC_DST_IP6_BE_MASK),
	       &match->mask.dst_ip6, sizeof(struct in6_addr));
#endif
	/* L4 ports */
	MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_L4_SPORT_BE,
				match->value.l4_sport);
	MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_L4_SPORT_BE_MASK,
				match->mask.l4_sport);
	MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_L4_DPORT_BE,
				match->value.l4_dport);
	MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_L4_DPORT_BE_MASK,
				match->mask.l4_dport);
	/* No enc-keys in LHS rules.  Caps check should have caught this; any
	 * enc-keys from an fLHS should have been translated to regular keys
	 * and any EM should be a pseudo (we're an OR so can't have a direct
	 * EM with another OR).
	 */
	if (WARN_ON_ONCE(match->encap && !match->encap->type))
		return -EOPNOTSUPP;
	if (WARN_ON_ONCE(match->mask.enc_src_ip))
		return -EOPNOTSUPP;
	if (WARN_ON_ONCE(match->mask.enc_dst_ip))
		return -EOPNOTSUPP;
#ifdef CONFIG_IPV6
	if (WARN_ON_ONCE(!ipv6_addr_any(&match->mask.enc_src_ip6)))
		return -EOPNOTSUPP;
	if (WARN_ON_ONCE(!ipv6_addr_any(&match->mask.enc_dst_ip6)))
		return -EOPNOTSUPP;
#endif
	if (WARN_ON_ONCE(match->mask.enc_ip_tos))
		return -EOPNOTSUPP;
	if (WARN_ON_ONCE(match->mask.enc_ip_ttl))
		return -EOPNOTSUPP;
	if (WARN_ON_ONCE(match->mask.enc_sport))
		return -EOPNOTSUPP;
	if (WARN_ON_ONCE(match->mask.enc_dport))
		return -EOPNOTSUPP;
	if (WARN_ON_ONCE(match->mask.enc_keyid))
		return -EOPNOTSUPP;
	return 0;
}
/* Insert an LHS rule into the hardware Outer Rule table at priority @prio.
 * On success the FW-assigned Outer Rule ID is stored in rule->fw_id.
 * Returns 0 or a negative error.
 */
static int efx_mae_insert_lhs_outer_rule(struct efx_nic *efx,
					 struct efx_tc_lhs_rule *rule, u32 prio)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_OUTER_RULE_INSERT_IN_LEN(MAE_ENC_FIELD_PAIRS_LEN));
	MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_OUTER_RULE_INSERT_OUT_LEN);
	MCDI_DECLARE_STRUCT_PTR(match_crit);
	const struct efx_tc_lhs_action *act;
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, MAE_OUTER_RULE_INSERT_IN_PRIO, prio);
	/* match */
	match_crit = _MCDI_DWORD(inbuf, MAE_OUTER_RULE_INSERT_IN_FIELD_MATCH_CRITERIA);
	rc = efx_mae_populate_lhs_match_criteria(match_crit, &rule->match);
	if (rc)
		return rc;

	/* action */
	act = &rule->lhs_act;
	MCDI_SET_DWORD(inbuf, MAE_OUTER_RULE_INSERT_IN_ENCAP_TYPE,
		       MAE_MCDI_ENCAP_TYPE_NONE);
	/* We always inhibit CT lookup on TCP_INTERESTING_FLAGS, since the
	 * SW path needs to process the packet to update the conntrack tables
	 * on connection establishment (SYN) or termination (FIN, RST).
	 */
	MCDI_POPULATE_DWORD_6(inbuf, MAE_OUTER_RULE_INSERT_IN_LOOKUP_CONTROL,
			      MAE_OUTER_RULE_INSERT_IN_DO_CT, !!act->zone,
			      MAE_OUTER_RULE_INSERT_IN_CT_TCP_FLAGS_INHIBIT, 1,
			      MAE_OUTER_RULE_INSERT_IN_CT_DOMAIN,
			      act->zone ? act->zone->zone : 0,
			      MAE_OUTER_RULE_INSERT_IN_CT_VNI_MODE,
			      MAE_CT_VNI_MODE_ZERO,
			      MAE_OUTER_RULE_INSERT_IN_DO_COUNT, !!act->count,
			      MAE_OUTER_RULE_INSERT_IN_RECIRC_ID,
			      act->rid ? act->rid->fw_id : 0);
	/* Attach the counter only if the action requested one */
	if (act->count)
		MCDI_SET_DWORD(inbuf, MAE_OUTER_RULE_INSERT_IN_COUNTER_ID,
			       act->count->cnt->fw_id);
	rc = efx_mcdi_rpc(efx, MC_CMD_MAE_OUTER_RULE_INSERT, inbuf,
			  sizeof(inbuf), outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < sizeof(outbuf))
		return -EIO;
	rule->fw_id = MCDI_DWORD(outbuf, MAE_OUTER_RULE_INSERT_OUT_OR_ID);
	return 0;
}
/* Insert an LHS rule into hardware.  Currently LHS rules always live in
 * the Outer Rule table; this wrapper exists so callers need not care.
 */
int efx_mae_insert_lhs_rule(struct efx_nic *efx, struct efx_tc_lhs_rule *rule,
			    u32 prio)
{
	return efx_mae_insert_lhs_outer_rule(efx, rule, prio);
}
/* Remove an LHS rule's Outer Rule from hardware and invalidate its fw_id.
 * Returns 0 or a negative error.
 */
static int efx_mae_remove_lhs_outer_rule(struct efx_nic *efx,
					 struct efx_tc_lhs_rule *rule)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_OUTER_RULE_REMOVE_OUT_LEN(1));
	MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_OUTER_RULE_REMOVE_IN_LEN(1));
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, MAE_OUTER_RULE_REMOVE_IN_OR_ID, rule->fw_id);
	rc = efx_mcdi_rpc(efx, MC_CMD_MAE_OUTER_RULE_REMOVE, inbuf,
			  sizeof(inbuf), outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < sizeof(outbuf))
		return -EIO;
	/* FW freed a different ID than we asked for, should also never happen.
	 * Warn because it means we've now got a different idea to the FW of
	 * what outer rules exist, which could cause mayhem later.
	 */
	if (WARN_ON(MCDI_DWORD(outbuf, MAE_OUTER_RULE_REMOVE_OUT_REMOVED_OR_ID) != rule->fw_id))
		return -EIO;
	/* We're probably about to free @rule, but let's just make sure its
	 * fw_id is blatted so that it won't look valid if it leaks out.
	 */
	rule->fw_id = MC_CMD_MAE_OUTER_RULE_INSERT_OUT_OUTER_RULE_ID_NULL;
	return 0;
}
/* Remove an LHS rule from hardware.  Counterpart of efx_mae_insert_lhs_rule();
 * currently always an Outer Rule removal.
 */
int efx_mae_remove_lhs_rule(struct efx_nic *efx, struct efx_tc_lhs_rule *rule)
{
	return efx_mae_remove_lhs_outer_rule(efx, rule);
}
/* Populating is done by taking each byte of @value in turn and storing
 * it in the appropriate bits of @row.  @value must be big-endian; we
 * convert it to little-endianness as we go.
 *
 * @row is an array of DIV_ROUND_UP(row_bits, 32) little-endian dwords;
 * only bits belonging to this field are ORed in, so a row can be built
 * up field by field.  Returns 0, or a negative error for unsupported
 * schemes / size mismatches.
 */
static int efx_mae_table_populate(struct efx_tc_table_field_fmt field,
				  __le32 *row, size_t row_bits,
				  void *value, size_t value_size)
{
	unsigned int i;

	/* For now only scheme 0 is supported for any field, so we check here
	 * (rather than, say, in calling code, which knows the semantics and
	 * could in principle encode for other schemes).
	 */
	if (field.scheme)
		return -EOPNOTSUPP;
	if (DIV_ROUND_UP(field.width, 8) != value_size)
		return -EINVAL;
	if (field.lbn + field.width > row_bits)
		return -EINVAL;
	for (i = 0; i < value_size; i++) {
		unsigned int bn = field.lbn + i * 8;
		unsigned int wn = bn / 32;
		u64 v;

		v = ((u8 *)value)[value_size - i - 1];
		v <<= (bn % 32);
		row[wn] |= cpu_to_le32(v & 0xffffffff);
		/* Only touch the following dword when this byte actually
		 * spills into it.  The previous test (wn * 32 < row_bits)
		 * was always true, so a field ending in the row's last
		 * dword caused an OR of zero one dword past the end of
		 * @row — an out-of-bounds write.
		 */
		if (v >> 32)
			row[wn + 1] |= cpu_to_le32(v >> 32);
	}
	return 0;
}
/* Encode a boolean into a 1-bit table field */
static int efx_mae_table_populate_bool(struct efx_tc_table_field_fmt field,
				       __le32 *row, size_t row_bits, bool value)
{
	u8 byte = value ? 1 : 0;

	if (field.width != 1)
		return -EINVAL;
	return efx_mae_table_populate(field, row, row_bits, &byte, sizeof(byte));
}
/* Encode an IPv4 address into an IPv6-sized (128-bit) table field.
 * The v4 address occupies the first four bytes; the rest stay zero.
 */
static int efx_mae_table_populate_ipv4(struct efx_tc_table_field_fmt field,
				       __le32 *row, size_t row_bits, __be32 value)
{
	struct in6_addr addr = {};

	if (field.width != 128)
		return -EINVAL;
	addr.s6_addr32[0] = value;
	return efx_mae_table_populate(field, row, row_bits, &addr, sizeof(addr));
}
/* Encode the low 24 bits of @value into a 3-byte table field.
 * efx_mae_table_populate() wants big-endian bytes, so emit the three
 * least-significant bytes of @value most-significant first.
 */
static int efx_mae_table_populate_u24(struct efx_tc_table_field_fmt field,
				      __le32 *row, size_t row_bits, u32 value)
{
	u8 buf[3];

	buf[0] = value >> 16;
	buf[1] = value >> 8;
	buf[2] = value;
	return efx_mae_table_populate(field, row, row_bits, buf, sizeof(buf));
}
/* Populate a table-row field whose width exactly matches sizeof(_value) * 8;
 * evaluates to -EINVAL on a width mismatch.  These macros rely on @efx
 * being in scope in the caller.
 */
#define _TABLE_POPULATE(dst, dw, _field, _value) ({	\
	typeof(_value) _v = _value;	\
\
	(_field.width == sizeof(_value) * 8) ?	\
		efx_mae_table_populate(_field, dst, dw, &_v,	\
				       sizeof(_v)) : -EINVAL;	\
})
/* Write an IPv4 address into the (IPv6-sized) key field _field of _table */
#define TABLE_POPULATE_KEY_IPV4(dst, _table, _field, _value)	\
	efx_mae_table_populate_ipv4(efx->tc->meta_##_table.desc.keys	\
				    [efx->tc->meta_##_table.keys._field##_idx],\
				    dst, efx->tc->meta_##_table.desc.key_width,\
				    _value)
/* Write _value into key field _field of _table, using the field-index
 * hooks recorded by efx_mae_table_hook_ct().
 */
#define TABLE_POPULATE_KEY(dst, _table, _field, _value)	\
	_TABLE_POPULATE(dst, efx->tc->meta_##_table.desc.key_width,	\
			efx->tc->meta_##_table.desc.keys	\
			[efx->tc->meta_##_table.keys._field##_idx],	\
			_value)
/* As the KEY variants above, but for response fields */
#define TABLE_POPULATE_RESP_BOOL(dst, _table, _field, _value)	\
	efx_mae_table_populate_bool(efx->tc->meta_##_table.desc.resps	\
				    [efx->tc->meta_##_table.resps._field##_idx],\
				    dst, efx->tc->meta_##_table.desc.resp_width,\
				    _value)
#define TABLE_POPULATE_RESP(dst, _table, _field, _value)	\
	_TABLE_POPULATE(dst, efx->tc->meta_##_table.desc.resp_width,	\
			efx->tc->meta_##_table.desc.resps	\
			[efx->tc->meta_##_table.resps._field##_idx],	\
			_value)
/* 24-bit response field (used for the counter ID) */
#define TABLE_POPULATE_RESP_U24(dst, _table, _field, _value)	\
	efx_mae_table_populate_u24(efx->tc->meta_##_table.desc.resps	\
				   [efx->tc->meta_##_table.resps._field##_idx],\
				   dst, efx->tc->meta_##_table.desc.resp_width,\
				   _value)
/* Build the conntrack-table key for @conn into @key.
 * @kw is the key buffer size in dwords; it is currently unused here (the
 * width checks happen inside the TABLE_POPULATE_KEY macros against
 * desc.key_width) but documents the buffer's extent.
 * IPv4 addresses are placed in the IPv6-sized key fields via
 * TABLE_POPULATE_KEY_IPV4().  Returns 0 or a negative error.
 */
static int efx_mae_populate_ct_key(struct efx_nic *efx, __le32 *key, size_t kw,
				   struct efx_tc_ct_entry *conn)
{
	bool ipv6 = conn->eth_proto == htons(ETH_P_IPV6);
	int rc;

	rc = TABLE_POPULATE_KEY(key, ct, eth_proto, conn->eth_proto);
	if (rc)
		return rc;
	rc = TABLE_POPULATE_KEY(key, ct, ip_proto, conn->ip_proto);
	if (rc)
		return rc;
	if (ipv6)
		rc = TABLE_POPULATE_KEY(key, ct, src_ip, conn->src_ip6);
	else
		rc = TABLE_POPULATE_KEY_IPV4(key, ct, src_ip, conn->src_ip);
	if (rc)
		return rc;
	if (ipv6)
		rc = TABLE_POPULATE_KEY(key, ct, dst_ip, conn->dst_ip6);
	else
		rc = TABLE_POPULATE_KEY_IPV4(key, ct, dst_ip, conn->dst_ip);
	if (rc)
		return rc;
	rc = TABLE_POPULATE_KEY(key, ct, l4_sport, conn->l4_sport);
	if (rc)
		return rc;
	rc = TABLE_POPULATE_KEY(key, ct, l4_dport, conn->l4_dport);
	if (rc)
		return rc;
	/* Zone (CT_DOMAIN) is stored big-endian in the key */
	return TABLE_POPULATE_KEY(key, ct, zone, cpu_to_be16(conn->zone->zone));
}
/* Offload one tracked connection into the hardware conntrack table.
 * Builds the key and response rows from @conn and issues
 * MC_CMD_TABLE_INSERT (key followed immediately by response in IN_DATA).
 * Returns 0, -EOPNOTSUPP if the table was never hooked, or a negative
 * error from allocation / encoding / MCDI.
 */
int efx_mae_insert_ct(struct efx_nic *efx, struct efx_tc_ct_entry *conn)
{
	bool ipv6 = conn->eth_proto == htons(ETH_P_IPV6);
	__le32 *key = NULL, *resp = NULL;
	size_t inlen, kw, rw;
	efx_dword_t *inbuf;
	int rc = -ENOMEM;

	/* Check table access is supported */
	if (!efx->tc->meta_ct.hooked)
		return -EOPNOTSUPP;

	/* key/resp widths are in bits; convert to dwords for IN_LEN */
	kw = DIV_ROUND_UP(efx->tc->meta_ct.desc.key_width, 32);
	rw = DIV_ROUND_UP(efx->tc->meta_ct.desc.resp_width, 32);
	BUILD_BUG_ON(sizeof(__le32) != MC_CMD_TABLE_INSERT_IN_DATA_LEN);

	inlen = MC_CMD_TABLE_INSERT_IN_LEN(kw + rw);
	if (inlen > MC_CMD_TABLE_INSERT_IN_LENMAX_MCDI2)
		return -E2BIG;
	inbuf = kzalloc(inlen, GFP_KERNEL);
	if (!inbuf)
		return -ENOMEM;

	key = kcalloc(kw, sizeof(__le32), GFP_KERNEL);
	if (!key)
		goto out_free;
	resp = kcalloc(rw, sizeof(__le32), GFP_KERNEL);
	if (!resp)
		goto out_free;

	rc = efx_mae_populate_ct_key(efx, key, kw, conn);
	if (rc)
		goto out_free;

	rc = TABLE_POPULATE_RESP_BOOL(resp, ct, dnat, conn->dnat);
	if (rc)
		goto out_free;
	/* No support in hw for IPv6 NAT; field is only 32 bits.
	 * (For IPv6, rc is still 0 from the previous call, so we fall
	 * through without touching the nat_ip field.)
	 */
	if (!ipv6)
		rc = TABLE_POPULATE_RESP(resp, ct, nat_ip, conn->nat_ip);
	if (rc)
		goto out_free;
	rc = TABLE_POPULATE_RESP(resp, ct, l4_natport, conn->l4_natport);
	if (rc)
		goto out_free;
	/* ct_mark is stored big-endian in the response */
	rc = TABLE_POPULATE_RESP(resp, ct, mark, cpu_to_be32(conn->mark));
	if (rc)
		goto out_free;
	rc = TABLE_POPULATE_RESP_U24(resp, ct, counter_id, conn->cnt->fw_id);
	if (rc)
		goto out_free;

	MCDI_SET_DWORD(inbuf, TABLE_INSERT_IN_TABLE_ID, TABLE_ID_CONNTRACK_TABLE);
	MCDI_SET_WORD(inbuf, TABLE_INSERT_IN_KEY_WIDTH,
		      efx->tc->meta_ct.desc.key_width);
	/* MASK_WIDTH is zero as CT is a BCAM */
	MCDI_SET_WORD(inbuf, TABLE_INSERT_IN_RESP_WIDTH,
		      efx->tc->meta_ct.desc.resp_width);
	/* IN_DATA carries the key dwords then the response dwords */
	memcpy(MCDI_PTR(inbuf, TABLE_INSERT_IN_DATA), key, kw * sizeof(__le32));
	memcpy(MCDI_PTR(inbuf, TABLE_INSERT_IN_DATA) + kw * sizeof(__le32),
	       resp, rw * sizeof(__le32));

	BUILD_BUG_ON(MC_CMD_TABLE_INSERT_OUT_LEN);

	rc = efx_mcdi_rpc(efx, MC_CMD_TABLE_INSERT, inbuf, inlen, NULL, 0, NULL);

out_free:
	kfree(resp);
	kfree(key);
	kfree(inbuf);
	return rc;
}
/* Remove a tracked connection from the hardware conntrack table.
 * Rebuilds the key for @conn (a BCAM lookup needs only the key) and
 * issues MC_CMD_TABLE_DELETE.  Returns 0, -EOPNOTSUPP if the table was
 * never hooked, or a negative error.
 */
int efx_mae_remove_ct(struct efx_nic *efx, struct efx_tc_ct_entry *conn)
{
	__le32 *key = NULL;
	efx_dword_t *inbuf;
	size_t inlen, kw;
	int rc = -ENOMEM;

	/* Check table access is supported */
	if (!efx->tc->meta_ct.hooked)
		return -EOPNOTSUPP;

	/* key width is in bits; convert to dwords for IN_LEN */
	kw = DIV_ROUND_UP(efx->tc->meta_ct.desc.key_width, 32);
	BUILD_BUG_ON(sizeof(__le32) != MC_CMD_TABLE_DELETE_IN_DATA_LEN);

	inlen = MC_CMD_TABLE_DELETE_IN_LEN(kw);
	if (inlen > MC_CMD_TABLE_DELETE_IN_LENMAX_MCDI2)
		return -E2BIG;
	inbuf = kzalloc(inlen, GFP_KERNEL);
	if (!inbuf)
		return -ENOMEM;

	key = kcalloc(kw, sizeof(__le32), GFP_KERNEL);
	if (!key)
		goto out_free;

	rc = efx_mae_populate_ct_key(efx, key, kw, conn);
	if (rc)
		goto out_free;

	MCDI_SET_DWORD(inbuf, TABLE_DELETE_IN_TABLE_ID, TABLE_ID_CONNTRACK_TABLE);
	MCDI_SET_WORD(inbuf, TABLE_DELETE_IN_KEY_WIDTH,
		      efx->tc->meta_ct.desc.key_width);
	/* MASK_WIDTH is zero as CT is a BCAM */
	/* RESP_WIDTH is zero for DELETE */
	memcpy(MCDI_PTR(inbuf, TABLE_DELETE_IN_DATA), key, kw * sizeof(__le32));

	BUILD_BUG_ON(MC_CMD_TABLE_DELETE_OUT_LEN);

	rc = efx_mcdi_rpc(efx, MC_CMD_TABLE_DELETE, inbuf, inlen, NULL, 0, NULL);

out_free:
	kfree(key);
	kfree(inbuf);
	return rc;
}
static int efx_mae_populate_match_criteria(MCDI_DECLARE_STRUCT_PTR(match_crit),
const struct efx_tc_match *match)
{
......@@ -1165,20 +1964,40 @@ static int efx_mae_populate_match_criteria(MCDI_DECLARE_STRUCT_PTR(match_crit),
}
MCDI_STRUCT_SET_DWORD(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_INGRESS_MPORT_SELECTOR_MASK,
match->mask.ingress_port);
EFX_POPULATE_DWORD_2(*_MCDI_STRUCT_DWORD(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_FLAGS),
EFX_POPULATE_DWORD_5(*_MCDI_STRUCT_DWORD(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_FLAGS),
MAE_FIELD_MASK_VALUE_PAIRS_V2_DO_CT,
match->value.ct_state_trk,
MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_HIT,
match->value.ct_state_est,
MAE_FIELD_MASK_VALUE_PAIRS_V2_IS_IP_FRAG,
match->value.ip_frag,
MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_FIRST_FRAG,
match->value.ip_firstfrag);
EFX_POPULATE_DWORD_2(*_MCDI_STRUCT_DWORD(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_FLAGS_MASK),
match->value.ip_firstfrag,
MAE_FIELD_MASK_VALUE_PAIRS_V2_TCP_SYN_FIN_RST,
match->value.tcp_syn_fin_rst);
EFX_POPULATE_DWORD_5(*_MCDI_STRUCT_DWORD(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_FLAGS_MASK),
MAE_FIELD_MASK_VALUE_PAIRS_V2_DO_CT,
match->mask.ct_state_trk,
MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_HIT,
match->mask.ct_state_est,
MAE_FIELD_MASK_VALUE_PAIRS_V2_IS_IP_FRAG,
match->mask.ip_frag,
MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_FIRST_FRAG,
match->mask.ip_firstfrag);
match->mask.ip_firstfrag,
MAE_FIELD_MASK_VALUE_PAIRS_V2_TCP_SYN_FIN_RST,
match->mask.tcp_syn_fin_rst);
MCDI_STRUCT_SET_BYTE(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_RECIRC_ID,
match->value.recirc_id);
MCDI_STRUCT_SET_BYTE(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_RECIRC_ID_MASK,
match->mask.recirc_id);
MCDI_STRUCT_SET_DWORD(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_MARK,
match->value.ct_mark);
MCDI_STRUCT_SET_DWORD(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_MARK_MASK,
match->mask.ct_mark);
MCDI_STRUCT_SET_WORD(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_DOMAIN,
match->value.ct_zone);
MCDI_STRUCT_SET_WORD(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_DOMAIN_MASK,
match->mask.ct_zone);
MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_ETHER_TYPE_BE,
match->value.eth_proto);
MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_ETHER_TYPE_BE_MASK,
......
......@@ -66,6 +66,9 @@ int efx_mae_start_counters(struct efx_nic *efx, struct efx_rx_queue *rx_queue);
int efx_mae_stop_counters(struct efx_nic *efx, struct efx_rx_queue *rx_queue);
void efx_mae_counters_grant_credits(struct work_struct *work);
int efx_mae_get_tables(struct efx_nic *efx);
void efx_mae_free_tables(struct efx_nic *efx);
#define MAE_NUM_FIELDS (MAE_FIELD_ENC_VNET_ID + 1)
struct mae_caps {
......@@ -81,6 +84,9 @@ int efx_mae_get_caps(struct efx_nic *efx, struct mae_caps *caps);
int efx_mae_match_check_caps(struct efx_nic *efx,
const struct efx_tc_match_fields *mask,
struct netlink_ext_ack *extack);
int efx_mae_match_check_caps_lhs(struct efx_nic *efx,
const struct efx_tc_match_fields *mask,
struct netlink_ext_ack *extack);
int efx_mae_check_encap_match_caps(struct efx_nic *efx, bool ipv6,
u8 ip_tos_mask, __be16 udp_sport_mask,
struct netlink_ext_ack *extack);
......@@ -109,6 +115,12 @@ int efx_mae_register_encap_match(struct efx_nic *efx,
struct efx_tc_encap_match *encap);
int efx_mae_unregister_encap_match(struct efx_nic *efx,
struct efx_tc_encap_match *encap);
int efx_mae_insert_lhs_rule(struct efx_nic *efx, struct efx_tc_lhs_rule *rule,
u32 prio);
int efx_mae_remove_lhs_rule(struct efx_nic *efx, struct efx_tc_lhs_rule *rule);
struct efx_tc_ct_entry; /* see tc_conntrack.h */
int efx_mae_insert_ct(struct efx_nic *efx, struct efx_tc_ct_entry *conn);
int efx_mae_remove_ct(struct efx_nic *efx, struct efx_tc_ct_entry *conn);
int efx_mae_insert_rule(struct efx_nic *efx, const struct efx_tc_match *match,
u32 prio, u32 acts_id, u32 *id);
......
......@@ -218,14 +218,28 @@ void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev);
BUILD_BUG_ON(_field ## _LEN != 1); \
*(u8 *)MCDI_STRUCT_PTR(_buf, _field) = _value; \
} while (0)
/* Build a one-byte MCDI structure field value containing just the bit-field
 * _name set to _value (other bits zero), via a temporary dword.
 */
#define MCDI_STRUCT_POPULATE_BYTE_1(_buf, _field, _name, _value) do {	\
	efx_dword_t _temp;	\
	EFX_POPULATE_DWORD_1(_temp, _name, _value);	\
	MCDI_STRUCT_SET_BYTE(_buf, _field,	\
			     EFX_DWORD_FIELD(_temp, EFX_BYTE_0));	\
	} while (0)
/* Read a single-byte field from an MCDI command buffer / structure;
 * compile-time checked against the field's declared length.
 */
#define MCDI_BYTE(_buf, _field)	\
	((void)BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 1),	\
	 *MCDI_PTR(_buf, _field))
#define MCDI_STRUCT_BYTE(_buf, _field)	\
	((void)BUILD_BUG_ON_ZERO(_field ## _LEN != 1),	\
	 *MCDI_STRUCT_PTR(_buf, _field))
/* Write a 16-bit little-endian field; the offset must be 2-byte aligned */
#define MCDI_SET_WORD(_buf, _field, _value) do {	\
	BUILD_BUG_ON(MC_CMD_ ## _field ## _LEN != 2);	\
	BUILD_BUG_ON(MC_CMD_ ## _field ## _OFST & 1);	\
	*(__force __le16 *)MCDI_PTR(_buf, _field) = cpu_to_le16(_value);\
	} while (0)
#define MCDI_STRUCT_SET_WORD(_buf, _field, _value) do {	\
	BUILD_BUG_ON(_field ## _LEN != 2);	\
	BUILD_BUG_ON(_field ## _OFST & 1);	\
	*(__force __le16 *)MCDI_STRUCT_PTR(_buf, _field) = cpu_to_le16(_value);\
	} while (0)
#define MCDI_WORD(_buf, _field) \
((u16)BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 2) + \
le16_to_cpu(*(__force const __le16 *)MCDI_PTR(_buf, _field)))
......
......@@ -12,9 +12,11 @@
#include <net/pkt_cls.h>
#include <net/vxlan.h>
#include <net/geneve.h>
#include <net/tc_act/tc_ct.h>
#include "tc.h"
#include "tc_bindings.h"
#include "tc_encap_actions.h"
#include "tc_conntrack.h"
#include "mae.h"
#include "ef100_rep.h"
#include "efx.h"
......@@ -96,6 +98,18 @@ static const struct rhashtable_params efx_tc_match_action_ht_params = {
.head_offset = offsetof(struct efx_tc_flow_rule, linkage),
};
/* Hashtable of offloaded LHS rules, keyed by the TC flow cookie */
static const struct rhashtable_params efx_tc_lhs_rule_ht_params = {
	.key_len	= sizeof(unsigned long),
	.key_offset	= offsetof(struct efx_tc_lhs_rule, cookie),
	.head_offset	= offsetof(struct efx_tc_lhs_rule, linkage),
};

/* Hashtable of recirc-ID allocations.  The key is the entire struct
 * contents preceding the .linkage member (key_offset 0, key_len up to
 * offsetof(linkage)).
 */
static const struct rhashtable_params efx_tc_recirc_ht_params = {
	.key_len	= offsetof(struct efx_tc_recirc_id, linkage),
	.key_offset	= 0,
	.head_offset	= offsetof(struct efx_tc_recirc_id, linkage),
};
static void efx_tc_free_action_set(struct efx_nic *efx,
struct efx_tc_action_set *act, bool in_hw)
{
......@@ -215,6 +229,7 @@ static int efx_tc_flower_parse_match(struct efx_nic *efx,
BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IP) |
BIT_ULL(FLOW_DISSECTOR_KEY_ENC_PORTS) |
BIT_ULL(FLOW_DISSECTOR_KEY_ENC_CONTROL) |
BIT_ULL(FLOW_DISSECTOR_KEY_CT) |
BIT_ULL(FLOW_DISSECTOR_KEY_TCP) |
BIT_ULL(FLOW_DISSECTOR_KEY_IP))) {
NL_SET_ERR_MSG_FMT_MOD(extack, "Unsupported flower keys %#llx",
......@@ -356,6 +371,31 @@ static int efx_tc_flower_parse_match(struct efx_nic *efx,
dissector->used_keys);
return -EOPNOTSUPP;
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CT)) {
struct flow_match_ct fm;
flow_rule_match_ct(rule, &fm);
match->value.ct_state_trk = !!(fm.key->ct_state & TCA_FLOWER_KEY_CT_FLAGS_TRACKED);
match->mask.ct_state_trk = !!(fm.mask->ct_state & TCA_FLOWER_KEY_CT_FLAGS_TRACKED);
match->value.ct_state_est = !!(fm.key->ct_state & TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED);
match->mask.ct_state_est = !!(fm.mask->ct_state & TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED);
if (fm.mask->ct_state & ~(TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED)) {
NL_SET_ERR_MSG_FMT_MOD(extack,
"Unsupported ct_state match %#x",
fm.mask->ct_state);
return -EOPNOTSUPP;
}
match->value.ct_mark = fm.key->ct_mark;
match->mask.ct_mark = fm.mask->ct_mark;
match->value.ct_zone = fm.key->ct_zone;
match->mask.ct_zone = fm.mask->ct_zone;
if (memchr_inv(fm.mask->ct_labels, 0, sizeof(fm.mask->ct_labels))) {
NL_SET_ERR_MSG_MOD(extack, "Matching on ct_label not supported");
return -EOPNOTSUPP;
}
}
return 0;
}
......@@ -575,12 +615,65 @@ static int efx_tc_flower_record_encap_match(struct efx_nic *efx,
return rc;
}
/* Map a (chain_index, net_dev) pair to a hardware recirculation ID entry,
 * allocating a fresh ID from recirc_ida (range 1..U8_MAX; 0 is implicitly
 * reserved for chain 0) when no mapping exists yet.  Entries are refcounted
 * and shared between rules; drop with efx_tc_put_recirc_id().
 *
 * Returns the (possibly pre-existing) entry, or an ERR_PTR.  -EAGAIN means
 * we raced with a concurrent teardown of the existing entry.
 */
static struct efx_tc_recirc_id *efx_tc_get_recirc_id(struct efx_nic *efx,
						     u32 chain_index,
						     struct net_device *net_dev)
{
	struct efx_tc_recirc_id *rid, *old;
	int rc;

	rid = kzalloc(sizeof(*rid), GFP_USER);
	if (!rid)
		return ERR_PTR(-ENOMEM);
	rid->chain_index = chain_index;
	/* We don't take a reference here, because it's implied - if there's
	 * a rule on the net_dev that's been offloaded to us, then the net_dev
	 * can't go away until the rule has been deoffloaded.
	 */
	rid->net_dev = net_dev;
	old = rhashtable_lookup_get_insert_fast(&efx->tc->recirc_ht,
						&rid->linkage,
						efx_tc_recirc_ht_params);
	if (old) {
		/* don't need our new entry */
		kfree(rid);
		/* a ref of zero means 'old' is being torn down concurrently */
		if (!refcount_inc_not_zero(&old->ref))
			return ERR_PTR(-EAGAIN);
		/* existing entry found */
		rid = old;
	} else {
		/* new entry is visible in the hashtable before it has an ID;
		 * on allocation failure, unpublish it again before freeing
		 */
		rc = ida_alloc_range(&efx->tc->recirc_ida, 1, U8_MAX, GFP_USER);
		if (rc < 0) {
			rhashtable_remove_fast(&efx->tc->recirc_ht,
					       &rid->linkage,
					       efx_tc_recirc_ht_params);
			kfree(rid);
			return ERR_PTR(rc);
		}
		rid->fw_id = rc;
		refcount_set(&rid->ref, 1);
	}
	return rid;
}
/* Drop one reference on @rid; when the last user goes, unpublish the
 * mapping, return its ID to the allocator and free it.
 */
static void efx_tc_put_recirc_id(struct efx_nic *efx, struct efx_tc_recirc_id *rid)
{
	if (refcount_dec_and_test(&rid->ref)) {
		rhashtable_remove_fast(&efx->tc->recirc_ht, &rid->linkage,
				       efx_tc_recirc_ht_params);
		ida_free(&efx->tc->recirc_ida, rid->fw_id);
		kfree(rid);
	}
}
static void efx_tc_delete_rule(struct efx_nic *efx, struct efx_tc_flow_rule *rule)
{
efx_mae_delete_rule(efx, rule->fw_id);
/* Release entries in subsidiary tables */
efx_tc_free_action_set_list(efx, &rule->acts, true);
if (rule->match.rid)
efx_tc_put_recirc_id(efx, rule->match.rid);
if (rule->match.encap)
efx_tc_flower_release_encap_match(efx, rule->match.encap);
rule->fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL;
......@@ -650,6 +743,163 @@ static bool efx_tc_flower_action_order_ok(const struct efx_tc_action_set *act,
}
}
/**
* DOC: TC conntrack sequences
*
* The MAE hardware can handle at most two rounds of action rule matching,
* consequently we support conntrack through the notion of a "left-hand side
* rule". This is a rule which typically contains only the actions "ct" and
* "goto chain N", and corresponds to one or more "right-hand side rules" in
* chain N, which typically match on +trk+est, and may perform ct(nat) actions.
* RHS rules go in the Action Rule table as normal but with a nonzero recirc_id
* (the hardware equivalent of chain_index), while LHS rules may go in either
* the Action Rule or the Outer Rule table, the latter being preferred for
* performance reasons, and set both DO_CT and a recirc_id in their response.
*
* Besides the RHS rules, there are often also similar rules matching on
* +trk+new which perform the ct(commit) action. These are not offloaded.
*/
/* Decide whether a TC rule should be offloaded as a left-hand side rule
 * (see the "TC conntrack sequences" DOC comment): any goto makes it an LHS
 * rule, as does a ct action on a rule that isn't known to be +trk.
 */
static bool efx_tc_rule_is_lhs_rule(struct flow_rule *fr,
				    struct efx_tc_match *match)
{
	const struct flow_action_entry *fa;
	int i;

	flow_action_for_each(i, fa, &fr->action) {
		if (fa->id == FLOW_ACTION_GOTO)
			return true;
		if (fa->id != FLOW_ACTION_CT)
			continue;
		/* If rule is -trk, or doesn't mention trk at all, then
		 * a CT action implies a conntrack lookup (hence it's an
		 * LHS rule). If rule is +trk, then a CT action could
		 * just be ct(nat) or even ct(commit) (though the latter
		 * can't be offloaded).
		 */
		if (!match->mask.ct_state_trk || !match->value.ct_state_trk)
			return true;
	}
	return false;
}
/* Parse the action list of a left-hand side rule and populate
 * @rule->lhs_act.  Supported actions are a single ct lookup (no commit,
 * clear or NAT) followed by a terminating goto to a nonzero chain.
 *
 * Resources acquired here (recirc ID, counter, CT zone binding) are left
 * in @rule->lhs_act for the caller to release with
 * efx_tc_flower_release_lhs_actions() on failure, so error paths here
 * simply return.
 *
 * Return: 0 on success, negative error code (with extack set) otherwise.
 */
static int efx_tc_flower_handle_lhs_actions(struct efx_nic *efx,
					    struct flow_cls_offload *tc,
					    struct flow_rule *fr,
					    struct net_device *net_dev,
					    struct efx_tc_lhs_rule *rule)
{
	struct netlink_ext_ack *extack = tc->common.extack;
	struct efx_tc_lhs_action *act = &rule->lhs_act;
	const struct flow_action_entry *fa;
	bool pipe = true;
	int i;

	flow_action_for_each(i, fa, &fr->action) {
		struct efx_tc_ct_zone *ct_zone;
		struct efx_tc_recirc_id *rid;

		if (!pipe) {
			/* more actions after a non-pipe action */
			NL_SET_ERR_MSG_MOD(extack, "Action follows non-pipe action");
			return -EINVAL;
		}
		switch (fa->id) {
		case FLOW_ACTION_GOTO:
			if (!fa->chain_index) {
				NL_SET_ERR_MSG_MOD(extack, "Can't goto chain 0, no looping in hw");
				return -EOPNOTSUPP;
			}
			rid = efx_tc_get_recirc_id(efx, fa->chain_index,
						   net_dev);
			if (IS_ERR(rid)) {
				NL_SET_ERR_MSG_MOD(extack, "Failed to allocate a hardware recirculation ID for this chain_index");
				return PTR_ERR(rid);
			}
			act->rid = rid;
			if (fa->hw_stats) {
				struct efx_tc_counter_index *cnt;

				/* Only delayed (periodic polling) stats are
				 * supported by the MAE counter machinery.
				 */
				if (!(fa->hw_stats & FLOW_ACTION_HW_STATS_DELAYED)) {
					NL_SET_ERR_MSG_FMT_MOD(extack,
							       "hw_stats_type %u not supported (only 'delayed')",
							       fa->hw_stats);
					return -EOPNOTSUPP;
				}
				cnt = efx_tc_flower_get_counter_index(efx, tc->cookie,
								      EFX_TC_COUNTER_TYPE_OR);
				if (IS_ERR(cnt)) {
					NL_SET_ERR_MSG_MOD(extack, "Failed to obtain a counter");
					return PTR_ERR(cnt);
				}
				WARN_ON(act->count); /* can't happen */
				act->count = cnt;
			}
			/* goto is a terminating action */
			pipe = false;
			break;
		case FLOW_ACTION_CT:
			if (act->zone) {
				NL_SET_ERR_MSG_MOD(extack, "Can't offload multiple ct actions");
				return -EOPNOTSUPP;
			}
			if (fa->ct.action & (TCA_CT_ACT_COMMIT |
					     TCA_CT_ACT_FORCE)) {
				NL_SET_ERR_MSG_MOD(extack, "Can't offload ct commit/force");
				return -EOPNOTSUPP;
			}
			if (fa->ct.action & TCA_CT_ACT_CLEAR) {
				NL_SET_ERR_MSG_MOD(extack, "Can't clear ct in LHS rule");
				return -EOPNOTSUPP;
			}
			if (fa->ct.action & (TCA_CT_ACT_NAT |
					     TCA_CT_ACT_NAT_SRC |
					     TCA_CT_ACT_NAT_DST)) {
				NL_SET_ERR_MSG_MOD(extack, "Can't perform NAT in LHS rule - packet isn't conntracked yet");
				return -EOPNOTSUPP;
			}
			if (fa->ct.action) {
				/* extack messages must not end in a newline */
				NL_SET_ERR_MSG_FMT_MOD(extack, "Unhandled ct.action %u for LHS rule",
						       fa->ct.action);
				return -EOPNOTSUPP;
			}
			ct_zone = efx_tc_ct_register_zone(efx, fa->ct.zone,
							  fa->ct.flow_table);
			if (IS_ERR(ct_zone)) {
				NL_SET_ERR_MSG_MOD(extack, "Failed to register for CT updates");
				return PTR_ERR(ct_zone);
			}
			act->zone = ct_zone;
			break;
		default:
			NL_SET_ERR_MSG_FMT_MOD(extack, "Unhandled action %u for LHS rule",
					       fa->id);
			return -EOPNOTSUPP;
		}
	}

	if (pipe) {
		NL_SET_ERR_MSG_MOD(extack, "Missing goto chain in LHS rule");
		return -EOPNOTSUPP;
	}
	return 0;
}
/* Release whatever resources efx_tc_flower_handle_lhs_actions() acquired.
 * Each member is NULL unless it was acquired, so this is safe to call on a
 * partially-populated (or all-zeroes) lhs_action.
 */
static void efx_tc_flower_release_lhs_actions(struct efx_nic *efx,
					      struct efx_tc_lhs_action *act)
{
	struct efx_tc_counter_index *cnt = act->count;
	struct efx_tc_recirc_id *rid = act->rid;
	struct efx_tc_ct_zone *zone = act->zone;

	if (rid)
		efx_tc_put_recirc_id(efx, rid);
	if (zone)
		efx_tc_ct_unregister_zone(efx, zone);
	if (cnt)
		efx_tc_flower_put_counter_index(efx, cnt);
}
static int efx_tc_flower_replace_foreign(struct efx_nic *efx,
struct net_device *net_dev,
struct flow_cls_offload *tc)
......@@ -684,11 +934,40 @@ static int efx_tc_flower_replace_foreign(struct efx_nic *efx,
match.mask.ingress_port = ~0;
if (tc->common.chain_index) {
NL_SET_ERR_MSG_MOD(extack, "No support for nonzero chain_index");
return -EOPNOTSUPP;
struct efx_tc_recirc_id *rid;
rid = efx_tc_get_recirc_id(efx, tc->common.chain_index, net_dev);
if (IS_ERR(rid)) {
NL_SET_ERR_MSG_FMT_MOD(extack,
"Failed to allocate a hardware recirculation ID for chain_index %u",
tc->common.chain_index);
return PTR_ERR(rid);
}
match.rid = rid;
match.value.recirc_id = rid->fw_id;
}
match.mask.recirc_id = 0xff;
/* AR table can't match on DO_CT (+trk). But a commonly used pattern is
* +trk+est, which is strictly implied by +est, so rewrite it to that.
*/
if (match.mask.ct_state_trk && match.value.ct_state_trk &&
match.mask.ct_state_est && match.value.ct_state_est)
match.mask.ct_state_trk = 0;
/* Thanks to CT_TCP_FLAGS_INHIBIT, packets with interesting flags could
* match +trk-est (CT_HIT=0) despite being on an established connection.
* So make -est imply -tcp_syn_fin_rst match to ensure these packets
* still hit the software path.
*/
if (match.mask.ct_state_est && !match.value.ct_state_est) {
if (match.value.tcp_syn_fin_rst) {
/* Can't offload this combination */
rc = -EOPNOTSUPP;
goto release;
}
match.mask.tcp_syn_fin_rst = true;
}
flow_action_for_each(i, fa, &fr->action) {
switch (fa->id) {
case FLOW_ACTION_REDIRECT:
......@@ -705,12 +984,13 @@ static int efx_tc_flower_replace_foreign(struct efx_nic *efx,
if (!found) { /* We don't care. */
netif_dbg(efx, drv, efx->net_dev,
"Ignoring foreign filter that doesn't egdev us\n");
return -EOPNOTSUPP;
rc = -EOPNOTSUPP;
goto release;
}
rc = efx_mae_match_check_caps(efx, &match.mask, NULL);
if (rc)
return rc;
goto release;
if (efx_tc_match_is_encap(&match.mask)) {
enum efx_encap_type type;
......@@ -719,7 +999,8 @@ static int efx_tc_flower_replace_foreign(struct efx_nic *efx,
if (type == EFX_ENCAP_TYPE_NONE) {
NL_SET_ERR_MSG_MOD(extack,
"Egress encap match on unsupported tunnel device");
return -EOPNOTSUPP;
rc = -EOPNOTSUPP;
goto release;
}
rc = efx_mae_check_encap_type_supported(efx, type);
......@@ -727,25 +1008,26 @@ static int efx_tc_flower_replace_foreign(struct efx_nic *efx,
NL_SET_ERR_MSG_FMT_MOD(extack,
"Firmware reports no support for %s encap match",
efx_tc_encap_type_name(type));
return rc;
goto release;
}
rc = efx_tc_flower_record_encap_match(efx, &match, type,
EFX_TC_EM_DIRECT, 0, 0,
extack);
if (rc)
return rc;
goto release;
} else {
/* This is not a tunnel decap rule, ignore it */
netif_dbg(efx, drv, efx->net_dev,
"Ignoring foreign filter without encap match\n");
return -EOPNOTSUPP;
rc = -EOPNOTSUPP;
goto release;
}
rule = kzalloc(sizeof(*rule), GFP_USER);
if (!rule) {
rc = -ENOMEM;
goto out_free;
goto release;
}
INIT_LIST_HEAD(&rule->acts.list);
rule->cookie = tc->cookie;
......@@ -757,7 +1039,7 @@ static int efx_tc_flower_replace_foreign(struct efx_nic *efx,
"Ignoring already-offloaded rule (cookie %lx)\n",
tc->cookie);
rc = -EEXIST;
goto out_free;
goto release;
}
act = kzalloc(sizeof(*act), GFP_USER);
......@@ -915,21 +1197,95 @@ static int efx_tc_flower_replace_foreign(struct efx_nic *efx,
/* We failed to insert the rule, so free up any entries we created in
* subsidiary tables.
*/
if (match.rid)
efx_tc_put_recirc_id(efx, match.rid);
if (act)
efx_tc_free_action_set(efx, act, false);
if (rule) {
rhashtable_remove_fast(&efx->tc->match_action_ht,
&rule->linkage,
efx_tc_match_action_ht_params);
if (!old)
rhashtable_remove_fast(&efx->tc->match_action_ht,
&rule->linkage,
efx_tc_match_action_ht_params);
efx_tc_free_action_set_list(efx, &rule->acts, false);
}
out_free:
kfree(rule);
if (match.encap)
efx_tc_flower_release_encap_match(efx, match.encap);
return rc;
}
/* Offload a left-hand side rule (conntrack lookup and/or goto-chain) via
 * efx_mae_insert_lhs_rule(); see the "TC conntrack sequences" DOC comment.
 * @efv is currently unused.
 *
 * Return: 0 on success, negative error code (with extack set) otherwise.
 */
static int efx_tc_flower_replace_lhs(struct efx_nic *efx,
				     struct flow_cls_offload *tc,
				     struct flow_rule *fr,
				     struct efx_tc_match *match,
				     struct efx_rep *efv,
				     struct net_device *net_dev)
{
	struct netlink_ext_ack *extack = tc->common.extack;
	struct efx_tc_lhs_rule *rule, *old;
	int rc;

	if (tc->common.chain_index) {
		NL_SET_ERR_MSG_MOD(extack, "LHS rule only allowed in chain 0");
		return -EOPNOTSUPP;
	}

	if (match->mask.ct_state_trk && match->value.ct_state_trk) {
		NL_SET_ERR_MSG_MOD(extack, "LHS rule can never match +trk");
		return -EOPNOTSUPP;
	}
	/* LHS rules are always -trk, so we don't need to match on that */
	match->mask.ct_state_trk = 0;
	match->value.ct_state_trk = 0;

	rc = efx_mae_match_check_caps_lhs(efx, &match->mask, extack);
	if (rc)
		return rc;

	rule = kzalloc(sizeof(*rule), GFP_USER);
	if (!rule)
		return -ENOMEM;
	rule->cookie = tc->cookie;
	old = rhashtable_lookup_get_insert_fast(&efx->tc->lhs_rule_ht,
						&rule->linkage,
						efx_tc_lhs_rule_ht_params);
	if (old) {
		netif_dbg(efx, drv, efx->net_dev,
			  "Already offloaded rule (cookie %lx)\n", tc->cookie);
		rc = -EEXIST;
		NL_SET_ERR_MSG_MOD(extack, "Rule already offloaded");
		goto release;
	}

	/* Parse actions */
	/* See note in efx_tc_flower_replace() regarding passed net_dev
	 * (used for efx_tc_get_recirc_id()).
	 */
	rc = efx_tc_flower_handle_lhs_actions(efx, tc, fr, efx->net_dev, rule);
	if (rc)
		goto release;

	rule->match = *match;

	rc = efx_mae_insert_lhs_rule(efx, rule, EFX_TC_PRIO_TC);
	if (rc) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to insert rule in hw");
		goto release;
	}
	netif_dbg(efx, drv, efx->net_dev,
		  "Successfully parsed lhs rule (cookie %lx)\n",
		  tc->cookie);
	return 0;

release:
	/* rule was kzalloc'd, so lhs_act is all-NULL unless actions were
	 * parsed; release is a no-op for unacquired resources
	 */
	efx_tc_flower_release_lhs_actions(efx, &rule->lhs_act);
	if (!old)
		rhashtable_remove_fast(&efx->tc->lhs_rule_ht, &rule->linkage,
				       efx_tc_lhs_rule_ht_params);
	kfree(rule);
	return rc;
}
static int efx_tc_flower_replace(struct efx_nic *efx,
struct net_device *net_dev,
struct flow_cls_offload *tc,
......@@ -985,19 +1341,69 @@ static int efx_tc_flower_replace(struct efx_nic *efx,
return -EOPNOTSUPP;
}
if (efx_tc_rule_is_lhs_rule(fr, &match))
return efx_tc_flower_replace_lhs(efx, tc, fr, &match, efv,
net_dev);
/* chain_index 0 is always recirc_id 0 (and does not appear in recirc_ht).
* Conveniently, match.rid == NULL and match.value.recirc_id == 0 owing
* to the initial memset(), so we don't need to do anything in that case.
*/
if (tc->common.chain_index) {
NL_SET_ERR_MSG_MOD(extack, "No support for nonzero chain_index");
return -EOPNOTSUPP;
struct efx_tc_recirc_id *rid;
/* Note regarding passed net_dev:
* VFreps and PF can share chain namespace, as they have
* distinct ingress_mports. So we don't need to burn an
* extra recirc_id if both use the same chain_index.
* (Strictly speaking, we could give each VFrep its own
* recirc_id namespace that doesn't take IDs away from the
* PF, but that would require a bunch of additional IDAs -
* one for each representor - and that's not likely to be
* the main cause of recirc_id exhaustion anyway.)
*/
rid = efx_tc_get_recirc_id(efx, tc->common.chain_index,
efx->net_dev);
if (IS_ERR(rid)) {
NL_SET_ERR_MSG_FMT_MOD(extack,
"Failed to allocate a hardware recirculation ID for chain_index %u",
tc->common.chain_index);
return PTR_ERR(rid);
}
match.rid = rid;
match.value.recirc_id = rid->fw_id;
}
match.mask.recirc_id = 0xff;
/* AR table can't match on DO_CT (+trk). But a commonly used pattern is
* +trk+est, which is strictly implied by +est, so rewrite it to that.
*/
if (match.mask.ct_state_trk && match.value.ct_state_trk &&
match.mask.ct_state_est && match.value.ct_state_est)
match.mask.ct_state_trk = 0;
/* Thanks to CT_TCP_FLAGS_INHIBIT, packets with interesting flags could
* match +trk-est (CT_HIT=0) despite being on an established connection.
* So make -est imply -tcp_syn_fin_rst match to ensure these packets
* still hit the software path.
*/
if (match.mask.ct_state_est && !match.value.ct_state_est) {
if (match.value.tcp_syn_fin_rst) {
/* Can't offload this combination */
rc = -EOPNOTSUPP;
goto release;
}
match.mask.tcp_syn_fin_rst = true;
}
rc = efx_mae_match_check_caps(efx, &match.mask, extack);
if (rc)
return rc;
goto release;
rule = kzalloc(sizeof(*rule), GFP_USER);
if (!rule)
return -ENOMEM;
if (!rule) {
rc = -ENOMEM;
goto release;
}
INIT_LIST_HEAD(&rule->acts.list);
rule->cookie = tc->cookie;
old = rhashtable_lookup_get_insert_fast(&efx->tc->match_action_ht,
......@@ -1007,8 +1413,8 @@ static int efx_tc_flower_replace(struct efx_nic *efx,
netif_dbg(efx, drv, efx->net_dev,
"Already offloaded rule (cookie %lx)\n", tc->cookie);
NL_SET_ERR_MSG_MOD(extack, "Rule already offloaded");
kfree(rule);
return -EEXIST;
rc = -EEXIST;
goto release;
}
/* Parse actions */
......@@ -1326,12 +1732,15 @@ static int efx_tc_flower_replace(struct efx_nic *efx,
/* We failed to insert the rule, so free up any entries we created in
* subsidiary tables.
*/
if (match.rid)
efx_tc_put_recirc_id(efx, match.rid);
if (act)
efx_tc_free_action_set(efx, act, false);
if (rule) {
rhashtable_remove_fast(&efx->tc->match_action_ht,
&rule->linkage,
efx_tc_match_action_ht_params);
if (!old)
rhashtable_remove_fast(&efx->tc->match_action_ht,
&rule->linkage,
efx_tc_match_action_ht_params);
efx_tc_free_action_set_list(efx, &rule->acts, false);
}
kfree(rule);
......@@ -1343,8 +1752,26 @@ static int efx_tc_flower_destroy(struct efx_nic *efx,
struct flow_cls_offload *tc)
{
struct netlink_ext_ack *extack = tc->common.extack;
struct efx_tc_lhs_rule *lhs_rule;
struct efx_tc_flow_rule *rule;
lhs_rule = rhashtable_lookup_fast(&efx->tc->lhs_rule_ht, &tc->cookie,
efx_tc_lhs_rule_ht_params);
if (lhs_rule) {
/* Remove it from HW */
efx_mae_remove_lhs_rule(efx, lhs_rule);
/* Delete it from SW */
efx_tc_flower_release_lhs_actions(efx, &lhs_rule->lhs_act);
rhashtable_remove_fast(&efx->tc->lhs_rule_ht, &lhs_rule->linkage,
efx_tc_lhs_rule_ht_params);
if (lhs_rule->match.encap)
efx_tc_flower_release_encap_match(efx, lhs_rule->match.encap);
netif_dbg(efx, drv, efx->net_dev, "Removed (lhs) filter %lx\n",
lhs_rule->cookie);
kfree(lhs_rule);
return 0;
}
rule = rhashtable_lookup_fast(&efx->tc->match_action_ht, &tc->cookie,
efx_tc_match_action_ht_params);
if (!rule) {
......@@ -1658,13 +2085,19 @@ int efx_init_tc(struct efx_nic *efx)
if (rc)
return rc;
rc = efx_tc_configure_fallback_acts_reps(efx);
if (rc)
return rc;
rc = efx_mae_get_tables(efx);
if (rc)
return rc;
efx->tc->up = true;
rc = flow_indr_dev_register(efx_tc_indr_setup_cb, efx);
if (rc)
return rc;
goto out_free;
return 0;
out_free:
efx_mae_free_tables(efx);
return rc;
}
void efx_fini_tc(struct efx_nic *efx)
......@@ -1680,6 +2113,7 @@ void efx_fini_tc(struct efx_nic *efx)
efx_tc_deconfigure_fallback_acts(efx, &efx->tc->facts.pf);
efx_tc_deconfigure_fallback_acts(efx, &efx->tc->facts.reps);
efx->tc->up = false;
efx_mae_free_tables(efx);
}
/* At teardown time, all TC filter rules (and thus all resources they created)
......@@ -1694,6 +2128,34 @@ static void efx_tc_encap_match_free(void *ptr, void *__unused)
kfree(encap);
}
/* rhashtable teardown callback: free a leftover recirc_id mapping */
static void efx_tc_recirc_free(void *ptr, void *arg)
{
	struct efx_nic *efx = arg;
	struct efx_tc_recirc_id *entry = ptr;

	/* nothing should still hold a reference at teardown time */
	WARN_ON(refcount_read(&entry->ref));
	ida_free(&efx->tc->recirc_ida, entry->fw_id);
	kfree(entry);
}
/* rhashtable teardown callback: remove and free a leftover LHS rule.
 * Rules should have been destroyed individually before teardown, so
 * finding one here is worth shouting about.
 */
static void efx_tc_lhs_free(void *ptr, void *arg)
{
	struct efx_nic *efx = arg;
	struct efx_tc_lhs_rule *rule = ptr;

	netif_err(efx, drv, efx->net_dev,
		  "tc lhs_rule %lx still present at teardown, removing\n",
		  rule->cookie);

	/* release the action's resources, then pull the rule from hw */
	if (rule->lhs_act.zone)
		efx_tc_ct_unregister_zone(efx, rule->lhs_act.zone);
	if (rule->lhs_act.count)
		efx_tc_flower_put_counter_index(efx, rule->lhs_act.count);
	efx_mae_remove_lhs_rule(efx, rule);

	kfree(rule);
}
static void efx_tc_flow_free(void *ptr, void *arg)
{
struct efx_tc_flow_rule *rule = ptr;
......@@ -1740,6 +2202,16 @@ int efx_init_struct_tc(struct efx_nic *efx)
rc = rhashtable_init(&efx->tc->match_action_ht, &efx_tc_match_action_ht_params);
if (rc < 0)
goto fail_match_action_ht;
rc = rhashtable_init(&efx->tc->lhs_rule_ht, &efx_tc_lhs_rule_ht_params);
if (rc < 0)
goto fail_lhs_rule_ht;
rc = efx_tc_init_conntrack(efx);
if (rc < 0)
goto fail_conntrack;
rc = rhashtable_init(&efx->tc->recirc_ht, &efx_tc_recirc_ht_params);
if (rc < 0)
goto fail_recirc_ht;
ida_init(&efx->tc->recirc_ida);
efx->tc->reps_filter_uc = -1;
efx->tc->reps_filter_mc = -1;
INIT_LIST_HEAD(&efx->tc->dflt.pf.acts.list);
......@@ -1752,6 +2224,12 @@ int efx_init_struct_tc(struct efx_nic *efx)
efx->tc->facts.reps.fw_id = MC_CMD_MAE_ACTION_SET_ALLOC_OUT_ACTION_SET_ID_NULL;
efx->extra_channel_type[EFX_EXTRA_CHANNEL_TC] = &efx_tc_channel_type;
return 0;
fail_recirc_ht:
efx_tc_destroy_conntrack(efx);
fail_conntrack:
rhashtable_destroy(&efx->tc->lhs_rule_ht);
fail_lhs_rule_ht:
rhashtable_destroy(&efx->tc->match_action_ht);
fail_match_action_ht:
rhashtable_destroy(&efx->tc->encap_match_ht);
fail_encap_match_ht:
......@@ -1781,10 +2259,15 @@ void efx_fini_struct_tc(struct efx_nic *efx)
MC_CMD_MAE_ACTION_SET_LIST_ALLOC_OUT_ACTION_SET_LIST_ID_NULL);
EFX_WARN_ON_PARANOID(efx->tc->facts.reps.fw_id !=
MC_CMD_MAE_ACTION_SET_LIST_ALLOC_OUT_ACTION_SET_LIST_ID_NULL);
rhashtable_free_and_destroy(&efx->tc->lhs_rule_ht, efx_tc_lhs_free, efx);
rhashtable_free_and_destroy(&efx->tc->match_action_ht, efx_tc_flow_free,
efx);
rhashtable_free_and_destroy(&efx->tc->encap_match_ht,
efx_tc_encap_match_free, NULL);
efx_tc_fini_conntrack(efx);
rhashtable_free_and_destroy(&efx->tc->recirc_ht, efx_tc_recirc_free, efx);
WARN_ON(!ida_is_empty(&efx->tc->recirc_ida));
ida_destroy(&efx->tc->recirc_ida);
efx_tc_fini_counters(efx);
efx_tc_fini_encap_actions(efx);
mutex_unlock(&efx->tc->mutex);
......
......@@ -18,12 +18,10 @@
#define IS_ALL_ONES(v) (!(typeof (v))~(v))
#ifdef CONFIG_IPV6
/* Return true iff every bit of @addr is set, i.e. the mask is exact-match */
static inline bool efx_ipv6_addr_all_ones(struct in6_addr *addr)
{
	const unsigned char *p = (const unsigned char *)addr;
	size_t i;

	for (i = 0; i < sizeof(*addr); i++)
		if (p[i] != 0xff)
			return false;
	return true;
}
#endif
struct efx_tc_encap_action; /* see tc_encap_actions.h */
......@@ -47,7 +45,7 @@ struct efx_tc_action_set {
struct efx_tc_match_fields {
/* L1 */
u32 ingress_port;
u8 recirc_id;
u8 recirc_id; /* mapped from (u32) TC chain_index to smaller space */
/* L2 (inner when encap) */
__be16 eth_proto;
__be16 vlan_tci[2], vlan_proto[2];
......@@ -62,6 +60,7 @@ struct efx_tc_match_fields {
/* L4 */
__be16 l4_sport, l4_dport; /* Ports (UDP, TCP) */
__be16 tcp_flags;
bool tcp_syn_fin_rst; /* true if ANY of SYN/FIN/RST are set */
/* Encap. The following are *outer* fields. Note that there are no
* outer eth (L2) fields; this is because TC doesn't have them.
*/
......@@ -70,6 +69,10 @@ struct efx_tc_match_fields {
u8 enc_ip_tos, enc_ip_ttl;
__be16 enc_sport, enc_dport;
__be32 enc_keyid; /* e.g. VNI, VSID */
/* Conntrack. */
u16 ct_state_trk:1, ct_state_est:1;
u32 ct_mark;
u16 ct_zone;
};
static inline bool efx_tc_match_is_encap(const struct efx_tc_match_fields *mask)
......@@ -117,10 +120,19 @@ struct efx_tc_encap_match {
struct efx_tc_encap_match *pseudo; /* Referenced pseudo EM if needed */
};
/* A refcounted mapping from (chain_index, net_dev) to the small recirc_id
 * value programmed into the MAE; shared by all rules using that chain.
 */
struct efx_tc_recirc_id {
	u32 chain_index;		/* TC chain_index this ID represents */
	struct net_device *net_dev;	/* not referenced; see efx_tc_get_recirc_id() */
	struct rhash_head linkage;	/* recirc_ht linkage; key is fields above */
	refcount_t ref;
	u8 fw_id; /* index allocated for use in the MAE */
};

struct efx_tc_match {
	struct efx_tc_match_fields value;	/* field values to match on */
	struct efx_tc_match_fields mask;	/* same fields; which bits to match */
	struct efx_tc_encap_match *encap;	/* encap match entry, if any */
	struct efx_tc_recirc_id *rid;		/* recirc_id held by this match, if any */
};
struct efx_tc_action_set_list {
......@@ -128,6 +140,12 @@ struct efx_tc_action_set_list {
u32 fw_id;
};
/* Resources held by the action side of a left-hand side rule */
struct efx_tc_lhs_action {
	struct efx_tc_recirc_id *rid;		/* from the goto-chain action */
	struct efx_tc_ct_zone *zone;		/* from the ct action, if any */
	struct efx_tc_counter_index *count;	/* from goto's hw_stats, if any */
};
struct efx_tc_flow_rule {
unsigned long cookie;
struct rhash_head linkage;
......@@ -137,12 +155,62 @@ struct efx_tc_flow_rule {
u32 fw_id;
};
/* A left-hand side (ct-lookup / goto-chain) rule; see the "TC conntrack
 * sequences" DOC comment in tc.c.
 */
struct efx_tc_lhs_rule {
	unsigned long cookie;		/* TC filter cookie; lhs_rule_ht key */
	struct efx_tc_match match;
	struct efx_tc_lhs_action lhs_act;
	struct rhash_head linkage;	/* lhs_rule_ht linkage */
	u32 fw_id;			/* rule's ID in hardware */
};

enum efx_tc_rule_prios {
	EFX_TC_PRIO_TC, /* Rule inserted by TC */
	EFX_TC_PRIO_DFLT, /* Default switch rule; one of efx_tc_default_rules */
	EFX_TC_PRIO__NUM
};
/* Layout of one key or response field of an MAE table, as reported by the
 * MC_CMD_TABLE_DESCRIPTOR firmware command.
 */
struct efx_tc_table_field_fmt {
	u16 field_id;	/* TABLE_FIELD_ID_* identifier */
	u16 lbn;	/* lowest bit number within the key/response */
	u16 width;	/* field width in bits */
	u8 masking;	/* field masking scheme */
	u8 scheme;
};

/* Full layout description of one MAE table: key and response widths and
 * the format of each of their fields.
 */
struct efx_tc_table_desc {
	u16 type;
	u16 key_width;	/* total key width, in bits */
	u16 resp_width;	/* total response width, in bits */
	u16 n_keys;	/* number of entries in @keys */
	u16 n_resps;	/* number of entries in @resps */
	u16 n_prios;
	u8 flags;
	u8 scheme;
	struct efx_tc_table_field_fmt *keys;
	struct efx_tc_table_field_fmt *resps;
};

struct efx_tc_table_ct { /* TABLE_ID_CONNTRACK_TABLE */
	struct efx_tc_table_desc desc;
	bool hooked;	/* true once the table has been set up in hw */
	struct { /* indices of named fields within @desc.keys */
		u8 eth_proto_idx;
		u8 ip_proto_idx;
		u8 src_ip_idx; /* either v4 or v6 */
		u8 dst_ip_idx;
		u8 l4_sport_idx;
		u8 l4_dport_idx;
		u8 zone_idx; /* for TABLE_FIELD_ID_DOMAIN */
	} keys;
	struct { /* indices of named fields within @desc.resps */
		u8 dnat_idx;
		u8 nat_ip_idx;
		u8 l4_natport_idx;
		u8 mark_idx;
		u8 counter_id_idx;
	} resps;
};
/**
* struct efx_tc_state - control plane data for TC offload
*
......@@ -154,7 +222,13 @@ enum efx_tc_rule_prios {
* @encap_ht: Hashtable of TC encap actions
* @encap_match_ht: Hashtable of TC encap matches
* @match_action_ht: Hashtable of TC match-action rules
* @lhs_rule_ht: Hashtable of TC left-hand (act ct & goto chain) rules
* @ct_zone_ht: Hashtable of TC conntrack flowtable bindings
* @ct_ht: Hashtable of TC conntrack flow entries
* @neigh_ht: Hashtable of neighbour watches (&struct efx_neigh_binder)
* @recirc_ht: Hashtable of recirculation ID mappings (&struct efx_tc_recirc_id)
* @recirc_ida: Recirculation ID allocator
* @meta_ct: MAE table layout for conntrack table
* @reps_mport_id: MAE port allocated for representor RX
* @reps_filter_uc: VNIC filter for representor unicast RX (promisc)
* @reps_filter_mc: VNIC filter for representor multicast RX (allmulti)
......@@ -185,7 +259,13 @@ struct efx_tc_state {
struct rhashtable encap_ht;
struct rhashtable encap_match_ht;
struct rhashtable match_action_ht;
struct rhashtable lhs_rule_ht;
struct rhashtable ct_zone_ht;
struct rhashtable ct_ht;
struct rhashtable neigh_ht;
struct rhashtable recirc_ht;
struct ida recirc_ida;
struct efx_tc_table_ct meta_ct;
u32 reps_mport_id, reps_mport_vport_id;
s32 reps_filter_uc, reps_filter_mc;
bool flush_counters;
......
// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
* Driver for Solarflare network controllers and boards
* Copyright 2023, Advanced Micro Devices, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
#include "tc_conntrack.h"
#include "tc.h"
#include "mae.h"
/* Forward declaration: block callback we register with the nf flowtable */
static int efx_tc_flow_block(enum tc_setup_type type, void *type_data,
			     void *cb_priv);

/* CT zone bindings, keyed on all fields before 'linkage' */
static const struct rhashtable_params efx_tc_ct_zone_ht_params = {
	.key_len	= offsetof(struct efx_tc_ct_zone, linkage),
	.key_offset	= 0,
	.head_offset	= offsetof(struct efx_tc_ct_zone, linkage),
};

/* Offloaded conntrack entries, keyed on all fields before 'linkage' */
static const struct rhashtable_params efx_tc_ct_ht_params = {
	.key_len	= offsetof(struct efx_tc_ct_entry, linkage),
	.key_offset	= 0,
	.head_offset	= offsetof(struct efx_tc_ct_entry, linkage),
};
/* rhashtable teardown callback: unhook and free a leftover CT zone binding.
 * Zones should have been unregistered before teardown, hence the error log.
 */
static void efx_tc_ct_zone_free(void *ptr, void *arg)
{
	struct efx_tc_ct_zone *z = ptr;

	netif_err(z->efx, drv, z->efx->net_dev,
		  "tc ct_zone %u still present at teardown, removing\n",
		  z->zone);

	/* detach ourselves from the netfilter flow table before freeing */
	nf_flow_table_offload_del_cb(z->nf_ft, efx_tc_flow_block, z);
	kfree(z);
}
/* rhashtable teardown callback: free a leftover offloaded conntrack entry */
static void efx_tc_ct_free(void *ptr, void *arg)
{
	struct efx_nic *efx = arg;
	struct efx_tc_ct_entry *entry = ptr;

	netif_err(efx, drv, efx->net_dev,
		  "tc ct_entry %lx still present at teardown\n",
		  entry->cookie);

	/* We can release the counter, but we can't remove the CT itself
	 * from hardware because the table meta is already gone.
	 */
	efx_tc_flower_release_counter(efx, entry->cnt);
	kfree(entry);
}
/* Set up the two conntrack hashtables (zone bindings and entries).
 * Return: 0 on success, negative error code otherwise (nothing left
 * allocated on failure).
 */
int efx_tc_init_conntrack(struct efx_nic *efx)
{
	int rc;

	rc = rhashtable_init(&efx->tc->ct_zone_ht, &efx_tc_ct_zone_ht_params);
	if (rc < 0)
		return rc;

	rc = rhashtable_init(&efx->tc->ct_ht, &efx_tc_ct_ht_params);
	if (rc < 0)
		rhashtable_destroy(&efx->tc->ct_zone_ht);

	return rc;
}
/* Only call this in init failure teardown.
 * Normal exit should fini instead as there may be entries in the table.
 */
void efx_tc_destroy_conntrack(struct efx_nic *efx)
{
	rhashtable_destroy(&efx->tc->ct_ht);
	rhashtable_destroy(&efx->tc->ct_zone_ht);
}

/* Normal teardown: flush out (complaining about) any remaining entries */
void efx_tc_fini_conntrack(struct efx_nic *efx)
{
	rhashtable_free_and_destroy(&efx->tc->ct_zone_ht, efx_tc_ct_zone_free, NULL);
	rhashtable_free_and_destroy(&efx->tc->ct_ht, efx_tc_ct_free, efx);
}
/* Convert a 32-bit TCP_FLAG_* constant (flag bits in the upper 16 bits of
 * the word, big-endian) to the 16-bit __be16 form used in flow matches.
 */
#define EFX_NF_TCP_FLAG(flg)	cpu_to_be16(be32_to_cpu(TCP_FLAG_##flg) >> 16)
/* Validate the match part of a conntrack flow offload request and extract
 * it into @conn.  The hardware conntrack table only supports exact-match
 * 5-tuples (TCP or UDP over IPv4 or IPv6), so anything else is rejected
 * with -EOPNOTSUPP.
 */
static int efx_tc_ct_parse_match(struct efx_nic *efx, struct flow_rule *fr,
				 struct efx_tc_ct_entry *conn)
{
	struct flow_dissector *dissector = fr->match.dissector;
	unsigned char ipv = 0;	/* 4 or 6 once determined, else unsupported */
	bool tcp = false;

	/* Determine IP version from the control key's addr_type */
	if (flow_rule_match_key(fr, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control fm;

		flow_rule_match_control(fr, &fm);
		if (IS_ALL_ONES(fm.mask->addr_type))
			switch (fm.key->addr_type) {
			case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
				ipv = 4;
				break;
			case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
				ipv = 6;
				break;
			default:
				break;
			}
	}

	if (!ipv) {
		netif_dbg(efx, drv, efx->net_dev,
			  "Conntrack missing ipv specification\n");
		return -EOPNOTSUPP;
	}

	if (dissector->used_keys &
	    ~(BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_TCP) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_META))) {
		netif_dbg(efx, drv, efx->net_dev,
			  "Unsupported conntrack keys %#llx\n",
			  dissector->used_keys);
		return -EOPNOTSUPP;
	}

	/* eth_proto and ip_proto must be present, exact-match, and mutually
	 * consistent with the IP version determined above
	 */
	if (flow_rule_match_key(fr, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic fm;

		flow_rule_match_basic(fr, &fm);
		if (!IS_ALL_ONES(fm.mask->n_proto)) {
			netif_dbg(efx, drv, efx->net_dev,
				  "Conntrack eth_proto is not exact-match; mask %04x\n",
				  ntohs(fm.mask->n_proto));
			return -EOPNOTSUPP;
		}
		conn->eth_proto = fm.key->n_proto;
		if (conn->eth_proto != (ipv == 4 ? htons(ETH_P_IP)
					 : htons(ETH_P_IPV6))) {
			netif_dbg(efx, drv, efx->net_dev,
				  "Conntrack eth_proto is not IPv%u, is %04x\n",
				  ipv, ntohs(conn->eth_proto));
			return -EOPNOTSUPP;
		}
		if (!IS_ALL_ONES(fm.mask->ip_proto)) {
			netif_dbg(efx, drv, efx->net_dev,
				  "Conntrack ip_proto is not exact-match; mask %02x\n",
				  fm.mask->ip_proto);
			return -EOPNOTSUPP;
		}
		conn->ip_proto = fm.key->ip_proto;
		switch (conn->ip_proto) {
		case IPPROTO_TCP:
			tcp = true;
			break;
		case IPPROTO_UDP:
			break;
		default:
			netif_dbg(efx, drv, efx->net_dev,
				  "Conntrack ip_proto not TCP or UDP, is %02x\n",
				  conn->ip_proto);
			return -EOPNOTSUPP;
		}
	} else {
		netif_dbg(efx, drv, efx->net_dev,
			  "Conntrack missing eth_proto, ip_proto\n");
		return -EOPNOTSUPP;
	}

	/* Source and destination addresses must be present and exact-match */
	if (ipv == 4 && flow_rule_match_key(fr, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
		struct flow_match_ipv4_addrs fm;

		flow_rule_match_ipv4_addrs(fr, &fm);
		if (!IS_ALL_ONES(fm.mask->src)) {
			netif_dbg(efx, drv, efx->net_dev,
				  "Conntrack ipv4.src is not exact-match; mask %08x\n",
				  ntohl(fm.mask->src));
			return -EOPNOTSUPP;
		}
		conn->src_ip = fm.key->src;
		if (!IS_ALL_ONES(fm.mask->dst)) {
			netif_dbg(efx, drv, efx->net_dev,
				  "Conntrack ipv4.dst is not exact-match; mask %08x\n",
				  ntohl(fm.mask->dst));
			return -EOPNOTSUPP;
		}
		conn->dst_ip = fm.key->dst;
	} else if (ipv == 6 && flow_rule_match_key(fr, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
		struct flow_match_ipv6_addrs fm;

		flow_rule_match_ipv6_addrs(fr, &fm);
		if (!efx_ipv6_addr_all_ones(&fm.mask->src)) {
			netif_dbg(efx, drv, efx->net_dev,
				  "Conntrack ipv6.src is not exact-match; mask %pI6\n",
				  &fm.mask->src);
			return -EOPNOTSUPP;
		}
		conn->src_ip6 = fm.key->src;
		if (!efx_ipv6_addr_all_ones(&fm.mask->dst)) {
			netif_dbg(efx, drv, efx->net_dev,
				  "Conntrack ipv6.dst is not exact-match; mask %pI6\n",
				  &fm.mask->dst);
			return -EOPNOTSUPP;
		}
		conn->dst_ip6 = fm.key->dst;
	} else {
		netif_dbg(efx, drv, efx->net_dev,
			  "Conntrack missing IPv%u addrs\n", ipv);
		return -EOPNOTSUPP;
	}

	/* L4 ports must be present and exact-match */
	if (flow_rule_match_key(fr, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports fm;

		flow_rule_match_ports(fr, &fm);
		if (!IS_ALL_ONES(fm.mask->src)) {
			netif_dbg(efx, drv, efx->net_dev,
				  "Conntrack ports.src is not exact-match; mask %04x\n",
				  ntohs(fm.mask->src));
			return -EOPNOTSUPP;
		}
		conn->l4_sport = fm.key->src;
		if (!IS_ALL_ONES(fm.mask->dst)) {
			netif_dbg(efx, drv, efx->net_dev,
				  "Conntrack ports.dst is not exact-match; mask %04x\n",
				  ntohs(fm.mask->dst));
			return -EOPNOTSUPP;
		}
		conn->l4_dport = fm.key->dst;
	} else {
		netif_dbg(efx, drv, efx->net_dev, "Conntrack missing L4 ports\n");
		return -EOPNOTSUPP;
	}

	if (flow_rule_match_key(fr, FLOW_DISSECTOR_KEY_TCP)) {
		__be16 tcp_interesting_flags;
		struct flow_match_tcp fm;

		if (!tcp) {
			netif_dbg(efx, drv, efx->net_dev,
				  "Conntrack matching on TCP keys but ipproto is not tcp\n");
			return -EOPNOTSUPP;
		}
		flow_rule_match_tcp(fr, &fm);
		tcp_interesting_flags = EFX_NF_TCP_FLAG(SYN) |
					EFX_NF_TCP_FLAG(RST) |
					EFX_NF_TCP_FLAG(FIN);
		/* If any of the tcp_interesting_flags is set, we always
		 * inhibit CT lookup in LHS (so SW can update CT table).
		 */
		/* NOTE(review): this tests the key bits without consulting
		 * fm.mask - presumably nf flowtable offload always sets the
		 * mask for these bits; confirm against the flowtable code.
		 */
		if (fm.key->flags & tcp_interesting_flags) {
			netif_dbg(efx, drv, efx->net_dev,
				  "Unsupported conntrack tcp.flags %04x/%04x\n",
				  ntohs(fm.key->flags), ntohs(fm.mask->flags));
			return -EOPNOTSUPP;
		}
		/* Other TCP flags cannot be filtered at CT */
		if (fm.mask->flags & ~tcp_interesting_flags) {
			netif_dbg(efx, drv, efx->net_dev,
				  "Unsupported conntrack tcp.flags %04x/%04x\n",
				  ntohs(fm.key->flags), ntohs(fm.mask->flags));
			return -EOPNOTSUPP;
		}
	}

	return 0;
}
static int efx_tc_ct_replace(struct efx_tc_ct_zone *ct_zone,
struct flow_cls_offload *tc)
{
struct flow_rule *fr = flow_cls_offload_flow_rule(tc);
struct efx_tc_ct_entry *conn, *old;
struct efx_nic *efx = ct_zone->efx;
const struct flow_action_entry *fa;
struct efx_tc_counter *cnt;
int rc, i;
if (WARN_ON(!efx->tc))
return -ENETDOWN;
if (WARN_ON(!efx->tc->up))
return -ENETDOWN;
conn = kzalloc(sizeof(*conn), GFP_USER);
if (!conn)
return -ENOMEM;
conn->cookie = tc->cookie;
old = rhashtable_lookup_get_insert_fast(&efx->tc->ct_ht,
&conn->linkage,
efx_tc_ct_ht_params);
if (old) {
netif_dbg(efx, drv, efx->net_dev,
"Already offloaded conntrack (cookie %lx)\n", tc->cookie);
rc = -EEXIST;
goto release;
}
/* Parse match */
conn->zone = ct_zone;
rc = efx_tc_ct_parse_match(efx, fr, conn);
if (rc)
goto release;
/* Parse actions */
flow_action_for_each(i, fa, &fr->action) {
switch (fa->id) {
case FLOW_ACTION_CT_METADATA:
conn->mark = fa->ct_metadata.mark;
if (memchr_inv(fa->ct_metadata.labels, 0, sizeof(fa->ct_metadata.labels))) {
netif_dbg(efx, drv, efx->net_dev,
"Setting CT label not supported\n");
rc = -EOPNOTSUPP;
goto release;
}
break;
default:
netif_dbg(efx, drv, efx->net_dev,
"Unhandled action %u for conntrack\n", fa->id);
rc = -EOPNOTSUPP;
goto release;
}
}
/* fill in defaults for unmangled values */
conn->nat_ip = conn->dnat ? conn->dst_ip : conn->src_ip;
conn->l4_natport = conn->dnat ? conn->l4_dport : conn->l4_sport;
cnt = efx_tc_flower_allocate_counter(efx, EFX_TC_COUNTER_TYPE_CT);
if (IS_ERR(cnt)) {
rc = PTR_ERR(cnt);
goto release;
}
conn->cnt = cnt;
rc = efx_mae_insert_ct(efx, conn);
if (rc) {
netif_dbg(efx, drv, efx->net_dev,
"Failed to insert conntrack, %d\n", rc);
goto release;
}
mutex_lock(&ct_zone->mutex);
list_add_tail(&conn->list, &ct_zone->cts);
mutex_unlock(&ct_zone->mutex);
return 0;
release:
if (conn->cnt)
efx_tc_flower_release_counter(efx, conn->cnt);
if (!old)
rhashtable_remove_fast(&efx->tc->ct_ht, &conn->linkage,
efx_tc_ct_ht_params);
kfree(conn);
return rc;
}
/* First half of connection teardown: remove the connection from the
 * hardware and unlink it from the software hashtable.  The caller must
 * wait an RCU grace period and then call efx_tc_ct_remove_finish() to
 * actually free the entry.
 */
static void efx_tc_ct_remove(struct efx_nic *efx, struct efx_tc_ct_entry *conn)
{
	int rc;

	/* Take it out of HW first */
	rc = efx_mae_remove_ct(efx, conn);
	/* Unlink from the SW table regardless of the HW outcome */
	rhashtable_remove_fast(&efx->tc->ct_ht, &conn->linkage,
			       efx_tc_ct_ht_params);
	if (!rc) {
		netif_dbg(efx, drv, efx->net_dev, "Removed conntrack %lx\n",
			  conn->cookie);
	} else {
		netif_err(efx, drv, efx->net_dev,
			  "Failed to remove conntrack %lx from hw, rc %d\n",
			  conn->cookie, rc);
	}
}
/* Second half of connection teardown: frees the counter and the entry.
 * Must only be called after an RCU grace period following
 * efx_tc_ct_remove() — see the comment below.
 */
static void efx_tc_ct_remove_finish(struct efx_nic *efx, struct efx_tc_ct_entry *conn)
{
	/* Remove related CT counter. This is delayed after the conn object we
	 * are working with has been successfully removed. This protects the
	 * counter from being used-after-free inside efx_tc_ct_stats.
	 */
	efx_tc_flower_release_counter(efx, conn->cnt);
	kfree(conn);
}
/* Handle FLOW_CLS_DESTROY: un-offload the tracked connection identified
 * by tc->cookie.  Returns 0 on success, -ENOENT if the connection was
 * never offloaded (or already removed).
 */
static int efx_tc_ct_destroy(struct efx_tc_ct_zone *ct_zone,
			     struct flow_cls_offload *tc)
{
	struct efx_nic *efx = ct_zone->efx;
	struct efx_tc_ct_entry *entry;

	entry = rhashtable_lookup_fast(&efx->tc->ct_ht, &tc->cookie,
				       efx_tc_ct_ht_params);
	if (!entry) {
		netif_warn(efx, drv, efx->net_dev,
			   "Conntrack %lx not found to remove\n", tc->cookie);
		return -ENOENT;
	}

	/* Unlink from the zone's list and tear down HW/SW state under the
	 * zone mutex.
	 */
	mutex_lock(&ct_zone->mutex);
	list_del(&entry->list);
	efx_tc_ct_remove(efx, entry);
	mutex_unlock(&ct_zone->mutex);

	/* Let any concurrent efx_tc_ct_stats() readers drain before the
	 * entry (and its counter) are freed.
	 */
	synchronize_rcu();
	efx_tc_ct_remove_finish(efx, entry);
	return 0;
}
/* Handle FLOW_CLS_STATS: report the connection's last-use timestamp
 * from its CT counter.  Returns 0 on success, -ENOENT if the cookie
 * does not match an offloaded connection.
 */
static int efx_tc_ct_stats(struct efx_tc_ct_zone *ct_zone,
			   struct flow_cls_offload *tc)
{
	struct efx_nic *efx = ct_zone->efx;
	struct efx_tc_ct_entry *entry;
	struct efx_tc_counter *cnt;
	int rc = 0;

	/* The RCU read lock keeps the entry (and hence its counter) alive
	 * against a concurrent remove + remove_finish.
	 */
	rcu_read_lock();
	entry = rhashtable_lookup_fast(&efx->tc->ct_ht, &tc->cookie,
				       efx_tc_ct_ht_params);
	if (entry) {
		cnt = entry->cnt;
		spin_lock_bh(&cnt->lock);
		/* Report only last use */
		flow_stats_update(&tc->stats, 0, 0, 0, cnt->touched,
				  FLOW_ACTION_HW_STATS_DELAYED);
		spin_unlock_bh(&cnt->lock);
	} else {
		netif_warn(efx, drv, efx->net_dev,
			   "Conntrack %lx not found for stats\n", tc->cookie);
		rc = -ENOENT;
	}
	rcu_read_unlock();
	return rc;
}
/* Block callback registered with the zone's nf_flowtable; dispatches
 * conntrack offload requests (replace/destroy/stats) for this zone.
 * cb_priv is the struct efx_tc_ct_zone we registered with.
 */
static int efx_tc_flow_block(enum tc_setup_type type, void *type_data,
			     void *cb_priv)
{
	struct flow_cls_offload *tcb = type_data;
	struct efx_tc_ct_zone *ct_zone = cb_priv;

	if (type != TC_SETUP_CLSFLOWER)
		return -EOPNOTSUPP;

	switch (tcb->command) {
	case FLOW_CLS_REPLACE:
		return efx_tc_ct_replace(ct_zone, tcb);
	case FLOW_CLS_DESTROY:
		return efx_tc_ct_destroy(ct_zone, tcb);
	case FLOW_CLS_STATS:
		return efx_tc_ct_stats(ct_zone, tcb);
	default:
		break;
	}
	/* Fixed: dropped the stray ';' after the switch's closing brace
	 * (a null statement, flagged by checkpatch).
	 */
	return -EOPNOTSUPP;
}
/* Look up (or create) the driver's per-zone conntrack state and register
 * efx_tc_flow_block() as an offload callback on the zone's nf_flowtable.
 * Returns the (refcounted) zone object, or an ERR_PTR on failure;
 * -EAGAIN means an existing entry was found mid-teardown (ref hit zero).
 */
struct efx_tc_ct_zone *efx_tc_ct_register_zone(struct efx_nic *efx, u16 zone,
					       struct nf_flowtable *ct_ft)
{
	struct efx_tc_ct_zone *ct_zone, *old;
	int rc;

	ct_zone = kzalloc(sizeof(*ct_zone), GFP_USER);
	if (!ct_zone)
		return ERR_PTR(-ENOMEM);
	ct_zone->zone = zone;
	old = rhashtable_lookup_get_insert_fast(&efx->tc->ct_zone_ht,
						&ct_zone->linkage,
						efx_tc_ct_zone_ht_params);
	if (old) {
		/* don't need our new entry */
		kfree(ct_zone);
		/* rhashtable_lookup_get_insert_fast() can return an error
		 * pointer on insertion failure (e.g. -ENOMEM, -EBUSY);
		 * dereferencing it via refcount_inc_not_zero() would crash.
		 */
		if (IS_ERR(old))
			return ERR_CAST(old);
		if (!refcount_inc_not_zero(&old->ref))
			return ERR_PTR(-EAGAIN);
		/* existing entry found */
		WARN_ON_ONCE(old->nf_ft != ct_ft);
		netif_dbg(efx, drv, efx->net_dev,
			  "Found existing ct_zone for %u\n", zone);
		return old;
	}
	ct_zone->nf_ft = ct_ft;
	ct_zone->efx = efx;
	INIT_LIST_HEAD(&ct_zone->cts);
	mutex_init(&ct_zone->mutex);
	rc = nf_flow_table_offload_add_cb(ct_ft, efx_tc_flow_block, ct_zone);
	netif_dbg(efx, drv, efx->net_dev, "Adding new ct_zone for %u, rc %d\n",
		  zone, rc);
	if (rc < 0)
		goto fail;
	refcount_set(&ct_zone->ref, 1);
	return ct_zone;
fail:
	rhashtable_remove_fast(&efx->tc->ct_zone_ht, &ct_zone->linkage,
			       efx_tc_ct_zone_ht_params);
	/* mirror the cleanup done in efx_tc_ct_unregister_zone() */
	mutex_destroy(&ct_zone->mutex);
	kfree(ct_zone);
	return ERR_PTR(rc);
}
/* Drop a reference to a zone object; on the last reference, unregister
 * the flowtable callback and tear down every connection still offloaded
 * in the zone (HW removal first, then — after an RCU grace period —
 * freeing, matching the two-phase efx_tc_ct_remove()/_finish() contract).
 */
void efx_tc_ct_unregister_zone(struct efx_nic *efx,
			       struct efx_tc_ct_zone *ct_zone)
{
	struct efx_tc_ct_entry *conn, *next;

	if (!refcount_dec_and_test(&ct_zone->ref))
		return; /* still in use */
	/* Stop new offload requests arriving via the block callback */
	nf_flow_table_offload_del_cb(ct_zone->nf_ft, efx_tc_flow_block, ct_zone);
	rhashtable_remove_fast(&efx->tc->ct_zone_ht, &ct_zone->linkage,
			       efx_tc_ct_zone_ht_params);
	mutex_lock(&ct_zone->mutex);
	/* Phase 1: remove every connection from HW and the ct hashtable */
	list_for_each_entry(conn, &ct_zone->cts, list)
		efx_tc_ct_remove(efx, conn);
	/* Wait for any efx_tc_ct_stats() readers to drain before freeing */
	synchronize_rcu();
	/* need to use _safe because efx_tc_ct_remove_finish() frees conn */
	list_for_each_entry_safe(conn, next, &ct_zone->cts, list)
		efx_tc_ct_remove_finish(efx, conn);
	mutex_unlock(&ct_zone->mutex);
	mutex_destroy(&ct_zone->mutex);
	netif_dbg(efx, drv, efx->net_dev, "Removed ct_zone for %u\n",
		  ct_zone->zone);
	kfree(ct_zone);
}
/* SPDX-License-Identifier: GPL-2.0-only */
/****************************************************************************
* Driver for Solarflare network controllers and boards
* Copyright 2023, Advanced Micro Devices, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
#ifndef EFX_TC_CONNTRACK_H
#define EFX_TC_CONNTRACK_H
#include "net_driver.h"
#if IS_ENABLED(CONFIG_SFC_SRIOV)
#include <linux/refcount.h>
#include <net/netfilter/nf_flow_table.h>
/* Driver state for one conntrack zone whose flowtable we offload. */
struct efx_tc_ct_zone {
	u16 zone;			/* conntrack zone number */
	struct rhash_head linkage;	/* membership in efx->tc->ct_zone_ht */
	refcount_t ref;			/* users of this zone object; freed on last put */
	struct nf_flowtable *nf_ft;	/* flowtable our block callback is registered on */
	struct efx_nic *efx;		/* owning NIC */
	struct mutex mutex; /* protects cts list */
	struct list_head cts; /* list of efx_tc_ct_entry in this zone */
};
/* create/uncreate/teardown hashtables */
int efx_tc_init_conntrack(struct efx_nic *efx);
void efx_tc_destroy_conntrack(struct efx_nic *efx);
void efx_tc_fini_conntrack(struct efx_nic *efx);
struct efx_tc_ct_zone *efx_tc_ct_register_zone(struct efx_nic *efx, u16 zone,
struct nf_flowtable *ct_ft);
void efx_tc_ct_unregister_zone(struct efx_nic *efx,
struct efx_tc_ct_zone *ct_zone);
/* One offloaded tracked connection. */
struct efx_tc_ct_entry {
	unsigned long cookie;		/* TC offload cookie; key in tc->ct_ht */
	struct rhash_head linkage;	/* membership in efx->tc->ct_ht */
	__be16 eth_proto;		/* ETH_P_IP or ETH_P_IPV6 (exact-match) */
	u8 ip_proto;			/* IPPROTO_TCP or IPPROTO_UDP (exact-match) */
	bool dnat;			/* selects dst vs src as the NATed side for nat_ip/l4_natport */
	__be32 src_ip, dst_ip, nat_ip;	/* IPv4 addrs; nat_ip defaults to the un-NATed value */
	struct in6_addr src_ip6, dst_ip6; /* IPv6 addrs (when eth_proto is IPv6) */
	__be16 l4_sport, l4_dport, l4_natport; /* Ports (UDP, TCP) */
	struct efx_tc_ct_zone *zone;	/* zone this connection belongs to */
	u32 mark;			/* conntrack mark from the CT_METADATA action */
	struct efx_tc_counter *cnt;	/* CT counter; released in remove_finish */
	struct list_head list; /* entry on zone->cts */
};
#endif /* CONFIG_SFC_SRIOV */
#endif /* EFX_TC_CONNTRACK_H */
......@@ -129,8 +129,8 @@ static void efx_tc_counter_work(struct work_struct *work)
/* Counter allocation */
static struct efx_tc_counter *efx_tc_flower_allocate_counter(struct efx_nic *efx,
int type)
struct efx_tc_counter *efx_tc_flower_allocate_counter(struct efx_nic *efx,
int type)
{
struct efx_tc_counter *cnt;
int rc, rc2;
......@@ -169,8 +169,8 @@ static struct efx_tc_counter *efx_tc_flower_allocate_counter(struct efx_nic *efx
return ERR_PTR(rc > 0 ? -EIO : rc);
}
static void efx_tc_flower_release_counter(struct efx_nic *efx,
struct efx_tc_counter *cnt)
void efx_tc_flower_release_counter(struct efx_nic *efx,
struct efx_tc_counter *cnt)
{
int rc;
......
......@@ -49,6 +49,10 @@ int efx_tc_init_counters(struct efx_nic *efx);
void efx_tc_destroy_counters(struct efx_nic *efx);
void efx_tc_fini_counters(struct efx_nic *efx);
struct efx_tc_counter *efx_tc_flower_allocate_counter(struct efx_nic *efx,
int type);
void efx_tc_flower_release_counter(struct efx_nic *efx,
struct efx_tc_counter *cnt);
struct efx_tc_counter_index *efx_tc_flower_get_counter_index(
struct efx_nic *efx, unsigned long cookie,
enum efx_tc_counter_type type);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment