Commit f53e1432 authored by David S. Miller

Merge branch 'sparx5-TC-key'

Steen Hegelund says:

====================
Extend TC key support for Sparx5 IS2 VCAP

This provides extended tc flower filter key support for the Sparx5 VCAP
functionality.

It builds on top of the initial IS2 VCAP support found in this series:

https://lore.kernel.org/all/20221020130904.1215072-1-steen.hegelund@microchip.com/

Overview:
=========

The following flower filter keys (dissectors) are now supported:

- ipv4_addr (sip and dip)
- ipv6_addr (sip and dip)
- control (IPv4 fragments)
- portnum (tcp and udp port numbers)
- basic (L3 and L4 protocol)
- vlan (outer vlan tag info)
- tcp (tcp flags)
- ip (tos field)

The IS2 VCAP matches on the classified VLAN information, which corresponds to
the outer VLAN tag when a frame carries multiple tags.

Functionality:
==============

Before frames can match IS2 VCAP rules with e.g. an IPv4 source address, the
IS2 VCAP's keyset configuration must include a keyset that contains an IPv4
source address, and this keyset must be configured for the
lookup/port/traffic-type that you want to match on.

The Sparx5 IS2 VCAP has the following traffic types:

- Non-Ethernet frames
- IPv4 Unicast frames
- IPv4 Multicast frames
- IPv6 Unicast frames
- IPv6 Multicast frames
- ARP frames

So to cover IPv4 traffic, the two IPv4 categories must be configured with a
keyset that contains IPv4 address information, such as the
VCAP_KFS_IP4_TCP_UDP keyset.

The IPv4 and IPv6 traffic types are configured with useful default keysets;
in a later series we will use the tc template functionality to change these
defaults when needed.

The flower filter must contain a goto action as its last action, and its goto
chain id must specify either the next lookup in a VCAP or a destination
outside the VCAP chain id ranges.
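
As a rough sketch of such a filter (the interface name and the chain numbers
below are only illustrative; they assume the first and second IS2 lookups are
reachable as chain 8000000 and chain 8100000):

  tc filter add dev eth12 ingress chain 8000000 prio 10 \
     protocol ip flower skip_sw dst_ip 10.1.1.1 ip_proto tcp dst_port 80 \
     action goto chain 8100000

This uses the ipv4_addr, basic and portnum keys listed above and ends with
the mandatory goto action.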

To activate the VCAP lookups on a port, you must add a TC matchall filter on
the port containing a single goto action that points to the chain id of the
first lookup in the IS2 VCAP.
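
A minimal sketch of that activation (again assuming the first IS2 lookup is
reachable as chain 8000000 and that eth12 is the port in question):

  tc qdisc add dev eth12 clsact
  tc filter add dev eth12 ingress prio 1 handle 1 matchall \
     skip_sw action goto chain 8000000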

From then on frames arriving on this port will be matched against the
rules in the IS2 VCAP lookups.

Removing the matchall filter will deactivate the IS2 lookups, but will leave
the VCAP rules in the memory of the VCAP instance; from then on frames will
no longer be matched against the rules in the IS2 VCAP.
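
E.g. (same assumptions as in the sketch above):

  tc filter del dev eth12 ingress prio 1 handle 1 matchall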

If the matchall filter is added back again, the IS2 rules will be active
once more.

Delivery:
=========

This is the current plan for delivering the full VCAP feature set of Sparx5:

- TC flower filter statistics and rule ordering by size and priority
- debugfs support for inspecting rules
- support for TC protocol all
- Sparx5 IS0 VCAP support
- add TC policer and drop action support (depends on the Sparx5 QoS support
  upstreamed separately)
- Sparx5 ES0 VCAP support
- TC flower template support
- TC matchall filter support for mirroring and policing ports
- TC flower filter mirror action support
- Sparx5 ES2 VCAP support

Version History:
================
v6      Rebased on the latest net-next master branch.
        No other implementation changes.

v5      Add support for a TC matchall filter with a single goto action
        which will activate the lookups of the VCAP.  Removing this filter
        will deactivate the VCAP lookups again.

v4      Add support for TC flower filter goto action and a check of the
        actions: check action combinations and the goto chain id.

v3      Add some more details to the explanation in the commit message
        about support for MAC_ETYPE keysets and "protocol all" as well as
        the classified VLAN information.  This is done to help testing the
        feature.
        No implementation changes in this version.

v2      Split one of the KUNIT tests into 3 tests to fix a kernel robot
        build warning.

v1      Initial version
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 573c3853 c956b9b3
......@@ -9,7 +9,7 @@ sparx5-switch-y := sparx5_main.o sparx5_packet.o \
sparx5_netdev.o sparx5_phylink.o sparx5_port.o sparx5_mactable.o sparx5_vlan.o \
sparx5_switchdev.o sparx5_calendar.o sparx5_ethtool.o sparx5_fdma.o \
sparx5_ptp.o sparx5_pgid.o sparx5_tc.o sparx5_qos.o \
sparx5_vcap_impl.o sparx5_vcap_ag_api.o sparx5_tc_flower.o
sparx5_vcap_impl.o sparx5_vcap_ag_api.o sparx5_tc_flower.o sparx5_tc_matchall.o
sparx5-switch-$(CONFIG_SPARX5_DCB) += sparx5_dcb.o
......
......@@ -19,9 +19,14 @@ static int sparx5_tc_block_cb(enum tc_setup_type type,
{
struct net_device *ndev = cb_priv;
if (type == TC_SETUP_CLSFLOWER)
switch (type) {
case TC_SETUP_CLSMATCHALL:
return sparx5_tc_matchall(ndev, type_data, ingress);
case TC_SETUP_CLSFLOWER:
return sparx5_tc_flower(ndev, type_data, ingress);
default:
return -EOPNOTSUPP;
}
}
static int sparx5_tc_block_cb_ingress(enum tc_setup_type type,
......
......@@ -8,6 +8,7 @@
#define __SPARX5_TC_H__
#include <net/flow_offload.h>
#include <net/pkt_cls.h>
#include <linux/netdevice.h>
/* Controls how PORT_MASK is applied */
......@@ -23,6 +24,10 @@ enum SPX5_PORT_MASK_MODE {
int sparx5_port_setup_tc(struct net_device *ndev, enum tc_setup_type type,
void *type_data);
int sparx5_tc_matchall(struct net_device *ndev,
struct tc_cls_matchall_offload *tmo,
bool ingress);
int sparx5_tc_flower(struct net_device *ndev, struct flow_cls_offload *fco,
bool ingress);
......
......@@ -16,9 +16,32 @@ struct sparx5_tc_flower_parse_usage {
struct flow_cls_offload *fco;
struct flow_rule *frule;
struct vcap_rule *vrule;
u16 l3_proto;
u8 l4_proto;
unsigned int used_keys;
};
/* These protocols have dedicated keysets in IS2 and a TC dissector
* ETH_P_ARP does not have a TC dissector
*/
static u16 sparx5_tc_known_etypes[] = {
ETH_P_ALL,
ETH_P_IP,
ETH_P_IPV6,
};
static bool sparx5_tc_is_known_etype(u16 etype)
{
int idx;
/* For now this only knows about IS2 traffic classification */
for (idx = 0; idx < ARRAY_SIZE(sparx5_tc_known_etypes); ++idx)
if (sparx5_tc_known_etypes[idx] == etype)
return true;
return false;
}
static int sparx5_tc_flower_handler_ethaddr_usage(struct sparx5_tc_flower_parse_usage *st)
{
enum vcap_key_field smac_key = VCAP_KF_L2_SMAC;
......@@ -54,18 +77,379 @@ static int sparx5_tc_flower_handler_ethaddr_usage(struct sparx5_tc_flower_parse_
return err;
}
static int
sparx5_tc_flower_handler_ipv4_usage(struct sparx5_tc_flower_parse_usage *st)
{
int err = 0;
if (st->l3_proto == ETH_P_IP) {
struct flow_match_ipv4_addrs mt;
flow_rule_match_ipv4_addrs(st->frule, &mt);
if (mt.mask->src) {
err = vcap_rule_add_key_u32(st->vrule,
VCAP_KF_L3_IP4_SIP,
be32_to_cpu(mt.key->src),
be32_to_cpu(mt.mask->src));
if (err)
goto out;
}
if (mt.mask->dst) {
err = vcap_rule_add_key_u32(st->vrule,
VCAP_KF_L3_IP4_DIP,
be32_to_cpu(mt.key->dst),
be32_to_cpu(mt.mask->dst));
if (err)
goto out;
}
}
st->used_keys |= BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS);
return err;
out:
NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ipv4_addr parse error");
return err;
}
static int
sparx5_tc_flower_handler_ipv6_usage(struct sparx5_tc_flower_parse_usage *st)
{
int err = 0;
if (st->l3_proto == ETH_P_IPV6) {
struct flow_match_ipv6_addrs mt;
struct vcap_u128_key sip;
struct vcap_u128_key dip;
flow_rule_match_ipv6_addrs(st->frule, &mt);
/* Check if address masks are non-zero */
if (!ipv6_addr_any(&mt.mask->src)) {
vcap_netbytes_copy(sip.value, mt.key->src.s6_addr, 16);
vcap_netbytes_copy(sip.mask, mt.mask->src.s6_addr, 16);
err = vcap_rule_add_key_u128(st->vrule,
VCAP_KF_L3_IP6_SIP, &sip);
if (err)
goto out;
}
if (!ipv6_addr_any(&mt.mask->dst)) {
vcap_netbytes_copy(dip.value, mt.key->dst.s6_addr, 16);
vcap_netbytes_copy(dip.mask, mt.mask->dst.s6_addr, 16);
err = vcap_rule_add_key_u128(st->vrule,
VCAP_KF_L3_IP6_DIP, &dip);
if (err)
goto out;
}
}
st->used_keys |= BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS);
return err;
out:
NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ipv6_addr parse error");
return err;
}
static int
sparx5_tc_flower_handler_control_usage(struct sparx5_tc_flower_parse_usage *st)
{
struct flow_match_control mt;
u32 value, mask;
int err = 0;
flow_rule_match_control(st->frule, &mt);
if (mt.mask->flags) {
if (mt.mask->flags & FLOW_DIS_FIRST_FRAG) {
if (mt.key->flags & FLOW_DIS_FIRST_FRAG) {
value = 1; /* initial fragment */
mask = 0x3;
} else {
if (mt.mask->flags & FLOW_DIS_IS_FRAGMENT) {
value = 3; /* follow up fragment */
mask = 0x3;
} else {
value = 0; /* no fragment */
mask = 0x3;
}
}
} else {
if (mt.mask->flags & FLOW_DIS_IS_FRAGMENT) {
value = 3; /* follow up fragment */
mask = 0x3;
} else {
value = 0; /* no fragment */
mask = 0x3;
}
}
err = vcap_rule_add_key_u32(st->vrule,
VCAP_KF_L3_FRAGMENT_TYPE,
value, mask);
if (err)
goto out;
}
st->used_keys |= BIT(FLOW_DISSECTOR_KEY_CONTROL);
return err;
out:
NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ip_frag parse error");
return err;
}
static int
sparx5_tc_flower_handler_portnum_usage(struct sparx5_tc_flower_parse_usage *st)
{
struct flow_match_ports mt;
u16 value, mask;
int err = 0;
flow_rule_match_ports(st->frule, &mt);
if (mt.mask->src) {
value = be16_to_cpu(mt.key->src);
mask = be16_to_cpu(mt.mask->src);
err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_L4_SPORT, value,
mask);
if (err)
goto out;
}
if (mt.mask->dst) {
value = be16_to_cpu(mt.key->dst);
mask = be16_to_cpu(mt.mask->dst);
err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_L4_DPORT, value,
mask);
if (err)
goto out;
}
st->used_keys |= BIT(FLOW_DISSECTOR_KEY_PORTS);
return err;
out:
NL_SET_ERR_MSG_MOD(st->fco->common.extack, "port parse error");
return err;
}
static int
sparx5_tc_flower_handler_basic_usage(struct sparx5_tc_flower_parse_usage *st)
{
struct flow_match_basic mt;
int err = 0;
flow_rule_match_basic(st->frule, &mt);
if (mt.mask->n_proto) {
st->l3_proto = be16_to_cpu(mt.key->n_proto);
if (!sparx5_tc_is_known_etype(st->l3_proto)) {
err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_ETYPE,
st->l3_proto, ~0);
if (err)
goto out;
} else if (st->l3_proto == ETH_P_IP) {
err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_IP4_IS,
VCAP_BIT_1);
if (err)
goto out;
} else if (st->l3_proto == ETH_P_IPV6) {
err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_IP4_IS,
VCAP_BIT_0);
if (err)
goto out;
}
}
if (mt.mask->ip_proto) {
st->l4_proto = mt.key->ip_proto;
if (st->l4_proto == IPPROTO_TCP) {
err = vcap_rule_add_key_bit(st->vrule,
VCAP_KF_TCP_IS,
VCAP_BIT_1);
if (err)
goto out;
} else if (st->l4_proto == IPPROTO_UDP) {
err = vcap_rule_add_key_bit(st->vrule,
VCAP_KF_TCP_IS,
VCAP_BIT_0);
if (err)
goto out;
} else {
err = vcap_rule_add_key_u32(st->vrule,
VCAP_KF_L3_IP_PROTO,
st->l4_proto, ~0);
if (err)
goto out;
}
}
st->used_keys |= BIT(FLOW_DISSECTOR_KEY_BASIC);
return err;
out:
NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ip_proto parse error");
return err;
}
static int
sparx5_tc_flower_handler_vlan_usage(struct sparx5_tc_flower_parse_usage *st)
{
enum vcap_key_field vid_key = VCAP_KF_8021Q_VID_CLS;
enum vcap_key_field pcp_key = VCAP_KF_8021Q_PCP_CLS;
struct flow_match_vlan mt;
int err;
flow_rule_match_vlan(st->frule, &mt);
if (mt.mask->vlan_id) {
err = vcap_rule_add_key_u32(st->vrule, vid_key,
mt.key->vlan_id,
mt.mask->vlan_id);
if (err)
goto out;
}
if (mt.mask->vlan_priority) {
err = vcap_rule_add_key_u32(st->vrule, pcp_key,
mt.key->vlan_priority,
mt.mask->vlan_priority);
if (err)
goto out;
}
st->used_keys |= BIT(FLOW_DISSECTOR_KEY_VLAN);
return err;
out:
NL_SET_ERR_MSG_MOD(st->fco->common.extack, "vlan parse error");
return err;
}
static int
sparx5_tc_flower_handler_tcp_usage(struct sparx5_tc_flower_parse_usage *st)
{
struct flow_match_tcp mt;
u16 tcp_flags_mask;
u16 tcp_flags_key;
enum vcap_bit val;
int err = 0;
flow_rule_match_tcp(st->frule, &mt);
tcp_flags_key = be16_to_cpu(mt.key->flags);
tcp_flags_mask = be16_to_cpu(mt.mask->flags);
if (tcp_flags_mask & TCPHDR_FIN) {
val = VCAP_BIT_0;
if (tcp_flags_key & TCPHDR_FIN)
val = VCAP_BIT_1;
err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_FIN, val);
if (err)
goto out;
}
if (tcp_flags_mask & TCPHDR_SYN) {
val = VCAP_BIT_0;
if (tcp_flags_key & TCPHDR_SYN)
val = VCAP_BIT_1;
err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_SYN, val);
if (err)
goto out;
}
if (tcp_flags_mask & TCPHDR_RST) {
val = VCAP_BIT_0;
if (tcp_flags_key & TCPHDR_RST)
val = VCAP_BIT_1;
err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_RST, val);
if (err)
goto out;
}
if (tcp_flags_mask & TCPHDR_PSH) {
val = VCAP_BIT_0;
if (tcp_flags_key & TCPHDR_PSH)
val = VCAP_BIT_1;
err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_PSH, val);
if (err)
goto out;
}
if (tcp_flags_mask & TCPHDR_ACK) {
val = VCAP_BIT_0;
if (tcp_flags_key & TCPHDR_ACK)
val = VCAP_BIT_1;
err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_ACK, val);
if (err)
goto out;
}
if (tcp_flags_mask & TCPHDR_URG) {
val = VCAP_BIT_0;
if (tcp_flags_key & TCPHDR_URG)
val = VCAP_BIT_1;
err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_URG, val);
if (err)
goto out;
}
st->used_keys |= BIT(FLOW_DISSECTOR_KEY_TCP);
return err;
out:
NL_SET_ERR_MSG_MOD(st->fco->common.extack, "tcp_flags parse error");
return err;
}
static int
sparx5_tc_flower_handler_ip_usage(struct sparx5_tc_flower_parse_usage *st)
{
struct flow_match_ip mt;
int err = 0;
flow_rule_match_ip(st->frule, &mt);
if (mt.mask->tos) {
err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_L3_TOS,
mt.key->tos,
mt.mask->tos);
if (err)
goto out;
}
st->used_keys |= BIT(FLOW_DISSECTOR_KEY_IP);
return err;
out:
NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ip_tos parse error");
return err;
}
static int (*sparx5_tc_flower_usage_handlers[])(struct sparx5_tc_flower_parse_usage *st) = {
/* More dissector handlers will be added here later */
[FLOW_DISSECTOR_KEY_ETH_ADDRS] = sparx5_tc_flower_handler_ethaddr_usage,
[FLOW_DISSECTOR_KEY_IPV4_ADDRS] = sparx5_tc_flower_handler_ipv4_usage,
[FLOW_DISSECTOR_KEY_IPV6_ADDRS] = sparx5_tc_flower_handler_ipv6_usage,
[FLOW_DISSECTOR_KEY_CONTROL] = sparx5_tc_flower_handler_control_usage,
[FLOW_DISSECTOR_KEY_PORTS] = sparx5_tc_flower_handler_portnum_usage,
[FLOW_DISSECTOR_KEY_BASIC] = sparx5_tc_flower_handler_basic_usage,
[FLOW_DISSECTOR_KEY_VLAN] = sparx5_tc_flower_handler_vlan_usage,
[FLOW_DISSECTOR_KEY_TCP] = sparx5_tc_flower_handler_tcp_usage,
[FLOW_DISSECTOR_KEY_IP] = sparx5_tc_flower_handler_ip_usage,
};
static int sparx5_tc_use_dissectors(struct flow_cls_offload *fco,
struct vcap_admin *admin,
struct vcap_rule *vrule)
struct vcap_rule *vrule,
u16 *l3_proto)
{
struct sparx5_tc_flower_parse_usage state = {
.fco = fco,
.vrule = vrule,
.l3_proto = ETH_P_ALL,
};
int idx, err = 0;
......@@ -79,9 +463,72 @@ static int sparx5_tc_use_dissectors(struct flow_cls_offload *fco,
if (err)
return err;
}
if (state.frule->match.dissector->used_keys ^ state.used_keys) {
NL_SET_ERR_MSG_MOD(fco->common.extack,
"Unsupported match item");
return -ENOENT;
}
if (l3_proto)
*l3_proto = state.l3_proto;
return err;
}
static int sparx5_tc_flower_action_check(struct vcap_control *vctrl,
struct flow_cls_offload *fco,
struct vcap_admin *admin)
{
struct flow_rule *rule = flow_cls_offload_flow_rule(fco);
struct flow_action_entry *actent, *last_actent = NULL;
struct flow_action *act = &rule->action;
u64 action_mask = 0;
int idx;
if (!flow_action_has_entries(act)) {
NL_SET_ERR_MSG_MOD(fco->common.extack, "No actions");
return -EINVAL;
}
if (!flow_action_basic_hw_stats_check(act, fco->common.extack))
return -EOPNOTSUPP;
flow_action_for_each(idx, actent, act) {
if (action_mask & BIT(actent->id)) {
NL_SET_ERR_MSG_MOD(fco->common.extack,
"More actions of the same type");
return -EINVAL;
}
action_mask |= BIT(actent->id);
last_actent = actent; /* Save last action for later check */
}
/* Check that last action is a goto */
if (last_actent->id != FLOW_ACTION_GOTO) {
NL_SET_ERR_MSG_MOD(fco->common.extack,
"Last action must be 'goto'");
return -EINVAL;
}
/* Check if the goto chain is in the next lookup */
if (!vcap_is_next_lookup(vctrl, fco->common.chain_index,
last_actent->chain_index)) {
NL_SET_ERR_MSG_MOD(fco->common.extack,
"Invalid goto chain");
return -EINVAL;
}
/* Catch unsupported combinations of actions */
if (action_mask & BIT(FLOW_ACTION_TRAP) &&
action_mask & BIT(FLOW_ACTION_ACCEPT)) {
NL_SET_ERR_MSG_MOD(fco->common.extack,
"Cannot combine pass and trap action");
return -EOPNOTSUPP;
}
return 0;
}
static int sparx5_tc_flower_replace(struct net_device *ndev,
struct flow_cls_offload *fco,
struct vcap_admin *admin)
......@@ -91,25 +538,23 @@ static int sparx5_tc_flower_replace(struct net_device *ndev,
struct vcap_control *vctrl;
struct flow_rule *frule;
struct vcap_rule *vrule;
u16 l3_proto;
int err, idx;
frule = flow_cls_offload_flow_rule(fco);
if (!flow_action_has_entries(&frule->action)) {
NL_SET_ERR_MSG_MOD(fco->common.extack, "No actions");
return -EINVAL;
}
vctrl = port->sparx5->vcap_ctrl;
if (!flow_action_basic_hw_stats_check(&frule->action, fco->common.extack))
return -EOPNOTSUPP;
err = sparx5_tc_flower_action_check(vctrl, fco, admin);
if (err)
return err;
vctrl = port->sparx5->vcap_ctrl;
vrule = vcap_alloc_rule(vctrl, ndev, fco->common.chain_index, VCAP_USER_TC,
fco->common.prio, 0);
if (IS_ERR(vrule))
return PTR_ERR(vrule);
vrule->cookie = fco->cookie;
sparx5_tc_use_dissectors(fco, admin, vrule);
sparx5_tc_use_dissectors(fco, admin, vrule, &l3_proto);
frule = flow_cls_offload_flow_rule(fco);
flow_action_for_each(idx, act, &frule->action) {
switch (act->id) {
case FLOW_ACTION_TRAP:
......@@ -139,6 +584,9 @@ static int sparx5_tc_flower_replace(struct net_device *ndev,
if (err)
goto out;
break;
case FLOW_ACTION_GOTO:
/* Links between VCAPs will be added later */
break;
default:
NL_SET_ERR_MSG_MOD(fco->common.extack,
"Unsupported TC action");
......@@ -146,14 +594,8 @@ static int sparx5_tc_flower_replace(struct net_device *ndev,
goto out;
}
}
/* For now the keyset is hardcoded */
err = vcap_set_rule_set_keyset(vrule, VCAP_KFS_MAC_ETYPE);
if (err) {
NL_SET_ERR_MSG_MOD(fco->common.extack,
"No matching port keyset for filter protocol and keys");
goto out;
}
err = vcap_val_rule(vrule, ETH_P_ALL);
/* provide the l3 protocol to guide the keyset selection */
err = vcap_val_rule(vrule, l3_proto);
if (err) {
vcap_set_tc_exterr(fco, vrule);
goto out;
......
// SPDX-License-Identifier: GPL-2.0+
/* Microchip VCAP API
*
* Copyright (c) 2022 Microchip Technology Inc. and its subsidiaries.
*/
#include "sparx5_tc.h"
#include "vcap_api.h"
#include "vcap_api_client.h"
#include "sparx5_main_regs.h"
#include "sparx5_main.h"
#include "sparx5_vcap_impl.h"
static int sparx5_tc_matchall_replace(struct net_device *ndev,
struct tc_cls_matchall_offload *tmo,
bool ingress)
{
struct sparx5_port *port = netdev_priv(ndev);
struct flow_action_entry *action;
struct sparx5 *sparx5;
int err;
if (!flow_offload_has_one_action(&tmo->rule->action)) {
NL_SET_ERR_MSG_MOD(tmo->common.extack,
"Only one action per filter is supported");
return -EOPNOTSUPP;
}
action = &tmo->rule->action.entries[0];
sparx5 = port->sparx5;
switch (action->id) {
case FLOW_ACTION_GOTO:
err = vcap_enable_lookups(sparx5->vcap_ctrl, ndev,
action->chain_index, tmo->cookie,
true);
if (err == -EFAULT) {
NL_SET_ERR_MSG_MOD(tmo->common.extack,
"Unsupported goto chain");
return -EOPNOTSUPP;
}
if (err == -EADDRINUSE) {
NL_SET_ERR_MSG_MOD(tmo->common.extack,
"VCAP already enabled");
return -EOPNOTSUPP;
}
if (err) {
NL_SET_ERR_MSG_MOD(tmo->common.extack,
"Could not enable VCAP lookups");
return err;
}
break;
default:
NL_SET_ERR_MSG_MOD(tmo->common.extack, "Unsupported action");
return -EOPNOTSUPP;
}
return 0;
}
static int sparx5_tc_matchall_destroy(struct net_device *ndev,
struct tc_cls_matchall_offload *tmo,
bool ingress)
{
struct sparx5_port *port = netdev_priv(ndev);
struct sparx5 *sparx5;
int err;
sparx5 = port->sparx5;
if (!tmo->rule && tmo->cookie) {
err = vcap_enable_lookups(sparx5->vcap_ctrl, ndev, 0,
tmo->cookie, false);
if (err)
return err;
return 0;
}
NL_SET_ERR_MSG_MOD(tmo->common.extack, "Unsupported action");
return -EOPNOTSUPP;
}
int sparx5_tc_matchall(struct net_device *ndev,
struct tc_cls_matchall_offload *tmo,
bool ingress)
{
if (!tc_cls_can_offload_and_chain0(ndev, &tmo->common)) {
NL_SET_ERR_MSG_MOD(tmo->common.extack,
"Only chain zero is supported");
return -EOPNOTSUPP;
}
switch (tmo->command) {
case TC_CLSMATCHALL_REPLACE:
return sparx5_tc_matchall_replace(ndev, tmo, ingress);
case TC_CLSMATCHALL_DESTROY:
return sparx5_tc_matchall_destroy(ndev, tmo, ingress);
default:
return -EOPNOTSUPP;
}
}
......@@ -21,6 +21,14 @@
#define STREAMSIZE (64 * 4) /* bytes in the VCAP cache area */
#define SPARX5_IS2_LOOKUPS 4
#define VCAP_IS2_KEYSEL(_ena, _noneth, _v4_mc, _v4_uc, _v6_mc, _v6_uc, _arp) \
(ANA_ACL_VCAP_S2_KEY_SEL_KEY_SEL_ENA_SET(_ena) | \
ANA_ACL_VCAP_S2_KEY_SEL_NON_ETH_KEY_SEL_SET(_noneth) | \
ANA_ACL_VCAP_S2_KEY_SEL_IP4_MC_KEY_SEL_SET(_v4_mc) | \
ANA_ACL_VCAP_S2_KEY_SEL_IP4_UC_KEY_SEL_SET(_v4_uc) | \
ANA_ACL_VCAP_S2_KEY_SEL_IP6_MC_KEY_SEL_SET(_v6_mc) | \
ANA_ACL_VCAP_S2_KEY_SEL_IP6_UC_KEY_SEL_SET(_v6_uc) | \
ANA_ACL_VCAP_S2_KEY_SEL_ARP_KEY_SEL_SET(_arp))
/* IS2 port keyset selection control */
......@@ -152,7 +160,7 @@ static const char *sparx5_vcap_keyset_name(struct net_device *ndev,
{
struct sparx5_port *port = netdev_priv(ndev);
return port->sparx5->vcap_ctrl->stats->keyfield_set_names[keyset];
return vcap_keyset_name(port->sparx5->vcap_ctrl, keyset);
}
/* Check if this is the first lookup of IS2 */
......@@ -196,6 +204,127 @@ static void sparx5_vcap_add_wide_port_mask(struct vcap_rule *rule,
vcap_rule_add_key_u72(rule, VCAP_KF_IF_IGR_PORT_MASK, &port_mask);
}
/* Convert chain id to vcap lookup id */
static int sparx5_vcap_cid_to_lookup(int cid)
{
int lookup = 0;
/* For now only handle IS2 */
if (cid >= SPARX5_VCAP_CID_IS2_L1 && cid < SPARX5_VCAP_CID_IS2_L2)
lookup = 1;
else if (cid >= SPARX5_VCAP_CID_IS2_L2 && cid < SPARX5_VCAP_CID_IS2_L3)
lookup = 2;
else if (cid >= SPARX5_VCAP_CID_IS2_L3 && cid < SPARX5_VCAP_CID_IS2_MAX)
lookup = 3;
return lookup;
}
/* Return the list of keysets for the vcap port configuration */
static int sparx5_vcap_is2_get_port_keysets(struct net_device *ndev,
int lookup,
struct vcap_keyset_list *keysetlist,
u16 l3_proto)
{
struct sparx5_port *port = netdev_priv(ndev);
struct sparx5 *sparx5 = port->sparx5;
int portno = port->portno;
u32 value;
/* Check if the port keyset selection is enabled */
value = spx5_rd(sparx5, ANA_ACL_VCAP_S2_KEY_SEL(portno, lookup));
if (!ANA_ACL_VCAP_S2_KEY_SEL_KEY_SEL_ENA_GET(value))
return -ENOENT;
/* Collect all keysets for the port in a list */
if (l3_proto == ETH_P_ALL || l3_proto == ETH_P_ARP) {
switch (ANA_ACL_VCAP_S2_KEY_SEL_ARP_KEY_SEL_GET(value)) {
case VCAP_IS2_PS_ARP_MAC_ETYPE:
vcap_keyset_list_add(keysetlist, VCAP_KFS_MAC_ETYPE);
break;
case VCAP_IS2_PS_ARP_ARP:
vcap_keyset_list_add(keysetlist, VCAP_KFS_ARP);
break;
}
}
if (l3_proto == ETH_P_ALL || l3_proto == ETH_P_IP) {
switch (ANA_ACL_VCAP_S2_KEY_SEL_IP4_UC_KEY_SEL_GET(value)) {
case VCAP_IS2_PS_IPV4_UC_MAC_ETYPE:
vcap_keyset_list_add(keysetlist, VCAP_KFS_MAC_ETYPE);
break;
case VCAP_IS2_PS_IPV4_UC_IP4_TCP_UDP_OTHER:
vcap_keyset_list_add(keysetlist, VCAP_KFS_IP4_TCP_UDP);
vcap_keyset_list_add(keysetlist, VCAP_KFS_IP4_OTHER);
break;
case VCAP_IS2_PS_IPV4_UC_IP_7TUPLE:
vcap_keyset_list_add(keysetlist, VCAP_KFS_IP_7TUPLE);
break;
}
switch (ANA_ACL_VCAP_S2_KEY_SEL_IP4_MC_KEY_SEL_GET(value)) {
case VCAP_IS2_PS_IPV4_MC_MAC_ETYPE:
vcap_keyset_list_add(keysetlist, VCAP_KFS_MAC_ETYPE);
break;
case VCAP_IS2_PS_IPV4_MC_IP4_TCP_UDP_OTHER:
vcap_keyset_list_add(keysetlist, VCAP_KFS_IP4_TCP_UDP);
vcap_keyset_list_add(keysetlist, VCAP_KFS_IP4_OTHER);
break;
case VCAP_IS2_PS_IPV4_MC_IP_7TUPLE:
vcap_keyset_list_add(keysetlist, VCAP_KFS_IP_7TUPLE);
break;
}
}
if (l3_proto == ETH_P_ALL || l3_proto == ETH_P_IPV6) {
switch (ANA_ACL_VCAP_S2_KEY_SEL_IP6_UC_KEY_SEL_GET(value)) {
case VCAP_IS2_PS_IPV6_UC_MAC_ETYPE:
vcap_keyset_list_add(keysetlist, VCAP_KFS_MAC_ETYPE);
break;
case VCAP_IS2_PS_IPV6_UC_IP_7TUPLE:
vcap_keyset_list_add(keysetlist, VCAP_KFS_IP_7TUPLE);
break;
case VCAP_IS2_PS_IPV6_UC_IP6_STD:
vcap_keyset_list_add(keysetlist, VCAP_KFS_IP6_STD);
break;
case VCAP_IS2_PS_IPV6_UC_IP4_TCP_UDP_OTHER:
vcap_keyset_list_add(keysetlist, VCAP_KFS_IP4_TCP_UDP);
vcap_keyset_list_add(keysetlist, VCAP_KFS_IP4_OTHER);
break;
}
switch (ANA_ACL_VCAP_S2_KEY_SEL_IP6_MC_KEY_SEL_GET(value)) {
case VCAP_IS2_PS_IPV6_MC_MAC_ETYPE:
vcap_keyset_list_add(keysetlist, VCAP_KFS_MAC_ETYPE);
break;
case VCAP_IS2_PS_IPV6_MC_IP_7TUPLE:
vcap_keyset_list_add(keysetlist, VCAP_KFS_IP_7TUPLE);
break;
case VCAP_IS2_PS_IPV6_MC_IP6_STD:
vcap_keyset_list_add(keysetlist, VCAP_KFS_IP6_STD);
break;
case VCAP_IS2_PS_IPV6_MC_IP4_TCP_UDP_OTHER:
vcap_keyset_list_add(keysetlist, VCAP_KFS_IP4_TCP_UDP);
vcap_keyset_list_add(keysetlist, VCAP_KFS_IP4_OTHER);
break;
case VCAP_IS2_PS_IPV6_MC_IP6_VID:
/* Not used */
break;
}
}
if (l3_proto != ETH_P_ARP && l3_proto != ETH_P_IP &&
l3_proto != ETH_P_IPV6) {
switch (ANA_ACL_VCAP_S2_KEY_SEL_NON_ETH_KEY_SEL_GET(value)) {
case VCAP_IS2_PS_NONETH_MAC_ETYPE:
/* IS2 non-classified frames generate MAC_ETYPE */
vcap_keyset_list_add(keysetlist, VCAP_KFS_MAC_ETYPE);
break;
}
}
return 0;
}
/* API callback used for validating a field keyset (check the port keysets) */
static enum vcap_keyfield_set
sparx5_vcap_validate_keyset(struct net_device *ndev,
......@@ -204,10 +333,30 @@ sparx5_vcap_validate_keyset(struct net_device *ndev,
struct vcap_keyset_list *kslist,
u16 l3_proto)
{
struct vcap_keyset_list keysetlist = {};
enum vcap_keyfield_set keysets[10] = {};
int idx, jdx, lookup;
if (!kslist || kslist->cnt == 0)
return VCAP_KFS_NO_VALUE;
/* for now just return whatever the API suggests */
return kslist->keysets[0];
/* Get a list of currently configured keysets in the lookups */
lookup = sparx5_vcap_cid_to_lookup(rule->vcap_chain_id);
keysetlist.max = ARRAY_SIZE(keysets);
keysetlist.keysets = keysets;
sparx5_vcap_is2_get_port_keysets(ndev, lookup, &keysetlist, l3_proto);
/* Check if there is a match and return the match */
for (idx = 0; idx < kslist->cnt; ++idx)
for (jdx = 0; jdx < keysetlist.cnt; ++jdx)
if (kslist->keysets[idx] == keysets[jdx])
return kslist->keysets[idx];
pr_err("%s:%d: %s not supported in port key selection\n",
__func__, __LINE__,
sparx5_vcap_keyset_name(ndev, kslist->keysets[0]));
return -ENOENT;
}
/* API callback used for adding default fields to a rule */
......@@ -340,6 +489,28 @@ static int sparx5_port_info(struct net_device *ndev, enum vcap_type vtype,
return 0;
}
/* Enable all lookups in the VCAP instance */
static int sparx5_vcap_enable(struct net_device *ndev,
struct vcap_admin *admin,
bool enable)
{
struct sparx5_port *port = netdev_priv(ndev);
struct sparx5 *sparx5;
int portno;
sparx5 = port->sparx5;
portno = port->portno;
/* For now we only consider IS2 */
if (enable)
spx5_wr(ANA_ACL_VCAP_S2_CFG_SEC_ENA_SET(0xf), sparx5,
ANA_ACL_VCAP_S2_CFG(portno));
else
spx5_wr(ANA_ACL_VCAP_S2_CFG_SEC_ENA_SET(0), sparx5,
ANA_ACL_VCAP_S2_CFG(portno));
return 0;
}
/* API callback operations: only IS2 is supported for now */
static struct vcap_operations sparx5_vcap_ops = {
.validate_keyset = sparx5_vcap_validate_keyset,
......@@ -351,6 +522,7 @@ static struct vcap_operations sparx5_vcap_ops = {
.update = sparx5_vcap_update,
.move = sparx5_vcap_move,
.port_info = sparx5_port_info,
.enable = sparx5_vcap_enable,
};
/* Enable lookups per port and set the keyset generation: only IS2 for now */
......@@ -360,21 +532,15 @@ static void sparx5_vcap_port_key_selection(struct sparx5 *sparx5,
int portno, lookup;
u32 keysel;
/* enable all 4 lookups on all ports */
for (portno = 0; portno < SPX5_PORTS; ++portno)
spx5_wr(ANA_ACL_VCAP_S2_CFG_SEC_ENA_SET(0xf), sparx5,
ANA_ACL_VCAP_S2_CFG(portno));
/* all traffic types generate the MAC_ETYPE keyset for now in all
* lookups on all ports
*/
keysel = ANA_ACL_VCAP_S2_KEY_SEL_KEY_SEL_ENA_SET(true) |
ANA_ACL_VCAP_S2_KEY_SEL_NON_ETH_KEY_SEL_SET(VCAP_IS2_PS_NONETH_MAC_ETYPE) |
ANA_ACL_VCAP_S2_KEY_SEL_IP4_MC_KEY_SEL_SET(VCAP_IS2_PS_IPV4_MC_MAC_ETYPE) |
ANA_ACL_VCAP_S2_KEY_SEL_IP4_UC_KEY_SEL_SET(VCAP_IS2_PS_IPV4_UC_MAC_ETYPE) |
ANA_ACL_VCAP_S2_KEY_SEL_IP6_MC_KEY_SEL_SET(VCAP_IS2_PS_IPV6_MC_MAC_ETYPE) |
ANA_ACL_VCAP_S2_KEY_SEL_IP6_UC_KEY_SEL_SET(VCAP_IS2_PS_IPV6_UC_MAC_ETYPE) |
ANA_ACL_VCAP_S2_KEY_SEL_ARP_KEY_SEL_SET(VCAP_IS2_PS_ARP_MAC_ETYPE);
keysel = VCAP_IS2_KEYSEL(true, VCAP_IS2_PS_NONETH_MAC_ETYPE,
VCAP_IS2_PS_IPV4_MC_IP4_TCP_UDP_OTHER,
VCAP_IS2_PS_IPV4_UC_IP4_TCP_UDP_OTHER,
VCAP_IS2_PS_IPV6_MC_IP_7TUPLE,
VCAP_IS2_PS_IPV6_UC_IP_7TUPLE,
VCAP_IS2_PS_ARP_MAC_ETYPE);
for (lookup = 0; lookup < admin->lookups; ++lookup) {
for (portno = 0; portno < SPX5_PORTS; ++portno) {
spx5_wr(keysel, sparx5,
......@@ -418,6 +584,7 @@ sparx5_vcap_admin_alloc(struct sparx5 *sparx5, struct vcap_control *ctrl,
return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&admin->list);
INIT_LIST_HEAD(&admin->rules);
INIT_LIST_HEAD(&admin->enabled);
admin->vtype = cfg->vtype;
admin->vinst = cfg->vinst;
admin->lookups = cfg->lookups;
......
......@@ -44,6 +44,13 @@ struct vcap_stream_iter {
const struct vcap_typegroup *tg; /* current typegroup */
};
/* Stores the filter cookie that enabled the port */
struct vcap_enabled_port {
struct list_head list; /* for insertion in enabled ports list */
struct net_device *ndev; /* the enabled port */
unsigned long cookie; /* filter that enabled the port */
};
static void vcap_iter_set(struct vcap_stream_iter *itr, int sw_width,
const struct vcap_typegroup *tg, u32 offset)
{
......@@ -516,7 +523,7 @@ static int vcap_api_check(struct vcap_control *ctrl)
!ctrl->ops->add_default_fields || !ctrl->ops->cache_erase ||
!ctrl->ops->cache_write || !ctrl->ops->cache_read ||
!ctrl->ops->init || !ctrl->ops->update || !ctrl->ops->move ||
!ctrl->ops->port_info) {
!ctrl->ops->port_info || !ctrl->ops->enable) {
pr_err("%s:%d: client operations are missing\n",
__func__, __LINE__);
return -ENOENT;
......@@ -644,6 +651,23 @@ static int vcap_write_rule(struct vcap_rule_internal *ri)
return 0;
}
/* Convert a chain id to a VCAP lookup index */
int vcap_chain_id_to_lookup(struct vcap_admin *admin, int cur_cid)
{
int lookup_first = admin->vinst * admin->lookups_per_instance;
int lookup_last = lookup_first + admin->lookups_per_instance;
int cid_next = admin->first_cid + VCAP_CID_LOOKUP_SIZE;
int cid = admin->first_cid;
int lookup;
for (lookup = lookup_first; lookup < lookup_last; ++lookup,
cid += VCAP_CID_LOOKUP_SIZE, cid_next += VCAP_CID_LOOKUP_SIZE)
if (cur_cid >= cid && cur_cid < cid_next)
return lookup;
return 0;
}
EXPORT_SYMBOL_GPL(vcap_chain_id_to_lookup);
/* Lookup a vcap instance using chain id */
struct vcap_admin *vcap_find_admin(struct vcap_control *vctrl, int cid)
{
......@@ -660,6 +684,42 @@ struct vcap_admin *vcap_find_admin(struct vcap_control *vctrl, int cid)
}
EXPORT_SYMBOL_GPL(vcap_find_admin);
/* Is the next chain id in the following lookup, possible in another VCAP */
bool vcap_is_next_lookup(struct vcap_control *vctrl, int cur_cid, int next_cid)
{
struct vcap_admin *admin, *next_admin;
int lookup, next_lookup;
/* The offset must be at least one lookup */
if (next_cid < cur_cid + VCAP_CID_LOOKUP_SIZE)
return false;
if (vcap_api_check(vctrl))
return false;
admin = vcap_find_admin(vctrl, cur_cid);
if (!admin)
return false;
/* If no VCAP contains the next chain, the next chain must be beyond
* the last chain in the current VCAP
*/
next_admin = vcap_find_admin(vctrl, next_cid);
if (!next_admin)
return next_cid > admin->last_cid;
lookup = vcap_chain_id_to_lookup(admin, cur_cid);
next_lookup = vcap_chain_id_to_lookup(next_admin, next_cid);
/* Next lookup must be the following lookup */
if (admin == next_admin || admin->vtype == next_admin->vtype)
return next_lookup == lookup + 1;
/* Must be the first lookup in the next VCAP instance */
return next_lookup == 0;
}
EXPORT_SYMBOL_GPL(vcap_is_next_lookup);
/* Check if there is room for a new rule */
static int vcap_rule_space(struct vcap_admin *admin, int size)
{
......@@ -704,15 +764,122 @@ static int vcap_add_type_keyfield(struct vcap_rule *rule)
return 0;
}
/* Add a keyset to a keyset list */
bool vcap_keyset_list_add(struct vcap_keyset_list *keysetlist,
enum vcap_keyfield_set keyset)
{
int idx;
if (keysetlist->cnt < keysetlist->max) {
/* Avoid duplicates */
for (idx = 0; idx < keysetlist->cnt; ++idx)
if (keysetlist->keysets[idx] == keyset)
return keysetlist->cnt < keysetlist->max;
keysetlist->keysets[keysetlist->cnt++] = keyset;
}
return keysetlist->cnt < keysetlist->max;
}
EXPORT_SYMBOL_GPL(vcap_keyset_list_add);
/* map keyset id to a string with the keyset name */
const char *vcap_keyset_name(struct vcap_control *vctrl,
enum vcap_keyfield_set keyset)
{
return vctrl->stats->keyfield_set_names[keyset];
}
EXPORT_SYMBOL_GPL(vcap_keyset_name);
/* map key field id to a string with the key name */
const char *vcap_keyfield_name(struct vcap_control *vctrl,
enum vcap_key_field key)
{
return vctrl->stats->keyfield_names[key];
}
EXPORT_SYMBOL_GPL(vcap_keyfield_name);
/* map action field id to a string with the action name */
static const char *vcap_actionfield_name(struct vcap_control *vctrl,
enum vcap_action_field action)
{
return vctrl->stats->actionfield_names[action];
}
/* Return the keyfield that matches a key in a keyset */
static const struct vcap_field *
vcap_find_keyset_keyfield(struct vcap_control *vctrl,
enum vcap_type vtype,
enum vcap_keyfield_set keyset,
enum vcap_key_field key)
{
const struct vcap_field *fields;
int idx, count;
fields = vcap_keyfields(vctrl, vtype, keyset);
if (!fields)
return NULL;
/* Iterate the keyfields of the keyset */
count = vcap_keyfield_count(vctrl, vtype, keyset);
for (idx = 0; idx < count; ++idx) {
if (fields[idx].width == 0)
continue;
if (key == idx)
return &fields[idx];
}
return NULL;
}
/* Match a list of keys against the keysets available in a vcap type */
static bool vcap_rule_find_keysets(struct vcap_rule_internal *ri,
struct vcap_keyset_list *matches)
{
const struct vcap_client_keyfield *ckf;
int keyset, found, keycount, map_size;
const struct vcap_field **map;
enum vcap_type vtype;
vtype = ri->admin->vtype;
map = ri->vctrl->vcaps[vtype].keyfield_set_map;
map_size = ri->vctrl->vcaps[vtype].keyfield_set_size;
/* Get a count of the keyfields we want to match */
keycount = 0;
list_for_each_entry(ckf, &ri->data.keyfields, ctrl.list)
++keycount;
matches->cnt = 0;
/* Iterate the keysets of the VCAP */
for (keyset = 0; keyset < map_size; ++keyset) {
if (!map[keyset])
continue;
/* Iterate the keys in the rule */
found = 0;
list_for_each_entry(ckf, &ri->data.keyfields, ctrl.list)
if (vcap_find_keyset_keyfield(ri->vctrl, vtype,
keyset, ckf->ctrl.key))
++found;
/* Save the keyset if all keyfields were found */
if (found == keycount)
if (!vcap_keyset_list_add(matches, keyset))
/* bail out when the quota is filled */
break;
}
return matches->cnt > 0;
}
/* Validate a rule with respect to available port keys */
int vcap_val_rule(struct vcap_rule *rule, u16 l3_proto)
{
struct vcap_rule_internal *ri = to_intrule(rule);
struct vcap_keyset_list matches = {};
enum vcap_keyfield_set keysets[10];
struct vcap_keyset_list kslist;
int ret;
/* This validation will be much expanded later */
ret = vcap_api_check(ri->vctrl);
if (ret)
return ret;
......@@ -724,24 +891,41 @@ int vcap_val_rule(struct vcap_rule *rule, u16 l3_proto)
ri->data.exterr = VCAP_ERR_NO_NETDEV;
return -EINVAL;
}
matches.keysets = keysets;
matches.max = ARRAY_SIZE(keysets);
if (ri->data.keyset == VCAP_KFS_NO_VALUE) {
/* Iterate over rule keyfields and select keysets that fits */
if (!vcap_rule_find_keysets(ri, &matches)) {
ri->data.exterr = VCAP_ERR_NO_KEYSET_MATCH;
return -EINVAL;
}
} else {
/* prepare for keyset validation */
keysets[0] = ri->data.keyset;
kslist.keysets = keysets;
kslist.cnt = 1;
matches.cnt = 1;
}
/* Pick a keyset that is supported in the port lookups */
ret = ri->vctrl->ops->validate_keyset(ri->ndev, ri->admin, rule, &kslist,
l3_proto);
ret = ri->vctrl->ops->validate_keyset(ri->ndev, ri->admin, rule,
&matches, l3_proto);
if (ret < 0) {
pr_err("%s:%d: keyset validation failed: %d\n",
__func__, __LINE__, ret);
ri->data.exterr = VCAP_ERR_NO_PORT_KEYSET_MATCH;
return ret;
}
/* use the keyset that is supported in the port lookups */
ret = vcap_set_rule_set_keyset(rule, ret);
if (ret < 0) {
pr_err("%s:%d: keyset was not updated: %d\n",
__func__, __LINE__, ret);
return ret;
}
if (ri->data.actionset == VCAP_AFS_NO_VALUE) {
/* Later also actionsets will be matched against actions in
* the rule, and the type will be set accordingly
*/
ri->data.exterr = VCAP_ERR_NO_ACTIONSET_MATCH;
return -EINVAL;
}
......@@ -951,6 +1135,7 @@ EXPORT_SYMBOL_GPL(vcap_del_rule);
/* Delete all rules in the VCAP instance */
int vcap_del_rules(struct vcap_control *vctrl, struct vcap_admin *admin)
{
struct vcap_enabled_port *eport, *next_eport;
struct vcap_rule_internal *ri, *next_ri;
int ret = vcap_api_check(vctrl);
......@@ -962,6 +1147,13 @@ int vcap_del_rules(struct vcap_control *vctrl, struct vcap_admin *admin)
kfree(ri);
}
admin->last_used_addr = admin->last_valid_addr;
/* Remove list of enabled ports */
list_for_each_entry_safe(eport, next_eport, &admin->enabled, list) {
list_del(&eport->list);
kfree(eport);
}
return 0;
}
EXPORT_SYMBOL_GPL(vcap_del_rules);
......@@ -992,14 +1184,60 @@ static void vcap_copy_from_client_keyfield(struct vcap_rule *rule,
memcpy(&field->data, data, sizeof(field->data));
}
/* Check if the keyfield is already in the rule */
static bool vcap_keyfield_unique(struct vcap_rule *rule,
enum vcap_key_field key)
{
struct vcap_rule_internal *ri = to_intrule(rule);
const struct vcap_client_keyfield *ckf;
list_for_each_entry(ckf, &ri->data.keyfields, ctrl.list)
if (ckf->ctrl.key == key)
return false;
return true;
}
/* Check if the keyfield is in the keyset */
static bool vcap_keyfield_match_keyset(struct vcap_rule *rule,
enum vcap_key_field key)
{
struct vcap_rule_internal *ri = to_intrule(rule);
enum vcap_keyfield_set keyset = rule->keyset;
enum vcap_type vt = ri->admin->vtype;
const struct vcap_field *fields;
/* the field is accepted if the rule has no keyset yet */
if (keyset == VCAP_KFS_NO_VALUE)
return true;
fields = vcap_keyfields(ri->vctrl, vt, keyset);
if (!fields)
return false;
/* if there is a width there is a way */
return fields[key].width > 0;
}
static int vcap_rule_add_key(struct vcap_rule *rule,
enum vcap_key_field key,
enum vcap_field_type ftype,
struct vcap_client_keyfield_data *data)
{
struct vcap_rule_internal *ri = to_intrule(rule);
struct vcap_client_keyfield *field;
/* More validation will be added here later */
if (!vcap_keyfield_unique(rule, key)) {
pr_warn("%s:%d: keyfield %s is already in the rule\n",
__func__, __LINE__,
vcap_keyfield_name(ri->vctrl, key));
return -EINVAL;
}
if (!vcap_keyfield_match_keyset(rule, key)) {
pr_err("%s:%d: keyfield %s does not belong in the rule keyset\n",
__func__, __LINE__,
vcap_keyfield_name(ri->vctrl, key));
return -EINVAL;
}
field = kzalloc(sizeof(*field), GFP_KERNEL);
if (!field)
return -ENOMEM;
......@@ -1073,6 +1311,17 @@ int vcap_rule_add_key_u72(struct vcap_rule *rule, enum vcap_key_field key,
}
EXPORT_SYMBOL_GPL(vcap_rule_add_key_u72);
/* Add a 128 bit key with value and mask to the rule */
int vcap_rule_add_key_u128(struct vcap_rule *rule, enum vcap_key_field key,
struct vcap_u128_key *fieldval)
{
struct vcap_client_keyfield_data data;
memcpy(&data.u128, fieldval, sizeof(data.u128));
return vcap_rule_add_key(rule, key, VCAP_FIELD_U128, &data);
}
EXPORT_SYMBOL_GPL(vcap_rule_add_key_u128);
static void vcap_copy_from_client_actionfield(struct vcap_rule *rule,
struct vcap_client_actionfield *field,
struct vcap_client_actionfield_data *data)
......@@ -1081,14 +1330,60 @@ static void vcap_copy_from_client_actionfield(struct vcap_rule *rule,
memcpy(&field->data, data, sizeof(field->data));
}
/* Check if the actionfield is already in the rule */
static bool vcap_actionfield_unique(struct vcap_rule *rule,
enum vcap_action_field act)
{
struct vcap_rule_internal *ri = to_intrule(rule);
const struct vcap_client_actionfield *caf;
list_for_each_entry(caf, &ri->data.actionfields, ctrl.list)
if (caf->ctrl.action == act)
return false;
return true;
}
/* Check if the actionfield is in the actionset */
static bool vcap_actionfield_match_actionset(struct vcap_rule *rule,
enum vcap_action_field action)
{
enum vcap_actionfield_set actionset = rule->actionset;
struct vcap_rule_internal *ri = to_intrule(rule);
enum vcap_type vt = ri->admin->vtype;
const struct vcap_field *fields;
/* the field is accepted if the rule has no actionset yet */
if (actionset == VCAP_AFS_NO_VALUE)
return true;
fields = vcap_actionfields(ri->vctrl, vt, actionset);
if (!fields)
return false;
/* if there is a width there is a way */
return fields[action].width > 0;
}
static int vcap_rule_add_action(struct vcap_rule *rule,
enum vcap_action_field action,
enum vcap_field_type ftype,
struct vcap_client_actionfield_data *data)
{
struct vcap_rule_internal *ri = to_intrule(rule);
struct vcap_client_actionfield *field;
/* More validation will be added here later */
if (!vcap_actionfield_unique(rule, action)) {
pr_warn("%s:%d: actionfield %s is already in the rule\n",
__func__, __LINE__,
vcap_actionfield_name(ri->vctrl, action));
return -EINVAL;
}
if (!vcap_actionfield_match_actionset(rule, action)) {
pr_err("%s:%d: actionfield %s does not belong in the rule actionset\n",
__func__, __LINE__,
vcap_actionfield_name(ri->vctrl, action));
return -EINVAL;
}
field = kzalloc(sizeof(*field), GFP_KERNEL);
if (!field)
return -ENOMEM;
......@@ -1179,6 +1474,109 @@ void vcap_set_tc_exterr(struct flow_cls_offload *fco, struct vcap_rule *vrule)
}
EXPORT_SYMBOL_GPL(vcap_set_tc_exterr);
/* Check if this port is already enabled for this VCAP instance */
static bool vcap_is_enabled(struct vcap_admin *admin, struct net_device *ndev,
unsigned long cookie)
{
struct vcap_enabled_port *eport;
list_for_each_entry(eport, &admin->enabled, list)
if (eport->cookie == cookie || eport->ndev == ndev)
return true;
return false;
}
/* Enable this port for this VCAP instance */
static int vcap_enable(struct vcap_admin *admin, struct net_device *ndev,
unsigned long cookie)
{
struct vcap_enabled_port *eport;
eport = kzalloc(sizeof(*eport), GFP_KERNEL);
if (!eport)
return -ENOMEM;
eport->ndev = ndev;
eport->cookie = cookie;
list_add_tail(&eport->list, &admin->enabled);
return 0;
}
/* Disable this port for this VCAP instance */
static int vcap_disable(struct vcap_admin *admin, struct net_device *ndev,
unsigned long cookie)
{
struct vcap_enabled_port *eport;
list_for_each_entry(eport, &admin->enabled, list) {
if (eport->cookie == cookie && eport->ndev == ndev) {
list_del(&eport->list);
kfree(eport);
return 0;
}
}
return -ENOENT;
}
/* Find the VCAP instance that enabled the port using a specific filter */
static struct vcap_admin *vcap_find_admin_by_cookie(struct vcap_control *vctrl,
unsigned long cookie)
{
struct vcap_enabled_port *eport;
struct vcap_admin *admin;
list_for_each_entry(admin, &vctrl->list, list)
list_for_each_entry(eport, &admin->enabled, list)
if (eport->cookie == cookie)
return admin;
return NULL;
}
/* Enable/Disable the VCAP instance lookups. Chain id 0 means disable */
int vcap_enable_lookups(struct vcap_control *vctrl, struct net_device *ndev,
int chain_id, unsigned long cookie, bool enable)
{
struct vcap_admin *admin;
int err;
err = vcap_api_check(vctrl);
if (err)
return err;
if (!ndev)
return -ENODEV;
if (chain_id)
admin = vcap_find_admin(vctrl, chain_id);
else
admin = vcap_find_admin_by_cookie(vctrl, cookie);
if (!admin)
return -ENOENT;
/* first instance and first chain */
if (admin->vinst || chain_id > admin->first_cid)
return -EFAULT;
err = vctrl->ops->enable(ndev, admin, enable);
if (err)
return err;
if (chain_id) {
if (vcap_is_enabled(admin, ndev, cookie))
return -EADDRINUSE;
vcap_enable(admin, ndev, cookie);
} else {
vcap_disable(admin, ndev, cookie);
}
return 0;
}
EXPORT_SYMBOL_GPL(vcap_enable_lookups);
#ifdef CONFIG_VCAP_KUNIT_TEST
#include "vcap_api_kunit.c"
#endif
......@@ -166,6 +166,7 @@ enum vcap_rule_error {
struct vcap_admin {
struct list_head list; /* for insertion in vcap_control */
struct list_head rules; /* list of rules */
struct list_head enabled; /* list of enabled ports */
enum vcap_type vtype; /* type of vcap */
int vinst; /* instance number within the same type */
int first_cid; /* first chain id in this vcap */
......@@ -255,6 +256,11 @@ struct vcap_operations {
int (*pf)(void *out, int arg, const char *fmt, ...),
void *out,
int arg);
/* enable/disable the lookups in a vcap instance */
int (*enable)
(struct net_device *ndev,
struct vcap_admin *admin,
bool enable);
};
/* VCAP API Client control interface */
......
......@@ -143,6 +143,10 @@ enum vcap_bit {
VCAP_BIT_1
};
/* Enable/Disable the VCAP instance lookups. Chain id 0 means disable */
int vcap_enable_lookups(struct vcap_control *vctrl, struct net_device *ndev,
int chain_id, unsigned long cookie, bool enable);
/* VCAP rule operations */
/* Allocate a rule and fill in the basic information */
struct vcap_rule *vcap_alloc_rule(struct vcap_control *vctrl,
......@@ -176,12 +180,16 @@ int vcap_rule_add_key_u48(struct vcap_rule *rule, enum vcap_key_field key,
struct vcap_u48_key *fieldval);
int vcap_rule_add_key_u72(struct vcap_rule *rule, enum vcap_key_field key,
struct vcap_u72_key *fieldval);
int vcap_rule_add_key_u128(struct vcap_rule *rule, enum vcap_key_field key,
struct vcap_u128_key *fieldval);
int vcap_rule_add_action_bit(struct vcap_rule *rule,
enum vcap_action_field action, enum vcap_bit val);
int vcap_rule_add_action_u32(struct vcap_rule *rule,
enum vcap_action_field action, u32 value);
/* VCAP lookup operations */
/* Convert a chain id to a VCAP lookup index */
int vcap_chain_id_to_lookup(struct vcap_admin *admin, int cur_cid);
/* Lookup a vcap instance using chain id */
struct vcap_admin *vcap_find_admin(struct vcap_control *vctrl, int cid);
/* Find information on a key field in a rule */
......@@ -189,6 +197,8 @@ const struct vcap_field *vcap_lookup_keyfield(struct vcap_rule *rule,
enum vcap_key_field key);
/* Find a rule id with a provided cookie */
int vcap_lookup_rule_by_cookie(struct vcap_control *vctrl, u64 cookie);
/* Is the next chain id in the following lookup, possible in another VCAP */
bool vcap_is_next_lookup(struct vcap_control *vctrl, int cur_cid, int next_cid);
/* Copy to host byte order */
void vcap_netbytes_copy(u8 *dst, u8 *src, int count);
......@@ -199,4 +209,15 @@ void vcap_set_tc_exterr(struct flow_cls_offload *fco, struct vcap_rule *vrule);
/* Cleanup a VCAP instance */
int vcap_del_rules(struct vcap_control *vctrl, struct vcap_admin *admin);
/* Add a keyset to a keyset list */
bool vcap_keyset_list_add(struct vcap_keyset_list *keysetlist,
enum vcap_keyfield_set keyset);
/* map keyset id to a string with the keyset name */
const char *vcap_keyset_name(struct vcap_control *vctrl,
enum vcap_keyfield_set keyset);
/* map key field id to a string with the key name */
const char *vcap_keyfield_name(struct vcap_control *vctrl,
enum vcap_key_field key);
#endif /* __VCAP_API_CLIENT__ */
......@@ -22,6 +22,7 @@ static u32 test_init_start;
static u32 test_init_count;
static u32 test_hw_counter_id;
static struct vcap_cache_data test_hw_cache;
static struct net_device test_netdev = {};
/* Callback used by the VCAP API */
static enum vcap_keyfield_set test_val_keyset(struct net_device *ndev,
......@@ -204,6 +205,13 @@ static int vcap_test_port_info(struct net_device *ndev, enum vcap_type vtype,
return 0;
}
static int vcap_test_enable(struct net_device *ndev,
struct vcap_admin *admin,
bool enable)
{
return 0;
}
static struct vcap_operations test_callbacks = {
.validate_keyset = test_val_keyset,
.add_default_fields = test_add_def_fields,
......@@ -214,6 +222,7 @@ static struct vcap_operations test_callbacks = {
.update = test_cache_update,
.move = test_cache_move,
.port_info = vcap_test_port_info,
.enable = vcap_test_enable,
};
static struct vcap_control test_vctrl = {
......@@ -904,6 +913,586 @@ static void vcap_api_encode_rule_actionset_test(struct kunit *test)
KUNIT_EXPECT_EQ(test, (u32)0x00000000, actwords[11]);
}
static void vcap_api_rule_add_keyvalue_test(struct kunit *test)
{
struct vcap_admin admin = {
.vtype = VCAP_TYPE_IS2,
};
struct vcap_rule_internal ri = {
.admin = &admin,
.data = {
.keyset = VCAP_KFS_NO_VALUE,
},
.vctrl = &test_vctrl,
};
struct vcap_rule *rule = (struct vcap_rule *)&ri;
struct vcap_client_keyfield *kf;
int ret;
struct vcap_u128_key dip = {
.value = {0x17, 0x26, 0x35, 0x44, 0x63, 0x62, 0x71},
.mask = {0xf1, 0xf2, 0xf3, 0xf4, 0x4f, 0x3f, 0x2f, 0x1f},
};
int idx;
INIT_LIST_HEAD(&rule->keyfields);
ret = vcap_rule_add_key_bit(rule, VCAP_KF_LOOKUP_FIRST_IS, VCAP_BIT_0);
KUNIT_EXPECT_EQ(test, 0, ret);
ret = list_empty(&rule->keyfields);
KUNIT_EXPECT_EQ(test, false, ret);
kf = list_first_entry(&rule->keyfields, struct vcap_client_keyfield,
ctrl.list);
KUNIT_EXPECT_EQ(test, VCAP_KF_LOOKUP_FIRST_IS, kf->ctrl.key);
KUNIT_EXPECT_EQ(test, VCAP_FIELD_BIT, kf->ctrl.type);
KUNIT_EXPECT_EQ(test, 0x0, kf->data.u1.value);
KUNIT_EXPECT_EQ(test, 0x1, kf->data.u1.mask);
INIT_LIST_HEAD(&rule->keyfields);
ret = vcap_rule_add_key_bit(rule, VCAP_KF_LOOKUP_FIRST_IS, VCAP_BIT_1);
KUNIT_EXPECT_EQ(test, 0, ret);
ret = list_empty(&rule->keyfields);
KUNIT_EXPECT_EQ(test, false, ret);
kf = list_first_entry(&rule->keyfields, struct vcap_client_keyfield,
ctrl.list);
KUNIT_EXPECT_EQ(test, VCAP_KF_LOOKUP_FIRST_IS, kf->ctrl.key);
KUNIT_EXPECT_EQ(test, VCAP_FIELD_BIT, kf->ctrl.type);
KUNIT_EXPECT_EQ(test, 0x1, kf->data.u1.value);
KUNIT_EXPECT_EQ(test, 0x1, kf->data.u1.mask);
INIT_LIST_HEAD(&rule->keyfields);
ret = vcap_rule_add_key_bit(rule, VCAP_KF_LOOKUP_FIRST_IS,
VCAP_BIT_ANY);
KUNIT_EXPECT_EQ(test, 0, ret);
ret = list_empty(&rule->keyfields);
KUNIT_EXPECT_EQ(test, false, ret);
kf = list_first_entry(&rule->keyfields, struct vcap_client_keyfield,
ctrl.list);
KUNIT_EXPECT_EQ(test, VCAP_KF_LOOKUP_FIRST_IS, kf->ctrl.key);
KUNIT_EXPECT_EQ(test, VCAP_FIELD_BIT, kf->ctrl.type);
KUNIT_EXPECT_EQ(test, 0x0, kf->data.u1.value);
KUNIT_EXPECT_EQ(test, 0x0, kf->data.u1.mask);
INIT_LIST_HEAD(&rule->keyfields);
ret = vcap_rule_add_key_u32(rule, VCAP_KF_TYPE, 0x98765432, 0xff00ffab);
KUNIT_EXPECT_EQ(test, 0, ret);
ret = list_empty(&rule->keyfields);
KUNIT_EXPECT_EQ(test, false, ret);
kf = list_first_entry(&rule->keyfields, struct vcap_client_keyfield,
ctrl.list);
KUNIT_EXPECT_EQ(test, VCAP_KF_TYPE, kf->ctrl.key);
KUNIT_EXPECT_EQ(test, VCAP_FIELD_U32, kf->ctrl.type);
KUNIT_EXPECT_EQ(test, 0x98765432, kf->data.u32.value);
KUNIT_EXPECT_EQ(test, 0xff00ffab, kf->data.u32.mask);
INIT_LIST_HEAD(&rule->keyfields);
ret = vcap_rule_add_key_u128(rule, VCAP_KF_L3_IP6_SIP, &dip);
KUNIT_EXPECT_EQ(test, 0, ret);
ret = list_empty(&rule->keyfields);
KUNIT_EXPECT_EQ(test, false, ret);
kf = list_first_entry(&rule->keyfields, struct vcap_client_keyfield,
ctrl.list);
KUNIT_EXPECT_EQ(test, VCAP_KF_L3_IP6_SIP, kf->ctrl.key);
KUNIT_EXPECT_EQ(test, VCAP_FIELD_U128, kf->ctrl.type);
for (idx = 0; idx < ARRAY_SIZE(dip.value); ++idx)
KUNIT_EXPECT_EQ(test, dip.value[idx], kf->data.u128.value[idx]);
for (idx = 0; idx < ARRAY_SIZE(dip.mask); ++idx)
KUNIT_EXPECT_EQ(test, dip.mask[idx], kf->data.u128.mask[idx]);
}
static void vcap_api_rule_add_actionvalue_test(struct kunit *test)
{
struct vcap_admin admin = {
.vtype = VCAP_TYPE_IS2,
};
struct vcap_rule_internal ri = {
.admin = &admin,
.data = {
.actionset = VCAP_AFS_NO_VALUE,
},
};
struct vcap_rule *rule = (struct vcap_rule *)&ri;
struct vcap_client_actionfield *af;
int ret;
INIT_LIST_HEAD(&rule->actionfields);
ret = vcap_rule_add_action_bit(rule, VCAP_AF_POLICE_ENA, VCAP_BIT_0);
KUNIT_EXPECT_EQ(test, 0, ret);
ret = list_empty(&rule->actionfields);
KUNIT_EXPECT_EQ(test, false, ret);
af = list_first_entry(&rule->actionfields,
struct vcap_client_actionfield, ctrl.list);
KUNIT_EXPECT_EQ(test, VCAP_AF_POLICE_ENA, af->ctrl.action);
KUNIT_EXPECT_EQ(test, VCAP_FIELD_BIT, af->ctrl.type);
KUNIT_EXPECT_EQ(test, 0x0, af->data.u1.value);
INIT_LIST_HEAD(&rule->actionfields);
ret = vcap_rule_add_action_bit(rule, VCAP_AF_POLICE_ENA, VCAP_BIT_1);
KUNIT_EXPECT_EQ(test, 0, ret);
ret = list_empty(&rule->actionfields);
KUNIT_EXPECT_EQ(test, false, ret);
af = list_first_entry(&rule->actionfields,
struct vcap_client_actionfield, ctrl.list);
KUNIT_EXPECT_EQ(test, VCAP_AF_POLICE_ENA, af->ctrl.action);
KUNIT_EXPECT_EQ(test, VCAP_FIELD_BIT, af->ctrl.type);
KUNIT_EXPECT_EQ(test, 0x1, af->data.u1.value);
INIT_LIST_HEAD(&rule->actionfields);
ret = vcap_rule_add_action_bit(rule, VCAP_AF_POLICE_ENA, VCAP_BIT_ANY);
KUNIT_EXPECT_EQ(test, 0, ret);
ret = list_empty(&rule->actionfields);
KUNIT_EXPECT_EQ(test, false, ret);
af = list_first_entry(&rule->actionfields,
struct vcap_client_actionfield, ctrl.list);
KUNIT_EXPECT_EQ(test, VCAP_AF_POLICE_ENA, af->ctrl.action);
KUNIT_EXPECT_EQ(test, VCAP_FIELD_BIT, af->ctrl.type);
KUNIT_EXPECT_EQ(test, 0x0, af->data.u1.value);
INIT_LIST_HEAD(&rule->actionfields);
ret = vcap_rule_add_action_u32(rule, VCAP_AF_TYPE, 0x98765432);
KUNIT_EXPECT_EQ(test, 0, ret);
ret = list_empty(&rule->actionfields);
KUNIT_EXPECT_EQ(test, false, ret);
af = list_first_entry(&rule->actionfields,
struct vcap_client_actionfield, ctrl.list);
KUNIT_EXPECT_EQ(test, VCAP_AF_TYPE, af->ctrl.action);
KUNIT_EXPECT_EQ(test, VCAP_FIELD_U32, af->ctrl.type);
KUNIT_EXPECT_EQ(test, 0x98765432, af->data.u32.value);
INIT_LIST_HEAD(&rule->actionfields);
ret = vcap_rule_add_action_u32(rule, VCAP_AF_MASK_MODE, 0xaabbccdd);
KUNIT_EXPECT_EQ(test, 0, ret);
ret = list_empty(&rule->actionfields);
KUNIT_EXPECT_EQ(test, false, ret);
af = list_first_entry(&rule->actionfields,
struct vcap_client_actionfield, ctrl.list);
KUNIT_EXPECT_EQ(test, VCAP_AF_MASK_MODE, af->ctrl.action);
KUNIT_EXPECT_EQ(test, VCAP_FIELD_U32, af->ctrl.type);
KUNIT_EXPECT_EQ(test, 0xaabbccdd, af->data.u32.value);
}
static void vcap_api_rule_find_keyset_basic_test(struct kunit *test)
{
struct vcap_keyset_list matches = {};
struct vcap_admin admin = {
.vtype = VCAP_TYPE_IS2,
};
struct vcap_rule_internal ri = {
.admin = &admin,
.vctrl = &test_vctrl,
};
struct vcap_client_keyfield ckf[] = {
{
.ctrl.key = VCAP_KF_TYPE,
}, {
.ctrl.key = VCAP_KF_LOOKUP_FIRST_IS,
}, {
.ctrl.key = VCAP_KF_IF_IGR_PORT_MASK_L3,
}, {
.ctrl.key = VCAP_KF_IF_IGR_PORT_MASK_RNG,
}, {
.ctrl.key = VCAP_KF_IF_IGR_PORT_MASK,
}, {
.ctrl.key = VCAP_KF_L2_DMAC,
}, {
.ctrl.key = VCAP_KF_ETYPE_LEN_IS,
}, {
.ctrl.key = VCAP_KF_ETYPE,
},
};
int idx;
bool ret;
enum vcap_keyfield_set keysets[10] = {};
matches.keysets = keysets;
matches.max = ARRAY_SIZE(keysets);
INIT_LIST_HEAD(&ri.data.keyfields);
for (idx = 0; idx < ARRAY_SIZE(ckf); idx++)
list_add_tail(&ckf[idx].ctrl.list, &ri.data.keyfields);
ret = vcap_rule_find_keysets(&ri, &matches);
KUNIT_EXPECT_EQ(test, true, ret);
KUNIT_EXPECT_EQ(test, 1, matches.cnt);
KUNIT_EXPECT_EQ(test, VCAP_KFS_MAC_ETYPE, matches.keysets[0]);
}
static void vcap_api_rule_find_keyset_failed_test(struct kunit *test)
{
struct vcap_keyset_list matches = {};
struct vcap_admin admin = {
.vtype = VCAP_TYPE_IS2,
};
struct vcap_rule_internal ri = {
.admin = &admin,
.vctrl = &test_vctrl,
};
struct vcap_client_keyfield ckf[] = {
{
.ctrl.key = VCAP_KF_TYPE,
}, {
.ctrl.key = VCAP_KF_LOOKUP_FIRST_IS,
}, {
.ctrl.key = VCAP_KF_ARP_OPCODE,
}, {
.ctrl.key = VCAP_KF_L3_IP4_SIP,
}, {
.ctrl.key = VCAP_KF_L3_IP4_DIP,
}, {
.ctrl.key = VCAP_KF_8021Q_PCP_CLS,
}, {
.ctrl.key = VCAP_KF_ETYPE_LEN_IS, /* Not with ARP */
}, {
.ctrl.key = VCAP_KF_ETYPE, /* Not with ARP */
},
};
int idx;
bool ret;
enum vcap_keyfield_set keysets[10] = {};
matches.keysets = keysets;
matches.max = ARRAY_SIZE(keysets);
INIT_LIST_HEAD(&ri.data.keyfields);
for (idx = 0; idx < ARRAY_SIZE(ckf); idx++)
list_add_tail(&ckf[idx].ctrl.list, &ri.data.keyfields);
ret = vcap_rule_find_keysets(&ri, &matches);
KUNIT_EXPECT_EQ(test, false, ret);
KUNIT_EXPECT_EQ(test, 0, matches.cnt);
KUNIT_EXPECT_EQ(test, VCAP_KFS_NO_VALUE, matches.keysets[0]);
}
static void vcap_api_rule_find_keyset_many_test(struct kunit *test)
{
struct vcap_keyset_list matches = {};
struct vcap_admin admin = {
.vtype = VCAP_TYPE_IS2,
};
struct vcap_rule_internal ri = {
.admin = &admin,
.vctrl = &test_vctrl,
};
struct vcap_client_keyfield ckf[] = {
{
.ctrl.key = VCAP_KF_TYPE,
}, {
.ctrl.key = VCAP_KF_LOOKUP_FIRST_IS,
}, {
.ctrl.key = VCAP_KF_8021Q_DEI_CLS,
}, {
.ctrl.key = VCAP_KF_8021Q_PCP_CLS,
}, {
.ctrl.key = VCAP_KF_8021Q_VID_CLS,
}, {
.ctrl.key = VCAP_KF_ISDX_CLS,
}, {
.ctrl.key = VCAP_KF_L2_MC_IS,
}, {
.ctrl.key = VCAP_KF_L2_BC_IS,
},
};
int idx;
bool ret;
enum vcap_keyfield_set keysets[10] = {};
matches.keysets = keysets;
matches.max = ARRAY_SIZE(keysets);
INIT_LIST_HEAD(&ri.data.keyfields);
for (idx = 0; idx < ARRAY_SIZE(ckf); idx++)
list_add_tail(&ckf[idx].ctrl.list, &ri.data.keyfields);
ret = vcap_rule_find_keysets(&ri, &matches);
KUNIT_EXPECT_EQ(test, true, ret);
KUNIT_EXPECT_EQ(test, 6, matches.cnt);
KUNIT_EXPECT_EQ(test, VCAP_KFS_ARP, matches.keysets[0]);
KUNIT_EXPECT_EQ(test, VCAP_KFS_IP4_OTHER, matches.keysets[1]);
KUNIT_EXPECT_EQ(test, VCAP_KFS_IP4_TCP_UDP, matches.keysets[2]);
KUNIT_EXPECT_EQ(test, VCAP_KFS_IP6_STD, matches.keysets[3]);
KUNIT_EXPECT_EQ(test, VCAP_KFS_IP_7TUPLE, matches.keysets[4]);
KUNIT_EXPECT_EQ(test, VCAP_KFS_MAC_ETYPE, matches.keysets[5]);
}
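/* Full rule life cycle: allocate a rule, add keys and actions, validate
 * it to find the keyset, encode and write it to the VCAP cache, and
 * finally free it again.
 */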
static void vcap_api_encode_rule_test(struct kunit *test)
{
/* Data used by VCAP Library callback */
static u32 keydata[32] = {};
static u32 mskdata[32] = {};
static u32 actdata[32] = {};
struct vcap_admin is2_admin = {
.vtype = VCAP_TYPE_IS2,
.first_cid = 10000,
.last_cid = 19999,
.lookups = 4,
.last_valid_addr = 3071,
.first_valid_addr = 0,
.last_used_addr = 800,
.cache = {
.keystream = keydata,
.maskstream = mskdata,
.actionstream = actdata,
},
};
struct vcap_rule *rule = 0;
struct vcap_rule_internal *ri = 0;
int vcap_chain_id = 10005;
enum vcap_user user = VCAP_USER_VCAP_UTIL;
u16 priority = 10;
int id = 100;
int ret;
struct vcap_u48_key smac = {
.value = { 0x88, 0x75, 0x32, 0x34, 0x9e, 0xb1 },
.mask = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }
};
struct vcap_u48_key dmac = {
.value = { 0x06, 0x05, 0x04, 0x03, 0x02, 0x01 },
.mask = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }
};
u32 port_mask_rng_value = 0x05;
u32 port_mask_rng_mask = 0x0f;
u32 igr_port_mask_value = 0xffabcd01;
u32 igr_port_mask_mask = ~0;
/* counter is not written yet, so it is not in expwriteaddr */
u32 expwriteaddr[] = {792, 793, 794, 795, 796, 797, 0};
int idx;
vcap_test_api_init(&is2_admin);
/* Allocate the rule */
rule = vcap_alloc_rule(&test_vctrl, &test_netdev, vcap_chain_id, user,
priority, id);
KUNIT_EXPECT_PTR_NE(test, NULL, rule);
ri = (struct vcap_rule_internal *)rule;
/* Add rule keys */
ret = vcap_rule_add_key_u48(rule, VCAP_KF_L2_DMAC, &dmac);
KUNIT_EXPECT_EQ(test, 0, ret);
ret = vcap_rule_add_key_u48(rule, VCAP_KF_L2_SMAC, &smac);
KUNIT_EXPECT_EQ(test, 0, ret);
ret = vcap_rule_add_key_bit(rule, VCAP_KF_ETYPE_LEN_IS, VCAP_BIT_1);
KUNIT_EXPECT_EQ(test, 0, ret);
/* Cannot add the same field twice */
ret = vcap_rule_add_key_bit(rule, VCAP_KF_ETYPE_LEN_IS, VCAP_BIT_1);
KUNIT_EXPECT_EQ(test, -EINVAL, ret);
ret = vcap_rule_add_key_bit(rule, VCAP_KF_IF_IGR_PORT_MASK_L3,
VCAP_BIT_ANY);
KUNIT_EXPECT_EQ(test, 0, ret);
ret = vcap_rule_add_key_u32(rule, VCAP_KF_IF_IGR_PORT_MASK_RNG,
port_mask_rng_value, port_mask_rng_mask);
KUNIT_EXPECT_EQ(test, 0, ret);
ret = vcap_rule_add_key_u32(rule, VCAP_KF_IF_IGR_PORT_MASK,
igr_port_mask_value, igr_port_mask_mask);
KUNIT_EXPECT_EQ(test, 0, ret);
/* Add rule actions */
ret = vcap_rule_add_action_bit(rule, VCAP_AF_POLICE_ENA, VCAP_BIT_1);
KUNIT_EXPECT_EQ(test, 0, ret);
ret = vcap_rule_add_action_u32(rule, VCAP_AF_CNT_ID, id);
KUNIT_EXPECT_EQ(test, 0, ret);
ret = vcap_rule_add_action_u32(rule, VCAP_AF_MATCH_ID, 1);
KUNIT_EXPECT_EQ(test, 0, ret);
ret = vcap_rule_add_action_u32(rule, VCAP_AF_MATCH_ID_MASK, 1);
KUNIT_EXPECT_EQ(test, 0, ret);
/* For now the actionset is hardcoded */
ret = vcap_set_rule_set_actionset(rule, VCAP_AFS_BASE_TYPE);
KUNIT_EXPECT_EQ(test, 0, ret);
/* Validation with validate keyset callback */
ret = vcap_val_rule(rule, ETH_P_ALL);
KUNIT_EXPECT_EQ(test, 0, ret);
KUNIT_EXPECT_EQ(test, VCAP_KFS_MAC_ETYPE, rule->keyset);
KUNIT_EXPECT_EQ(test, VCAP_AFS_BASE_TYPE, rule->actionset);
KUNIT_EXPECT_EQ(test, 6, ri->size);
KUNIT_EXPECT_EQ(test, 2, ri->keyset_sw_regs);
KUNIT_EXPECT_EQ(test, 4, ri->actionset_sw_regs);
/* Add rule with write callback */
ret = vcap_add_rule(rule);
KUNIT_EXPECT_EQ(test, 0, ret);
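/* The rule needs 6 addresses and is written at 792..797, just below the
 * previous last_used_addr of 800, which is then updated to 792.
 */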
KUNIT_EXPECT_EQ(test, 792, is2_admin.last_used_addr);
for (idx = 0; idx < ARRAY_SIZE(expwriteaddr); ++idx)
KUNIT_EXPECT_EQ(test, expwriteaddr[idx], test_updateaddr[idx]);
/* Check that the rule has been added */
ret = list_empty(&is2_admin.rules);
KUNIT_EXPECT_EQ(test, false, ret);
KUNIT_EXPECT_EQ(test, 0, ret);
vcap_free_rule(rule);
/* Check that the rule has been freed: tricky to access since this
* memory should not be accessible anymore
*/
KUNIT_EXPECT_PTR_NE(test, NULL, rule);
ret = list_empty(&rule->keyfields);
KUNIT_EXPECT_EQ(test, true, ret);
ret = list_empty(&rule->actionfields);
KUNIT_EXPECT_EQ(test, true, ret);
}
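/* Walk the chain ids of two IS2 instances and verify which source and
 * destination id pairs are accepted as a valid goto to a next lookup.
 */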
static void vcap_api_next_lookup_basic_test(struct kunit *test)
{
struct vcap_admin admin1 = {
.vtype = VCAP_TYPE_IS2,
.vinst = 0,
.first_cid = 8000000,
.last_cid = 8199999,
.lookups = 4,
.lookups_per_instance = 2,
};
struct vcap_admin admin2 = {
.vtype = VCAP_TYPE_IS2,
.vinst = 1,
.first_cid = 8200000,
.last_cid = 8399999,
.lookups = 4,
.lookups_per_instance = 2,
};
bool ret;
vcap_test_api_init(&admin1);
list_add_tail(&admin2.list, &test_vctrl.list);
ret = vcap_is_next_lookup(&test_vctrl, 8000000, 1001000);
KUNIT_EXPECT_EQ(test, false, ret);
ret = vcap_is_next_lookup(&test_vctrl, 8000000, 8001000);
KUNIT_EXPECT_EQ(test, false, ret);
ret = vcap_is_next_lookup(&test_vctrl, 8000000, 8101000);
KUNIT_EXPECT_EQ(test, true, ret);
ret = vcap_is_next_lookup(&test_vctrl, 8100000, 8101000);
KUNIT_EXPECT_EQ(test, false, ret);
ret = vcap_is_next_lookup(&test_vctrl, 8100000, 8201000);
KUNIT_EXPECT_EQ(test, true, ret);
ret = vcap_is_next_lookup(&test_vctrl, 8200000, 8201000);
KUNIT_EXPECT_EQ(test, false, ret);
ret = vcap_is_next_lookup(&test_vctrl, 8200000, 8301000);
KUNIT_EXPECT_EQ(test, true, ret);
ret = vcap_is_next_lookup(&test_vctrl, 8300000, 8301000);
KUNIT_EXPECT_EQ(test, false, ret);
ret = vcap_is_next_lookup(&test_vctrl, 8300000, 8401000);
KUNIT_EXPECT_EQ(test, true, ret);
}
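/* Same check as above, but with three IS0 instances and two IS2
 * instances registered, so jumps across instances and across VCAP
 * types are also exercised.
 */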
static void vcap_api_next_lookup_advanced_test(struct kunit *test)
{
struct vcap_admin admin1 = {
.vtype = VCAP_TYPE_IS0,
.vinst = 0,
.first_cid = 1000000,
.last_cid = 1199999,
.lookups = 6,
.lookups_per_instance = 2,
};
struct vcap_admin admin2 = {
.vtype = VCAP_TYPE_IS0,
.vinst = 1,
.first_cid = 1200000,
.last_cid = 1399999,
.lookups = 6,
.lookups_per_instance = 2,
};
struct vcap_admin admin3 = {
.vtype = VCAP_TYPE_IS0,
.vinst = 2,
.first_cid = 1400000,
.last_cid = 1599999,
.lookups = 6,
.lookups_per_instance = 2,
};
struct vcap_admin admin4 = {
.vtype = VCAP_TYPE_IS2,
.vinst = 0,
.first_cid = 8000000,
.last_cid = 8199999,
.lookups = 4,
.lookups_per_instance = 2,
};
struct vcap_admin admin5 = {
.vtype = VCAP_TYPE_IS2,
.vinst = 1,
.first_cid = 8200000,
.last_cid = 8399999,
.lookups = 4,
.lookups_per_instance = 2,
};
bool ret;
vcap_test_api_init(&admin1);
list_add_tail(&admin2.list, &test_vctrl.list);
list_add_tail(&admin3.list, &test_vctrl.list);
list_add_tail(&admin4.list, &test_vctrl.list);
list_add_tail(&admin5.list, &test_vctrl.list);
ret = vcap_is_next_lookup(&test_vctrl, 1000000, 1001000);
KUNIT_EXPECT_EQ(test, false, ret);
ret = vcap_is_next_lookup(&test_vctrl, 1000000, 1101000);
KUNIT_EXPECT_EQ(test, true, ret);
ret = vcap_is_next_lookup(&test_vctrl, 1100000, 1201000);
KUNIT_EXPECT_EQ(test, true, ret);
ret = vcap_is_next_lookup(&test_vctrl, 1100000, 1301000);
KUNIT_EXPECT_EQ(test, false, ret);
ret = vcap_is_next_lookup(&test_vctrl, 1100000, 8101000);
KUNIT_EXPECT_EQ(test, false, ret);
ret = vcap_is_next_lookup(&test_vctrl, 1300000, 1401000);
KUNIT_EXPECT_EQ(test, true, ret);
ret = vcap_is_next_lookup(&test_vctrl, 1400000, 1501000);
KUNIT_EXPECT_EQ(test, true, ret);
ret = vcap_is_next_lookup(&test_vctrl, 1500000, 8001000);
KUNIT_EXPECT_EQ(test, true, ret);
ret = vcap_is_next_lookup(&test_vctrl, 8000000, 8001000);
KUNIT_EXPECT_EQ(test, false, ret);
ret = vcap_is_next_lookup(&test_vctrl, 8000000, 8101000);
KUNIT_EXPECT_EQ(test, true, ret);
ret = vcap_is_next_lookup(&test_vctrl, 8300000, 8301000);
KUNIT_EXPECT_EQ(test, false, ret);
ret = vcap_is_next_lookup(&test_vctrl, 8300000, 8401000);
KUNIT_EXPECT_EQ(test, true, ret);
}
static struct kunit_case vcap_api_support_test_cases[] = {
KUNIT_CASE(vcap_api_next_lookup_basic_test),
KUNIT_CASE(vcap_api_next_lookup_advanced_test),
{}
};
static struct kunit_suite vcap_api_support_test_suite = {
.name = "VCAP_API_Support_Testsuite",
.test_cases = vcap_api_support_test_cases,
};
static struct kunit_case vcap_api_full_rule_test_cases[] = {
KUNIT_CASE(vcap_api_rule_find_keyset_basic_test),
KUNIT_CASE(vcap_api_rule_find_keyset_failed_test),
KUNIT_CASE(vcap_api_rule_find_keyset_many_test),
KUNIT_CASE(vcap_api_encode_rule_test),
{}
};
static struct kunit_suite vcap_api_full_rule_test_suite = {
.name = "VCAP_API_Full_Rule_Testsuite",
.test_cases = vcap_api_full_rule_test_cases,
};
static struct kunit_case vcap_api_rule_value_test_cases[] = {
KUNIT_CASE(vcap_api_rule_add_keyvalue_test),
KUNIT_CASE(vcap_api_rule_add_actionvalue_test),
{}
};
static struct kunit_suite vcap_api_rule_value_test_suite = {
.name = "VCAP_API_Rule_Value_Testsuite",
.test_cases = vcap_api_rule_value_test_cases,
};
static struct kunit_case vcap_api_encoding_test_cases[] = {
KUNIT_CASE(vcap_api_set_bit_1_test),
KUNIT_CASE(vcap_api_set_bit_0_test),
@@ -930,4 +1519,7 @@ static struct kunit_suite vcap_api_encoding_test_suite = {
.test_cases = vcap_api_encoding_test_cases,
};
kunit_test_suite(vcap_api_support_test_suite);
kunit_test_suite(vcap_api_full_rule_test_suite);
kunit_test_suite(vcap_api_rule_value_test_suite);
kunit_test_suite(vcap_api_encoding_test_suite);