Commit 28e6a60f authored by Jakub Kicinski

Merge branch 'octeontx2-af-tc-flower-offload-changes'

Suman Ghosh says:

====================
octeontx2-af: TC flower offload changes

This patchset includes minor code restructuring of the TC flower
outer vlan offload handling and adds support for TC flower inner
vlan offload.

Patch #1 Code restructure to handle TC flower outer vlan offload

Patch #2 Add TC flower offload support for inner vlan
====================

Link: https://lore.kernel.org/r/20230804045935.3010554-1-sumang@marvell.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 66244337 21e74835
@@ -1465,6 +1465,7 @@ struct flow_msg {
u8 ip_flag;
u8 next_header;
};
__be16 vlan_itci;
};
struct npc_install_flow_req {
......
@@ -184,6 +184,7 @@ enum key_fields {
NPC_VLAN_ETYPE_CTAG, /* 0x8100 */
NPC_VLAN_ETYPE_STAG, /* 0x88A8 */
NPC_OUTER_VID,
NPC_INNER_VID,
NPC_TOS,
NPC_IPFRAG_IPV4,
NPC_SIP_IPV4,
@@ -230,6 +231,8 @@ enum key_fields {
NPC_VLAN_TAG1,
/* outer vlan tci for double tagged frame */
NPC_VLAN_TAG2,
/* inner vlan tci for double tagged frame */
NPC_VLAN_TAG3,
/* other header fields programmed to extract but not of our interest */
NPC_UNKNOWN,
NPC_KEY_FIELDS_MAX,
......
@@ -2787,6 +2787,11 @@ static void rvu_dbg_npc_mcam_show_flows(struct seq_file *s,
seq_printf(s, "mask 0x%x\n",
ntohs(rule->mask.vlan_tci));
break;
case NPC_INNER_VID:
seq_printf(s, "0x%x ", ntohs(rule->packet.vlan_itci));
seq_printf(s, "mask 0x%x\n",
ntohs(rule->mask.vlan_itci));
break;
case NPC_TOS:
seq_printf(s, "%d ", rule->packet.tos);
seq_printf(s, "mask 0x%x\n", rule->mask.tos);
......
@@ -20,6 +20,7 @@ static const char * const npc_flow_names[] = {
[NPC_VLAN_ETYPE_CTAG] = "vlan ether type ctag",
[NPC_VLAN_ETYPE_STAG] = "vlan ether type stag",
[NPC_OUTER_VID] = "outer vlan id",
[NPC_INNER_VID] = "inner vlan id",
[NPC_TOS] = "tos",
[NPC_IPFRAG_IPV4] = "fragmented IPv4 header ",
[NPC_SIP_IPV4] = "ipv4 source ip",
@@ -328,6 +329,8 @@ static void npc_handle_multi_layer_fields(struct rvu *rvu, int blkaddr, u8 intf)
*/
struct npc_key_field *vlan_tag1;
struct npc_key_field *vlan_tag2;
/* Inner VLAN TCI for double tagged frames */
struct npc_key_field *vlan_tag3;
u64 *features;
u8 start_lid;
int i;
@@ -350,6 +353,7 @@ static void npc_handle_multi_layer_fields(struct rvu *rvu, int blkaddr, u8 intf)
etype_tag2 = &key_fields[NPC_ETYPE_TAG2];
vlan_tag1 = &key_fields[NPC_VLAN_TAG1];
vlan_tag2 = &key_fields[NPC_VLAN_TAG2];
vlan_tag3 = &key_fields[NPC_VLAN_TAG3];
/* if key profile programmed does not extract Ethertype at all */
if (!etype_ether->nr_kws && !etype_tag1->nr_kws && !etype_tag2->nr_kws) {
@@ -431,6 +435,12 @@ static void npc_handle_multi_layer_fields(struct rvu *rvu, int blkaddr, u8 intf)
goto done;
}
*features |= BIT_ULL(NPC_OUTER_VID);
/* If key profile extracts inner vlan tci */
if (vlan_tag3->nr_kws) {
key_fields[NPC_INNER_VID] = *vlan_tag3;
*features |= BIT_ULL(NPC_INNER_VID);
}
done:
return;
}
@@ -513,6 +523,7 @@ do { \
NPC_SCAN_HDR(NPC_ETYPE_TAG2, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, 8, 2);
NPC_SCAN_HDR(NPC_VLAN_TAG1, NPC_LID_LB, NPC_LT_LB_CTAG, 2, 2);
NPC_SCAN_HDR(NPC_VLAN_TAG2, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, 2, 2);
NPC_SCAN_HDR(NPC_VLAN_TAG3, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, 6, 2);
NPC_SCAN_HDR(NPC_DMAC, NPC_LID_LA, la_ltype, la_start, 6);
NPC_SCAN_HDR(NPC_IPSEC_SPI, NPC_LID_LD, NPC_LT_LD_AH, 4, 4);
@@ -943,6 +954,8 @@ do { \
NPC_WRITE_FLOW(NPC_OUTER_VID, vlan_tci, ntohs(pkt->vlan_tci), 0,
ntohs(mask->vlan_tci), 0);
NPC_WRITE_FLOW(NPC_INNER_VID, vlan_itci, ntohs(pkt->vlan_itci), 0,
ntohs(mask->vlan_itci), 0);
NPC_WRITE_FLOW(NPC_IPFRAG_IPV6, next_header, pkt->next_header, 0,
mask->next_header, 0);
......
@@ -439,6 +439,62 @@ static int otx2_tc_parse_actions(struct otx2_nic *nic,
return 0;
}
static int otx2_tc_process_vlan(struct otx2_nic *nic, struct flow_msg *flow_spec,
struct flow_msg *flow_mask, struct flow_rule *rule,
struct npc_install_flow_req *req, bool is_inner)
{
struct flow_match_vlan match;
u16 vlan_tci, vlan_tci_mask;
if (is_inner)
flow_rule_match_cvlan(rule, &match);
else
flow_rule_match_vlan(rule, &match);
if (!eth_type_vlan(match.key->vlan_tpid)) {
netdev_err(nic->netdev, "vlan tpid 0x%x not supported\n",
ntohs(match.key->vlan_tpid));
return -EOPNOTSUPP;
}
if (!match.mask->vlan_id) {
struct flow_action_entry *act;
int i;
flow_action_for_each(i, act, &rule->action) {
if (act->id == FLOW_ACTION_DROP) {
netdev_err(nic->netdev,
"vlan tpid 0x%x with vlan_id %d is not supported for DROP rule.\n",
ntohs(match.key->vlan_tpid), match.key->vlan_id);
return -EOPNOTSUPP;
}
}
}
if (match.mask->vlan_id ||
match.mask->vlan_dei ||
match.mask->vlan_priority) {
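/* 802.1Q TCI layout: PCP in bits 15:13, DEI in bit 12, VID in bits 11:0 */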
vlan_tci = match.key->vlan_id |
match.key->vlan_dei << 12 |
match.key->vlan_priority << 13;
vlan_tci_mask = match.mask->vlan_id |
match.mask->vlan_dei << 12 |
match.mask->vlan_priority << 13;
if (is_inner) {
flow_spec->vlan_itci = htons(vlan_tci);
flow_mask->vlan_itci = htons(vlan_tci_mask);
req->features |= BIT_ULL(NPC_INNER_VID);
} else {
flow_spec->vlan_tci = htons(vlan_tci);
flow_mask->vlan_tci = htons(vlan_tci_mask);
req->features |= BIT_ULL(NPC_OUTER_VID);
}
}
return 0;
}
static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,
struct flow_cls_offload *f,
struct npc_install_flow_req *req)
@@ -458,6 +514,7 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,
BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) |
BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) |
BIT_ULL(FLOW_DISSECTOR_KEY_CVLAN) |
BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
BIT_ULL(FLOW_DISSECTOR_KEY_PORTS) |
@@ -591,47 +648,19 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
struct flow_match_vlan match;
u16 vlan_tci, vlan_tci_mask;
flow_rule_match_vlan(rule, &match);
if (ntohs(match.key->vlan_tpid) != ETH_P_8021Q) {
netdev_err(nic->netdev, "vlan tpid 0x%x not supported\n",
ntohs(match.key->vlan_tpid));
return -EOPNOTSUPP;
}
int ret;
if (!match.mask->vlan_id) {
struct flow_action_entry *act;
int i;
flow_action_for_each(i, act, &rule->action) {
if (act->id == FLOW_ACTION_DROP) {
netdev_err(nic->netdev,
"vlan tpid 0x%x with vlan_id %d is not supported for DROP rule.\n",
ntohs(match.key->vlan_tpid),
match.key->vlan_id);
return -EOPNOTSUPP;
}
}
}
if (match.mask->vlan_id ||
match.mask->vlan_dei ||
match.mask->vlan_priority) {
vlan_tci = match.key->vlan_id |
match.key->vlan_dei << 12 |
match.key->vlan_priority << 13;
ret = otx2_tc_process_vlan(nic, flow_spec, flow_mask, rule, req, false);
if (ret)
return ret;
}
vlan_tci_mask = match.mask->vlan_id |
match.mask->vlan_dei << 12 |
match.mask->vlan_priority << 13;
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) {
int ret;
flow_spec->vlan_tci = htons(vlan_tci);
flow_mask->vlan_tci = htons(vlan_tci_mask);
req->features |= BIT_ULL(NPC_OUTER_VID);
}
ret = otx2_tc_process_vlan(nic, flow_spec, flow_mask, rule, req, true);
if (ret)
return ret;
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
......