Commit 31222162 authored by David S. Miller's avatar David S. Miller

Merge branch 'octeon-tc-offloads'

Naveen Mamindlapalli says:

====================
Add tc hardware offloads

This patch series adds support for tc hardware offloads.

Patch #1 adds support for offloading flows that match IP tos and IP
         protocol, which will be used by the tc hw offload support. It
         also adds ethtool n-tuple filter code to offload flows
         matching the above fields.
Patch #2 adds tc flower hardware offload support on ingress traffic.
Patch #3 adds TC flower offload stats.
Patch #4 adds tc TC_MATCHALL egress ratelimiting offload.

* tc flower hardware offload in PF driver

The driver parses the flow match fields and actions received from the tc
subsystem and adds/delete MCAM rules for the same. Each flow contains set
of match and action fields. If the action or fields are not supported,
the rule cannot be offloaded to hardware. tc offload uses the same set
of MCAM rules allocated for ethtool n-tuple filters, so only one entity
at a time can offload flows to hardware; the two are made mutually
exclusive in the driver.

Following match and actions are supported.

Match: Eth dst_mac, EtherType, 802.1Q {vlan_id,vlan_prio}, vlan EtherType,
       IP proto {tcp,udp,sctp,icmp,icmp6}, IPv4 tos, IPv4{dst_ip,src_ip},
       L4 proto {dst_port|src_port number}.
Actions: drop, accept, vlan pop, redirect to another port on the device.

Hardware stats are also supported; currently only the packet counter
is updated.

* tc egress rate limiting support
Added TC-MATCHALL classifier offload with police action applied for all
egress traffic on the specified interface.
====================
Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parents a04be4b6 e638a83f
...@@ -216,6 +216,9 @@ M(NPC_MCAM_READ_ENTRY, 0x600f, npc_mcam_read_entry, \ ...@@ -216,6 +216,9 @@ M(NPC_MCAM_READ_ENTRY, 0x600f, npc_mcam_read_entry, \
npc_mcam_read_entry_rsp) \ npc_mcam_read_entry_rsp) \
M(NPC_MCAM_READ_BASE_RULE, 0x6011, npc_read_base_steer_rule, \ M(NPC_MCAM_READ_BASE_RULE, 0x6011, npc_read_base_steer_rule, \
msg_req, npc_mcam_read_base_rule_rsp) \ msg_req, npc_mcam_read_base_rule_rsp) \
M(NPC_MCAM_GET_STATS, 0x6012, npc_mcam_entry_stats, \
npc_mcam_get_stats_req, \
npc_mcam_get_stats_rsp) \
/* NIX mbox IDs (range 0x8000 - 0xFFFF) */ \ /* NIX mbox IDs (range 0x8000 - 0xFFFF) */ \
M(NIX_LF_ALLOC, 0x8000, nix_lf_alloc, \ M(NIX_LF_ALLOC, 0x8000, nix_lf_alloc, \
nix_lf_alloc_req, nix_lf_alloc_rsp) \ nix_lf_alloc_req, nix_lf_alloc_rsp) \
...@@ -1195,6 +1198,17 @@ struct npc_mcam_read_base_rule_rsp { ...@@ -1195,6 +1198,17 @@ struct npc_mcam_read_base_rule_rsp {
struct mcam_entry entry; struct mcam_entry entry;
}; };
/* NPC_MCAM_GET_STATS request: ask the AF for the match-counter value
 * of one MCAM entry (handled by rvu_mbox_handler_npc_mcam_entry_stats).
 */
struct npc_mcam_get_stats_req {
	struct mbox_msghdr hdr;	/* common mailbox header */
	u16 entry; /* mcam entry */
};

/* NPC_MCAM_GET_STATS response */
struct npc_mcam_get_stats_rsp {
	struct mbox_msghdr hdr;	/* common mailbox header */
	u64 stat; /* counter stats */
	u8 stat_ena; /* enabled */
};
enum ptp_op { enum ptp_op {
PTP_OP_ADJFINE = 0, PTP_OP_ADJFINE = 0,
PTP_OP_GET_CLOCK = 1, PTP_OP_GET_CLOCK = 1,
......
...@@ -167,6 +167,8 @@ enum key_fields { ...@@ -167,6 +167,8 @@ enum key_fields {
NPC_IPPROTO_SCTP, NPC_IPPROTO_SCTP,
NPC_IPPROTO_AH, NPC_IPPROTO_AH,
NPC_IPPROTO_ESP, NPC_IPPROTO_ESP,
NPC_IPPROTO_ICMP,
NPC_IPPROTO_ICMP6,
NPC_SPORT_TCP, NPC_SPORT_TCP,
NPC_DPORT_TCP, NPC_DPORT_TCP,
NPC_SPORT_UDP, NPC_SPORT_UDP,
......
...@@ -2002,7 +2002,7 @@ static void rvu_dbg_npc_mcam_show_flows(struct seq_file *s, ...@@ -2002,7 +2002,7 @@ static void rvu_dbg_npc_mcam_show_flows(struct seq_file *s,
seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.etype)); seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.etype));
break; break;
case NPC_OUTER_VID: case NPC_OUTER_VID:
seq_printf(s, "%d ", ntohs(rule->packet.vlan_tci)); seq_printf(s, "0x%x ", ntohs(rule->packet.vlan_tci));
seq_printf(s, "mask 0x%x\n", seq_printf(s, "mask 0x%x\n",
ntohs(rule->mask.vlan_tci)); ntohs(rule->mask.vlan_tci));
break; break;
......
...@@ -2806,3 +2806,42 @@ int rvu_mbox_handler_npc_read_base_steer_rule(struct rvu *rvu, ...@@ -2806,3 +2806,42 @@ int rvu_mbox_handler_npc_read_base_steer_rule(struct rvu *rvu,
out: out:
return rc; return rc;
} }
/* Mailbox handler for NPC_MCAM_GET_STATS: report the match-counter
 * value of a single MCAM entry.
 *
 * If no counter is attached to the entry, the response carries
 * stat_ena = 0 and rsp->stat is left untouched.  Otherwise the attached
 * counter is read and the value is masked to 48 bits before returning.
 *
 * Returns 0, or NPC_MCAM_INVALID_REQ when the NPC block is absent.
 */
int rvu_mbox_handler_npc_mcam_entry_stats(struct rvu *rvu,
					  struct npc_mcam_get_stats_req *req,
					  struct npc_mcam_get_stats_rsp *rsp)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	u16 index, cntr;
	int blkaddr;
	u64 regval;
	u32 bank;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return NPC_MCAM_INVALID_REQ;

	mutex_lock(&mcam->lock);

	/* Translate the global entry number into a per-bank index + bank */
	index = req->entry & (mcam->banksize - 1);
	bank = npc_get_bank(mcam, req->entry);

	/* read MCAM entry STAT_ACT register */
	regval = rvu_read64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_STAT_ACT(index, bank));

	/* Bit 9 of STAT_ACT is treated as the counter-enable flag and
	 * bits [8:0] as the counter number (per the NPC register layout
	 * — confirm against the hardware reference manual).
	 */
	if (!(regval & BIT_ULL(9))) {
		rsp->stat_ena = 0;
		mutex_unlock(&mcam->lock);
		return 0;
	}

	cntr = regval & 0x1FF;

	rsp->stat_ena = 1;
	rsp->stat = rvu_read64(rvu, blkaddr, NPC_AF_MATCH_STATX(cntr));
	rsp->stat &= BIT_ULL(48) - 1;	/* counters are 48 bits wide */

	mutex_unlock(&mcam->lock);

	return 0;
}
...@@ -29,6 +29,8 @@ static const char * const npc_flow_names[] = { ...@@ -29,6 +29,8 @@ static const char * const npc_flow_names[] = {
[NPC_IPPROTO_TCP] = "ip proto tcp", [NPC_IPPROTO_TCP] = "ip proto tcp",
[NPC_IPPROTO_UDP] = "ip proto udp", [NPC_IPPROTO_UDP] = "ip proto udp",
[NPC_IPPROTO_SCTP] = "ip proto sctp", [NPC_IPPROTO_SCTP] = "ip proto sctp",
[NPC_IPPROTO_ICMP] = "ip proto icmp",
[NPC_IPPROTO_ICMP6] = "ip proto icmp6",
[NPC_IPPROTO_AH] = "ip proto AH", [NPC_IPPROTO_AH] = "ip proto AH",
[NPC_IPPROTO_ESP] = "ip proto ESP", [NPC_IPPROTO_ESP] = "ip proto ESP",
[NPC_SPORT_TCP] = "tcp source port", [NPC_SPORT_TCP] = "tcp source port",
...@@ -427,6 +429,7 @@ do { \ ...@@ -427,6 +429,7 @@ do { \
* packet header fields below. * packet header fields below.
* Example: Source IP is 4 bytes and starts at 12th byte of IP header * Example: Source IP is 4 bytes and starts at 12th byte of IP header
*/ */
NPC_SCAN_HDR(NPC_TOS, NPC_LID_LC, NPC_LT_LC_IP, 1, 1);
NPC_SCAN_HDR(NPC_SIP_IPV4, NPC_LID_LC, NPC_LT_LC_IP, 12, 4); NPC_SCAN_HDR(NPC_SIP_IPV4, NPC_LID_LC, NPC_LT_LC_IP, 12, 4);
NPC_SCAN_HDR(NPC_DIP_IPV4, NPC_LID_LC, NPC_LT_LC_IP, 16, 4); NPC_SCAN_HDR(NPC_DIP_IPV4, NPC_LID_LC, NPC_LT_LC_IP, 16, 4);
NPC_SCAN_HDR(NPC_SIP_IPV6, NPC_LID_LC, NPC_LT_LC_IP6, 8, 16); NPC_SCAN_HDR(NPC_SIP_IPV6, NPC_LID_LC, NPC_LT_LC_IP6, 8, 16);
...@@ -477,9 +480,12 @@ static void npc_set_features(struct rvu *rvu, int blkaddr, u8 intf) ...@@ -477,9 +480,12 @@ static void npc_set_features(struct rvu *rvu, int blkaddr, u8 intf)
BIT_ULL(NPC_IPPROTO_SCTP); BIT_ULL(NPC_IPPROTO_SCTP);
} }
/* for AH, check if corresponding layer type is present in the key */ /* for AH/ICMP/ICMPv6/, check if corresponding layer type is present in the key */
if (npc_check_field(rvu, blkaddr, NPC_LD, intf)) if (npc_check_field(rvu, blkaddr, NPC_LD, intf)) {
*features |= BIT_ULL(NPC_IPPROTO_AH); *features |= BIT_ULL(NPC_IPPROTO_AH);
*features |= BIT_ULL(NPC_IPPROTO_ICMP);
*features |= BIT_ULL(NPC_IPPROTO_ICMP6);
}
/* for ESP, check if corresponding layer type is present in the key */ /* for ESP, check if corresponding layer type is present in the key */
if (npc_check_field(rvu, blkaddr, NPC_LE, intf)) if (npc_check_field(rvu, blkaddr, NPC_LE, intf))
...@@ -769,6 +775,12 @@ static void npc_update_flow(struct rvu *rvu, struct mcam_entry *entry, ...@@ -769,6 +775,12 @@ static void npc_update_flow(struct rvu *rvu, struct mcam_entry *entry,
if (features & BIT_ULL(NPC_IPPROTO_SCTP)) if (features & BIT_ULL(NPC_IPPROTO_SCTP))
npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_SCTP, npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_SCTP,
0, ~0ULL, 0, intf); 0, ~0ULL, 0, intf);
if (features & BIT_ULL(NPC_IPPROTO_ICMP))
npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_ICMP,
0, ~0ULL, 0, intf);
if (features & BIT_ULL(NPC_IPPROTO_ICMP6))
npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_ICMP6,
0, ~0ULL, 0, intf);
if (features & BIT_ULL(NPC_OUTER_VID)) if (features & BIT_ULL(NPC_OUTER_VID))
npc_update_entry(rvu, NPC_LB, entry, npc_update_entry(rvu, NPC_LB, entry,
...@@ -798,6 +810,7 @@ do { \ ...@@ -798,6 +810,7 @@ do { \
NPC_WRITE_FLOW(NPC_SMAC, smac, smac_val, 0, smac_mask, 0); NPC_WRITE_FLOW(NPC_SMAC, smac, smac_val, 0, smac_mask, 0);
NPC_WRITE_FLOW(NPC_ETYPE, etype, ntohs(pkt->etype), 0, NPC_WRITE_FLOW(NPC_ETYPE, etype, ntohs(pkt->etype), 0,
ntohs(mask->etype), 0); ntohs(mask->etype), 0);
NPC_WRITE_FLOW(NPC_TOS, tos, pkt->tos, 0, mask->tos, 0);
NPC_WRITE_FLOW(NPC_SIP_IPV4, ip4src, ntohl(pkt->ip4src), 0, NPC_WRITE_FLOW(NPC_SIP_IPV4, ip4src, ntohl(pkt->ip4src), 0,
ntohl(mask->ip4src), 0); ntohl(mask->ip4src), 0);
NPC_WRITE_FLOW(NPC_DIP_IPV4, ip4dst, ntohl(pkt->ip4dst), 0, NPC_WRITE_FLOW(NPC_DIP_IPV4, ip4dst, ntohl(pkt->ip4dst), 0,
......
...@@ -7,7 +7,7 @@ obj-$(CONFIG_OCTEONTX2_PF) += rvu_nicpf.o ...@@ -7,7 +7,7 @@ obj-$(CONFIG_OCTEONTX2_PF) += rvu_nicpf.o
obj-$(CONFIG_OCTEONTX2_VF) += rvu_nicvf.o obj-$(CONFIG_OCTEONTX2_VF) += rvu_nicvf.o
rvu_nicpf-y := otx2_pf.o otx2_common.o otx2_txrx.o otx2_ethtool.o \ rvu_nicpf-y := otx2_pf.o otx2_common.o otx2_txrx.o otx2_ethtool.o \
otx2_ptp.o otx2_flows.o cn10k.o otx2_ptp.o otx2_flows.o otx2_tc.o cn10k.o
rvu_nicvf-y := otx2_vf.o rvu_nicvf-y := otx2_vf.o
ccflags-y += -I$(srctree)/drivers/net/ethernet/marvell/octeontx2/af ccflags-y += -I$(srctree)/drivers/net/ethernet/marvell/octeontx2/af
...@@ -18,6 +18,7 @@ ...@@ -18,6 +18,7 @@
#include <linux/ptp_clock_kernel.h> #include <linux/ptp_clock_kernel.h>
#include <linux/timecounter.h> #include <linux/timecounter.h>
#include <linux/soc/marvell/octeontx2/asm.h> #include <linux/soc/marvell/octeontx2/asm.h>
#include <net/pkt_cls.h>
#include <mbox.h> #include <mbox.h>
#include <npc.h> #include <npc.h>
...@@ -264,6 +265,7 @@ struct otx2_flow_config { ...@@ -264,6 +265,7 @@ struct otx2_flow_config {
#define OTX2_MAX_NTUPLE_FLOWS 32 #define OTX2_MAX_NTUPLE_FLOWS 32
#define OTX2_MAX_UNICAST_FLOWS 8 #define OTX2_MAX_UNICAST_FLOWS 8
#define OTX2_MAX_VLAN_FLOWS 1 #define OTX2_MAX_VLAN_FLOWS 1
#define OTX2_MAX_TC_FLOWS OTX2_MAX_NTUPLE_FLOWS
#define OTX2_MCAM_COUNT (OTX2_MAX_NTUPLE_FLOWS + \ #define OTX2_MCAM_COUNT (OTX2_MAX_NTUPLE_FLOWS + \
OTX2_MAX_UNICAST_FLOWS + \ OTX2_MAX_UNICAST_FLOWS + \
OTX2_MAX_VLAN_FLOWS) OTX2_MAX_VLAN_FLOWS)
...@@ -274,10 +276,20 @@ struct otx2_flow_config { ...@@ -274,10 +276,20 @@ struct otx2_flow_config {
#define OTX2_PER_VF_VLAN_FLOWS 2 /* rx+tx per VF */ #define OTX2_PER_VF_VLAN_FLOWS 2 /* rx+tx per VF */
#define OTX2_VF_VLAN_RX_INDEX 0 #define OTX2_VF_VLAN_RX_INDEX 0
#define OTX2_VF_VLAN_TX_INDEX 1 #define OTX2_VF_VLAN_TX_INDEX 1
u32 tc_flower_offset;
u32 ntuple_max_flows; u32 ntuple_max_flows;
u32 tc_max_flows;
struct list_head flow_list; struct list_head flow_list;
}; };
struct otx2_tc_info {
/* hash table to store TC offloaded flows */
struct rhashtable flow_table;
struct rhashtable_params flow_ht_params;
DECLARE_BITMAP(tc_entries_bitmap, OTX2_MAX_TC_FLOWS);
unsigned long num_entries;
};
struct dev_hw_ops { struct dev_hw_ops {
int (*sq_aq_init)(void *dev, u16 qidx, u16 sqb_aura); int (*sq_aq_init)(void *dev, u16 qidx, u16 sqb_aura);
void (*sqe_flush)(void *dev, struct otx2_snd_queue *sq, void (*sqe_flush)(void *dev, struct otx2_snd_queue *sq,
...@@ -305,6 +317,8 @@ struct otx2_nic { ...@@ -305,6 +317,8 @@ struct otx2_nic {
#define OTX2_FLAG_PF_SHUTDOWN BIT_ULL(8) #define OTX2_FLAG_PF_SHUTDOWN BIT_ULL(8)
#define OTX2_FLAG_RX_PAUSE_ENABLED BIT_ULL(9) #define OTX2_FLAG_RX_PAUSE_ENABLED BIT_ULL(9)
#define OTX2_FLAG_TX_PAUSE_ENABLED BIT_ULL(10) #define OTX2_FLAG_TX_PAUSE_ENABLED BIT_ULL(10)
#define OTX2_FLAG_TC_FLOWER_SUPPORT BIT_ULL(11)
#define OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED BIT_ULL(12)
u64 flags; u64 flags;
struct otx2_qset qset; struct otx2_qset qset;
...@@ -347,6 +361,7 @@ struct otx2_nic { ...@@ -347,6 +361,7 @@ struct otx2_nic {
struct hwtstamp_config tstamp; struct hwtstamp_config tstamp;
struct otx2_flow_config *flow_cfg; struct otx2_flow_config *flow_cfg;
struct otx2_tc_info tc_info;
}; };
static inline bool is_otx2_lbkvf(struct pci_dev *pdev) static inline bool is_otx2_lbkvf(struct pci_dev *pdev)
...@@ -802,4 +817,9 @@ int otx2_add_macfilter(struct net_device *netdev, const u8 *mac); ...@@ -802,4 +817,9 @@ int otx2_add_macfilter(struct net_device *netdev, const u8 *mac);
int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable); int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable);
int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf); int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf);
u16 otx2_get_max_mtu(struct otx2_nic *pfvf); u16 otx2_get_max_mtu(struct otx2_nic *pfvf);
/* tc support */
int otx2_init_tc(struct otx2_nic *nic);
void otx2_shutdown_tc(struct otx2_nic *nic);
int otx2_setup_tc(struct net_device *netdev, enum tc_setup_type type,
void *type_data);
#endif /* OTX2_COMMON_H */ #endif /* OTX2_COMMON_H */
...@@ -57,10 +57,13 @@ int otx2_alloc_mcam_entries(struct otx2_nic *pfvf) ...@@ -57,10 +57,13 @@ int otx2_alloc_mcam_entries(struct otx2_nic *pfvf)
flow_cfg->ntuple_max_flows = rsp->count; flow_cfg->ntuple_max_flows = rsp->count;
flow_cfg->ntuple_offset = 0; flow_cfg->ntuple_offset = 0;
pfvf->flags |= OTX2_FLAG_NTUPLE_SUPPORT; pfvf->flags |= OTX2_FLAG_NTUPLE_SUPPORT;
flow_cfg->tc_max_flows = flow_cfg->ntuple_max_flows;
pfvf->flags |= OTX2_FLAG_TC_FLOWER_SUPPORT;
} else { } else {
flow_cfg->vf_vlan_offset = 0; flow_cfg->vf_vlan_offset = 0;
flow_cfg->ntuple_offset = flow_cfg->vf_vlan_offset + flow_cfg->ntuple_offset = flow_cfg->vf_vlan_offset +
vf_vlan_max_flows; vf_vlan_max_flows;
flow_cfg->tc_flower_offset = flow_cfg->ntuple_offset;
flow_cfg->unicast_offset = flow_cfg->ntuple_offset + flow_cfg->unicast_offset = flow_cfg->ntuple_offset +
OTX2_MAX_NTUPLE_FLOWS; OTX2_MAX_NTUPLE_FLOWS;
flow_cfg->rx_vlan_offset = flow_cfg->unicast_offset + flow_cfg->rx_vlan_offset = flow_cfg->unicast_offset +
...@@ -69,6 +72,7 @@ int otx2_alloc_mcam_entries(struct otx2_nic *pfvf) ...@@ -69,6 +72,7 @@ int otx2_alloc_mcam_entries(struct otx2_nic *pfvf)
pfvf->flags |= OTX2_FLAG_UCAST_FLTR_SUPPORT; pfvf->flags |= OTX2_FLAG_UCAST_FLTR_SUPPORT;
pfvf->flags |= OTX2_FLAG_RX_VLAN_SUPPORT; pfvf->flags |= OTX2_FLAG_RX_VLAN_SUPPORT;
pfvf->flags |= OTX2_FLAG_VF_VLAN_SUPPORT; pfvf->flags |= OTX2_FLAG_VF_VLAN_SUPPORT;
pfvf->flags |= OTX2_FLAG_TC_FLOWER_SUPPORT;
} }
for (i = 0; i < rsp->count; i++) for (i = 0; i < rsp->count; i++)
...@@ -93,6 +97,7 @@ int otx2_mcam_flow_init(struct otx2_nic *pf) ...@@ -93,6 +97,7 @@ int otx2_mcam_flow_init(struct otx2_nic *pf)
INIT_LIST_HEAD(&pf->flow_cfg->flow_list); INIT_LIST_HEAD(&pf->flow_cfg->flow_list);
pf->flow_cfg->ntuple_max_flows = OTX2_MAX_NTUPLE_FLOWS; pf->flow_cfg->ntuple_max_flows = OTX2_MAX_NTUPLE_FLOWS;
pf->flow_cfg->tc_max_flows = pf->flow_cfg->ntuple_max_flows;
err = otx2_alloc_mcam_entries(pf); err = otx2_alloc_mcam_entries(pf);
if (err) if (err)
...@@ -301,6 +306,35 @@ static int otx2_prepare_ipv4_flow(struct ethtool_rx_flow_spec *fsp, ...@@ -301,6 +306,35 @@ static int otx2_prepare_ipv4_flow(struct ethtool_rx_flow_spec *fsp,
sizeof(pmask->ip4dst)); sizeof(pmask->ip4dst));
req->features |= BIT_ULL(NPC_DIP_IPV4); req->features |= BIT_ULL(NPC_DIP_IPV4);
} }
if (ipv4_usr_mask->tos) {
pkt->tos = ipv4_usr_hdr->tos;
pmask->tos = ipv4_usr_mask->tos;
req->features |= BIT_ULL(NPC_TOS);
}
if (ipv4_usr_mask->proto) {
switch (ipv4_usr_hdr->proto) {
case IPPROTO_ICMP:
req->features |= BIT_ULL(NPC_IPPROTO_ICMP);
break;
case IPPROTO_TCP:
req->features |= BIT_ULL(NPC_IPPROTO_TCP);
break;
case IPPROTO_UDP:
req->features |= BIT_ULL(NPC_IPPROTO_UDP);
break;
case IPPROTO_SCTP:
req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
break;
case IPPROTO_AH:
req->features |= BIT_ULL(NPC_IPPROTO_AH);
break;
case IPPROTO_ESP:
req->features |= BIT_ULL(NPC_IPPROTO_ESP);
break;
default:
return -EOPNOTSUPP;
}
}
pkt->etype = cpu_to_be16(ETH_P_IP); pkt->etype = cpu_to_be16(ETH_P_IP);
pmask->etype = cpu_to_be16(0xFFFF); pmask->etype = cpu_to_be16(0xFFFF);
req->features |= BIT_ULL(NPC_ETYPE); req->features |= BIT_ULL(NPC_ETYPE);
...@@ -325,6 +359,11 @@ static int otx2_prepare_ipv4_flow(struct ethtool_rx_flow_spec *fsp, ...@@ -325,6 +359,11 @@ static int otx2_prepare_ipv4_flow(struct ethtool_rx_flow_spec *fsp,
sizeof(pmask->ip4dst)); sizeof(pmask->ip4dst));
req->features |= BIT_ULL(NPC_DIP_IPV4); req->features |= BIT_ULL(NPC_DIP_IPV4);
} }
if (ipv4_l4_mask->tos) {
pkt->tos = ipv4_l4_hdr->tos;
pmask->tos = ipv4_l4_mask->tos;
req->features |= BIT_ULL(NPC_TOS);
}
if (ipv4_l4_mask->psrc) { if (ipv4_l4_mask->psrc) {
memcpy(&pkt->sport, &ipv4_l4_hdr->psrc, memcpy(&pkt->sport, &ipv4_l4_hdr->psrc,
sizeof(pkt->sport)); sizeof(pkt->sport));
...@@ -375,10 +414,14 @@ static int otx2_prepare_ipv4_flow(struct ethtool_rx_flow_spec *fsp, ...@@ -375,10 +414,14 @@ static int otx2_prepare_ipv4_flow(struct ethtool_rx_flow_spec *fsp,
sizeof(pmask->ip4dst)); sizeof(pmask->ip4dst));
req->features |= BIT_ULL(NPC_DIP_IPV4); req->features |= BIT_ULL(NPC_DIP_IPV4);
} }
if (ah_esp_mask->tos) {
pkt->tos = ah_esp_hdr->tos;
pmask->tos = ah_esp_mask->tos;
req->features |= BIT_ULL(NPC_TOS);
}
/* NPC profile doesn't extract AH/ESP header fields */ /* NPC profile doesn't extract AH/ESP header fields */
if ((ah_esp_mask->spi & ah_esp_hdr->spi) || if (ah_esp_mask->spi & ah_esp_hdr->spi)
(ah_esp_mask->tos & ah_esp_mask->tos))
return -EOPNOTSUPP; return -EOPNOTSUPP;
if (flow_type == AH_V4_FLOW) if (flow_type == AH_V4_FLOW)
......
...@@ -1760,6 +1760,24 @@ static netdev_tx_t otx2_xmit(struct sk_buff *skb, struct net_device *netdev) ...@@ -1760,6 +1760,24 @@ static netdev_tx_t otx2_xmit(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_OK; return NETDEV_TX_OK;
} }
static netdev_features_t otx2_fix_features(struct net_device *dev,
					   netdev_features_t features)
{
	netdev_features_t wanted = features;

	/* n-tuple filters and tc flower offload share the same MCAM
	 * entries, so only one of the two may be active at a time:
	 * enabling one drops the other.
	 */
	if ((wanted & NETIF_F_HW_TC) && (dev->features & NETIF_F_NTUPLE)) {
		netdev_info(dev, "Disabling n-tuple filters\n");
		wanted &= ~NETIF_F_NTUPLE;
	}

	if ((wanted & NETIF_F_NTUPLE) && (dev->features & NETIF_F_HW_TC)) {
		netdev_info(dev, "Disabling TC hardware offload\n");
		wanted &= ~NETIF_F_HW_TC;
	}

	return wanted;
}
static void otx2_set_rx_mode(struct net_device *netdev) static void otx2_set_rx_mode(struct net_device *netdev)
{ {
struct otx2_nic *pf = netdev_priv(netdev); struct otx2_nic *pf = netdev_priv(netdev);
...@@ -1822,6 +1840,12 @@ static int otx2_set_features(struct net_device *netdev, ...@@ -1822,6 +1840,12 @@ static int otx2_set_features(struct net_device *netdev,
if ((changed & NETIF_F_NTUPLE) && !ntuple) if ((changed & NETIF_F_NTUPLE) && !ntuple)
otx2_destroy_ntuple_flows(pf); otx2_destroy_ntuple_flows(pf);
if ((netdev->features & NETIF_F_HW_TC) > (features & NETIF_F_HW_TC) &&
pf->tc_info.num_entries) {
netdev_err(netdev, "Can't disable TC hardware offload while flows are active\n");
return -EBUSY;
}
return 0; return 0;
} }
...@@ -2220,6 +2244,7 @@ static const struct net_device_ops otx2_netdev_ops = { ...@@ -2220,6 +2244,7 @@ static const struct net_device_ops otx2_netdev_ops = {
.ndo_open = otx2_open, .ndo_open = otx2_open,
.ndo_stop = otx2_stop, .ndo_stop = otx2_stop,
.ndo_start_xmit = otx2_xmit, .ndo_start_xmit = otx2_xmit,
.ndo_fix_features = otx2_fix_features,
.ndo_set_mac_address = otx2_set_mac_address, .ndo_set_mac_address = otx2_set_mac_address,
.ndo_change_mtu = otx2_change_mtu, .ndo_change_mtu = otx2_change_mtu,
.ndo_set_rx_mode = otx2_set_rx_mode, .ndo_set_rx_mode = otx2_set_rx_mode,
...@@ -2230,6 +2255,7 @@ static const struct net_device_ops otx2_netdev_ops = { ...@@ -2230,6 +2255,7 @@ static const struct net_device_ops otx2_netdev_ops = {
.ndo_set_vf_mac = otx2_set_vf_mac, .ndo_set_vf_mac = otx2_set_vf_mac,
.ndo_set_vf_vlan = otx2_set_vf_vlan, .ndo_set_vf_vlan = otx2_set_vf_vlan,
.ndo_get_vf_config = otx2_get_vf_config, .ndo_get_vf_config = otx2_get_vf_config,
.ndo_setup_tc = otx2_setup_tc,
}; };
static int otx2_wq_init(struct otx2_nic *pf) static int otx2_wq_init(struct otx2_nic *pf)
...@@ -2449,6 +2475,10 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id) ...@@ -2449,6 +2475,10 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
NETIF_F_HW_VLAN_STAG_RX; NETIF_F_HW_VLAN_STAG_RX;
netdev->features |= netdev->hw_features; netdev->features |= netdev->hw_features;
/* HW supports tc offload but mutually exclusive with n-tuple filters */
if (pf->flags & OTX2_FLAG_TC_FLOWER_SUPPORT)
netdev->hw_features |= NETIF_F_HW_TC;
netdev->gso_max_segs = OTX2_MAX_GSO_SEGS; netdev->gso_max_segs = OTX2_MAX_GSO_SEGS;
netdev->watchdog_timeo = OTX2_TX_TIMEOUT; netdev->watchdog_timeo = OTX2_TX_TIMEOUT;
...@@ -2470,6 +2500,10 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id) ...@@ -2470,6 +2500,10 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
otx2_set_ethtool_ops(netdev); otx2_set_ethtool_ops(netdev);
err = otx2_init_tc(pf);
if (err)
goto err_mcam_flow_del;
/* Enable link notifications */ /* Enable link notifications */
otx2_cgx_config_linkevents(pf, true); otx2_cgx_config_linkevents(pf, true);
...@@ -2479,6 +2513,8 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id) ...@@ -2479,6 +2513,8 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return 0; return 0;
err_mcam_flow_del:
otx2_mcam_flow_del(pf);
err_unreg_netdev: err_unreg_netdev:
unregister_netdev(netdev); unregister_netdev(netdev);
err_del_mcam_entries: err_del_mcam_entries:
...@@ -2646,6 +2682,7 @@ static void otx2_remove(struct pci_dev *pdev) ...@@ -2646,6 +2682,7 @@ static void otx2_remove(struct pci_dev *pdev)
otx2_ptp_destroy(pf); otx2_ptp_destroy(pf);
otx2_mcam_flow_del(pf); otx2_mcam_flow_del(pf);
otx2_shutdown_tc(pf);
otx2_detach_resources(&pf->mbox); otx2_detach_resources(&pf->mbox);
if (pf->hw.lmt_base) if (pf->hw.lmt_base)
iounmap(pf->hw.lmt_base); iounmap(pf->hw.lmt_base);
......
...@@ -152,6 +152,7 @@ ...@@ -152,6 +152,7 @@
#define NIX_AF_TL3X_SCHEDULE(a) (0x1000 | (a) << 16) #define NIX_AF_TL3X_SCHEDULE(a) (0x1000 | (a) << 16)
#define NIX_AF_TL4X_PARENT(a) (0x1288 | (a) << 16) #define NIX_AF_TL4X_PARENT(a) (0x1288 | (a) << 16)
#define NIX_AF_TL4X_SCHEDULE(a) (0x1200 | (a) << 16) #define NIX_AF_TL4X_SCHEDULE(a) (0x1200 | (a) << 16)
#define NIX_AF_TL4X_PIR(a) (0x1230 | (a) << 16)
#define NIX_AF_MDQX_SCHEDULE(a) (0x1400 | (a) << 16) #define NIX_AF_MDQX_SCHEDULE(a) (0x1400 | (a) << 16)
#define NIX_AF_MDQX_PARENT(a) (0x1480 | (a) << 16) #define NIX_AF_MDQX_PARENT(a) (0x1480 | (a) << 16)
#define NIX_AF_TL3_TL2X_LINKX_CFG(a, b) (0x1700 | (a) << 16 | (b) << 3) #define NIX_AF_TL3_TL2X_LINKX_CFG(a, b) (0x1700 | (a) << 16 | (b) << 3)
......
// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTx2 RVU Physical Function ethernet driver
*
* Copyright (C) 2021 Marvell.
*/
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/inetdevice.h>
#include <linux/rhashtable.h>
#include <linux/bitfield.h>
#include <linux/math64.h>
#include <net/flow_dissector.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/ipv6.h>

#include "otx2_common.h"
/* Egress rate limiting definitions */
#define MAX_BURST_EXPONENT		0x0FULL	/* widest encodable burst exponent */
#define MAX_BURST_MANTISSA		0xFFULL	/* widest encodable burst mantissa */
/* = ((256 + MAX_BURST_MANTISSA) << (1 + MAX_BURST_EXPONENT)) / 256 bytes */
#define MAX_BURST_SIZE			130816ULL
#define MAX_RATE_DIVIDER_EXPONENT	12ULL
#define MAX_RATE_EXPONENT		0x0FULL
#define MAX_RATE_MANTISSA		0xFFULL

/* Bitfields in NIX_TLX_PIR register */
#define TLX_RATE_MANTISSA		GENMASK_ULL(8, 1)
#define TLX_RATE_EXPONENT		GENMASK_ULL(12, 9)
#define TLX_RATE_DIVIDER_EXPONENT	GENMASK_ULL(16, 13)
#define TLX_BURST_MANTISSA		GENMASK_ULL(36, 29)
#define TLX_BURST_EXPONENT		GENMASK_ULL(40, 37)
/* Counters reported to the TC core for one offloaded flow */
struct otx2_tc_flow_stats {
	u64 bytes;	/* byte count (hw currently updates packets only) */
	u64 pkts;	/* packet count */
	u64 used;	/* last time the counters changed — presumably jiffies, confirm */
};

/* Driver-side state of one TC flower rule offloaded to the MCAM */
struct otx2_tc_flow {
	struct rhash_head node;		/* linkage in tc_info.flow_table */
	unsigned long cookie;		/* TC flow cookie (hash key) */
	u16 entry;			/* MCAM entry backing this flow */
	unsigned int bitpos;		/* slot in tc_entries_bitmap */
	struct rcu_head rcu;		/* for RCU-deferred teardown */
	struct otx2_tc_flow_stats stats;
	spinlock_t lock; /* lock for stats */
};
/* Derive the exponent/mantissa encoding of a burst size in bytes.
 * Hardware computes the burst as
 *   ((256 + BURST_MANTISSA) << (1 + BURST_EXPONENT)) / 256
 * with a ceiling of 130,816 bytes.  A burst of 0 selects the maximum
 * encoding.
 */
static void otx2_get_egress_burst_cfg(u32 burst, u32 *burst_exp,
				      u32 *burst_mantissa)
{
	unsigned int residue;

	burst = min_t(u32, burst, MAX_BURST_SIZE);
	if (!burst) {
		*burst_exp = MAX_BURST_EXPONENT;
		*burst_mantissa = MAX_BURST_MANTISSA;
		return;
	}

	/* exponent = floor(log2(burst)) - 1 (clamped at zero) */
	*burst_exp = burst > 1 ? ilog2(burst) - 1 : 0;
	/* mantissa encodes the remainder above the power of two */
	residue = burst - rounddown_pow_of_two(burst);
	if (burst < MAX_BURST_MANTISSA)
		*burst_mantissa = residue * 2;
	else
		*burst_mantissa = residue / (1ULL << (*burst_exp - 7));
}
/* Derive the exponent/mantissa/divider encoding of a rate in Mbps.
 *
 * Hardware computes the rate as
 *   PIR_ADD = ((256 + mantissa) << exp) / 256
 *   rate    = (2 * PIR_ADD) / (1 << div_exp)     (result in Mbps)
 *
 * Rates from 2Mbps up to 100Gbps are representable with div_exp = 0,
 * which keeps the exponent/mantissa derivation simple, so the divider
 * exponent is always programmed as zero.  A rate of 0 selects the
 * maximum encoding instead of disabling rate limiting.
 */
static void otx2_get_egress_rate_cfg(u32 maxrate, u32 *exp,
				     u32 *mantissa, u32 *div_exp)
{
	unsigned int residue;

	*div_exp = 0;

	if (!maxrate) {
		/* Instead of disabling rate limiting, set all values to max */
		*exp = MAX_RATE_EXPONENT;
		*mantissa = MAX_RATE_MANTISSA;
		return;
	}

	/* exponent = floor(log2(rate)) - 1 (clamped at zero) */
	*exp = maxrate > 1 ? ilog2(maxrate) - 1 : 0;
	/* mantissa encodes the remainder above the power of two */
	residue = maxrate - rounddown_pow_of_two(maxrate);
	if (maxrate < MAX_RATE_MANTISSA)
		*mantissa = residue * 2;
	else
		*mantissa = residue / (1ULL << (*exp - 7));
}
/* Program the TL4 PIR register so all egress traffic of this interface
 * is rate limited.
 *
 * @burst:   burst size in bytes (0 selects the maximum encoding)
 * @maxrate: rate in Mbps (0 selects the maximum encoding)
 *
 * Returns 0 on success, -ENOMEM if a mailbox message can't be
 * allocated, or the mailbox error code.
 */
static int otx2_set_matchall_egress_rate(struct otx2_nic *nic, u32 burst, u32 maxrate)
{
	struct otx2_hw *hw = &nic->hw;
	struct nix_txschq_config *req;
	u32 burst_exp, burst_mantissa;
	u32 exp, mantissa, div_exp;
	int txschq, err;

	/* All SQs share the same TL4, so pick the first scheduler */
	txschq = hw->txschq_list[NIX_TXSCH_LVL_TL4][0];

	/* Get exponent and mantissa values from the desired rate */
	otx2_get_egress_burst_cfg(burst, &burst_exp, &burst_mantissa);
	otx2_get_egress_rate_cfg(maxrate, &exp, &mantissa, &div_exp);

	mutex_lock(&nic->mbox.lock);
	req = otx2_mbox_alloc_msg_nix_txschq_cfg(&nic->mbox);
	if (!req) {
		mutex_unlock(&nic->mbox.lock);
		return -ENOMEM;
	}

	req->lvl = NIX_TXSCH_LVL_TL4;
	req->num_regs = 1;
	req->reg[0] = NIX_AF_TL4X_PIR(txschq);
	/* BIT_ULL(0) is presumably the PIR enable bit — confirm against
	 * the NIX register layout.
	 */
	req->regval[0] = FIELD_PREP(TLX_BURST_EXPONENT, burst_exp) |
			 FIELD_PREP(TLX_BURST_MANTISSA, burst_mantissa) |
			 FIELD_PREP(TLX_RATE_DIVIDER_EXPONENT, div_exp) |
			 FIELD_PREP(TLX_RATE_EXPONENT, exp) |
			 FIELD_PREP(TLX_RATE_MANTISSA, mantissa) | BIT_ULL(0);

	err = otx2_sync_mbox_msg(&nic->mbox);
	mutex_unlock(&nic->mbox.lock);
	return err;
}
static int otx2_tc_validate_flow(struct otx2_nic *nic,
struct flow_action *actions,
struct netlink_ext_ack *extack)
{
if (nic->flags & OTX2_FLAG_INTF_DOWN) {
NL_SET_ERR_MSG_MOD(extack, "Interface not initialized");
return -EINVAL;
}
if (!flow_action_has_entries(actions)) {
NL_SET_ERR_MSG_MOD(extack, "MATCHALL offload called with no action");
return -EINVAL;
}
if (!flow_offload_has_one_action(actions)) {
NL_SET_ERR_MSG_MOD(extack,
"Egress MATCHALL offload supports only 1 policing action");
return -EINVAL;
}
return 0;
}
static int otx2_tc_egress_matchall_install(struct otx2_nic *nic,
struct tc_cls_matchall_offload *cls)
{
struct netlink_ext_ack *extack = cls->common.extack;
struct flow_action *actions = &cls->rule->action;
struct flow_action_entry *entry;
u32 rate;
int err;
err = otx2_tc_validate_flow(nic, actions, extack);
if (err)
return err;
if (nic->flags & OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED) {
NL_SET_ERR_MSG_MOD(extack,
"Only one Egress MATCHALL ratelimitter can be offloaded");
return -ENOMEM;
}
entry = &cls->rule->action.entries[0];
switch (entry->id) {
case FLOW_ACTION_POLICE:
if (entry->police.rate_pkt_ps) {
NL_SET_ERR_MSG_MOD(extack, "QoS offload not support packets per second");
return -EOPNOTSUPP;
}
/* Convert bytes per second to Mbps */
rate = entry->police.rate_bytes_ps * 8;
rate = max_t(u32, rate / 1000000, 1);
err = otx2_set_matchall_egress_rate(nic, entry->police.burst, rate);
if (err)
return err;
nic->flags |= OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED;
break;
default:
NL_SET_ERR_MSG_MOD(extack,
"Only police action is supported with Egress MATCHALL offload");
return -EOPNOTSUPP;
}
return 0;
}
/* Remove the egress MATCHALL policer: reprogram the rate limiter with
 * burst = 0 / rate = 0 (the maximum encodings) and clear the enabled
 * flag — the flag is dropped even if the mailbox call fails.
 */
static int otx2_tc_egress_matchall_delete(struct otx2_nic *nic,
					  struct tc_cls_matchall_offload *cls)
{
	int rc;

	if (nic->flags & OTX2_FLAG_INTF_DOWN) {
		NL_SET_ERR_MSG_MOD(cls->common.extack, "Interface not initialized");
		return -EINVAL;
	}

	rc = otx2_set_matchall_egress_rate(nic, 0, 0);
	nic->flags &= ~OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED;
	return rc;
}
/* Translate a TC flow_action list into the NPC flow-install request.
 *
 * Drop/accept/redirect are terminal: they set req->op and stop parsing.
 * Vlan pop only fills the vtag-strip fields and lets parsing continue,
 * so it can be combined with a following terminal action.
 *
 * Returns 0 on success, -EINVAL when no actions were given, and
 * -EOPNOTSUPP for actions the hardware can't perform.
 */
static int otx2_tc_parse_actions(struct otx2_nic *nic,
				 struct flow_action *flow_action,
				 struct npc_install_flow_req *req)
{
	struct flow_action_entry *act;
	struct net_device *target;
	struct otx2_nic *priv;
	int i;

	if (!flow_action_has_entries(flow_action)) {
		netdev_info(nic->netdev, "no tc actions specified");
		return -EINVAL;
	}

	flow_action_for_each(i, act, flow_action) {
		switch (act->id) {
		case FLOW_ACTION_DROP:
			req->op = NIX_RX_ACTIONOP_DROP;
			return 0;
		case FLOW_ACTION_ACCEPT:
			req->op = NIX_RX_ACTION_DEFAULT;
			return 0;
		case FLOW_ACTION_REDIRECT_INGRESS:
			target = act->dev;
			/* NOTE(review): netdev_priv() is applied without
			 * verifying that 'target' really is an otx2 netdev —
			 * confirm the caller guarantees this, or validate the
			 * target device before dereferencing priv->pcifunc.
			 */
			priv = netdev_priv(target);
			/* npc_install_flow_req doesn't support passing a target pcifunc */
			if (rvu_get_pf(nic->pcifunc) != rvu_get_pf(priv->pcifunc)) {
				netdev_info(nic->netdev,
					    "can't redirect to other pf/vf\n");
				return -EOPNOTSUPP;
			}
			/* redirect within the same PF: target VF's function id */
			req->vf = priv->pcifunc & RVU_PFVF_FUNC_MASK;
			req->op = NIX_RX_ACTION_DEFAULT;
			return 0;
		case FLOW_ACTION_VLAN_POP:
			req->vtag0_valid = true;
			/* use RX_VTAG_TYPE7 which is initialized to strip vlan tag */
			req->vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE7;
			break;
		default:
			return -EOPNOTSUPP;
		}
	}

	return 0;
}
static int otx2_tc_prepare_flow(struct otx2_nic *nic,
struct flow_cls_offload *f,
struct npc_install_flow_req *req)
{
struct flow_msg *flow_spec = &req->packet;
struct flow_msg *flow_mask = &req->mask;
struct flow_dissector *dissector;
struct flow_rule *rule;
u8 ip_proto = 0;
rule = flow_cls_offload_flow_rule(f);
dissector = rule->match.dissector;
if ((dissector->used_keys &
~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
BIT(FLOW_DISSECTOR_KEY_BASIC) |
BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
BIT(FLOW_DISSECTOR_KEY_VLAN) |
BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
BIT(FLOW_DISSECTOR_KEY_PORTS) |
BIT(FLOW_DISSECTOR_KEY_IP)))) {
netdev_info(nic->netdev, "unsupported flow used key 0x%x",
dissector->used_keys);
return -EOPNOTSUPP;
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
struct flow_match_basic match;
flow_rule_match_basic(rule, &match);
/* All EtherTypes can be matched, no hw limitation */
flow_spec->etype = match.key->n_proto;
flow_mask->etype = match.mask->n_proto;
req->features |= BIT_ULL(NPC_ETYPE);
if (match.mask->ip_proto &&
(match.key->ip_proto != IPPROTO_TCP &&
match.key->ip_proto != IPPROTO_UDP &&
match.key->ip_proto != IPPROTO_SCTP &&
match.key->ip_proto != IPPROTO_ICMP &&
match.key->ip_proto != IPPROTO_ICMPV6)) {
netdev_info(nic->netdev,
"ip_proto=0x%x not supported\n",
match.key->ip_proto);
return -EOPNOTSUPP;
}
if (match.mask->ip_proto)
ip_proto = match.key->ip_proto;
if (ip_proto == IPPROTO_UDP)
req->features |= BIT_ULL(NPC_IPPROTO_UDP);
else if (ip_proto == IPPROTO_TCP)
req->features |= BIT_ULL(NPC_IPPROTO_TCP);
else if (ip_proto == IPPROTO_SCTP)
req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
else if (ip_proto == IPPROTO_ICMP)
req->features |= BIT_ULL(NPC_IPPROTO_ICMP);
else if (ip_proto == IPPROTO_ICMPV6)
req->features |= BIT_ULL(NPC_IPPROTO_ICMP6);
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
struct flow_match_eth_addrs match;
flow_rule_match_eth_addrs(rule, &match);
if (!is_zero_ether_addr(match.mask->src)) {
netdev_err(nic->netdev, "src mac match not supported\n");
return -EOPNOTSUPP;
}
if (!is_zero_ether_addr(match.mask->dst)) {
ether_addr_copy(flow_spec->dmac, (u8 *)&match.key->dst);
ether_addr_copy(flow_mask->dmac,
(u8 *)&match.mask->dst);
req->features |= BIT_ULL(NPC_DMAC);
}
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
struct flow_match_ip match;
flow_rule_match_ip(rule, &match);
if ((ntohs(flow_spec->etype) != ETH_P_IP) &&
match.mask->tos) {
netdev_err(nic->netdev, "tos not supported\n");
return -EOPNOTSUPP;
}
if (match.mask->ttl) {
netdev_err(nic->netdev, "ttl not supported\n");
return -EOPNOTSUPP;
}
flow_spec->tos = match.key->tos;
flow_mask->tos = match.mask->tos;
req->features |= BIT_ULL(NPC_TOS);
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
struct flow_match_vlan match;
u16 vlan_tci, vlan_tci_mask;
flow_rule_match_vlan(rule, &match);
if (ntohs(match.key->vlan_tpid) != ETH_P_8021Q) {
netdev_err(nic->netdev, "vlan tpid 0x%x not supported\n",
ntohs(match.key->vlan_tpid));
return -EOPNOTSUPP;
}
if (match.mask->vlan_id ||
match.mask->vlan_dei ||
match.mask->vlan_priority) {
vlan_tci = match.key->vlan_id |
match.key->vlan_dei << 12 |
match.key->vlan_priority << 13;
vlan_tci_mask = match.mask->vlan_id |
match.key->vlan_dei << 12 |
match.key->vlan_priority << 13;
flow_spec->vlan_tci = htons(vlan_tci);
flow_mask->vlan_tci = htons(vlan_tci_mask);
req->features |= BIT_ULL(NPC_OUTER_VID);
}
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
struct flow_match_ipv4_addrs match;
flow_rule_match_ipv4_addrs(rule, &match);
flow_spec->ip4dst = match.key->dst;
flow_mask->ip4dst = match.mask->dst;
req->features |= BIT_ULL(NPC_DIP_IPV4);
flow_spec->ip4src = match.key->src;
flow_mask->ip4src = match.mask->src;
req->features |= BIT_ULL(NPC_SIP_IPV4);
} else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
struct flow_match_ipv6_addrs match;
flow_rule_match_ipv6_addrs(rule, &match);
if (ipv6_addr_loopback(&match.key->dst) ||
ipv6_addr_loopback(&match.key->src)) {
netdev_err(nic->netdev,
"Flow matching on IPv6 loopback addr is not supported\n");
return -EOPNOTSUPP;
}
if (!ipv6_addr_any(&match.mask->dst)) {
memcpy(&flow_spec->ip6dst,
(struct in6_addr *)&match.key->dst,
sizeof(flow_spec->ip6dst));
memcpy(&flow_mask->ip6dst,
(struct in6_addr *)&match.mask->dst,
sizeof(flow_spec->ip6dst));
req->features |= BIT_ULL(NPC_DIP_IPV6);
}
if (!ipv6_addr_any(&match.mask->src)) {
memcpy(&flow_spec->ip6src,
(struct in6_addr *)&match.key->src,
sizeof(flow_spec->ip6src));
memcpy(&flow_mask->ip6src,
(struct in6_addr *)&match.mask->src,
sizeof(flow_spec->ip6src));
req->features |= BIT_ULL(NPC_SIP_IPV6);
}
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
struct flow_match_ports match;
flow_rule_match_ports(rule, &match);
flow_spec->dport = match.key->dst;
flow_mask->dport = match.mask->dst;
if (ip_proto == IPPROTO_UDP)
req->features |= BIT_ULL(NPC_DPORT_UDP);
else if (ip_proto == IPPROTO_TCP)
req->features |= BIT_ULL(NPC_DPORT_TCP);
else if (ip_proto == IPPROTO_SCTP)
req->features |= BIT_ULL(NPC_DPORT_SCTP);
flow_spec->sport = match.key->src;
flow_mask->sport = match.mask->src;
if (ip_proto == IPPROTO_UDP)
req->features |= BIT_ULL(NPC_SPORT_UDP);
else if (ip_proto == IPPROTO_TCP)
req->features |= BIT_ULL(NPC_SPORT_TCP);
else if (ip_proto == IPPROTO_SCTP)
req->features |= BIT_ULL(NPC_SPORT_SCTP);
}
return otx2_tc_parse_actions(nic, &rule->action, req);
}
/* Ask the AF, via a mailbox request, to remove MCAM entry @entry.
 *
 * Returns 0 on success, -ENOMEM if a mailbox message cannot be
 * allocated, or -EFAULT if the AF does not acknowledge the request.
 */
static int otx2_del_mcam_flow_entry(struct otx2_nic *nic, u16 entry)
{
	struct npc_delete_flow_req *req;
	int rc = 0;

	mutex_lock(&nic->mbox.lock);

	req = otx2_mbox_alloc_msg_npc_delete_flow(&nic->mbox);
	if (!req) {
		rc = -ENOMEM;
		goto unlock;
	}

	req->entry = entry;

	/* Send message to AF */
	if (otx2_sync_mbox_msg(&nic->mbox)) {
		netdev_err(nic->netdev, "Failed to delete MCAM flow entry %d\n",
			   entry);
		rc = -EFAULT;
	}

unlock:
	mutex_unlock(&nic->mbox.lock);
	return rc;
}
/* Tear down the tc flower rule identified by @tc_flow_cmd->cookie:
 * remove its MCAM entry, unlink the tracking node from the flow table
 * and release it.
 *
 * Returns 0 on success or -EINVAL if no flow matches the cookie.
 */
static int otx2_tc_del_flow(struct otx2_nic *nic,
			    struct flow_cls_offload *tc_flow_cmd)
{
	struct otx2_tc_info *tc_info = &nic->tc_info;
	struct otx2_tc_flow *flow_node;

	flow_node = rhashtable_lookup_fast(&tc_info->flow_table,
					   &tc_flow_cmd->cookie,
					   tc_info->flow_ht_params);
	if (!flow_node) {
		netdev_err(nic->netdev, "tc flow not found for cookie 0x%lx\n",
			   tc_flow_cmd->cookie);
		return -EINVAL;
	}

	otx2_del_mcam_flow_entry(nic, flow_node->entry);

	WARN_ON(rhashtable_remove_fast(&nic->tc_info.flow_table,
				       &flow_node->node,
				       nic->tc_info.flow_ht_params));

	/* Release bookkeeping BEFORE queueing the node for freeing:
	 * the previous code read flow_node->bitpos after kfree_rcu(),
	 * i.e. after ownership of the memory was handed to RCU.
	 */
	clear_bit(flow_node->bitpos, tc_info->tc_entries_bitmap);
	tc_info->num_entries--;
	kfree_rcu(flow_node, rcu);

	return 0;
}
/* Install a tc flower rule as an MCAM entry.
 *
 * Parses the flow with otx2_tc_prepare_flow(), reserves a free slot in
 * the tc entries bitmap, sends the install request to the AF and tracks
 * the rule in the cookie-keyed flow table.
 *
 * Fixes over the previous version:
 *  - A pre-existing flow with the same cookie is deleted BEFORE taking
 *    nic->mbox.lock: otx2_tc_del_flow() calls otx2_del_mcam_flow_entry(),
 *    which acquires that lock itself, so deleting with the lock held
 *    self-deadlocked. (Side effect: the old flow is now removed even if
 *    the replacement later fails to parse.)
 *  - new_node is freed on every error path instead of being leaked.
 */
static int otx2_tc_add_flow(struct otx2_nic *nic,
			    struct flow_cls_offload *tc_flow_cmd)
{
	struct otx2_tc_info *tc_info = &nic->tc_info;
	struct otx2_tc_flow *new_node, *old_node;
	struct npc_install_flow_req *req;
	int rc;

	if (!(nic->flags & OTX2_FLAG_TC_FLOWER_SUPPORT))
		return -ENOMEM;

	/* allocate memory for the new flow and it's node */
	new_node = kzalloc(sizeof(*new_node), GFP_KERNEL);
	if (!new_node)
		return -ENOMEM;
	new_node->cookie = tc_flow_cmd->cookie;

	/* If a flow exists with the same cookie, delete it */
	old_node = rhashtable_lookup_fast(&tc_info->flow_table,
					  &tc_flow_cmd->cookie,
					  tc_info->flow_ht_params);
	if (old_node)
		otx2_tc_del_flow(nic, tc_flow_cmd);

	if (bitmap_full(tc_info->tc_entries_bitmap, nic->flow_cfg->tc_max_flows)) {
		netdev_err(nic->netdev, "Not enough MCAM space to add the flow\n");
		rc = -ENOMEM;
		goto free_node;
	}

	mutex_lock(&nic->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_install_flow(&nic->mbox);
	if (!req) {
		rc = -ENOMEM;
		goto unlock;
	}

	rc = otx2_tc_prepare_flow(nic, tc_flow_cmd, req);
	if (rc) {
		otx2_mbox_reset(&nic->mbox.mbox, 0);
		goto unlock;
	}

	new_node->bitpos = find_first_zero_bit(tc_info->tc_entries_bitmap,
					       nic->flow_cfg->tc_max_flows);
	req->channel = nic->hw.rx_chan_base;
	req->entry = nic->flow_cfg->entry[nic->flow_cfg->tc_flower_offset +
					  nic->flow_cfg->tc_max_flows - new_node->bitpos];
	req->intf = NIX_INTF_RX;
	req->set_cntr = 1;
	new_node->entry = req->entry;

	/* Send message to AF */
	rc = otx2_sync_mbox_msg(&nic->mbox);
	if (rc) {
		netdev_err(nic->netdev, "Failed to install MCAM flow entry\n");
		goto unlock;
	}
	mutex_unlock(&nic->mbox.lock);

	/* add new flow to flow-table */
	rc = rhashtable_insert_fast(&nic->tc_info.flow_table, &new_node->node,
				    nic->tc_info.flow_ht_params);
	if (rc) {
		otx2_del_mcam_flow_entry(nic, new_node->entry);
		kfree_rcu(new_node, rcu);
		return rc;
	}

	set_bit(new_node->bitpos, tc_info->tc_entries_bitmap);
	tc_info->num_entries++;

	return 0;

unlock:
	mutex_unlock(&nic->mbox.lock);
free_node:
	kfree(new_node);
	return rc;
}
static int otx2_tc_get_flow_stats(struct otx2_nic *nic,
struct flow_cls_offload *tc_flow_cmd)
{
struct otx2_tc_info *tc_info = &nic->tc_info;
struct npc_mcam_get_stats_req *req;
struct npc_mcam_get_stats_rsp *rsp;
struct otx2_tc_flow_stats *stats;
struct otx2_tc_flow *flow_node;
int err;
flow_node = rhashtable_lookup_fast(&tc_info->flow_table,
&tc_flow_cmd->cookie,
tc_info->flow_ht_params);
if (!flow_node) {
netdev_info(nic->netdev, "tc flow not found for cookie %lx",
tc_flow_cmd->cookie);
return -EINVAL;
}
mutex_lock(&nic->mbox.lock);
req = otx2_mbox_alloc_msg_npc_mcam_entry_stats(&nic->mbox);
if (!req) {
mutex_unlock(&nic->mbox.lock);
return -ENOMEM;
}
req->entry = flow_node->entry;
err = otx2_sync_mbox_msg(&nic->mbox);
if (err) {
netdev_err(nic->netdev, "Failed to get stats for MCAM flow entry %d\n",
req->entry);
mutex_unlock(&nic->mbox.lock);
return -EFAULT;
}
rsp = (struct npc_mcam_get_stats_rsp *)otx2_mbox_get_rsp
(&nic->mbox.mbox, 0, &req->hdr);
if (IS_ERR(rsp)) {
mutex_unlock(&nic->mbox.lock);
return PTR_ERR(rsp);
}
mutex_unlock(&nic->mbox.lock);
if (!rsp->stat_ena)
return -EINVAL;
stats = &flow_node->stats;
spin_lock(&flow_node->lock);
flow_stats_update(&tc_flow_cmd->stats, 0x0, rsp->stat - stats->pkts, 0x0, 0x0,
FLOW_ACTION_HW_STATS_IMMEDIATE);
stats->pkts = rsp->stat;
spin_unlock(&flow_node->lock);
return 0;
}
/* Dispatch a tc flower command (add / delete / stats) to its handler. */
static int otx2_setup_tc_cls_flower(struct otx2_nic *nic,
				    struct flow_cls_offload *cls_flower)
{
	if (cls_flower->command == FLOW_CLS_REPLACE)
		return otx2_tc_add_flow(nic, cls_flower);
	if (cls_flower->command == FLOW_CLS_DESTROY)
		return otx2_tc_del_flow(nic, cls_flower);
	if (cls_flower->command == FLOW_CLS_STATS)
		return otx2_tc_get_flow_stats(nic, cls_flower);

	return -EOPNOTSUPP;
}
/* flow_block callback for the ingress path; only CLSFLOWER is offloaded. */
static int otx2_setup_tc_block_ingress_cb(enum tc_setup_type type,
					  void *type_data, void *cb_priv)
{
	struct otx2_nic *nic = cb_priv;

	if (!tc_cls_can_offload_and_chain0(nic->netdev, type_data))
		return -EOPNOTSUPP;

	if (type == TC_SETUP_CLSFLOWER)
		return otx2_setup_tc_cls_flower(nic, type_data);

	return -EOPNOTSUPP;
}
/* Dispatch a TC_MATCHALL egress command; stats are not supported. */
static int otx2_setup_tc_egress_matchall(struct otx2_nic *nic,
					 struct tc_cls_matchall_offload *cls_matchall)
{
	if (cls_matchall->command == TC_CLSMATCHALL_REPLACE)
		return otx2_tc_egress_matchall_install(nic, cls_matchall);
	if (cls_matchall->command == TC_CLSMATCHALL_DESTROY)
		return otx2_tc_egress_matchall_delete(nic, cls_matchall);

	/* TC_CLSMATCHALL_STATS and anything else is unsupported */
	return -EOPNOTSUPP;
}
/* flow_block callback for the egress path; only CLSMATCHALL is offloaded. */
static int otx2_setup_tc_block_egress_cb(enum tc_setup_type type,
					 void *type_data, void *cb_priv)
{
	struct otx2_nic *nic = cb_priv;

	if (!tc_cls_can_offload_and_chain0(nic->netdev, type_data))
		return -EOPNOTSUPP;

	if (type == TC_SETUP_CLSMATCHALL)
		return otx2_setup_tc_egress_matchall(nic, type_data);

	return -EOPNOTSUPP;
}
static LIST_HEAD(otx2_block_cb_list);
/* Bind a flow block to this device, choosing the ingress or egress
 * callback from the binder type. Shared blocks are not supported.
 */
static int otx2_setup_tc_block(struct net_device *netdev,
			       struct flow_block_offload *f)
{
	struct otx2_nic *nic = netdev_priv(netdev);
	flow_setup_cb_t *cb;
	bool ingress;

	if (f->block_shared)
		return -EOPNOTSUPP;

	switch (f->binder_type) {
	case FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS:
		cb = otx2_setup_tc_block_ingress_cb;
		ingress = true;
		break;
	case FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS:
		cb = otx2_setup_tc_block_egress_cb;
		ingress = false;
		break;
	default:
		return -EOPNOTSUPP;
	}

	return flow_block_cb_setup_simple(f, &otx2_block_cb_list, cb,
					  nic, nic, ingress);
}
/* ndo_setup_tc entry point; only block (clsact) offload is handled. */
int otx2_setup_tc(struct net_device *netdev, enum tc_setup_type type,
		  void *type_data)
{
	if (type == TC_SETUP_BLOCK)
		return otx2_setup_tc_block(netdev, type_data);

	return -EOPNOTSUPP;
}
/* Flow table parameters: otx2_tc_flow nodes keyed by their tc cookie. */
static const struct rhashtable_params tc_flow_ht_params = {
	.head_offset = offsetof(struct otx2_tc_flow, node),
	.key_offset = offsetof(struct otx2_tc_flow, cookie),
	.key_len = sizeof(((struct otx2_tc_flow *)0)->cookie),
	.automatic_shrinking = true,
};
/* Initialise tc offload state: set up the cookie-keyed flow table.
 * Returns the rhashtable_init() result (0 on success).
 */
int otx2_init_tc(struct otx2_nic *nic)
{
	struct otx2_tc_info *tc_info = &nic->tc_info;

	tc_info->flow_ht_params = tc_flow_ht_params;
	return rhashtable_init(&tc_info->flow_table, &tc_info->flow_ht_params);
}
/* Tear down tc offload state: release the flow lookup table. */
void otx2_shutdown_tc(struct otx2_nic *nic)
{
	rhashtable_destroy(&nic->tc_info.flow_table);
}
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment