Commit 5cfa9a61 authored by Jakub Kicinski

Merge branch 'add-ethtool-ntuple-filters-support'

Naveen Mamindlapalli says:

====================
Add ethtool ntuple filters support

This patch series adds support for ethtool ntuple filters, unicast
address filtering, VLAN offload and SR-IOV ndo handlers. All of the
above features build on the Admin Function (AF) driver's support for
installing and deleting the low-level MCAM entries. Each MCAM entry is
programmed with the packet fields to match and the actions to take
when the match succeeds. The PF driver requests the AF driver to
allocate a set of MCAM entries, which that PF then uses to install its
flows. The entries are freed when the PF driver is unloaded.
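
As a rough illustration of that request path, here is a minimal sketch of a
PF-side flow install (not code from the series; the mbox helper is the one
the M() macro generates for the new NPC_INSTALL_FLOW ID, and details such as
hw.rx_chan_base, the chosen MCAM entry and the error handling are assumptions
based on the existing PF driver):

	/* Hedged example: ask AF to program one RX MCAM entry matching a DMAC */
	static int example_install_dmac_flow(struct otx2_nic *pfvf, const u8 *mac,
					     u16 mcam_entry)
	{
		struct npc_install_flow_req *req;
		int err;

		mutex_lock(&pfvf->mbox.lock);
		req = otx2_mbox_alloc_msg_npc_install_flow(&pfvf->mbox);
		if (!req) {
			mutex_unlock(&pfvf->mbox.lock);
			return -ENOMEM;
		}

		ether_addr_copy(req->packet.dmac, mac);
		eth_broadcast_addr(req->mask.dmac);	/* match all 48 bits */
		req->features = BIT_ULL(NPC_DMAC);
		req->entry = mcam_entry;	/* entry allocated earlier from AF */
		req->channel = pfvf->hw.rx_chan_base;
		req->intf = NIX_INTF_RX;
		req->op = NIX_RX_ACTIONOP_UCAST;

		err = otx2_sync_mbox_msg(&pfvf->mbox);
		mutex_unlock(&pfvf->mbox.lock);
		return err;
	}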

* Patches 1 to 4 add the AF driver infrastructure to install and
  delete the low-level MCAM flow entries.
* Patch 5 adds ethtool ntuple filter support.
* Patch 6 adds unicast MAC address filtering.
* Patch 7 adds support for dumping the MCAM entries via debugfs.
* Patches 8 to 10 add support for VLAN offload.
* Patches 10 to 11 add support for SR-IOV ndo handlers.
* Patch 12 adds support to read the MCAM entries.

Misc:
* Removed the redundant NIX_RXVLAN_ALLOC mailbox message.
====================

Link: https://lore.kernel.org/r/20201114195303.25967-1-naveenm@marvell.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents f7365919 5a579667
......@@ -9,4 +9,4 @@ obj-$(CONFIG_OCTEONTX2_AF) += octeontx2_af.o
octeontx2_mbox-y := mbox.o rvu_trace.o
octeontx2_af-y := cgx.o rvu.o rvu_cgx.o rvu_npa.o rvu_nix.o \
rvu_reg.o rvu_npc.o rvu_debugfs.o ptp.o
rvu_reg.o rvu_npc.o rvu_debugfs.o ptp.o rvu_npc_fs.o
......@@ -162,6 +162,8 @@ enum nix_scheduler {
#define NIX_RX_ACTIONOP_UCAST_IPSEC (0x2ull)
#define NIX_RX_ACTIONOP_MCAST (0x3ull)
#define NIX_RX_ACTIONOP_RSS (0x4ull)
/* Use the RX action set in the default unicast entry */
#define NIX_RX_ACTION_DEFAULT (0xfull)
/* NIX TX action operation */
#define NIX_TX_ACTIONOP_DROP (0x0ull)
......
......@@ -188,10 +188,19 @@ M(NPC_MCAM_ALLOC_AND_WRITE_ENTRY, 0x600b, npc_mcam_alloc_and_write_entry, \
npc_mcam_alloc_and_write_entry_rsp) \
M(NPC_GET_KEX_CFG, 0x600c, npc_get_kex_cfg, \
msg_req, npc_get_kex_cfg_rsp) \
M(NPC_INSTALL_FLOW, 0x600d, npc_install_flow, \
npc_install_flow_req, npc_install_flow_rsp) \
M(NPC_DELETE_FLOW, 0x600e, npc_delete_flow, \
npc_delete_flow_req, msg_rsp) \
M(NPC_MCAM_READ_ENTRY, 0x600f, npc_mcam_read_entry, \
npc_mcam_read_entry_req, \
npc_mcam_read_entry_rsp) \
M(NPC_MCAM_READ_BASE_RULE, 0x6011, npc_read_base_steer_rule, \
msg_req, npc_mcam_read_base_rule_rsp) \
/* NIX mbox IDs (range 0x8000 - 0xFFFF) */ \
M(NIX_LF_ALLOC, 0x8000, nix_lf_alloc, \
nix_lf_alloc_req, nix_lf_alloc_rsp) \
M(NIX_LF_FREE, 0x8001, nix_lf_free, msg_req, msg_rsp) \
M(NIX_LF_FREE, 0x8001, nix_lf_free, nix_lf_free_req, msg_rsp) \
M(NIX_AQ_ENQ, 0x8002, nix_aq_enq, nix_aq_enq_req, nix_aq_enq_rsp) \
M(NIX_HWCTX_DISABLE, 0x8003, nix_hwctx_disable, \
hwctx_disable_req, msg_rsp) \
......@@ -200,7 +209,8 @@ M(NIX_TXSCH_ALLOC, 0x8004, nix_txsch_alloc, \
M(NIX_TXSCH_FREE, 0x8005, nix_txsch_free, nix_txsch_free_req, msg_rsp) \
M(NIX_TXSCHQ_CFG, 0x8006, nix_txschq_cfg, nix_txschq_config, msg_rsp) \
M(NIX_STATS_RST, 0x8007, nix_stats_rst, msg_req, msg_rsp) \
M(NIX_VTAG_CFG, 0x8008, nix_vtag_cfg, nix_vtag_config, msg_rsp) \
M(NIX_VTAG_CFG, 0x8008, nix_vtag_cfg, nix_vtag_config, \
nix_vtag_config_rsp) \
M(NIX_RSS_FLOWKEY_CFG, 0x8009, nix_rss_flowkey_cfg, \
nix_rss_flowkey_cfg, \
nix_rss_flowkey_cfg_rsp) \
......@@ -216,7 +226,6 @@ M(NIX_SET_RX_CFG, 0x8010, nix_set_rx_cfg, nix_rx_cfg, msg_rsp) \
M(NIX_LSO_FORMAT_CFG, 0x8011, nix_lso_format_cfg, \
nix_lso_format_cfg, \
nix_lso_format_cfg_rsp) \
M(NIX_RXVLAN_ALLOC, 0x8012, nix_rxvlan_alloc, msg_req, msg_rsp) \
M(NIX_LF_PTP_TX_ENABLE, 0x8013, nix_lf_ptp_tx_enable, msg_req, msg_rsp) \
M(NIX_LF_PTP_TX_DISABLE, 0x8014, nix_lf_ptp_tx_disable, msg_req, msg_rsp) \
M(NIX_BP_ENABLE, 0x8016, nix_bp_enable, nix_bp_cfg_req, \
......@@ -473,6 +482,20 @@ enum nix_af_status {
NIX_AF_ERR_LSO_CFG_FAIL = -418,
NIX_AF_INVAL_NPA_PF_FUNC = -419,
NIX_AF_INVAL_SSO_PF_FUNC = -420,
NIX_AF_ERR_TX_VTAG_NOSPC = -421,
NIX_AF_ERR_RX_VTAG_INUSE = -422,
};
/* For NIX RX vtag action */
enum nix_rx_vtag0_type {
NIX_AF_LFX_RX_VTAG_TYPE0, /* reserved for rx vlan offload */
NIX_AF_LFX_RX_VTAG_TYPE1,
NIX_AF_LFX_RX_VTAG_TYPE2,
NIX_AF_LFX_RX_VTAG_TYPE3,
NIX_AF_LFX_RX_VTAG_TYPE4,
NIX_AF_LFX_RX_VTAG_TYPE5,
NIX_AF_LFX_RX_VTAG_TYPE6,
NIX_AF_LFX_RX_VTAG_TYPE7,
};
/* For NIX LF context alloc and init */
......@@ -510,6 +533,13 @@ struct nix_lf_alloc_rsp {
u8 sdp_links; /* No. of SDP links present in HW */
};
struct nix_lf_free_req {
struct mbox_msghdr hdr;
#define NIX_LF_DISABLE_FLOWS BIT_ULL(0)
#define NIX_LF_DONT_FREE_TX_VTAG BIT_ULL(1)
u64 flags;
};
/* NIX AQ enqueue msg */
struct nix_aq_enq_req {
struct mbox_msghdr hdr;
......@@ -600,14 +630,40 @@ struct nix_vtag_config {
union {
/* valid when cfg_type is '0' */
struct {
/* tx vlan0 tag(C-VLAN) */
u64 vlan0;
/* tx vlan1 tag(S-VLAN) */
u64 vlan1;
/* insert tx vlan tag */
u8 insert_vlan :1;
/* insert tx double vlan tag */
u8 double_vlan :1;
u64 vtag0;
u64 vtag1;
/* cfg_vtag0 & cfg_vtag1 fields are valid
* when free_vtag0 & free_vtag1 are '0's.
*/
/* cfg_vtag0 = 1 to configure vtag0 */
u8 cfg_vtag0 :1;
/* cfg_vtag1 = 1 to configure vtag1 */
u8 cfg_vtag1 :1;
/* vtag0_idx & vtag1_idx are only valid when
* both cfg_vtag0 & cfg_vtag1 are '0's,
* these fields are used along with free_vtag0
* & free_vtag1 to free the nix lf's tx_vlan
* configuration.
*
* Denotes the indices of tx_vtag def registers
* that needs to be cleared and freed.
*/
int vtag0_idx;
int vtag1_idx;
/* free_vtag0 & free_vtag1 fields are valid
* when cfg_vtag0 & cfg_vtag1 are '0's.
*/
/* free_vtag0 = 1 clears vtag0 configuration
* vtag0_idx denotes the index to be cleared.
*/
u8 free_vtag0 :1;
/* free_vtag1 = 1 clears vtag1 configuration
* vtag1_idx denotes the index to be cleared.
*/
u8 free_vtag1 :1;
} tx;
/* valid when cfg_type is '1' */
......@@ -622,6 +678,17 @@ struct nix_vtag_config {
};
};
struct nix_vtag_config_rsp {
struct mbox_msghdr hdr;
int vtag0_idx;
int vtag1_idx;
/* Indices of tx_vtag def registers used to configure
* tx vtag0 & vtag1 headers, these indices are valid
* when nix_vtag_config mbox requested for vtag0 and/
* or vtag1 configuration.
*/
};
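/* Example (PF-side) use of the TX half of nix_vtag_config together with this
 * response; a hedged sketch only: the mbox helper name follows the M() macro
 * above, vtag_size/cfg_type are pre-existing nix_vtag_config fields not shown
 * in this hunk, and the (TPID << 16) | VID encoding of vtag0 is an assumption
 * rather than something taken verbatim from the series.
 *
 *	req = otx2_mbox_alloc_msg_nix_vtag_cfg(&pfvf->mbox);
 *	req->vtag_size = VTAGSIZE_T4;		(single 4-byte tag)
 *	req->cfg_type = 0;			(TX side of the union)
 *	req->tx.cfg_vtag0 = 1;
 *	req->tx.vtag0 = ((u64)ETH_P_8021Q << 16) | vid;
 *	err = otx2_sync_mbox_msg(&pfvf->mbox);
 *	rsp = (struct nix_vtag_config_rsp *)
 *	      otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
 *
 * On success rsp->vtag0_idx names the tx_vtag def register that was used;
 * passing it back later with free_vtag0 set releases that register.
 */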
struct nix_rss_flowkey_cfg {
struct mbox_msghdr hdr;
int mcam_index; /* MCAM entry index to modify */
......@@ -882,6 +949,87 @@ struct npc_get_kex_cfg_rsp {
u8 mkex_pfl_name[MKEX_NAME_LEN];
};
struct flow_msg {
unsigned char dmac[6];
unsigned char smac[6];
__be16 etype;
__be16 vlan_etype;
__be16 vlan_tci;
union {
__be32 ip4src;
__be32 ip6src[4];
};
union {
__be32 ip4dst;
__be32 ip6dst[4];
};
u8 tos;
u8 ip_ver;
u8 ip_proto;
u8 tc;
__be16 sport;
__be16 dport;
};
struct npc_install_flow_req {
struct mbox_msghdr hdr;
struct flow_msg packet;
struct flow_msg mask;
u64 features;
u16 entry;
u16 channel;
u8 intf;
u8 set_cntr; /* If a counter is available, set it for this entry? */
u8 default_rule;
u8 append; /* overwrite(0) or append(1) flow to default rule? */
u16 vf;
/* action */
u32 index;
u16 match_id;
u8 flow_key_alg;
u8 op;
/* vtag rx action */
u8 vtag0_type;
u8 vtag0_valid;
u8 vtag1_type;
u8 vtag1_valid;
/* vtag tx action */
u16 vtag0_def;
u8 vtag0_op;
u16 vtag1_def;
u8 vtag1_op;
};
struct npc_install_flow_rsp {
struct mbox_msghdr hdr;
int counter; /* negative if no counter else counter number */
};
struct npc_delete_flow_req {
struct mbox_msghdr hdr;
u16 entry;
u16 start; /* Disable range of entries */
u16 end;
u8 all; /* PF + VFs */
};
struct npc_mcam_read_entry_req {
struct mbox_msghdr hdr;
u16 entry; /* MCAM entry to read */
};
struct npc_mcam_read_entry_rsp {
struct mbox_msghdr hdr;
struct mcam_entry entry_data;
u8 intf;
u8 enable;
};
struct npc_mcam_read_base_rule_rsp {
struct mbox_msghdr hdr;
struct mcam_entry entry;
};
enum ptp_op {
PTP_OP_ADJFINE = 0,
PTP_OP_GET_CLOCK = 1,
......
......@@ -140,6 +140,63 @@ enum npc_kpu_lh_ltype {
NPC_LT_LH_CUSTOM1 = 0xF,
};
/* NPC port kind defines how the incoming or outgoing packets
* are processed. NPC accepts packets from up to 64 pkinds.
* Software assigns pkind for each incoming port such as CGX
* Ethernet interfaces, LBK interfaces, etc.
*/
enum npc_pkind_type {
NPC_TX_DEF_PKIND = 63ULL, /* NIX-TX PKIND */
};
/* list of known and supported fields in packet header and
* fields present in key structure.
*/
enum key_fields {
NPC_DMAC,
NPC_SMAC,
NPC_ETYPE,
NPC_OUTER_VID,
NPC_TOS,
NPC_SIP_IPV4,
NPC_DIP_IPV4,
NPC_SIP_IPV6,
NPC_DIP_IPV6,
NPC_SPORT_TCP,
NPC_DPORT_TCP,
NPC_SPORT_UDP,
NPC_DPORT_UDP,
NPC_SPORT_SCTP,
NPC_DPORT_SCTP,
NPC_HEADER_FIELDS_MAX,
NPC_CHAN = NPC_HEADER_FIELDS_MAX, /* Valid when Rx */
NPC_PF_FUNC, /* Valid when Tx */
NPC_ERRLEV,
NPC_ERRCODE,
NPC_LXMB,
NPC_LA,
NPC_LB,
NPC_LC,
NPC_LD,
NPC_LE,
NPC_LF,
NPC_LG,
NPC_LH,
/* Ethertype for untagged frame */
NPC_ETYPE_ETHER,
/* Ethertype for single tagged frame */
NPC_ETYPE_TAG1,
/* Ethertype for double tagged frame */
NPC_ETYPE_TAG2,
/* outer vlan tci for single tagged frame */
NPC_VLAN_TAG1,
/* outer vlan tci for double tagged frame */
NPC_VLAN_TAG2,
/* other header fields programmed to extract but not of our interest */
NPC_UNKNOWN,
NPC_KEY_FIELDS_MAX,
};
struct npc_kpu_profile_cam {
u8 state;
u8 state_mask;
......@@ -300,11 +357,63 @@ struct nix_rx_action {
/* NPC_AF_INTFX_KEX_CFG field masks */
#define NPC_PARSE_NIBBLE GENMASK_ULL(30, 0)
/* NPC_PARSE_KEX_S nibble definitions for each field */
#define NPC_PARSE_NIBBLE_CHAN GENMASK_ULL(2, 0)
#define NPC_PARSE_NIBBLE_ERRLEV BIT_ULL(3)
#define NPC_PARSE_NIBBLE_ERRCODE GENMASK_ULL(5, 4)
#define NPC_PARSE_NIBBLE_L2L3_BCAST BIT_ULL(6)
#define NPC_PARSE_NIBBLE_LA_FLAGS GENMASK_ULL(8, 7)
#define NPC_PARSE_NIBBLE_LA_LTYPE BIT_ULL(9)
#define NPC_PARSE_NIBBLE_LB_FLAGS GENMASK_ULL(11, 10)
#define NPC_PARSE_NIBBLE_LB_LTYPE BIT_ULL(12)
#define NPC_PARSE_NIBBLE_LC_FLAGS GENMASK_ULL(14, 13)
#define NPC_PARSE_NIBBLE_LC_LTYPE BIT_ULL(15)
#define NPC_PARSE_NIBBLE_LD_FLAGS GENMASK_ULL(17, 16)
#define NPC_PARSE_NIBBLE_LD_LTYPE BIT_ULL(18)
#define NPC_PARSE_NIBBLE_LE_FLAGS GENMASK_ULL(20, 19)
#define NPC_PARSE_NIBBLE_LE_LTYPE BIT_ULL(21)
#define NPC_PARSE_NIBBLE_LF_FLAGS GENMASK_ULL(23, 22)
#define NPC_PARSE_NIBBLE_LF_LTYPE BIT_ULL(24)
#define NPC_PARSE_NIBBLE_LG_FLAGS GENMASK_ULL(26, 25)
#define NPC_PARSE_NIBBLE_LG_LTYPE BIT_ULL(27)
#define NPC_PARSE_NIBBLE_LH_FLAGS GENMASK_ULL(29, 28)
#define NPC_PARSE_NIBBLE_LH_LTYPE BIT_ULL(30)
struct nix_tx_action {
#if defined(__BIG_ENDIAN_BITFIELD)
u64 rsvd_63_48 :16;
u64 match_id :16;
u64 index :20;
u64 rsvd_11_8 :8;
u64 op :4;
#else
u64 op :4;
u64 rsvd_11_8 :8;
u64 index :20;
u64 match_id :16;
u64 rsvd_63_48 :16;
#endif
};
/* NIX Receive Vtag Action Structure */
#define VTAG0_VALID_BIT BIT_ULL(15)
#define VTAG0_TYPE_MASK GENMASK_ULL(14, 12)
#define VTAG0_LID_MASK GENMASK_ULL(10, 8)
#define VTAG0_RELPTR_MASK GENMASK_ULL(7, 0)
#define RX_VTAG0_VALID_BIT BIT_ULL(15)
#define RX_VTAG0_TYPE_MASK GENMASK_ULL(14, 12)
#define RX_VTAG0_LID_MASK GENMASK_ULL(10, 8)
#define RX_VTAG0_RELPTR_MASK GENMASK_ULL(7, 0)
#define RX_VTAG1_VALID_BIT BIT_ULL(47)
#define RX_VTAG1_TYPE_MASK GENMASK_ULL(46, 44)
#define RX_VTAG1_LID_MASK GENMASK_ULL(42, 40)
#define RX_VTAG1_RELPTR_MASK GENMASK_ULL(39, 32)
/* NIX Transmit Vtag Action Structure */
#define TX_VTAG0_DEF_MASK GENMASK_ULL(25, 16)
#define TX_VTAG0_OP_MASK GENMASK_ULL(13, 12)
#define TX_VTAG0_LID_MASK GENMASK_ULL(10, 8)
#define TX_VTAG0_RELPTR_MASK GENMASK_ULL(7, 0)
#define TX_VTAG1_DEF_MASK GENMASK_ULL(57, 48)
#define TX_VTAG1_OP_MASK GENMASK_ULL(45, 44)
#define TX_VTAG1_LID_MASK GENMASK_ULL(42, 40)
#define TX_VTAG1_RELPTR_MASK GENMASK_ULL(39, 32)
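/* Example of composing an RX vtag action word from the masks above (a sketch
 * using FIELD_PREP from <linux/bitfield.h>; the series may build the value
 * differently). This selects the per-LF VTAG_TYPE0 profile (whose strip and
 * capture behaviour is configured separately) and takes the tag from the LB
 * (VLAN) layer at relative offset 0:
 *
 *	u64 rx_vtag_action = RX_VTAG0_VALID_BIT |
 *		FIELD_PREP(RX_VTAG0_TYPE_MASK, NIX_AF_LFX_RX_VTAG_TYPE0) |
 *		FIELD_PREP(RX_VTAG0_LID_MASK, NPC_LID_LB) |
 *		FIELD_PREP(RX_VTAG0_RELPTR_MASK, 0);
 */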
struct npc_mcam_kex {
/* MKEX Profile Header */
......@@ -357,4 +466,24 @@ struct npc_lt_def_cfg {
struct npc_lt_def pck_iip4;
};
struct rvu_npc_mcam_rule {
struct flow_msg packet;
struct flow_msg mask;
u8 intf;
union {
struct nix_tx_action tx_action;
struct nix_rx_action rx_action;
};
u64 vtag_action;
struct list_head list;
u64 features;
u16 owner;
u16 entry;
u16 cntr;
bool has_cntr;
u8 default_rule;
bool enable;
bool vfvlan_cfg;
};
#endif /* NPC_H */
......@@ -148,6 +148,20 @@
(((bytesm1) << 16) | ((hdr_ofs) << 8) | ((ena) << 7) | \
((flags_ena) << 6) | ((key_ofs) & 0x3F))
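/* KEX_LD_CFG(bytesm1, hdr_ofs, ena, flags_ena, key_ofs) packs one extractor
 * descriptor. For example the LB/CTAG entry below, KEX_LD_CFG(0x03, 0x2, 0x1,
 * 0x0, 0x4), extracts bytesm1 + 1 = 4 bytes starting at header offset 2 (the
 * VLAN TCI plus the encapsulated ethertype) into key byte offset 4, i.e.
 * KW0[63:32]; numerically it expands to
 * (0x03 << 16) | (0x2 << 8) | (1 << 7) | 0x4 = 0x30284.
 */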
/* Rx parse key extract nibble enable */
#define NPC_PARSE_NIBBLE_INTF_RX (NPC_PARSE_NIBBLE_CHAN | \
NPC_PARSE_NIBBLE_LA_LTYPE | \
NPC_PARSE_NIBBLE_LB_LTYPE | \
NPC_PARSE_NIBBLE_LC_LTYPE | \
NPC_PARSE_NIBBLE_LD_LTYPE | \
NPC_PARSE_NIBBLE_LE_LTYPE)
/* Tx parse key extract nibble enable */
#define NPC_PARSE_NIBBLE_INTF_TX (NPC_PARSE_NIBBLE_LA_LTYPE | \
NPC_PARSE_NIBBLE_LB_LTYPE | \
NPC_PARSE_NIBBLE_LC_LTYPE | \
NPC_PARSE_NIBBLE_LD_LTYPE | \
NPC_PARSE_NIBBLE_LE_LTYPE)
enum npc_kpu_parser_state {
NPC_S_NA = 0,
NPC_S_KPU1_ETHER,
......@@ -13385,9 +13399,10 @@ static struct npc_mcam_kex npc_mkex_default = {
.name = "default",
.kpu_version = NPC_KPU_PROFILE_VER,
.keyx_cfg = {
/* nibble: LA..LE (ltype only) + Channel */
[NIX_INTF_RX] = ((u64)NPC_MCAM_KEY_X2 << 32) | 0x49247,
[NIX_INTF_TX] = ((u64)NPC_MCAM_KEY_X2 << 32) | ((1ULL << 19) - 1),
/* nibble: LA..LE (ltype only) + channel */
[NIX_INTF_RX] = ((u64)NPC_MCAM_KEY_X2 << 32) | NPC_PARSE_NIBBLE_INTF_RX,
/* nibble: LA..LE (ltype only) */
[NIX_INTF_TX] = ((u64)NPC_MCAM_KEY_X2 << 32) | NPC_PARSE_NIBBLE_INTF_TX,
},
.intf_lid_lt_ld = {
/* Default RX MCAM KEX profile */
......@@ -13405,12 +13420,14 @@ static struct npc_mcam_kex npc_mkex_default = {
/* Layer B: Single VLAN (CTAG) */
/* CTAG VLAN[2..3] + Ethertype, 4 bytes, KW0[63:32] */
[NPC_LT_LB_CTAG] = {
KEX_LD_CFG(0x03, 0x0, 0x1, 0x0, 0x4),
KEX_LD_CFG(0x03, 0x2, 0x1, 0x0, 0x4),
},
/* Layer B: Stacked VLAN (STAG|QinQ) */
[NPC_LT_LB_STAG_QINQ] = {
/* CTAG VLAN[2..3] + Ethertype, 4 bytes, KW0[63:32] */
KEX_LD_CFG(0x03, 0x4, 0x1, 0x0, 0x4),
/* Outer VLAN: 2 bytes, KW0[63:48] */
KEX_LD_CFG(0x01, 0x2, 0x1, 0x0, 0x6),
/* Ethertype: 2 bytes, KW0[47:32] */
KEX_LD_CFG(0x01, 0x8, 0x1, 0x0, 0x4),
},
[NPC_LT_LB_FDSA] = {
/* SWITCH PORT: 1 byte, KW0[63:48] */
......@@ -13436,17 +13453,71 @@ static struct npc_mcam_kex npc_mkex_default = {
[NPC_LID_LD] = {
/* Layer D:UDP */
[NPC_LT_LD_UDP] = {
/* SPORT: 2 bytes, KW3[15:0] */
KEX_LD_CFG(0x1, 0x0, 0x1, 0x0, 0x18),
/* DPORT: 2 bytes, KW3[31:16] */
KEX_LD_CFG(0x1, 0x2, 0x1, 0x0, 0x1a),
/* SPORT+DPORT: 4 bytes, KW3[31:0] */
KEX_LD_CFG(0x3, 0x0, 0x1, 0x0, 0x18),
},
/* Layer D:TCP */
[NPC_LT_LD_TCP] = {
/* SPORT+DPORT: 4 bytes, KW3[31:0] */
KEX_LD_CFG(0x3, 0x0, 0x1, 0x0, 0x18),
},
},
},
/* Default TX MCAM KEX profile */
[NIX_INTF_TX] = {
[NPC_LID_LA] = {
/* Layer A: NIX_INST_HDR_S + Ethernet */
/* NIX appends 8 bytes of NIX_INST_HDR_S at the
* start of each TX packet supplied to NPC.
*/
[NPC_LT_LA_IH_NIX_ETHER] = {
/* PF_FUNC: 2 bytes, KW0[47:32] */
KEX_LD_CFG(0x01, 0x0, 0x1, 0x0, 0x4),
/* DMAC: 6 bytes, KW1[63:16] */
KEX_LD_CFG(0x05, 0x8, 0x1, 0x0, 0xa),
},
},
[NPC_LID_LB] = {
/* Layer B: Single VLAN (CTAG) */
[NPC_LT_LB_CTAG] = {
/* CTAG VLAN[2..3] KW0[63:48] */
KEX_LD_CFG(0x01, 0x2, 0x1, 0x0, 0x6),
/* CTAG VLAN[2..3] KW1[15:0] */
KEX_LD_CFG(0x01, 0x4, 0x1, 0x0, 0x8),
},
/* Layer B: Stacked VLAN (STAG|QinQ) */
[NPC_LT_LB_STAG_QINQ] = {
/* Outer VLAN: 2 bytes, KW0[63:48] */
KEX_LD_CFG(0x01, 0x2, 0x1, 0x0, 0x6),
/* Outer VLAN: 2 Bytes, KW1[15:0] */
KEX_LD_CFG(0x01, 0x8, 0x1, 0x0, 0x8),
},
},
[NPC_LID_LC] = {
/* Layer C: IPv4 */
[NPC_LT_LC_IP] = {
/* SIP+DIP: 8 bytes, KW2[63:0] */
KEX_LD_CFG(0x07, 0xc, 0x1, 0x0, 0x10),
/* TOS: 1 byte, KW1[63:56] */
KEX_LD_CFG(0x0, 0x1, 0x1, 0x0, 0xf),
},
/* Layer C: IPv6 */
[NPC_LT_LC_IP6] = {
/* Everything up to SADDR: 8 bytes, KW2[63:0] */
KEX_LD_CFG(0x07, 0x0, 0x1, 0x0, 0x10),
},
},
[NPC_LID_LD] = {
/* Layer D:UDP */
[NPC_LT_LD_UDP] = {
/* SPORT+DPORT: 4 bytes, KW3[31:0] */
KEX_LD_CFG(0x3, 0x0, 0x1, 0x0, 0x18),
},
/* Layer D:TCP */
[NPC_LT_LD_TCP] = {
/* SPORT: 2 bytes, KW3[15:0] */
KEX_LD_CFG(0x1, 0x0, 0x1, 0x0, 0x18),
/* DPORT: 2 bytes, KW3[31:16] */
KEX_LD_CFG(0x1, 0x2, 0x1, 0x0, 0x1a),
/* SPORT+DPORT: 4 bytes, KW3[31:0] */
KEX_LD_CFG(0x3, 0x0, 0x1, 0x0, 0x18),
},
},
},
......
......@@ -727,6 +727,10 @@ static void rvu_setup_pfvf_macaddress(struct rvu *rvu)
u64 *mac;
for (pf = 0; pf < hw->total_pfs; pf++) {
/* For PF0(AF), Assign MAC address to only VFs (LBKVFs) */
if (!pf)
goto lbkvf;
if (!is_pf_cgxmapped(rvu, pf))
continue;
/* Assign MAC address to PF */
......@@ -740,8 +744,10 @@ static void rvu_setup_pfvf_macaddress(struct rvu *rvu)
} else {
eth_random_addr(pfvf->mac_addr);
}
ether_addr_copy(pfvf->default_mac, pfvf->mac_addr);
/* Assign MAC address to VFs */
lbkvf:
/* Assign MAC address to VFs*/
rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);
for (vf = 0; vf < numvfs; vf++, hwvf++) {
pfvf = &rvu->hwvf[hwvf];
......@@ -754,6 +760,7 @@ static void rvu_setup_pfvf_macaddress(struct rvu *rvu)
} else {
eth_random_addr(pfvf->mac_addr);
}
ether_addr_copy(pfvf->default_mac, pfvf->mac_addr);
}
}
}
......@@ -1176,6 +1183,9 @@ static void rvu_detach_block(struct rvu *rvu, int pcifunc, int blktype)
if (blkaddr < 0)
return;
if (blktype == BLKTYPE_NIX)
rvu_nix_reset_mac(pfvf, pcifunc);
block = &hw->block[blkaddr];
num_lfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
......@@ -2642,7 +2652,7 @@ static void rvu_enable_afvf_intr(struct rvu *rvu)
#define PCI_DEVID_OCTEONTX2_LBK 0xA061
static int lbk_get_num_chans(void)
int rvu_get_num_lbk_chans(void)
{
struct pci_dev *pdev;
void __iomem *base;
......@@ -2677,7 +2687,7 @@ static int rvu_enable_sriov(struct rvu *rvu)
return 0;
}
chans = lbk_get_num_chans();
chans = rvu_get_num_lbk_chans();
if (chans < 0)
return chans;
......
......@@ -15,6 +15,7 @@
#include "rvu_struct.h"
#include "common.h"
#include "mbox.h"
#include "npc.h"
/* PCI device IDs */
#define PCI_DEVID_OCTEONTX2_RVU_AF 0xA065
......@@ -105,6 +106,36 @@ struct nix_mce_list {
int max;
};
/* layer metadata to uniquely identify a packet header field */
struct npc_layer_mdata {
u8 lid;
u8 ltype;
u8 hdr;
u8 key;
u8 len;
};
/* Structure to represent a field present in the
* generated key. A key field may be present anywhere and can
* be of any size in the generated key. Once this structure
* is populated for the fields of interest, a field's presence
* and location (if present) can be known.
*/
struct npc_key_field {
/* Masks where all set bits indicate position
* of a field in the key
*/
u64 kw_mask[NPC_MAX_KWS_IN_KEY];
/* Number of words in the key a field spans. If a field is
* of 16 bytes and key offset is 4 then the field will use
* 4 bytes in KW0, 8 bytes in KW1 and 4 bytes in KW2 and
* nr_kws will be 3(KW0, KW1 and KW2).
*/
int nr_kws;
/* used by packet header fields */
struct npc_layer_mdata layer_mdata;
};
struct npc_mcam {
struct rsrc_bmap counters;
struct mutex lock; /* MCAM entries and counters update lock */
......@@ -116,6 +147,7 @@ struct npc_mcam {
u16 *entry2cntr_map;
u16 *cntr2pfvf_map;
u16 *cntr_refcnt;
u16 *entry2target_pffunc;
u8 keysize; /* MCAM keysize 112/224/448 bits */
u8 banks; /* Number of MCAM banks */
u8 banks_per_entry;/* Number of keywords in key */
......@@ -128,6 +160,12 @@ struct npc_mcam {
u16 hprio_count;
u16 hprio_end;
u16 rx_miss_act_cntr; /* Counter for RX MISS action */
/* fields present in the generated key */
struct npc_key_field tx_key_fields[NPC_KEY_FIELDS_MAX];
struct npc_key_field rx_key_fields[NPC_KEY_FIELDS_MAX];
u64 tx_features;
u64 rx_features;
struct list_head mcam_rules;
};
/* Structure for per RVU func info ie PF/VF */
......@@ -171,16 +209,15 @@ struct rvu_pfvf {
u16 maxlen;
u16 minlen;
u8 pf_set_vf_cfg;
u8 mac_addr[ETH_ALEN]; /* MAC address of this PF/VF */
u8 default_mac[ETH_ALEN]; /* MAC address from FWdata */
/* Broadcast pkt replication info */
u16 bcast_mce_idx;
struct nix_mce_list bcast_mce_list;
/* VLAN offload */
struct mcam_entry entry;
int rxvlan_index;
bool rxvlan;
struct rvu_npc_mcam_rule *def_ucast_rule;
bool cgx_in_use; /* this PF/VF using CGX? */
int cgx_users; /* number of cgx users - used only by PFs */
......@@ -224,6 +261,13 @@ struct nix_lso {
u8 in_use;
};
struct nix_txvlan {
#define NIX_TX_VTAG_DEF_MAX 0x400
struct rsrc_bmap rsrc;
u16 *entry2pfvf_map;
struct mutex rsrc_lock; /* Serialize resource alloc/free */
};
struct nix_hw {
int blkaddr;
struct rvu *rvu;
......@@ -232,6 +276,7 @@ struct nix_hw {
struct nix_flowkey flowkey;
struct nix_mark_format mark_format;
struct nix_lso lso;
struct nix_txvlan txvlan;
};
/* RVU block's capabilities or functionality,
......@@ -445,6 +490,7 @@ int rvu_get_lf(struct rvu *rvu, struct rvu_block *block, u16 pcifunc, u16 slot);
int rvu_lf_reset(struct rvu *rvu, struct rvu_block *block, int lf);
int rvu_get_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc);
int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero);
int rvu_get_num_lbk_chans(void);
/* RVU HW reg validation */
enum regmap_block {
......@@ -503,6 +549,7 @@ int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf, int *nix_blkaddr);
int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add);
struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr);
int rvu_get_next_nix_blkaddr(struct rvu *rvu, int blkaddr);
void rvu_nix_reset_mac(struct rvu_pfvf *pfvf, int pcifunc);
/* NPC APIs */
int rvu_npc_init(struct rvu *rvu);
......@@ -519,8 +566,8 @@ void rvu_npc_enable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf);
void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc,
int nixlf, u64 chan);
void rvu_npc_enable_bcast_entry(struct rvu *rvu, u16 pcifunc, bool enable);
int rvu_npc_update_rxvlan(struct rvu *rvu, u16 pcifunc, int nixlf);
void rvu_npc_disable_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
void rvu_npc_free_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
void rvu_npc_disable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
void rvu_npc_enable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf,
......@@ -535,6 +582,20 @@ bool is_npc_intf_tx(u8 intf);
bool is_npc_intf_rx(u8 intf);
bool is_npc_interface_valid(struct rvu *rvu, u8 intf);
int rvu_npc_get_tx_nibble_cfg(struct rvu *rvu, u64 nibble_ena);
int npc_mcam_verify_channel(struct rvu *rvu, u16 pcifunc, u8 intf, u16 channel);
int npc_flow_steering_init(struct rvu *rvu, int blkaddr);
const char *npc_get_field_name(u8 hdr);
bool rvu_npc_write_default_rule(struct rvu *rvu, int blkaddr, int nixlf,
u16 pcifunc, u8 intf, struct mcam_entry *entry,
int *entry_index);
int npc_get_bank(struct npc_mcam *mcam, int index);
void npc_mcam_enable_flows(struct rvu *rvu, u16 target);
void npc_mcam_disable_flows(struct rvu *rvu, u16 target);
void npc_enable_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
int blkaddr, int index, bool enable);
void npc_read_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
int blkaddr, u16 src, struct mcam_entry *entry,
u8 *intf, u8 *ena);
#ifdef CONFIG_DEBUG_FS
void rvu_dbg_init(struct rvu *rvu);
......
......@@ -1770,6 +1770,198 @@ static int rvu_dbg_npc_rx_miss_stats_display(struct seq_file *filp,
RVU_DEBUG_SEQ_FOPS(npc_rx_miss_act, npc_rx_miss_stats_display, NULL);
static void rvu_dbg_npc_mcam_show_flows(struct seq_file *s,
struct rvu_npc_mcam_rule *rule)
{
u8 bit;
for_each_set_bit(bit, (unsigned long *)&rule->features, 64) {
seq_printf(s, "\t%s ", npc_get_field_name(bit));
switch (bit) {
case NPC_DMAC:
seq_printf(s, "%pM ", rule->packet.dmac);
seq_printf(s, "mask %pM\n", rule->mask.dmac);
break;
case NPC_SMAC:
seq_printf(s, "%pM ", rule->packet.smac);
seq_printf(s, "mask %pM\n", rule->mask.smac);
break;
case NPC_ETYPE:
seq_printf(s, "0x%x ", ntohs(rule->packet.etype));
seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.etype));
break;
case NPC_OUTER_VID:
seq_printf(s, "%d ", ntohs(rule->packet.vlan_tci));
seq_printf(s, "mask 0x%x\n",
ntohs(rule->mask.vlan_tci));
break;
case NPC_TOS:
seq_printf(s, "%d ", rule->packet.tos);
seq_printf(s, "mask 0x%x\n", rule->mask.tos);
break;
case NPC_SIP_IPV4:
seq_printf(s, "%pI4 ", &rule->packet.ip4src);
seq_printf(s, "mask %pI4\n", &rule->mask.ip4src);
break;
case NPC_DIP_IPV4:
seq_printf(s, "%pI4 ", &rule->packet.ip4dst);
seq_printf(s, "mask %pI4\n", &rule->mask.ip4dst);
break;
case NPC_SIP_IPV6:
seq_printf(s, "%pI6 ", rule->packet.ip6src);
seq_printf(s, "mask %pI6\n", rule->mask.ip6src);
break;
case NPC_DIP_IPV6:
seq_printf(s, "%pI6 ", rule->packet.ip6dst);
seq_printf(s, "mask %pI6\n", rule->mask.ip6dst);
break;
case NPC_SPORT_TCP:
case NPC_SPORT_UDP:
case NPC_SPORT_SCTP:
seq_printf(s, "%d ", ntohs(rule->packet.sport));
seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.sport));
break;
case NPC_DPORT_TCP:
case NPC_DPORT_UDP:
case NPC_DPORT_SCTP:
seq_printf(s, "%d ", ntohs(rule->packet.dport));
seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.dport));
break;
default:
break;
}
}
}
static void rvu_dbg_npc_mcam_show_action(struct seq_file *s,
struct rvu_npc_mcam_rule *rule)
{
if (rule->intf == NIX_INTF_TX) {
switch (rule->tx_action.op) {
case NIX_TX_ACTIONOP_DROP:
seq_puts(s, "\taction: Drop\n");
break;
case NIX_TX_ACTIONOP_UCAST_DEFAULT:
seq_puts(s, "\taction: Unicast to default channel\n");
break;
case NIX_TX_ACTIONOP_UCAST_CHAN:
seq_printf(s, "\taction: Unicast to channel %d\n",
rule->tx_action.index);
break;
case NIX_TX_ACTIONOP_MCAST:
seq_puts(s, "\taction: Multicast\n");
break;
case NIX_TX_ACTIONOP_DROP_VIOL:
seq_puts(s, "\taction: Lockdown Violation Drop\n");
break;
default:
break;
}
} else {
switch (rule->rx_action.op) {
case NIX_RX_ACTIONOP_DROP:
seq_puts(s, "\taction: Drop\n");
break;
case NIX_RX_ACTIONOP_UCAST:
seq_printf(s, "\taction: Direct to queue %d\n",
rule->rx_action.index);
break;
case NIX_RX_ACTIONOP_RSS:
seq_puts(s, "\taction: RSS\n");
break;
case NIX_RX_ACTIONOP_UCAST_IPSEC:
seq_puts(s, "\taction: Unicast ipsec\n");
break;
case NIX_RX_ACTIONOP_MCAST:
seq_puts(s, "\taction: Multicast\n");
break;
default:
break;
}
}
}
static const char *rvu_dbg_get_intf_name(int intf)
{
switch (intf) {
case NIX_INTFX_RX(0):
return "NIX0_RX";
case NIX_INTFX_RX(1):
return "NIX1_RX";
case NIX_INTFX_TX(0):
return "NIX0_TX";
case NIX_INTFX_TX(1):
return "NIX1_TX";
default:
break;
}
return "unknown";
}
static int rvu_dbg_npc_mcam_show_rules(struct seq_file *s, void *unused)
{
struct rvu_npc_mcam_rule *iter;
struct rvu *rvu = s->private;
struct npc_mcam *mcam;
int pf, vf = -1;
int blkaddr;
u16 target;
u64 hits;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
if (blkaddr < 0)
return 0;
mcam = &rvu->hw->mcam;
mutex_lock(&mcam->lock);
list_for_each_entry(iter, &mcam->mcam_rules, list) {
pf = (iter->owner >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
seq_printf(s, "\n\tInstalled by: PF%d ", pf);
if (iter->owner & RVU_PFVF_FUNC_MASK) {
vf = (iter->owner & RVU_PFVF_FUNC_MASK) - 1;
seq_printf(s, "VF%d", vf);
}
seq_puts(s, "\n");
seq_printf(s, "\tdirection: %s\n", is_npc_intf_rx(iter->intf) ?
"RX" : "TX");
seq_printf(s, "\tinterface: %s\n",
rvu_dbg_get_intf_name(iter->intf));
seq_printf(s, "\tmcam entry: %d\n", iter->entry);
rvu_dbg_npc_mcam_show_flows(s, iter);
if (iter->intf == NIX_INTF_RX) {
target = iter->rx_action.pf_func;
pf = (target >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
seq_printf(s, "\tForward to: PF%d ", pf);
if (target & RVU_PFVF_FUNC_MASK) {
vf = (target & RVU_PFVF_FUNC_MASK) - 1;
seq_printf(s, "VF%d", vf);
}
seq_puts(s, "\n");
}
rvu_dbg_npc_mcam_show_action(s, iter);
seq_printf(s, "\tenabled: %s\n", iter->enable ? "yes" : "no");
if (!iter->has_cntr)
continue;
seq_printf(s, "\tcounter: %d\n", iter->cntr);
hits = rvu_read64(rvu, blkaddr, NPC_AF_MATCH_STATX(iter->cntr));
seq_printf(s, "\thits: %lld\n", hits);
}
mutex_unlock(&mcam->lock);
return 0;
}
RVU_DEBUG_SEQ_FOPS(npc_mcam_rules, npc_mcam_show_rules, NULL);
static void rvu_dbg_npc_init(struct rvu *rvu)
{
const struct device *dev = &rvu->pdev->dev;
......@@ -1784,6 +1976,11 @@ static void rvu_dbg_npc_init(struct rvu *rvu)
if (!pfile)
goto create_failed;
pfile = debugfs_create_file("mcam_rules", 0444, rvu->rvu_dbg.npc,
rvu, &rvu_dbg_npc_mcam_rules_fops);
if (!pfile)
goto create_failed;
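	/* The installed rules can then be inspected from user space, e.g.
	 * "cat /sys/kernel/debug/octeontx2/npc/mcam_rules" (assuming the
	 * default debugfs mount point and the driver's "octeontx2" debugfs
	 * root directory).
	 */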
pfile = debugfs_create_file("rx_miss_act_stats", 0444, rvu->rvu_dbg.npc,
rvu, &rvu_dbg_npc_rx_miss_act_fops);
if (!pfile)
......
......@@ -921,4 +921,15 @@ enum nix_vtag_size {
VTAGSIZE_T4 = 0x0,
VTAGSIZE_T8 = 0x1,
};
enum nix_tx_vtag_op {
NOP = 0x0,
VTAG_INSERT = 0x1,
VTAG_REPLACE = 0x2,
};
/* NIX RX VTAG actions */
#define VTAG_STRIP BIT_ULL(4)
#define VTAG_CAPTURE BIT_ULL(5)
#endif /* RVU_STRUCT_H */
......@@ -7,7 +7,7 @@ obj-$(CONFIG_OCTEONTX2_PF) += octeontx2_nicpf.o
obj-$(CONFIG_OCTEONTX2_VF) += octeontx2_nicvf.o
octeontx2_nicpf-y := otx2_pf.o otx2_common.o otx2_txrx.o otx2_ethtool.o \
otx2_ptp.o
otx2_ptp.o otx2_flows.o
octeontx2_nicvf-y := otx2_vf.o
ccflags-y += -I$(srctree)/drivers/net/ethernet/marvell/octeontx2/af
......@@ -191,10 +191,14 @@ int otx2_set_mac_address(struct net_device *netdev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
if (!otx2_hw_set_mac_addr(pfvf, addr->sa_data))
if (!otx2_hw_set_mac_addr(pfvf, addr->sa_data)) {
memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
else
/* update dmac field in vlan offload rule */
if (pfvf->flags & OTX2_FLAG_RX_VLAN_SUPPORT)
otx2_install_rxvlan_offload_flow(pfvf);
} else {
return -EPERM;
}
return 0;
}
......
......@@ -18,6 +18,7 @@
#include <linux/timecounter.h>
#include <mbox.h>
#include <npc.h>
#include "otx2_reg.h"
#include "otx2_txrx.h"
#include <rvu_trace.h>
......@@ -205,6 +206,9 @@ struct otx2_vf_config {
struct otx2_nic *pf;
struct delayed_work link_event_work;
bool intf_down; /* interface was either configured or not */
u8 mac[ETH_ALEN];
u16 vlan;
int tx_vtag_idx;
};
struct flr_work {
......@@ -228,6 +232,32 @@ struct otx2_ptp {
#define OTX2_HW_TIMESTAMP_LEN 8
struct otx2_mac_table {
u8 addr[ETH_ALEN];
u16 mcam_entry;
bool inuse;
};
struct otx2_flow_config {
u16 entry[NPC_MAX_NONCONTIG_ENTRIES];
u32 nr_flows;
#define OTX2_MAX_NTUPLE_FLOWS 32
#define OTX2_MAX_UNICAST_FLOWS 8
#define OTX2_MAX_VLAN_FLOWS 1
#define OTX2_MCAM_COUNT (OTX2_MAX_NTUPLE_FLOWS + \
OTX2_MAX_UNICAST_FLOWS + \
OTX2_MAX_VLAN_FLOWS)
u32 ntuple_offset;
u32 unicast_offset;
u32 rx_vlan_offset;
u32 vf_vlan_offset;
#define OTX2_PER_VF_VLAN_FLOWS 2 /* rx+tx per VF */
#define OTX2_VF_VLAN_RX_INDEX 0
#define OTX2_VF_VLAN_TX_INDEX 1
u32 ntuple_max_flows;
struct list_head flow_list;
};
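/* These offsets partition one contiguous block of OTX2_MCAM_COUNT entries
 * obtained from the AF. A plausible layout (a sketch, not necessarily the
 * exact logic in otx2_flows.c) places ntuple rules first, then unicast
 * filters, then the RX VLAN entry:
 *
 *	flow_cfg->ntuple_max_flows = OTX2_MAX_NTUPLE_FLOWS;
 *	flow_cfg->ntuple_offset = 0;
 *	flow_cfg->unicast_offset = flow_cfg->ntuple_offset +
 *				   OTX2_MAX_NTUPLE_FLOWS;
 *	flow_cfg->rx_vlan_offset = flow_cfg->unicast_offset +
 *				   OTX2_MAX_UNICAST_FLOWS;
 */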
struct otx2_nic {
void __iomem *reg_base;
struct net_device *netdev;
......@@ -238,6 +268,12 @@ struct otx2_nic {
#define OTX2_FLAG_RX_TSTAMP_ENABLED BIT_ULL(0)
#define OTX2_FLAG_TX_TSTAMP_ENABLED BIT_ULL(1)
#define OTX2_FLAG_INTF_DOWN BIT_ULL(2)
#define OTX2_FLAG_MCAM_ENTRIES_ALLOC BIT_ULL(3)
#define OTX2_FLAG_NTUPLE_SUPPORT BIT_ULL(4)
#define OTX2_FLAG_UCAST_FLTR_SUPPORT BIT_ULL(5)
#define OTX2_FLAG_RX_VLAN_SUPPORT BIT_ULL(6)
#define OTX2_FLAG_VF_VLAN_SUPPORT BIT_ULL(7)
#define OTX2_FLAG_PF_SHUTDOWN BIT_ULL(8)
#define OTX2_FLAG_RX_PAUSE_ENABLED BIT_ULL(9)
#define OTX2_FLAG_TX_PAUSE_ENABLED BIT_ULL(10)
u64 flags;
......@@ -266,6 +302,7 @@ struct otx2_nic {
struct refill_work *refill_wrk;
struct workqueue_struct *otx2_wq;
struct work_struct rx_mode_work;
struct otx2_mac_table *mac_table;
/* Ethtool stuff */
u32 msg_enable;
......@@ -275,6 +312,8 @@ struct otx2_nic {
struct otx2_ptp *ptp;
struct hwtstamp_config tstamp;
struct otx2_flow_config *flow_cfg;
};
static inline bool is_otx2_lbkvf(struct pci_dev *pdev)
......@@ -644,4 +683,24 @@ int otx2_open(struct net_device *netdev);
int otx2_stop(struct net_device *netdev);
int otx2_set_real_num_queues(struct net_device *netdev,
int tx_queues, int rx_queues);
/* MCAM filter related APIs */
int otx2_mcam_flow_init(struct otx2_nic *pf);
int otx2_alloc_mcam_entries(struct otx2_nic *pfvf);
void otx2_mcam_flow_del(struct otx2_nic *pf);
int otx2_destroy_ntuple_flows(struct otx2_nic *pf);
int otx2_destroy_mcam_flows(struct otx2_nic *pfvf);
int otx2_get_flow(struct otx2_nic *pfvf,
struct ethtool_rxnfc *nfc, u32 location);
int otx2_get_all_flows(struct otx2_nic *pfvf,
struct ethtool_rxnfc *nfc, u32 *rule_locs);
int otx2_add_flow(struct otx2_nic *pfvf,
struct ethtool_rx_flow_spec *fsp);
int otx2_remove_flow(struct otx2_nic *pfvf, u32 location);
int otx2_prepare_flow_request(struct ethtool_rx_flow_spec *fsp,
struct npc_install_flow_req *req);
int otx2_del_macfilter(struct net_device *netdev, const u8 *mac);
int otx2_add_macfilter(struct net_device *netdev, const u8 *mac);
int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable);
int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf);
#endif /* OTX2_COMMON_H */
......@@ -551,6 +551,16 @@ static int otx2_get_rxnfc(struct net_device *dev,
nfc->data = pfvf->hw.rx_queues;
ret = 0;
break;
case ETHTOOL_GRXCLSRLCNT:
nfc->rule_cnt = pfvf->flow_cfg->nr_flows;
ret = 0;
break;
case ETHTOOL_GRXCLSRULE:
ret = otx2_get_flow(pfvf, nfc, nfc->fs.location);
break;
case ETHTOOL_GRXCLSRLALL:
ret = otx2_get_all_flows(pfvf, nfc, rules);
break;
case ETHTOOL_GRXFH:
return otx2_get_rss_hash_opts(pfvf, nfc);
default:
......@@ -560,6 +570,50 @@ static int otx2_get_rxnfc(struct net_device *dev,
}
static int otx2_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *nfc)
{
bool ntuple = !!(dev->features & NETIF_F_NTUPLE);
struct otx2_nic *pfvf = netdev_priv(dev);
int ret = -EOPNOTSUPP;
switch (nfc->cmd) {
case ETHTOOL_SRXFH:
ret = otx2_set_rss_hash_opts(pfvf, nfc);
break;
case ETHTOOL_SRXCLSRLINS:
if (netif_running(dev) && ntuple)
ret = otx2_add_flow(pfvf, &nfc->fs);
break;
case ETHTOOL_SRXCLSRLDEL:
if (netif_running(dev) && ntuple)
ret = otx2_remove_flow(pfvf, nfc->fs.location);
break;
default:
break;
}
return ret;
}
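/* Example user-space usage that reaches the ETHTOOL_SRXCLSRLINS case above
 * (illustrative; interface name and values are placeholders):
 *
 *	ethtool -K eth0 ntuple on
 *	ethtool -U eth0 flow-type tcp4 dst-port 80 action 2
 *	ethtool -u eth0		(list installed rules)
 */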
static int otx2vf_get_rxnfc(struct net_device *dev,
struct ethtool_rxnfc *nfc, u32 *rules)
{
struct otx2_nic *pfvf = netdev_priv(dev);
int ret = -EOPNOTSUPP;
switch (nfc->cmd) {
case ETHTOOL_GRXRINGS:
nfc->data = pfvf->hw.rx_queues;
ret = 0;
break;
case ETHTOOL_GRXFH:
return otx2_get_rss_hash_opts(pfvf, nfc);
default:
break;
}
return ret;
}
static int otx2vf_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *nfc)
{
struct otx2_nic *pfvf = netdev_priv(dev);
int ret = -EOPNOTSUPP;
......@@ -806,8 +860,8 @@ static const struct ethtool_ops otx2vf_ethtool_ops = {
.get_sset_count = otx2vf_get_sset_count,
.set_channels = otx2_set_channels,
.get_channels = otx2_get_channels,
.get_rxnfc = otx2_get_rxnfc,
.set_rxnfc = otx2_set_rxnfc,
.get_rxnfc = otx2vf_get_rxnfc,
.set_rxnfc = otx2vf_set_rxnfc,
.get_rxfh_key_size = otx2_get_rxfh_key_size,
.get_rxfh_indir_size = otx2_get_rxfh_indir_size,
.get_rxfh = otx2_get_rxfh,
......
......@@ -556,6 +556,19 @@ static void otx2_sqe_add_ext(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
ext->tstmp = 1;
}
#define OTX2_VLAN_PTR_OFFSET (ETH_HLEN - ETH_TLEN)
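	/* OTX2_VLAN_PTR_OFFSET = ETH_HLEN - ETH_TLEN = 12 bytes, i.e. right
	 * after the source MAC, so hardware inserts the tag in front of the
	 * original ethertype/length field.
	 */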
if (skb_vlan_tag_present(skb)) {
if (skb->vlan_proto == htons(ETH_P_8021Q)) {
ext->vlan1_ins_ena = 1;
ext->vlan1_ins_ptr = OTX2_VLAN_PTR_OFFSET;
ext->vlan1_ins_tci = skb_vlan_tag_get(skb);
} else if (skb->vlan_proto == htons(ETH_P_8021AD)) {
ext->vlan0_ins_ena = 1;
ext->vlan0_ins_ptr = OTX2_VLAN_PTR_OFFSET;
ext->vlan0_ins_tci = skb_vlan_tag_get(skb);
}
}
*offset += sizeof(*ext);
}
......@@ -871,6 +884,9 @@ bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
}
if (skb_shinfo(skb)->gso_size && !is_hw_tso_supported(pfvf, skb)) {
/* Insert vlan tag before giving pkt to tso */
if (skb_vlan_tag_present(skb))
skb = __vlan_hwaccel_push_inside(skb);
otx2_sq_append_tso(pfvf, sq, skb, qidx);
return true;
}
......
......@@ -558,6 +558,11 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
NETIF_F_GSO_UDP_L4;
netdev->features = netdev->hw_features;
/* Support TSO on tag interface */
netdev->vlan_features |= netdev->features;
netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_STAG_TX;
netdev->features |= netdev->hw_features;
netdev->gso_max_segs = OTX2_MAX_GSO_SEGS;
netdev->watchdog_timeo = OTX2_TX_TIMEOUT;
......