Commit 3eaf3ca6 authored by David S. Miller

Merge branch 'octeontx2-next'

Jerin Jacob says:

====================
octeontx2-af: NIX and NPC enhancements

This patchset is a continuation of the four patch series submitted
earlier, which added a new driver for the Resource Virtualization Unit
(RVU) admin function of Marvell's OcteonTX2 SoC.

1. octeontx2-af: Add RVU Admin Function driver
   https://www.spinics.net/lists/netdev/msg528272.html
2. octeontx2-af: NPA and NIX blocks initialization
   https://www.spinics.net/lists/netdev/msg529163.html
3. octeontx2-af: NPC parser and NIX blocks initialization
   https://www.spinics.net/lists/netdev/msg530252.html
4. octeontx2-af: NPC MCAM support and FLR handling
   https://www.spinics.net/lists/netdev/msg534392.html

This patch series adds support for the following:

NPC block:
- Add NPC (MKEX) profile support for various key extraction configurations

NIX block:
- Enable dynamic RSS flow key algorithm configuration
- Enhancements on Rx checksum and error checks
- Add Tx packet marking support
- TL1 scheduler queue allocation enhancements
- Add LSO format configuration mbox
- VLAN TPID configuration
- Skip multicast entry init for broadcast tables

v2:

- Rename FLOW_* to NIX_FLOW_* to avoid serious global namespace
  collisions, as we have various FLOW_* definitions coming from
  include/uapi/linux/pkt_cls.h, for example. (David Miller)
- Pack the arguments of the rvu_get_tl1_schqs() function as tightly as
  the 80-column limit allows. (David Miller)
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 6e360f73 23705adb
@@ -498,6 +498,60 @@ static inline bool cgx_event_is_linkevent(u64 event)
    return false;
}
static inline int cgx_fwi_get_mkex_prfl_sz(u64 *prfl_sz,
struct cgx *cgx)
{
u64 req = 0;
u64 resp;
int err;
req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_MKEX_PRFL_SIZE, req);
err = cgx_fwi_cmd_generic(req, &resp, cgx, 0);
if (!err)
*prfl_sz = FIELD_GET(RESP_MKEX_PRFL_SIZE, resp);
return err;
}
static inline int cgx_fwi_get_mkex_prfl_addr(u64 *prfl_addr,
struct cgx *cgx)
{
u64 req = 0;
u64 resp;
int err;
req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_MKEX_PRFL_ADDR, req);
err = cgx_fwi_cmd_generic(req, &resp, cgx, 0);
if (!err)
*prfl_addr = FIELD_GET(RESP_MKEX_PRFL_ADDR, resp);
return err;
}
int cgx_get_mkex_prfl_info(u64 *addr, u64 *size)
{
struct cgx *cgx_dev;
int err;
if (!addr || !size)
return -EINVAL;
cgx_dev = list_first_entry(&cgx_list, struct cgx, cgx_list);
if (!cgx_dev)
return -ENXIO;
err = cgx_fwi_get_mkex_prfl_sz(size, cgx_dev);
if (err)
return -EIO;
err = cgx_fwi_get_mkex_prfl_addr(addr, cgx_dev);
if (err)
return -EIO;
return 0;
}
EXPORT_SYMBOL(cgx_get_mkex_prfl_info);
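
The two firmware helpers above follow the driver's pattern of packing a command ID into a 64-bit request word and unpacking a payload field from the 64-bit response. The user-space sketch below shows just that field-packing idea; the GENMASK/field helpers and bit positions here are simplified stand-ins (not the real CGX register layout, which lives in cgx_fw_if.h), and 0x1d is a hypothetical command id.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for GENMASK_ULL()/FIELD_PREP()/FIELD_GET() */
#define GENMASK_U64(h, l)  (((~0ULL) << (l)) & (~0ULL >> (63 - (h))))
#define CMDREG_ID          GENMASK_U64(7, 2)   /* command id field  */
#define RESP_PRFL_SIZE     GENMASK_U64(63, 9)  /* response payload  */

static uint64_t field_set(uint64_t mask, uint64_t val, uint64_t reg)
{
    /* Clear the field, then OR in the value shifted into place */
    reg &= ~mask;
    reg |= (val << __builtin_ctzll(mask)) & mask;
    return reg;
}

static uint64_t field_get(uint64_t mask, uint64_t reg)
{
    return (reg & mask) >> __builtin_ctzll(mask);
}

int main(void)
{
    uint64_t req = 0, resp;

    req = field_set(CMDREG_ID, 0x1d, req);        /* hypothetical cmd  */
    resp = field_set(RESP_PRFL_SIZE, 13824, 0);   /* firmware's answer */

    assert(field_get(CMDREG_ID, req) == 0x1d);
    printf("profile size = %llu\n",
           (unsigned long long)field_get(RESP_PRFL_SIZE, resp));
    return 0;
}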
static irqreturn_t cgx_fwi_event_handler(int irq, void *data)
{
    struct lmac *lmac = data;
...
@@ -111,4 +111,5 @@ int cgx_lmac_internal_loopback(void *cgxd, int lmac_id, bool enable);
int cgx_get_link_info(void *cgxd, int lmac_id,
                      struct cgx_link_user_info *linfo);
int cgx_lmac_linkup_start(void *cgxd);
int cgx_get_mkex_prfl_info(u64 *addr, u64 *size);
#endif /* CGX_H */
@@ -78,6 +78,8 @@ enum cgx_cmd_id {
    CGX_CMD_LINK_STATE_CHANGE,
    CGX_CMD_MODE_CHANGE,        /* hot plug support */
    CGX_CMD_INTF_SHUTDOWN,
CGX_CMD_GET_MKEX_PRFL_SIZE,
CGX_CMD_GET_MKEX_PRFL_ADDR
};

/* async event ids */
@@ -137,6 +139,16 @@ enum cgx_cmd_own {
 */
#define RESP_MAC_ADDR          GENMASK_ULL(56, 9)
/* Response to cmd ID as CGX_CMD_GET_MKEX_PRFL_SIZE with cmd status as
* CGX_STAT_SUCCESS
*/
#define RESP_MKEX_PRFL_SIZE GENMASK_ULL(63, 9)
/* Response to cmd ID as CGX_CMD_GET_MKEX_PRFL_ADDR with cmd status as
* CGX_STAT_SUCCESS
*/
#define RESP_MKEX_PRFL_ADDR GENMASK_ULL(63, 9)
/* Response to cmd ID - CGX_CMD_LINK_BRING_UP/DOWN, event ID CGX_EVT_LINK_CHANGE
 * status can be either CGX_STAT_FAIL or CGX_STAT_SUCCESS
 *
...
@@ -143,6 +143,9 @@ enum nix_scheduler {
    NIX_TXSCH_LVL_CNT = 0x5,
};
#define TXSCH_TL1_DFLT_RR_QTM ((1 << 24) - 1)
#define TXSCH_TL1_DFLT_RR_PRIO (0x1ull)
/* Min/Max packet sizes, excluding FCS */
#define NIC_HW_MIN_FRS         40
#define NIC_HW_MAX_FRS         9212
@@ -193,26 +196,4 @@ enum nix_scheduler {
#define DEFAULT_RSS_CONTEXT_GROUP  0
#define MAX_RSS_INDIR_TBL_SIZE     256 /* 1 << Max adder bits */
-/* NIX flow tag, key type flags */
-#define FLOW_KEY_TYPE_PORT     BIT(0)
-#define FLOW_KEY_TYPE_IPV4     BIT(1)
-#define FLOW_KEY_TYPE_IPV6     BIT(2)
-#define FLOW_KEY_TYPE_TCP      BIT(3)
-#define FLOW_KEY_TYPE_UDP      BIT(4)
-#define FLOW_KEY_TYPE_SCTP     BIT(5)
-
-/* NIX flow tag algorithm indices, max is 31 */
-enum {
-    FLOW_KEY_ALG_PORT,
-    FLOW_KEY_ALG_IP,
-    FLOW_KEY_ALG_TCP,
-    FLOW_KEY_ALG_UDP,
-    FLOW_KEY_ALG_SCTP,
-    FLOW_KEY_ALG_TCP_UDP,
-    FLOW_KEY_ALG_TCP_SCTP,
-    FLOW_KEY_ALG_UDP_SCTP,
-    FLOW_KEY_ALG_TCP_UDP_SCTP,
-    FLOW_KEY_ALG_MAX,
-};

#endif /* COMMON_H */
@@ -193,12 +193,20 @@ M(NIX_TXSCHQ_CFG, 0x8006, nix_txschq_cfg, nix_txschq_config, msg_rsp) \
M(NIX_STATS_RST,       0x8007, nix_stats_rst, msg_req, msg_rsp)        \
M(NIX_VTAG_CFG,        0x8008, nix_vtag_cfg, nix_vtag_config, msg_rsp) \
M(NIX_RSS_FLOWKEY_CFG, 0x8009, nix_rss_flowkey_cfg,                    \
-                               nix_rss_flowkey_cfg, msg_rsp)           \
                               nix_rss_flowkey_cfg,                    \
                               nix_rss_flowkey_cfg_rsp)                \
M(NIX_SET_MAC_ADDR,    0x800a, nix_set_mac_addr, nix_set_mac_addr, msg_rsp) \
M(NIX_SET_RX_MODE,     0x800b, nix_set_rx_mode, nix_rx_mode, msg_rsp)  \
M(NIX_SET_HW_FRS,      0x800c, nix_set_hw_frs, nix_frs_cfg, msg_rsp)   \
M(NIX_LF_START_RX,     0x800d, nix_lf_start_rx, msg_req, msg_rsp)      \
M(NIX_LF_STOP_RX,      0x800e, nix_lf_stop_rx, msg_req, msg_rsp)       \
M(NIX_MARK_FORMAT_CFG, 0x800f, nix_mark_format_cfg, \
nix_mark_format_cfg, \
nix_mark_format_cfg_rsp) \
M(NIX_SET_RX_CFG, 0x8010, nix_set_rx_cfg, nix_rx_cfg, msg_rsp) \
M(NIX_LSO_FORMAT_CFG, 0x8011, nix_lso_format_cfg, \
nix_lso_format_cfg, \
nix_lso_format_cfg_rsp) \
M(NIX_RXVLAN_ALLOC,    0x8012, nix_rxvlan_alloc, msg_req, msg_rsp)

/* Messages initiated by AF (range 0xC00 - 0xDFF) */
@@ -413,6 +421,10 @@ enum nix_af_status {
    NIX_AF_INVAL_TXSCHQ_CFG     = -412,
    NIX_AF_SMQ_FLUSH_FAILED     = -413,
    NIX_AF_ERR_LF_RESET         = -414,
NIX_AF_ERR_RSS_NOSPC_FIELD = -415,
NIX_AF_ERR_RSS_NOSPC_ALGO = -416,
NIX_AF_ERR_MARK_CFG_FAIL = -417,
NIX_AF_ERR_LSO_CFG_FAIL = -418,
    NIX_AF_INVAL_NPA_PF_FUNC    = -419,
    NIX_AF_INVAL_SSO_PF_FUNC    = -420,
};
@@ -560,15 +572,40 @@ struct nix_vtag_config {
struct nix_rss_flowkey_cfg {
    struct mbox_msghdr hdr;
    int mcam_index;  /* MCAM entry index to modify */
#define NIX_FLOW_KEY_TYPE_PORT BIT(0)
#define NIX_FLOW_KEY_TYPE_IPV4 BIT(1)
#define NIX_FLOW_KEY_TYPE_IPV6 BIT(2)
#define NIX_FLOW_KEY_TYPE_TCP BIT(3)
#define NIX_FLOW_KEY_TYPE_UDP BIT(4)
#define NIX_FLOW_KEY_TYPE_SCTP BIT(5)
    u32 flowkey_cfg; /* Flowkey types selected */
    u8  group;       /* RSS context or group */
};
struct nix_rss_flowkey_cfg_rsp {
struct mbox_msghdr hdr;
u8 alg_idx; /* Selected algo index */
};
struct nix_set_mac_addr {
    struct mbox_msghdr hdr;
    u8 mac_addr[ETH_ALEN]; /* MAC address to be set for this pcifunc */
};
struct nix_mark_format_cfg {
struct mbox_msghdr hdr;
u8 offset;
u8 y_mask;
u8 y_val;
u8 r_mask;
u8 r_val;
};
struct nix_mark_format_cfg_rsp {
struct mbox_msghdr hdr;
u8 mark_format_idx;
};
struct nix_rx_mode {
    struct mbox_msghdr hdr;
#define NIX_RX_MODE_UCAST      BIT(0)
@@ -577,6 +614,15 @@ struct nix_rx_mode {
    u16 mode;
};
struct nix_rx_cfg {
struct mbox_msghdr hdr;
#define NIX_RX_OL3_VERIFY BIT(0)
#define NIX_RX_OL4_VERIFY BIT(1)
u8 len_verify; /* Outer L3/L4 len check */
#define NIX_RX_CSUM_OL4_VERIFY BIT(0)
u8 csum_verify; /* Outer L4 checksum verification */
};
struct nix_frs_cfg {
    struct mbox_msghdr hdr;
    u8  update_smq;    /* Update SMQ's min/max lens */
@@ -586,6 +632,18 @@ struct nix_frs_cfg {
    u16 minlen;
};
struct nix_lso_format_cfg {
struct mbox_msghdr hdr;
u64 field_mask;
#define NIX_LSO_FIELD_MAX 8
u64 fields[NIX_LSO_FIELD_MAX];
};
struct nix_lso_format_cfg_rsp {
struct mbox_msghdr hdr;
u8 lso_format_idx;
};
/* NPC mbox message structs */
#define NPC_MCAM_ENTRY_INVALID 0xFFFF
@@ -730,6 +788,8 @@ struct npc_get_kex_cfg_rsp {
    u64 intf_lid_lt_ld[NPC_MAX_INTF][NPC_MAX_LID][NPC_MAX_LT][NPC_MAX_LD];
    /* NPC_AF_INTF(0..1)_LDATA(0..1)_FLAGS(0..15)_CFG */
    u64 intf_ld_flags[NPC_MAX_INTF][NPC_MAX_LD][NPC_MAX_LFL];
#define MKEX_NAME_LEN 128
u8 mkex_pfl_name[MKEX_NAME_LEN];
};

#endif /* MBOX_H */
@@ -265,4 +265,22 @@ struct nix_rx_action {
#define VTAG0_LID_MASK         GENMASK_ULL(10, 8)
#define VTAG0_RELPTR_MASK      GENMASK_ULL(7, 0)
struct npc_mcam_kex {
    /* MKEX Profile Header */
u64 mkex_sign; /* "mcam-kex-profile" (8 bytes/ASCII characters) */
u8 name[MKEX_NAME_LEN]; /* MKEX Profile name */
u64 cpu_model; /* Format as profiled by CPU hardware */
u64 kpu_version; /* KPU firmware/profile version */
u64 reserved; /* Reserved for extension */
    /* MKEX Profile Data */
u64 keyx_cfg[NPC_MAX_INTF]; /* NPC_AF_INTF(0..1)_KEX_CFG */
/* NPC_AF_KEX_LDATA(0..1)_FLAGS_CFG */
u64 kex_ld_flags[NPC_MAX_LD];
/* NPC_AF_INTF(0..1)_LID(0..7)_LT(0..15)_LD(0..1)_CFG */
u64 intf_lid_lt_ld[NPC_MAX_INTF][NPC_MAX_LID][NPC_MAX_LT][NPC_MAX_LD];
/* NPC_AF_INTF(0..1)_LDATA(0..1)_FLAGS(0..15)_CFG */
u64 intf_ld_flags[NPC_MAX_INTF][NPC_MAX_LD][NPC_MAX_LFL];
} __packed;
#endif /* NPC_H */
@@ -52,6 +52,10 @@ MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, rvu_id_table);
static char *mkex_profile; /* MKEX profile name */
module_param(mkex_profile, charp, 0000);
MODULE_PARM_DESC(mkex_profile, "MKEX profile name string");
/* Poll a RVU block's register 'offset', for a 'zero'
 * or 'nonzero' at bits specified by 'mask'
 */
@@ -2359,6 +2363,14 @@ static void rvu_disable_sriov(struct rvu *rvu)
    pci_disable_sriov(rvu->pdev);
}
static void rvu_update_module_params(struct rvu *rvu)
{
const char *default_pfl_name = "default";
strscpy(rvu->mkex_pfl_name,
mkex_profile ? mkex_profile : default_pfl_name, MKEX_NAME_LEN);
}
static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
    struct device *dev = &pdev->dev;
@@ -2412,6 +2424,9 @@ static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        goto err_release_regions;
    }
/* Store module params in rvu structure */
rvu_update_module_params(rvu);
    /* Check which blocks the HW supports */
    rvu_check_block_implemented(rvu);
...
@@ -156,7 +156,17 @@ struct rvu_pfvf {
struct nix_txsch {
    struct rsrc_bmap schq;
    u8   lvl;
-   u16  *pfvf_map;
#define NIX_TXSCHQ_TL1_CFG_DONE    BIT_ULL(0)
#define TXSCH_MAP_FUNC(__pfvf_map) ((__pfvf_map) & 0xFFFF)
#define TXSCH_MAP_FLAGS(__pfvf_map) ((__pfvf_map) >> 16)
#define TXSCH_MAP(__func, __flags) (((__func) & 0xFFFF) | ((__flags) << 16))
u32 *pfvf_map;
};
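
The pfvf_map entries widen from u16 to u32 so each scheduler queue can carry both the owning pcifunc (low 16 bits) and per-queue flags (high 16 bits). A self-contained round-trip check of the same packing, using the macros copied from the hunk above:

#include <assert.h>
#include <stdint.h>

#define TXSCH_MAP_FUNC(m)   ((m) & 0xFFFF)
#define TXSCH_MAP_FLAGS(m)  ((m) >> 16)
#define TXSCH_MAP(f, fl)    ((((uint32_t)(f)) & 0xFFFF) | ((uint32_t)(fl) << 16))
#define NIX_TXSCHQ_TL1_CFG_DONE  (1U << 0)

int main(void)
{
    uint16_t pcifunc = 0x0401;            /* example PF/VF id */
    uint32_t map = TXSCH_MAP(pcifunc, 0);

    /* Mark TL1 config done without disturbing the owner field */
    map = TXSCH_MAP(TXSCH_MAP_FUNC(map),
                    TXSCH_MAP_FLAGS(map) | NIX_TXSCHQ_TL1_CFG_DONE);

    assert(TXSCH_MAP_FUNC(map) == pcifunc);
    assert(TXSCH_MAP_FLAGS(map) & NIX_TXSCHQ_TL1_CFG_DONE);
    return 0;
}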
struct nix_mark_format {
u8 total;
u8 in_use;
u32 *cfg;
}; };
struct npc_pkind {
@@ -164,9 +174,23 @@ struct npc_pkind {
    u32 *pfchan_map;
};
struct nix_flowkey {
#define NIX_FLOW_KEY_ALG_MAX 32
u32 flowkey[NIX_FLOW_KEY_ALG_MAX];
int in_use;
};
struct nix_lso {
u8 total;
u8 in_use;
};
struct nix_hw {
    struct nix_txsch txsch[NIX_TXSCH_LVL_CNT]; /* Tx schedulers */
    struct nix_mcast mcast;
struct nix_flowkey flowkey;
struct nix_mark_format mark_format;
struct nix_lso lso;
};

struct rvu_hwinfo {
@@ -237,6 +261,8 @@ struct rvu {
    struct workqueue_struct *cgx_evh_wq;
    spinlock_t       cgx_evq_lock;   /* cgx event queue lock */
    struct list_head cgx_evq_head;   /* cgx event queue head */
char mkex_pfl_name[MKEX_NAME_LEN]; /* Configured MKEX profile name */
};

static inline void rvu_write64(struct rvu *rvu, u64 block, u64 offset, u64 val)
@@ -366,6 +392,8 @@ int rvu_mbox_handler_npa_lf_free(struct rvu *rvu, struct msg_req *req,
/* NIX APIs */
bool is_nixlf_attached(struct rvu *rvu, u16 pcifunc);
int rvu_nix_init(struct rvu *rvu);
int rvu_nix_reserve_mark_format(struct rvu *rvu, struct nix_hw *nix_hw,
int blkaddr, u32 cfg);
void rvu_nix_freemem(struct rvu *rvu);
int rvu_get_nixlf_count(struct rvu *rvu);
void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int npalf);
@@ -398,7 +426,7 @@ int rvu_mbox_handler_nix_rxvlan_alloc(struct rvu *rvu, struct msg_req *req,
                                      struct msg_rsp *rsp);
int rvu_mbox_handler_nix_rss_flowkey_cfg(struct rvu *rvu,
                                         struct nix_rss_flowkey_cfg *req,
-                                        struct msg_rsp *rsp);
                                         struct nix_rss_flowkey_cfg_rsp *rsp);
int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu,
                                      struct nix_set_mac_addr *req,
                                      struct msg_rsp *rsp);
@@ -410,6 +438,14 @@ int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
                                     struct msg_rsp *rsp);
int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
                                    struct msg_rsp *rsp);
int rvu_mbox_handler_nix_mark_format_cfg(struct rvu *rvu,
struct nix_mark_format_cfg *req,
struct nix_mark_format_cfg_rsp *rsp);
int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req,
struct msg_rsp *rsp);
int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu,
struct nix_lso_format_cfg *req,
struct nix_lso_format_cfg_rsp *rsp);
/* NPC APIs */
int rvu_npc_init(struct rvu *rvu);
...
@@ -43,6 +43,19 @@ enum mc_buf_cnt {
    MC_BUF_CNT_2048,
};
enum nix_makr_fmt_indexes {
NIX_MARK_CFG_IP_DSCP_RED,
NIX_MARK_CFG_IP_DSCP_YELLOW,
NIX_MARK_CFG_IP_DSCP_YELLOW_RED,
NIX_MARK_CFG_IP_ECN_RED,
NIX_MARK_CFG_IP_ECN_YELLOW,
NIX_MARK_CFG_IP_ECN_YELLOW_RED,
NIX_MARK_CFG_VLAN_DEI_RED,
NIX_MARK_CFG_VLAN_DEI_YELLOW,
NIX_MARK_CFG_VLAN_DEI_YELLOW_RED,
NIX_MARK_CFG_MAX,
};
/* For now considering MC resources needed for broadcast
 * pkt replication only. i.e 256 HWVFs + 12 PFs.
 */
@@ -127,6 +140,7 @@ static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
{
    struct nix_txsch *txsch;
    struct nix_hw *nix_hw;
u16 map_func;
    nix_hw = get_nix_hw(rvu->hw, blkaddr);
    if (!nix_hw)
@@ -138,11 +152,18 @@ static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
        return false;

    mutex_lock(&rvu->rsrc_lock);
-   if (txsch->pfvf_map[schq] != pcifunc) {
-       mutex_unlock(&rvu->rsrc_lock);
-       return false;
-   }
    map_func = TXSCH_MAP_FUNC(txsch->pfvf_map[schq]);
    mutex_unlock(&rvu->rsrc_lock);
/* For TL1 schq, sharing across VF's of same PF is ok */
if (lvl == NIX_TXSCH_LVL_TL1 &&
rvu_get_pf(map_func) != rvu_get_pf(pcifunc))
return false;
if (lvl != NIX_TXSCH_LVL_TL1 &&
map_func != pcifunc)
return false;
    return true;
}
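
The reworked check above lets VFs under the same PF share a TL1 queue, while every other level still requires an exact pcifunc match. Assuming the usual RVU pcifunc layout (PF number in bits 15:10, function in bits 9:0 — taken from the driver's rvu.h, not from this hunk), the rule can be sketched as:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Assumed RVU pcifunc layout: PF in bits 15:10, VF id + 1 in bits 9:0 */
#define RVU_PFVF_PF_SHIFT  10
#define RVU_PFVF_PF_MASK   0x3F

static int rvu_get_pf(uint16_t pcifunc)
{
    return (pcifunc >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
}

/* TL1 queues may be shared by all VFs under one PF; other levels
 * must be owned by the exact requesting pcifunc.
 */
static bool schq_ok(bool lvl_is_tl1, uint16_t owner, uint16_t requester)
{
    if (lvl_is_tl1)
        return rvu_get_pf(owner) == rvu_get_pf(requester);
    return owner == requester;
}

int main(void)
{
    uint16_t pf1 = 1 << RVU_PFVF_PF_SHIFT;
    uint16_t pf1_vf0 = pf1 | 1;            /* VF0 of PF1 */

    assert(schq_ok(true, pf1, pf1_vf0));   /* TL1: same PF, ok     */
    assert(!schq_ok(false, pf1, pf1_vf0)); /* SMQ..TL2: must match */
    return 0;
}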
@@ -277,17 +298,21 @@ static void nix_setup_lso_tso_l4(struct rvu *rvu, int blkaddr,
    /* TCP's flags field */
    field.layer = NIX_TXLAYER_OL4;
    field.offset = 12;
-   field.sizem1 = 0; /* not needed */
    field.sizem1 = 1; /* 2 bytes */
    field.alg = NIX_LSOALG_TCP_FLAGS;
    rvu_write64(rvu, blkaddr,
                NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
                *(u64 *)&field);
}
-static void nix_setup_lso(struct rvu *rvu, int blkaddr)
static void nix_setup_lso(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
{
    u64 cfg, idx, fidx = 0;
/* Get max HW supported format indices */
cfg = (rvu_read64(rvu, blkaddr, NIX_AF_CONST1) >> 48) & 0xFF;
nix_hw->lso.total = cfg;
    /* Enable LSO */
    cfg = rvu_read64(rvu, blkaddr, NIX_AF_LSO_CFG);
    /* For TSO, set first and middle segment flags to
@@ -297,7 +322,10 @@ static void nix_setup_lso(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
    cfg |= (0xFFF2ULL << 32) | (0xFFF2ULL << 16);
    rvu_write64(rvu, blkaddr, NIX_AF_LSO_CFG, cfg | BIT_ULL(63));
-   /* Configure format fields for TCPv4 segmentation offload */
    /* Setup default static LSO formats
     *
     * Configure format fields for TCPv4 segmentation offload
     */
    idx = NIX_LSO_FORMAT_IDX_TSOV4;
    nix_setup_lso_tso_l3(rvu, blkaddr, idx, true, &fidx);
    nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);
@@ -307,6 +335,7 @@ static void nix_setup_lso(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
        rvu_write64(rvu, blkaddr,
                    NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
    }
nix_hw->lso.in_use++;
    /* Configure format fields for TCPv6 segmentation offload */
    idx = NIX_LSO_FORMAT_IDX_TSOV6;
@@ -319,6 +348,7 @@ static void nix_setup_lso(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
        rvu_write64(rvu, blkaddr,
                    NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
    }
nix_hw->lso.in_use++;
}

static void nix_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf)
@@ -431,9 +461,8 @@ static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
    bool ena;
    u64 cfg;
-   pfvf = rvu_get_pfvf(rvu, pcifunc);
    blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
-   if (!pfvf->nixlf || blkaddr < 0)
    if (blkaddr < 0)
        return NIX_AF_ERR_AF_LF_INVALID;

    block = &hw->block[blkaddr];
@@ -443,9 +472,14 @@ static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
        return NIX_AF_ERR_AQ_ENQUEUE;
    }
pfvf = rvu_get_pfvf(rvu, pcifunc);
    nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
-   if (nixlf < 0)
-       return NIX_AF_ERR_AF_LF_INVALID;

    /* Skip NIXLF check for broadcast MCE entry init */
    if (!(!rsp && req->ctype == NIX_AQ_CTYPE_MCE)) {
        if (!pfvf->nixlf || nixlf < 0)
            return NIX_AF_ERR_AF_LF_INVALID;
    }
    switch (req->ctype) {
    case NIX_AQ_CTYPE_RQ:
@@ -490,7 +524,9 @@ static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
    /* Check if SQ pointed SMQ belongs to this PF/VF or not */
    if (req->ctype == NIX_AQ_CTYPE_SQ &&
-       req->op != NIX_AQ_INSTOP_WRITE) {
        ((req->op == NIX_AQ_INSTOP_INIT && req->sq.ena) ||
         (req->op == NIX_AQ_INSTOP_WRITE &&
          req->sq_mask.ena && req->sq_mask.smq && req->sq.ena))) {
        if (!is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_SMQ,
                             pcifunc, req->sq.smq))
            return NIX_AF_ERR_AQ_ENQUEUE;
@@ -838,6 +874,13 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
                (u64)pfvf->nix_qints_ctx->iova);
    rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_CFG(nixlf), BIT_ULL(36));
/* Setup VLANX TPID's.
* Use VLAN1 for 802.1Q
* and VLAN0 for 802.1AD.
*/
cfg = (0x8100ULL << 16) | 0x88A8ULL;
rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg);
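
The TPID setup above packs two EtherTypes into one register write: 0x8100 (802.1Q) into bits 31:16 for VLAN1 and 0x88A8 (802.1AD) into bits 15:0 for VLAN0. A quick standalone check of that composition:

#include <assert.h>
#include <stdint.h>

int main(void)
{
    /* VLAN1 TPID in bits 31:16, VLAN0 TPID in bits 15:0 */
    uint64_t cfg = (0x8100ULL << 16) | 0x88A8ULL;

    assert(((cfg >> 16) & 0xFFFF) == 0x8100);  /* 802.1Q  */
    assert((cfg & 0xFFFF) == 0x88A8);          /* 802.1AD */
    assert(cfg == 0x810088A8ULL);
    return 0;
}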
    /* Enable LMTST for this NIX LF */
    rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG2(nixlf), BIT_ULL(0));
@@ -925,6 +968,41 @@ int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct msg_req *req,
    return 0;
}
int rvu_mbox_handler_nix_mark_format_cfg(struct rvu *rvu,
struct nix_mark_format_cfg *req,
struct nix_mark_format_cfg_rsp *rsp)
{
u16 pcifunc = req->hdr.pcifunc;
struct nix_hw *nix_hw;
struct rvu_pfvf *pfvf;
int blkaddr, rc;
u32 cfg;
pfvf = rvu_get_pfvf(rvu, pcifunc);
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
if (!pfvf->nixlf || blkaddr < 0)
return NIX_AF_ERR_AF_LF_INVALID;
nix_hw = get_nix_hw(rvu->hw, blkaddr);
if (!nix_hw)
return -EINVAL;
cfg = (((u32)req->offset & 0x7) << 16) |
(((u32)req->y_mask & 0xF) << 12) |
(((u32)req->y_val & 0xF) << 8) |
(((u32)req->r_mask & 0xF) << 4) | ((u32)req->r_val & 0xF);
rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfg);
if (rc < 0) {
dev_err(rvu->dev, "No mark_format_ctl for (pf:%d, vf:%d)",
rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK);
return NIX_AF_ERR_MARK_CFG_FAIL;
}
rsp->mark_format_idx = rc;
return 0;
}
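
The handler above packs the five request fields into one configuration word: offset in bits 18:16, then the yellow/red mask/value nibbles below it. As a sanity check under that same layout, decoding one of the default profiles programmed later (0x10003, the IP-DSCP red-marking entry) gives offset = 1 and r_val = 3:

#include <assert.h>
#include <stdint.h>

struct mark_fmt { uint8_t offset, y_mask, y_val, r_mask, r_val; };

static uint32_t mark_fmt_pack(struct mark_fmt f)
{
    return (((uint32_t)f.offset & 0x7) << 16) |
           (((uint32_t)f.y_mask & 0xF) << 12) |
           (((uint32_t)f.y_val  & 0xF) << 8)  |
           (((uint32_t)f.r_mask & 0xF) << 4)  |
           ((uint32_t)f.r_val & 0xF);
}

int main(void)
{
    /* NIX_MARK_CFG_IP_DSCP_RED from the defaults table: 0x10003 */
    struct mark_fmt dscp_red = { .offset = 1, .r_val = 3 };

    assert(mark_fmt_pack(dscp_red) == 0x10003);
    return 0;
}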
/* Disable shaping of pkts by a scheduler queue
 * at a given scheduler level.
 */
@@ -983,6 +1061,73 @@ static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr,
                NIX_AF_TL3_TL2X_LINKX_CFG(schq, link), 0x00);
}
static int
rvu_get_tl1_schqs(struct rvu *rvu, int blkaddr, u16 pcifunc,
u16 *schq_list, u16 *schq_cnt)
{
struct nix_txsch *txsch;
struct nix_hw *nix_hw;
struct rvu_pfvf *pfvf;
u8 cgx_id, lmac_id;
u16 schq_base;
u32 *pfvf_map;
int pf, intf;
nix_hw = get_nix_hw(rvu->hw, blkaddr);
if (!nix_hw)
return -ENODEV;
pfvf = rvu_get_pfvf(rvu, pcifunc);
txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL1];
pfvf_map = txsch->pfvf_map;
pf = rvu_get_pf(pcifunc);
/* static allocation as two TL1's per link */
intf = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
switch (intf) {
case NIX_INTF_TYPE_CGX:
rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
schq_base = (cgx_id * MAX_LMAC_PER_CGX + lmac_id) * 2;
break;
case NIX_INTF_TYPE_LBK:
schq_base = rvu->cgx_cnt_max * MAX_LMAC_PER_CGX * 2;
break;
default:
return -ENODEV;
}
if (schq_base + 1 > txsch->schq.max)
return -ENODEV;
/* init pfvf_map as we store flags */
if (pfvf_map[schq_base] == U32_MAX) {
pfvf_map[schq_base] =
TXSCH_MAP((pf << RVU_PFVF_PF_SHIFT), 0);
pfvf_map[schq_base + 1] =
TXSCH_MAP((pf << RVU_PFVF_PF_SHIFT), 0);
/* Onetime reset for TL1 */
nix_reset_tx_linkcfg(rvu, blkaddr,
NIX_TXSCH_LVL_TL1, schq_base);
nix_reset_tx_shaping(rvu, blkaddr,
NIX_TXSCH_LVL_TL1, schq_base);
nix_reset_tx_linkcfg(rvu, blkaddr,
NIX_TXSCH_LVL_TL1, schq_base + 1);
nix_reset_tx_shaping(rvu, blkaddr,
NIX_TXSCH_LVL_TL1, schq_base + 1);
}
if (schq_list && schq_cnt) {
schq_list[0] = schq_base;
schq_list[1] = schq_base + 1;
*schq_cnt = 2;
}
return 0;
}
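
rvu_get_tl1_schqs() statically carves out two TL1 queues per link: a CGX link gets (cgx_id * MAX_LMAC_PER_CGX + lmac_id) * 2, and the LBK link takes the pair after all CGX pairs. A worked sketch of that indexing (MAX_LMAC_PER_CGX = 4 is an assumption borrowed from the driver headers, not from this hunk):

#include <assert.h>
#include <stdio.h>

#define MAX_LMAC_PER_CGX 4   /* assumption from the driver headers */

static int tl1_base_cgx(int cgx_id, int lmac_id)
{
    return (cgx_id * MAX_LMAC_PER_CGX + lmac_id) * 2;
}

static int tl1_base_lbk(int cgx_cnt_max)
{
    return cgx_cnt_max * MAX_LMAC_PER_CGX * 2;
}

int main(void)
{
    /* Each link owns queues {base, base + 1} */
    assert(tl1_base_cgx(0, 0) == 0);
    assert(tl1_base_cgx(1, 2) == 12);
    assert(tl1_base_lbk(3) == 24);  /* LBK pair after 3 CGX devices */
    printf("cgx1/lmac2 -> TL1 %d and %d\n",
           tl1_base_cgx(1, 2), tl1_base_cgx(1, 2) + 1);
    return 0;
}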
int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
                                     struct nix_txsch_alloc_req *req,
                                     struct nix_txsch_alloc_rsp *rsp)
@@ -993,6 +1138,7 @@ int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
    struct rvu_pfvf *pfvf;
    struct nix_hw *nix_hw;
    int blkaddr, rc = 0;
    u32 *pfvf_map;
    u16 schq;

    pfvf = rvu_get_pfvf(rvu, pcifunc);
@@ -1008,13 +1154,23 @@ int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
    for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
        txsch = &nix_hw->txsch[lvl];
        req_schq = req->schq_contig[lvl] + req->schq[lvl];
pfvf_map = txsch->pfvf_map;
if (!req_schq)
continue;
        /* There are only 28 TL1s */
-       if (lvl == NIX_TXSCH_LVL_TL1 && req_schq > txsch->schq.max)
-           goto err;
        if (lvl == NIX_TXSCH_LVL_TL1) {
            if (req->schq_contig[lvl] ||
                req->schq[lvl] > 2 ||
                rvu_get_tl1_schqs(rvu, blkaddr,
                                  pcifunc, NULL, NULL))
                goto err;
            continue;
        }

        /* Check if request is valid */
-       if (!req_schq || req_schq > MAX_TXSCHQ_PER_FUNC)
        if (req_schq > MAX_TXSCHQ_PER_FUNC)
            goto err;
        /* If contiguous queues are needed, check for availability */
@@ -1030,16 +1186,32 @@ int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
    for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
        txsch = &nix_hw->txsch[lvl];
        rsp->schq_contig[lvl] = req->schq_contig[lvl];
pfvf_map = txsch->pfvf_map;
        rsp->schq[lvl] = req->schq[lvl];

-       schq = 0;
        if (!req->schq[lvl] && !req->schq_contig[lvl])
            continue;
/* Handle TL1 specially as it is
* allocation is restricted to 2 TL1's
* per link
*/
if (lvl == NIX_TXSCH_LVL_TL1) {
rsp->schq_contig[lvl] = 0;
rvu_get_tl1_schqs(rvu, blkaddr, pcifunc,
&rsp->schq_list[lvl][0],
&rsp->schq[lvl]);
continue;
}
        /* Alloc contiguous queues first */
        if (req->schq_contig[lvl]) {
            schq = rvu_alloc_rsrc_contig(&txsch->schq,
                                         req->schq_contig[lvl]);

            for (idx = 0; idx < req->schq_contig[lvl]; idx++) {
-               txsch->pfvf_map[schq] = pcifunc;
                pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
                nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
                nix_reset_tx_shaping(rvu, blkaddr, lvl, schq);
                rsp->schq_contig_list[lvl][idx] = schq;
@@ -1050,7 +1222,7 @@ int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
        /* Alloc non-contiguous queues */
        for (idx = 0; idx < req->schq[lvl]; idx++) {
            schq = rvu_alloc_rsrc(&txsch->schq);
-           txsch->pfvf_map[schq] = pcifunc;
            pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
            nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
            nix_reset_tx_shaping(rvu, blkaddr, lvl, schq);
            rsp->schq_list[lvl][idx] = schq;
@@ -1092,7 +1264,7 @@ static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
        txsch = &nix_hw->txsch[lvl];
        for (schq = 0; schq < txsch->schq.max; schq++) {
-           if (txsch->pfvf_map[schq] != pcifunc)
            if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
                continue;
            nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
        }
@@ -1101,7 +1273,7 @@ static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
    /* Flush SMQs */
    txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
    for (schq = 0; schq < txsch->schq.max; schq++) {
-       if (txsch->pfvf_map[schq] != pcifunc)
        if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
            continue;
        cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq));
        /* Do SMQ flush and set enqueue xoff */
@@ -1119,9 +1291,15 @@ static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
    /* Now free scheduler queues to free pool */
    for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
/* Free all SCHQ's except TL1 as
* TL1 is shared across all VF's for a RVU PF
*/
if (lvl == NIX_TXSCH_LVL_TL1)
continue;
        txsch = &nix_hw->txsch[lvl];
        for (schq = 0; schq < txsch->schq.max; schq++) {
-           if (txsch->pfvf_map[schq] != pcifunc)
            if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
                continue;
            rvu_free_rsrc(&txsch->schq, schq);
            txsch->pfvf_map[schq] = 0;
@@ -1138,11 +1316,81 @@ static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
    return 0;
}
static int nix_txschq_free_one(struct rvu *rvu,
struct nix_txsch_free_req *req)
{
int lvl, schq, nixlf, blkaddr, rc;
struct rvu_hwinfo *hw = rvu->hw;
u16 pcifunc = req->hdr.pcifunc;
struct nix_txsch *txsch;
struct nix_hw *nix_hw;
u32 *pfvf_map;
u64 cfg;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
if (blkaddr < 0)
return NIX_AF_ERR_AF_LF_INVALID;
nix_hw = get_nix_hw(rvu->hw, blkaddr);
if (!nix_hw)
return -EINVAL;
nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
if (nixlf < 0)
return NIX_AF_ERR_AF_LF_INVALID;
lvl = req->schq_lvl;
schq = req->schq;
txsch = &nix_hw->txsch[lvl];
/* Don't allow freeing TL1 */
if (lvl > NIX_TXSCH_LVL_TL2 ||
schq >= txsch->schq.max)
goto err;
pfvf_map = txsch->pfvf_map;
mutex_lock(&rvu->rsrc_lock);
if (TXSCH_MAP_FUNC(pfvf_map[schq]) != pcifunc) {
mutex_unlock(&rvu->rsrc_lock);
goto err;
}
/* Flush if it is a SMQ. Onus of disabling
* TL2/3 queue links before SMQ flush is on user
*/
if (lvl == NIX_TXSCH_LVL_SMQ) {
cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq));
/* Do SMQ flush and set enqueue xoff */
cfg |= BIT_ULL(50) | BIT_ULL(49);
rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq), cfg);
/* Wait for flush to complete */
rc = rvu_poll_reg(rvu, blkaddr,
NIX_AF_SMQX_CFG(schq), BIT_ULL(49), true);
if (rc) {
dev_err(rvu->dev,
"NIXLF%d: SMQ%d flush failed\n", nixlf, schq);
}
}
/* Free the resource */
rvu_free_rsrc(&txsch->schq, schq);
txsch->pfvf_map[schq] = 0;
mutex_unlock(&rvu->rsrc_lock);
return 0;
err:
return NIX_AF_ERR_TLX_INVALID;
}
int rvu_mbox_handler_nix_txsch_free(struct rvu *rvu,
                                    struct nix_txsch_free_req *req,
                                    struct msg_rsp *rsp)
{
-   return nix_txschq_free(rvu, req->hdr.pcifunc);
    if (req->flags & TXSCHQ_FREE_ALL)
        return nix_txschq_free(rvu, req->hdr.pcifunc);
    else
        return nix_txschq_free_one(rvu, req);
}
static bool is_txschq_config_valid(struct rvu *rvu, u16 pcifunc, int blkaddr,
@@ -1183,16 +1431,73 @@ static bool is_txschq_config_valid(struct rvu *rvu, u16 pcifunc, int blkaddr,
    return true;
}
static int
nix_tl1_default_cfg(struct rvu *rvu, u16 pcifunc)
{
u16 schq_list[2], schq_cnt, schq;
int blkaddr, idx, err = 0;
u16 map_func, map_flags;
struct nix_hw *nix_hw;
u64 reg, regval;
u32 *pfvf_map;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
if (blkaddr < 0)
return NIX_AF_ERR_AF_LF_INVALID;
nix_hw = get_nix_hw(rvu->hw, blkaddr);
if (!nix_hw)
return -EINVAL;
pfvf_map = nix_hw->txsch[NIX_TXSCH_LVL_TL1].pfvf_map;
mutex_lock(&rvu->rsrc_lock);
err = rvu_get_tl1_schqs(rvu, blkaddr,
pcifunc, schq_list, &schq_cnt);
if (err)
goto unlock;
for (idx = 0; idx < schq_cnt; idx++) {
schq = schq_list[idx];
map_func = TXSCH_MAP_FUNC(pfvf_map[schq]);
map_flags = TXSCH_MAP_FLAGS(pfvf_map[schq]);
/* check if config is already done or this is pf */
if (map_flags & NIX_TXSCHQ_TL1_CFG_DONE)
continue;
/* default configuration */
reg = NIX_AF_TL1X_TOPOLOGY(schq);
regval = (TXSCH_TL1_DFLT_RR_PRIO << 1);
rvu_write64(rvu, blkaddr, reg, regval);
reg = NIX_AF_TL1X_SCHEDULE(schq);
regval = TXSCH_TL1_DFLT_RR_QTM;
rvu_write64(rvu, blkaddr, reg, regval);
reg = NIX_AF_TL1X_CIR(schq);
regval = 0;
rvu_write64(rvu, blkaddr, reg, regval);
map_flags |= NIX_TXSCHQ_TL1_CFG_DONE;
pfvf_map[schq] = TXSCH_MAP(map_func, map_flags);
}
unlock:
mutex_unlock(&rvu->rsrc_lock);
return err;
}
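
The defaults written above come straight from the two new macros in common.h: TXSCH_TL1_DFLT_RR_QTM = (1 << 24) - 1 is the maximum 24-bit round-robin quantum for the SCHEDULE register, and the default RR priority is shifted to bit 1 of the TOPOLOGY register. A small check of both values:

#include <assert.h>
#include <stdint.h>

#define TXSCH_TL1_DFLT_RR_QTM   ((1 << 24) - 1)
#define TXSCH_TL1_DFLT_RR_PRIO  (0x1ull)

int main(void)
{
    /* TOPOLOGY: RR_PRIO is shifted to bit 1, per the write above */
    uint64_t topology = TXSCH_TL1_DFLT_RR_PRIO << 1;
    /* SCHEDULE: 24-bit RR quantum, saturated to its maximum */
    uint64_t schedule = TXSCH_TL1_DFLT_RR_QTM;

    assert(topology == 0x2);
    assert(schedule == 0xFFFFFF);
    return 0;
}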
int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
                                    struct nix_txschq_config *req,
                                    struct msg_rsp *rsp)
{
    u16 schq, pcifunc = req->hdr.pcifunc;
    struct rvu_hwinfo *hw = rvu->hw;
-   u16 pcifunc = req->hdr.pcifunc;
    u64 reg, regval, schq_regbase;
    struct nix_txsch *txsch;
    u16 map_func, map_flags;
    struct nix_hw *nix_hw;
    int blkaddr, idx, err;
    u32 *pfvf_map;
    int nixlf;
    if (req->lvl >= NIX_TXSCH_LVL_CNT ||
@@ -1212,6 +1517,16 @@ int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
        return NIX_AF_ERR_AF_LF_INVALID;

    txsch = &nix_hw->txsch[req->lvl];
pfvf_map = txsch->pfvf_map;
/* VF is only allowed to trigger
* setting default cfg on TL1
*/
if (pcifunc & RVU_PFVF_FUNC_MASK &&
req->lvl == NIX_TXSCH_LVL_TL1) {
return nix_tl1_default_cfg(rvu, pcifunc);
}
    for (idx = 0; idx < req->num_regs; idx++) {
        reg = req->reg[idx];
        regval = req->regval[idx];
@@ -1229,6 +1544,21 @@ int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
            regval |= ((u64)nixlf << 24);
        }
/* Mark config as done for TL1 by PF */
if (schq_regbase >= NIX_AF_TL1X_SCHEDULE(0) &&
schq_regbase <= NIX_AF_TL1X_GREEN_BYTES(0)) {
schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
mutex_lock(&rvu->rsrc_lock);
map_func = TXSCH_MAP_FUNC(pfvf_map[schq]);
map_flags = TXSCH_MAP_FLAGS(pfvf_map[schq]);
map_flags |= NIX_TXSCHQ_TL1_CFG_DONE;
pfvf_map[schq] = TXSCH_MAP(map_func, map_flags);
mutex_unlock(&rvu->rsrc_lock);
}
        rvu_write64(rvu, blkaddr, reg, regval);

        /* Check for SMQ flush, if so, poll for its completion */
@@ -1295,7 +1625,7 @@ static int nix_setup_mce(struct rvu *rvu, int mce, u8 op,
    struct nix_aq_enq_req aq_req;
    int err;

-   aq_req.hdr.pcifunc = pcifunc;
    aq_req.hdr.pcifunc = 0;
    aq_req.ctype = NIX_AQ_CTYPE_MCE;
    aq_req.op = op;
    aq_req.qidx = mce;
@@ -1555,10 +1885,62 @@ static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
     * PF/VF pcifunc mapping info.
     */
    txsch->pfvf_map = devm_kcalloc(rvu->dev, txsch->schq.max,
-                                  sizeof(u16), GFP_KERNEL);
                                   sizeof(u32), GFP_KERNEL);
    if (!txsch->pfvf_map)
        return -ENOMEM;
memset(txsch->pfvf_map, U8_MAX, txsch->schq.max * sizeof(u32));
}
return 0;
}
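
The memset() above fills the u32 map with U8_MAX bytes, which leaves every entry reading 0xFFFFFFFF (U32_MAX) — the "unallocated" sentinel later tested in rvu_get_tl1_schqs(). Why byte-filling works:

#include <assert.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
    uint32_t map[28];   /* e.g. the 28 TL1 queues */

    /* memset writes 0xFF into every byte, so each 4-byte
     * element reads back as UINT32_MAX.
     */
    memset(map, 0xFF, sizeof(map));
    assert(map[0] == UINT32_MAX);
    assert(map[27] == UINT32_MAX);
    return 0;
}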
int rvu_nix_reserve_mark_format(struct rvu *rvu, struct nix_hw *nix_hw,
int blkaddr, u32 cfg)
{
int fmt_idx;
for (fmt_idx = 0; fmt_idx < nix_hw->mark_format.in_use; fmt_idx++) {
if (nix_hw->mark_format.cfg[fmt_idx] == cfg)
return fmt_idx;
    }
if (fmt_idx >= nix_hw->mark_format.total)
return -ERANGE;
rvu_write64(rvu, blkaddr, NIX_AF_MARK_FORMATX_CTL(fmt_idx), cfg);
nix_hw->mark_format.cfg[fmt_idx] = cfg;
nix_hw->mark_format.in_use++;
return fmt_idx;
}
static int nix_af_mark_format_setup(struct rvu *rvu, struct nix_hw *nix_hw,
int blkaddr)
{
u64 cfgs[] = {
[NIX_MARK_CFG_IP_DSCP_RED] = 0x10003,
[NIX_MARK_CFG_IP_DSCP_YELLOW] = 0x11200,
[NIX_MARK_CFG_IP_DSCP_YELLOW_RED] = 0x11203,
[NIX_MARK_CFG_IP_ECN_RED] = 0x6000c,
[NIX_MARK_CFG_IP_ECN_YELLOW] = 0x60c00,
[NIX_MARK_CFG_IP_ECN_YELLOW_RED] = 0x60c0c,
[NIX_MARK_CFG_VLAN_DEI_RED] = 0x30008,
[NIX_MARK_CFG_VLAN_DEI_YELLOW] = 0x30800,
[NIX_MARK_CFG_VLAN_DEI_YELLOW_RED] = 0x30808,
};
int i, rc;
u64 total;
total = (rvu_read64(rvu, blkaddr, NIX_AF_PSE_CONST) & 0xFF00) >> 8;
nix_hw->mark_format.total = (u8)total;
nix_hw->mark_format.cfg = devm_kcalloc(rvu->dev, total, sizeof(u32),
GFP_KERNEL);
if (!nix_hw->mark_format.cfg)
return -ENOMEM;
for (i = 0; i < NIX_MARK_CFG_MAX; i++) {
rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfgs[i]);
if (rc < 0)
dev_err(rvu->dev, "Err %d in setup mark format %d\n",
i, rc);
}
    return 0;
}
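
nix_af_mark_format_setup() reads the number of hardware mark-format entries from bits 15:8 of NIX_AF_PSE_CONST and funnels each default profile through rvu_nix_reserve_mark_format(), which reuses an existing slot whenever the same cfg word was already programmed. The dedup logic in isolation (a sketch; the driver also writes the NIX_AF_MARK_FORMATX_CTL CSR and returns -ERANGE when the table is full):

#include <assert.h>
#include <stdint.h>

#define TOTAL_FMTS 16

static uint32_t fmt_cfg[TOTAL_FMTS];
static int fmt_in_use;

/* Return an existing index for a matching cfg, else claim a new one */
static int reserve_mark_format(uint32_t cfg)
{
    int i;

    for (i = 0; i < fmt_in_use; i++)
        if (fmt_cfg[i] == cfg)
            return i;
    if (fmt_in_use >= TOTAL_FMTS)
        return -1;               /* table full */
    fmt_cfg[fmt_in_use] = cfg;   /* CSR write happens here in the driver */
    return fmt_in_use++;
}

int main(void)
{
    assert(reserve_mark_format(0x10003) == 0);  /* new entry */
    assert(reserve_mark_format(0x11200) == 1);  /* new entry */
    assert(reserve_mark_format(0x10003) == 0);  /* reused    */
    assert(fmt_in_use == 2);
    return 0;
}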
@@ -1593,187 +1975,284 @@ int rvu_mbox_handler_nix_stats_rst(struct rvu *rvu, struct msg_req *req,
}
/* Returns the ALG index to be set into NPC_RX_ACTION */
-static int get_flowkey_alg_idx(u32 flow_cfg)
-{
-   u32 ip_cfg;
-
-   flow_cfg &= ~FLOW_KEY_TYPE_PORT;
-   ip_cfg = FLOW_KEY_TYPE_IPV4 | FLOW_KEY_TYPE_IPV6;
-   if (flow_cfg == ip_cfg)
-       return FLOW_KEY_ALG_IP;
-   else if (flow_cfg == (ip_cfg | FLOW_KEY_TYPE_TCP))
-       return FLOW_KEY_ALG_TCP;
-   else if (flow_cfg == (ip_cfg | FLOW_KEY_TYPE_UDP))
-       return FLOW_KEY_ALG_UDP;
-   else if (flow_cfg == (ip_cfg | FLOW_KEY_TYPE_SCTP))
-       return FLOW_KEY_ALG_SCTP;
-   else if (flow_cfg == (ip_cfg | FLOW_KEY_TYPE_TCP | FLOW_KEY_TYPE_UDP))
-       return FLOW_KEY_ALG_TCP_UDP;
-   else if (flow_cfg == (ip_cfg | FLOW_KEY_TYPE_TCP | FLOW_KEY_TYPE_SCTP))
-       return FLOW_KEY_ALG_TCP_SCTP;
-   else if (flow_cfg == (ip_cfg | FLOW_KEY_TYPE_UDP | FLOW_KEY_TYPE_SCTP))
-       return FLOW_KEY_ALG_UDP_SCTP;
-   else if (flow_cfg == (ip_cfg | FLOW_KEY_TYPE_TCP |
-                         FLOW_KEY_TYPE_UDP | FLOW_KEY_TYPE_SCTP))
-       return FLOW_KEY_ALG_TCP_UDP_SCTP;
-
-   return FLOW_KEY_ALG_PORT;
-}
-
-int rvu_mbox_handler_nix_rss_flowkey_cfg(struct rvu *rvu,
-                                        struct nix_rss_flowkey_cfg *req,
-                                        struct msg_rsp *rsp)
-{
-   struct rvu_hwinfo *hw = rvu->hw;
-   u16 pcifunc = req->hdr.pcifunc;
-   int alg_idx, nixlf, blkaddr;
-
-   blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
-   if (blkaddr < 0)
-       return NIX_AF_ERR_AF_LF_INVALID;
-
-   nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
-   if (nixlf < 0)
-       return NIX_AF_ERR_AF_LF_INVALID;
-
-   alg_idx = get_flowkey_alg_idx(req->flowkey_cfg);
-
-   rvu_npc_update_flowkey_alg_idx(rvu, pcifunc, nixlf, req->group,
-                                  alg_idx, req->mcam_index);
-   return 0;
-}
static int get_flowkey_alg_idx(struct nix_hw *nix_hw, u32 flow_cfg)
{
    int i;

    /* Scan over exiting algo entries to find a match */
    for (i = 0; i < nix_hw->flowkey.in_use; i++)
        if (nix_hw->flowkey.flowkey[i] == flow_cfg)
            return i;

    return -ERANGE;
}
-static void set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
-{
-   struct nix_rx_flowkey_alg *field = NULL;
-   int idx, key_type;
-
-   if (!alg)
-       return;
-
-   /* FIELD0: IPv4
-    * FIELD1: IPv6
-    * FIELD2: TCP/UDP/SCTP/ALL
-    * FIELD3: Unused
-    * FIELD4: Unused
-    *
-    * Each of the 32 possible flow key algorithm definitions should
-    * fall into above incremental config (except ALG0). Otherwise a
-    * single NPC MCAM entry is not sufficient for supporting RSS.
-    *
-    * If a different definition or combination needed then NPC MCAM
-    * has to be programmed to filter such pkts and it's action should
-    * point to this definition to calculate flowtag or hash.
-    */
-   for (idx = 0; idx < 32; idx++) {
-       key_type = flow_cfg & BIT_ULL(idx);
-       if (!key_type)
-           continue;
-       switch (key_type) {
-       case FLOW_KEY_TYPE_PORT:
-           field = &alg[0];
-           field->sel_chan = true;
-           /* This should be set to 1, when SEL_CHAN is set */
-           field->bytesm1 = 1;
-           break;
-       case FLOW_KEY_TYPE_IPV4:
-           field = &alg[0];
-           field->lid = NPC_LID_LC;
-           field->ltype_match = NPC_LT_LC_IP;
-           field->hdr_offset = 12; /* SIP offset */
-           field->bytesm1 = 7; /* SIP + DIP, 8 bytes */
-           field->ltype_mask = 0xF; /* Match only IPv4 */
-           break;
-       case FLOW_KEY_TYPE_IPV6:
-           field = &alg[1];
-           field->lid = NPC_LID_LC;
-           field->ltype_match = NPC_LT_LC_IP6;
-           field->hdr_offset = 8; /* SIP offset */
-           field->bytesm1 = 31; /* SIP + DIP, 32 bytes */
-           field->ltype_mask = 0xF; /* Match only IPv6 */
-           break;
-       case FLOW_KEY_TYPE_TCP:
-       case FLOW_KEY_TYPE_UDP:
-       case FLOW_KEY_TYPE_SCTP:
-           field = &alg[2];
-           field->lid = NPC_LID_LD;
-           field->bytesm1 = 3; /* Sport + Dport, 4 bytes */
-           if (key_type == FLOW_KEY_TYPE_TCP)
-               field->ltype_match |= NPC_LT_LD_TCP;
-           else if (key_type == FLOW_KEY_TYPE_UDP)
-               field->ltype_match |= NPC_LT_LD_UDP;
-           else if (key_type == FLOW_KEY_TYPE_SCTP)
-               field->ltype_match |= NPC_LT_LD_SCTP;
-           field->key_offset = 32; /* After IPv4/v6 SIP, DIP */
-           field->ltype_mask = ~field->ltype_match;
-           break;
-       }
-       if (field)
-           field->ena = 1;
-       field = NULL;
-   }
-}
static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
{
    int idx, nr_field, key_off, field_marker, keyoff_marker;
    int max_key_off, max_bit_pos, group_member;
    struct nix_rx_flowkey_alg *field;
    struct nix_rx_flowkey_alg tmp;
    u32 key_type, valid_key;

    if (!alg)
        return -EINVAL;

#define FIELDS_PER_ALG  5
#define MAX_KEY_OFF     40
    /* Clear all fields */
    memset(alg, 0, sizeof(uint64_t) * FIELDS_PER_ALG);

    /* Each of the 32 possible flow key algorithm definitions should
     * fall into above incremental config (except ALG0). Otherwise a
     * single NPC MCAM entry is not sufficient for supporting RSS.
     *
     * If a different definition or combination needed then NPC MCAM
     * has to be programmed to filter such pkts and it's action should
     * point to this definition to calculate flowtag or hash.
     *
     * The `for loop` goes over _all_ protocol field and the following
     * variables depicts the state machine forward progress logic.
     *
     * keyoff_marker - Enabled when hash byte length needs to be accounted
     * in field->key_offset update.
     * field_marker - Enabled when a new field needs to be selected.
     * group_member - Enabled when protocol is part of a group.
     */

    keyoff_marker = 0; max_key_off = 0; group_member = 0;
    nr_field = 0; key_off = 0; field_marker = 1;
    field = &tmp; max_bit_pos = fls(flow_cfg);
    for (idx = 0;
         idx < max_bit_pos && nr_field < FIELDS_PER_ALG &&
         key_off < MAX_KEY_OFF; idx++) {
        key_type = BIT(idx);
        valid_key = flow_cfg & key_type;
        /* Found a field marker, reset the field values */
        if (field_marker)
            memset(&tmp, 0, sizeof(tmp));

        switch (key_type) {
        case NIX_FLOW_KEY_TYPE_PORT:
            field->sel_chan = true;
            /* This should be set to 1, when SEL_CHAN is set */
            field->bytesm1 = 1;
            field_marker = true;
            keyoff_marker = true;
            break;
        case NIX_FLOW_KEY_TYPE_IPV4:
            field->lid = NPC_LID_LC;
            field->ltype_match = NPC_LT_LC_IP;
            field->hdr_offset = 12; /* SIP offset */
            field->bytesm1 = 7; /* SIP + DIP, 8 bytes */
            field->ltype_mask = 0xF; /* Match only IPv4 */
            field_marker = true;
            keyoff_marker = false;
            break;
        case NIX_FLOW_KEY_TYPE_IPV6:
            field->lid = NPC_LID_LC;
            field->ltype_match = NPC_LT_LC_IP6;
            field->hdr_offset = 8; /* SIP offset */
            field->bytesm1 = 31; /* SIP + DIP, 32 bytes */
            field->ltype_mask = 0xF; /* Match only IPv6 */
            field_marker = true;
            keyoff_marker = true;
            break;
        case NIX_FLOW_KEY_TYPE_TCP:
        case NIX_FLOW_KEY_TYPE_UDP:
        case NIX_FLOW_KEY_TYPE_SCTP:
            field->lid = NPC_LID_LD;
            field->bytesm1 = 3; /* Sport + Dport, 4 bytes */
            if (key_type == NIX_FLOW_KEY_TYPE_TCP && valid_key) {
                field->ltype_match |= NPC_LT_LD_TCP;
                group_member = true;
            } else if (key_type == NIX_FLOW_KEY_TYPE_UDP &&
                       valid_key) {
                field->ltype_match |= NPC_LT_LD_UDP;
                group_member = true;
            } else if (key_type == NIX_FLOW_KEY_TYPE_SCTP &&
                       valid_key) {
                field->ltype_match |= NPC_LT_LD_SCTP;
                group_member = true;
            }
            field->ltype_mask = ~field->ltype_match;
            if (key_type == NIX_FLOW_KEY_TYPE_SCTP) {
                /* Handle the case where any of the group item
                 * is enabled in the group but not the final one
                 */
                if (group_member) {
                    valid_key = true;
                    group_member = false;
                }
                field_marker = true;
                keyoff_marker = true;
            } else {
                field_marker = false;
                keyoff_marker = false;
            }
            break;
        }
        field->ena = 1;

        /* Found a valid flow key type */
        if (valid_key) {
            field->key_offset = key_off;
            memcpy(&alg[nr_field], field, sizeof(*field));
            max_key_off = max(max_key_off, field->bytesm1 + 1);

            /* Found a field marker, get the next field */
            if (field_marker)
                nr_field++;
        }

        /* Found a keyoff marker, update the new key_off */
        if (keyoff_marker) {
            key_off += max_key_off;
            max_key_off = 0;
        }
    }
    /* Processed all the flow key types */
    if (idx == max_bit_pos && key_off <= MAX_KEY_OFF)
        return 0;
    else
        return NIX_AF_ERR_RSS_NOSPC_FIELD;
}
-static void nix_rx_flowkey_alg_cfg(struct rvu *rvu, int blkaddr)
static int reserve_flowkey_alg_idx(struct rvu *rvu, int blkaddr, u32 flow_cfg)
{
-#define FIELDS_PER_ALG 5
-   u64 field[FLOW_KEY_ALG_MAX][FIELDS_PER_ALG];
-   u32 flowkey_cfg, minkey_cfg;
-   int alg, fid;
    u64 field[FIELDS_PER_ALG];
    struct nix_hw *hw;
    int fid, rc;

-   memset(&field, 0, sizeof(u64) * FLOW_KEY_ALG_MAX * FIELDS_PER_ALG);
    hw = get_nix_hw(rvu->hw, blkaddr);
    if (!hw)
        return -EINVAL;

-   /* Only incoming channel number */
-   flowkey_cfg = FLOW_KEY_TYPE_PORT;
-   set_flowkey_fields((void *)&field[FLOW_KEY_ALG_PORT], flowkey_cfg);
-
-   /* For a incoming pkt if none of the fields match then flowkey
-    * will be zero, hence tag generated will also be zero.
-    * RSS entry at rsse_index = NIX_AF_LF()_RSS_GRP()[OFFSET] will
-    * be used to queue the packet.
-    */
    /* No room to add new flow hash algoritham */
    if (hw->flowkey.in_use >= NIX_FLOW_KEY_ALG_MAX)
        return NIX_AF_ERR_RSS_NOSPC_ALGO;

    /* Generate algo fields for the given flow_cfg */
    rc = set_flowkey_fields((struct nix_rx_flowkey_alg *)field, flow_cfg);
    if (rc)
        return rc;

    /* Update ALGX_FIELDX register with generated fields */
    for (fid = 0; fid < FIELDS_PER_ALG; fid++)
        rvu_write64(rvu, blkaddr,
                    NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(hw->flowkey.in_use,
                                                   fid), field[fid]);

    /* Store the flow_cfg for futher lookup */
    rc = hw->flowkey.in_use;
    hw->flowkey.flowkey[rc] = flow_cfg;
    hw->flowkey.in_use++;

    return rc;
}
int rvu_mbox_handler_nix_rss_flowkey_cfg(struct rvu *rvu,
struct nix_rss_flowkey_cfg *req,
struct nix_rss_flowkey_cfg_rsp *rsp)
{
struct rvu_hwinfo *hw = rvu->hw;
u16 pcifunc = req->hdr.pcifunc;
int alg_idx, nixlf, blkaddr;
struct nix_hw *nix_hw;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
if (blkaddr < 0)
return NIX_AF_ERR_AF_LF_INVALID;
nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
if (nixlf < 0)
return NIX_AF_ERR_AF_LF_INVALID;
nix_hw = get_nix_hw(rvu->hw, blkaddr);
if (!nix_hw)
return -EINVAL;
alg_idx = get_flowkey_alg_idx(nix_hw, req->flowkey_cfg);
/* Failed to get algo index from the exiting list, reserve new */
if (alg_idx < 0) {
alg_idx = reserve_flowkey_alg_idx(rvu, blkaddr,
req->flowkey_cfg);
if (alg_idx < 0)
return alg_idx;
}
rsp->alg_idx = alg_idx;
rvu_npc_update_flowkey_alg_idx(rvu, pcifunc, nixlf, req->group,
alg_idx, req->mcam_index);
return 0;
}
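
Together, get_flowkey_alg_idx() and reserve_flowkey_alg_idx() give the mailbox handler find-or-create semantics: a requested flowkey_cfg reuses an existing algorithm slot when one matches, and only burns one of the 32 hardware indices otherwise; the chosen index is returned to the PF/VF in rsp->alg_idx. The same pattern, condensed into a standalone sketch:

#include <assert.h>
#include <stdint.h>

#define NIX_FLOW_KEY_ALG_MAX 32

static uint32_t flowkey[NIX_FLOW_KEY_ALG_MAX];
static int in_use;

static int get_or_reserve_alg_idx(uint32_t flow_cfg)
{
    int i;

    for (i = 0; i < in_use; i++)          /* lookup first */
        if (flowkey[i] == flow_cfg)
            return i;
    if (in_use >= NIX_FLOW_KEY_ALG_MAX)   /* NIX_AF_ERR_RSS_NOSPC_ALGO */
        return -1;
    flowkey[in_use] = flow_cfg;           /* driver programs ALGX_FIELDX here */
    return in_use++;
}

int main(void)
{
    /* NIX_FLOW_KEY_TYPE_IPV4 | IPV6 | TCP, per the mbox.h bit defs */
    uint32_t v4v6_tcp = (1u << 1) | (1u << 2) | (1u << 3);

    int a = get_or_reserve_alg_idx(v4v6_tcp);
    int b = get_or_reserve_alg_idx(v4v6_tcp);

    assert(a == b);        /* second request reuses the slot */
    assert(in_use == 1);
    return 0;
}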
static int nix_rx_flowkey_alg_cfg(struct rvu *rvu, int blkaddr)
{
u32 flowkey_cfg, minkey_cfg;
int alg, fid, rc;
/* Disable all flow key algx fieldx */
for (alg = 0; alg < NIX_FLOW_KEY_ALG_MAX; alg++) {
for (fid = 0; fid < FIELDS_PER_ALG; fid++)
rvu_write64(rvu, blkaddr,
NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(alg, fid),
0);
}
    /* IPv4/IPv6 SIP/DIPs */
-   flowkey_cfg = FLOW_KEY_TYPE_IPV4 | FLOW_KEY_TYPE_IPV6;
-   set_flowkey_fields((void *)&field[FLOW_KEY_ALG_IP], flowkey_cfg);
    flowkey_cfg = NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6;
    rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
    if (rc < 0)
        return rc;

    /* TCPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
    minkey_cfg = flowkey_cfg;
-   flowkey_cfg = minkey_cfg | FLOW_KEY_TYPE_TCP;
-   set_flowkey_fields((void *)&field[FLOW_KEY_ALG_TCP], flowkey_cfg);
    flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP;
    rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
    if (rc < 0)
        return rc;

    /* UDPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
-   flowkey_cfg = minkey_cfg | FLOW_KEY_TYPE_UDP;
-   set_flowkey_fields((void *)&field[FLOW_KEY_ALG_UDP], flowkey_cfg);
    flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP;
    rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
    if (rc < 0)
        return rc;

    /* SCTPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
-   flowkey_cfg = minkey_cfg | FLOW_KEY_TYPE_SCTP;
-   set_flowkey_fields((void *)&field[FLOW_KEY_ALG_SCTP], flowkey_cfg);
    flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_SCTP;
    rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
    if (rc < 0)
        return rc;

    /* TCP/UDP v4/v6 4-tuple, rest IP pkts 2-tuple */
-   flowkey_cfg = minkey_cfg | FLOW_KEY_TYPE_TCP | FLOW_KEY_TYPE_UDP;
-   set_flowkey_fields((void *)&field[FLOW_KEY_ALG_TCP_UDP], flowkey_cfg);
    flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
                  NIX_FLOW_KEY_TYPE_UDP;
    rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
    if (rc < 0)
        return rc;

    /* TCP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
-   flowkey_cfg = minkey_cfg | FLOW_KEY_TYPE_TCP | FLOW_KEY_TYPE_SCTP;
-   set_flowkey_fields((void *)&field[FLOW_KEY_ALG_TCP_SCTP], flowkey_cfg);
    flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
                  NIX_FLOW_KEY_TYPE_SCTP;
    rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
    if (rc < 0)
        return rc;

    /* UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
-   flowkey_cfg = minkey_cfg | FLOW_KEY_TYPE_UDP | FLOW_KEY_TYPE_SCTP;
-   set_flowkey_fields((void *)&field[FLOW_KEY_ALG_UDP_SCTP], flowkey_cfg);
    flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP |
                  NIX_FLOW_KEY_TYPE_SCTP;
    rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
    if (rc < 0)
        return rc;

    /* TCP/UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
-   flowkey_cfg = minkey_cfg | FLOW_KEY_TYPE_TCP |
-                 FLOW_KEY_TYPE_UDP | FLOW_KEY_TYPE_SCTP;
-   set_flowkey_fields((void *)&field[FLOW_KEY_ALG_TCP_UDP_SCTP],
-                      flowkey_cfg);
    flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
                  NIX_FLOW_KEY_TYPE_UDP | NIX_FLOW_KEY_TYPE_SCTP;
    rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
    if (rc < 0)
        return rc;

-   for (alg = 0; alg < FLOW_KEY_ALG_MAX; alg++) {
-       for (fid = 0; fid < FIELDS_PER_ALG; fid++)
-           rvu_write64(rvu, blkaddr,
-                       NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(alg, fid),
-                       field[alg][fid]);
-   }
    return 0;
}
int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu,
@@ -1919,7 +2398,7 @@ int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
        txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
        mutex_lock(&rvu->rsrc_lock);
        for (schq = 0; schq < txsch->schq.max; schq++) {
                if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
                        continue;
                cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq));
                cfg = (cfg & ~(0xFFFFULL << 8)) | ((u64)req->maxlen << 8);
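The maxlen update above is the usual read-modify-write idiom: clear the 16-bit field at bit 8 of NIX_AF_SMQX_CFG, then OR in the new value. A standalone sketch of the same idiom; the field position is only what the mask in the code implies:

#include <stdint.h>
#include <stdio.h>

/* Clear the 16-bit field at bits [23:8] and insert a new value,
 * mirroring the masking done on NIX_AF_SMQX_CFG above.
 */
static uint64_t smq_cfg_set_maxlen(uint64_t cfg, uint16_t maxlen)
{
        cfg &= ~(0xFFFFULL << 8);
        cfg |= (uint64_t)maxlen << 8;
        return cfg;
}

int main(void)
{
        uint64_t cfg = 0xAAAA99FFF0ULL; /* arbitrary example value */

        printf("0x%llx\n", (unsigned long long)smq_cfg_set_maxlen(cfg, 1518));
        return 0;
}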
@@ -2034,6 +2513,48 @@ int rvu_mbox_handler_nix_rxvlan_alloc(struct rvu *rvu, struct msg_req *req,
        return err;
}
int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req,
                                    struct msg_rsp *rsp)
{
        struct rvu_hwinfo *hw = rvu->hw;
        u16 pcifunc = req->hdr.pcifunc;
        struct rvu_block *block;
        struct rvu_pfvf *pfvf;
        int nixlf, blkaddr;
        u64 cfg;

        pfvf = rvu_get_pfvf(rvu, pcifunc);
        blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
        if (!pfvf->nixlf || blkaddr < 0)
                return NIX_AF_ERR_AF_LF_INVALID;

        block = &hw->block[blkaddr];
        nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
        if (nixlf < 0)
                return NIX_AF_ERR_AF_LF_INVALID;

        cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf));
        /* Set the interface configuration */
        if (req->len_verify & BIT(0))
                cfg |= BIT_ULL(41);
        else
                cfg &= ~BIT_ULL(41);

        if (req->len_verify & BIT(1))
                cfg |= BIT_ULL(40);
        else
                cfg &= ~BIT_ULL(40);

        if (req->csum_verify & BIT(0))
                cfg |= BIT_ULL(37);
        else
                cfg &= ~BIT_ULL(37);

        rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), cfg);
        return 0;
}
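The handler maps the two request bytes onto three NIX_AF_LFX_RX_CFG bits (41, 40 and 37). A runnable user-space mirror of that translation follows; the bit positions come from the code above, but the semantic names in the comments are assumptions, not taken from the hardware manual:

#include <stdint.h>
#include <stdio.h>

#define BIT(n)          (1U << (n))
#define BIT_ULL(n)      (1ULL << (n))

/* Mirrors rvu_mbox_handler_nix_set_rx_cfg()'s bit manipulation */
static uint64_t nix_rx_cfg_apply(uint64_t cfg, uint8_t len_verify,
                                 uint8_t csum_verify)
{
        if (len_verify & BIT(0))        /* outer length check (assumed) */
                cfg |= BIT_ULL(41);
        else
                cfg &= ~BIT_ULL(41);

        if (len_verify & BIT(1))        /* inner length check (assumed) */
                cfg |= BIT_ULL(40);
        else
                cfg &= ~BIT_ULL(40);

        if (csum_verify & BIT(0))       /* L4 checksum check (assumed) */
                cfg |= BIT_ULL(37);
        else
                cfg &= ~BIT_ULL(37);

        return cfg;
}

int main(void)
{
        /* Enable both length checks and checksum verification */
        printf("0x%llx\n", (unsigned long long)
               nix_rx_cfg_apply(0, BIT(0) | BIT(1), BIT(0)));
        return 0;
}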
static void nix_link_config(struct rvu *rvu, int blkaddr)
{
        struct rvu_hwinfo *hw = rvu->hw;

@@ -2212,9 +2733,6 @@ int rvu_nix_init(struct rvu *rvu)
        /* Restore CINT timer delay to HW reset values */
        rvu_write64(rvu, blkaddr, NIX_AF_CINT_DELAY, 0x0ULL);

        if (blkaddr == BLKADDR_NIX0) {
                hw->nix0 = devm_kzalloc(rvu->dev,
                                        sizeof(struct nix_hw), GFP_KERNEL);

@@ -2225,24 +2743,48 @@ int rvu_nix_init(struct rvu *rvu)
                if (err)
                        return err;

                err = nix_af_mark_format_setup(rvu, hw->nix0, blkaddr);
                if (err)
                        return err;

                err = nix_setup_mcast(rvu, hw->nix0, blkaddr);
                if (err)
                        return err;
                /* Configure segmentation offload formats */
                nix_setup_lso(rvu, hw->nix0, blkaddr);

                /* Config Outer/Inner L2, IP, TCP, UDP and SCTP NPC layer info.
                 * This helps HW protocol checker to identify headers
                 * and validate length and checksums.
                 */
                rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OL2,
                            (NPC_LID_LA << 8) | (NPC_LT_LA_ETHER << 4) | 0x0F);
                rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4,
                            (NPC_LID_LC << 8) | (NPC_LT_LC_IP << 4) | 0x0F);
                rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4,
                            (NPC_LID_LF << 8) | (NPC_LT_LF_TU_IP << 4) | 0x0F);
                rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6,
                            (NPC_LID_LC << 8) | (NPC_LT_LC_IP6 << 4) | 0x0F);
                rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6,
                            (NPC_LID_LF << 8) | (NPC_LT_LF_TU_IP6 << 4) | 0x0F);
                rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OTCP,
                            (NPC_LID_LD << 8) | (NPC_LT_LD_TCP << 4) | 0x0F);
                rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ITCP,
                            (NPC_LID_LG << 8) | (NPC_LT_LG_TU_TCP << 4) | 0x0F);
                rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OUDP,
                            (NPC_LID_LD << 8) | (NPC_LT_LD_UDP << 4) | 0x0F);
                rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IUDP,
                            (NPC_LID_LG << 8) | (NPC_LT_LG_TU_UDP << 4) | 0x0F);
                rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OSCTP,
                            (NPC_LID_LD << 8) | (NPC_LT_LD_SCTP << 4) | 0x0F);
                rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ISCTP,
                            (NPC_LID_LG << 8) | (NPC_LT_LG_TU_SCTP << 4) |
                            0x0F);

                err = nix_rx_flowkey_alg_cfg(rvu, blkaddr);
                if (err)
                        return err;
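Every NIX_AF_RX_DEF_* write above packs the same triple: layer id in bits [..:8], layer type in bits [7:4], and a 4-bit flags mask in the low nibble. A small helper making that encoding explicit; the NPC_LID_LA/NPC_LT_LA_ETHER values below are assumed enum values for illustration only:

#include <stdint.h>
#include <stdio.h>

/* Assumed enum values, for illustration only */
#define NPC_LID_LA      0
#define NPC_LT_LA_ETHER 1

/* Pack layer id, layer type and the 4-bit flags mask the way every
 * NIX_AF_RX_DEF_* write above does: (lid << 8) | (ltype << 4) | fmask.
 */
static uint64_t nix_rx_def_encode(uint8_t lid, uint8_t ltype, uint8_t fmask)
{
        return ((uint64_t)lid << 8) | ((uint64_t)ltype << 4) | (fmask & 0x0F);
}

int main(void)
{
        printf("OL2 def: 0x%llx\n", (unsigned long long)
               nix_rx_def_encode(NPC_LID_LA, NPC_LT_LA_ETHER, 0x0F));
        return 0;
}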
                /* Initialize CGX/LBK/SDP link credits, min/max pkt lengths */
                nix_link_config(rvu, blkaddr);

@@ -2364,3 +2906,54 @@ void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
        nix_ctx_free(rvu, pfvf);
}
int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu,
                                        struct nix_lso_format_cfg *req,
                                        struct nix_lso_format_cfg_rsp *rsp)
{
        u16 pcifunc = req->hdr.pcifunc;
        struct nix_hw *nix_hw;
        struct rvu_pfvf *pfvf;
        int blkaddr, idx, f;
        u64 reg;

        pfvf = rvu_get_pfvf(rvu, pcifunc);
        blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
        if (!pfvf->nixlf || blkaddr < 0)
                return NIX_AF_ERR_AF_LF_INVALID;

        nix_hw = get_nix_hw(rvu->hw, blkaddr);
        if (!nix_hw)
                return -EINVAL;

        /* Find existing matching LSO format, if any */
        for (idx = 0; idx < nix_hw->lso.in_use; idx++) {
                for (f = 0; f < NIX_LSO_FIELD_MAX; f++) {
                        reg = rvu_read64(rvu, blkaddr,
                                         NIX_AF_LSO_FORMATX_FIELDX(idx, f));
                        if (req->fields[f] != (reg & req->field_mask))
                                break;
                }

                if (f == NIX_LSO_FIELD_MAX)
                        break;
        }

        if (idx < nix_hw->lso.in_use) {
                /* Match found */
                rsp->lso_format_idx = idx;
                return 0;
        }

        if (nix_hw->lso.in_use == nix_hw->lso.total)
                return NIX_AF_ERR_LSO_CFG_FAIL;

        rsp->lso_format_idx = nix_hw->lso.in_use++;

        for (f = 0; f < NIX_LSO_FIELD_MAX; f++)
                rvu_write64(rvu, blkaddr,
                            NIX_AF_LSO_FORMATX_FIELDX(rsp->lso_format_idx, f),
                            req->fields[f]);

        return 0;
}
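The handler above implements a simple find-or-allocate policy: reuse an existing LSO format if every field matches, otherwise consume a new index until the hardware table is full. A sketch of the same policy in miniature, over a plain array instead of NIX_AF_LSO_FORMATX_FIELDX registers:

#include <stdint.h>
#include <stdio.h>

#define NFIELDS 4
#define NSLOTS  8

static uint64_t table[NSLOTS][NFIELDS];
static int in_use;

/* Returns the index of a matching or newly allocated format, -1 if full */
static int lso_format_get(const uint64_t *fields)
{
        int idx, f;

        for (idx = 0; idx < in_use; idx++) {
                for (f = 0; f < NFIELDS; f++)
                        if (fields[f] != table[idx][f])
                                break;
                if (f == NFIELDS)
                        return idx;     /* match found, reuse it */
        }
        if (in_use == NSLOTS)
                return -1;              /* table exhausted */
        for (f = 0; f < NFIELDS; f++)
                table[in_use][f] = fields[f];
        return in_use++;
}

int main(void)
{
        uint64_t tcp4[NFIELDS] = { 1, 2, 3, 4 };
        int first = lso_format_get(tcp4);
        int second = lso_format_get(tcp4);

        printf("first: %d, second: %d\n", first, second); /* 0, 0 */
        return 0;
}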
@@ -16,6 +16,7 @@
#include "rvu_reg.h"
#include "rvu.h"
#include "npc.h"
#include "cgx.h"
#include "npc_profile.h"

#define RSVD_MCAM_ENTRIES_PER_PF 2 /* Bcast & Promisc */

@@ -368,9 +369,9 @@ void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
                                    int nixlf, u64 chan, bool allmulti)
{
        struct npc_mcam *mcam = &rvu->hw->mcam;
        int blkaddr, ucast_idx, index, kwi;
        struct mcam_entry entry = { {0} };
        struct nix_rx_action action = { };

        /* Only PF or AF VF can add a promiscuous entry */
        if ((pcifunc & RVU_PFVF_FUNC_MASK) && !is_afvf(pcifunc))

@@ -392,9 +393,21 @@ void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
                entry.kw_mask[kwi] = BIT_ULL(40);
        }
        ucast_idx = npc_get_nixlf_mcam_index(mcam, pcifunc,
                                             nixlf, NIXLF_UCAST_ENTRY);

        /* If the corresponding PF's ucast action is RSS,
         * use the same action for promisc also
         */
        if (is_mcam_entry_enabled(rvu, mcam, blkaddr, ucast_idx))
                *(u64 *)&action = npc_get_mcam_action(rvu, mcam,
                                                      blkaddr, ucast_idx);

        if (action.op != NIX_RX_ACTIONOP_RSS) {
                *(u64 *)&action = 0x00;
                action.op = NIX_RX_ACTIONOP_UCAST;
                action.pf_func = pcifunc;
        }

        entry.action = *(u64 *)&action;
        npc_config_mcam_entry(rvu, mcam, blkaddr, index,
@@ -476,7 +489,7 @@ void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc,
         *
         */
        entry.kw[0] = BIT_ULL(13) | chan;
        entry.kw_mask[0] = BIT_ULL(13) | 0xFFFULL;

        *(u64 *)&action = 0x00;
#ifdef MCAST_MCE
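The kw_mask correction above matters because an MCAM entry only matches where mask bits are set: conceptually, a packet's key matches an entry when the two agree on every masked bit. A minimal predicate expressing that idea; this is the generic TCAM notion, not a driver API:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Generic TCAM-style match: the entry hits iff the packet key and the
 * entry key agree on every bit selected by the mask.
 */
static bool mcam_kw_match(uint64_t pkt_kw, uint64_t ent_kw, uint64_t ent_mask)
{
        return ((pkt_kw ^ ent_kw) & ent_mask) == 0;
}

int main(void)
{
        uint64_t ent_kw = (1ULL << 13) | 0x123;      /* flag bit + channel */
        uint64_t ent_mask = (1ULL << 13) | 0xFFFULL; /* corrected kw_mask[0] */

        printf("chan 0x123: %d\n",
               mcam_kw_match((1ULL << 13) | 0x123, ent_kw, ent_mask)); /* 1 */
        printf("chan 0x456: %d\n",
               mcam_kw_match((1ULL << 13) | 0x456, ent_kw, ent_mask)); /* 0 */
        return 0;
}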
@@ -539,6 +552,21 @@ void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf,
        rvu_write64(rvu, blkaddr,
                    NPC_AF_MCAMEX_BANKX_ACTION(index, bank), *(u64 *)&action);
        index = npc_get_nixlf_mcam_index(mcam, pcifunc,
                                         nixlf, NIXLF_PROMISC_ENTRY);

        /* If PF's promiscuous entry is enabled,
         * Set RSS action for that entry as well
         */
        if (is_mcam_entry_enabled(rvu, mcam, blkaddr, index)) {
                bank = npc_get_bank(mcam, index);
                index &= (mcam->banksize - 1);

                rvu_write64(rvu, blkaddr,
                            NPC_AF_MCAMEX_BANKX_ACTION(index, bank),
                            *(u64 *)&action);
        }
        rvu_npc_update_rxvlan(rvu, pcifunc, nixlf);
}
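The promisc update above also shows how a global MCAM index is addressed per bank: npc_get_bank() derives the bank and `index &= (banksize - 1)` keeps the offset within it, which only works because the bank size is a power of two. The same arithmetic stands alone as follows (the bank size here is an assumed example value):

#include <stdio.h>

#define MCAM_BANKSIZE   256     /* assumed power-of-two bank size */

/* Split a global MCAM index into a bank number and an in-bank offset */
static void mcam_split_index(int index, int *bank, int *offset)
{
        *bank = index / MCAM_BANKSIZE;
        *offset = index & (MCAM_BANKSIZE - 1);
}

int main(void)
{
        int bank, offset;

        mcam_split_index(700, &bank, &offset);
        printf("index 700 -> bank %d, offset %d\n", bank, offset); /* 2, 188 */
        return 0;
}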
@@ -704,6 +732,111 @@ static void npc_config_ldata_extract(struct rvu *rvu, int blkaddr)
        SET_KEX_LD(NIX_INTF_RX, NPC_LID_LD, NPC_LT_LD_TCP, 1, cfg);
}
static void npc_program_mkex_profile(struct rvu *rvu, int blkaddr,
                                     struct npc_mcam_kex *mkex)
{
        int lid, lt, ld, fl;

        rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_RX),
                    mkex->keyx_cfg[NIX_INTF_RX]);
        rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_TX),
                    mkex->keyx_cfg[NIX_INTF_TX]);

        for (ld = 0; ld < NPC_MAX_LD; ld++)
                rvu_write64(rvu, blkaddr, NPC_AF_KEX_LDATAX_FLAGS_CFG(ld),
                            mkex->kex_ld_flags[ld]);

        for (lid = 0; lid < NPC_MAX_LID; lid++) {
                for (lt = 0; lt < NPC_MAX_LT; lt++) {
                        for (ld = 0; ld < NPC_MAX_LD; ld++) {
                                SET_KEX_LD(NIX_INTF_RX, lid, lt, ld,
                                           mkex->intf_lid_lt_ld[NIX_INTF_RX]
                                           [lid][lt][ld]);
                                SET_KEX_LD(NIX_INTF_TX, lid, lt, ld,
                                           mkex->intf_lid_lt_ld[NIX_INTF_TX]
                                           [lid][lt][ld]);
                        }
                }
        }

        for (ld = 0; ld < NPC_MAX_LD; ld++) {
                for (fl = 0; fl < NPC_MAX_LFL; fl++) {
                        SET_KEX_LDFLAGS(NIX_INTF_RX, ld, fl,
                                        mkex->intf_ld_flags[NIX_INTF_RX]
                                        [ld][fl]);
                        SET_KEX_LDFLAGS(NIX_INTF_TX, ld, fl,
                                        mkex->intf_ld_flags[NIX_INTF_TX]
                                        [ld][fl]);
                }
        }
}
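npc_program_mkex_profile() implies the shape of a profile record: a signature, a name, two per-interface key configs, per-LD flag configs, and the large per-(intf, lid, lt, ld) extraction table. A sketch of that layout, inferred purely from the accesses above; all array bounds below are assumed example values, and the exact field order in the real header may differ:

#include <stdint.h>

/* Shape inferred from npc_program_mkex_profile(); illustrative only */
#define MKEX_NAME_LEN   16      /* assumed */
#define NPC_MAX_INTF    2       /* NIX_INTF_RX, NIX_INTF_TX */
#define NPC_MAX_LID     8       /* assumed */
#define NPC_MAX_LT      16      /* assumed */
#define NPC_MAX_LD      2       /* assumed */
#define NPC_MAX_LFL     16      /* assumed */

struct npc_mcam_kex {
        /* MKEX_SIGN when the record is valid */
        uint64_t mkex_sign;
        /* profile name compared against the requested mkex profile */
        char name[MKEX_NAME_LEN];
        /* NPC_AF_INTFX_KEX_CFG, one per interface */
        uint64_t keyx_cfg[NPC_MAX_INTF];
        /* NPC_AF_KEX_LDATAX_FLAGS_CFG */
        uint64_t kex_ld_flags[NPC_MAX_LD];
        /* per-(intf, lid, ltype, ld) extraction config */
        uint64_t intf_lid_lt_ld[NPC_MAX_INTF][NPC_MAX_LID]
                               [NPC_MAX_LT][NPC_MAX_LD];
        /* per-(intf, ld, flags) extraction config */
        uint64_t intf_ld_flags[NPC_MAX_INTF][NPC_MAX_LD][NPC_MAX_LFL];
};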
/* strtoull of "mkexprof" with base:36 */
#define MKEX_SIGN 0x19bbfdbd15f
#define MKEX_END_SIGN 0xdeadbeef
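As the comment notes, the signature is simply the magic string "mkexprof" read as a base-36 integer, which is easy to verify in isolation:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        /* Prints 0x19bbfdbd15f, the MKEX_SIGN value above */
        printf("0x%llx\n", strtoull("mkexprof", NULL, 36));
        return 0;
}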
static void npc_load_mkex_profile(struct rvu *rvu, int blkaddr)
{
        const char *mkex_profile = rvu->mkex_pfl_name;
        struct device *dev = &rvu->pdev->dev;
        void __iomem *mkex_prfl_addr = NULL;
        struct npc_mcam_kex *mcam_kex;
        u64 prfl_addr;
        u64 prfl_sz;

        /* If user not selected mkex profile */
        if (!strncmp(mkex_profile, "default", MKEX_NAME_LEN))
                goto load_default;

        if (cgx_get_mkex_prfl_info(&prfl_addr, &prfl_sz))
                goto load_default;

        if (!prfl_addr || !prfl_sz)
                goto load_default;

        mkex_prfl_addr = ioremap_wc(prfl_addr, prfl_sz);
        if (!mkex_prfl_addr)
                goto load_default;

        mcam_kex = (struct npc_mcam_kex *)mkex_prfl_addr;

        while (((s64)prfl_sz > 0) && (mcam_kex->mkex_sign != MKEX_END_SIGN)) {
                /* Compare with mkex mod_param name string */
                if (mcam_kex->mkex_sign == MKEX_SIGN &&
                    !strncmp(mcam_kex->name, mkex_profile, MKEX_NAME_LEN)) {
                        /* Due to an errata (35786) in A0 pass silicon,
                         * parse nibble enable configuration has to be
                         * identical for both Rx and Tx interfaces.
                         */
                        if (is_rvu_9xxx_A0(rvu) &&
                            mcam_kex->keyx_cfg[NIX_INTF_RX] !=
                            mcam_kex->keyx_cfg[NIX_INTF_TX])
                                goto load_default;

                        /* Program selected mkex profile */
                        npc_program_mkex_profile(rvu, blkaddr, mcam_kex);
                        goto unmap;
                }

                mcam_kex++;
                prfl_sz -= sizeof(struct npc_mcam_kex);
        }

        dev_warn(dev, "Failed to load requested profile: %s\n",
                 rvu->mkex_pfl_name);

load_default:
        dev_info(rvu->dev, "Using default mkex profile\n");
        /* Config packet data and flags extraction into PARSE result */
        npc_config_ldata_extract(rvu, blkaddr);

unmap:
        if (mkex_prfl_addr)
                iounmap(mkex_prfl_addr);
}
static void npc_config_kpuaction(struct rvu *rvu, int blkaddr,
                                 struct npc_kpu_profile_action *kpuaction,
                                 int kpu, int entry, bool pkind)

@@ -1008,13 +1141,20 @@ int rvu_npc_init(struct rvu *rvu)
        rvu_write64(rvu, blkaddr, NPC_AF_PCK_DEF_OIP4,
                    (NPC_LID_LC << 8) | (NPC_LT_LC_IP << 4) | 0x0F);
        /* Config Inner IPV4 NPC layer info */
        rvu_write64(rvu, blkaddr, NPC_AF_PCK_DEF_IIP4,
                    (NPC_LID_LF << 8) | (NPC_LT_LF_TU_IP << 4) | 0x0F);

        /* Enable below for Rx pkts.
         * - Outer IPv4 header checksum validation.
         * - Detect outer L2 broadcast address and set NPC_RESULT_S[L2M].
         * - Inner IPv4 header checksum validation.
         * - Set non zero checksum error code value
         */
        rvu_write64(rvu, blkaddr, NPC_AF_PCK_CFG,
                    rvu_read64(rvu, blkaddr, NPC_AF_PCK_CFG) |
                    BIT_ULL(32) | BIT_ULL(24) | BIT_ULL(6) |
                    BIT_ULL(2) | BIT_ULL(1));
        /* Set RX and TX side MCAM search key size.
         * LA..LD (ltype only) + Channel
         */

@@ -1034,8 +1174,8 @@ int rvu_npc_init(struct rvu *rvu)
        if (err)
                return err;

        /* Configure MKEX profile */
        npc_load_mkex_profile(rvu, blkaddr);
        /* Set TX miss action to UCAST_DEFAULT i.e
         * transmit the packet on NIX LF SQ's default channel.
         */

@@ -2043,6 +2183,7 @@ int rvu_mbox_handler_npc_get_kex_cfg(struct rvu *rvu, struct msg_req *req,
                        GET_KEX_LDFLAGS(NIX_INTF_TX, ld, fl);
                }
        }

        memcpy(rsp->mkex_pfl_name, rvu->mkex_pfl_name, MKEX_NAME_LEN);
        return 0;
}