Commit aeaf0cc5 authored by David S. Miller

Merge branch 'octeontx2-Flow-control-support-and-other-misc-changes'

Sunil Goutham says:

====================
octeontx2: Flow control support and other misc changes

This patch series adds flow control support (802.3 pause frames) and
has other changes wrt generic admin function (AF) driver functionality.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 48938b1e dc819c1b
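
Once this series is applied, the new pause-frame plumbing is reachable through the standard ethtool pause interface. A quick smoke test (assuming the PF netdev is named eth0; adjust the name for your setup) looks like:

    ethtool -a eth0              # query current pause settings
    ethtool -A eth0 rx on tx on  # enable 802.3 pause in both directions

Note that otx2_set_pauseparam() below rejects pause autonegotiation with -EOPNOTSUPP, so "ethtool -A eth0 autoneg on" is expected to fail.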
@@ -367,6 +367,107 @@ int cgx_lmac_tx_enable(void *cgxd, int lmac_id, bool enable)
return !!(last & DATA_PKT_TX_EN);
}
int cgx_lmac_get_pause_frm(void *cgxd, int lmac_id,
u8 *tx_pause, u8 *rx_pause)
{
struct cgx *cgx = cgxd;
u64 cfg;
if (!cgx || lmac_id >= cgx->lmac_count)
return -ENODEV;
cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
*rx_pause = !!(cfg & CGX_SMUX_RX_FRM_CTL_CTL_BCK);
cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
*tx_pause = !!(cfg & CGX_SMUX_TX_CTL_L2P_BP_CONV);
return 0;
}
int cgx_lmac_set_pause_frm(void *cgxd, int lmac_id,
u8 tx_pause, u8 rx_pause)
{
struct cgx *cgx = cgxd;
u64 cfg;
if (!cgx || lmac_id >= cgx->lmac_count)
return -ENODEV;
cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
cfg |= rx_pause ? CGX_SMUX_RX_FRM_CTL_CTL_BCK : 0x0;
cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
cfg &= ~CGX_SMUX_TX_CTL_L2P_BP_CONV;
cfg |= tx_pause ? CGX_SMUX_TX_CTL_L2P_BP_CONV : 0x0;
cgx_write(cgx, lmac_id, CGXX_SMUX_TX_CTL, cfg);
cfg = cgx_read(cgx, 0, CGXX_CMR_RX_OVR_BP);
if (tx_pause) {
cfg &= ~CGX_CMR_RX_OVR_BP_EN(lmac_id);
} else {
cfg |= CGX_CMR_RX_OVR_BP_EN(lmac_id);
cfg &= ~CGX_CMR_RX_OVR_BP_BP(lmac_id);
}
cgx_write(cgx, 0, CGXX_CMR_RX_OVR_BP, cfg);
return 0;
}
static void cgx_lmac_pause_frm_config(struct cgx *cgx, int lmac_id, bool enable)
{
u64 cfg;
if (!cgx || lmac_id >= cgx->lmac_count)
return;
if (enable) {
/* Enable receive pause frames */
cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
cfg |= CGX_SMUX_RX_FRM_CTL_CTL_BCK;
cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
cfg |= CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
/* Enable pause frames transmission */
cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
cfg |= CGX_SMUX_TX_CTL_L2P_BP_CONV;
cgx_write(cgx, lmac_id, CGXX_SMUX_TX_CTL, cfg);
/* Set pause time and interval */
cgx_write(cgx, lmac_id, CGXX_SMUX_TX_PAUSE_PKT_TIME,
DEFAULT_PAUSE_TIME);
cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_PAUSE_PKT_INTERVAL);
cfg &= ~0xFFFFULL;
cgx_write(cgx, lmac_id, CGXX_SMUX_TX_PAUSE_PKT_INTERVAL,
cfg | (DEFAULT_PAUSE_TIME / 2));
cgx_write(cgx, lmac_id, CGXX_GMP_GMI_TX_PAUSE_PKT_TIME,
DEFAULT_PAUSE_TIME);
cfg = cgx_read(cgx, lmac_id,
CGXX_GMP_GMI_TX_PAUSE_PKT_INTERVAL);
cfg &= ~0xFFFFULL;
cgx_write(cgx, lmac_id, CGXX_GMP_GMI_TX_PAUSE_PKT_INTERVAL,
cfg | (DEFAULT_PAUSE_TIME / 2));
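/* Presumably the interval is half the pause time so that a refresh pause
 * frame reaches the peer before its pause timer expires, keeping
 * backpressure continuous while congestion persists (an assumption; the
 * patch itself does not spell this out).
 */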
} else {
/* ALL pause frames received are completely ignored */
cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
/* Disable pause frames transmission */
cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
cfg &= ~CGX_SMUX_TX_CTL_L2P_BP_CONV;
cgx_write(cgx, lmac_id, CGXX_SMUX_TX_CTL, cfg);
}
}
/* CGX Firmware interface low level support */
static int cgx_fwi_cmd_send(u64 req, u64 *resp, struct lmac *lmac)
{
@@ -544,59 +645,6 @@ static inline bool cgx_event_is_linkevent(u64 event)
return false;
}
static inline int cgx_fwi_get_mkex_prfl_sz(u64 *prfl_sz,
struct cgx *cgx)
{
u64 req = 0;
u64 resp;
int err;
req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_MKEX_PRFL_SIZE, req);
err = cgx_fwi_cmd_generic(req, &resp, cgx, 0);
if (!err)
*prfl_sz = FIELD_GET(RESP_MKEX_PRFL_SIZE, resp);
return err;
}
static inline int cgx_fwi_get_mkex_prfl_addr(u64 *prfl_addr,
struct cgx *cgx)
{
u64 req = 0;
u64 resp;
int err;
req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_MKEX_PRFL_ADDR, req);
err = cgx_fwi_cmd_generic(req, &resp, cgx, 0);
if (!err)
*prfl_addr = FIELD_GET(RESP_MKEX_PRFL_ADDR, resp);
return err;
}
int cgx_get_mkex_prfl_info(u64 *addr, u64 *size)
{
struct cgx *cgx_dev;
int err;
if (!addr || !size)
return -EINVAL;
cgx_dev = list_first_entry(&cgx_list, struct cgx, cgx_list);
if (!cgx_dev)
return -ENXIO;
err = cgx_fwi_get_mkex_prfl_sz(size, cgx_dev);
if (err)
return -EIO;
err = cgx_fwi_get_mkex_prfl_addr(addr, cgx_dev);
if (err)
return -EIO;
return 0;
}
static irqreturn_t cgx_fwi_event_handler(int irq, void *data)
{
struct lmac *lmac = data;
@@ -680,6 +728,24 @@ int cgx_lmac_evh_unregister(void *cgxd, int lmac_id)
return 0;
}
int cgx_get_fwdata_base(u64 *base)
{
u64 req = 0, resp;
struct cgx *cgx;
int err;
cgx = list_first_entry_or_null(&cgx_list, struct cgx, cgx_list);
if (!cgx)
return -ENXIO;
req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_FWD_BASE, req);
err = cgx_fwi_cmd_generic(req, &resp, cgx, 0);
if (!err)
*base = FIELD_GET(RESP_FWD_BASE, resp);
return err;
}
static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool enable)
{
u64 req = 0;
@@ -787,6 +853,7 @@ static int cgx_lmac_init(struct cgx *cgx)
/* Add reference */
cgx->lmac_idmap[i] = lmac;
cgx_lmac_pause_frm_config(cgx, i, true);
}
return cgx_lmac_verify_fwi_version(cgx);
@@ -805,6 +872,7 @@ static int cgx_lmac_exit(struct cgx *cgx)
/* Free all lmac related resources */
for (i = 0; i < cgx->lmac_count; i++) {
cgx_lmac_pause_frm_config(cgx, i, false);
lmac = cgx->lmac_idmap[i];
if (!lmac)
continue;
...
@@ -60,10 +60,20 @@
#define CGX_SMUX_RX_FRM_CTL_CTL_BCK BIT_ULL(3)
#define CGXX_GMP_GMI_RXX_FRM_CTL 0x38028
#define CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK BIT_ULL(3)
#define CGXX_SMUX_TX_CTL 0x20178
#define CGXX_SMUX_TX_PAUSE_PKT_TIME 0x20110
#define CGXX_SMUX_TX_PAUSE_PKT_INTERVAL 0x20120
#define CGXX_GMP_GMI_TX_PAUSE_PKT_TIME 0x38230
#define CGXX_GMP_GMI_TX_PAUSE_PKT_INTERVAL 0x38248
#define CGX_SMUX_TX_CTL_L2P_BP_CONV BIT_ULL(7)
#define CGXX_CMR_RX_OVR_BP 0x130
#define CGX_CMR_RX_OVR_BP_EN(X) BIT_ULL(((X) + 8))
#define CGX_CMR_RX_OVR_BP_BP(X) BIT_ULL(((X) + 4))
#define CGX_COMMAND_REG CGXX_SCRATCH1_REG
#define CGX_EVENT_REG CGXX_SCRATCH0_REG
#define CGX_CMD_TIMEOUT 2200 /* msecs */
#define DEFAULT_PAUSE_TIME 0x7FF
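/* Pause time is expressed in IEEE 802.3 pause quanta (one quantum = 512
 * bit-times), so 0x7FF asks the link partner to pause for 2047 quanta.
 */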
#define CGX_NVEC 37
#define CGX_LMAC_FWI 0
@@ -124,5 +134,9 @@ int cgx_lmac_internal_loopback(void *cgxd, int lmac_id, bool enable);
int cgx_get_link_info(void *cgxd, int lmac_id,
struct cgx_link_user_info *linfo);
int cgx_lmac_linkup_start(void *cgxd);
int cgx_get_mkex_prfl_info(u64 *addr, u64 *size);
int cgx_get_fwdata_base(u64 *base);
int cgx_lmac_get_pause_frm(void *cgxd, int lmac_id,
u8 *tx_pause, u8 *rx_pause);
int cgx_lmac_set_pause_frm(void *cgxd, int lmac_id,
u8 tx_pause, u8 rx_pause);
#endif /* CGX_H */
@@ -79,7 +79,8 @@ enum cgx_cmd_id {
CGX_CMD_MODE_CHANGE, /* hot plug support */
CGX_CMD_INTF_SHUTDOWN,
CGX_CMD_GET_MKEX_PRFL_SIZE,
CGX_CMD_GET_MKEX_PRFL_ADDR
CGX_CMD_GET_MKEX_PRFL_ADDR,
CGX_CMD_GET_FWD_BASE, /* get base address of shared FW data */
};
/* async event ids */
@@ -149,6 +150,11 @@ enum cgx_cmd_own {
*/
#define RESP_MKEX_PRFL_ADDR GENMASK_ULL(63, 9)
/* Response to cmd ID as CGX_CMD_GET_FWD_BASE with cmd status as
* CGX_STAT_SUCCESS
*/
#define RESP_FWD_BASE GENMASK_ULL(56, 9)
/* Response to cmd ID - CGX_CMD_LINK_BRING_UP/DOWN, event ID CGX_EVT_LINK_CHANGE
* status can be either CGX_STAT_FAIL or CGX_STAT_SUCCESS
*
...
@@ -125,7 +125,7 @@ static inline struct mbox_msghdr *otx2_mbox_alloc_msg(struct otx2_mbox *mbox,
M(READY, 0x001, ready, msg_req, ready_msg_rsp) \
M(ATTACH_RESOURCES, 0x002, attach_resources, rsrc_attach, msg_rsp) \
M(DETACH_RESOURCES, 0x003, detach_resources, rsrc_detach, msg_rsp) \
M(MSIX_OFFSET, 0x004, msix_offset, msg_req, msix_offset_rsp) \
M(MSIX_OFFSET, 0x005, msix_offset, msg_req, msix_offset_rsp) \
M(VF_FLR, 0x006, vf_flr, msg_req, msg_rsp) \
M(GET_HW_CAP, 0x008, get_hw_cap, msg_req, get_hw_cap_rsp) \
/* CGX mbox IDs (range 0x200 - 0x3FF) */ \
@@ -143,6 +143,8 @@ M(CGX_STOP_LINKEVENTS, 0x208, cgx_stop_linkevents, msg_req, msg_rsp) \
M(CGX_GET_LINKINFO, 0x209, cgx_get_linkinfo, msg_req, cgx_link_info_msg) \
M(CGX_INTLBK_ENABLE, 0x20A, cgx_intlbk_enable, msg_req, msg_rsp) \
M(CGX_INTLBK_DISABLE, 0x20B, cgx_intlbk_disable, msg_req, msg_rsp) \
M(CGX_CFG_PAUSE_FRM, 0x20E, cgx_cfg_pause_frm, cgx_pause_frm_cfg, \
cgx_pause_frm_cfg) \
/* NPA mbox IDs (range 0x400 - 0x5FF) */ \
M(NPA_LF_ALLOC, 0x400, npa_lf_alloc, \
npa_lf_alloc_req, npa_lf_alloc_rsp) \
@@ -211,6 +213,9 @@ M(NIX_LSO_FORMAT_CFG, 0x8011, nix_lso_format_cfg, \
nix_lso_format_cfg, \
nix_lso_format_cfg_rsp) \
M(NIX_RXVLAN_ALLOC, 0x8012, nix_rxvlan_alloc, msg_req, msg_rsp) \
M(NIX_BP_ENABLE, 0x8016, nix_bp_enable, nix_bp_cfg_req, \
nix_bp_cfg_rsp) \
M(NIX_BP_DISABLE, 0x8017, nix_bp_disable, nix_bp_cfg_req, msg_rsp) \
M(NIX_GET_MAC_ADDR, 0x8018, nix_get_mac_addr, msg_req, nix_get_mac_addr_rsp) \
/* Messages initiated by AF (range 0xC00 - 0xDFF) */
@@ -251,7 +256,8 @@ enum rvu_af_status {
struct ready_msg_rsp {
struct mbox_msghdr hdr;
u16 sclk_feq; /* SCLK frequency */
u16 sclk_freq; /* SCLK frequency (in MHz) */
u16 rclk_freq; /* RCLK frequency (in MHz) */
};
/* Structure for requesting resource provisioning.
@@ -342,6 +348,15 @@ struct cgx_link_info_msg {
struct cgx_link_user_info link_info;
};
struct cgx_pause_frm_cfg {
struct mbox_msghdr hdr;
u8 set;
/* set = 1 if the request is to config pause frames */
/* set = 0 if the request is to fetch pause frames config */
u8 rx_pause;
u8 tx_pause;
};
/* NPA mbox message formats */
/* NPA mailbox error codes
@@ -676,6 +691,25 @@ struct nix_lso_format_cfg_rsp {
u8 lso_format_idx;
};
struct nix_bp_cfg_req {
struct mbox_msghdr hdr;
u16 chan_base; /* Starting channel number */
u8 chan_cnt; /* Number of channels */
u8 bpid_per_chan;
/* bpid_per_chan = 0 assigns single bp id for range of channels */
/* bpid_per_chan = 1 assigns separate bp id for each channel */
};
/* PF can be mapped to either CGX or LBK interface,
* so maximum 64 channels are possible.
*/
#define NIX_MAX_BPID_CHAN 64
struct nix_bp_cfg_rsp {
struct mbox_msghdr hdr;
u16 chan_bpid[NIX_MAX_BPID_CHAN]; /* Channel and bpid mapping */
u8 chan_cnt; /* Number of channel for which bpids are assigned */
};
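/* Each chan_bpid[] entry packs the channel offset in bits <16:10> and the
 * assigned bpid in bits <9:0>; see rvu_mbox_handler_nix_bp_enable() and
 * mbox_handler_nix_bp_enable() later in this series for the encode/decode.
 */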
/* NPC mbox message structs */
#define NPC_MCAM_ENTRY_INVALID 0xFFFF
...
@@ -88,13 +88,15 @@ int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero)
u64 reg_val;
reg = rvu->afreg_base + ((block << 28) | offset);
while (time_before(jiffies, timeout)) {
again:
reg_val = readq(reg);
if (zero && !(reg_val & mask))
return 0;
if (!zero && (reg_val & mask))
return 0;
if (time_before(jiffies, timeout)) {
usleep_range(1, 5);
goto again;
}
return -EBUSY;
}
@@ -421,6 +423,19 @@ static void rvu_check_block_implemented(struct rvu *rvu)
}
}
static void rvu_setup_rvum_blk_revid(struct rvu *rvu)
{
rvu_write64(rvu, BLKADDR_RVUM,
RVU_PRIV_BLOCK_TYPEX_REV(BLKTYPE_RVUM),
RVU_BLK_RVUM_REVID);
}
static void rvu_clear_rvum_blk_revid(struct rvu *rvu)
{
rvu_write64(rvu, BLKADDR_RVUM,
RVU_PRIV_BLOCK_TYPEX_REV(BLKTYPE_RVUM), 0x00);
}
int rvu_lf_reset(struct rvu *rvu, struct rvu_block *block, int lf)
{
int err;
@@ -603,7 +618,11 @@ static int rvu_setup_msix_resources(struct rvu *rvu)
*/
cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST);
max_msix = cfg & 0xFFFFF;
if (rvu->fwdata && rvu->fwdata->msixtr_base)
phy_addr = rvu->fwdata->msixtr_base;
else
phy_addr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE);
iova = dma_map_resource(rvu->dev, phy_addr,
max_msix * PCI_MSIX_ENTRY_SIZE,
DMA_BIDIRECTIONAL, 0);
@@ -613,10 +632,18 @@ static int rvu_setup_msix_resources(struct rvu *rvu)
rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE, (u64)iova);
rvu->msix_base_iova = iova;
rvu->msixtr_base_phy = phy_addr;
return 0;
}
static void rvu_reset_msix(struct rvu *rvu)
{
/* Restore msixtr base register */
rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE,
rvu->msixtr_base_phy);
}
static void rvu_free_hw_resources(struct rvu *rvu)
{
struct rvu_hwinfo *hw = rvu->hw;
@@ -655,9 +682,80 @@ static void rvu_free_hw_resources(struct rvu *rvu)
max_msix * PCI_MSIX_ENTRY_SIZE,
DMA_BIDIRECTIONAL, 0);
rvu_reset_msix(rvu);
mutex_destroy(&rvu->rsrc_lock);
}
static void rvu_setup_pfvf_macaddress(struct rvu *rvu)
{
struct rvu_hwinfo *hw = rvu->hw;
int pf, vf, numvfs, hwvf;
struct rvu_pfvf *pfvf;
u64 *mac;
for (pf = 0; pf < hw->total_pfs; pf++) {
if (!is_pf_cgxmapped(rvu, pf))
continue;
/* Assign MAC address to PF */
pfvf = &rvu->pf[pf];
if (rvu->fwdata && pf < PF_MACNUM_MAX) {
mac = &rvu->fwdata->pf_macs[pf];
if (*mac)
u64_to_ether_addr(*mac, pfvf->mac_addr);
else
eth_random_addr(pfvf->mac_addr);
} else {
eth_random_addr(pfvf->mac_addr);
}
/* Assign MAC address to VFs */
rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);
for (vf = 0; vf < numvfs; vf++, hwvf++) {
pfvf = &rvu->hwvf[hwvf];
if (rvu->fwdata && hwvf < VF_MACNUM_MAX) {
mac = &rvu->fwdata->vf_macs[hwvf];
if (*mac)
u64_to_ether_addr(*mac, pfvf->mac_addr);
else
eth_random_addr(pfvf->mac_addr);
} else {
eth_random_addr(pfvf->mac_addr);
}
}
}
}
static int rvu_fwdata_init(struct rvu *rvu)
{
u64 fwdbase;
int err;
/* Get firmware data base address */
err = cgx_get_fwdata_base(&fwdbase);
if (err)
goto fail;
rvu->fwdata = ioremap_wc(fwdbase, sizeof(struct rvu_fwdata));
if (!rvu->fwdata)
goto fail;
if (!is_rvu_fwdata_valid(rvu)) {
dev_err(rvu->dev,
"Mismatch in 'fwdata' struct btw kernel and firmware\n");
iounmap(rvu->fwdata);
rvu->fwdata = NULL;
return -EINVAL;
}
return 0;
fail:
dev_info(rvu->dev, "Unable to fetch 'fwdata' from firmware\n");
return -EIO;
}
static void rvu_fwdata_exit(struct rvu *rvu)
{
if (rvu->fwdata)
iounmap(rvu->fwdata);
}
static int rvu_setup_hw_resources(struct rvu *rvu)
{
struct rvu_hwinfo *hw = rvu->hw;
@@ -813,6 +911,8 @@ static int rvu_setup_hw_resources(struct rvu *rvu)
mutex_init(&rvu->rsrc_lock);
rvu_fwdata_init(rvu);
err = rvu_setup_msix_resources(rvu);
if (err)
return err;
@@ -825,8 +925,10 @@ static int rvu_setup_hw_resources(struct rvu *rvu)
/* Allocate memory for block LF/slot to pcifunc mapping info */
block->fn_map = devm_kcalloc(rvu->dev, block->lf.max,
sizeof(u16), GFP_KERNEL);
if (!block->fn_map)
return -ENOMEM;
if (!block->fn_map) {
err = -ENOMEM;
goto msix_err;
}
/* Scan all blocks to check if low level firmware has
* already provisioned any of the resources to a PF/VF.
@@ -836,25 +938,36 @@ static int rvu_setup_hw_resources(struct rvu *rvu)
err = rvu_npc_init(rvu);
if (err)
goto exit;
goto npc_err;
err = rvu_cgx_init(rvu);
if (err)
goto exit;
goto cgx_err;
/* Assign MACs for CGX mapped functions */
rvu_setup_pfvf_macaddress(rvu);
err = rvu_npa_init(rvu);
if (err)
goto cgx_err;
goto npa_err;
err = rvu_nix_init(rvu);
if (err)
goto cgx_err;
goto nix_err;
return 0;
nix_err:
rvu_nix_freemem(rvu);
npa_err:
rvu_npa_freemem(rvu);
cgx_err:
rvu_cgx_exit(rvu);
exit:
npc_err:
rvu_npc_freemem(rvu);
rvu_fwdata_exit(rvu);
msix_err:
rvu_reset_msix(rvu);
return err;
}
@@ -901,6 +1014,10 @@ int rvu_aq_alloc(struct rvu *rvu, struct admin_queue **ad_queue,
int rvu_mbox_handler_ready(struct rvu *rvu, struct msg_req *req,
struct ready_msg_rsp *rsp)
{
if (rvu->fwdata) {
rsp->rclk_freq = rvu->fwdata->rclk;
rsp->sclk_freq = rvu->fwdata->sclk;
}
return 0;
}
@@ -2128,6 +2245,9 @@ static int rvu_register_interrupts(struct rvu *rvu)
}
rvu->irq_allocated[RVU_AF_INT_VEC_PFME] = true;
/* Clear TRPEND bit for all PF */
rvu_write64(rvu, BLKADDR_RVUM,
RVU_AF_PFTRPEND, INTR_MASK(rvu->hw->total_pfs));
/* Enable ME interrupt for all PFs*/
rvu_write64(rvu, BLKADDR_RVUM,
RVU_AF_PFME_INT, INTR_MASK(rvu->hw->total_pfs));
@@ -2439,17 +2559,13 @@ static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_disable_device;
}
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(48));
if (err) {
dev_err(dev, "Unable to set DMA mask\n");
goto err_release_regions;
}
err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48));
if (err) {
dev_err(dev, "Unable to set consistent DMA mask\n");
goto err_release_regions;
}
err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
if (err) {
dev_err(dev, "DMA mask config failed, abort\n");
goto err_release_regions;
}
pci_set_master(pdev);
/* Map Admin function CSRs */
rvu->afreg_base = pcim_iomap(pdev, PCI_AF_REG_BAR_NUM, 0);
@@ -2489,6 +2605,8 @@ static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (err)
goto err_flr;
rvu_setup_rvum_blk_revid(rvu);
/* Enable AF's VFs (if any) */
err = rvu_enable_sriov(rvu);
if (err)
@@ -2506,8 +2624,10 @@ static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
rvu_mbox_destroy(&rvu->afpf_wq_info);
err_hwsetup:
rvu_cgx_exit(rvu);
rvu_fwdata_exit(rvu);
rvu_reset_all_blocks(rvu);
rvu_free_hw_resources(rvu);
rvu_clear_rvum_blk_revid(rvu);
err_release_regions:
pci_release_regions(pdev);
err_disable_device:
@@ -2527,11 +2647,12 @@ static void rvu_remove(struct pci_dev *pdev)
rvu_unregister_interrupts(rvu);
rvu_flr_wq_destroy(rvu);
rvu_cgx_exit(rvu);
rvu_fwdata_exit(rvu);
rvu_mbox_destroy(&rvu->afpf_wq_info);
rvu_disable_sriov(rvu);
rvu_reset_all_blocks(rvu);
rvu_free_hw_resources(rvu);
rvu_clear_rvum_blk_revid(rvu);
pci_release_regions(pdev);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
...
@@ -269,6 +269,26 @@ struct mbox_wq_info {
struct workqueue_struct *mbox_wq;
};
struct rvu_fwdata {
#define RVU_FWDATA_HEADER_MAGIC 0xCFDA /* Custom Firmware Data*/
#define RVU_FWDATA_VERSION 0x0001
u32 header_magic;
u32 version; /* version id */
/* MAC address */
#define PF_MACNUM_MAX 32
#define VF_MACNUM_MAX 256
u64 pf_macs[PF_MACNUM_MAX];
u64 vf_macs[VF_MACNUM_MAX];
u64 sclk;
u64 rclk;
u64 mcam_addr;
u64 mcam_sz;
u64 msixtr_base;
#define FWDATA_RESERVED_MEM 1023
u64 reserved[FWDATA_RESERVED_MEM];
};
struct rvu {
void __iomem *afreg_base;
void __iomem *pfreg_base;
@@ -294,6 +314,7 @@ struct rvu {
char *irq_name;
bool *irq_allocated;
dma_addr_t msix_base_iova;
u64 msixtr_base_phy; /* Register reset value */
/* CGX */
#define PF_CGXMAP_BASE 1 /* PF 0 is reserved for RVU PF */
@@ -313,6 +334,9 @@ struct rvu {
char mkex_pfl_name[MKEX_NAME_LEN]; /* Configured MKEX profile name */
/* Firmware data */
struct rvu_fwdata *fwdata;
#ifdef CONFIG_DEBUG_FS
struct rvu_debugfs rvu_dbg;
#endif
@@ -363,6 +387,12 @@ static inline int is_afvf(u16 pcifunc)
return !(pcifunc & ~RVU_PFVF_FUNC_MASK);
}
static inline bool is_rvu_fwdata_valid(struct rvu *rvu)
{
return (rvu->fwdata->header_magic == RVU_FWDATA_HEADER_MAGIC) &&
(rvu->fwdata->version == RVU_FWDATA_VERSION);
}
int rvu_alloc_bitmap(struct rsrc_bmap *rsrc);
int rvu_alloc_rsrc(struct rsrc_bmap *rsrc);
void rvu_free_rsrc(struct rsrc_bmap *rsrc, int id);
...
@@ -590,6 +590,30 @@ int rvu_mbox_handler_cgx_intlbk_disable(struct rvu *rvu, struct msg_req *req,
return 0;
}
int rvu_mbox_handler_cgx_cfg_pause_frm(struct rvu *rvu,
struct cgx_pause_frm_cfg *req,
struct cgx_pause_frm_cfg *rsp)
{
int pf = rvu_get_pf(req->hdr.pcifunc);
u8 cgx_id, lmac_id;
/* This msg is expected only from PF/VFs that are mapped to CGX LMACs;
* if received from any other PF/VF, simply ACK, nothing to do.
*/
if (!is_pf_cgxmapped(rvu, pf))
return -ENODEV;
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
if (req->set)
cgx_lmac_set_pause_frm(rvu_cgx_pdata(cgx_id, rvu), lmac_id,
req->tx_pause, req->rx_pause);
else
cgx_lmac_get_pause_frm(rvu_cgx_pdata(cgx_id, rvu), lmac_id,
&rsp->tx_pause, &rsp->rx_pause);
return 0;
}
/* Finds cumulative status of NIX rx/tx counters from LF of a PF and those
* from its VFs as well. ie. NIX rx/tx counters at the CGX port level
*/
...
@@ -18,6 +18,8 @@
#include "cgx.h"
static int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add);
static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
int type, int chan_id);
enum mc_tbl_sz {
MC_TBL_SZ_256,
@@ -211,6 +213,11 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf)
pfvf->tx_chan_cnt = 1;
cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id, pkind);
rvu_npc_set_pkind(rvu, pkind, pfvf);
/* By default we enable pause frames */
if ((pcifunc & RVU_PFVF_FUNC_MASK) == 0)
cgx_lmac_set_pause_frm(rvu_cgx_pdata(cgx_id, rvu),
lmac_id, true, true);
break;
case NIX_INTF_TYPE_LBK:
vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
@@ -273,6 +280,142 @@ static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf)
rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
}
int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
struct nix_bp_cfg_req *req,
struct msg_rsp *rsp)
{
u16 pcifunc = req->hdr.pcifunc;
struct rvu_pfvf *pfvf;
int blkaddr, pf, type;
u16 chan_base, chan;
u64 cfg;
pf = rvu_get_pf(pcifunc);
type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
return 0;
pfvf = rvu_get_pfvf(rvu, pcifunc);
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
chan_base = pfvf->rx_chan_base + req->chan_base;
for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
cfg & ~BIT_ULL(16));
}
return 0;
}
static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
int type, int chan_id)
{
int bpid, blkaddr, lmac_chan_cnt;
struct rvu_hwinfo *hw = rvu->hw;
u16 cgx_bpid_cnt, lbk_bpid_cnt;
struct rvu_pfvf *pfvf;
u8 cgx_id, lmac_id;
u64 cfg;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc);
cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
lmac_chan_cnt = cfg & 0xFF;
cgx_bpid_cnt = hw->cgx_links * lmac_chan_cnt;
lbk_bpid_cnt = hw->lbk_links * ((cfg >> 16) & 0xFF);
pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
/* Backpressure IDs range division
* CGX channels are mapped to (0 - 191) BPIDs
* LBK channels are mapped to (192 - 255) BPIDs
* SDP channels are mapped to (256 - 511) BPIDs
*
* Lmac channels and bpids mapped as follows
* cgx(0)_lmac(0)_chan(0 - 15) = bpid(0 - 15)
* cgx(0)_lmac(1)_chan(0 - 15) = bpid(16 - 31) ....
* cgx(1)_lmac(0)_chan(0 - 15) = bpid(64 - 79) ....
*/
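/* Worked example (assuming 16 channels per LMAC and 4 LMACs per CGX, as
 * in the mapping above): cgx(1)_lmac(2) with chan_base 0 gets
 * bpid = (1 * 4 * 16) + (2 * 16) + 0 = 96.
 */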
switch (type) {
case NIX_INTF_TYPE_CGX:
if ((req->chan_base + req->chan_cnt) > 15)
return -EINVAL;
rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
/* Assign bpid based on cgx, lmac and chan id */
bpid = (cgx_id * hw->lmac_per_cgx * lmac_chan_cnt) +
(lmac_id * lmac_chan_cnt) + req->chan_base;
if (req->bpid_per_chan)
bpid += chan_id;
if (bpid > cgx_bpid_cnt)
return -EINVAL;
break;
case NIX_INTF_TYPE_LBK:
if ((req->chan_base + req->chan_cnt) > 63)
return -EINVAL;
bpid = cgx_bpid_cnt + req->chan_base;
if (req->bpid_per_chan)
bpid += chan_id;
if (bpid > (cgx_bpid_cnt + lbk_bpid_cnt))
return -EINVAL;
break;
default:
return -EINVAL;
}
return bpid;
}
int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu,
struct nix_bp_cfg_req *req,
struct nix_bp_cfg_rsp *rsp)
{
int blkaddr, pf, type, chan_id = 0;
u16 pcifunc = req->hdr.pcifunc;
struct rvu_pfvf *pfvf;
u16 chan_base, chan;
s16 bpid, bpid_base;
u64 cfg;
pf = rvu_get_pf(pcifunc);
type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
/* Enable backpressure only for CGX mapped PFs and LBK interface */
if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
return 0;
pfvf = rvu_get_pfvf(rvu, pcifunc);
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
bpid_base = rvu_nix_get_bpid(rvu, req, type, chan_id);
chan_base = pfvf->rx_chan_base + req->chan_base;
bpid = bpid_base;
for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
if (bpid < 0) {
dev_warn(rvu->dev, "Failed to enable backpressure\n");
return -EINVAL;
}
cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
cfg | (bpid & 0xFF) | BIT_ULL(16));
chan_id++;
bpid = rvu_nix_get_bpid(rvu, req, type, chan_id);
}
for (chan = 0; chan < req->chan_cnt; chan++) {
/* Map channel and bpid assign to it */
rsp->chan_bpid[chan] = ((req->chan_base + chan) & 0x7F) << 10 |
(bpid_base & 0x3FF);
if (req->bpid_per_chan)
bpid_base++;
}
rsp->chan_cnt = req->chan_cnt;
return 0;
}
static void nix_setup_lso_tso_l3(struct rvu *rvu, int blkaddr,
u64 format, bool v4, u64 *fidx)
{
@@ -565,6 +708,11 @@ static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
*/
inst.res_addr = (u64)aq->res->iova;
/* Hardware uses same aq->res->base for updating result of
* previous instruction hence wait here till it is done.
*/
spin_lock(&aq->lock);
/* Clean result + context memory */
memset(aq->res->base, 0, aq->res->entry_sz);
/* Context needs to be written at RES_ADDR + 128 */
@@ -609,11 +757,10 @@ static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
break;
default:
rc = NIX_AF_ERR_AQ_ENQUEUE;
spin_unlock(&aq->lock);
return rc;
}
spin_lock(&aq->lock);
/* Submit the instruction to AQ */
rc = nix_aq_enqueue_wait(rvu, block, &inst);
if (rc) {
@@ -718,6 +865,8 @@ static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
if (req->ctype == NIX_AQ_CTYPE_CQ) {
aq_req.cq.ena = 0;
aq_req.cq_mask.ena = 1;
aq_req.cq.bp_ena = 0;
aq_req.cq_mask.bp_ena = 1;
q_cnt = pfvf->cq_ctx->qsize;
bmap = pfvf->cq_bmap;
}
@@ -3061,6 +3210,9 @@ int rvu_nix_init(struct rvu *rvu)
/* Initialize CGX/LBK/SDP link credits, min/max pkt lengths */
nix_link_config(rvu, blkaddr);
/* Enable Channel backpressure */
rvu_write64(rvu, blkaddr, NIX_AF_RX_CFG, BIT_ULL(0));
}
return 0;
}
...
@@ -94,6 +94,11 @@ int rvu_npa_aq_enq_inst(struct rvu *rvu, struct npa_aq_enq_req *req,
*/
inst.res_addr = (u64)aq->res->iova;
/* Hardware uses same aq->res->base for updating result of
* previous instruction hence wait here till it is done.
*/
spin_lock(&aq->lock);
/* Clean result + context memory */
memset(aq->res->base, 0, aq->res->entry_sz);
/* Context needs to be written at RES_ADDR + 128 */
@@ -138,10 +143,10 @@ int rvu_npa_aq_enq_inst(struct rvu *rvu, struct npa_aq_enq_req *req,
break;
}
if (rc)
if (rc) {
spin_unlock(&aq->lock);
return rc;
}
spin_lock(&aq->lock);
/* Submit the instruction to AQ */
rc = npa_aq_enqueue_wait(rvu, block, &inst);
@@ -218,6 +223,8 @@ static int npa_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
} else if (req->ctype == NPA_AQ_CTYPE_AURA) {
aq_req.aura.ena = 0;
aq_req.aura_mask.ena = 1;
aq_req.aura.bp_ena = 0;
aq_req.aura_mask.bp_ena = 1;
cnt = pfvf->aura_ctx->qsize;
bmap = pfvf->aura_bmap;
}
...
@@ -825,8 +825,10 @@ static void npc_load_mkex_profile(struct rvu *rvu, int blkaddr)
if (!strncmp(mkex_profile, "default", MKEX_NAME_LEN))
goto load_default;
if (cgx_get_mkex_prfl_info(&prfl_addr, &prfl_sz))
if (!rvu->fwdata)
goto load_default;
prfl_addr = rvu->fwdata->mcam_addr;
prfl_sz = rvu->fwdata->mcam_sz;
if (!prfl_addr || !prfl_sz)
goto load_default;
...
@@ -11,6 +11,9 @@
#ifndef RVU_STRUCT_H
#define RVU_STRUCT_H
/* RVU Block revision IDs */
#define RVU_BLK_RVUM_REVID 0x01
/* RVU Block Address Enumeration */
enum rvu_block_addr_e {
BLKADDR_RVUM = 0x0ULL,
...
@@ -220,6 +220,25 @@ int otx2_hw_set_mtu(struct otx2_nic *pfvf, int mtu)
return err;
}
int otx2_config_pause_frm(struct otx2_nic *pfvf)
{
struct cgx_pause_frm_cfg *req;
int err;
otx2_mbox_lock(&pfvf->mbox);
req = otx2_mbox_alloc_msg_cgx_cfg_pause_frm(&pfvf->mbox);
if (!req)
return -ENOMEM;
req->rx_pause = !!(pfvf->flags & OTX2_FLAG_RX_PAUSE_ENABLED);
req->tx_pause = !!(pfvf->flags & OTX2_FLAG_TX_PAUSE_ENABLED);
req->set = 1;
err = otx2_sync_mbox_msg(&pfvf->mbox);
otx2_mbox_unlock(&pfvf->mbox);
return err;
}
int otx2_set_flowkey_cfg(struct otx2_nic *pfvf)
{
struct otx2_rss_info *rss = &pfvf->hw.rss_info;
@@ -580,6 +599,7 @@ void otx2_sqb_flush(struct otx2_nic *pfvf)
* RED accepts pkts if free pointers > 102 & <= 205.
* Drops pkts if free pointers < 102.
*/
#define RQ_BP_LVL_AURA (255 - ((85 * 256) / 100)) /* BP when 85% is full */
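/* For a 256-pointer aura: (85 * 256) / 100 = 217, so the programmed level
 * is 255 - 217 = 38; backpressure asserts once free pointers fall to
 * roughly 15% of the aura, i.e. when it is about 85% occupied.
 */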
#define RQ_PASS_LVL_AURA (255 - ((95 * 256) / 100)) /* RED when 95% is full */
#define RQ_DROP_LVL_AURA (255 - ((99 * 256) / 100)) /* Drop when 99% is full */
@@ -741,6 +761,13 @@ static int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx)
if (qidx < pfvf->hw.rx_queues) {
aq->cq.drop = RQ_DROP_LVL_CQ(pfvf->hw.rq_skid, cq->cqe_cnt);
aq->cq.drop_ena = 1;
/* Enable receive CQ backpressure */
aq->cq.bp_ena = 1;
aq->cq.bpid = pfvf->bpid[0];
/* Set backpressure level to be the same as the cq pass level */
aq->cq.bp = RQ_PASS_LVL_CQ(pfvf->hw.rq_skid, qset->rqe_cnt);
}
/* Fill AQ info */
@@ -996,6 +1023,14 @@ static int otx2_aura_init(struct otx2_nic *pfvf, int aura_id,
aq->aura.fc_addr = pool->fc_addr->iova;
aq->aura.fc_hyst_bits = 0; /* Store count on all updates */
/* Enable backpressure for RQ aura */
if (aura_id < pfvf->hw.rqpool_cnt) {
aq->aura.bp_ena = 0;
aq->aura.nix0_bpid = pfvf->bpid[0];
/* Set backpressure level for RQ's Aura */
aq->aura.bp = RQ_BP_LVL_AURA;
}
/* Fill AQ info */
aq->ctype = NPA_AQ_CTYPE_AURA;
aq->op = NPA_AQ_INSTOP_INIT;
@@ -1307,6 +1342,25 @@ void otx2_ctx_disable(struct mbox *mbox, int type, bool npa)
otx2_mbox_unlock(mbox);
}
int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable)
{
struct nix_bp_cfg_req *req;
if (enable)
req = otx2_mbox_alloc_msg_nix_bp_enable(&pfvf->mbox);
else
req = otx2_mbox_alloc_msg_nix_bp_disable(&pfvf->mbox);
if (!req)
return -ENOMEM;
req->chan_base = 0;
req->chan_cnt = 1;
req->bpid_per_chan = 0;
return otx2_sync_mbox_msg(&pfvf->mbox);
}
/* Mbox message handlers */
void mbox_handler_cgx_stats(struct otx2_nic *pfvf,
struct cgx_stats_rsp *rsp)
@@ -1355,6 +1409,17 @@ void mbox_handler_msix_offset(struct otx2_nic *pfvf,
pfvf->hw.nix_msixoff = rsp->nix_msixoff;
}
void mbox_handler_nix_bp_enable(struct otx2_nic *pfvf,
struct nix_bp_cfg_rsp *rsp)
{
int chan, chan_id;
for (chan = 0; chan < rsp->chan_cnt; chan++) {
chan_id = ((rsp->chan_bpid[chan] >> 10) & 0x7F);
pfvf->bpid[chan_id] = rsp->chan_bpid[chan] & 0x3FF;
}
}
void otx2_free_cints(struct otx2_nic *pfvf, int n)
{
struct otx2_qset *qset = &pfvf->qset;
...
@@ -204,6 +204,8 @@ struct otx2_nic {
u16 rbsize; /* Receive buffer size */
#define OTX2_FLAG_INTF_DOWN BIT_ULL(2)
#define OTX2_FLAG_RX_PAUSE_ENABLED BIT_ULL(9)
#define OTX2_FLAG_TX_PAUSE_ENABLED BIT_ULL(10)
u64 flags;
struct otx2_qset qset;
@@ -216,6 +218,7 @@ struct otx2_nic {
struct workqueue_struct *mbox_wq;
u16 pcifunc; /* RVU PF_FUNC */
u16 bpid[NIX_MAX_BPID_CHAN];
struct cgx_link_user_info linfo;
u64 reset_count;
@@ -558,6 +561,7 @@ int otx2_hw_set_mtu(struct otx2_nic *pfvf, int mtu);
void otx2_tx_timeout(struct net_device *netdev, unsigned int txq);
void otx2_get_mac_from_af(struct net_device *netdev);
void otx2_config_irq_coalescing(struct otx2_nic *pfvf, int qidx);
int otx2_config_pause_frm(struct otx2_nic *pfvf);
/* RVU block related APIs */
int otx2_attach_npa_nix(struct otx2_nic *pfvf);
@@ -578,6 +582,7 @@ dma_addr_t otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
gfp_t gfp);
int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable);
void otx2_ctx_disable(struct mbox *mbox, int type, bool npa);
int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable);
void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq);
void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq);
@@ -598,6 +603,8 @@ void mbox_handler_nix_txsch_alloc(struct otx2_nic *pf,
struct nix_txsch_alloc_rsp *rsp);
void mbox_handler_cgx_stats(struct otx2_nic *pfvf,
struct cgx_stats_rsp *rsp);
void mbox_handler_nix_bp_enable(struct otx2_nic *pfvf,
struct nix_bp_cfg_rsp *rsp);
/* Device stats APIs */
void otx2_get_dev_stats(struct otx2_nic *pfvf);
...
@@ -253,6 +253,45 @@ static int otx2_set_channels(struct net_device *dev,
return err;
}
static void otx2_get_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *pause)
{
struct otx2_nic *pfvf = netdev_priv(netdev);
struct cgx_pause_frm_cfg *req, *rsp;
req = otx2_mbox_alloc_msg_cgx_cfg_pause_frm(&pfvf->mbox);
if (!req)
return;
if (!otx2_sync_mbox_msg(&pfvf->mbox)) {
rsp = (struct cgx_pause_frm_cfg *)
otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
pause->rx_pause = rsp->rx_pause;
pause->tx_pause = rsp->tx_pause;
}
}
static int otx2_set_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *pause)
{
struct otx2_nic *pfvf = netdev_priv(netdev);
if (pause->autoneg)
return -EOPNOTSUPP;
if (pause->rx_pause)
pfvf->flags |= OTX2_FLAG_RX_PAUSE_ENABLED;
else
pfvf->flags &= ~OTX2_FLAG_RX_PAUSE_ENABLED;
if (pause->tx_pause)
pfvf->flags |= OTX2_FLAG_TX_PAUSE_ENABLED;
else
pfvf->flags &= ~OTX2_FLAG_TX_PAUSE_ENABLED;
return otx2_config_pause_frm(pfvf);
}
static void otx2_get_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring)
{
@@ -654,6 +693,8 @@ static const struct ethtool_ops otx2_ethtool_ops = {
.set_rxfh = otx2_set_rxfh,
.get_msglevel = otx2_get_msglevel,
.set_msglevel = otx2_set_msglevel,
.get_pauseparam = otx2_get_pauseparam,
.set_pauseparam = otx2_set_pauseparam,
};
void otx2_set_ethtool_ops(struct net_device *netdev)
...
@@ -148,6 +148,9 @@ static void otx2_process_pfaf_mbox_msg(struct otx2_nic *pf,
mbox_handler_nix_txsch_alloc(pf,
(struct nix_txsch_alloc_rsp *)msg);
break;
case MBOX_MSG_NIX_BP_ENABLE:
mbox_handler_nix_bp_enable(pf, (struct nix_bp_cfg_rsp *)msg);
break;
case MBOX_MSG_CGX_STATS:
mbox_handler_cgx_stats(pf, (struct cgx_stats_rsp *)msg);
break;
@@ -654,6 +657,9 @@ static int otx2_init_hw_resources(struct otx2_nic *pf)
if (err)
goto err_free_npa_lf;
/* Enable backpressure */
otx2_nix_config_bp(pf, true);
/* Init Auras and pools used by NIX RQ, for free buffer ptrs */
err = otx2_rq_aura_pool_init(pf);
if (err) {
@@ -737,6 +743,12 @@ static void otx2_free_hw_resources(struct otx2_nic *pf)
if (err)
dev_err(pf->dev, "RVUPF: Failed to stop/free TX schedulers\n");
otx2_mbox_lock(mbox);
/* Disable backpressure */
if (!(pf->pcifunc & RVU_PFVF_FUNC_MASK))
otx2_nix_config_bp(pf, false);
otx2_mbox_unlock(mbox);
/* Disable RQs */
otx2_ctx_disable(mbox, NIX_AQ_CTYPE_RQ, false);
...