Commit b8aebeaa authored by Geetha sowjanya, committed by Paolo Abeni

octeontx2-af: mcs: Fix MCS block interrupt

On CN10KB, the MCS IP vector number and the BBE and PAB interrupt
masks have changed to support more block-level interrupts.
To handle these changes, this patch fixes the BBE and PAB
interrupt handlers.

Fixes: 6c635f78 ("octeontx2-af: cn10k: mcs: Handle MCS block interrupts")
Signed-off-by: Sunil Goutham <sgoutham@marvell.com>
Signed-off-by: Geetha sowjanya <gakula@marvell.com>
Reviewed-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
parent 65cdc2b6
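
The patch routes BBE/PAB interrupt handling through the driver's per-silicon mcs_ops function pointers and stores the silicon-specific IP MSI-X vector in hwinfo (MCS_CN10KB_INT_VEC_IP vs MCS_CNF10KB_INT_VEC_IP), so CN10KB and CNF10KB each interpret their own interrupt masks. The snippet below is a minimal, self-contained user-space sketch of that dispatch pattern only; the types are simplified, the handler bodies are hypothetical stand-ins (printf instead of dev_warn/request_irq), and it is not the driver code, which follows in the diff.

/* Illustrative sketch of the per-silicon ops dispatch; not driver code. */
#include <stdint.h>
#include <stdio.h>

enum mcs_direction { MCS_RX, MCS_TX };

struct mcs;

struct mcs_ops {                        /* per-silicon callbacks */
	void (*mcs_bbe_intr_handler)(struct mcs *mcs, uint64_t intr, enum mcs_direction dir);
	void (*mcs_pab_intr_handler)(struct mcs *mcs, uint64_t intr, enum mcs_direction dir);
};

struct hwinfo { uint16_t ip_vec; };     /* silicon-specific IP interrupt vector */

struct mcs {
	struct hwinfo hw;
	const struct mcs_ops *mcs_ops;
};

/* Hypothetical CN10KB handlers: stand-ins for the real per-lmac overflow logging */
static void cn10kb_bbe(struct mcs *mcs, uint64_t intr, enum mcs_direction dir)
{
	(void)mcs;
	printf("CN10KB BBE handler: intr=%#llx dir=%d\n", (unsigned long long)intr, dir);
}

static void cn10kb_pab(struct mcs *mcs, uint64_t intr, enum mcs_direction dir)
{
	(void)mcs;
	printf("CN10KB PAB handler: intr=%#llx dir=%d\n", (unsigned long long)intr, dir);
}

static const struct mcs_ops cn10kb_ops = {
	.mcs_bbe_intr_handler = cn10kb_bbe,
	.mcs_pab_intr_handler = cn10kb_pab,
};

int main(void)
{
	/* 0x53 mirrors MCS_CN10KB_INT_VEC_IP; CNF10KB would use 0x13 instead. */
	struct mcs mcs = { .hw = { .ip_vec = 0x53 }, .mcs_ops = &cn10kb_ops };

	/* The shared IRQ path dispatches through the ops table, so each
	 * silicon applies its own BBE/PAB interrupt masks and registers.
	 */
	mcs.mcs_ops->mcs_bbe_intr_handler(&mcs, 0x6ULL, MCS_RX);
	mcs.mcs_ops->mcs_pab_intr_handler(&mcs, 0x1ULL, MCS_TX);
	printf("the IRQ would be requested on vector %#x for this silicon\n", mcs.hw.ip_vec);
	return 0;
}
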
@@ -936,60 +936,42 @@ static void mcs_tx_misc_intr_handler(struct mcs *mcs, u64 intr)
 	mcs_add_intr_wq_entry(mcs, &event);
 }
 
-static void mcs_bbe_intr_handler(struct mcs *mcs, u64 intr, enum mcs_direction dir)
+void cn10kb_mcs_bbe_intr_handler(struct mcs *mcs, u64 intr,
+				 enum mcs_direction dir)
 {
-	struct mcs_intr_event event = { 0 };
-	int i;
+	u64 val, reg;
+	int lmac;
 
-	if (!(intr & MCS_BBE_INT_MASK))
+	if (!(intr & 0x6ULL))
 		return;
 
-	event.mcs_id = mcs->mcs_id;
-	event.pcifunc = mcs->pf_map[0];
+	if (intr & BIT_ULL(1))
+		reg = (dir == MCS_RX) ? MCSX_BBE_RX_SLAVE_DFIFO_OVERFLOW_0 :
+					MCSX_BBE_TX_SLAVE_DFIFO_OVERFLOW_0;
+	else
+		reg = (dir == MCS_RX) ? MCSX_BBE_RX_SLAVE_PLFIFO_OVERFLOW_0 :
+					MCSX_BBE_TX_SLAVE_PLFIFO_OVERFLOW_0;
+	val = mcs_reg_read(mcs, reg);
 
-	for (i = 0; i < MCS_MAX_BBE_INT; i++) {
-		if (!(intr & BIT_ULL(i)))
+	/* policy/data over flow occurred */
+	for (lmac = 0; lmac < mcs->hw->lmac_cnt; lmac++) {
+		if (!(val & BIT_ULL(lmac)))
 			continue;
-
-		/* Lower nibble denotes data fifo overflow interrupts and
-		 * upper nibble indicates policy fifo overflow interrupts.
-		 */
-		if (intr & 0xFULL)
-			event.intr_mask = (dir == MCS_RX) ?
-					  MCS_BBE_RX_DFIFO_OVERFLOW_INT :
-					  MCS_BBE_TX_DFIFO_OVERFLOW_INT;
-		else
-			event.intr_mask = (dir == MCS_RX) ?
-					  MCS_BBE_RX_PLFIFO_OVERFLOW_INT :
-					  MCS_BBE_TX_PLFIFO_OVERFLOW_INT;
-
-		/* Notify the lmac_id info which ran into BBE fatal error */
-		event.lmac_id = i & 0x3ULL;
-		mcs_add_intr_wq_entry(mcs, &event);
+		dev_warn(mcs->dev, "BEE:Policy or data overflow occurred on lmac:%d\n", lmac);
 	}
 }
 
-static void mcs_pab_intr_handler(struct mcs *mcs, u64 intr, enum mcs_direction dir)
+void cn10kb_mcs_pab_intr_handler(struct mcs *mcs, u64 intr,
+				 enum mcs_direction dir)
 {
-	struct mcs_intr_event event = { 0 };
-	int i;
+	int lmac;
 
-	if (!(intr & MCS_PAB_INT_MASK))
+	if (!(intr & 0xFFFFFULL))
 		return;
 
-	event.mcs_id = mcs->mcs_id;
-	event.pcifunc = mcs->pf_map[0];
-
-	for (i = 0; i < MCS_MAX_PAB_INT; i++) {
-		if (!(intr & BIT_ULL(i)))
-			continue;
-
-		event.intr_mask = (dir == MCS_RX) ? MCS_PAB_RX_CHAN_OVERFLOW_INT :
-				  MCS_PAB_TX_CHAN_OVERFLOW_INT;
-
-		/* Notify the lmac_id info which ran into PAB fatal error */
-		event.lmac_id = i;
-		mcs_add_intr_wq_entry(mcs, &event);
+	for (lmac = 0; lmac < mcs->hw->lmac_cnt; lmac++) {
+		if (intr & BIT_ULL(lmac))
+			dev_warn(mcs->dev, "PAB: overflow occurred on lmac:%d\n", lmac);
 	}
 }
@@ -998,9 +980,8 @@ static irqreturn_t mcs_ip_intr_handler(int irq, void *mcs_irq)
 	struct mcs *mcs = (struct mcs *)mcs_irq;
 	u64 intr, cpm_intr, bbe_intr, pab_intr;
 
-	/* Disable and clear the interrupt */
+	/* Disable the interrupt */
 	mcs_reg_write(mcs, MCSX_IP_INT_ENA_W1C, BIT_ULL(0));
-	mcs_reg_write(mcs, MCSX_IP_INT, BIT_ULL(0));
 
 	/* Check which block has interrupt*/
 	intr = mcs_reg_read(mcs, MCSX_TOP_SLAVE_INT_SUM);
@@ -1047,7 +1028,7 @@ static irqreturn_t mcs_ip_intr_handler(int irq, void *mcs_irq)
 	/* BBE RX */
 	if (intr & MCS_BBE_RX_INT_ENA) {
 		bbe_intr = mcs_reg_read(mcs, MCSX_BBE_RX_SLAVE_BBE_INT);
-		mcs_bbe_intr_handler(mcs, bbe_intr, MCS_RX);
+		mcs->mcs_ops->mcs_bbe_intr_handler(mcs, bbe_intr, MCS_RX);
 
 		/* Clear the interrupt */
 		mcs_reg_write(mcs, MCSX_BBE_RX_SLAVE_BBE_INT_INTR_RW, 0);
@@ -1057,7 +1038,7 @@ static irqreturn_t mcs_ip_intr_handler(int irq, void *mcs_irq)
 	/* BBE TX */
 	if (intr & MCS_BBE_TX_INT_ENA) {
 		bbe_intr = mcs_reg_read(mcs, MCSX_BBE_TX_SLAVE_BBE_INT);
-		mcs_bbe_intr_handler(mcs, bbe_intr, MCS_TX);
+		mcs->mcs_ops->mcs_bbe_intr_handler(mcs, bbe_intr, MCS_TX);
 
 		/* Clear the interrupt */
 		mcs_reg_write(mcs, MCSX_BBE_TX_SLAVE_BBE_INT_INTR_RW, 0);
@@ -1067,7 +1048,7 @@ static irqreturn_t mcs_ip_intr_handler(int irq, void *mcs_irq)
 	/* PAB RX */
 	if (intr & MCS_PAB_RX_INT_ENA) {
 		pab_intr = mcs_reg_read(mcs, MCSX_PAB_RX_SLAVE_PAB_INT);
-		mcs_pab_intr_handler(mcs, pab_intr, MCS_RX);
+		mcs->mcs_ops->mcs_pab_intr_handler(mcs, pab_intr, MCS_RX);
 
 		/* Clear the interrupt */
 		mcs_reg_write(mcs, MCSX_PAB_RX_SLAVE_PAB_INT_INTR_RW, 0);
@@ -1077,14 +1058,15 @@ static irqreturn_t mcs_ip_intr_handler(int irq, void *mcs_irq)
 	/* PAB TX */
 	if (intr & MCS_PAB_TX_INT_ENA) {
 		pab_intr = mcs_reg_read(mcs, MCSX_PAB_TX_SLAVE_PAB_INT);
-		mcs_pab_intr_handler(mcs, pab_intr, MCS_TX);
+		mcs->mcs_ops->mcs_pab_intr_handler(mcs, pab_intr, MCS_TX);
 
 		/* Clear the interrupt */
 		mcs_reg_write(mcs, MCSX_PAB_TX_SLAVE_PAB_INT_INTR_RW, 0);
 		mcs_reg_write(mcs, MCSX_PAB_TX_SLAVE_PAB_INT, pab_intr);
 	}
 
-	/* Enable the interrupt */
+	/* Clear and enable the interrupt */
+	mcs_reg_write(mcs, MCSX_IP_INT, BIT_ULL(0));
 	mcs_reg_write(mcs, MCSX_IP_INT_ENA_W1S, BIT_ULL(0));
 
 	return IRQ_HANDLED;
@@ -1166,7 +1148,7 @@ static int mcs_register_interrupts(struct mcs *mcs)
 		return ret;
 	}
 
-	ret = request_irq(pci_irq_vector(mcs->pdev, MCS_INT_VEC_IP),
+	ret = request_irq(pci_irq_vector(mcs->pdev, mcs->hw->ip_vec),
 			  mcs_ip_intr_handler, 0, "MCS_IP", mcs);
 	if (ret) {
 		dev_err(mcs->dev, "MCS IP irq registration failed\n");
@@ -1185,11 +1167,11 @@ static int mcs_register_interrupts(struct mcs *mcs)
 	mcs_reg_write(mcs, MCSX_CPM_TX_SLAVE_TX_INT_ENB, 0x7ULL);
 	mcs_reg_write(mcs, MCSX_CPM_RX_SLAVE_RX_INT_ENB, 0x7FULL);
 
-	mcs_reg_write(mcs, MCSX_BBE_RX_SLAVE_BBE_INT_ENB, 0xff);
-	mcs_reg_write(mcs, MCSX_BBE_TX_SLAVE_BBE_INT_ENB, 0xff);
+	mcs_reg_write(mcs, MCSX_BBE_RX_SLAVE_BBE_INT_ENB, 0xFFULL);
+	mcs_reg_write(mcs, MCSX_BBE_TX_SLAVE_BBE_INT_ENB, 0xFFULL);
 
-	mcs_reg_write(mcs, MCSX_PAB_RX_SLAVE_PAB_INT_ENB, 0xff);
-	mcs_reg_write(mcs, MCSX_PAB_TX_SLAVE_PAB_INT_ENB, 0xff);
+	mcs_reg_write(mcs, MCSX_PAB_RX_SLAVE_PAB_INT_ENB, 0xFFFFFULL);
+	mcs_reg_write(mcs, MCSX_PAB_TX_SLAVE_PAB_INT_ENB, 0xFFFFFULL);
 
 	mcs->tx_sa_active = alloc_mem(mcs, mcs->hw->sc_entries);
 	if (!mcs->tx_sa_active) {
@@ -1200,7 +1182,7 @@ static int mcs_register_interrupts(struct mcs *mcs)
 	return ret;
 
 free_irq:
-	free_irq(pci_irq_vector(mcs->pdev, MCS_INT_VEC_IP), mcs);
+	free_irq(pci_irq_vector(mcs->pdev, mcs->hw->ip_vec), mcs);
 exit:
 	pci_free_irq_vectors(mcs->pdev);
 	mcs->num_vec = 0;
@@ -1497,6 +1479,7 @@ void cn10kb_mcs_set_hw_capabilities(struct mcs *mcs)
 	hw->lmac_cnt = 20;		/* lmacs/ports per mcs block */
 	hw->mcs_x2p_intf = 5;		/* x2p clabration intf */
 	hw->mcs_blks = 1;		/* MCS blocks */
+	hw->ip_vec = MCS_CN10KB_INT_VEC_IP; /* IP vector */
 }
 
 static struct mcs_ops cn10kb_mcs_ops = {
@@ -1505,6 +1488,8 @@ static struct mcs_ops cn10kb_mcs_ops = {
 	.mcs_tx_sa_mem_map_write	= cn10kb_mcs_tx_sa_mem_map_write,
 	.mcs_rx_sa_mem_map_write	= cn10kb_mcs_rx_sa_mem_map_write,
 	.mcs_flowid_secy_map		= cn10kb_mcs_flowid_secy_map,
+	.mcs_bbe_intr_handler		= cn10kb_mcs_bbe_intr_handler,
+	.mcs_pab_intr_handler		= cn10kb_mcs_pab_intr_handler,
 };
 
 static int mcs_probe(struct pci_dev *pdev, const struct pci_device_id *id)
@@ -1605,7 +1590,7 @@ static void mcs_remove(struct pci_dev *pdev)
 	/* Set MCS to external bypass */
 	mcs_set_external_bypass(mcs, true);
 
-	free_irq(pci_irq_vector(pdev, MCS_INT_VEC_IP), mcs);
+	free_irq(pci_irq_vector(pdev, mcs->hw->ip_vec), mcs);
 	pci_free_irq_vectors(pdev);
 	pci_release_regions(pdev);
 	pci_disable_device(pdev);
...
@@ -43,24 +43,15 @@
 /* Reserved resources for default bypass entry */
 #define MCS_RSRC_RSVD_CNT		1
 
-/* MCS Interrupt Vector Enumeration */
-enum mcs_int_vec_e {
-	MCS_INT_VEC_MIL_RX_GBL		= 0x0,
-	MCS_INT_VEC_MIL_RX_LMACX	= 0x1,
-	MCS_INT_VEC_MIL_TX_LMACX	= 0x5,
-	MCS_INT_VEC_HIL_RX_GBL		= 0x9,
-	MCS_INT_VEC_HIL_RX_LMACX	= 0xa,
-	MCS_INT_VEC_HIL_TX_GBL		= 0xe,
-	MCS_INT_VEC_HIL_TX_LMACX	= 0xf,
-	MCS_INT_VEC_IP			= 0x13,
-	MCS_INT_VEC_CNT			= 0x14,
-};
+/* MCS Interrupt Vector */
+#define MCS_CNF10KB_INT_VEC_IP	0x13
+#define MCS_CN10KB_INT_VEC_IP	0x53
 
 #define MCS_MAX_BBE_INT			8ULL
 #define MCS_BBE_INT_MASK		0xFFULL
 
-#define MCS_MAX_PAB_INT			4ULL
-#define MCS_PAB_INT_MASK		0xFULL
+#define MCS_MAX_PAB_INT		8ULL
+#define MCS_PAB_INT_MASK	0xFULL
 
 #define MCS_BBE_RX_INT_ENA		BIT_ULL(0)
 #define MCS_BBE_TX_INT_ENA		BIT_ULL(1)
@@ -137,6 +128,7 @@ struct hwinfo {
 	u8 lmac_cnt;
 	u8 mcs_blks;
 	unsigned long	lmac_bmap; /* bitmap of enabled mcs lmac */
+	u16 ip_vec;
 };
 
 struct mcs {
@@ -165,6 +157,8 @@ struct mcs_ops {
 	void	(*mcs_tx_sa_mem_map_write)(struct mcs *mcs, struct mcs_tx_sc_sa_map *map);
 	void	(*mcs_rx_sa_mem_map_write)(struct mcs *mcs, struct mcs_rx_sc_sa_map *map);
 	void	(*mcs_flowid_secy_map)(struct mcs *mcs, struct secy_mem_map *map, int dir);
+	void	(*mcs_bbe_intr_handler)(struct mcs *mcs, u64 intr, enum mcs_direction dir);
+	void	(*mcs_pab_intr_handler)(struct mcs *mcs, u64 intr, enum mcs_direction dir);
 };
 
 extern struct pci_driver mcs_driver;
@@ -219,6 +213,8 @@ void cn10kb_mcs_tx_sa_mem_map_write(struct mcs *mcs, struct mcs_tx_sc_sa_map *ma
 void cn10kb_mcs_flowid_secy_map(struct mcs *mcs, struct secy_mem_map *map, int dir);
 void cn10kb_mcs_rx_sa_mem_map_write(struct mcs *mcs, struct mcs_rx_sc_sa_map *map);
 void cn10kb_mcs_parser_cfg(struct mcs *mcs);
+void cn10kb_mcs_pab_intr_handler(struct mcs *mcs, u64 intr, enum mcs_direction dir);
+void cn10kb_mcs_bbe_intr_handler(struct mcs *mcs, u64 intr, enum mcs_direction dir);
 
 /* CNF10K-B APIs */
 struct mcs_ops *cnf10kb_get_mac_ops(void);
@@ -229,6 +225,8 @@ void cnf10kb_mcs_rx_sa_mem_map_write(struct mcs *mcs, struct mcs_rx_sc_sa_map *m
 void cnf10kb_mcs_parser_cfg(struct mcs *mcs);
 void cnf10kb_mcs_tx_pn_thresh_reached_handler(struct mcs *mcs);
 void cnf10kb_mcs_tx_pn_wrapped_handler(struct mcs *mcs);
+void cnf10kb_mcs_bbe_intr_handler(struct mcs *mcs, u64 intr, enum mcs_direction dir);
+void cnf10kb_mcs_pab_intr_handler(struct mcs *mcs, u64 intr, enum mcs_direction dir);
 
 /* Stats APIs */
 void mcs_get_sc_stats(struct mcs *mcs, struct mcs_sc_stats *stats, int id, int dir);
...
@@ -13,6 +13,8 @@ static struct mcs_ops cnf10kb_mcs_ops = {
 	.mcs_tx_sa_mem_map_write	= cnf10kb_mcs_tx_sa_mem_map_write,
 	.mcs_rx_sa_mem_map_write	= cnf10kb_mcs_rx_sa_mem_map_write,
 	.mcs_flowid_secy_map		= cnf10kb_mcs_flowid_secy_map,
+	.mcs_bbe_intr_handler		= cnf10kb_mcs_bbe_intr_handler,
+	.mcs_pab_intr_handler		= cnf10kb_mcs_pab_intr_handler,
 };
 
 struct mcs_ops *cnf10kb_get_mac_ops(void)
@@ -31,6 +33,7 @@ void cnf10kb_mcs_set_hw_capabilities(struct mcs *mcs)
 	hw->lmac_cnt = 4;		/* lmacs/ports per mcs block */
 	hw->mcs_x2p_intf = 1;		/* x2p clabration intf */
 	hw->mcs_blks = 7;		/* MCS blocks */
+	hw->ip_vec = MCS_CNF10KB_INT_VEC_IP; /* IP vector */
 }
 
 void cnf10kb_mcs_parser_cfg(struct mcs *mcs)
@@ -212,3 +215,63 @@ void cnf10kb_mcs_tx_pn_wrapped_handler(struct mcs *mcs)
 		mcs_add_intr_wq_entry(mcs, &event);
 	}
 }
+
+void cnf10kb_mcs_bbe_intr_handler(struct mcs *mcs, u64 intr,
+				  enum mcs_direction dir)
+{
+	struct mcs_intr_event event = { 0 };
+	int i;
+
+	if (!(intr & MCS_BBE_INT_MASK))
+		return;
+
+	event.mcs_id = mcs->mcs_id;
+	event.pcifunc = mcs->pf_map[0];
+
+	for (i = 0; i < MCS_MAX_BBE_INT; i++) {
+		if (!(intr & BIT_ULL(i)))
+			continue;
+
+		/* Lower nibble denotes data fifo overflow interrupts and
+		 * upper nibble indicates policy fifo overflow interrupts.
+		 */
+		if (intr & 0xFULL)
+			event.intr_mask = (dir == MCS_RX) ?
+					  MCS_BBE_RX_DFIFO_OVERFLOW_INT :
+					  MCS_BBE_TX_DFIFO_OVERFLOW_INT;
+		else
+			event.intr_mask = (dir == MCS_RX) ?
+					  MCS_BBE_RX_PLFIFO_OVERFLOW_INT :
+					  MCS_BBE_TX_PLFIFO_OVERFLOW_INT;
+
+		/* Notify the lmac_id info which ran into BBE fatal error */
+		event.lmac_id = i & 0x3ULL;
+		mcs_add_intr_wq_entry(mcs, &event);
+	}
+}
+
+void cnf10kb_mcs_pab_intr_handler(struct mcs *mcs, u64 intr,
+				  enum mcs_direction dir)
+{
+	struct mcs_intr_event event = { 0 };
+	int i;
+
+	if (!(intr & MCS_PAB_INT_MASK))
+		return;
+
+	event.mcs_id = mcs->mcs_id;
+	event.pcifunc = mcs->pf_map[0];
+
+	for (i = 0; i < MCS_MAX_PAB_INT; i++) {
+		if (!(intr & BIT_ULL(i)))
+			continue;
+
+		event.intr_mask = (dir == MCS_RX) ?
+				  MCS_PAB_RX_CHAN_OVERFLOW_INT :
+				  MCS_PAB_TX_CHAN_OVERFLOW_INT;
+
+		/* Notify the lmac_id info which ran into PAB fatal error */
+		event.lmac_id = i;
+		mcs_add_intr_wq_entry(mcs, &event);
+	}
+}
...
@@ -276,7 +276,10 @@
 #define MCSX_BBE_RX_SLAVE_CAL_ENTRY			0x180ull
 #define MCSX_BBE_RX_SLAVE_CAL_LEN			0x188ull
 #define MCSX_PAB_RX_SLAVE_FIFO_SKID_CFGX(a)	(0x290ull + (a) * 0x40ull)
-
+#define MCSX_BBE_RX_SLAVE_DFIFO_OVERFLOW_0	0xe20
+#define MCSX_BBE_TX_SLAVE_DFIFO_OVERFLOW_0	0x1298
+#define MCSX_BBE_RX_SLAVE_PLFIFO_OVERFLOW_0	0xe40
+#define MCSX_BBE_TX_SLAVE_PLFIFO_OVERFLOW_0	0x12b8
 #define MCSX_BBE_RX_SLAVE_BBE_INT ({	\
 	u64 offset;			\
 					\
...