Commit fea2993a authored by Jakub Kicinski

eth: bnxt: move and rename reset helpers

Move the reset helpers; subsequent patches will need some
of them on the Tx path.

While at it, rename bnxt_sched_reset() to bnxt_sched_reset_rxr();
on more recent chips it schedules a queue reset, rather than a
fuller reset.
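
As an illustration (not part of this patch), a Tx-side error handler
could later request the same slow-path work through the relocated
helpers. The caller below is a hypothetical sketch; only the event bit
and bnxt_queue_sp_work() come from the code being moved:

	/* Hypothetical sketch, not part of this patch: a Tx-side error
	 * path scheduling the slow-path task, mirroring what
	 * bnxt_sched_reset_rxr() sets for P5 chips.
	 */
	static void bnxt_example_tx_err(struct bnxt *bp)
	{
		set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
		bnxt_queue_sp_work(bp);
	}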

Link: https://lore.kernel.org/r/20230720010440.1967136-2-kuba@kernel.org
Reviewed-by: Michael Chan <michael.chan@broadcom.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent 59be3baa
@@ -293,6 +293,38 @@ static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
 		BNXT_DB_CQ(db, idx);
 }
 
+static void bnxt_queue_fw_reset_work(struct bnxt *bp, unsigned long delay)
+{
+	if (!(test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)))
+		return;
+
+	if (BNXT_PF(bp))
+		queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay);
+	else
+		schedule_delayed_work(&bp->fw_reset_task, delay);
+}
+
+static void bnxt_queue_sp_work(struct bnxt *bp)
+{
+	if (BNXT_PF(bp))
+		queue_work(bnxt_pf_wq, &bp->sp_task);
+	else
+		schedule_work(&bp->sp_task);
+}
+
+static void bnxt_sched_reset_rxr(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
+{
+	if (!rxr->bnapi->in_reset) {
+		rxr->bnapi->in_reset = true;
+		if (bp->flags & BNXT_FLAG_CHIP_P5)
+			set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
+		else
+			set_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event);
+		bnxt_queue_sp_work(bp);
+	}
+	rxr->rx_next_cons = 0xffff;
+}
+
 const u16 bnxt_lhint_arr[] = {
 	TX_BD_FLAGS_LHINT_512_AND_SMALLER,
 	TX_BD_FLAGS_LHINT_512_TO_1023,
@@ -1234,38 +1266,6 @@ static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
 	return 0;
 }
 
-static void bnxt_queue_fw_reset_work(struct bnxt *bp, unsigned long delay)
-{
-	if (!(test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)))
-		return;
-
-	if (BNXT_PF(bp))
-		queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay);
-	else
-		schedule_delayed_work(&bp->fw_reset_task, delay);
-}
-
-static void bnxt_queue_sp_work(struct bnxt *bp)
-{
-	if (BNXT_PF(bp))
-		queue_work(bnxt_pf_wq, &bp->sp_task);
-	else
-		schedule_work(&bp->sp_task);
-}
-
-static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
-{
-	if (!rxr->bnapi->in_reset) {
-		rxr->bnapi->in_reset = true;
-		if (bp->flags & BNXT_FLAG_CHIP_P5)
-			set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
-		else
-			set_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event);
-		bnxt_queue_sp_work(bp);
-	}
-	rxr->rx_next_cons = 0xffff;
-}
-
 static u16 bnxt_alloc_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
 {
 	struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
@@ -1320,7 +1320,7 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
 		netdev_warn(bp->dev, "TPA cons %x, expected cons %x, error code %x\n",
 			    cons, rxr->rx_next_cons,
 			    TPA_START_ERROR_CODE(tpa_start1));
-		bnxt_sched_reset(bp, rxr);
+		bnxt_sched_reset_rxr(bp, rxr);
 		return;
 	}
 	/* Store cfa_code in tpa_info to use in tpa_end
@@ -1844,7 +1844,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
 		if (rxr->rx_next_cons != 0xffff)
 			netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
 				    cons, rxr->rx_next_cons);
-		bnxt_sched_reset(bp, rxr);
+		bnxt_sched_reset_rxr(bp, rxr);
 		if (rc1)
 			return rc1;
 		goto next_rx_no_prod_no_len;
@@ -1882,7 +1882,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
 			    !(bp->fw_cap & BNXT_FW_CAP_RING_MONITOR)) {
 				netdev_warn_once(bp->dev, "RX buffer error %x\n",
 						 rx_err);
-				bnxt_sched_reset(bp, rxr);
+				bnxt_sched_reset_rxr(bp, rxr);
 			}
 		}
 		goto next_rx_no_len;
@@ -2329,7 +2329,7 @@ static int bnxt_async_event_process(struct bnxt *bp,
 			goto async_event_process_exit;
 		}
 		rxr = bp->bnapi[grp_idx]->rx_ring;
-		bnxt_sched_reset(bp, rxr);
+		bnxt_sched_reset_rxr(bp, rxr);
 		goto async_event_process_exit;
 	}
 	case ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST: {