Commit 726fdbe9 authored by Yuval Mintz, committed by David S. Miller

qed: Encapsulate interrupt counters in struct

We already have an API struct that contains interrupt-related
numbers. Use it to encapsulate all information relating to the
status of SBs as (used|free).
Signed-off-by: Yuval Mintz <Yuval.Mintz@cavium.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent a333f7f3
...@@ -2061,7 +2061,7 @@ static void qed_hw_set_feat(struct qed_hwfn *p_hwfn) ...@@ -2061,7 +2061,7 @@ static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
qed_int_get_num_sbs(p_hwfn, &sb_cnt_info); qed_int_get_num_sbs(p_hwfn, &sb_cnt_info);
feat_num[QED_VF_L2_QUE] = min_t(u32, feat_num[QED_VF_L2_QUE] = min_t(u32,
RESC_NUM(p_hwfn, QED_L2_QUEUE), RESC_NUM(p_hwfn, QED_L2_QUEUE),
sb_cnt_info.sb_iov_cnt); sb_cnt_info.iov_cnt);
feat_num[QED_PF_L2_QUE] = min_t(u32, feat_num[QED_PF_L2_QUE] = min_t(u32,
RESC_NUM(p_hwfn, QED_SB) - RESC_NUM(p_hwfn, QED_SB) -
non_l2_sbs, non_l2_sbs,
...@@ -2255,7 +2255,7 @@ int qed_hw_get_dflt_resc(struct qed_hwfn *p_hwfn, ...@@ -2255,7 +2255,7 @@ int qed_hw_get_dflt_resc(struct qed_hwfn *p_hwfn,
case QED_SB: case QED_SB:
memset(&sb_cnt_info, 0, sizeof(sb_cnt_info)); memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
qed_int_get_num_sbs(p_hwfn, &sb_cnt_info); qed_int_get_num_sbs(p_hwfn, &sb_cnt_info);
*p_resc_num = sb_cnt_info.sb_cnt; *p_resc_num = sb_cnt_info.cnt;
break; break;
default: default:
return -EINVAL; return -EINVAL;
......
...@@ -1769,7 +1769,7 @@ void qed_int_igu_init_pure_rt(struct qed_hwfn *p_hwfn, ...@@ -1769,7 +1769,7 @@ void qed_int_igu_init_pure_rt(struct qed_hwfn *p_hwfn,
bool b_set, bool b_slowpath) bool b_set, bool b_slowpath)
{ {
u32 igu_base_sb = p_hwfn->hw_info.p_igu_info->igu_base_sb; u32 igu_base_sb = p_hwfn->hw_info.p_igu_info->igu_base_sb;
u32 igu_sb_cnt = p_hwfn->hw_info.p_igu_info->igu_sb_cnt; u32 igu_sb_cnt = p_hwfn->hw_info.p_igu_info->usage.cnt;
u32 igu_sb_id = 0, val = 0; u32 igu_sb_id = 0, val = 0;
val = qed_rd(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION); val = qed_rd(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION);
...@@ -1827,7 +1827,6 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) ...@@ -1827,7 +1827,6 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
/* Initialize base sb / sb cnt for PFs and VFs */ /* Initialize base sb / sb cnt for PFs and VFs */
p_igu_info->igu_base_sb = 0xffff; p_igu_info->igu_base_sb = 0xffff;
p_igu_info->igu_sb_cnt = 0;
p_igu_info->igu_base_sb_iov = 0xffff; p_igu_info->igu_base_sb_iov = 0xffff;
/* Distinguish between existent and non-existent default SB */ /* Distinguish between existent and non-existent default SB */
...@@ -1856,7 +1855,7 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) ...@@ -1856,7 +1855,7 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
if (p_igu_info->igu_dsb_id != QED_SB_INVALID_IDX) { if (p_igu_info->igu_dsb_id != QED_SB_INVALID_IDX) {
if (p_igu_info->igu_base_sb == 0xffff) if (p_igu_info->igu_base_sb == 0xffff)
p_igu_info->igu_base_sb = igu_sb_id; p_igu_info->igu_base_sb = igu_sb_id;
p_igu_info->igu_sb_cnt++; p_igu_info->usage.cnt++;
} }
} else if (!(p_block->is_pf) && } else if (!(p_block->is_pf) &&
(p_block->function_id >= min_vf) && (p_block->function_id >= min_vf) &&
...@@ -1867,7 +1866,7 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) ...@@ -1867,7 +1866,7 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
if (p_igu_info->igu_base_sb_iov == 0xffff) if (p_igu_info->igu_base_sb_iov == 0xffff)
p_igu_info->igu_base_sb_iov = igu_sb_id; p_igu_info->igu_base_sb_iov = igu_sb_id;
p_igu_info->free_blks++; p_igu_info->usage.iov_cnt++;
} }
/* Mark the First entry belonging to the PF or its VFs /* Mark the First entry belonging to the PF or its VFs
...@@ -1900,12 +1899,13 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) ...@@ -1900,12 +1899,13 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
} }
/* All non default SB are considered free at this point */ /* All non default SB are considered free at this point */
p_igu_info->igu_sb_cnt_iov = p_igu_info->free_blks; p_igu_info->usage.free_cnt = p_igu_info->usage.cnt;
p_igu_info->usage.free_cnt_iov = p_igu_info->usage.iov_cnt;
DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
"igu_dsb_id=0x%x, num Free SBs - PF: %04x VF: %04x\n", "igu_dsb_id=0x%x, num Free SBs - PF: %04x VF: %04x\n",
p_igu_info->igu_dsb_id, p_igu_info->igu_dsb_id,
p_igu_info->igu_sb_cnt, p_igu_info->igu_sb_cnt_iov); p_igu_info->usage.cnt, p_igu_info->usage.iov_cnt);
return 0; return 0;
} }
...@@ -2003,9 +2003,7 @@ void qed_int_get_num_sbs(struct qed_hwfn *p_hwfn, ...@@ -2003,9 +2003,7 @@ void qed_int_get_num_sbs(struct qed_hwfn *p_hwfn,
if (!info || !p_sb_cnt_info) if (!info || !p_sb_cnt_info)
return; return;
p_sb_cnt_info->sb_cnt = info->igu_sb_cnt; memcpy(p_sb_cnt_info, &info->usage, sizeof(*p_sb_cnt_info));
p_sb_cnt_info->sb_iov_cnt = info->igu_sb_cnt_iov;
p_sb_cnt_info->sb_free_blk = info->free_blks;
} }
u16 qed_int_queue_id_from_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id) u16 qed_int_queue_id_from_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
...@@ -2014,10 +2012,10 @@ u16 qed_int_queue_id_from_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id) ...@@ -2014,10 +2012,10 @@ u16 qed_int_queue_id_from_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
/* Determine origin of SB id */ /* Determine origin of SB id */
if ((sb_id >= p_info->igu_base_sb) && if ((sb_id >= p_info->igu_base_sb) &&
(sb_id < p_info->igu_base_sb + p_info->igu_sb_cnt)) { (sb_id < p_info->igu_base_sb + p_info->usage.cnt)) {
return sb_id - p_info->igu_base_sb; return sb_id - p_info->igu_base_sb;
} else if ((sb_id >= p_info->igu_base_sb_iov) && } else if ((sb_id >= p_info->igu_base_sb_iov) &&
(sb_id < p_info->igu_base_sb_iov + p_info->igu_sb_cnt_iov)) { (sb_id < p_info->igu_base_sb_iov + p_info->usage.iov_cnt)) {
/* We want the first VF queue to be adjacent to the /* We want the first VF queue to be adjacent to the
* last PF queue. Since L2 queues can be partial to * last PF queue. Since L2 queues can be partial to
* SBs, we'll use the feature instead. * SBs, we'll use the feature instead.
......
...@@ -216,11 +216,11 @@ struct qed_igu_block { ...@@ -216,11 +216,11 @@ struct qed_igu_block {
struct qed_igu_info { struct qed_igu_info {
struct qed_igu_block entry[MAX_TOT_SB_PER_PATH]; struct qed_igu_block entry[MAX_TOT_SB_PER_PATH];
u16 igu_dsb_id; u16 igu_dsb_id;
u16 igu_base_sb;
u16 igu_base_sb_iov; u16 igu_base_sb;
u16 igu_sb_cnt; u16 igu_base_sb_iov;
u16 igu_sb_cnt_iov; struct qed_sb_cnt_info usage;
u16 free_blks;
}; };
/* TODO Names of function may change... */ /* TODO Names of function may change... */
......
...@@ -762,7 +762,7 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev, ...@@ -762,7 +762,7 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev,
for_each_hwfn(cdev, i) { for_each_hwfn(cdev, i) {
memset(&sb_cnt_info, 0, sizeof(sb_cnt_info)); memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
qed_int_get_num_sbs(&cdev->hwfns[i], &sb_cnt_info); qed_int_get_num_sbs(&cdev->hwfns[i], &sb_cnt_info);
cdev->int_params.in.num_vectors += sb_cnt_info.sb_cnt; cdev->int_params.in.num_vectors += sb_cnt_info.cnt;
cdev->int_params.in.num_vectors++; /* slowpath */ cdev->int_params.in.num_vectors++; /* slowpath */
} }
......
...@@ -874,9 +874,9 @@ static u8 qed_iov_alloc_vf_igu_sbs(struct qed_hwfn *p_hwfn, ...@@ -874,9 +874,9 @@ static u8 qed_iov_alloc_vf_igu_sbs(struct qed_hwfn *p_hwfn,
igu_blocks = p_hwfn->hw_info.p_igu_info->entry; igu_blocks = p_hwfn->hw_info.p_igu_info->entry;
if (num_rx_queues > p_hwfn->hw_info.p_igu_info->free_blks) if (num_rx_queues > p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov)
num_rx_queues = p_hwfn->hw_info.p_igu_info->free_blks; num_rx_queues = p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov;
p_hwfn->hw_info.p_igu_info->free_blks -= num_rx_queues; p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov -= num_rx_queues;
SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, vf->abs_vf_id); SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, vf->abs_vf_id);
SET_FIELD(val, IGU_MAPPING_LINE_VALID, 1); SET_FIELD(val, IGU_MAPPING_LINE_VALID, 1);
...@@ -932,8 +932,7 @@ static void qed_iov_free_vf_igu_sbs(struct qed_hwfn *p_hwfn, ...@@ -932,8 +932,7 @@ static void qed_iov_free_vf_igu_sbs(struct qed_hwfn *p_hwfn,
qed_wr(p_hwfn, p_ptt, addr, val); qed_wr(p_hwfn, p_ptt, addr, val);
p_info->entry[igu_id].status |= QED_IGU_STATUS_FREE; p_info->entry[igu_id].status |= QED_IGU_STATUS_FREE;
p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov++;
p_hwfn->hw_info.p_igu_info->free_blks++;
} }
vf->num_sbs = 0; vf->num_sbs = 0;
......
...@@ -886,9 +886,15 @@ struct qed_eth_stats { ...@@ -886,9 +886,15 @@ struct qed_eth_stats {
#define TX_PI(tc) (RX_PI + 1 + tc) #define TX_PI(tc) (RX_PI + 1 + tc)
struct qed_sb_cnt_info { struct qed_sb_cnt_info {
int sb_cnt; /* Original, current, and free SBs for PF */
int sb_iov_cnt; int orig;
int sb_free_blk; int cnt;
int free_cnt;
/* Original, current and free SBS for child VFs */
int iov_orig;
int iov_cnt;
int free_cnt_iov;
}; };
static inline u16 qed_sb_update_sb_idx(struct qed_sb_info *sb_info) static inline u16 qed_sb_update_sb_idx(struct qed_sb_info *sb_info)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment