Commit 40aa306f authored by David S. Miller

Merge branch 'qed-Status-block-changes'

Yuval Mintz says:

====================
qed: Status block changes

The device maintains a CAM mapping of the internal status blocks
and the various PF/VF MSI-x vector mappings.
During initialization, the driver reads the HW memory and constructs
a shadow SW implementation which it later uses for interrupt
manipulation, e.g., when enabling VFs and setting their MSI-x tables.

The driver currently has some very strict assumptions on the order the
entries are placed in the CAM. Specifically, it assumes that all entries
belonging to a PF would be consecutive and in-order in the CAM, and that
the VF entries would then follow. But there's no actual HW constraint
enforcing this assumption [although the management firmware does
initially configure the CAM according to the same assumption].

Since the CAM is re-configurable, there are now SW flows employed
by other OSes that might invalidate the assumption.
Such flows allow the PF to forfeit some of its available interrupts
in favor of its VFs, or vice versa.
While qed does not employ such flows today, we want to relax the
assumptions as much as we can -
both to allow functionality after PDA as well as to allow future
compatibility where the driver would be loaded after a newer one has
'dirtied' the CAM configuration.

In addition to patches meant for the above relaxation, the series
also contains various cleanups & refactoring for interrupt logic
[most of which is !semantic].
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 7b954ed7 1ee240e3
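To make the shadow-CAM idea above concrete before diving into the diff, here is a minimal, self-contained C model of one entry in the SW copy of the IGU CAM. The names, flag values, and array size are illustrative only; the driver's real definitions are the qed_igu_block / qed_igu_info changes shown further below.

/* Illustrative model only -- not the driver's definitions.
 * Each CAM entry records which function owns it, whether that function
 * is a PF, and whether the entry is free for (re)assignment.
 */
#include <stdbool.h>
#include <stdint.h>

#define MODEL_IGU_STATUS_FREE  0x01
#define MODEL_IGU_STATUS_VALID 0x02
#define MODEL_IGU_STATUS_PF    0x04

struct model_igu_block {
	uint8_t  status;        /* FREE / VALID / PF flags */
	uint8_t  vector_number; /* MSI-X vector within the owning function */
	uint8_t  function_id;   /* owning PF or VF */
	bool     is_pf;
	uint16_t igu_sb_id;     /* back-reference: index inside the CAM */
};

/* With the ordering assumptions relaxed, PF and VF entries may be
 * interleaved anywhere in this array; lookups must scan it instead of
 * relying on a contiguous [PF entries][VF entries] layout.
 * The size below is arbitrary for the sketch.
 */
static struct model_igu_block model_shadow_cam[256];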
@@ -495,10 +495,6 @@ struct qed_hwfn {
 bool b_rdma_enabled_in_prs;
 u32 rdma_prs_search_reg;
-/* Array of sb_info of all status blocks */
-struct qed_sb_info *sbs_info[MAX_SB_PER_PF_MIMD];
-u16 num_sbs;
 struct qed_cxt_mngr *p_cxt_mngr;
 /* Flag indicating whether interrupts are enabled or not*/
...
@@ -1030,7 +1030,7 @@ void qed_resc_setup(struct qed_dev *cdev)
 qed_int_setup(p_hwfn, p_hwfn->p_main_ptt);
-qed_iov_setup(p_hwfn, p_hwfn->p_main_ptt);
+qed_iov_setup(p_hwfn);
 #ifdef CONFIG_QED_LL2
 if (p_hwfn->using_ll2)
 qed_ll2_setup(p_hwfn);
@@ -1155,7 +1155,7 @@ static int qed_calc_hw_mode(struct qed_hwfn *p_hwfn)
 static void qed_init_cau_rt_data(struct qed_dev *cdev)
 {
 u32 offset = CAU_REG_SB_VAR_MEMORY_RT_OFFSET;
-int i, sb_id;
+int i, igu_sb_id;
 for_each_hwfn(cdev, i) {
 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
@@ -1165,15 +1165,17 @@ static void qed_init_cau_rt_data(struct qed_dev *cdev)
 p_igu_info = p_hwfn->hw_info.p_igu_info;
-for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(cdev);
-sb_id++) {
-p_block = &p_igu_info->igu_map.igu_blocks[sb_id];
+for (igu_sb_id = 0;
+igu_sb_id < QED_MAPPING_MEMORY_SIZE(cdev); igu_sb_id++) {
+p_block = &p_igu_info->entry[igu_sb_id];
 if (!p_block->is_pf)
 continue;
 qed_init_cau_sb_entry(p_hwfn, &sb_entry,
 p_block->function_id, 0, 0);
-STORE_RT_REG_AGG(p_hwfn, offset + sb_id * 2, sb_entry);
+STORE_RT_REG_AGG(p_hwfn, offset + igu_sb_id * 2,
+sb_entry);
 }
 }
 }
@@ -2036,9 +2038,12 @@ static void get_function_id(struct qed_hwfn *p_hwfn)
 static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
 {
 u32 *feat_num = p_hwfn->hw_info.feat_num;
-struct qed_sb_cnt_info sb_cnt_info;
+struct qed_sb_cnt_info sb_cnt;
 u32 non_l2_sbs = 0;
+memset(&sb_cnt, 0, sizeof(sb_cnt));
+qed_int_get_num_sbs(p_hwfn, &sb_cnt);
 if (IS_ENABLED(CONFIG_QED_RDMA) &&
 p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) {
 /* Roce CNQ each requires: 1 status block + 1 CNQ. We divide
@@ -2046,7 +2051,7 @@ static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
 * consideration as to how many l2 queues / cnqs we have.
 */
 feat_num[QED_RDMA_CNQ] =
-min_t(u32, RESC_NUM(p_hwfn, QED_SB) / 2,
+min_t(u32, sb_cnt.cnt / 2,
 RESC_NUM(p_hwfn, QED_RDMA_CNQ_RAM));
 non_l2_sbs = feat_num[QED_RDMA_CNQ];
@@ -2055,14 +2060,11 @@ static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
 if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE ||
 p_hwfn->hw_info.personality == QED_PCI_ETH) {
 /* Start by allocating VF queues, then PF's */
-memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
-qed_int_get_num_sbs(p_hwfn, &sb_cnt_info);
 feat_num[QED_VF_L2_QUE] = min_t(u32,
 RESC_NUM(p_hwfn, QED_L2_QUEUE),
-sb_cnt_info.sb_iov_cnt);
+sb_cnt.iov_cnt);
 feat_num[QED_PF_L2_QUE] = min_t(u32,
-RESC_NUM(p_hwfn, QED_SB) -
-non_l2_sbs,
+sb_cnt.cnt - non_l2_sbs,
 RESC_NUM(p_hwfn,
 QED_L2_QUEUE) -
 FEAT_NUM(p_hwfn,
@@ -2070,7 +2072,7 @@ static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
 }
 if (p_hwfn->hw_info.personality == QED_PCI_ISCSI)
-feat_num[QED_ISCSI_CQ] = min_t(u32, RESC_NUM(p_hwfn, QED_SB),
+feat_num[QED_ISCSI_CQ] = min_t(u32, sb_cnt.cnt,
 RESC_NUM(p_hwfn,
 QED_CMDQS_CQS));
 DP_VERBOSE(p_hwfn,
@@ -2080,7 +2082,7 @@ static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
 (int)FEAT_NUM(p_hwfn, QED_VF_L2_QUE),
 (int)FEAT_NUM(p_hwfn, QED_RDMA_CNQ),
 (int)FEAT_NUM(p_hwfn, QED_ISCSI_CQ),
-RESC_NUM(p_hwfn, QED_SB));
+(int)sb_cnt.cnt);
 }
 const char *qed_hw_get_resc_name(enum qed_resources res_id)
@@ -2199,7 +2201,6 @@ int qed_hw_get_dflt_resc(struct qed_hwfn *p_hwfn,
 {
 u8 num_funcs = p_hwfn->num_funcs_on_engine;
 bool b_ah = QED_IS_AH(p_hwfn->cdev);
-struct qed_sb_cnt_info sb_cnt_info;
 switch (res_id) {
 case QED_L2_QUEUE:
@@ -2251,9 +2252,10 @@ int qed_hw_get_dflt_resc(struct qed_hwfn *p_hwfn,
 *p_resc_num = 1;
 break;
 case QED_SB:
-memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
-qed_int_get_num_sbs(p_hwfn, &sb_cnt_info);
-*p_resc_num = sb_cnt_info.sb_cnt;
+/* Since we want its value to reflect whether MFW supports
+ * the new scheme, have a default of 0.
+ */
+*p_resc_num = 0;
 break;
 default:
 return -EINVAL;
@@ -2322,11 +2324,6 @@ static int __qed_hw_set_resc_info(struct qed_hwfn *p_hwfn,
 goto out;
 }
-/* Special handling for status blocks; Would be revised in future */
-if (res_id == QED_SB) {
-*p_resc_num -= 1;
-*p_resc_start -= p_hwfn->enabled_func_idx;
-}
 out:
 /* PQs have to divide by 8 [that's the HW granularity].
 * Reduce number so it would fit.
@@ -2424,6 +2421,10 @@ static int qed_hw_get_resc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 return -EINVAL;
 }
+/* This will also learn the number of SBs from MFW */
+if (qed_int_igu_reset_cam(p_hwfn, p_ptt))
+return -EINVAL;
 qed_hw_set_feat(p_hwfn);
 for (res_id = 0; res_id < QED_MAX_RESC; res_id++)
...
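A hedged worked example of the qed_hw_set_feat() arithmetic after the change above: the status-block budget now comes from the counts learned from the IGU CAM (sb_cnt.cnt and sb_cnt.iov_cnt) rather than from RESC_NUM(p_hwfn, QED_SB). The numbers below are made up purely for illustration.

#include <stdio.h>

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

int main(void)
{
	/* Hypothetical values standing in for sb_cnt.cnt, sb_cnt.iov_cnt,
	 * RESC_NUM(QED_L2_QUEUE) and RESC_NUM(QED_RDMA_CNQ_RAM).
	 */
	unsigned int pf_sbs = 16, vf_sbs = 48;
	unsigned int l2_queues = 64, cnq_ram = 8;

	/* RoCE: each CNQ needs an SB, capped by the CNQ RAM resource. */
	unsigned int cnqs = min_u(pf_sbs / 2, cnq_ram);
	unsigned int non_l2_sbs = cnqs;

	/* VF L2 queues first, then whatever SBs remain go to PF L2. */
	unsigned int vf_l2 = min_u(l2_queues, vf_sbs);
	unsigned int pf_l2 = min_u(pf_sbs - non_l2_sbs, l2_queues - vf_l2);

	printf("CNQ=%u VF_L2=%u PF_L2=%u\n", cnqs, vf_l2, pf_l2);
	return 0;	/* prints CNQ=8 VF_L2=48 PF_L2=8 */
}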
@@ -183,7 +183,10 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn,
 p_data->q_params.queue_relative_offset = (u8)tmp;
 for (i = 0; i < fcoe_pf_params->num_cqs; i++) {
-tmp = cpu_to_le16(p_hwfn->sbs_info[i]->igu_sb_id);
+u16 igu_sb_id;
+igu_sb_id = qed_get_igu_sb_id(p_hwfn, i);
+tmp = cpu_to_le16(igu_sb_id);
 p_data->q_params.cq_cmdq_sb_num_arr[i] = tmp;
 }
...
This diff is collapsed.
@@ -78,24 +78,6 @@ enum qed_coalescing_fsm {
 QED_COAL_TX_STATE_MACHINE
 };
-/**
- * @brief qed_int_cau_conf_pi - configure cau for a given
- * status block
- *
- * @param p_hwfn
- * @param p_ptt
- * @param igu_sb_id
- * @param pi_index
- * @param state
- * @param timeset
- */
-void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn,
-struct qed_ptt *p_ptt,
-u16 igu_sb_id,
-u32 pi_index,
-enum qed_coalescing_fsm coalescing_fsm,
-u8 timeset);
 /**
 * @brief qed_int_igu_enable_int - enable device interrupts
 *
@@ -217,32 +199,63 @@ void qed_int_disable_post_isr_release(struct qed_dev *cdev);
 #define SB_ALIGNED_SIZE(p_hwfn) \
 ALIGNED_TYPE_SIZE(struct status_block, p_hwfn)
+#define QED_SB_INVALID_IDX 0xffff
 struct qed_igu_block {
 u8 status;
 #define QED_IGU_STATUS_FREE 0x01
 #define QED_IGU_STATUS_VALID 0x02
 #define QED_IGU_STATUS_PF 0x04
+#define QED_IGU_STATUS_DSB 0x08
 u8 vector_number;
 u8 function_id;
 u8 is_pf;
-};
-struct qed_igu_map {
-struct qed_igu_block igu_blocks[MAX_TOT_SB_PER_PATH];
+/* Index inside IGU [meant for back reference] */
+u16 igu_sb_id;
+struct qed_sb_info *sb_info;
 };
 struct qed_igu_info {
-struct qed_igu_map igu_map;
+struct qed_igu_block entry[MAX_TOT_SB_PER_PATH];
 u16 igu_dsb_id;
-u16 igu_base_sb;
-u16 igu_base_sb_iov;
-u16 igu_sb_cnt;
-u16 igu_sb_cnt_iov;
-u16 free_blks;
+struct qed_sb_cnt_info usage;
+bool b_allow_pf_vf_change;
 };
-/* TODO Names of function may change... */
+/**
+ * @brief - Make sure the IGU CAM reflects the resources provided by MFW
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+int qed_int_igu_reset_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+/**
+ * @brief Translate the weakly-defined client sb-id into an IGU sb-id
+ *
+ * @param p_hwfn
+ * @param sb_id - user provided sb_id
+ *
+ * @return an index inside IGU CAM where the SB resides
+ */
+u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id);
+/**
+ * @brief return a pointer to an unused valid SB
+ *
+ * @param p_hwfn
+ * @param b_is_pf - true iff we want a SB belonging to a PF
+ *
+ * @return point to an igu_block, NULL if none is available
+ */
+struct qed_igu_block *qed_get_igu_free_sb(struct qed_hwfn *p_hwfn,
+bool b_is_pf);
 void qed_int_igu_init_pure_rt(struct qed_hwfn *p_hwfn,
 struct qed_ptt *p_ptt,
 bool b_set,
@@ -321,13 +334,13 @@ u16 qed_int_get_sp_sb_id(struct qed_hwfn *p_hwfn);
 *
 * @param p_hwfn
 * @param p_ptt
-* @param sb_id - igu status block id
+* @param igu_sb_id - igu status block id
 * @param opaque - opaque fid of the sb owner.
 * @param b_set - set(1) / clear(0)
 */
 void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn,
 struct qed_ptt *p_ptt,
-u32 sb_id,
+u16 igu_sb_id,
 u16 opaque,
 bool b_set);
@@ -376,16 +389,6 @@ void qed_int_free(struct qed_hwfn *p_hwfn);
 void qed_int_setup(struct qed_hwfn *p_hwfn,
 struct qed_ptt *p_ptt);
-/**
- * @brief - Returns an Rx queue index appropriate for usage with given SB.
- *
- * @param p_hwfn
- * @param sb_id - absolute index of SB
- *
- * @return index of Rx queue
- */
-u16 qed_int_queue_id_from_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id);
 /**
 * @brief - Enable Interrupt & Attention for hw function
 *
...
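The qed_int.c implementation behind the new qed_get_igu_free_sb() declaration is collapsed in this view, so the following is only a plausible sketch of the lookup it suggests, written against the model structure sketched after the commit header; treat the filtering details as assumptions, not a copy of the driver code.

#include <stdbool.h>
#include <stddef.h>

/* Scan the shadow CAM for an entry that is still free and belongs to
 * the requested side (PF vs. non-PF), mirroring the header's contract:
 * "return a pointer to an unused valid SB ... NULL if none is available".
 */
static struct model_igu_block *
model_get_igu_free_sb(struct model_igu_block *cam, size_t cam_size,
		      bool b_is_pf)
{
	size_t i;

	for (i = 0; i < cam_size; i++) {
		struct model_igu_block *p_block = &cam[i];

		if (!(p_block->status & MODEL_IGU_STATUS_FREE))
			continue;	/* already taken */

		/* Assumption: the PF flag in 'status' decides ownership. */
		if (!!(p_block->status & MODEL_IGU_STATUS_PF) != b_is_pf)
			continue;

		return p_block;
	}

	return NULL;
}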
@@ -220,7 +220,7 @@ qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn,
 p_queue->cmdq_sb_pi = p_params->gl_cmd_pi;
 for (i = 0; i < p_params->num_queues; i++) {
-val = p_hwfn->sbs_info[i]->igu_sb_id;
+val = qed_get_igu_sb_id(p_hwfn, i);
 p_queue->cq_cmdq_sb_num_arr[i] = cpu_to_le16(val);
 }
...
@@ -762,7 +762,7 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev,
 for_each_hwfn(cdev, i) {
 memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
 qed_int_get_num_sbs(&cdev->hwfns[i], &sb_cnt_info);
-cdev->int_params.in.num_vectors += sb_cnt_info.sb_cnt;
+cdev->int_params.in.num_vectors += sb_cnt_info.cnt;
 cdev->int_params.in.num_vectors++; /* slowpath */
 }
...
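A small worked example of the qed_slowpath_setup_int() change above: the MSI-X request is now driven by the per-hwfn sb_cnt_info.cnt (the PF's current status blocks), plus one slowpath vector per hwfn. Values are illustrative.

#include <stdio.h>

int main(void)
{
	/* Hypothetical sb_cnt_info.cnt per hwfn on a two-function device. */
	int cnt_per_hwfn[2] = { 16, 16 };
	int i, num_vectors = 0;

	for (i = 0; i < 2; i++) {
		num_vectors += cnt_per_hwfn[i];	/* fastpath status blocks */
		num_vectors++;			/* slowpath vector */
	}

	printf("requesting %d MSI-X vectors\n", num_vectors);	/* 34 */
	return 0;
}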
@@ -581,6 +581,7 @@ static int qed_rdma_start_fw(struct qed_hwfn *p_hwfn,
 struct qed_sp_init_data init_data;
 struct qed_spq_entry *p_ent;
 u32 cnq_id, sb_id;
+u16 igu_sb_id;
 int rc;
 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Starting FW\n");
@@ -612,10 +613,10 @@ static int qed_rdma_start_fw(struct qed_hwfn *p_hwfn,
 for (cnq_id = 0; cnq_id < params->desired_cnq; cnq_id++) {
 sb_id = qed_rdma_get_sb_id(p_hwfn, cnq_id);
+igu_sb_id = qed_get_igu_sb_id(p_hwfn, sb_id);
+p_ramrod->cnq_params[cnq_id].sb_num = cpu_to_le16(igu_sb_id);
 p_cnq_params = &p_ramrod->cnq_params[cnq_id];
 p_cnq_pbl_list = &params->cnq_pbl_list[cnq_id];
-p_cnq_params->sb_num =
-cpu_to_le16(p_hwfn->sbs_info[sb_id]->igu_sb_id);
 p_cnq_params->sb_index = p_hwfn->pf_params.rdma_pf_params.gl_pi;
 p_cnq_params->num_pbl_pages = p_cnq_pbl_list->num_pbl_pages;
...
@@ -378,33 +378,6 @@ static int qed_iov_pci_cfg_info(struct qed_dev *cdev)
 return 0;
 }
-static void qed_iov_clear_vf_igu_blocks(struct qed_hwfn *p_hwfn,
-struct qed_ptt *p_ptt)
-{
-struct qed_igu_block *p_sb;
-u16 sb_id;
-u32 val;
-if (!p_hwfn->hw_info.p_igu_info) {
-DP_ERR(p_hwfn,
-"qed_iov_clear_vf_igu_blocks IGU Info not initialized\n");
-return;
-}
-for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev);
-sb_id++) {
-p_sb = &p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks[sb_id];
-if ((p_sb->status & QED_IGU_STATUS_FREE) &&
-!(p_sb->status & QED_IGU_STATUS_PF)) {
-val = qed_rd(p_hwfn, p_ptt,
-IGU_REG_MAPPING_MEMORY + sb_id * 4);
-SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
-qed_wr(p_hwfn, p_ptt,
-IGU_REG_MAPPING_MEMORY + 4 * sb_id, val);
-}
-}
-}
 static void qed_iov_setup_vfdb(struct qed_hwfn *p_hwfn)
 {
 struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
@@ -555,13 +528,12 @@ int qed_iov_alloc(struct qed_hwfn *p_hwfn)
 return qed_iov_allocate_vfdb(p_hwfn);
 }
-void qed_iov_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+void qed_iov_setup(struct qed_hwfn *p_hwfn)
 {
 if (!IS_PF_SRIOV(p_hwfn) || !IS_PF_SRIOV_ALLOC(p_hwfn))
 return;
 qed_iov_setup_vfdb(p_hwfn);
-qed_iov_clear_vf_igu_blocks(p_hwfn, p_ptt);
 }
 void qed_iov_free(struct qed_hwfn *p_hwfn)
@@ -868,45 +840,36 @@ static u8 qed_iov_alloc_vf_igu_sbs(struct qed_hwfn *p_hwfn,
 struct qed_ptt *p_ptt,
 struct qed_vf_info *vf, u16 num_rx_queues)
 {
-struct qed_igu_block *igu_blocks;
-int qid = 0, igu_id = 0;
+struct qed_igu_block *p_block;
+struct cau_sb_entry sb_entry;
+int qid = 0;
 u32 val = 0;
-igu_blocks = p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks;
-if (num_rx_queues > p_hwfn->hw_info.p_igu_info->free_blks)
-num_rx_queues = p_hwfn->hw_info.p_igu_info->free_blks;
-p_hwfn->hw_info.p_igu_info->free_blks -= num_rx_queues;
+if (num_rx_queues > p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov)
+num_rx_queues = p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov;
+p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov -= num_rx_queues;
 SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, vf->abs_vf_id);
 SET_FIELD(val, IGU_MAPPING_LINE_VALID, 1);
 SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, 0);
-while ((qid < num_rx_queues) &&
-(igu_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev))) {
-if (igu_blocks[igu_id].status & QED_IGU_STATUS_FREE) {
-struct cau_sb_entry sb_entry;
-vf->igu_sbs[qid] = (u16)igu_id;
-igu_blocks[igu_id].status &= ~QED_IGU_STATUS_FREE;
-SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, qid);
-qed_wr(p_hwfn, p_ptt,
-IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id,
-val);
-/* Configure igu sb in CAU which were marked valid */
-qed_init_cau_sb_entry(p_hwfn, &sb_entry,
-p_hwfn->rel_pf_id,
-vf->abs_vf_id, 1);
-qed_dmae_host2grc(p_hwfn, p_ptt,
-(u64)(uintptr_t)&sb_entry,
-CAU_REG_SB_VAR_MEMORY +
-igu_id * sizeof(u64), 2, 0);
-qid++;
-}
-igu_id++;
+for (qid = 0; qid < num_rx_queues; qid++) {
+p_block = qed_get_igu_free_sb(p_hwfn, false);
+vf->igu_sbs[qid] = p_block->igu_sb_id;
+p_block->status &= ~QED_IGU_STATUS_FREE;
+SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, qid);
+qed_wr(p_hwfn, p_ptt,
+IGU_REG_MAPPING_MEMORY +
+sizeof(u32) * p_block->igu_sb_id, val);
+/* Configure igu sb in CAU which were marked valid */
+qed_init_cau_sb_entry(p_hwfn, &sb_entry,
+p_hwfn->rel_pf_id, vf->abs_vf_id, 1);
+qed_dmae_host2grc(p_hwfn, p_ptt,
+(u64)(uintptr_t)&sb_entry,
+CAU_REG_SB_VAR_MEMORY +
+p_block->igu_sb_id * sizeof(u64), 2, 0);
 }
 vf->num_sbs = (u8) num_rx_queues;
@@ -931,10 +894,8 @@ static void qed_iov_free_vf_igu_sbs(struct qed_hwfn *p_hwfn,
 SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
 qed_wr(p_hwfn, p_ptt, addr, val);
-p_info->igu_map.igu_blocks[igu_id].status |=
-QED_IGU_STATUS_FREE;
-p_hwfn->hw_info.p_igu_info->free_blks++;
+p_info->entry[igu_id].status |= QED_IGU_STATUS_FREE;
+p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov++;
 }
 vf->num_sbs = 0;
...
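A minimal model of the bookkeeping visible in the qed_iov_alloc_vf_igu_sbs() / qed_iov_free_vf_igu_sbs() hunks above: VF status blocks are now drawn from whatever CAM entries happen to be free, and the usage.free_cnt_iov counter is the piece of global state that moves. The names below are illustrative, not the driver's.

/* Model of the usage counter only; per-entry CAM state is omitted. */
struct model_sb_usage {
	unsigned int free_cnt_iov;	/* CAM entries currently free for VFs */
};

/* Mirror of the clamp at the top of qed_iov_alloc_vf_igu_sbs():
 * grant at most what is actually free, then account for it.
 */
static unsigned int model_alloc_vf_sbs(struct model_sb_usage *usage,
				       unsigned int requested)
{
	if (requested > usage->free_cnt_iov)
		requested = usage->free_cnt_iov;
	usage->free_cnt_iov -= requested;
	return requested;
}

/* Mirror of qed_iov_free_vf_igu_sbs(): each released SB returns one
 * entry to the free pool.
 */
static void model_free_vf_sb(struct model_sb_usage *usage)
{
	usage->free_cnt_iov++;
}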
@@ -316,9 +316,8 @@ int qed_iov_alloc(struct qed_hwfn *p_hwfn);
 * @brief qed_iov_setup - setup sriov related resources
 *
 * @param p_hwfn
-* @param p_ptt
 */
-void qed_iov_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+void qed_iov_setup(struct qed_hwfn *p_hwfn);
 /**
 * @brief qed_iov_free - free sriov related resources
@@ -397,7 +396,7 @@ static inline int qed_iov_alloc(struct qed_hwfn *p_hwfn)
 return 0;
 }
-static inline void qed_iov_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+static inline void qed_iov_setup(struct qed_hwfn *p_hwfn)
 {
 }
...
@@ -792,9 +792,12 @@ int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn,
 req->only_untagged = only_untagged;
 /* status blocks */
-for (i = 0; i < p_hwfn->vf_iov_info->acquire_resp.resc.num_sbs; i++)
-if (p_hwfn->sbs_info[i])
-req->sb_addr[i] = p_hwfn->sbs_info[i]->sb_phys;
+for (i = 0; i < p_hwfn->vf_iov_info->acquire_resp.resc.num_sbs; i++) {
+struct qed_sb_info *p_sb = p_hwfn->vf_iov_info->sbs_info[i];
+if (p_sb)
+req->sb_addr[i] = p_sb->sb_phys;
+}
 /* add list termination tlv */
 qed_add_tlv(p_hwfn, &p_iov->offset,
@@ -1240,6 +1243,24 @@ u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
 return p_iov->acquire_resp.resc.hw_sbs[sb_id].hw_sb_id;
 }
+void qed_vf_set_sb_info(struct qed_hwfn *p_hwfn,
+u16 sb_id, struct qed_sb_info *p_sb)
+{
+struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+if (!p_iov) {
+DP_NOTICE(p_hwfn, "vf_sriov_info isn't initialized\n");
+return;
+}
+if (sb_id >= PFVF_MAX_SBS_PER_VF) {
+DP_NOTICE(p_hwfn, "Can't configure SB %04x\n", sb_id);
+return;
+}
+p_iov->sbs_info[sb_id] = p_sb;
+}
 int qed_vf_read_bulletin(struct qed_hwfn *p_hwfn, u8 *p_change)
 {
 struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
...
@@ -627,6 +627,14 @@ struct qed_vf_iov {
 * this has to be propagated as it affects the fastpath.
 */
 bool b_pre_fp_hsi;
+/* Current day VFs are passing the SBs physical address on vport
+ * start, and as they lack an IGU mapping they need to store the
+ * addresses of previously registered SBs.
+ * Even if we were to change configuration flow, due to backward
+ * compatibility [with older PFs] we'd still need to store these.
+ */
+struct qed_sb_info *sbs_info[PFVF_MAX_SBS_PER_VF];
 };
 #ifdef CONFIG_QED_SRIOV
@@ -836,6 +844,16 @@ int qed_vf_pf_release(struct qed_hwfn *p_hwfn);
 */
 u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id);
+/**
+ * @brief Stores [or removes] a configured sb_info.
+ *
+ * @param p_hwfn
+ * @param sb_id - zero-based SB index [for fastpath]
+ * @param sb_info - may be NULL [during removal].
+ */
+void qed_vf_set_sb_info(struct qed_hwfn *p_hwfn,
+u16 sb_id, struct qed_sb_info *p_sb);
 /**
 * @brief qed_vf_pf_vport_start - perform vport start for VF.
 *
...
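The qed_vf_set_sb_info() declaration above comes straight from the diff; the call sites below are hypothetical and only meant to show how a VF-side client might register and later remove an SB so that qed_vf_pf_vport_start() can report its physical address (req->sb_addr[i] = p_sb->sb_phys).

/* Hypothetical usage sketch -- these wrapper functions are not part of
 * the driver; only the qed_vf_set_sb_info() signature is.
 */
static void example_register_vf_sb(struct qed_hwfn *p_hwfn,
				   u16 rel_sb_id,
				   struct qed_sb_info *p_sb)
{
	/* Remember the SB so a later vport-start can pass p_sb->sb_phys. */
	qed_vf_set_sb_info(p_hwfn, rel_sb_id, p_sb);
}

static void example_unregister_vf_sb(struct qed_hwfn *p_hwfn, u16 rel_sb_id)
{
	/* Passing NULL removes the previously stored entry. */
	qed_vf_set_sb_info(p_hwfn, rel_sb_id, NULL);
}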
@@ -886,9 +886,15 @@ struct qed_eth_stats {
 #define TX_PI(tc) (RX_PI + 1 + tc)
 struct qed_sb_cnt_info {
-int sb_cnt;
-int sb_iov_cnt;
-int sb_free_blk;
+/* Original, current, and free SBs for PF */
+int orig;
+int cnt;
+int free_cnt;
+/* Original, current and free SBS for child VFs */
+int iov_orig;
+int iov_cnt;
+int free_cnt_iov;
 };
 static inline u16 qed_sb_update_sb_idx(struct qed_sb_info *sb_info)
...