Commit 76e7f31d authored by David S. Miller

Merge branch 'qed-next'

Yuval Mintz says:

====================
qed/qede updates

This series contains some general minor fixes and enhancements:

 - #1, #2 and #9 correct small gaps in ethtool functionality.
 - #3, #6 and #8 correct minor issues in the driver, but those are either
   print-related or unexposed by existing code.
 - #4 adds proper support for TLB bonding mode.
 - #10 is meant to improve performance on varying cache-line sizes.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents dc57ae3d 9ac4c546
@@ -92,7 +92,7 @@ enum qed_mcp_protocol_type;
 #define QED_MFW_SET_FIELD(name, field, value) \
 	do { \
-		(name) &= ~((field ## _MASK) << (field ## _SHIFT)); \
+		(name) &= ~(field ## _MASK); \
 		(name) |= (((value) << (field ## _SHIFT)) & (field ## _MASK));\
 	} while (0)
...
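A quick illustration of the QED_MFW_SET_FIELD fix above: the MFW-style _MASK constants are already shifted to the field's position, so shifting the mask again clears the wrong bits and leaves the old field value in place. The snippet below is a standalone sketch using an invented DEMO_FIELD layout, not driver code:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical field occupying bits 4..7; the mask is pre-shifted, as in
 * the MFW definitions the macro is used with.
 */
#define DEMO_FIELD_SHIFT 4
#define DEMO_FIELD_MASK  0x000000f0

/* Old macro body: shifts the already-shifted mask, so it clears bits 8..11
 * instead of 4..7 and the stale field value survives.
 */
#define SET_FIELD_OLD(name, field, value) \
	do { \
		(name) &= ~((field ## _MASK) << (field ## _SHIFT)); \
		(name) |= (((value) << (field ## _SHIFT)) & (field ## _MASK)); \
	} while (0)

/* Fixed macro body: clear exactly the bits the mask covers, then OR in
 * the new value.
 */
#define SET_FIELD_NEW(name, field, value) \
	do { \
		(name) &= ~(field ## _MASK); \
		(name) |= (((value) << (field ## _SHIFT)) & (field ## _MASK)); \
	} while (0)

int main(void)
{
	uint32_t old_reg = 0x000000f0, new_reg = 0x000000f0;

	SET_FIELD_OLD(old_reg, DEMO_FIELD, 0x3);
	SET_FIELD_NEW(new_reg, DEMO_FIELD, 0x3);

	printf("old macro: 0x%08x, fixed macro: 0x%08x\n", old_reg, new_reg);
	assert(new_reg == 0x00000030); /* field cleanly replaced */
	assert(old_reg == 0x000000f0); /* stale bits still set */
	return 0;
}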
@@ -923,6 +923,7 @@ int qed_dcbx_info_alloc(struct qed_hwfn *p_hwfn)
 void qed_dcbx_info_free(struct qed_hwfn *p_hwfn)
 {
 	kfree(p_hwfn->p_dcbx_info);
+	p_hwfn->p_dcbx_info = NULL;
 }
 static void qed_dcbx_update_protocol_data(struct protocol_dcb_data *p_data,
...
@@ -161,6 +161,7 @@ void qed_resc_free(struct qed_dev *cdev)
 	cdev->fw_data = NULL;
 	kfree(cdev->reset_stats);
+	cdev->reset_stats = NULL;
 	for_each_hwfn(cdev, i) {
 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
@@ -168,18 +169,18 @@ void qed_resc_free(struct qed_dev *cdev)
 		qed_cxt_mngr_free(p_hwfn);
 		qed_qm_info_free(p_hwfn);
 		qed_spq_free(p_hwfn);
-		qed_eq_free(p_hwfn, p_hwfn->p_eq);
-		qed_consq_free(p_hwfn, p_hwfn->p_consq);
+		qed_eq_free(p_hwfn);
+		qed_consq_free(p_hwfn);
 		qed_int_free(p_hwfn);
 #ifdef CONFIG_QED_LL2
-		qed_ll2_free(p_hwfn, p_hwfn->p_ll2_info);
+		qed_ll2_free(p_hwfn);
 #endif
 		if (p_hwfn->hw_info.personality == QED_PCI_FCOE)
-			qed_fcoe_free(p_hwfn, p_hwfn->p_fcoe_info);
+			qed_fcoe_free(p_hwfn);
 		if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) {
-			qed_iscsi_free(p_hwfn, p_hwfn->p_iscsi_info);
-			qed_ooo_free(p_hwfn, p_hwfn->p_ooo_info);
+			qed_iscsi_free(p_hwfn);
+			qed_ooo_free(p_hwfn);
 		}
 		qed_iov_free(p_hwfn);
 		qed_dmae_info_free(p_hwfn);
@@ -843,15 +844,7 @@ static int qed_alloc_qm_data(struct qed_hwfn *p_hwfn)
 int qed_resc_alloc(struct qed_dev *cdev)
 {
-	struct qed_iscsi_info *p_iscsi_info;
-	struct qed_fcoe_info *p_fcoe_info;
-	struct qed_ooo_info *p_ooo_info;
-#ifdef CONFIG_QED_LL2
-	struct qed_ll2_info *p_ll2_info;
-#endif
 	u32 rdma_tasks, excess_tasks;
-	struct qed_consq *p_consq;
-	struct qed_eq *p_eq;
 	u32 line_count;
 	int i, rc = 0;
@@ -956,45 +949,38 @@ int qed_resc_alloc(struct qed_dev *cdev)
 			DP_ERR(p_hwfn,
 			       "Cannot allocate 0x%x EQ elements. The maximum of a u16 chain is 0x%x\n",
 			       n_eqes, 0xFFFF);
-			rc = -EINVAL;
-			goto alloc_err;
+			goto alloc_no_mem;
 		}
-		p_eq = qed_eq_alloc(p_hwfn, (u16) n_eqes);
-		if (!p_eq)
-			goto alloc_no_mem;
-		p_hwfn->p_eq = p_eq;
+		rc = qed_eq_alloc(p_hwfn, (u16) n_eqes);
+		if (rc)
+			goto alloc_err;
-		p_consq = qed_consq_alloc(p_hwfn);
-		if (!p_consq)
-			goto alloc_no_mem;
-		p_hwfn->p_consq = p_consq;
+		rc = qed_consq_alloc(p_hwfn);
+		if (rc)
+			goto alloc_err;
 #ifdef CONFIG_QED_LL2
 		if (p_hwfn->using_ll2) {
-			p_ll2_info = qed_ll2_alloc(p_hwfn);
-			if (!p_ll2_info)
-				goto alloc_no_mem;
-			p_hwfn->p_ll2_info = p_ll2_info;
+			rc = qed_ll2_alloc(p_hwfn);
+			if (rc)
+				goto alloc_err;
 		}
#endif
 		if (p_hwfn->hw_info.personality == QED_PCI_FCOE) {
-			p_fcoe_info = qed_fcoe_alloc(p_hwfn);
-			if (!p_fcoe_info)
-				goto alloc_no_mem;
-			p_hwfn->p_fcoe_info = p_fcoe_info;
+			rc = qed_fcoe_alloc(p_hwfn);
+			if (rc)
+				goto alloc_err;
 		}
 		if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) {
-			p_iscsi_info = qed_iscsi_alloc(p_hwfn);
-			if (!p_iscsi_info)
-				goto alloc_no_mem;
-			p_hwfn->p_iscsi_info = p_iscsi_info;
-			p_ooo_info = qed_ooo_alloc(p_hwfn);
-			if (!p_ooo_info)
-				goto alloc_no_mem;
-			p_hwfn->p_ooo_info = p_ooo_info;
+			rc = qed_iscsi_alloc(p_hwfn);
+			if (rc)
+				goto alloc_err;
+			rc = qed_ooo_alloc(p_hwfn);
+			if (rc)
+				goto alloc_err;
 		}
 		/* DMA info initialization */
@@ -1033,8 +1019,8 @@ void qed_resc_setup(struct qed_dev *cdev)
 		qed_cxt_mngr_setup(p_hwfn);
 		qed_spq_setup(p_hwfn);
-		qed_eq_setup(p_hwfn, p_hwfn->p_eq);
-		qed_consq_setup(p_hwfn, p_hwfn->p_consq);
+		qed_eq_setup(p_hwfn);
+		qed_consq_setup(p_hwfn);
 		/* Read shadow of current MFW mailbox */
 		qed_mcp_read_mb(p_hwfn, p_hwfn->p_main_ptt);
@@ -1047,14 +1033,14 @@ void qed_resc_setup(struct qed_dev *cdev)
 		qed_iov_setup(p_hwfn, p_hwfn->p_main_ptt);
 #ifdef CONFIG_QED_LL2
 		if (p_hwfn->using_ll2)
-			qed_ll2_setup(p_hwfn, p_hwfn->p_ll2_info);
+			qed_ll2_setup(p_hwfn);
 #endif
 		if (p_hwfn->hw_info.personality == QED_PCI_FCOE)
-			qed_fcoe_setup(p_hwfn, p_hwfn->p_fcoe_info);
+			qed_fcoe_setup(p_hwfn);
 		if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) {
-			qed_iscsi_setup(p_hwfn, p_hwfn->p_iscsi_info);
-			qed_ooo_setup(p_hwfn, p_hwfn->p_ooo_info);
+			qed_iscsi_setup(p_hwfn);
+			qed_ooo_setup(p_hwfn);
 		}
 	}
 }
@@ -1968,6 +1954,7 @@ static void qed_hw_hwfn_free(struct qed_hwfn *p_hwfn)
 {
 	qed_ptt_pool_free(p_hwfn);
 	kfree(p_hwfn->hw_info.p_igu_info);
+	p_hwfn->hw_info.p_igu_info = NULL;
 }
 /* Setup bar access */
...
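A pattern worth calling out in the qed_resc_alloc()/qed_resc_free() hunks above: each per-protocol alloc helper now returns an int and publishes its object through the hwfn, while the matching free helper takes only the hwfn, tolerates a missing object, and clears the pointer after kfree(). The userspace sketch below mirrors that shape with invented demo_* names; it is an illustration of the convention, not driver code:

#include <errno.h>
#include <stdlib.h>

/* Hypothetical, stripped-down stand-ins for struct qed_hwfn and one of the
 * per-hwfn info structs; not the real driver types.
 */
struct demo_info { int dummy; };

struct demo_hwfn {
	struct demo_info *p_info;
};

/* New-style alloc: report status via the return code and publish the
 * object through the hwfn instead of handing a raw pointer back.
 */
static int demo_info_alloc(struct demo_hwfn *p_hwfn)
{
	struct demo_info *p_info = calloc(1, sizeof(*p_info));

	if (!p_info)
		return -ENOMEM;

	p_hwfn->p_info = p_info;
	return 0;
}

/* New-style free: safe to call even if alloc failed or never ran, and it
 * clears the pointer so a later teardown pass cannot double-free it.
 */
static void demo_info_free(struct demo_hwfn *p_hwfn)
{
	if (!p_hwfn->p_info)
		return;

	free(p_hwfn->p_info);
	p_hwfn->p_info = NULL;
}

int main(void)
{
	struct demo_hwfn hwfn = { 0 };

	if (demo_info_alloc(&hwfn))
		return 1;
	demo_info_free(&hwfn);
	demo_info_free(&hwfn);	/* second call is a harmless no-op */
	return 0;
}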
@@ -538,7 +538,7 @@ static void __iomem *qed_fcoe_get_secondary_bdq_prod(struct qed_hwfn *p_hwfn,
 	}
 }
-struct qed_fcoe_info *qed_fcoe_alloc(struct qed_hwfn *p_hwfn)
+int qed_fcoe_alloc(struct qed_hwfn *p_hwfn)
 {
 	struct qed_fcoe_info *p_fcoe_info;
@@ -546,19 +546,21 @@ struct qed_fcoe_info *qed_fcoe_alloc(struct qed_hwfn *p_hwfn)
 	p_fcoe_info = kzalloc(sizeof(*p_fcoe_info), GFP_KERNEL);
 	if (!p_fcoe_info) {
 		DP_NOTICE(p_hwfn, "Failed to allocate qed_fcoe_info'\n");
-		return NULL;
+		return -ENOMEM;
 	}
 	INIT_LIST_HEAD(&p_fcoe_info->free_list);
-	return p_fcoe_info;
+
+	p_hwfn->p_fcoe_info = p_fcoe_info;
+	return 0;
 }
-void qed_fcoe_setup(struct qed_hwfn *p_hwfn, struct qed_fcoe_info *p_fcoe_info)
+void qed_fcoe_setup(struct qed_hwfn *p_hwfn)
 {
 	struct fcoe_task_context *p_task_ctx = NULL;
 	int rc;
 	u32 i;
-	spin_lock_init(&p_fcoe_info->lock);
+	spin_lock_init(&p_hwfn->p_fcoe_info->lock);
 	for (i = 0; i < p_hwfn->pf_params.fcoe_pf_params.num_tasks; i++) {
 		rc = qed_cxt_get_task_ctx(p_hwfn, i,
 					  QED_CTX_WORKING_MEM,
@@ -576,15 +578,15 @@ void qed_fcoe_setup(struct qed_hwfn *p_hwfn, struct qed_fcoe_info *p_fcoe_info)
 	}
 }
-void qed_fcoe_free(struct qed_hwfn *p_hwfn, struct qed_fcoe_info *p_fcoe_info)
+void qed_fcoe_free(struct qed_hwfn *p_hwfn)
 {
 	struct qed_fcoe_conn *p_conn = NULL;
-	if (!p_fcoe_info)
+	if (!p_hwfn->p_fcoe_info)
 		return;
-	while (!list_empty(&p_fcoe_info->free_list)) {
-		p_conn = list_first_entry(&p_fcoe_info->free_list,
+	while (!list_empty(&p_hwfn->p_fcoe_info->free_list)) {
+		p_conn = list_first_entry(&p_hwfn->p_fcoe_info->free_list,
 					  struct qed_fcoe_conn, list_entry);
 		if (!p_conn)
 			break;
@@ -592,7 +594,8 @@ void qed_fcoe_free(struct qed_hwfn *p_hwfn, struct qed_fcoe_info *p_fcoe_info)
 		qed_fcoe_free_connection(p_hwfn, p_conn);
 	}
-	kfree(p_fcoe_info);
+	kfree(p_hwfn->p_fcoe_info);
+	p_hwfn->p_fcoe_info = NULL;
 }
 static int
...
@@ -49,29 +49,21 @@ struct qed_fcoe_info {
 };
 #if IS_ENABLED(CONFIG_QED_FCOE)
-struct qed_fcoe_info *qed_fcoe_alloc(struct qed_hwfn *p_hwfn);
-void qed_fcoe_setup(struct qed_hwfn *p_hwfn, struct qed_fcoe_info *p_fcoe_info);
-void qed_fcoe_free(struct qed_hwfn *p_hwfn, struct qed_fcoe_info *p_fcoe_info);
+int qed_fcoe_alloc(struct qed_hwfn *p_hwfn);
+void qed_fcoe_setup(struct qed_hwfn *p_hwfn);
+void qed_fcoe_free(struct qed_hwfn *p_hwfn);
 void qed_get_protocol_stats_fcoe(struct qed_dev *cdev,
				  struct qed_mcp_fcoe_stats *stats);
 #else /* CONFIG_QED_FCOE */
-static inline struct qed_fcoe_info *
-qed_fcoe_alloc(struct qed_hwfn *p_hwfn)
+static inline int qed_fcoe_alloc(struct qed_hwfn *p_hwfn)
 {
-	return NULL;
+	return -EINVAL;
 }
-static inline void qed_fcoe_setup(struct qed_hwfn *p_hwfn,
-				  struct qed_fcoe_info *p_fcoe_info)
-{
-}
-
-static inline void qed_fcoe_free(struct qed_hwfn *p_hwfn,
-				 struct qed_fcoe_info *p_fcoe_info)
-{
-}
+static inline void qed_fcoe_setup(struct qed_hwfn *p_hwfn) {}
+static inline void qed_fcoe_free(struct qed_hwfn *p_hwfn) {}
 static inline void qed_get_protocol_stats_fcoe(struct qed_dev *cdev,
						struct qed_mcp_fcoe_stats *stats)
...
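The qed_fcoe.h hunk above (and the qed_iscsi.h/qed_ooo.h ones further down) keeps the usual Kconfig-stub arrangement: when the feature is compiled out, static inline stubs with the new one-argument signatures remain, and the stubbed alloc reports -EINVAL so callers such as qed_resc_alloc() need no #ifdefs of their own. A small standalone sketch of that arrangement, with an invented DEMO_CONFIG_FEATURE switch and demo_* names:

#include <errno.h>
#include <stdio.h>

#define DEMO_CONFIG_FEATURE 0	/* pretend Kconfig switch, off in this build */

#if DEMO_CONFIG_FEATURE
int demo_feature_alloc(void *p_hwfn);
void demo_feature_free(void *p_hwfn);
#else
/* Same names and signatures, but compiled down to stubs; the alloc stub
 * reports -EINVAL just like the disabled qed_fcoe/qed_iscsi/qed_ooo ones.
 */
static inline int demo_feature_alloc(void *p_hwfn)
{
	(void)p_hwfn;
	return -EINVAL;
}

static inline void demo_feature_free(void *p_hwfn) { (void)p_hwfn; }
#endif

int main(void)
{
	printf("stubbed alloc returns %d\n", demo_feature_alloc(NULL));
	demo_feature_free(NULL);
	return 0;
}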
@@ -158,6 +158,7 @@ int qed_init_alloc(struct qed_hwfn *p_hwfn)
 				     GFP_KERNEL);
 	if (!rt_data->init_val) {
 		kfree(rt_data->b_valid);
+		rt_data->b_valid = NULL;
 		return -ENOMEM;
 	}
@@ -167,7 +168,9 @@ int qed_init_alloc(struct qed_hwfn *p_hwfn)
 void qed_init_free(struct qed_hwfn *p_hwfn)
 {
 	kfree(p_hwfn->rt_data.init_val);
+	p_hwfn->rt_data.init_val = NULL;
 	kfree(p_hwfn->rt_data.b_valid);
+	p_hwfn->rt_data.b_valid = NULL;
 }
 static int qed_init_array_dmae(struct qed_hwfn *p_hwfn,
@@ -525,6 +528,7 @@ int qed_init_run(struct qed_hwfn *p_hwfn,
 	}
 	kfree(p_hwfn->unzip_buf);
+	p_hwfn->unzip_buf = NULL;
 	return rc;
 }
...
@@ -2328,6 +2328,7 @@ static void qed_int_sb_attn_free(struct qed_hwfn *p_hwfn)
 				  SB_ATTN_ALIGNED_SIZE(p_hwfn),
 				  p_sb->sb_attn, p_sb->sb_phys);
 	kfree(p_sb);
+	p_hwfn->p_sb_attn = NULL;
 }
 static void qed_int_sb_attn_setup(struct qed_hwfn *p_hwfn,
@@ -2679,6 +2680,7 @@ static void qed_int_sp_sb_free(struct qed_hwfn *p_hwfn)
 				  p_sb->sb_info.sb_virt,
 				  p_sb->sb_info.sb_phys);
 	kfree(p_sb);
+	p_hwfn->p_sp_sb = NULL;
 }
 static int qed_int_sp_sb_alloc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
@@ -3157,6 +3159,7 @@ static int qed_int_sp_dpc_alloc(struct qed_hwfn *p_hwfn)
 static void qed_int_sp_dpc_free(struct qed_hwfn *p_hwfn)
 {
 	kfree(p_hwfn->sp_dpc);
+	p_hwfn->sp_dpc = NULL;
 }
 int qed_int_alloc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
...
@@ -185,7 +185,7 @@ qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn,
 		DP_ERR(p_hwfn,
 		       "Cannot satisfy CQ amount. Queues requested %d, CQs available %d. Aborting function start\n",
 		       p_params->num_queues,
-		       p_hwfn->hw_info.resc_num[QED_ISCSI_CQ]);
+		       p_hwfn->hw_info.feat_num[QED_ISCSI_CQ]);
 		return -EINVAL;
 	}
@@ -818,29 +818,32 @@ void qed_iscsi_free_connection(struct qed_hwfn *p_hwfn,
 	kfree(p_conn);
 }
-struct qed_iscsi_info *qed_iscsi_alloc(struct qed_hwfn *p_hwfn)
+int qed_iscsi_alloc(struct qed_hwfn *p_hwfn)
 {
 	struct qed_iscsi_info *p_iscsi_info;
 	p_iscsi_info = kzalloc(sizeof(*p_iscsi_info), GFP_KERNEL);
 	if (!p_iscsi_info)
-		return NULL;
+		return -ENOMEM;
 	INIT_LIST_HEAD(&p_iscsi_info->free_list);
-	return p_iscsi_info;
+
+	p_hwfn->p_iscsi_info = p_iscsi_info;
+	return 0;
 }
-void qed_iscsi_setup(struct qed_hwfn *p_hwfn,
-		     struct qed_iscsi_info *p_iscsi_info)
+void qed_iscsi_setup(struct qed_hwfn *p_hwfn)
 {
-	spin_lock_init(&p_iscsi_info->lock);
+	spin_lock_init(&p_hwfn->p_iscsi_info->lock);
 }
-void qed_iscsi_free(struct qed_hwfn *p_hwfn,
-		    struct qed_iscsi_info *p_iscsi_info)
+void qed_iscsi_free(struct qed_hwfn *p_hwfn)
 {
 	struct qed_iscsi_conn *p_conn = NULL;
+	if (!p_hwfn->p_iscsi_info)
+		return;
+
 	while (!list_empty(&p_hwfn->p_iscsi_info->free_list)) {
 		p_conn = list_first_entry(&p_hwfn->p_iscsi_info->free_list,
 					  struct qed_iscsi_conn, list_entry);
@@ -850,7 +853,8 @@ void qed_iscsi_free(struct qed_hwfn *p_hwfn,
 		}
 	}
-	kfree(p_iscsi_info);
+	kfree(p_hwfn->p_iscsi_info);
+	p_hwfn->p_iscsi_info = NULL;
 }
 static void _qed_iscsi_get_tstats(struct qed_hwfn *p_hwfn,
...
@@ -57,13 +57,11 @@ extern const struct qed_ll2_ops qed_ll2_ops_pass;
 #endif
 #if IS_ENABLED(CONFIG_QED_ISCSI)
-struct qed_iscsi_info *qed_iscsi_alloc(struct qed_hwfn *p_hwfn);
+int qed_iscsi_alloc(struct qed_hwfn *p_hwfn);
-void qed_iscsi_setup(struct qed_hwfn *p_hwfn,
-		     struct qed_iscsi_info *p_iscsi_info);
+void qed_iscsi_setup(struct qed_hwfn *p_hwfn);
-void qed_iscsi_free(struct qed_hwfn *p_hwfn,
-		    struct qed_iscsi_info *p_iscsi_info);
+void qed_iscsi_free(struct qed_hwfn *p_hwfn);
 /**
  * @brief - Fills provided statistics struct with statistics.
@@ -74,12 +72,15 @@ void qed_iscsi_free(struct qed_hwfn *p_hwfn,
 void qed_get_protocol_stats_iscsi(struct qed_dev *cdev,
				   struct qed_mcp_iscsi_stats *stats);
 #else /* IS_ENABLED(CONFIG_QED_ISCSI) */
-static inline struct qed_iscsi_info *qed_iscsi_alloc(
-		struct qed_hwfn *p_hwfn) { return NULL; }
-static inline void qed_iscsi_setup(struct qed_hwfn *p_hwfn,
-				   struct qed_iscsi_info *p_iscsi_info) {}
-static inline void qed_iscsi_free(struct qed_hwfn *p_hwfn,
-				  struct qed_iscsi_info *p_iscsi_info) {}
+static inline int qed_iscsi_alloc(struct qed_hwfn *p_hwfn)
+{
+	return -EINVAL;
+}
+
+static inline void qed_iscsi_setup(struct qed_hwfn *p_hwfn) {}
+static inline void qed_iscsi_free(struct qed_hwfn *p_hwfn) {}
 static inline void
 qed_get_protocol_stats_iscsi(struct qed_dev *cdev,
			      struct qed_mcp_iscsi_stats *stats) {}
...
@@ -1920,7 +1920,7 @@ void qed_ll2_release_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
 	mutex_unlock(&p_ll2_conn->mutex);
 }
-struct qed_ll2_info *qed_ll2_alloc(struct qed_hwfn *p_hwfn)
+int qed_ll2_alloc(struct qed_hwfn *p_hwfn)
 {
 	struct qed_ll2_info *p_ll2_connections;
 	u8 i;
@@ -1930,28 +1930,31 @@ struct qed_ll2_info *qed_ll2_alloc(struct qed_hwfn *p_hwfn)
 				    sizeof(struct qed_ll2_info), GFP_KERNEL);
 	if (!p_ll2_connections) {
 		DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_ll2'\n");
-		return NULL;
+		return -ENOMEM;
 	}
 	for (i = 0; i < QED_MAX_NUM_OF_LL2_CONNECTIONS; i++)
 		p_ll2_connections[i].my_id = i;
-	return p_ll2_connections;
+	p_hwfn->p_ll2_info = p_ll2_connections;
+	return 0;
 }
-void qed_ll2_setup(struct qed_hwfn *p_hwfn,
-		   struct qed_ll2_info *p_ll2_connections)
+void qed_ll2_setup(struct qed_hwfn *p_hwfn)
 {
 	int i;
 	for (i = 0; i < QED_MAX_NUM_OF_LL2_CONNECTIONS; i++)
-		mutex_init(&p_ll2_connections[i].mutex);
+		mutex_init(&p_hwfn->p_ll2_info[i].mutex);
 }
-void qed_ll2_free(struct qed_hwfn *p_hwfn,
-		  struct qed_ll2_info *p_ll2_connections)
+void qed_ll2_free(struct qed_hwfn *p_hwfn)
 {
-	kfree(p_ll2_connections);
+	if (!p_hwfn->p_ll2_info)
+		return;
+
+	kfree(p_hwfn->p_ll2_info);
+	p_hwfn->p_ll2_info = NULL;
 }
 static void _qed_ll2_get_tstats(struct qed_hwfn *p_hwfn,
...
@@ -306,27 +306,24 @@ int qed_ll2_get_stats(struct qed_hwfn *p_hwfn,
  *
  * @param p_hwfn
  *
- * @return pointer to alocated qed_ll2_info or NULL
+ * @return int
  */
-struct qed_ll2_info *qed_ll2_alloc(struct qed_hwfn *p_hwfn);
+int qed_ll2_alloc(struct qed_hwfn *p_hwfn);
 /**
  * @brief qed_ll2_setup - Inits LL2 connections set
  *
  * @param p_hwfn
- * @param p_ll2_connections
  *
  */
-void qed_ll2_setup(struct qed_hwfn *p_hwfn,
-		   struct qed_ll2_info *p_ll2_connections);
+void qed_ll2_setup(struct qed_hwfn *p_hwfn);
 /**
  * @brief qed_ll2_free - Releases LL2 connections set
  *
  * @param p_hwfn
- * @param p_ll2_connections
  *
  */
-void qed_ll2_free(struct qed_hwfn *p_hwfn,
-		  struct qed_ll2_info *p_ll2_connections);
+void qed_ll2_free(struct qed_hwfn *p_hwfn);
 #endif
@@ -177,6 +177,7 @@ int qed_mcp_free(struct qed_hwfn *p_hwfn)
 	}
 	kfree(p_hwfn->mcp_info);
+	p_hwfn->mcp_info = NULL;
 	return 0;
 }
...
@@ -99,7 +99,7 @@ void qed_ooo_save_history_entry(struct qed_hwfn *p_hwfn,
 	p_history->head_idx++;
 }
-struct qed_ooo_info *qed_ooo_alloc(struct qed_hwfn *p_hwfn)
+int qed_ooo_alloc(struct qed_hwfn *p_hwfn)
 {
 	u16 max_num_archipelagos = 0, cid_base;
 	struct qed_ooo_info *p_ooo_info;
@@ -109,7 +109,7 @@ struct qed_ooo_info *qed_ooo_alloc(struct qed_hwfn *p_hwfn)
 	if (p_hwfn->hw_info.personality != QED_PCI_ISCSI) {
 		DP_NOTICE(p_hwfn,
 			  "Failed to allocate qed_ooo_info: unknown personality\n");
-		return NULL;
+		return -EINVAL;
 	}
 	max_num_archipelagos = p_hwfn->pf_params.iscsi_pf_params.num_cons;
@@ -119,12 +119,12 @@ struct qed_ooo_info *qed_ooo_alloc(struct qed_hwfn *p_hwfn)
 	if (!max_num_archipelagos) {
 		DP_NOTICE(p_hwfn,
 			  "Failed to allocate qed_ooo_info: unknown amount of connections\n");
-		return NULL;
+		return -EINVAL;
 	}
 	p_ooo_info = kzalloc(sizeof(*p_ooo_info), GFP_KERNEL);
 	if (!p_ooo_info)
-		return NULL;
+		return -ENOMEM;
 	p_ooo_info->cid_base = cid_base;
 	p_ooo_info->max_num_archipelagos = max_num_archipelagos;
@@ -164,7 +164,8 @@ struct qed_ooo_info *qed_ooo_alloc(struct qed_hwfn *p_hwfn)
 	p_ooo_info->ooo_history.num_of_cqes = QED_MAX_NUM_OOO_HISTORY_ENTRIES;
-	return p_ooo_info;
+	p_hwfn->p_ooo_info = p_ooo_info;
+	return 0;
 no_history_mem:
 	kfree(p_ooo_info->p_archipelagos_mem);
@@ -172,7 +173,7 @@ struct qed_ooo_info *qed_ooo_alloc(struct qed_hwfn *p_hwfn)
 	kfree(p_ooo_info->p_isles_mem);
 no_isles_mem:
 	kfree(p_ooo_info);
-	return NULL;
+	return -ENOMEM;
 }
 void qed_ooo_release_connection_isles(struct qed_hwfn *p_hwfn,
@@ -249,19 +250,23 @@ void qed_ooo_release_all_isles(struct qed_hwfn *p_hwfn,
 				  &p_ooo_info->free_buffers_list);
 }
-void qed_ooo_setup(struct qed_hwfn *p_hwfn, struct qed_ooo_info *p_ooo_info)
+void qed_ooo_setup(struct qed_hwfn *p_hwfn)
 {
-	qed_ooo_release_all_isles(p_hwfn, p_ooo_info);
-	memset(p_ooo_info->ooo_history.p_cqes, 0,
-	       p_ooo_info->ooo_history.num_of_cqes *
+	qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
+	memset(p_hwfn->p_ooo_info->ooo_history.p_cqes, 0,
+	       p_hwfn->p_ooo_info->ooo_history.num_of_cqes *
 	       sizeof(struct ooo_opaque));
-	p_ooo_info->ooo_history.head_idx = 0;
+	p_hwfn->p_ooo_info->ooo_history.head_idx = 0;
 }
-void qed_ooo_free(struct qed_hwfn *p_hwfn, struct qed_ooo_info *p_ooo_info)
+void qed_ooo_free(struct qed_hwfn *p_hwfn)
 {
+	struct qed_ooo_info *p_ooo_info = p_hwfn->p_ooo_info;
 	struct qed_ooo_buffer *p_buffer;
+	if (!p_ooo_info)
+		return;
+
 	qed_ooo_release_all_isles(p_hwfn, p_ooo_info);
 	while (!list_empty(&p_ooo_info->free_buffers_list)) {
 		p_buffer = list_first_entry(&p_ooo_info->free_buffers_list,
@@ -282,6 +287,7 @@ void qed_ooo_free(struct qed_hwfn *p_hwfn, struct qed_ooo_info *p_ooo_info)
 	kfree(p_ooo_info->p_archipelagos_mem);
 	kfree(p_ooo_info->ooo_history.p_cqes);
 	kfree(p_ooo_info);
+	p_hwfn->p_ooo_info = NULL;
 }
 void qed_ooo_put_free_buffer(struct qed_hwfn *p_hwfn,
...
@@ -88,7 +88,11 @@ void qed_ooo_save_history_entry(struct qed_hwfn *p_hwfn,
				struct qed_ooo_info *p_ooo_info,
				struct ooo_opaque *p_cqe);
-struct qed_ooo_info *qed_ooo_alloc(struct qed_hwfn *p_hwfn);
+int qed_ooo_alloc(struct qed_hwfn *p_hwfn);
+
+void qed_ooo_setup(struct qed_hwfn *p_hwfn);
+
+void qed_ooo_free(struct qed_hwfn *p_hwfn);
 void qed_ooo_release_connection_isles(struct qed_hwfn *p_hwfn,
				      struct qed_ooo_info *p_ooo_info,
@@ -97,10 +101,6 @@ void qed_ooo_release_connection_isles(struct qed_hwfn *p_hwfn,
 void qed_ooo_release_all_isles(struct qed_hwfn *p_hwfn,
			       struct qed_ooo_info *p_ooo_info);
-void qed_ooo_setup(struct qed_hwfn *p_hwfn, struct qed_ooo_info *p_ooo_info);
-
-void qed_ooo_free(struct qed_hwfn *p_hwfn, struct qed_ooo_info *p_ooo_info);
-
 void qed_ooo_put_free_buffer(struct qed_hwfn *p_hwfn,
			     struct qed_ooo_info *p_ooo_info,
			     struct qed_ooo_buffer *p_buffer);
@@ -140,8 +140,14 @@ static inline void qed_ooo_save_history_entry(struct qed_hwfn *p_hwfn,
					      struct qed_ooo_info *p_ooo_info,
					      struct ooo_opaque *p_cqe) {}
-static inline struct qed_ooo_info *qed_ooo_alloc(
-		struct qed_hwfn *p_hwfn) { return NULL; }
+static inline int qed_ooo_alloc(struct qed_hwfn *p_hwfn)
+{
+	return -EINVAL;
+}
+
+static inline void qed_ooo_setup(struct qed_hwfn *p_hwfn) {}
+static inline void qed_ooo_free(struct qed_hwfn *p_hwfn) {}
 static inline void
 qed_ooo_release_connection_isles(struct qed_hwfn *p_hwfn,
@@ -152,12 +158,6 @@ static inline void qed_ooo_release_all_isles(struct qed_hwfn *p_hwfn,
					     struct qed_ooo_info *p_ooo_info)
 {}
-static inline void qed_ooo_setup(struct qed_hwfn *p_hwfn,
-				 struct qed_ooo_info *p_ooo_info) {}
-
-static inline void qed_ooo_free(struct qed_hwfn *p_hwfn,
-				struct qed_ooo_info *p_ooo_info) {}
-
 static inline void qed_ooo_put_free_buffer(struct qed_hwfn *p_hwfn,
					    struct qed_ooo_info *p_ooo_info,
					    struct qed_ooo_buffer *p_buffer) {}
...
@@ -270,28 +270,23 @@ void qed_spq_return_entry(struct qed_hwfn *p_hwfn,
  * @param p_hwfn
  * @param num_elem number of elements in the eq
  *
- * @return struct qed_eq* - a newly allocated structure; NULL upon error.
+ * @return int
  */
-struct qed_eq *qed_eq_alloc(struct qed_hwfn *p_hwfn,
-			    u16 num_elem);
+int qed_eq_alloc(struct qed_hwfn *p_hwfn, u16 num_elem);
 /**
- * @brief qed_eq_setup - Reset the SPQ to its start state.
+ * @brief qed_eq_setup - Reset the EQ to its start state.
  *
  * @param p_hwfn
- * @param p_eq
 */
-void qed_eq_setup(struct qed_hwfn *p_hwfn,
-		  struct qed_eq *p_eq);
+void qed_eq_setup(struct qed_hwfn *p_hwfn);
 /**
- * @brief qed_eq_deallocate - deallocates the given EQ struct.
+ * @brief qed_eq_free - deallocates the given EQ struct.
  *
  * @param p_hwfn
- * @param p_eq
 */
-void qed_eq_free(struct qed_hwfn *p_hwfn,
-		 struct qed_eq *p_eq);
+void qed_eq_free(struct qed_hwfn *p_hwfn);
 /**
  * @brief qed_eq_prod_update - update the FW with default EQ producer
@@ -342,28 +337,23 @@ u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn);
  *
  * @param p_hwfn
  *
- * @return struct qed_eq* - a newly allocated structure; NULL upon error.
+ * @return int
  */
-struct qed_consq *qed_consq_alloc(struct qed_hwfn *p_hwfn);
+int qed_consq_alloc(struct qed_hwfn *p_hwfn);
 /**
- * @brief qed_consq_setup - Reset the ConsQ to its start
- * state.
+ * @brief qed_consq_setup - Reset the ConsQ to its start state.
  *
  * @param p_hwfn
- * @param p_eq
 */
-void qed_consq_setup(struct qed_hwfn *p_hwfn,
-		     struct qed_consq *p_consq);
+void qed_consq_setup(struct qed_hwfn *p_hwfn);
 /**
  * @brief qed_consq_free - deallocates the given ConsQ struct.
  *
  * @param p_hwfn
- * @param p_eq
 */
-void qed_consq_free(struct qed_hwfn *p_hwfn,
-		    struct qed_consq *p_consq);
+void qed_consq_free(struct qed_hwfn *p_hwfn);
 /**
  * @file
...
@@ -403,14 +403,14 @@ int qed_eq_completion(struct qed_hwfn *p_hwfn, void *cookie)
 	return rc;
 }
-struct qed_eq *qed_eq_alloc(struct qed_hwfn *p_hwfn, u16 num_elem)
+int qed_eq_alloc(struct qed_hwfn *p_hwfn, u16 num_elem)
 {
 	struct qed_eq *p_eq;
 	/* Allocate EQ struct */
 	p_eq = kzalloc(sizeof(*p_eq), GFP_KERNEL);
 	if (!p_eq)
-		return NULL;
+		return -ENOMEM;
 	/* Allocate and initialize EQ chain*/
 	if (qed_chain_alloc(p_hwfn->cdev,
@@ -426,24 +426,28 @@ struct qed_eq *qed_eq_alloc(struct qed_hwfn *p_hwfn, u16 num_elem)
 	qed_int_register_cb(p_hwfn, qed_eq_completion,
 			    p_eq, &p_eq->eq_sb_index, &p_eq->p_fw_cons);
-	return p_eq;
+	p_hwfn->p_eq = p_eq;
+	return 0;
 eq_allocate_fail:
-	qed_eq_free(p_hwfn, p_eq);
-	return NULL;
+	kfree(p_eq);
+	return -ENOMEM;
 }
-void qed_eq_setup(struct qed_hwfn *p_hwfn, struct qed_eq *p_eq)
+void qed_eq_setup(struct qed_hwfn *p_hwfn)
 {
-	qed_chain_reset(&p_eq->chain);
+	qed_chain_reset(&p_hwfn->p_eq->chain);
 }
-void qed_eq_free(struct qed_hwfn *p_hwfn, struct qed_eq *p_eq)
+void qed_eq_free(struct qed_hwfn *p_hwfn)
 {
-	if (!p_eq)
+	if (!p_hwfn->p_eq)
 		return;
-	qed_chain_free(p_hwfn->cdev, &p_eq->chain);
-	kfree(p_eq);
+
+	qed_chain_free(p_hwfn->cdev, &p_hwfn->p_eq->chain);
+
+	kfree(p_hwfn->p_eq);
+	p_hwfn->p_eq = NULL;
 }
 /***************************************************************************
@@ -583,8 +587,8 @@ void qed_spq_free(struct qed_hwfn *p_hwfn)
 	}
 	qed_chain_free(p_hwfn->cdev, &p_spq->chain);
-	;
 	kfree(p_spq);
+	p_hwfn->p_spq = NULL;
 }
 int qed_spq_get_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry **pp_ent)
@@ -934,14 +938,14 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn,
 	return rc;
 }
-struct qed_consq *qed_consq_alloc(struct qed_hwfn *p_hwfn)
+int qed_consq_alloc(struct qed_hwfn *p_hwfn)
 {
 	struct qed_consq *p_consq;
 	/* Allocate ConsQ struct */
 	p_consq = kzalloc(sizeof(*p_consq), GFP_KERNEL);
 	if (!p_consq)
-		return NULL;
+		return -ENOMEM;
 	/* Allocate and initialize EQ chain*/
 	if (qed_chain_alloc(p_hwfn->cdev,
@@ -952,22 +956,26 @@ struct qed_consq *qed_consq_alloc(struct qed_hwfn *p_hwfn)
 			    0x80, &p_consq->chain))
 		goto consq_allocate_fail;
-	return p_consq;
+	p_hwfn->p_consq = p_consq;
+	return 0;
 consq_allocate_fail:
-	qed_consq_free(p_hwfn, p_consq);
-	return NULL;
+	kfree(p_consq);
+	return -ENOMEM;
 }
-void qed_consq_setup(struct qed_hwfn *p_hwfn, struct qed_consq *p_consq)
+void qed_consq_setup(struct qed_hwfn *p_hwfn)
 {
-	qed_chain_reset(&p_consq->chain);
+	qed_chain_reset(&p_hwfn->p_consq->chain);
 }
-void qed_consq_free(struct qed_hwfn *p_hwfn, struct qed_consq *p_consq)
+void qed_consq_free(struct qed_hwfn *p_hwfn)
 {
-	if (!p_consq)
+	if (!p_hwfn->p_consq)
 		return;
-	qed_chain_free(p_hwfn->cdev, &p_consq->chain);
-	kfree(p_consq);
+
+	qed_chain_free(p_hwfn->cdev, &p_hwfn->p_consq->chain);
+	kfree(p_hwfn->p_consq);
+	p_hwfn->p_consq = NULL;
 }
@@ -197,7 +197,6 @@ struct qede_dev {
 #define QEDE_TSS_COUNT(edev)	((edev)->num_queues - (edev)->fp_num_rx)
 	struct qed_int_info		int_info;
-	unsigned char			primary_mac[ETH_ALEN];
 	/* Smaller private varaiant of the RTNL lock */
 	struct mutex			qede_lock;
...
@@ -506,6 +506,14 @@ static int qede_set_link_ksettings(struct net_device *dev,
 		params.autoneg = false;
 		params.forced_speed = base->speed;
 		switch (base->speed) {
+		case SPEED_1000:
+			if (!(current_link.supported_caps &
+			      QED_LM_1000baseT_Full_BIT)) {
+				DP_INFO(edev, "1G speed not supported\n");
+				return -EINVAL;
+			}
+			params.adv_speeds = QED_LM_1000baseT_Full_BIT;
+			break;
 		case SPEED_10000:
 			if (!(current_link.supported_caps &
 			      QED_LM_10000baseKR_Full_BIT)) {
@@ -1297,7 +1305,7 @@ static int qede_selftest_transmit_traffic(struct qede_dev *edev,
 	}
 	/* Fill the entry in the SW ring and the BDs in the FW ring */
-	idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
+	idx = txq->sw_tx_prod;
 	txq->sw_tx_ring.skbs[idx].skb = skb;
 	first_bd = qed_chain_produce(&txq->tx_pbl);
 	memset(first_bd, 0, sizeof(*first_bd));
@@ -1317,7 +1325,7 @@ static int qede_selftest_transmit_traffic(struct qede_dev *edev,
 	/* update the first BD with the actual num BDs */
 	first_bd->data.nbds = 1;
-	txq->sw_tx_prod++;
+	txq->sw_tx_prod = (txq->sw_tx_prod + 1) % txq->num_tx_buffers;
 	/* 'next page' entries are counted in the producer value */
 	val = cpu_to_le16(qed_chain_get_prod_idx(&txq->tx_pbl));
 	txq->tx_db.data.bd_prod = val;
@@ -1351,7 +1359,7 @@ static int qede_selftest_transmit_traffic(struct qede_dev *edev,
 	first_bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl);
 	dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
 			 BD_UNMAP_LEN(first_bd), DMA_TO_DEVICE);
-	txq->sw_tx_cons++;
+	txq->sw_tx_cons = (txq->sw_tx_cons + 1) % txq->num_tx_buffers;
 	txq->sw_tx_ring.skbs[idx].skb = NULL;
 	return 0;
...
@@ -495,12 +495,16 @@ void qede_force_mac(void *dev, u8 *mac, bool forced)
 {
 	struct qede_dev *edev = dev;
+	__qede_lock(edev);
+
 	/* MAC hints take effect only if we haven't set one already */
-	if (is_valid_ether_addr(edev->ndev->dev_addr) && !forced)
+	if (is_valid_ether_addr(edev->ndev->dev_addr) && !forced) {
+		__qede_unlock(edev);
 		return;
+	}
 	ether_addr_copy(edev->ndev->dev_addr, mac);
-	ether_addr_copy(edev->primary_mac, mac);
+	__qede_unlock(edev);
 }
 void qede_fill_rss_params(struct qede_dev *edev,
@@ -1061,41 +1065,51 @@ int qede_set_mac_addr(struct net_device *ndev, void *p)
 {
 	struct qede_dev *edev = netdev_priv(ndev);
 	struct sockaddr *addr = p;
-	int rc;
+	int rc = 0;
-	ASSERT_RTNL(); /* @@@TBD To be removed */
-	DP_INFO(edev, "Set_mac_addr called\n");
+	/* Make sure the state doesn't transition while changing the MAC.
+	 * Also, all flows accessing the dev_addr field are doing that under
+	 * this lock.
+	 */
+	__qede_lock(edev);
 	if (!is_valid_ether_addr(addr->sa_data)) {
 		DP_NOTICE(edev, "The MAC address is not valid\n");
-		return -EFAULT;
+		rc = -EFAULT;
+		goto out;
 	}
 	if (!edev->ops->check_mac(edev->cdev, addr->sa_data)) {
-		DP_NOTICE(edev, "qed prevents setting MAC\n");
-		return -EINVAL;
+		DP_NOTICE(edev, "qed prevents setting MAC %pM\n",
+			  addr->sa_data);
+		rc = -EINVAL;
+		goto out;
+	}
+
+	if (edev->state == QEDE_STATE_OPEN) {
+		/* Remove the previous primary mac */
+		rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL,
+					   ndev->dev_addr);
+		if (rc)
+			goto out;
 	}
 	ether_addr_copy(ndev->dev_addr, addr->sa_data);
+	DP_INFO(edev, "Setting device MAC to %pM\n", addr->sa_data);
-	if (!netif_running(ndev)) {
-		DP_NOTICE(edev, "The device is currently down\n");
-		return 0;
+	if (edev->state != QEDE_STATE_OPEN) {
+		DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
+			   "The device is currently down\n");
+		goto out;
 	}
-	/* Remove the previous primary mac */
-	rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL,
-				   edev->primary_mac);
-	if (rc)
-		return rc;
-	edev->ops->common->update_mac(edev->cdev, addr->sa_data);
+	edev->ops->common->update_mac(edev->cdev, ndev->dev_addr);
-	/* Add MAC filter according to the new unicast HW MAC address */
-	ether_addr_copy(edev->primary_mac, ndev->dev_addr);
-	return qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD,
-				     edev->primary_mac);
+	rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD,
+				   ndev->dev_addr);
+out:
+	__qede_unlock(edev);
+	return rc;
 }
 static int
@@ -1200,7 +1214,7 @@ void qede_config_rx_mode(struct net_device *ndev)
 	 * (configrue / leave the primary mac)
 	 */
 	rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_REPLACE,
-				   edev->primary_mac);
+				   edev->ndev->dev_addr);
 	if (rc)
 		goto out;
...
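The qede_filter.c changes above drop the cached primary_mac copy and treat ndev->dev_addr as the single source of truth, guarded by __qede_lock(); qede_set_mac_addr() now funnels every failure through one exit label so the lock is released exactly once. Below is a minimal pthread-based sketch of that locking shape; the demo_* names are invented for illustration and are not the qede API:

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>

struct demo_dev {
	pthread_mutex_t lock;
	unsigned char addr[6];
	bool open;
};

static int validate(const unsigned char *addr)
{
	/* reject multicast addresses, as a stand-in for validity checks */
	return (addr[0] & 1) ? -EFAULT : 0;
}

static int apply(struct demo_dev *dev)
{
	(void)dev;	/* pretend to program the hardware */
	return 0;
}

static int demo_set_addr(struct demo_dev *dev, const unsigned char *addr)
{
	int rc = 0;

	pthread_mutex_lock(&dev->lock);

	rc = validate(addr);
	if (rc)
		goto out;			/* invalid address */

	for (int i = 0; i < 6; i++)		/* commit the new address */
		dev->addr[i] = addr[i];

	if (!dev->open)
		goto out;			/* nothing to program while down */

	rc = apply(dev);			/* push it to the device */
out:
	pthread_mutex_unlock(&dev->lock);	/* released on every path */
	return rc;
}

int main(void)
{
	struct demo_dev dev = { .lock = PTHREAD_MUTEX_INITIALIZER, .open = true };
	unsigned char mac[6] = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };

	return demo_set_addr(&dev, mac);
}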
@@ -99,7 +99,7 @@ int qede_alloc_rx_buffer(struct qede_rx_queue *rxq, bool allow_lazy)
 /* Unmap the data and free skb */
 int qede_free_tx_pkt(struct qede_dev *edev, struct qede_tx_queue *txq, int *len)
 {
-	u16 idx = txq->sw_tx_cons & NUM_TX_BDS_MAX;
+	u16 idx = txq->sw_tx_cons;
 	struct sk_buff *skb = txq->sw_tx_ring.skbs[idx].skb;
 	struct eth_tx_1st_bd *first_bd;
 	struct eth_tx_bd *tx_data_bd;
@@ -156,7 +156,7 @@ static void qede_free_failed_tx_pkt(struct qede_tx_queue *txq,
 				    struct eth_tx_1st_bd *first_bd,
 				    int nbd, bool data_split)
 {
-	u16 idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
+	u16 idx = txq->sw_tx_prod;
 	struct sk_buff *skb = txq->sw_tx_ring.skbs[idx].skb;
 	struct eth_tx_bd *tx_data_bd;
 	int i, split_bd_len = 0;
@@ -333,8 +333,8 @@ static int qede_xdp_xmit(struct qede_dev *edev, struct qede_fastpath *fp,
 			 struct sw_rx_data *metadata, u16 padding, u16 length)
 {
 	struct qede_tx_queue *txq = fp->xdp_tx;
-	u16 idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
 	struct eth_tx_1st_bd *first_bd;
+	u16 idx = txq->sw_tx_prod;
 	if (!qed_chain_get_elem_left(&txq->tx_pbl)) {
 		txq->stopped_cnt++;
@@ -363,7 +363,7 @@ static int qede_xdp_xmit(struct qede_dev *edev, struct qede_fastpath *fp,
 	txq->sw_tx_ring.xdp[idx].page = metadata->data;
 	txq->sw_tx_ring.xdp[idx].mapping = metadata->mapping;
-	txq->sw_tx_prod++;
+	txq->sw_tx_prod = (txq->sw_tx_prod + 1) % txq->num_tx_buffers;
 	/* Mark the fastpath for future XDP doorbell */
 	fp->xdp_xmit = 1;
@@ -393,14 +393,14 @@ static void qede_xdp_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq)
 	while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) {
 		qed_chain_consume(&txq->tx_pbl);
-		idx = txq->sw_tx_cons & NUM_TX_BDS_MAX;
+		idx = txq->sw_tx_cons;
 		dma_unmap_page(&edev->pdev->dev,
 			       txq->sw_tx_ring.xdp[idx].mapping,
 			       PAGE_SIZE, DMA_BIDIRECTIONAL);
 		__free_page(txq->sw_tx_ring.xdp[idx].page);
-		txq->sw_tx_cons++;
+		txq->sw_tx_cons = (txq->sw_tx_cons + 1) % txq->num_tx_buffers;
 		txq->xmit_pkts++;
 	}
 }
@@ -430,7 +430,7 @@ static int qede_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq)
 		bytes_compl += len;
 		pkts_compl++;
-		txq->sw_tx_cons++;
+		txq->sw_tx_cons = (txq->sw_tx_cons + 1) % txq->num_tx_buffers;
 		txq->xmit_pkts++;
 	}
@@ -1455,7 +1455,7 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 #endif
 	/* Fill the entry in the SW ring and the BDs in the FW ring */
-	idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
+	idx = txq->sw_tx_prod;
 	txq->sw_tx_ring.skbs[idx].skb = skb;
 	first_bd = (struct eth_tx_1st_bd *)
 		   qed_chain_produce(&txq->tx_pbl);
@@ -1639,7 +1639,7 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	/* Advance packet producer only before sending the packet since mapping
 	 * of pages may fail.
 	 */
-	txq->sw_tx_prod++;
+	txq->sw_tx_prod = (txq->sw_tx_prod + 1) % txq->num_tx_buffers;
 	/* 'next page' entries are counted in the producer value */
 	txq->tx_db.data.bd_prod =
...
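The sw_tx_prod/sw_tx_cons hunks above replace mask-based wrapping (which only works when the ring size is a power of two) with a modulo on txq->num_tx_buffers, matching the qede_main.c change that sizes the rings from num_tx_buffers instead of TX_RING_SIZE. A standalone sketch of the difference, with invented constants and helper names:

#include <assert.h>
#include <stdint.h>

/* Illustrative only: two ways to wrap a software ring index. The old qede
 * code masked with a power-of-two-minus-one constant; the new code wraps
 * modulo the actual ring size, so non-power-of-two sizes work too.
 */
#define OLD_RING_SIZE 8192U			/* stand-in for TX_RING_SIZE */
#define OLD_IDX_MASK  (OLD_RING_SIZE - 1)	/* stand-in for a NUM_TX_BDS_MAX-style mask */

static uint16_t advance_masked(uint16_t prod)
{
	/* Correct only when the ring size is a power of two. */
	return (uint16_t)((prod + 1) & OLD_IDX_MASK);
}

static uint16_t advance_modulo(uint16_t prod, uint16_t num_tx_buffers)
{
	/* Works for any configured ring size, e.g. one set via ethtool -G. */
	return (uint16_t)((prod + 1) % num_tx_buffers);
}

int main(void)
{
	uint16_t idx = 0;

	/* With a 1000-entry ring, masking by 8191 never wraps at 1000 ... */
	for (int i = 0; i < 1000; i++)
		idx = advance_masked(idx);
	assert(idx == 1000);	/* points past the end of a 1000-entry ring */

	/* ... while the modulo form wraps back to slot 0 as expected. */
	idx = 0;
	for (int i = 0; i < 1000; i++)
		idx = advance_modulo(idx, 1000);
	assert(idx == 0);
	return 0;
}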
@@ -618,6 +618,12 @@ static struct qede_dev *qede_alloc_etherdev(struct qed_dev *cdev,
 	memset(&edev->stats, 0, sizeof(edev->stats));
 	memcpy(&edev->dev_info, info, sizeof(*info));
+	/* As ethtool doesn't have the ability to show WoL behavior as
+	 * 'default', if device supports it declare it's enabled.
+	 */
+	if (edev->dev_info.common.wol_support)
+		edev->wol_enabled = true;
+
 	INIT_LIST_HEAD(&edev->vlan_list);
 	return edev;
@@ -1066,12 +1072,15 @@ static int qede_set_num_queues(struct qede_dev *edev)
 	return rc;
 }
-static void qede_free_mem_sb(struct qede_dev *edev,
-			     struct qed_sb_info *sb_info)
+static void qede_free_mem_sb(struct qede_dev *edev, struct qed_sb_info *sb_info,
+			     u16 sb_id)
 {
-	if (sb_info->sb_virt)
+	if (sb_info->sb_virt) {
+		edev->ops->common->sb_release(edev->cdev, sb_info, sb_id);
 		dma_free_coherent(&edev->pdev->dev, sizeof(*sb_info->sb_virt),
 				  (void *)sb_info->sb_virt, sb_info->sb_phys);
+		memset(sb_info, 0, sizeof(*sb_info));
+	}
 }
 /* This function allocates fast-path status block memory */
@@ -1298,12 +1307,12 @@ static int qede_alloc_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
 	/* Allocate the parallel driver ring for Tx buffers */
 	if (txq->is_xdp) {
-		size = sizeof(*txq->sw_tx_ring.xdp) * TX_RING_SIZE;
+		size = sizeof(*txq->sw_tx_ring.xdp) * txq->num_tx_buffers;
 		txq->sw_tx_ring.xdp = kzalloc(size, GFP_KERNEL);
 		if (!txq->sw_tx_ring.xdp)
 			goto err;
 	} else {
-		size = sizeof(*txq->sw_tx_ring.skbs) * TX_RING_SIZE;
+		size = sizeof(*txq->sw_tx_ring.skbs) * txq->num_tx_buffers;
 		txq->sw_tx_ring.skbs = kzalloc(size, GFP_KERNEL);
 		if (!txq->sw_tx_ring.skbs)
 			goto err;
@@ -1313,7 +1322,7 @@ static int qede_alloc_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
 			    QED_CHAIN_USE_TO_CONSUME_PRODUCE,
 			    QED_CHAIN_MODE_PBL,
 			    QED_CHAIN_CNT_TYPE_U16,
-			    TX_RING_SIZE,
+			    txq->num_tx_buffers,
 			    sizeof(*p_virt), &txq->tx_pbl);
 	if (rc)
 		goto err;
@@ -1328,7 +1337,7 @@ static int qede_alloc_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
 /* This function frees all memory of a single fp */
 static void qede_free_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp)
 {
-	qede_free_mem_sb(edev, fp->sb_info);
+	qede_free_mem_sb(edev, fp->sb_info, fp->id);
 	if (fp->type & QEDE_FASTPATH_RX)
 		qede_free_mem_rxq(edev, fp->rxq);
@@ -1890,9 +1899,10 @@ static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode,
 	if (!is_locked)
 		__qede_lock(edev);
-	qede_roce_dev_event_close(edev);
 	edev->state = QEDE_STATE_CLOSED;
+	qede_roce_dev_event_close(edev);
+
 	/* Close OS Tx */
 	netif_tx_disable(edev->ndev);
 	netif_carrier_off(edev->ndev);
@@ -1988,9 +1998,6 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode,
 		goto err4;
 	DP_INFO(edev, "Start VPORT, RXQ and TXQ succeeded\n");
-	/* Add primary mac and set Rx filters */
-	ether_addr_copy(edev->primary_mac, edev->ndev->dev_addr);
-
 	/* Program un-configured VLANs */
 	qede_configure_vlan_filters(edev);
...