Commit 0e0c52d6 authored by Jakub Kicinski

Merge branch 'introduce-define_flex-macro'

Przemek Kitszel says:

====================
introduce DEFINE_FLEX() macro

Add the DEFINE_FLEX() macro, which helps with on-stack allocation of
structures that have a trailing flexible array member.
Expose the __struct_size() macro, which reads the size of the data
allocated by DEFINE_FLEX().

Accompany the introduction of the new macros with actual usage in the
ice driver - hence targeting the netdev tree.

Obvious benefits include simpler resulting code, less heap usage, and
less error checking. Less obvious is that the compiler has more room
to optimize, so as a whole, even with more data on the stack, we end
up with a better (smaller) overall report from bloat-o-meter:
add/remove: 8/6 grow/shrink: 7/18 up/down: 2211/-2270 (-59)
(individual results in each patch).
====================

Link: https://lore.kernel.org/r/20230912115937.1645707-1-przemyslaw.kitszel@intel.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents e6435973 e268b972
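
For readers skimming the diff, the conversion pattern applied across the series looks roughly like the sketch below. The struct ice_aqc_example type, its elem flexible array member, its num_elems field, and ice_aq_send_example() are hypothetical stand-ins, not identifiers from the ice driver; the real call sites follow in the hunks.

/* Before: buffer sized with struct_size() and allocated on the heap. */
static int example_before(struct ice_hw *hw)
{
	struct ice_aqc_example *buf;
	u16 buf_size;
	int status;

	buf_size = struct_size(buf, elem, 1);
	buf = kzalloc(buf_size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	buf->num_elems = cpu_to_le16(1);
	status = ice_aq_send_example(hw, buf, buf_size);

	kfree(buf);
	return status;
}

/* After: zeroed on-stack buffer; no allocation, no -ENOMEM path, no free. */
static int example_after(struct ice_hw *hw)
{
	DEFINE_FLEX(struct ice_aqc_example, buf, elem, 1);
	u16 buf_size = __struct_size(buf);

	buf->num_elems = cpu_to_le16(1);
	return ice_aq_send_example(hw, buf, buf_size);
}
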
......@@ -4790,11 +4790,11 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
enum ice_disq_rst_src rst_src, u16 vmvf_num,
struct ice_sq_cd *cd)
{
struct ice_aqc_dis_txq_item *qg_list;
DEFINE_FLEX(struct ice_aqc_dis_txq_item, qg_list, q_id, 1);
u16 i, buf_size = __struct_size(qg_list);
struct ice_q_ctx *q_ctx;
int status = -ENOENT;
struct ice_hw *hw;
u16 i, buf_size;
if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
return -EIO;
......@@ -4812,11 +4812,6 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
return -EIO;
}
buf_size = struct_size(qg_list, q_id, 1);
qg_list = kzalloc(buf_size, GFP_KERNEL);
if (!qg_list)
return -ENOMEM;
mutex_lock(&pi->sched_lock);
for (i = 0; i < num_queues; i++) {
......@@ -4849,7 +4844,6 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
q_ctx->q_teid = ICE_INVAL_TEID;
}
mutex_unlock(&pi->sched_lock);
kfree(qg_list);
return status;
}
......@@ -5018,10 +5012,10 @@ int
ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid,
u16 *q_id)
{
struct ice_aqc_dis_txq_item *qg_list;
DEFINE_FLEX(struct ice_aqc_dis_txq_item, qg_list, q_id, 1);
u16 qg_size = __struct_size(qg_list);
struct ice_hw *hw;
int status = 0;
u16 qg_size;
int i;
if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
......@@ -5029,11 +5023,6 @@ ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid,
hw = pi->hw;
qg_size = struct_size(qg_list, q_id, 1);
qg_list = kzalloc(qg_size, GFP_KERNEL);
if (!qg_list)
return -ENOMEM;
mutex_lock(&pi->sched_lock);
for (i = 0; i < count; i++) {
......@@ -5058,7 +5047,6 @@ ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid,
}
mutex_unlock(&pi->sched_lock);
kfree(qg_list);
return status;
}
......
......@@ -1560,21 +1560,14 @@ static enum ice_ddp_state ice_init_pkg_info(struct ice_hw *hw,
*/
static enum ice_ddp_state ice_get_pkg_info(struct ice_hw *hw)
{
enum ice_ddp_state state = ICE_DDP_PKG_SUCCESS;
struct ice_aqc_get_pkg_info_resp *pkg_info;
u16 size;
DEFINE_FLEX(struct ice_aqc_get_pkg_info_resp, pkg_info, pkg_info,
ICE_PKG_CNT);
u16 size = __struct_size(pkg_info);
u32 i;
size = struct_size(pkg_info, pkg_info, ICE_PKG_CNT);
pkg_info = kzalloc(size, GFP_KERNEL);
if (!pkg_info)
if (ice_aq_get_pkg_info_list(hw, pkg_info, size, NULL))
return ICE_DDP_PKG_ERR;
if (ice_aq_get_pkg_info_list(hw, pkg_info, size, NULL)) {
state = ICE_DDP_PKG_ERR;
goto init_pkg_free_alloc;
}
for (i = 0; i < le32_to_cpu(pkg_info->count); i++) {
#define ICE_PKG_FLAG_COUNT 4
char flags[ICE_PKG_FLAG_COUNT + 1] = { 0 };
......@@ -1604,10 +1597,7 @@ static enum ice_ddp_state ice_get_pkg_info(struct ice_hw *hw)
pkg_info->pkg_info[i].name, flags);
}
init_pkg_free_alloc:
kfree(pkg_info);
return state;
return ICE_DDP_PKG_SUCCESS;
}
/**
......@@ -1622,9 +1612,10 @@ static enum ice_ddp_state ice_chk_pkg_compat(struct ice_hw *hw,
struct ice_pkg_hdr *ospkg,
struct ice_seg **seg)
{
struct ice_aqc_get_pkg_info_resp *pkg;
DEFINE_FLEX(struct ice_aqc_get_pkg_info_resp, pkg, pkg_info,
ICE_PKG_CNT);
u16 size = __struct_size(pkg);
enum ice_ddp_state state;
u16 size;
u32 i;
/* Check package version compatibility */
......@@ -1643,15 +1634,8 @@ static enum ice_ddp_state ice_chk_pkg_compat(struct ice_hw *hw,
}
/* Check if FW is compatible with the OS package */
size = struct_size(pkg, pkg_info, ICE_PKG_CNT);
pkg = kzalloc(size, GFP_KERNEL);
if (!pkg)
return ICE_DDP_PKG_ERR;
if (ice_aq_get_pkg_info_list(hw, pkg, size, NULL)) {
state = ICE_DDP_PKG_LOAD_ERROR;
goto fw_ddp_compat_free_alloc;
}
if (ice_aq_get_pkg_info_list(hw, pkg, size, NULL))
return ICE_DDP_PKG_LOAD_ERROR;
for (i = 0; i < le32_to_cpu(pkg->count); i++) {
/* loop till we find the NVM package */
......@@ -1668,8 +1652,7 @@ static enum ice_ddp_state ice_chk_pkg_compat(struct ice_hw *hw,
/* done processing NVM package so break */
break;
}
fw_ddp_compat_free_alloc:
kfree(pkg);
return state;
}
......
......@@ -430,10 +430,11 @@ static void
ice_lag_move_vf_node_tc(struct ice_lag *lag, u8 oldport, u8 newport,
u16 vsi_num, u8 tc)
{
u16 numq, valq, buf_size, num_moved, qbuf_size;
DEFINE_FLEX(struct ice_aqc_move_elem, buf, teid, 1);
struct device *dev = ice_pf_to_dev(lag->pf);
u16 numq, valq, num_moved, qbuf_size;
u16 buf_size = __struct_size(buf);
struct ice_aqc_cfg_txqs_buf *qbuf;
struct ice_aqc_move_elem *buf;
struct ice_sched_node *n_prt;
struct ice_hw *new_hw = NULL;
__le32 teid, parent_teid;
......@@ -505,26 +506,17 @@ ice_lag_move_vf_node_tc(struct ice_lag *lag, u8 oldport, u8 newport,
goto resume_traffic;
/* Move Vf's VSI node for this TC to newport's scheduler tree */
buf_size = struct_size(buf, teid, 1);
buf = kzalloc(buf_size, GFP_KERNEL);
if (!buf) {
dev_warn(dev, "Failure to alloc memory for VF node failover\n");
goto resume_traffic;
}
buf->hdr.src_parent_teid = parent_teid;
buf->hdr.dest_parent_teid = n_prt->info.node_teid;
buf->hdr.num_elems = cpu_to_le16(1);
buf->hdr.mode = ICE_AQC_MOVE_ELEM_MODE_KEEP_OWN;
buf->teid[0] = teid;
if (ice_aq_move_sched_elems(&lag->pf->hw, 1, buf, buf_size, &num_moved,
NULL))
if (ice_aq_move_sched_elems(&lag->pf->hw, buf, buf_size, &num_moved))
dev_warn(dev, "Failure to move VF nodes for failover\n");
else
ice_sched_update_parent(n_prt, ctx->sched.vsi_node[tc]);
kfree(buf);
goto resume_traffic;
qbuf_err:
......@@ -755,10 +747,11 @@ static void
ice_lag_reclaim_vf_tc(struct ice_lag *lag, struct ice_hw *src_hw, u16 vsi_num,
u8 tc)
{
u16 numq, valq, buf_size, num_moved, qbuf_size;
DEFINE_FLEX(struct ice_aqc_move_elem, buf, teid, 1);
struct device *dev = ice_pf_to_dev(lag->pf);
u16 numq, valq, num_moved, qbuf_size;
u16 buf_size = __struct_size(buf);
struct ice_aqc_cfg_txqs_buf *qbuf;
struct ice_aqc_move_elem *buf;
struct ice_sched_node *n_prt;
__le32 teid, parent_teid;
struct ice_vsi_ctx *ctx;
......@@ -820,26 +813,17 @@ ice_lag_reclaim_vf_tc(struct ice_lag *lag, struct ice_hw *src_hw, u16 vsi_num,
goto resume_reclaim;
/* Move node to new parent */
buf_size = struct_size(buf, teid, 1);
buf = kzalloc(buf_size, GFP_KERNEL);
if (!buf) {
dev_warn(dev, "Failure to alloc memory for VF node failover\n");
goto resume_reclaim;
}
buf->hdr.src_parent_teid = parent_teid;
buf->hdr.dest_parent_teid = n_prt->info.node_teid;
buf->hdr.num_elems = cpu_to_le16(1);
buf->hdr.mode = ICE_AQC_MOVE_ELEM_MODE_KEEP_OWN;
buf->teid[0] = teid;
if (ice_aq_move_sched_elems(&lag->pf->hw, 1, buf, buf_size, &num_moved,
NULL))
if (ice_aq_move_sched_elems(&lag->pf->hw, buf, buf_size, &num_moved))
dev_warn(dev, "Failure to move VF nodes for LAG reclaim\n");
else
ice_sched_update_parent(n_prt, ctx->sched.vsi_node[tc]);
kfree(buf);
goto resume_reclaim;
reclaim_qerr:
......@@ -1792,10 +1776,11 @@ static void
ice_lag_move_vf_nodes_tc_sync(struct ice_lag *lag, struct ice_hw *dest_hw,
u16 vsi_num, u8 tc)
{
u16 numq, valq, buf_size, num_moved, qbuf_size;
DEFINE_FLEX(struct ice_aqc_move_elem, buf, teid, 1);
struct device *dev = ice_pf_to_dev(lag->pf);
u16 numq, valq, num_moved, qbuf_size;
u16 buf_size = __struct_size(buf);
struct ice_aqc_cfg_txqs_buf *qbuf;
struct ice_aqc_move_elem *buf;
struct ice_sched_node *n_prt;
__le32 teid, parent_teid;
struct ice_vsi_ctx *ctx;
......@@ -1853,26 +1838,17 @@ ice_lag_move_vf_nodes_tc_sync(struct ice_lag *lag, struct ice_hw *dest_hw,
goto resume_sync;
/* Move node to new parent */
buf_size = struct_size(buf, teid, 1);
buf = kzalloc(buf_size, GFP_KERNEL);
if (!buf) {
dev_warn(dev, "Failure to alloc for VF node move in reset rebuild\n");
goto resume_sync;
}
buf->hdr.src_parent_teid = parent_teid;
buf->hdr.dest_parent_teid = n_prt->info.node_teid;
buf->hdr.num_elems = cpu_to_le16(1);
buf->hdr.mode = ICE_AQC_MOVE_ELEM_MODE_KEEP_OWN;
buf->teid[0] = teid;
if (ice_aq_move_sched_elems(&lag->pf->hw, 1, buf, buf_size, &num_moved,
NULL))
if (ice_aq_move_sched_elems(&lag->pf->hw, buf, buf_size, &num_moved))
dev_warn(dev, "Failure to move VF nodes for LAG reset rebuild\n");
else
ice_sched_update_parent(n_prt, ctx->sched.vsi_node[tc]);
kfree(buf);
goto resume_sync;
sync_qerr:
......
......@@ -1832,21 +1832,14 @@ int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx)
int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_tx_ring **tx_rings, u16 q_idx)
{
struct ice_aqc_add_tx_qgrp *qg_buf;
int err;
DEFINE_FLEX(struct ice_aqc_add_tx_qgrp, qg_buf, txqs, 1);
if (q_idx >= vsi->alloc_txq || !tx_rings || !tx_rings[q_idx])
return -EINVAL;
qg_buf = kzalloc(struct_size(qg_buf, txqs, 1), GFP_KERNEL);
if (!qg_buf)
return -ENOMEM;
qg_buf->num_txqs = 1;
err = ice_vsi_cfg_txq(vsi, tx_rings[q_idx], qg_buf);
kfree(qg_buf);
return err;
return ice_vsi_cfg_txq(vsi, tx_rings[q_idx], qg_buf);
}
/**
......@@ -1888,24 +1881,18 @@ int ice_vsi_cfg_rxqs(struct ice_vsi *vsi)
static int
ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_tx_ring **rings, u16 count)
{
struct ice_aqc_add_tx_qgrp *qg_buf;
u16 q_idx = 0;
DEFINE_FLEX(struct ice_aqc_add_tx_qgrp, qg_buf, txqs, 1);
int err = 0;
qg_buf = kzalloc(struct_size(qg_buf, txqs, 1), GFP_KERNEL);
if (!qg_buf)
return -ENOMEM;
u16 q_idx;
qg_buf->num_txqs = 1;
for (q_idx = 0; q_idx < count; q_idx++) {
err = ice_vsi_cfg_txq(vsi, rings[q_idx], qg_buf);
if (err)
goto err_cfg_txqs;
break;
}
err_cfg_txqs:
kfree(qg_buf);
return err;
}
......
......@@ -229,29 +229,22 @@ ice_aq_delete_sched_elems(struct ice_hw *hw, u16 grps_req,
* ice_sched_remove_elems - remove nodes from HW
* @hw: pointer to the HW struct
* @parent: pointer to the parent node
* @num_nodes: number of nodes
* @node_teids: array of node teids to be deleted
* @node_teid: node teid to be deleted
*
* This function remove nodes from HW
*/
static int
ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent,
u16 num_nodes, u32 *node_teids)
u32 node_teid)
{
struct ice_aqc_delete_elem *buf;
u16 i, num_groups_removed = 0;
u16 buf_size;
DEFINE_FLEX(struct ice_aqc_delete_elem, buf, teid, 1);
u16 buf_size = __struct_size(buf);
u16 num_groups_removed = 0;
int status;
buf_size = struct_size(buf, teid, num_nodes);
buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL);
if (!buf)
return -ENOMEM;
buf->hdr.parent_teid = parent->info.node_teid;
buf->hdr.num_elems = cpu_to_le16(num_nodes);
for (i = 0; i < num_nodes; i++)
buf->teid[i] = cpu_to_le32(node_teids[i]);
buf->hdr.num_elems = cpu_to_le16(1);
buf->teid[0] = cpu_to_le32(node_teid);
status = ice_aq_delete_sched_elems(hw, 1, buf, buf_size,
&num_groups_removed, NULL);
......@@ -259,7 +252,6 @@ ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent,
ice_debug(hw, ICE_DBG_SCHED, "remove node failed FW error %d\n",
hw->adminq.sq_last_status);
devm_kfree(ice_hw_to_dev(hw), buf);
return status;
}
......@@ -326,7 +318,7 @@ void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node)
node->info.data.elem_type != ICE_AQC_ELEM_TYPE_LEAF) {
u32 teid = le32_to_cpu(node->info.node_teid);
ice_sched_remove_elems(hw, node->parent, 1, &teid);
ice_sched_remove_elems(hw, node->parent, teid);
}
parent = node->parent;
/* root has no parent */
......@@ -437,24 +429,20 @@ ice_aq_cfg_sched_elems(struct ice_hw *hw, u16 elems_req,
}
/**
* ice_aq_move_sched_elems - move scheduler elements
* ice_aq_move_sched_elems - move scheduler element (just 1 group)
* @hw: pointer to the HW struct
* @grps_req: number of groups to move
* @buf: pointer to buffer
* @buf_size: buffer size in bytes
* @grps_movd: returns total number of groups moved
* @cd: pointer to command details structure or NULL
*
* Move scheduling elements (0x0408)
*/
int
ice_aq_move_sched_elems(struct ice_hw *hw, u16 grps_req,
struct ice_aqc_move_elem *buf, u16 buf_size,
u16 *grps_movd, struct ice_sq_cd *cd)
ice_aq_move_sched_elems(struct ice_hw *hw, struct ice_aqc_move_elem *buf,
u16 buf_size, u16 *grps_movd)
{
return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_move_sched_elems,
grps_req, (void *)buf, buf_size,
grps_movd, cd);
1, buf, buf_size, grps_movd, NULL);
}
/**
......@@ -1193,7 +1181,7 @@ static void ice_rm_dflt_leaf_node(struct ice_port_info *pi)
int status;
/* remove the default leaf node */
status = ice_sched_remove_elems(pi->hw, node->parent, 1, &teid);
status = ice_sched_remove_elems(pi->hw, node->parent, teid);
if (!status)
ice_free_sched_node(pi, node);
}
......@@ -2232,12 +2220,12 @@ int
ice_sched_move_nodes(struct ice_port_info *pi, struct ice_sched_node *parent,
u16 num_items, u32 *list)
{
struct ice_aqc_move_elem *buf;
DEFINE_FLEX(struct ice_aqc_move_elem, buf, teid, 1);
u16 buf_len = __struct_size(buf);
struct ice_sched_node *node;
u16 i, grps_movd = 0;
struct ice_hw *hw;
int status = 0;
u16 buf_len;
hw = pi->hw;
......@@ -2249,35 +2237,27 @@ ice_sched_move_nodes(struct ice_port_info *pi, struct ice_sched_node *parent,
hw->max_children[parent->tx_sched_layer])
return -ENOSPC;
buf_len = struct_size(buf, teid, 1);
buf = kzalloc(buf_len, GFP_KERNEL);
if (!buf)
return -ENOMEM;
for (i = 0; i < num_items; i++) {
node = ice_sched_find_node_by_teid(pi->root, list[i]);
if (!node) {
status = -EINVAL;
goto move_err_exit;
break;
}
buf->hdr.src_parent_teid = node->info.parent_teid;
buf->hdr.dest_parent_teid = parent->info.node_teid;
buf->teid[0] = node->info.node_teid;
buf->hdr.num_elems = cpu_to_le16(1);
status = ice_aq_move_sched_elems(hw, 1, buf, buf_len,
&grps_movd, NULL);
status = ice_aq_move_sched_elems(hw, buf, buf_len, &grps_movd);
if (status && grps_movd != 1) {
status = -EIO;
goto move_err_exit;
break;
}
/* update the SW DB */
ice_sched_update_parent(parent, node);
}
move_err_exit:
kfree(buf);
return status;
}
......
......@@ -161,10 +161,8 @@ ice_sched_add_nodes_to_layer(struct ice_port_info *pi,
u16 *num_nodes_added);
void ice_sched_replay_agg_vsi_preinit(struct ice_hw *hw);
void ice_sched_replay_agg(struct ice_hw *hw);
int
ice_aq_move_sched_elems(struct ice_hw *hw, u16 grps_req,
struct ice_aqc_move_elem *buf, u16 buf_size,
u16 *grps_movd, struct ice_sq_cd *cd);
int ice_aq_move_sched_elems(struct ice_hw *hw, struct ice_aqc_move_elem *buf,
u16 buf_size, u16 *grps_movd);
int ice_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle);
int ice_sched_replay_q_bw(struct ice_port_info *pi, struct ice_q_ctx *q_ctx);
#endif /* _ICE_SCHED_H_ */
......@@ -1812,15 +1812,11 @@ ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
enum ice_sw_lkup_type lkup_type,
enum ice_adminq_opc opc)
{
struct ice_aqc_alloc_free_res_elem *sw_buf;
DEFINE_FLEX(struct ice_aqc_alloc_free_res_elem, sw_buf, elem, 1);
u16 buf_len = __struct_size(sw_buf);
struct ice_aqc_res_elem *vsi_ele;
u16 buf_len;
int status;
buf_len = struct_size(sw_buf, elem, 1);
sw_buf = devm_kzalloc(ice_hw_to_dev(hw), buf_len, GFP_KERNEL);
if (!sw_buf)
return -ENOMEM;
sw_buf->num_elems = cpu_to_le16(1);
if (lkup_type == ICE_SW_LKUP_MAC ||
......@@ -1840,8 +1836,7 @@ ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
sw_buf->res_type =
cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
} else {
status = -EINVAL;
goto ice_aq_alloc_free_vsi_list_exit;
return -EINVAL;
}
if (opc == ice_aqc_opc_free_res)
......@@ -1849,16 +1844,14 @@ ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
status = ice_aq_alloc_free_res(hw, sw_buf, buf_len, opc);
if (status)
goto ice_aq_alloc_free_vsi_list_exit;
return status;
if (opc == ice_aqc_opc_alloc_res) {
vsi_ele = &sw_buf->elem[0];
*vsi_list_id = le16_to_cpu(vsi_ele->e.sw_resp);
}
ice_aq_alloc_free_vsi_list_exit:
devm_kfree(ice_hw_to_dev(hw), sw_buf);
return status;
return 0;
}
/**
......@@ -2088,15 +2081,10 @@ ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
*/
int ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
{
struct ice_aqc_alloc_free_res_elem *sw_buf;
u16 buf_len;
DEFINE_FLEX(struct ice_aqc_alloc_free_res_elem, sw_buf, elem, 1);
u16 buf_len = __struct_size(sw_buf);
int status;
buf_len = struct_size(sw_buf, elem, 1);
sw_buf = kzalloc(buf_len, GFP_KERNEL);
if (!sw_buf)
return -ENOMEM;
sw_buf->num_elems = cpu_to_le16(1);
sw_buf->res_type = cpu_to_le16((ICE_AQC_RES_TYPE_RECIPE <<
ICE_AQC_RES_TYPE_S) |
......@@ -2105,7 +2093,6 @@ int ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
ice_aqc_opc_alloc_res);
if (!status)
*rid = le16_to_cpu(sw_buf->elem[0].e.sw_resp);
kfree(sw_buf);
return status;
}
......@@ -4434,28 +4421,19 @@ int
ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
u16 *counter_id)
{
struct ice_aqc_alloc_free_res_elem *buf;
u16 buf_len;
DEFINE_FLEX(struct ice_aqc_alloc_free_res_elem, buf, elem, 1);
u16 buf_len = __struct_size(buf);
int status;
/* Allocate resource */
buf_len = struct_size(buf, elem, 1);
buf = kzalloc(buf_len, GFP_KERNEL);
if (!buf)
return -ENOMEM;
buf->num_elems = cpu_to_le16(num_items);
buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) &
ICE_AQC_RES_TYPE_M) | alloc_shared);
status = ice_aq_alloc_free_res(hw, buf, buf_len, ice_aqc_opc_alloc_res);
if (status)
goto exit;
return status;
*counter_id = le16_to_cpu(buf->elem[0].e.sw_resp);
exit:
kfree(buf);
return status;
}
......@@ -4471,16 +4449,10 @@ int
ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
u16 counter_id)
{
struct ice_aqc_alloc_free_res_elem *buf;
u16 buf_len;
DEFINE_FLEX(struct ice_aqc_alloc_free_res_elem, buf, elem, 1);
u16 buf_len = __struct_size(buf);
int status;
/* Free resource */
buf_len = struct_size(buf, elem, 1);
buf = kzalloc(buf_len, GFP_KERNEL);
if (!buf)
return -ENOMEM;
buf->num_elems = cpu_to_le16(num_items);
buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) &
ICE_AQC_RES_TYPE_M) | alloc_shared);
......@@ -4490,7 +4462,6 @@ ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
if (status)
ice_debug(hw, ICE_DBG_SW, "counter resource could not be freed\n");
kfree(buf);
return status;
}
......@@ -4508,15 +4479,10 @@ ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
*/
int ice_share_res(struct ice_hw *hw, u16 type, u8 shared, u16 res_id)
{
struct ice_aqc_alloc_free_res_elem *buf;
u16 buf_len;
DEFINE_FLEX(struct ice_aqc_alloc_free_res_elem, buf, elem, 1);
u16 buf_len = __struct_size(buf);
int status;
buf_len = struct_size(buf, elem, 1);
buf = kzalloc(buf_len, GFP_KERNEL);
if (!buf)
return -ENOMEM;
buf->num_elems = cpu_to_le16(1);
if (shared)
buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) &
......@@ -4534,7 +4500,6 @@ int ice_share_res(struct ice_hw *hw, u16 type, u8 shared, u16 res_id)
ice_debug(hw, ICE_DBG_SW, "Could not set resource type %u id %u to %s\n",
type, res_id, shared ? "SHARED" : "DEDICATED");
kfree(buf);
return status;
}
......
......@@ -217,21 +217,16 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
*/
static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
{
struct ice_aqc_add_tx_qgrp *qg_buf;
DEFINE_FLEX(struct ice_aqc_add_tx_qgrp, qg_buf, txqs, 1);
u16 size = __struct_size(qg_buf);
struct ice_q_vector *q_vector;
struct ice_tx_ring *tx_ring;
struct ice_rx_ring *rx_ring;
u16 size;
int err;
if (q_idx >= vsi->num_rxq || q_idx >= vsi->num_txq)
return -EINVAL;
size = struct_size(qg_buf, txqs, 1);
qg_buf = kzalloc(size, GFP_KERNEL);
if (!qg_buf)
return -ENOMEM;
qg_buf->num_txqs = 1;
tx_ring = vsi->tx_rings[q_idx];
......@@ -240,7 +235,7 @@ static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
err = ice_vsi_cfg_txq(vsi, tx_ring, qg_buf);
if (err)
goto free_buf;
return err;
if (ice_is_xdp_ena_vsi(vsi)) {
struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_idx];
......@@ -249,29 +244,28 @@ static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
qg_buf->num_txqs = 1;
err = ice_vsi_cfg_txq(vsi, xdp_ring, qg_buf);
if (err)
goto free_buf;
return err;
ice_set_ring_xdp(xdp_ring);
ice_tx_xsk_pool(vsi, q_idx);
}
err = ice_vsi_cfg_rxq(rx_ring);
if (err)
goto free_buf;
return err;
ice_qvec_cfg_msix(vsi, q_vector);
err = ice_vsi_ctrl_one_rx_ring(vsi, true, q_idx, true);
if (err)
goto free_buf;
return err;
clear_bit(ICE_CFG_BUSY, vsi->state);
ice_qvec_toggle_napi(vsi, q_vector, true);
ice_qvec_ena_irq(vsi, q_vector);
netif_tx_start_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
free_buf:
kfree(qg_buf);
return err;
return 0;
}
/**
......
......@@ -2,6 +2,15 @@
#ifndef __LINUX_COMPILER_TYPES_H
#define __LINUX_COMPILER_TYPES_H
/*
* __has_builtin is supported on gcc >= 10, clang >= 3 and icc >= 21.
* In the meantime, to support gcc < 10, we implement __has_builtin
* by hand.
*/
#ifndef __has_builtin
#define __has_builtin(x) (0)
#endif
#ifndef __ASSEMBLY__
/*
......@@ -134,17 +143,6 @@ static inline void __chk_io_ptr(const volatile void __iomem *ptr) { }
# define __preserve_most
#endif
/* Builtins */
/*
* __has_builtin is supported on gcc >= 10, clang >= 3 and icc >= 21.
* In the meantime, to support gcc < 10, we implement __has_builtin
* by hand.
*/
#ifndef __has_builtin
#define __has_builtin(x) (0)
#endif
/* Compiler specific macros. */
#ifdef __clang__
#include <linux/compiler-clang.h>
......@@ -352,6 +350,18 @@ struct ftrace_likely_data {
# define __realloc_size(x, ...)
#endif
/*
* When the size of an allocated object is needed, use the best available
* mechanism to find it. (For cases where sizeof() cannot be used.)
*/
#if __has_builtin(__builtin_dynamic_object_size)
#define __struct_size(p) __builtin_dynamic_object_size(p, 0)
#define __member_size(p) __builtin_dynamic_object_size(p, 1)
#else
#define __struct_size(p) __builtin_object_size(p, 0)
#define __member_size(p) __builtin_object_size(p, 1)
#endif
#ifndef asm_volatile_goto
#define asm_volatile_goto(x...) asm goto(x)
#endif
......
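
The __struct_size()/__member_size() pair maps onto the two object-size modes of the underlying builtins. A minimal user-space illustration of that distinction follows (hypothetical struct, not kernel code; build with optimization, e.g. -O2, so the compiler can resolve the sizes; __builtin_dynamic_object_size() gives the same answers for these statically sized cases):

#include <assert.h>

struct v {
	char buf[10];	/* closest enclosing member for var.buf */
	int tail;
};

static struct v var;

int main(void)
{
	/* Mode 0: bytes from the pointer to the end of the whole object. */
	assert(__builtin_object_size(var.buf, 0) == sizeof(var));
	/* Mode 1: bytes remaining in the closest enclosing member only. */
	assert(__builtin_object_size(var.buf, 1) == sizeof(var.buf));
	return 0;
}
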
......@@ -93,13 +93,9 @@ extern char *__underlying_strncpy(char *p, const char *q, __kernel_size_t size)
#if __has_builtin(__builtin_dynamic_object_size)
#define POS __pass_dynamic_object_size(1)
#define POS0 __pass_dynamic_object_size(0)
#define __struct_size(p) __builtin_dynamic_object_size(p, 0)
#define __member_size(p) __builtin_dynamic_object_size(p, 1)
#else
#define POS __pass_object_size(1)
#define POS0 __pass_object_size(0)
#define __struct_size(p) __builtin_object_size(p, 0)
#define __member_size(p) __builtin_object_size(p, 1)
#endif
#define __compiletime_lessthan(bounds, length) ( \
......
......@@ -309,4 +309,39 @@ static inline size_t __must_check size_sub(size_t minuend, size_t subtrahend)
#define struct_size_t(type, member, count) \
struct_size((type *)NULL, member, count)
/**
* _DEFINE_FLEX() - helper macro for DEFINE_FLEX() family.
* Enables caller macro to pass (different) initializer.
*
* @type: structure type name, including "struct" keyword.
* @name: Name for a variable to define.
* @member: Name of the array member.
* @count: Number of elements in the array; must be compile-time const.
* @initializer: initializer expression (could be empty for no init).
*/
#define _DEFINE_FLEX(type, name, member, count, initializer) \
_Static_assert(__builtin_constant_p(count), \
"onstack flex array members require compile-time const count"); \
union { \
u8 bytes[struct_size_t(type, member, count)]; \
type obj; \
} name##_u initializer; \
type *name = (type *)&name##_u
/**
* DEFINE_FLEX() - Define an on-stack instance of structure with a trailing
* flexible array member.
*
* @type: structure type name, including "struct" keyword.
* @name: Name for a variable to define.
* @member: Name of the array member.
* @count: Number of elements in the array; must be compile-time const.
*
* Define a zeroed, on-stack, instance of @type structure with a trailing
* flexible array member.
* Use __struct_size(@name) to get compile-time size of it afterwards.
*/
#define DEFINE_FLEX(type, name, member, count) \
_DEFINE_FLEX(type, name, member, count, = {})
#endif /* __LINUX_OVERFLOW_H */
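
The union trick above is what lets __struct_size() see the whole on-stack footprint, while the _Static_assert rejects non-constant counts at compile time. As a rough sketch (hypothetical struct foo, assertion omitted), DEFINE_FLEX(struct foo, f, data, 4) expands to approximately:

struct foo {
	__le16 count;
	__le32 data[];	/* trailing flexible array member */
};

/* Zeroed stack storage large enough for the header plus 4 elements ... */
union {
	u8 bytes[struct_size_t(struct foo, data, 4)];
	struct foo obj;
} f_u = {};
struct foo *f = (struct foo *)&f_u;

/* ... so __struct_size(f) evaluates to that full on-stack size. */
u16 buf_size = __struct_size(f);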