Commit b214b98a authored by Przemek Kitszel's avatar Przemek Kitszel Committed by Tony Nguyen

ice: embed &ice_rq_event_info event into struct ice_aq_task

Expose struct ice_aq_task to callers,
which takes the burden of memory ownership out of the AQ-wait family of functions
and reduces the need for heap-based allocations.

Embed struct ice_rq_event_info event into struct ice_aq_task
(instead of a pointer to it) to remove some more code from the callers.

Subsequent commit will improve more based on this one.
Reviewed-by: Jacob Keller <jacob.e.keller@intel.com>
Signed-off-by: Przemek Kitszel <przemyslaw.kitszel@intel.com>
Reviewed-by: Simon Horman <horms@kernel.org>
Tested-by: Pucha Himasekhar Reddy <himasekharx.reddy.pucha@intel.com> (A Contingent worker at Intel)
Reviewed-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
parent e1e8a142
...@@ -917,8 +917,22 @@ void ice_fdir_release_flows(struct ice_hw *hw); ...@@ -917,8 +917,22 @@ void ice_fdir_release_flows(struct ice_hw *hw);
void ice_fdir_replay_flows(struct ice_hw *hw); void ice_fdir_replay_flows(struct ice_hw *hw);
void ice_fdir_replay_fltrs(struct ice_pf *pf); void ice_fdir_replay_fltrs(struct ice_pf *pf);
int ice_fdir_create_dflt_rules(struct ice_pf *pf); int ice_fdir_create_dflt_rules(struct ice_pf *pf);
int ice_aq_wait_for_event(struct ice_pf *pf, u16 opcode, unsigned long timeout,
struct ice_rq_event_info *event); enum ice_aq_task_state {
ICE_AQ_TASK_WAITING,
ICE_AQ_TASK_COMPLETE,
ICE_AQ_TASK_CANCELED,
};
struct ice_aq_task {
struct hlist_node entry;
struct ice_rq_event_info event;
enum ice_aq_task_state state;
u16 opcode;
};
int ice_aq_wait_for_event(struct ice_pf *pf, struct ice_aq_task *task,
u16 opcode, unsigned long timeout);
int ice_open(struct net_device *netdev); int ice_open(struct net_device *netdev);
int ice_open_internal(struct net_device *netdev); int ice_open_internal(struct net_device *netdev);
int ice_stop(struct net_device *netdev); int ice_stop(struct net_device *netdev);
......
...@@ -293,13 +293,12 @@ ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset, ...@@ -293,13 +293,12 @@ ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset,
{ {
u16 completion_module, completion_retval; u16 completion_module, completion_retval;
struct device *dev = ice_pf_to_dev(pf); struct device *dev = ice_pf_to_dev(pf);
struct ice_rq_event_info event; struct ice_aq_task task = {};
struct ice_hw *hw = &pf->hw; struct ice_hw *hw = &pf->hw;
struct ice_aq_desc *desc;
u32 completion_offset; u32 completion_offset;
int err; int err;
memset(&event, 0, sizeof(event));
dev_dbg(dev, "Writing block of %u bytes for module 0x%02x at offset %u\n", dev_dbg(dev, "Writing block of %u bytes for module 0x%02x at offset %u\n",
block_size, module, offset); block_size, module, offset);
...@@ -319,7 +318,7 @@ ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset, ...@@ -319,7 +318,7 @@ ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset,
* is conservative and is intended to prevent failure to update when * is conservative and is intended to prevent failure to update when
* firmware is slow to respond. * firmware is slow to respond.
*/ */
err = ice_aq_wait_for_event(pf, ice_aqc_opc_nvm_write, 15 * HZ, &event); err = ice_aq_wait_for_event(pf, &task, ice_aqc_opc_nvm_write, 15 * HZ);
if (err) { if (err) {
dev_err(dev, "Timed out while trying to flash module 0x%02x with block of size %u at offset %u, err %d\n", dev_err(dev, "Timed out while trying to flash module 0x%02x with block of size %u at offset %u, err %d\n",
module, block_size, offset, err); module, block_size, offset, err);
...@@ -327,11 +326,12 @@ ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset, ...@@ -327,11 +326,12 @@ ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset,
return -EIO; return -EIO;
} }
completion_module = le16_to_cpu(event.desc.params.nvm.module_typeid); desc = &task.event.desc;
completion_retval = le16_to_cpu(event.desc.retval); completion_module = le16_to_cpu(desc->params.nvm.module_typeid);
completion_retval = le16_to_cpu(desc->retval);
completion_offset = le16_to_cpu(event.desc.params.nvm.offset_low); completion_offset = le16_to_cpu(desc->params.nvm.offset_low);
completion_offset |= event.desc.params.nvm.offset_high << 16; completion_offset |= desc->params.nvm.offset_high << 16;
if (completion_module != module) { if (completion_module != module) {
dev_err(dev, "Unexpected module_typeid in write completion: got 0x%x, expected 0x%x\n", dev_err(dev, "Unexpected module_typeid in write completion: got 0x%x, expected 0x%x\n",
...@@ -363,8 +363,8 @@ ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset, ...@@ -363,8 +363,8 @@ ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset,
*/ */
if (reset_level && last_cmd && module == ICE_SR_1ST_NVM_BANK_PTR) { if (reset_level && last_cmd && module == ICE_SR_1ST_NVM_BANK_PTR) {
if (hw->dev_caps.common_cap.pcie_reset_avoidance) { if (hw->dev_caps.common_cap.pcie_reset_avoidance) {
*reset_level = (event.desc.params.nvm.cmd_flags & *reset_level = desc->params.nvm.cmd_flags &
ICE_AQC_NVM_RESET_LVL_M); ICE_AQC_NVM_RESET_LVL_M;
dev_dbg(dev, "Firmware reported required reset level as %u\n", dev_dbg(dev, "Firmware reported required reset level as %u\n",
*reset_level); *reset_level);
} else { } else {
...@@ -479,15 +479,14 @@ ice_erase_nvm_module(struct ice_pf *pf, u16 module, const char *component, ...@@ -479,15 +479,14 @@ ice_erase_nvm_module(struct ice_pf *pf, u16 module, const char *component,
{ {
u16 completion_module, completion_retval; u16 completion_module, completion_retval;
struct device *dev = ice_pf_to_dev(pf); struct device *dev = ice_pf_to_dev(pf);
struct ice_rq_event_info event; struct ice_aq_task task = {};
struct ice_hw *hw = &pf->hw; struct ice_hw *hw = &pf->hw;
struct ice_aq_desc *desc;
struct devlink *devlink; struct devlink *devlink;
int err; int err;
dev_dbg(dev, "Beginning erase of flash component '%s', module 0x%02x\n", component, module); dev_dbg(dev, "Beginning erase of flash component '%s', module 0x%02x\n", component, module);
memset(&event, 0, sizeof(event));
devlink = priv_to_devlink(pf); devlink = priv_to_devlink(pf);
devlink_flash_update_timeout_notify(devlink, "Erasing", component, ICE_FW_ERASE_TIMEOUT); devlink_flash_update_timeout_notify(devlink, "Erasing", component, ICE_FW_ERASE_TIMEOUT);
...@@ -502,7 +501,7 @@ ice_erase_nvm_module(struct ice_pf *pf, u16 module, const char *component, ...@@ -502,7 +501,7 @@ ice_erase_nvm_module(struct ice_pf *pf, u16 module, const char *component,
goto out_notify_devlink; goto out_notify_devlink;
} }
err = ice_aq_wait_for_event(pf, ice_aqc_opc_nvm_erase, ICE_FW_ERASE_TIMEOUT * HZ, &event); err = ice_aq_wait_for_event(pf, &task, ice_aqc_opc_nvm_erase, ICE_FW_ERASE_TIMEOUT * HZ);
if (err) { if (err) {
dev_err(dev, "Timed out waiting for firmware to respond with erase completion for %s (module 0x%02x), err %d\n", dev_err(dev, "Timed out waiting for firmware to respond with erase completion for %s (module 0x%02x), err %d\n",
component, module, err); component, module, err);
...@@ -510,8 +509,9 @@ ice_erase_nvm_module(struct ice_pf *pf, u16 module, const char *component, ...@@ -510,8 +509,9 @@ ice_erase_nvm_module(struct ice_pf *pf, u16 module, const char *component,
goto out_notify_devlink; goto out_notify_devlink;
} }
completion_module = le16_to_cpu(event.desc.params.nvm.module_typeid); desc = &task.event.desc;
completion_retval = le16_to_cpu(event.desc.retval); completion_module = le16_to_cpu(desc->params.nvm.module_typeid);
completion_retval = le16_to_cpu(desc->retval);
if (completion_module != module) { if (completion_module != module) {
dev_err(dev, "Unexpected module_typeid in erase completion for %s: got 0x%x, expected 0x%x\n", dev_err(dev, "Unexpected module_typeid in erase completion for %s: got 0x%x, expected 0x%x\n",
...@@ -560,14 +560,12 @@ ice_switch_flash_banks(struct ice_pf *pf, u8 activate_flags, ...@@ -560,14 +560,12 @@ ice_switch_flash_banks(struct ice_pf *pf, u8 activate_flags,
u8 *emp_reset_available, struct netlink_ext_ack *extack) u8 *emp_reset_available, struct netlink_ext_ack *extack)
{ {
struct device *dev = ice_pf_to_dev(pf); struct device *dev = ice_pf_to_dev(pf);
struct ice_rq_event_info event; struct ice_aq_task task = {};
struct ice_hw *hw = &pf->hw; struct ice_hw *hw = &pf->hw;
u16 completion_retval; u16 completion_retval;
u8 response_flags; u8 response_flags;
int err; int err;
memset(&event, 0, sizeof(event));
err = ice_nvm_write_activate(hw, activate_flags, &response_flags); err = ice_nvm_write_activate(hw, activate_flags, &response_flags);
if (err) { if (err) {
dev_err(dev, "Failed to switch active flash banks, err %d aq_err %s\n", dev_err(dev, "Failed to switch active flash banks, err %d aq_err %s\n",
...@@ -592,8 +590,8 @@ ice_switch_flash_banks(struct ice_pf *pf, u8 activate_flags, ...@@ -592,8 +590,8 @@ ice_switch_flash_banks(struct ice_pf *pf, u8 activate_flags,
} }
} }
err = ice_aq_wait_for_event(pf, ice_aqc_opc_nvm_write_activate, 30 * HZ, err = ice_aq_wait_for_event(pf, &task, ice_aqc_opc_nvm_write_activate,
&event); 30 * HZ);
if (err) { if (err) {
dev_err(dev, "Timed out waiting for firmware to switch active flash banks, err %d\n", dev_err(dev, "Timed out waiting for firmware to switch active flash banks, err %d\n",
err); err);
...@@ -601,7 +599,7 @@ ice_switch_flash_banks(struct ice_pf *pf, u8 activate_flags, ...@@ -601,7 +599,7 @@ ice_switch_flash_banks(struct ice_pf *pf, u8 activate_flags,
return err; return err;
} }
completion_retval = le16_to_cpu(event.desc.retval); completion_retval = le16_to_cpu(task.event.desc.retval);
if (completion_retval) { if (completion_retval) {
dev_err(dev, "Firmware failed to switch active flash banks aq_err %s\n", dev_err(dev, "Firmware failed to switch active flash banks aq_err %s\n",
ice_aq_str((enum ice_aq_err)completion_retval)); ice_aq_str((enum ice_aq_err)completion_retval));
......
...@@ -1250,26 +1250,12 @@ ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event) ...@@ -1250,26 +1250,12 @@ ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event)
return status; return status;
} }
enum ice_aq_task_state {
ICE_AQ_TASK_WAITING = 0,
ICE_AQ_TASK_COMPLETE,
ICE_AQ_TASK_CANCELED,
};
struct ice_aq_task {
struct hlist_node entry;
u16 opcode;
struct ice_rq_event_info *event;
enum ice_aq_task_state state;
};
/** /**
* ice_aq_wait_for_event - Wait for an AdminQ event from firmware * ice_aq_wait_for_event - Wait for an AdminQ event from firmware
* @pf: pointer to the PF private structure * @pf: pointer to the PF private structure
* @task: ptr to task structure
* @opcode: the opcode to wait for * @opcode: the opcode to wait for
* @timeout: how long to wait, in jiffies * @timeout: how long to wait, in jiffies
* @event: storage for the event info
* *
* Waits for a specific AdminQ completion event on the ARQ for a given PF. The * Waits for a specific AdminQ completion event on the ARQ for a given PF. The
* current thread will be put to sleep until the specified event occurs or * current thread will be put to sleep until the specified event occurs or
...@@ -1281,22 +1267,16 @@ struct ice_aq_task { ...@@ -1281,22 +1267,16 @@ struct ice_aq_task {
* *
* Returns: zero on success, or a negative error code on failure. * Returns: zero on success, or a negative error code on failure.
*/ */
int ice_aq_wait_for_event(struct ice_pf *pf, u16 opcode, unsigned long timeout, int ice_aq_wait_for_event(struct ice_pf *pf, struct ice_aq_task *task,
struct ice_rq_event_info *event) u16 opcode, unsigned long timeout)
{ {
struct device *dev = ice_pf_to_dev(pf); struct device *dev = ice_pf_to_dev(pf);
struct ice_aq_task *task;
unsigned long start; unsigned long start;
long ret; long ret;
int err; int err;
task = kzalloc(sizeof(*task), GFP_KERNEL);
if (!task)
return -ENOMEM;
INIT_HLIST_NODE(&task->entry); INIT_HLIST_NODE(&task->entry);
task->opcode = opcode; task->opcode = opcode;
task->event = event;
task->state = ICE_AQ_TASK_WAITING; task->state = ICE_AQ_TASK_WAITING;
spin_lock_bh(&pf->aq_wait_lock); spin_lock_bh(&pf->aq_wait_lock);
...@@ -1331,7 +1311,6 @@ int ice_aq_wait_for_event(struct ice_pf *pf, u16 opcode, unsigned long timeout, ...@@ -1331,7 +1311,6 @@ int ice_aq_wait_for_event(struct ice_pf *pf, u16 opcode, unsigned long timeout,
spin_lock_bh(&pf->aq_wait_lock); spin_lock_bh(&pf->aq_wait_lock);
hlist_del(&task->entry); hlist_del(&task->entry);
spin_unlock_bh(&pf->aq_wait_lock); spin_unlock_bh(&pf->aq_wait_lock);
kfree(task);
return err; return err;
} }
...@@ -1366,7 +1345,7 @@ static void ice_aq_check_events(struct ice_pf *pf, u16 opcode, ...@@ -1366,7 +1345,7 @@ static void ice_aq_check_events(struct ice_pf *pf, u16 opcode,
if (task->state || task->opcode != opcode) if (task->state || task->opcode != opcode)
continue; continue;
task_ev = task->event; task_ev = &task->event;
memcpy(&task_ev->desc, &event->desc, sizeof(event->desc)); memcpy(&task_ev->desc, &event->desc, sizeof(event->desc));
task_ev->msg_len = event->msg_len; task_ev->msg_len = event->msg_len;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment