Commit 7c9c8477 authored by Alon Giladi, committed by Johannes Berg

wifi: iwlwifi: Enable loading of reduce-power tables into several segments

Replace the reduce_power_dram field with a struct that holds data about
the DRAM regions used for the reduce-power tables. Generalize
load_payloads_segments() to work for both PNVM tables and reduce-power
tables. Make the required adjustments in the data structures.
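
For illustration, both call sites now share a single shape. A minimal
sketch follows (the helper name load_payloads_regions() is hypothetical
and not part of this patch; the two functions it dispatches to are the
ones touched here):

static int load_payloads_regions(struct iwl_trans *trans,
				 struct iwl_dram_regions *dram_regions,
				 const struct iwl_pnvm_image *payloads,
				 const struct iwl_ucode_capabilities *capa)
{
	int ret;

	if (fw_has_capa(capa, IWL_UCODE_TLV_CAPA_FRAGMENTED_PNVM_IMG)) {
		/* one DRAM region per payload chunk, plus a descriptor
		 * region telling the FW where each chunk lives
		 */
		ret = iwl_pcie_load_payloads_segments(trans, dram_regions,
						      payloads);
	} else {
		/* a single contiguous DRAM region for the whole image */
		ret = iwl_pcie_load_payloads_continuously(trans, payloads,
							  &dram_regions->drams[0]);
		if (!ret)
			dram_regions->n_regions = 1;
	}

	return ret;
}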
Signed-off-by: Alon Giladi <alon.giladi@intel.com>
Signed-off-by: Gregory Greenman <gregory.greenman@intel.com>
Link: https://lore.kernel.org/r/20230606103519.6fe66958f049.I85d80682229fc02fe354462cc9da40937558f30c@changeid
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
parent ea3571f4
@@ -330,7 +330,7 @@ int iwl_pnvm_load(struct iwl_trans *trans,
 		 */
 		trans->reduce_power_loaded = true;
 	} else {
-		ret = iwl_trans_load_reduce_power(trans, &pnvm_data);
+		ret = iwl_trans_load_reduce_power(trans, &pnvm_data, capa);
 		if (ret) {
 			IWL_DEBUG_FW(trans,
 				     "Failed to load reduce power table %d\n",
@@ -340,7 +340,7 @@ int iwl_pnvm_load(struct iwl_trans *trans,
 		kfree(data);
 	}

-	iwl_trans_set_reduce_power(trans);
+	iwl_trans_set_reduce_power(trans, capa);

 	iwl_init_notification_wait(notif_wait, &pnvm_wait,
 				   ntf_cmds, ARRAY_SIZE(ntf_cmds),
...
@@ -98,9 +98,9 @@ struct iwl_prph_scratch_control {
 } __packed; /* PERIPH_SCRATCH_CONTROL_S */

 /*
- * struct iwl_prph_scratch_pnvm_cfg - ror config
+ * struct iwl_prph_scratch_pnvm_cfg - PNVM scratch
  * @pnvm_base_addr: PNVM start address
- * @pnvm_size: PNVM size in DWs
+ * @pnvm_size: the size of the PNVM image in bytes
  * @reserved: reserved
  */
 struct iwl_prph_scratch_pnvm_cfg {
@@ -142,7 +142,7 @@ struct iwl_prph_scratch_rbd_cfg {
 /*
  * struct iwl_prph_scratch_uefi_cfg - prph scratch reduce power table
  * @base_addr: reduce power table address
- * @size: table size in dwords
+ * @size: the size of the entire power table image
  */
 struct iwl_prph_scratch_uefi_cfg {
 	__le64 base_addr;
@@ -292,10 +292,13 @@ int iwl_trans_pcie_ctx_info_gen3_load_pnvm(struct iwl_trans *trans,
 					   const struct iwl_ucode_capabilities *capa);
 void iwl_trans_pcie_ctx_info_gen3_set_pnvm(struct iwl_trans *trans,
 					   const struct iwl_ucode_capabilities *capa);
-int iwl_trans_pcie_ctx_info_gen3_load_reduce_power
-	(struct iwl_trans *trans,
-	 const struct iwl_pnvm_image *payloads);
-void iwl_trans_pcie_ctx_info_gen3_set_reduce_power(struct iwl_trans *trans);
+int
+iwl_trans_pcie_ctx_info_gen3_load_reduce_power(struct iwl_trans *trans,
+					       const struct iwl_pnvm_image *payloads,
+					       const struct iwl_ucode_capabilities *capa);
+void
+iwl_trans_pcie_ctx_info_gen3_set_reduce_power(struct iwl_trans *trans,
+					      const struct iwl_ucode_capabilities *capa);
 int iwl_trans_pcie_ctx_info_gen3_set_step(struct iwl_trans *trans,
 					  u32 mbx_addr_0_step, u32 mbx_addr_1_step);
 #endif /* __iwl_context_info_file_gen3_h__ */
@@ -641,8 +641,10 @@ struct iwl_trans_ops {
 	void (*set_pnvm)(struct iwl_trans *trans,
 			 const struct iwl_ucode_capabilities *capa);
 	int (*load_reduce_power)(struct iwl_trans *trans,
-				 const struct iwl_pnvm_image *payloads);
-	void (*set_reduce_power)(struct iwl_trans *trans);
+				 const struct iwl_pnvm_image *payloads,
+				 const struct iwl_ucode_capabilities *capa);
+	void (*set_reduce_power)(struct iwl_trans *trans,
+				 const struct iwl_ucode_capabilities *capa);

 	void (*interrupts)(struct iwl_trans *trans, bool enable);
 	int (*imr_dma_data)(struct iwl_trans *trans,
@@ -731,6 +733,19 @@ struct iwl_dram_data {
 	int size;
 };

+/**
+ * @drams: array of several DRAM areas that contains the pnvm and power
+ *	reduction table payloads.
+ * @n_regions: number of DRAM regions that were allocated
+ * @prph_scratch_mem_desc: points to a structure allocated in dram,
+ *	designed to show FW where all the payloads are.
+ */
+struct iwl_dram_regions {
+	struct iwl_dram_data drams[IPC_DRAM_MAP_ENTRY_NUM_MAX];
+	struct iwl_dram_data prph_scratch_mem_desc;
+	u8 n_regions;
+};
+
 /**
  * struct iwl_fw_mon - fw monitor per allocation id
  * @num_frags: number of fragments
@@ -1560,15 +1575,18 @@ static inline void iwl_trans_set_pnvm(struct iwl_trans *trans,
 static inline int iwl_trans_load_reduce_power
 				(struct iwl_trans *trans,
-				 const struct iwl_pnvm_image *payloads)
+				 const struct iwl_pnvm_image *payloads,
+				 const struct iwl_ucode_capabilities *capa)
 {
-	return trans->ops->load_reduce_power(trans, payloads);
+	return trans->ops->load_reduce_power(trans, payloads, capa);
 }

-static inline void iwl_trans_set_reduce_power(struct iwl_trans *trans)
+static inline void
+iwl_trans_set_reduce_power(struct iwl_trans *trans,
+			   const struct iwl_ucode_capabilities *capa)
 {
 	if (trans->ops->set_reduce_power)
-		trans->ops->set_reduce_power(trans);
+		trans->ops->set_reduce_power(trans, capa);
 }

 static inline bool iwl_trans_dbg_ini_valid(struct iwl_trans *trans)
...
 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
 /*
- * Copyright (C) 2018-2022 Intel Corporation
+ * Copyright (C) 2018-2023 Intel Corporation
  */
 #include "iwl-trans.h"
 #include "iwl-fh.h"
@@ -317,11 +317,11 @@ static int iwl_pcie_load_payloads_continuously(struct iwl_trans *trans,
 static int iwl_pcie_load_payloads_segments
 				(struct iwl_trans *trans,
+				 struct iwl_dram_regions *dram_regions,
 				 const struct iwl_pnvm_image *pnvm_data)
 {
-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	struct iwl_dram_data *cur_pnvm_dram = &trans_pcie->pnvm_dram[0],
-			     *desc_dram = &trans_pcie->pnvm_regions_desc_array;
+	struct iwl_dram_data *cur_payload_dram = &dram_regions->drams[0];
+	struct iwl_dram_data *desc_dram = &dram_regions->prph_scratch_mem_desc;
 	struct iwl_prph_scrath_mem_desc_addr_array *addresses;
 	const void *data;
 	u32 len;
@@ -341,30 +341,31 @@ static int iwl_pcie_load_payloads_segments
 	memset(desc_dram->block, 0, len);

 	/* allocate DRAM region for each payload */
-	trans_pcie->n_pnvm_regions = 0;
+	dram_regions->n_regions = 0;
 	for (i = 0; i < pnvm_data->n_chunks; i++) {
 		len = pnvm_data->chunks[i].len;
 		data = pnvm_data->chunks[i].data;
-		if (iwl_pcie_ctxt_info_alloc_dma(trans, data, len,
-						 cur_pnvm_dram)) {
-			iwl_trans_pcie_free_pnvm_dram(trans_pcie, trans->dev);
+		if (iwl_pcie_ctxt_info_alloc_dma(trans,
+						 data,
+						 len,
+						 cur_payload_dram)) {
+			iwl_trans_pcie_free_pnvm_dram_regions(dram_regions,
+							      trans->dev);
 			return -ENOMEM;
 		}
-		trans_pcie->n_pnvm_regions++;
-		cur_pnvm_dram++;
+		dram_regions->n_regions++;
+		cur_payload_dram++;
 	}

 	/* fill desc with the DRAM payloads addresses */
 	addresses = desc_dram->block;
 	for (i = 0; i < pnvm_data->n_chunks; i++) {
 		addresses->mem_descs[i] =
-			cpu_to_le64(trans_pcie->pnvm_dram[i].physical);
+			cpu_to_le64(dram_regions->drams[i].physical);
 	}

-	trans->pnvm_loaded = true;
 	return 0;
 }
@@ -376,7 +377,7 @@ int iwl_trans_pcie_ctx_info_gen3_load_pnvm(struct iwl_trans *trans,
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl =
 		&trans_pcie->prph_scratch->ctrl_cfg;
-	struct iwl_dram_data *dram = &trans_pcie->pnvm_dram[0];
+	struct iwl_dram_regions *dram_regions = &trans_pcie->pnvm_data;
 	int ret = 0;

 	/* only allocate the DRAM if not allocated yet */
@@ -394,28 +395,51 @@ int iwl_trans_pcie_ctx_info_gen3_load_pnvm(struct iwl_trans *trans,
 		return -EINVAL;
 	}

-	/* allocate several DRAM sections */
-	if (fw_has_capa(capa, IWL_UCODE_TLV_CAPA_FRAGMENTED_PNVM_IMG))
-		return iwl_pcie_load_payloads_segments(trans, pnvm_payloads);
-
-	/* allocate one DRAM section */
-	ret = iwl_pcie_load_payloads_continuously(trans, pnvm_payloads, dram);
-	if (!ret) {
-		trans_pcie->n_pnvm_regions = 1;
-		trans->pnvm_loaded = true;
+	/* save payloads in several DRAM sections */
+	if (fw_has_capa(capa, IWL_UCODE_TLV_CAPA_FRAGMENTED_PNVM_IMG)) {
+		ret = iwl_pcie_load_payloads_segments(trans,
+						      dram_regions,
+						      pnvm_payloads);
+		if (!ret)
+			trans->pnvm_loaded = true;
+	} else {
+		/* save only in one DRAM section */
+		ret = iwl_pcie_load_payloads_continuously
+						(trans,
+						 pnvm_payloads,
+						 &dram_regions->drams[0]);
+		if (!ret) {
+			dram_regions->n_regions = 1;
+			trans->pnvm_loaded = true;
+		}
 	}

 	return ret;
 }

+static inline size_t
+iwl_dram_regions_size(const struct iwl_dram_regions *dram_regions)
+{
+	size_t total_size = 0;
+	int i;
+
+	for (i = 0; i < dram_regions->n_regions; i++)
+		total_size += dram_regions->drams[i].size;
+
+	return total_size;
+}
+
 static void iwl_pcie_set_pnvm_segments(struct iwl_trans *trans)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl =
 		&trans_pcie->prph_scratch->ctrl_cfg;
+	struct iwl_dram_regions *dram_regions = &trans_pcie->pnvm_data;

 	prph_sc_ctrl->pnvm_cfg.pnvm_base_addr =
-		cpu_to_le64(trans_pcie->pnvm_regions_desc_array.physical);
+		cpu_to_le64(dram_regions->prph_scratch_mem_desc.physical);
+	prph_sc_ctrl->pnvm_cfg.pnvm_size =
+		cpu_to_le32(iwl_dram_regions_size(dram_regions));
 }
@@ -425,9 +449,9 @@ static void iwl_pcie_set_continuous_pnvm(struct iwl_trans *trans)
 		&trans_pcie->prph_scratch->ctrl_cfg;

 	prph_sc_ctrl->pnvm_cfg.pnvm_base_addr =
-		cpu_to_le64(trans_pcie->pnvm_dram[0].physical);
+		cpu_to_le64(trans_pcie->pnvm_data.drams[0].physical);
 	prph_sc_ctrl->pnvm_cfg.pnvm_size =
-		cpu_to_le32(trans_pcie->pnvm_dram[0].size);
+		cpu_to_le32(trans_pcie->pnvm_data.drams[0].size);
 }

 void iwl_trans_pcie_ctx_info_gen3_set_pnvm(struct iwl_trans *trans,
@@ -443,12 +467,18 @@ void iwl_trans_pcie_ctx_info_gen3_set_pnvm(struct iwl_trans *trans,
 }

 int iwl_trans_pcie_ctx_info_gen3_load_reduce_power(struct iwl_trans *trans,
-						   const struct iwl_pnvm_image *payloads)
+						   const struct iwl_pnvm_image *payloads,
+						   const struct iwl_ucode_capabilities *capa)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl =
 		&trans_pcie->prph_scratch->ctrl_cfg;
-	struct iwl_dram_data *dram = &trans_pcie->reduce_power_dram;
+	struct iwl_dram_regions *dram_regions = &trans_pcie->reduced_tables_data;
+	int ret = 0;
+
+	/* only allocate the DRAM if not allocated yet */
+	if (trans->reduce_power_loaded)
+		return 0;

 	if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
 		return 0;
@@ -456,26 +486,68 @@ int iwl_trans_pcie_ctx_info_gen3_load_reduce_power(struct iwl_trans *trans,
 	if (WARN_ON(prph_sc_ctrl->reduce_power_cfg.size))
 		return -EBUSY;

-	/* only allocate the DRAM if not allocated yet */
-	if (!trans->reduce_power_loaded)
-		return iwl_pcie_load_payloads_continuously(trans,
-							   payloads,
-							   dram);
-	return 0;
+	if (!payloads->n_chunks) {
+		IWL_DEBUG_FW(trans, "no payloads\n");
+		return -EINVAL;
+	}
+
+	/* save payloads in several DRAM sections */
+	if (fw_has_capa(capa, IWL_UCODE_TLV_CAPA_FRAGMENTED_PNVM_IMG)) {
+		ret = iwl_pcie_load_payloads_segments(trans,
+						      dram_regions,
+						      payloads);
+		if (!ret)
+			trans->reduce_power_loaded = true;
+	} else {
+		/* save only in one DRAM section */
+		ret = iwl_pcie_load_payloads_continuously
+						(trans,
+						 payloads,
+						 &dram_regions->drams[0]);
+		if (!ret) {
+			dram_regions->n_regions = 1;
+			trans->reduce_power_loaded = true;
+		}
+	}
+
+	return ret;
 }

-void iwl_trans_pcie_ctx_info_gen3_set_reduce_power(struct iwl_trans *trans)
+static void iwl_pcie_set_reduce_power_segments(struct iwl_trans *trans)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl =
 		&trans_pcie->prph_scratch->ctrl_cfg;
+	struct iwl_dram_regions *dram_regions = &trans_pcie->reduced_tables_data;

-	if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
-		return;
+	prph_sc_ctrl->reduce_power_cfg.base_addr =
+		cpu_to_le64(dram_regions->prph_scratch_mem_desc.physical);
+	prph_sc_ctrl->reduce_power_cfg.size =
+		cpu_to_le32(iwl_dram_regions_size(dram_regions));
+}
+
+static void iwl_pcie_set_continuous_reduce_power(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl =
+		&trans_pcie->prph_scratch->ctrl_cfg;

 	prph_sc_ctrl->reduce_power_cfg.base_addr =
-		cpu_to_le64(trans_pcie->reduce_power_dram.physical);
+		cpu_to_le64(trans_pcie->reduced_tables_data.drams[0].physical);
 	prph_sc_ctrl->reduce_power_cfg.size =
-		cpu_to_le32(trans_pcie->reduce_power_dram.size);
+		cpu_to_le32(trans_pcie->reduced_tables_data.drams[0].size);
+}
+
+void
+iwl_trans_pcie_ctx_info_gen3_set_reduce_power(struct iwl_trans *trans,
+					      const struct iwl_ucode_capabilities *capa)
+{
+	if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
+		return;
+
+	if (fw_has_capa(capa, IWL_UCODE_TLV_CAPA_FRAGMENTED_PNVM_IMG))
+		iwl_pcie_set_reduce_power_segments(trans);
+	else
+		iwl_pcie_set_continuous_reduce_power(trans);
 }
@@ -307,10 +307,9 @@ enum iwl_pcie_imr_status {
  * @trans: pointer to the generic transport area
  * @scd_base_addr: scheduler sram base address in SRAM
  * @kw: keep warm address
- * @pnvm_dram: array of several DRAM areas that contains the PNVM data
- * @n_pnvm_regions: number of DRAM regions that were allocated for the pnvm
- * @pnvm_regions_desc_array: array of PNVM payloads addresses.
- *	allocated in DRAM and sent to FW.
+ * @pnvm_data: holds info about pnvm payloads allocated in DRAM
+ * @reduced_tables_data: holds info about power reduced tables
+ *	payloads allocated in DRAM
  * @pci_dev: basic pci-network driver stuff
  * @hw_base: pci hardware address support
  * @ucode_write_complete: indicates that the ucode has been copied.
@@ -385,10 +384,8 @@ struct iwl_trans_pcie {
 	struct iwl_dma_ptr kw;

 	/* pnvm data */
-	struct iwl_dram_data pnvm_dram[IPC_DRAM_MAP_ENTRY_NUM_MAX];
-	u8 n_pnvm_regions;
-	struct iwl_dram_data pnvm_regions_desc_array;
-	struct iwl_dram_data reduce_power_dram;
+	struct iwl_dram_regions pnvm_data;
+	struct iwl_dram_regions reduced_tables_data;

 	struct iwl_txq *txq_memory;
@@ -485,8 +482,8 @@ struct iwl_trans
 				      const struct pci_device_id *ent,
 				      const struct iwl_cfg_trans_params *cfg_trans);
 void iwl_trans_pcie_free(struct iwl_trans *trans);
-void iwl_trans_pcie_free_pnvm_dram(struct iwl_trans_pcie *trans_pcie,
-				   struct device *dev);
+void iwl_trans_pcie_free_pnvm_dram_regions(struct iwl_dram_regions *dram_regions,
+					   struct device *dev);
 bool __iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans);

 #define _iwl_trans_pcie_grab_nic_access(trans) \
...
@@ -1995,25 +1995,27 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans,
 	trans_pcie->fw_reset_handshake = trans_cfg->fw_reset_handshake;
 }

-void iwl_trans_pcie_free_pnvm_dram(struct iwl_trans_pcie *trans_pcie,
-				   struct device *dev)
+void iwl_trans_pcie_free_pnvm_dram_regions(struct iwl_dram_regions *dram_regions,
+					   struct device *dev)
 {
 	u8 i;
-	struct iwl_dram_data *desc_dram = &trans_pcie->pnvm_regions_desc_array;
+	struct iwl_dram_data *desc_dram = &dram_regions->prph_scratch_mem_desc;

-	for (i = 0; i < trans_pcie->n_pnvm_regions; i++) {
-		dma_free_coherent(dev, trans_pcie->pnvm_dram[i].size,
-				  trans_pcie->pnvm_dram[i].block,
-				  trans_pcie->pnvm_dram[i].physical);
+	/* free DRAM payloads */
+	for (i = 0; i < dram_regions->n_regions; i++) {
+		dma_free_coherent(dev, dram_regions->drams[i].size,
+				  dram_regions->drams[i].block,
+				  dram_regions->drams[i].physical);
 	}
-	trans_pcie->n_pnvm_regions = 0;
+	dram_regions->n_regions = 0;

+	/* free DRAM addresses array */
 	if (desc_dram->block) {
 		dma_free_coherent(dev, desc_dram->size,
 				  desc_dram->block,
 				  desc_dram->physical);
 	}
-	desc_dram->block = NULL;
+	memset(desc_dram, 0, sizeof(*desc_dram));
 }

@@ -2048,13 +2050,10 @@ void iwl_trans_pcie_free(struct iwl_trans *trans)
 	iwl_pcie_free_fw_monitor(trans);

-	iwl_trans_pcie_free_pnvm_dram(trans_pcie, trans->dev);
-
-	if (trans_pcie->reduce_power_dram.size)
-		dma_free_coherent(trans->dev,
-				  trans_pcie->reduce_power_dram.size,
-				  trans_pcie->reduce_power_dram.block,
-				  trans_pcie->reduce_power_dram.physical);
+	iwl_trans_pcie_free_pnvm_dram_regions(&trans_pcie->pnvm_data,
+					      trans->dev);
+	iwl_trans_pcie_free_pnvm_dram_regions(&trans_pcie->reduced_tables_data,
+					      trans->dev);

 	mutex_destroy(&trans_pcie->mutex);
 	iwl_trans_free(trans);
...