Commit 33182810 authored by Alon Giladi, committed by Johannes Berg

wifi: iwlwifi: Add support for fragmented pnvm images

Add support for fragmented PNVM images, gated on the new FW capability
bit IWL_UCODE_TLV_CAPA_FRAGMENTED_PNVM_IMG. The fragmented load path
itself is stubbed out here; its implementation arrives with the next
several commits.
Signed-off-by: Alon Giladi <alon.giladi@intel.com>
Signed-off-by: Gregory Greenman <gregory.greenman@intel.com>
Link: https://lore.kernel.org/r/20230606103519.c49bfaf435a9.I0278312e7c3355b224cd870d4f8cf6578d12f03e@changeid
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
parent f6fa5835
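
In brief, this commit threads the firmware capability structure from the
op-mode down to the transport, which then chooses between a segmented and
a continuous PNVM load path. A condensed sketch of the new call path,
assembled from the hunks below (annotated excerpt, not a compilable unit
on its own):

/* op-mode (mvm) passes the firmware capabilities down: */
ret = iwl_pnvm_load(mvm->trans, &mvm->notif_wait, &mvm->fw->ucode_capa);

/* the generic layer forwards them through the transport vtable: */
ret = iwl_trans_load_pnvm(trans, &pnvm_data, capa);	/* ops->load_pnvm() */
iwl_trans_set_pnvm(trans, capa);			/* ops->set_pnvm()  */

/* and the PCIe gen3 transport dispatches on the new capability bit: */
if (fw_has_capa(capa, IWL_UCODE_TLV_CAPA_FRAGMENTED_PNVM_IMG))
	ret = iwl_pcie_load_payloads_segments(trans, pnvm_payloads);
else
	ret = iwl_pcie_load_payloads_continuously(trans, pnvm_payloads, dram);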
@@ -323,6 +323,7 @@ typedef unsigned int __bitwise iwl_ucode_tlv_capa_t;
  * is supported.
  * @IWL_UCODE_TLV_CAPA_BT_COEX_RRC: supports BT Coex RRC
  * @IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT: supports gscan (no longer used)
+ * @IWL_UCODE_TLV_CAPA_FRAGMENTED_PNVM_IMG: supports fragmented PNVM image
  * @IWL_UCODE_TLV_CAPA_SOC_LATENCY_SUPPORT: the firmware supports setting
  *	stabilization latency for SoCs.
  * @IWL_UCODE_TLV_CAPA_STA_PM_NOTIF: firmware will send STA PM notification
@@ -398,6 +399,7 @@ enum iwl_ucode_tlv_capa {
 	IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT = (__force iwl_ucode_tlv_capa_t)31,
 
 	/* set 1 */
+	IWL_UCODE_TLV_CAPA_FRAGMENTED_PNVM_IMG = (__force iwl_ucode_tlv_capa_t)32,
 	IWL_UCODE_TLV_CAPA_SOC_LATENCY_SUPPORT = (__force iwl_ucode_tlv_capa_t)37,
 	IWL_UCODE_TLV_CAPA_STA_PM_NOTIF = (__force iwl_ucode_tlv_capa_t)38,
 	IWL_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT = (__force iwl_ucode_tlv_capa_t)39,
......
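
The "set 1" comment in the enum above marks a 32-bit boundary: capabilities
are carried in the firmware TLVs as 32-bit sets, so index 32 (the new
IWL_UCODE_TLV_CAPA_FRAGMENTED_PNVM_IMG) is bit 0 of set 1, which
fw_has_capa() then tests against the driver's capability bitmap. A minimal
stand-alone sketch of that index-to-set/bit mapping (stand-in types; the
real driver keeps the bits in struct iwl_ucode_capabilities):

#include <stdbool.h>
#include <stdint.h>

#define CAPA_FRAGMENTED_PNVM_IMG 32u	/* set 1, bit 0 */

struct capa_bitmap {
	uint32_t sets[4];		/* each "set" holds 32 capability bits */
};

static bool has_capa(const struct capa_bitmap *c, unsigned int idx)
{
	/* set index = idx / 32, bit within the set = idx % 32 */
	return c->sets[idx / 32] & (1u << (idx % 32));
}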
@@ -271,7 +271,8 @@ static u8 *iwl_get_pnvm_image(struct iwl_trans *trans_p, size_t *len)
 }
 
 int iwl_pnvm_load(struct iwl_trans *trans,
-		  struct iwl_notif_wait_data *notif_wait)
+		  struct iwl_notif_wait_data *notif_wait,
+		  const struct iwl_ucode_capabilities *capa)
 {
 	u8 *data;
 	size_t length;
@@ -303,7 +304,7 @@ int iwl_pnvm_load(struct iwl_trans *trans,
 		goto reduce_tables;
 	}
 
-	ret = iwl_trans_load_pnvm(trans, &pnvm_data);
+	ret = iwl_trans_load_pnvm(trans, &pnvm_data, capa);
 	/* can only free data after pvnm_data use, but
 	 * pnvm_data.version used below is not a pointer
 	 */
@@ -314,7 +315,7 @@ int iwl_pnvm_load(struct iwl_trans *trans,
 			 pnvm_data.version);
 	}
 
-	iwl_trans_set_pnvm(trans);
+	iwl_trans_set_pnvm(trans, capa);
 
 reduce_tables:
 	/* now try to get the reduce power table, if not loaded yet */
......
 /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
 /******************************************************************************
  *
- * Copyright(c) 2020-2021 Intel Corporation
+ * Copyright(c) 2020-2022 Intel Corporation
  *
  *****************************************************************************/
@@ -15,7 +15,8 @@
 #define MAX_PNVM_NAME 64
 
 int iwl_pnvm_load(struct iwl_trans *trans,
-		  struct iwl_notif_wait_data *notif_wait);
+		  struct iwl_notif_wait_data *notif_wait,
+		  const struct iwl_ucode_capabilities *capa);
 
 static inline
 void iwl_pnvm_get_fs_name(struct iwl_trans *trans,
......
@@ -280,8 +280,10 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
 void iwl_pcie_ctxt_info_gen3_free(struct iwl_trans *trans, bool alive);
 
 int iwl_trans_pcie_ctx_info_gen3_load_pnvm(struct iwl_trans *trans,
-					   const struct iwl_pnvm_image *pnvm_payloads);
-void iwl_trans_pcie_ctx_info_gen3_set_pnvm(struct iwl_trans *trans);
+					   const struct iwl_pnvm_image *pnvm_payloads,
+					   const struct iwl_ucode_capabilities *capa);
+void iwl_trans_pcie_ctx_info_gen3_set_pnvm(struct iwl_trans *trans,
+					   const struct iwl_ucode_capabilities *capa);
 int iwl_trans_pcie_ctx_info_gen3_set_reduce_power(struct iwl_trans *trans,
 						  const void *data, u32 len);
 int iwl_trans_pcie_ctx_info_gen3_set_step(struct iwl_trans *trans,
......
@@ -634,8 +634,10 @@ struct iwl_trans_ops {
 	void (*debugfs_cleanup)(struct iwl_trans *trans);
 	void (*sync_nmi)(struct iwl_trans *trans);
 	int (*load_pnvm)(struct iwl_trans *trans,
-			 const struct iwl_pnvm_image *pnvm_payloads);
-	void (*set_pnvm)(struct iwl_trans *trans);
+			 const struct iwl_pnvm_image *pnvm_payloads,
+			 const struct iwl_ucode_capabilities *capa);
+	void (*set_pnvm)(struct iwl_trans *trans,
+			 const struct iwl_ucode_capabilities *capa);
 	int (*set_reduce_power)(struct iwl_trans *trans,
 				const void *data, u32 len);
 	void (*interrupts)(struct iwl_trans *trans, bool enable);
@@ -1539,15 +1541,17 @@ void iwl_trans_sync_nmi_with_addr(struct iwl_trans *trans, u32 inta_addr,
 				  u32 sw_err_bit);
 
 static inline int iwl_trans_load_pnvm(struct iwl_trans *trans,
-				      const struct iwl_pnvm_image *pnvm_data)
+				      const struct iwl_pnvm_image *pnvm_data,
+				      const struct iwl_ucode_capabilities *capa)
 {
-	return trans->ops->load_pnvm(trans, pnvm_data);
+	return trans->ops->load_pnvm(trans, pnvm_data, capa);
 }
 
-static inline void iwl_trans_set_pnvm(struct iwl_trans *trans)
+static inline void iwl_trans_set_pnvm(struct iwl_trans *trans,
+				      const struct iwl_ucode_capabilities *capa)
 {
 	if (trans->ops->set_pnvm)
-		trans->ops->set_pnvm(trans);
+		trans->ops->set_pnvm(trans, capa);
 }
 
 static inline int iwl_trans_set_reduce_power(struct iwl_trans *trans,
......
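
Note the asymmetry in the inline wrappers above: load_pnvm is called
unconditionally, while set_pnvm is treated as optional and guarded by a
NULL check. A minimal stand-alone sketch of that mandatory/optional
vtable pattern (illustrative names, not the driver's):

struct ops_sketch {
	int (*load)(void *ctx);		/* mandatory: assumed non-NULL */
	void (*commit)(void *ctx);	/* optional: may be NULL */
};

static int run_ops_sketch(const struct ops_sketch *ops, void *ctx)
{
	int ret = ops->load(ctx);

	if (!ret && ops->commit)	/* guard the optional op */
		ops->commit(ctx);
	return ret;
}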
@@ -433,7 +433,8 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
 	/* if reached this point, Alive notification was received */
 	iwl_mei_alive_notif(true);
 
-	ret = iwl_pnvm_load(mvm->trans, &mvm->notif_wait);
+	ret = iwl_pnvm_load(mvm->trans, &mvm->notif_wait,
+			    &mvm->fw->ucode_capa);
 	if (ret) {
 		IWL_ERR(mvm, "Timeout waiting for PNVM load!\n");
 		iwl_fw_set_current_image(&mvm->fwrt, old_type);
......
@@ -281,33 +281,20 @@ void iwl_pcie_ctxt_info_gen3_free(struct iwl_trans *trans, bool alive)
 	trans_pcie->prph_info = NULL;
 }
 
-int iwl_trans_pcie_ctx_info_gen3_load_pnvm(struct iwl_trans *trans,
-					   const struct iwl_pnvm_image *pnvm_payloads)
+static int iwl_pcie_load_payloads_continuously(struct iwl_trans *trans,
+					       const struct iwl_pnvm_image *pnvm_data,
+					       struct iwl_dram_data *dram)
 {
-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl =
-		&trans_pcie->prph_scratch->ctrl_cfg;
-	struct iwl_dram_data *dram = &trans_pcie->pnvm_dram[0];
 	u32 len, len0, len1;
 
-	if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
-		return 0;
-
-	/* only allocate the DRAM if not allocated yet */
-	if (trans->pnvm_loaded)
-		return 0;
-
-	if (WARN_ON(prph_sc_ctrl->pnvm_cfg.pnvm_size))
-		return -EBUSY;
-
-	if (pnvm_payloads->n_chunks != UNFRAGMENTED_PNVM_PAYLOADS_NUMBER) {
+	if (pnvm_data->n_chunks != UNFRAGMENTED_PNVM_PAYLOADS_NUMBER) {
 		IWL_DEBUG_FW(trans, "expected 2 payloads, got %d.\n",
-			     pnvm_payloads->n_chunks);
-		return -EINVAL;
+			     pnvm_data->n_chunks);
+		return -EINVAL;
 	}
 
-	len0 = pnvm_payloads->chunks[0].len;
-	len1 = pnvm_payloads->chunks[1].len;
+	len0 = pnvm_data->chunks[0].len;
+	len1 = pnvm_data->chunks[1].len;
 
 	if (len1 > 0xFFFFFFFF - len0) {
 		IWL_DEBUG_FW(trans, "sizes of payloads overflow.\n");
 		return -EINVAL;
@@ -322,32 +309,76 @@ int iwl_trans_pcie_ctx_info_gen3_load_pnvm(struct iwl_trans *trans,
 	}
 
 	dram->size = len;
-	memcpy(dram->block, pnvm_payloads->chunks[0].data, len0);
-	memcpy((u8 *)dram->block + len0, pnvm_payloads->chunks[1].data, len1);
-	trans_pcie->n_pnvm_regions = 1;
+	memcpy(dram->block, pnvm_data->chunks[0].data, len0);
+	memcpy((u8 *)dram->block + len0, pnvm_data->chunks[1].data, len1);
 
-	trans->pnvm_loaded = true;
 	return 0;
 }
 
-void iwl_trans_pcie_ctx_info_gen3_set_pnvm(struct iwl_trans *trans)
+/* FIXME: An implementation will be added with the next several commits. */
+static int iwl_pcie_load_payloads_segments(struct iwl_trans *trans,
+					   const struct iwl_pnvm_image *pnvm_payloads)
+{
+	return -ENOMEM;
+}
+
+int iwl_trans_pcie_ctx_info_gen3_load_pnvm(struct iwl_trans *trans,
+					   const struct iwl_pnvm_image *pnvm_payloads,
+					   const struct iwl_ucode_capabilities *capa)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl =
 		&trans_pcie->prph_scratch->ctrl_cfg;
+	struct iwl_dram_data *dram = &trans_pcie->pnvm_dram[0];
+	int ret = 0;
+
+	/* only allocate the DRAM if not allocated yet */
+	if (trans->pnvm_loaded)
+		return 0;
+
+	if (WARN_ON(prph_sc_ctrl->pnvm_cfg.pnvm_size))
+		return -EBUSY;
 
 	if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
-		return;
-
-	/* FIXME: currently we concatenate payloads and save them only in
-	 * pnvm_dram[0] - therefor only pnvm_dram[0] is delivered to the
-	 * prph_sc. Need to add a UCODE sensitivity and another case in which
-	 * we deliver to the prph_sc an array with all the DRAM addresses.
-	 */
+		return 0;
+
+	if (fw_has_capa(capa, IWL_UCODE_TLV_CAPA_FRAGMENTED_PNVM_IMG))
+		return iwl_pcie_load_payloads_segments(trans, pnvm_payloads);
+
+	ret = iwl_pcie_load_payloads_continuously(trans, pnvm_payloads, dram);
+	if (!ret) {
+		trans_pcie->n_pnvm_regions = 1;
+		trans->pnvm_loaded = true;
+	}
+
+	return ret;
+}
+
+/* FIXME: An implementation will be added with the next several commits. */
+static void iwl_pcie_set_pnvm_segments(struct iwl_trans *trans) {}
+
+static void iwl_pcie_set_continuous_pnvm(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl =
+		&trans_pcie->prph_scratch->ctrl_cfg;
+
 	prph_sc_ctrl->pnvm_cfg.pnvm_base_addr =
 		cpu_to_le64(trans_pcie->pnvm_dram[0].physical);
 	prph_sc_ctrl->pnvm_cfg.pnvm_size =
 		cpu_to_le32(trans_pcie->pnvm_dram[0].size);
 }
 
+void iwl_trans_pcie_ctx_info_gen3_set_pnvm(struct iwl_trans *trans,
+					   const struct iwl_ucode_capabilities *capa)
+{
+	if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
+		return;
+
+	if (fw_has_capa(capa, IWL_UCODE_TLV_CAPA_FRAGMENTED_PNVM_IMG))
+		iwl_pcie_set_pnvm_segments(trans);
+	else
+		iwl_pcie_set_continuous_pnvm(trans);
+}
+
 int iwl_trans_pcie_ctx_info_gen3_set_reduce_power(struct iwl_trans *trans,
......
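
At this point the fragmented path is deliberately inert:
iwl_pcie_load_payloads_segments() returns -ENOMEM and
iwl_pcie_set_pnvm_segments() is empty, so firmware that advertises the
capability cannot load a PNVM until the follow-up commits land. The FIXME
removed from the old set_pnvm() hints at the intended direction: deliver
an array of DRAM addresses rather than one concatenated block. A
hypothetical sketch of that per-chunk shape, assuming the existing
iwl_dram_data layout and the standard dma_alloc_coherent() kernel API
(error unwinding omitted; this is not the eventual implementation):

static int load_payloads_segments_sketch(struct device *dev,
					 const struct iwl_pnvm_image *img,
					 struct iwl_dram_data *dram)
{
	int i;

	for (i = 0; i < img->n_chunks; i++) {
		/* one DMA-coherent region per payload chunk */
		dram[i].size = img->chunks[i].len;
		dram[i].block = dma_alloc_coherent(dev, dram[i].size,
						   &dram[i].physical,
						   GFP_KERNEL);
		if (!dram[i].block)
			return -ENOMEM;	/* matches the stub's error code */
		memcpy(dram[i].block, img->chunks[i].data, dram[i].size);
	}

	return 0;
}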