
wifi: iwlwifi: Implement loading and setting of fragmented pnvm image

Save the pnvm payloads in several DRAM segments (instead of a single
one, as before). In addition, allocate a FW structure in DRAM that
holds the segments' addresses and pass its address to the FW. This is
done only when the FW has the capability to handle pnvm images this
way, which helps to process large pnvm images.

Signed-off-by: Alon Giladi <alon.giladi@intel.com>
Signed-off-by: Gregory Greenman <gregory.greenman@intel.com>
Link: https://lore.kernel.org/r/20230606103519.dbdad8995ce1.I986213527982637042532de3851a1bd8a11be87a@changeid
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Alon Giladi 2023-06-06 10:43:03 +03:00 committed by Johannes Berg
parent 331828106e
commit 63b9e7b9f0
4 changed files with 86 additions and 6 deletions
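
For orientation before the diff: the sketch below illustrates the scheme the
commit message describes. Each image fragment is copied into its own
DMA-visible buffer, the buffer addresses are collected into a single
descriptor array in DRAM, and the firmware is handed one pointer to that
array. This is only an illustration with hypothetical names (fw_addr_array,
fragment, dma_alloc_stub, load_fragmented_image); the actual driver code is
in the iwl_pcie_load_payloads_segments() hunk below.

/*
 * Illustrative sketch only -- not the driver code from this commit.
 * All names here are hypothetical.
 */
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define MAX_FRAGMENTS 64                /* hypothetical upper bound */

struct fw_addr_array {                  /* lives in DRAM, read by firmware */
        uint64_t frag_addr[MAX_FRAGMENTS];
};

struct fragment {
        const void *data;
        size_t len;
};

/* stand-in for a real coherent DMA allocator; returns a fake bus address */
static void *dma_alloc_stub(size_t len, uint64_t *bus_addr)
{
        void *p = calloc(1, len);

        *bus_addr = (uint64_t)(uintptr_t)p;
        return p;
}

/* fills *desc_addr with the bus address of the descriptor array; 0 on success */
static int load_fragmented_image(const struct fragment *frags, int n_frags,
                                 uint64_t *desc_addr)
{
        struct fw_addr_array *desc;
        int i;

        if (n_frags > MAX_FRAGMENTS)
                return -1;

        desc = dma_alloc_stub(sizeof(*desc), desc_addr);
        if (!desc)
                return -1;

        for (i = 0; i < n_frags; i++) {
                uint64_t addr;
                void *dst = dma_alloc_stub(frags[i].len, &addr);

                if (!dst)
                        return -1;      /* real code would unwind earlier allocations */

                memcpy(dst, frags[i].data, frags[i].len);
                desc->frag_addr[i] = addr;      /* firmware follows these pointers */
        }

        return 0;
}

The driver version below additionally tracks the number of allocated regions
and frees everything already allocated on failure, which the sketch omits for
brevity.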


@@ -109,6 +109,14 @@ struct iwl_prph_scratch_pnvm_cfg {
 	__le32 reserved;
 } __packed; /* PERIPH_SCRATCH_PNVM_CFG_S */
 
+/**
+ * struct iwl_prph_scrath_mem_desc_addr_array
+ * @mem_descs: array of dram addresses.
+ * Each address is the beginning of a pnvm payload.
+ */
+struct iwl_prph_scrath_mem_desc_addr_array {
+	__le64 mem_descs[IPC_DRAM_MAP_ENTRY_NUM_MAX];
+} __packed; /* PERIPH_SCRATCH_MEM_DESC_ADDR_ARRAY_S_VER_1 */
 /*
  * struct iwl_prph_scratch_hwm_cfg - hwm config
  * @hwm_base_addr: hwm start address


@@ -315,11 +315,58 @@ static int iwl_pcie_load_payloads_continuously(struct iwl_trans *trans,
 	return 0;
 }
 
-/* FIXME: An implementation will be added with the next several commits. */
-static int iwl_pcie_load_payloads_segments(struct iwl_trans *trans,
-					   const struct iwl_pnvm_image *pnvm_payloads)
+static int iwl_pcie_load_payloads_segments
+				(struct iwl_trans *trans,
+				 const struct iwl_pnvm_image *pnvm_data)
 {
-	return -ENOMEM;
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_dram_data *cur_pnvm_dram = &trans_pcie->pnvm_dram[0],
+			     *desc_dram = &trans_pcie->pnvm_regions_desc_array;
+	struct iwl_prph_scrath_mem_desc_addr_array *addresses;
+	const void *data;
+	u32 len;
+	int i;
+
+	/* allocate and init DRAM descriptors array */
+	len = sizeof(struct iwl_prph_scrath_mem_desc_addr_array);
+	desc_dram->block = iwl_pcie_ctxt_info_dma_alloc_coherent
+						(trans,
+						 len,
+						 &desc_dram->physical);
+
+	if (!desc_dram->block) {
+		IWL_DEBUG_FW(trans, "Failed to allocate PNVM DMA.\n");
+		return -ENOMEM;
+	}
+
+	desc_dram->size = len;
+	memset(desc_dram->block, 0, len);
+
+	/* allocate DRAM region for each payload */
+	trans_pcie->n_pnvm_regions = 0;
+	for (i = 0; i < pnvm_data->n_chunks; i++) {
+		len = pnvm_data->chunks[i].len;
+		data = pnvm_data->chunks[i].data;
+
+		if (iwl_pcie_ctxt_info_alloc_dma(trans, data, len,
+						 cur_pnvm_dram)) {
+			iwl_trans_pcie_free_pnvm_dram(trans_pcie, trans->dev);
+			return -ENOMEM;
+		}
+
+		trans_pcie->n_pnvm_regions++;
+		cur_pnvm_dram++;
+	}
+
+	/* fill desc with the DRAM payloads addresses */
+	addresses = desc_dram->block;
+	for (i = 0; i < pnvm_data->n_chunks; i++) {
+		addresses->mem_descs[i] =
+			cpu_to_le64(trans_pcie->pnvm_dram[i].physical);
+	}
+
+	trans->pnvm_loaded = true;
+	return 0;
 }
 
 int iwl_trans_pcie_ctx_info_gen3_load_pnvm(struct iwl_trans *trans,
int iwl_trans_pcie_ctx_info_gen3_load_pnvm(struct iwl_trans *trans,
@@ -342,9 +389,16 @@ int iwl_trans_pcie_ctx_info_gen3_load_pnvm(struct iwl_trans *trans,
 	if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
 		return 0;
 
+	if (!pnvm_payloads->n_chunks) {
+		IWL_DEBUG_FW(trans, "no payloads\n");
+		return -EINVAL;
+	}
+
+	/* allocate several DRAM sections */
+	if (fw_has_capa(capa, IWL_UCODE_TLV_CAPA_FRAGMENTED_PNVM_IMG))
+		return iwl_pcie_load_payloads_segments(trans, pnvm_payloads);
+
+	/* allocate one DRAM section */
 	ret = iwl_pcie_load_payloads_continuously(trans, pnvm_payloads, dram);
 	if (!ret) {
 		trans_pcie->n_pnvm_regions = 1;
@@ -354,8 +408,15 @@ int iwl_trans_pcie_ctx_info_gen3_load_pnvm(struct iwl_trans *trans,
 	return ret;
 }
 
-/* FIXME: An implementation will be added with the next several commits. */
-static void iwl_pcie_set_pnvm_segments(struct iwl_trans *trans) {}
+static void iwl_pcie_set_pnvm_segments(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl =
+		&trans_pcie->prph_scratch->ctrl_cfg;
+
+	prph_sc_ctrl->pnvm_cfg.pnvm_base_addr =
+		cpu_to_le64(trans_pcie->pnvm_regions_desc_array.physical);
+}
 
 static void iwl_pcie_set_continuous_pnvm(struct iwl_trans *trans)
 {


@@ -309,6 +309,8 @@ enum iwl_pcie_imr_status {
  * @kw: keep warm address
  * @pnvm_dram: array of several DRAM areas that contains the PNVM data
  * @n_pnvm_regions: number of DRAM regions that were allocated for the pnvm
+ * @pnvm_regions_desc_array: array of PNVM payloads addresses.
+ *	allocated in DRAM and sent to FW.
  * @pci_dev: basic pci-network driver stuff
  * @hw_base: pci hardware address support
  * @ucode_write_complete: indicates that the ucode has been copied.
@@ -385,6 +387,7 @@ struct iwl_trans_pcie {
 	/* pnvm data */
 	struct iwl_dram_data pnvm_dram[IPC_DRAM_MAP_ENTRY_NUM_MAX];
 	u8 n_pnvm_regions;
+	struct iwl_dram_data pnvm_regions_desc_array;
 
 	struct iwl_dram_data reduce_power_dram;
 	struct iwl_txq *txq_memory;


@@ -1999,6 +1999,7 @@ void iwl_trans_pcie_free_pnvm_dram(struct iwl_trans_pcie *trans_pcie,
 				   struct device *dev)
 {
 	u8 i;
+	struct iwl_dram_data *desc_dram = &trans_pcie->pnvm_regions_desc_array;
 
 	for (i = 0; i < trans_pcie->n_pnvm_regions; i++) {
 		dma_free_coherent(dev, trans_pcie->pnvm_dram[i].size,
@@ -2006,6 +2007,13 @@ void iwl_trans_pcie_free_pnvm_dram(struct iwl_trans_pcie *trans_pcie,
 				  trans_pcie->pnvm_dram[i].physical);
 	}
 	trans_pcie->n_pnvm_regions = 0;
+
+	if (desc_dram->block) {
+		dma_free_coherent(dev, desc_dram->size,
+				  desc_dram->block,
+				  desc_dram->physical);
+	}
+	desc_dram->block = NULL;
 }
 
 void iwl_trans_pcie_free(struct iwl_trans *trans)