Commit e9600bc1 authored by Liam Girdwood's avatar Liam Girdwood Committed by Mark Brown

ASoC: Intel: Make ADSP memory block allocation more generic

Current block allocation is tied to block type and requestor type. Make the
allocation more generic by removing the struct module parameter and adding
a generic block allocator structure. Also pass in the list that the blocks
have to be added to in order to remove dependence on block requestor type.

ASoC: Intel: update scratch allocator to use generic block allocator

Update the scratch allocator to use the generic block allocator and calculate
total scratch buffer size.

ASoC: Intel: Add call to calculate offsets internally within the DSP.

A call to calculate internal DSP memory addresses used to allocate persistent
and scratch buffers.

ASoC: Intel: Add runtime module support.

Add support for runtime module objects that can be created for every FW
module that is parsed from the FW file. This gives a 1:N mapping between
the FW module from file and the runtime instantiations of that module.

We also need to make sure we remove every module and runtime module when
we unload the FW.

ASoC: Intel: Add DMA firmware loading support

Add support for DMA to load firmware modules to the DSP memory blocks.
Two DMA engines are supported, DesignWare and Intel MID.

ASoC: Intel: Add runtime module lookup API call

Add an API to allow quick lookup of runtime modules based on ID.

ASoC: Intel: Provide streams with dynamic module information

Remove the hard coded module parameters and provide each module with
dynamically generated buffer information for scratch and persistent
buffers.
Signed-off-by: default avatarLiam Girdwood <liam.r.girdwood@linux.intel.com>
Signed-off-by: default avatarMark Brown <broonie@kernel.org>
parent 9a80e8f5
......@@ -67,17 +67,12 @@ static int sst_byt_parse_module(struct sst_dsp *dsp, struct sst_fw *fw,
{
struct dma_block_info *block;
struct sst_module *mod;
struct sst_module_data block_data;
struct sst_module_template template;
int count;
memset(&template, 0, sizeof(template));
template.id = module->type;
template.entry = module->entry_point;
template.p.type = SST_MEM_DRAM;
template.p.data_type = SST_DATA_P;
template.s.type = SST_MEM_DRAM;
template.s.data_type = SST_DATA_S;
mod = sst_module_new(fw, &template, NULL);
if (mod == NULL)
......@@ -94,19 +89,19 @@ static int sst_byt_parse_module(struct sst_dsp *dsp, struct sst_fw *fw,
switch (block->type) {
case SST_BYT_IRAM:
block_data.offset = block->ram_offset +
mod->offset = block->ram_offset +
dsp->addr.iram_offset;
block_data.type = SST_MEM_IRAM;
mod->type = SST_MEM_IRAM;
break;
case SST_BYT_DRAM:
block_data.offset = block->ram_offset +
mod->offset = block->ram_offset +
dsp->addr.dram_offset;
block_data.type = SST_MEM_DRAM;
mod->type = SST_MEM_DRAM;
break;
case SST_BYT_CACHE:
block_data.offset = block->ram_offset +
mod->offset = block->ram_offset +
(dsp->addr.fw_ext - dsp->addr.lpe);
block_data.type = SST_MEM_CACHE;
mod->type = SST_MEM_CACHE;
break;
default:
dev_err(dsp->dev, "wrong ram type 0x%x in block0x%x\n",
......@@ -114,11 +109,10 @@ static int sst_byt_parse_module(struct sst_dsp *dsp, struct sst_fw *fw,
return -EINVAL;
}
block_data.size = block->size;
block_data.data_type = SST_DATA_M;
block_data.data = (void *)block + sizeof(*block);
mod->size = block->size;
mod->data = (void *)block + sizeof(*block);
sst_module_insert_fixed_block(mod, &block_data);
sst_module_alloc_blocks(mod);
block = (void *)block + sizeof(*block) + block->size;
}
......
......@@ -26,6 +26,9 @@ struct sst_mem_block;
struct sst_module;
struct sst_fw;
/* do we need to remove or keep */
#define DSP_DRAM_ADDR_OFFSET 0x400000
/*
* DSP Operations exported by platform Audio DSP driver.
*/
......@@ -67,6 +70,8 @@ struct sst_addr {
u32 shim_offset;
u32 iram_offset;
u32 dram_offset;
u32 dsp_iram_offset;
u32 dsp_dram_offset;
void __iomem *lpe;
void __iomem *shim;
void __iomem *pci_cfg;
......@@ -83,15 +88,6 @@ struct sst_mailbox {
size_t out_size;
};
/*
* Audio DSP Firmware data types.
*/
enum sst_data_type {
SST_DATA_M = 0, /* module block data */
SST_DATA_P = 1, /* peristant data (text, data) */
SST_DATA_S = 2, /* scratch data (usually buffers) */
};
/*
* Audio DSP memory block types.
*/
......@@ -124,23 +120,6 @@ struct sst_fw {
void *private; /* core doesn't touch this */
};
/*
* Audio DSP Generic Module data.
*
* This is used to dsecribe any sections of persistent (text and data) and
* scratch (buffers) of module data in ADSP memory space.
*/
struct sst_module_data {
enum sst_mem_type type; /* destination memory type */
enum sst_data_type data_type; /* type of module data */
u32 size; /* size in bytes */
int32_t offset; /* offset in FW file */
u32 data_offset; /* offset in ADSP memory space */
void *data; /* module data */
};
/*
* Audio DSP Generic Module Template.
*
......@@ -150,15 +129,52 @@ struct sst_module_data {
struct sst_module_template {
u32 id;
u32 entry; /* entry point */
struct sst_module_data s; /* scratch data */
struct sst_module_data p; /* peristant data */
u32 scratch_size;
u32 persistent_size;
};
/*
* Block Allocator - Used to allocate blocks of DSP memory.
*/
struct sst_block_allocator {
u32 id;
u32 offset;
int size;
enum sst_mem_type type;
};
/*
* Runtime Module Instance - A module object can be instanciated multiple
* times within the DSP FW.
*/
struct sst_module_runtime {
struct sst_dsp *dsp;
int id;
struct sst_module *module; /* parent module we belong too */
u32 persistent_offset; /* private memory offset */
void *private;
struct list_head list;
struct list_head block_list; /* list of blocks used */
};
/*
* Runtime Module Context - The runtime context must be manually stored by the
* driver prior to enter S3 and restored after leaving S3. This should really be
* part of the memory context saved by the enter D3 message IPC ???
*/
struct sst_module_runtime_context {
dma_addr_t dma_buffer;
u32 *buffer;
};
/*
* Audio DSP Generic Module.
*
* Each Firmware file can consist of 1..N modules. A module can span multiple
* ADSP memory blocks. The simplest FW will be a file with 1 module.
* ADSP memory blocks. The simplest FW will be a file with 1 module. A module
* can be instanciated multiple times in the DSP.
*/
struct sst_module {
struct sst_dsp *dsp;
......@@ -167,10 +183,13 @@ struct sst_module {
/* module configuration */
u32 id;
u32 entry; /* module entry point */
u32 offset; /* module offset in firmware file */
s32 offset; /* module offset in firmware file */
u32 size; /* module size */
struct sst_module_data s; /* scratch data */
struct sst_module_data p; /* peristant data */
u32 scratch_size; /* global scratch memory required */
u32 persistent_size; /* private memory required */
enum sst_mem_type type; /* destination memory type */
u32 data_offset; /* offset in ADSP memory space */
void *data; /* module data */
/* runtime */
u32 usage_count; /* can be unloaded if count == 0 */
......@@ -180,6 +199,7 @@ struct sst_module {
struct list_head block_list; /* Module list of blocks in use */
struct list_head list; /* DSP list of modules */
struct list_head list_fw; /* FW list of modules */
struct list_head runtime_list; /* list of runtime module objects*/
};
/*
......@@ -208,7 +228,6 @@ struct sst_mem_block {
struct sst_block_ops *ops; /* block operations, if any */
/* block status */
enum sst_data_type data_type; /* data type held in this block */
u32 bytes_used; /* bytes in use by modules */
void *private; /* generic core does not touch this */
int users; /* number of modules using this block */
......@@ -253,6 +272,11 @@ struct sst_dsp {
struct list_head module_list;
struct list_head fw_list;
/* scratch buffer */
struct list_head scratch_block_list;
u32 scratch_offset;
u32 scratch_size;
/* platform data */
struct sst_pdata *pdata;
......@@ -290,18 +314,33 @@ void sst_fw_unload(struct sst_fw *sst_fw);
/* Create/Free firmware modules */
struct sst_module *sst_module_new(struct sst_fw *sst_fw,
struct sst_module_template *template, void *private);
void sst_module_free(struct sst_module *sst_module);
int sst_module_insert(struct sst_module *sst_module);
int sst_module_remove(struct sst_module *sst_module);
int sst_module_insert_fixed_block(struct sst_module *module,
struct sst_module_data *data);
void sst_module_free(struct sst_module *module);
struct sst_module *sst_module_get_from_id(struct sst_dsp *dsp, u32 id);
/* allocate/free pesistent/scratch memory regions managed by drv */
struct sst_module *sst_mem_block_alloc_scratch(struct sst_dsp *dsp);
void sst_mem_block_free_scratch(struct sst_dsp *dsp,
struct sst_module *scratch);
int sst_block_module_remove(struct sst_module *module);
int sst_module_alloc_blocks(struct sst_module *module);
int sst_module_free_blocks(struct sst_module *module);
/* Create/Free firmware module runtime instances */
struct sst_module_runtime *sst_module_runtime_new(struct sst_module *module,
int id, void *private);
void sst_module_runtime_free(struct sst_module_runtime *runtime);
struct sst_module_runtime *sst_module_runtime_get_from_id(
struct sst_module *module, u32 id);
int sst_module_runtime_alloc_blocks(struct sst_module_runtime *runtime,
int offset);
int sst_module_runtime_free_blocks(struct sst_module_runtime *runtime);
int sst_module_runtime_save(struct sst_module_runtime *runtime,
struct sst_module_runtime_context *context);
int sst_module_runtime_restore(struct sst_module_runtime *runtime,
struct sst_module_runtime_context *context);
/* generic block allocation */
int sst_alloc_blocks(struct sst_dsp *dsp, struct sst_block_allocator *ba,
struct list_head *block_list);
int sst_free_blocks(struct sst_dsp *dsp, struct list_head *block_list);
/* scratch allocation */
int sst_block_alloc_scratch(struct sst_dsp *dsp);
void sst_block_free_scratch(struct sst_dsp *dsp);
/* Register the DSPs memory blocks - would be nice to read from ACPI */
struct sst_mem_block *sst_mem_block_register(struct sst_dsp *dsp, u32 offset,
......@@ -309,4 +348,10 @@ struct sst_mem_block *sst_mem_block_register(struct sst_dsp *dsp, u32 offset,
void *private);
void sst_mem_block_unregister_all(struct sst_dsp *dsp);
/* Create/Free DMA resources */
int sst_dma_new(struct sst_dsp *sst);
void sst_dma_free(struct sst_dma *dma);
u32 sst_dsp_get_offset(struct sst_dsp *dsp, u32 offset,
enum sst_mem_type type);
#endif
......@@ -352,6 +352,7 @@ struct sst_dsp *sst_dsp_new(struct device *dev,
INIT_LIST_HEAD(&sst->free_block_list);
INIT_LIST_HEAD(&sst->module_list);
INIT_LIST_HEAD(&sst->fw_list);
INIT_LIST_HEAD(&sst->scratch_block_list);
/* Initialise SST Audio DSP */
if (sst->ops->init) {
......@@ -366,6 +367,10 @@ struct sst_dsp *sst_dsp_new(struct device *dev,
if (err)
goto irq_err;
err = sst_dma_new(sst);
if (err)
dev_warn(dev, "sst_dma_new failed %d\n", err);
return sst;
irq_err:
......@@ -381,6 +386,9 @@ void sst_dsp_free(struct sst_dsp *sst)
free_irq(sst->irq, sst);
if (sst->ops->free)
sst->ops->free(sst);
if (sst->dma)
sst_dma_free(sst->dma);
}
EXPORT_SYMBOL_GPL(sst_dsp_free);
......
......@@ -245,6 +245,13 @@ void sst_memcpy_fromio_32(struct sst_dsp *sst,
/* DSP reset & boot */
void sst_dsp_reset(struct sst_dsp *sst);
int sst_dsp_boot(struct sst_dsp *sst);
/* DMA */
int sst_dsp_dma_get_channel(struct sst_dsp *dsp, int chan_id);
void sst_dsp_dma_put_channel(struct sst_dsp *dsp);
int sst_dsp_dma_copyfrom(struct sst_dsp *sst, dma_addr_t dest_addr,
dma_addr_t src_addr, size_t size);
int sst_dsp_dma_copyto(struct sst_dsp *sst, dma_addr_t dest_addr,
dma_addr_t src_addr, size_t size);
/* Msg IO */
void sst_dsp_ipc_msg_tx(struct sst_dsp *dsp, u32 msg);
......
......@@ -23,6 +23,11 @@
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/pci.h>
#include <linux/acpi.h>
/* supported DMA engine drivers */
#include <linux/platform_data/dma-dw.h>
#include <linux/dma/dw.h>
#include <asm/page.h>
#include <asm/pgtable.h>
......@@ -30,7 +35,20 @@
#include "sst-dsp.h"
#include "sst-dsp-priv.h"
static void block_module_remove(struct sst_module *module);
#define SST_DMA_RESOURCES 2
#define SST_DSP_DMA_MAX_BURST 0x3
#define SST_HSW_BLOCK_ANY 0xffffffff
#define SST_HSW_MASK_DMA_ADDR_DSP 0xfff00000
struct sst_dma {
struct sst_dsp *sst;
struct dw_dma_chip *chip;
struct dma_async_tx_descriptor *desc;
struct dma_chan *ch;
};
static inline void sst_memcpy32(volatile void __iomem *dest, void *src, u32 bytes)
{
......@@ -38,6 +56,281 @@ static inline void sst_memcpy32(volatile void __iomem *dest, void *src, u32 byte
__iowrite32_copy((void *)dest, src, bytes/4);
}
static void sst_dma_transfer_complete(void *arg)
{
struct sst_dsp *sst = (struct sst_dsp *)arg;
dev_dbg(sst->dev, "DMA: callback\n");
}
static int sst_dsp_dma_copy(struct sst_dsp *sst, dma_addr_t dest_addr,
dma_addr_t src_addr, size_t size)
{
struct dma_async_tx_descriptor *desc;
struct sst_dma *dma = sst->dma;
if (dma->ch == NULL) {
dev_err(sst->dev, "error: no DMA channel\n");
return -ENODEV;
}
dev_dbg(sst->dev, "DMA: src: 0x%lx dest 0x%lx size %zu\n",
(unsigned long)src_addr, (unsigned long)dest_addr, size);
desc = dma->ch->device->device_prep_dma_memcpy(dma->ch, dest_addr,
src_addr, size, DMA_CTRL_ACK);
if (!desc){
dev_err(sst->dev, "error: dma prep memcpy failed\n");
return -EINVAL;
}
desc->callback = sst_dma_transfer_complete;
desc->callback_param = sst;
desc->tx_submit(desc);
dma_wait_for_async_tx(desc);
return 0;
}
/* copy to DSP */
int sst_dsp_dma_copyto(struct sst_dsp *sst, dma_addr_t dest_addr,
dma_addr_t src_addr, size_t size)
{
return sst_dsp_dma_copy(sst, dest_addr | SST_HSW_MASK_DMA_ADDR_DSP,
src_addr, size);
}
EXPORT_SYMBOL_GPL(sst_dsp_dma_copyto);
/* copy from DSP */
int sst_dsp_dma_copyfrom(struct sst_dsp *sst, dma_addr_t dest_addr,
dma_addr_t src_addr, size_t size)
{
return sst_dsp_dma_copy(sst, dest_addr,
src_addr | SST_HSW_MASK_DMA_ADDR_DSP, size);
}
EXPORT_SYMBOL_GPL(sst_dsp_dma_copyfrom);
/* remove module from memory - callers hold locks */
static void block_list_remove(struct sst_dsp *dsp,
struct list_head *block_list)
{
struct sst_mem_block *block, *tmp;
int err;
/* disable each block */
list_for_each_entry(block, block_list, module_list) {
if (block->ops && block->ops->disable) {
err = block->ops->disable(block);
if (err < 0)
dev_err(dsp->dev,
"error: cant disable block %d:%d\n",
block->type, block->index);
}
}
/* mark each block as free */
list_for_each_entry_safe(block, tmp, block_list, module_list) {
list_del(&block->module_list);
list_move(&block->list, &dsp->free_block_list);
dev_dbg(dsp->dev, "block freed %d:%d at offset 0x%x\n",
block->type, block->index, block->offset);
}
}
/* prepare the memory block to receive data from host - callers hold locks */
static int block_list_prepare(struct sst_dsp *dsp,
struct list_head *block_list)
{
struct sst_mem_block *block;
int ret = 0;
/* enable each block so that's it'e ready for data */
list_for_each_entry(block, block_list, module_list) {
if (block->ops && block->ops->enable) {
ret = block->ops->enable(block);
if (ret < 0) {
dev_err(dsp->dev,
"error: cant disable block %d:%d\n",
block->type, block->index);
goto err;
}
}
}
return ret;
err:
list_for_each_entry(block, block_list, module_list) {
if (block->ops && block->ops->disable)
block->ops->disable(block);
}
return ret;
}
struct dw_dma_platform_data dw_pdata = {
.is_private = 1,
.chan_allocation_order = CHAN_ALLOCATION_ASCENDING,
.chan_priority = CHAN_PRIORITY_ASCENDING,
};
static struct dw_dma_chip *dw_probe(struct device *dev, struct resource *mem,
int irq)
{
struct dw_dma_chip *chip;
int err;
chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
if (!chip)
return ERR_PTR(-ENOMEM);
chip->irq = irq;
chip->regs = devm_ioremap_resource(dev, mem);
if (IS_ERR(chip->regs))
return ERR_CAST(chip->regs);
err = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(31));
if (err)
return ERR_PTR(err);
chip->dev = dev;
err = dw_dma_probe(chip, &dw_pdata);
if (err)
return ERR_PTR(err);
return chip;
}
static void dw_remove(struct dw_dma_chip *chip)
{
dw_dma_remove(chip);
}
static bool dma_chan_filter(struct dma_chan *chan, void *param)
{
struct sst_dsp *dsp = (struct sst_dsp *)param;
return chan->device->dev == dsp->dma_dev;
}
int sst_dsp_dma_get_channel(struct sst_dsp *dsp, int chan_id)
{
struct sst_dma *dma = dsp->dma;
struct dma_slave_config slave;
dma_cap_mask_t mask;
int ret;
/* The Intel MID DMA engine driver needs the slave config set but
* Synopsis DMA engine driver safely ignores the slave config */
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
dma_cap_set(DMA_MEMCPY, mask);
dma->ch = dma_request_channel(mask, dma_chan_filter, dsp);
if (dma->ch == NULL) {
dev_err(dsp->dev, "error: DMA request channel failed\n");
return -EIO;
}
memset(&slave, 0, sizeof(slave));
slave.direction = DMA_MEM_TO_DEV;
slave.src_addr_width =
slave.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
slave.src_maxburst = slave.dst_maxburst = SST_DSP_DMA_MAX_BURST;
ret = dmaengine_slave_config(dma->ch, &slave);
if (ret) {
dev_err(dsp->dev, "error: unable to set DMA slave config %d\n",
ret);
dma_release_channel(dma->ch);
dma->ch = NULL;
}
return ret;
}
EXPORT_SYMBOL_GPL(sst_dsp_dma_get_channel);
void sst_dsp_dma_put_channel(struct sst_dsp *dsp)
{
struct sst_dma *dma = dsp->dma;
if (!dma->ch)
return;
dma_release_channel(dma->ch);
dma->ch = NULL;
}
EXPORT_SYMBOL_GPL(sst_dsp_dma_put_channel);
int sst_dma_new(struct sst_dsp *sst)
{
struct sst_pdata *sst_pdata = sst->pdata;
struct sst_dma *dma;
struct resource mem;
const char *dma_dev_name;
int ret = 0;
/* configure the correct platform data for whatever DMA engine
* is attached to the ADSP IP. */
switch (sst->pdata->dma_engine) {
case SST_DMA_TYPE_DW:
dma_dev_name = "dw_dmac";
break;
case SST_DMA_TYPE_MID:
dma_dev_name = "Intel MID DMA";
break;
default:
dev_err(sst->dev, "error: invalid DMA engine %d\n",
sst->pdata->dma_engine);
return -EINVAL;
}
dma = devm_kzalloc(sst->dev, sizeof(struct sst_dma), GFP_KERNEL);
if (!dma)
return -ENOMEM;
dma->sst = sst;
memset(&mem, 0, sizeof(mem));
mem.start = sst->addr.lpe_base + sst_pdata->dma_base;
mem.end = sst->addr.lpe_base + sst_pdata->dma_base + sst_pdata->dma_size - 1;
mem.flags = IORESOURCE_MEM;
/* now register DMA engine device */
dma->chip = dw_probe(sst->dma_dev, &mem, sst_pdata->irq);
if (IS_ERR(dma->chip)) {
dev_err(sst->dev, "error: DMA device register failed\n");
ret = PTR_ERR(dma->chip);
goto err_dma_dev;
}
sst->dma = dma;
sst->fw_use_dma = true;
return 0;
err_dma_dev:
devm_kfree(sst->dev, dma);
return ret;
}
EXPORT_SYMBOL(sst_dma_new);
void sst_dma_free(struct sst_dma *dma)
{
if (dma == NULL)
return;
if (dma->ch)
dma_release_channel(dma->ch);
if (dma->chip)
dw_remove(dma->chip);
}
EXPORT_SYMBOL(sst_dma_free);
/* create new generic firmware object */
struct sst_fw *sst_fw_new(struct sst_dsp *dsp,
const struct firmware *fw, void *private)
......@@ -68,6 +361,12 @@ struct sst_fw *sst_fw_new(struct sst_dsp *dsp,
/* copy FW data to DMA-able memory */
memcpy((void *)sst_fw->dma_buf, (void *)fw->data, fw->size);
if (dsp->fw_use_dma) {
err = sst_dsp_dma_get_channel(dsp, 0);
if (err < 0)
goto chan_err;
}
/* call core specific FW paser to load FW data into DSP */
err = dsp->ops->parse_fw(sst_fw);
if (err < 0) {
......@@ -75,6 +374,9 @@ struct sst_fw *sst_fw_new(struct sst_dsp *dsp,
goto parse_err;
}
if (dsp->fw_use_dma)
sst_dsp_dma_put_channel(dsp);
mutex_lock(&dsp->mutex);
list_add(&sst_fw->list, &dsp->fw_list);
mutex_unlock(&dsp->mutex);
......@@ -82,9 +384,13 @@ struct sst_fw *sst_fw_new(struct sst_dsp *dsp,
return sst_fw;
parse_err:
dma_free_coherent(dsp->dev, sst_fw->size,
if (dsp->fw_use_dma)
sst_dsp_dma_put_channel(dsp);
chan_err:
dma_free_coherent(dsp->dma_dev, sst_fw->size,
sst_fw->dma_buf,
sst_fw->dmable_fw_paddr);
sst_fw->dma_buf = NULL;
kfree(sst_fw);
return NULL;
}
......@@ -108,21 +414,37 @@ EXPORT_SYMBOL_GPL(sst_fw_reload);
void sst_fw_unload(struct sst_fw *sst_fw)
{
struct sst_dsp *dsp = sst_fw->dsp;
struct sst_module *module, *tmp;
struct sst_dsp *dsp = sst_fw->dsp;
struct sst_module *module, *mtmp;
struct sst_module_runtime *runtime, *rtmp;
dev_dbg(dsp->dev, "unloading firmware\n");
dev_dbg(dsp->dev, "unloading firmware\n");
mutex_lock(&dsp->mutex);
/* check module by module */
list_for_each_entry_safe(module, mtmp, &dsp->module_list, list) {
if (module->sst_fw == sst_fw) {
/* remove runtime modules */
list_for_each_entry_safe(runtime, rtmp, &module->runtime_list, list) {
block_list_remove(dsp, &runtime->block_list);
list_del(&runtime->list);
kfree(runtime);
}
/* now remove the module */
block_list_remove(dsp, &module->block_list);
list_del(&module->list);
kfree(module);
}
}
mutex_lock(&dsp->mutex);
list_for_each_entry_safe(module, tmp, &dsp->module_list, list) {
if (module->sst_fw == sst_fw) {
block_module_remove(module);
list_del(&module->list);
kfree(module);
}
}
/* remove all scratch blocks */
block_list_remove(dsp, &dsp->scratch_block_list);
mutex_unlock(&dsp->mutex);
mutex_unlock(&dsp->mutex);
}
EXPORT_SYMBOL_GPL(sst_fw_unload);
......@@ -135,7 +457,8 @@ void sst_fw_free(struct sst_fw *sst_fw)
list_del(&sst_fw->list);
mutex_unlock(&dsp->mutex);
dma_free_coherent(dsp->dma_dev, sst_fw->size, sst_fw->dma_buf,
if (sst_fw->dma_buf)
dma_free_coherent(dsp->dma_dev, sst_fw->size, sst_fw->dma_buf,
sst_fw->dmable_fw_paddr);
kfree(sst_fw);
}
......@@ -172,11 +495,11 @@ struct sst_module *sst_module_new(struct sst_fw *sst_fw,
sst_module->id = template->id;
sst_module->dsp = dsp;
sst_module->sst_fw = sst_fw;
memcpy(&sst_module->s, &template->s, sizeof(struct sst_module_data));
memcpy(&sst_module->p, &template->p, sizeof(struct sst_module_data));
sst_module->scratch_size = template->scratch_size;
sst_module->persistent_size = template->persistent_size;
INIT_LIST_HEAD(&sst_module->block_list);
INIT_LIST_HEAD(&sst_module->runtime_list);
mutex_lock(&dsp->mutex);
list_add(&sst_module->list, &dsp->module_list);
......@@ -199,73 +522,122 @@ void sst_module_free(struct sst_module *sst_module)
}
EXPORT_SYMBOL_GPL(sst_module_free);
static struct sst_mem_block *find_block(struct sst_dsp *dsp, int type,
u32 offset)
struct sst_module_runtime *sst_module_runtime_new(struct sst_module *module,
int id, void *private)
{
struct sst_dsp *dsp = module->dsp;
struct sst_module_runtime *runtime;
runtime = kzalloc(sizeof(*runtime), GFP_KERNEL);
if (runtime == NULL)
return NULL;
runtime->id = id;
runtime->dsp = dsp;
runtime->module = module;
INIT_LIST_HEAD(&runtime->block_list);
mutex_lock(&dsp->mutex);
list_add(&runtime->list, &module->runtime_list);
mutex_unlock(&dsp->mutex);
return runtime;
}
EXPORT_SYMBOL_GPL(sst_module_runtime_new);
void sst_module_runtime_free(struct sst_module_runtime *runtime)
{
struct sst_dsp *dsp = runtime->dsp;
mutex_lock(&dsp->mutex);
list_del(&runtime->list);
mutex_unlock(&dsp->mutex);
kfree(runtime);
}
EXPORT_SYMBOL_GPL(sst_module_runtime_free);
static struct sst_mem_block *find_block(struct sst_dsp *dsp,
struct sst_block_allocator *ba)
{
struct sst_mem_block *block;
list_for_each_entry(block, &dsp->free_block_list, list) {
if (block->type == type && block->offset == offset)
if (block->type == ba->type && block->offset == ba->offset)
return block;
}
return NULL;
}
static int block_alloc_contiguous(struct sst_module *module,
struct sst_module_data *data, u32 offset, int size)
/* Block allocator must be on block boundary */
static int block_alloc_contiguous(struct sst_dsp *dsp,
struct sst_block_allocator *ba, struct list_head *block_list)
{
struct list_head tmp = LIST_HEAD_INIT(tmp);
struct sst_dsp *dsp = module->dsp;
struct sst_mem_block *block;
u32 block_start = SST_HSW_BLOCK_ANY;
int size = ba->size, offset = ba->offset;
while (size > 0) {
block = find_block(dsp, data->type, offset);
while (ba->size > 0) {
block = find_block(dsp, ba);
if (!block) {
list_splice(&tmp, &dsp->free_block_list);
ba->size = size;
ba->offset = offset;
return -ENOMEM;
}
list_move_tail(&block->list, &tmp);
offset += block->size;
size -= block->size;
ba->offset += block->size;
ba->size -= block->size;
}
ba->size = size;
ba->offset = offset;
list_for_each_entry(block, &tmp, list) {
if (block->offset < block_start)
block_start = block->offset;
list_add(&block->module_list, block_list);
list_for_each_entry(block, &tmp, list)
list_add(&block->module_list, &module->block_list);
dev_dbg(dsp->dev, "block allocated %d:%d at offset 0x%x\n",
block->type, block->index, block->offset);
}
list_splice(&tmp, &dsp->used_block_list);
return 0;
}
/* allocate free DSP blocks for module data - callers hold locks */
static int block_alloc(struct sst_module *module,
struct sst_module_data *data)
/* allocate first free DSP blocks for data - callers hold locks */
static int block_alloc(struct sst_dsp *dsp, struct sst_block_allocator *ba,
struct list_head *block_list)
{
struct sst_dsp *dsp = module->dsp;
struct sst_mem_block *block, *tmp;
int ret = 0;
if (data->size == 0)
if (ba->size == 0)
return 0;
/* find first free whole blocks that can hold module */
list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {
/* ignore blocks with wrong type */
if (block->type != data->type)
if (block->type != ba->type)
continue;
if (data->size > block->size)
if (ba->size > block->size)
continue;
data->offset = block->offset;
block->data_type = data->data_type;
block->bytes_used = data->size % block->size;
list_add(&block->module_list, &module->block_list);
ba->offset = block->offset;
block->bytes_used = ba->size % block->size;
list_add(&block->module_list, block_list);
list_move(&block->list, &dsp->used_block_list);
dev_dbg(dsp->dev, " *module %d added block %d:%d\n",
module->id, block->type, block->index);
dev_dbg(dsp->dev, "block allocated %d:%d at offset 0x%x\n",
block->type, block->index, block->offset);
return 0;
}
......@@ -273,15 +645,19 @@ static int block_alloc(struct sst_module *module,
list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {
/* ignore blocks with wrong type */
if (block->type != data->type)
if (block->type != ba->type)
continue;
/* do we span > 1 blocks */
if (data->size > block->size) {
ret = block_alloc_contiguous(module, data,
block->offset, data->size);
if (ba->size > block->size) {
/* align ba to block boundary */
ba->offset = block->offset;
ret = block_alloc_contiguous(dsp, ba, block_list);
if (ret == 0)
return ret;
}
}
......@@ -289,93 +665,74 @@ static int block_alloc(struct sst_module *module,
return -ENOMEM;
}
/* remove module from memory - callers hold locks */
static void block_module_remove(struct sst_module *module)
int sst_alloc_blocks(struct sst_dsp *dsp, struct sst_block_allocator *ba,
struct list_head *block_list)
{
struct sst_mem_block *block, *tmp;
struct sst_dsp *dsp = module->dsp;
int err;
int ret;
/* disable each block */
list_for_each_entry(block, &module->block_list, module_list) {
dev_dbg(dsp->dev, "block request 0x%x bytes at offset 0x%x type %d\n",
ba->size, ba->offset, ba->type);
if (block->ops && block->ops->disable) {
err = block->ops->disable(block);
if (err < 0)
dev_err(dsp->dev,
"error: cant disable block %d:%d\n",
block->type, block->index);
}
}
mutex_lock(&dsp->mutex);
/* mark each block as free */
list_for_each_entry_safe(block, tmp, &module->block_list, module_list) {
list_del(&block->module_list);
list_move(&block->list, &dsp->free_block_list);
ret = block_alloc(dsp, ba, block_list);
if (ret < 0) {
dev_err(dsp->dev, "error: can't alloc blocks %d\n", ret);
goto out;
}
}
/* prepare the memory block to receive data from host - callers hold locks */
static int block_module_prepare(struct sst_module *module)
{
struct sst_mem_block *block;
int ret = 0;
/* enable each block so that's it'e ready for module P/S data */
list_for_each_entry(block, &module->block_list, module_list) {
/* prepare DSP blocks for module usage */
ret = block_list_prepare(dsp, block_list);
if (ret < 0)
dev_err(dsp->dev, "error: prepare failed\n");
if (block->ops && block->ops->enable) {
ret = block->ops->enable(block);
if (ret < 0) {
dev_err(module->dsp->dev,
"error: cant disable block %d:%d\n",
block->type, block->index);
goto err;
}
}
}
out:
mutex_unlock(&dsp->mutex);
return ret;
}
EXPORT_SYMBOL_GPL(sst_alloc_blocks);
err:
list_for_each_entry(block, &module->block_list, module_list) {
if (block->ops && block->ops->disable)
block->ops->disable(block);
}
return ret;
int sst_free_blocks(struct sst_dsp *dsp, struct list_head *block_list)
{
mutex_lock(&dsp->mutex);
block_list_remove(dsp, block_list);
mutex_unlock(&dsp->mutex);
return 0;
}
EXPORT_SYMBOL_GPL(sst_free_blocks);
/* allocate memory blocks for static module addresses - callers hold locks */
static int block_alloc_fixed(struct sst_module *module,
struct sst_module_data *data)
static int block_alloc_fixed(struct sst_dsp *dsp, struct sst_block_allocator *ba,
struct list_head *block_list)
{
struct sst_dsp *dsp = module->dsp;
struct sst_mem_block *block, *tmp;
u32 end = data->offset + data->size, block_end;
u32 end = ba->offset + ba->size, block_end;
int err;
/* only IRAM/DRAM blocks are managed */
if (data->type != SST_MEM_IRAM && data->type != SST_MEM_DRAM)
if (ba->type != SST_MEM_IRAM && ba->type != SST_MEM_DRAM)
return 0;
/* are blocks already attached to this module */
list_for_each_entry_safe(block, tmp, &module->block_list, module_list) {
list_for_each_entry_safe(block, tmp, block_list, module_list) {
/* force compacting mem blocks of the same data_type */
if (block->data_type != data->data_type)
/* ignore blocks with wrong type */
if (block->type != ba->type)
continue;
block_end = block->offset + block->size;
/* find block that holds section */
if (data->offset >= block->offset && end < block_end)
if (ba->offset >= block->offset && end <= block_end)
return 0;
/* does block span more than 1 section */
if (data->offset >= block->offset && data->offset < block_end) {
if (ba->offset >= block->offset && ba->offset < block_end) {
err = block_alloc_contiguous(module, data,
block->offset + block->size,
data->size - block->size);
/* align ba to block boundary */
ba->size -= block_end - ba->offset;
ba->offset = block_end;
err = block_alloc_contiguous(dsp, ba, block_list);
if (err < 0)
return -ENOMEM;
......@@ -388,82 +745,270 @@ static int block_alloc_fixed(struct sst_module *module,
list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {
block_end = block->offset + block->size;
/* ignore blocks with wrong type */
if (block->type != ba->type)
continue;
/* find block that holds section */
if (data->offset >= block->offset && end < block_end) {
if (ba->offset >= block->offset && end <= block_end) {
/* add block */
block->data_type = data->data_type;
list_move(&block->list, &dsp->used_block_list);
list_add(&block->module_list, &module->block_list);
list_add(&block->module_list, block_list);
dev_dbg(dsp->dev, "block allocated %d:%d at offset 0x%x\n",
block->type, block->index, block->offset);
return 0;
}
/* does block span more than 1 section */
if (data->offset >= block->offset && data->offset < block_end) {
if (ba->offset >= block->offset && ba->offset < block_end) {
err = block_alloc_contiguous(module, data,
block->offset, data->size);
/* align ba to block boundary */
ba->offset = block->offset;
err = block_alloc_contiguous(dsp, ba, block_list);
if (err < 0)
return -ENOMEM;
return 0;
}
}
return -ENOMEM;
}
/* Load fixed module data into DSP memory blocks */
int sst_module_insert_fixed_block(struct sst_module *module,
struct sst_module_data *data)
int sst_module_alloc_blocks(struct sst_module *module)
{
struct sst_dsp *dsp = module->dsp;
struct sst_fw *sst_fw = module->sst_fw;
struct sst_block_allocator ba;
int ret;
ba.size = module->size;
ba.type = module->type;
ba.offset = module->offset;
dev_dbg(dsp->dev, "block request 0x%x bytes at offset 0x%x type %d\n",
ba.size, ba.offset, ba.type);
mutex_lock(&dsp->mutex);
/* alloc blocks that includes this section */
ret = block_alloc_fixed(module, data);
ret = block_alloc_fixed(dsp, &ba, &module->block_list);
if (ret < 0) {
dev_err(dsp->dev,
"error: no free blocks for section at offset 0x%x size 0x%x\n",
data->offset, data->size);
module->offset, module->size);
mutex_unlock(&dsp->mutex);
return -ENOMEM;
}
/* prepare DSP blocks for module copy */
ret = block_module_prepare(module);
ret = block_list_prepare(dsp, &module->block_list);
if (ret < 0) {
dev_err(dsp->dev, "error: fw module prepare failed\n");
goto err;
}
/* copy partial module data to blocks */
sst_memcpy32(dsp->addr.lpe + data->offset, data->data, data->size);
if (dsp->fw_use_dma) {
ret = sst_dsp_dma_copyto(dsp,
dsp->addr.lpe_base + module->offset,
sst_fw->dmable_fw_paddr + module->data_offset,
module->size);
if (ret < 0) {
dev_err(dsp->dev, "error: module copy failed\n");
goto err;
}
} else
sst_memcpy32(dsp->addr.lpe + module->offset, module->data,
module->size);
mutex_unlock(&dsp->mutex);
return ret;
err:
block_module_remove(module);
block_list_remove(dsp, &module->block_list);
mutex_unlock(&dsp->mutex);
return ret;
}
EXPORT_SYMBOL_GPL(sst_module_insert_fixed_block);
EXPORT_SYMBOL_GPL(sst_module_alloc_blocks);
/* Unload entire module from DSP memory */
int sst_block_module_remove(struct sst_module *module)
int sst_module_free_blocks(struct sst_module *module)
{
struct sst_dsp *dsp = module->dsp;
mutex_lock(&dsp->mutex);
block_module_remove(module);
block_list_remove(dsp, &module->block_list);
mutex_unlock(&dsp->mutex);
return 0;
}
EXPORT_SYMBOL_GPL(sst_module_free_blocks);
/*
 * Allocate persistent DSP memory blocks for a module runtime instance.
 *
 * Allocates the runtime's persistent memory either at a fixed DSP offset
 * (@offset != 0) or from any suitable free block, then prepares the blocks
 * for use. Returns 0 on success (or immediately if the module needs no
 * persistent memory), -ENOMEM if no blocks are available, or a negative
 * error code if block preparation fails.
 */
int sst_module_runtime_alloc_blocks(struct sst_module_runtime *runtime,
	int offset)
{
	struct sst_dsp *dsp = runtime->dsp;
	struct sst_module *module = runtime->module;
	struct sst_block_allocator ba;
	int ret;

	if (module->persistent_size == 0)
		return 0;

	ba.size = module->persistent_size;
	ba.type = SST_MEM_DRAM;

	mutex_lock(&dsp->mutex);

	/* do we need to allocate at a fixed address ? */
	if (offset != 0) {

		ba.offset = offset;

		dev_dbg(dsp->dev, "persistent fixed block request 0x%x bytes type %d offset 0x%x\n",
			ba.size, ba.type, ba.offset);

		/* alloc blocks that includes this section */
		ret = block_alloc_fixed(dsp, &ba, &runtime->block_list);

	} else {
		dev_dbg(dsp->dev, "persistent block request 0x%x bytes type %d\n",
			ba.size, ba.type);

		/* alloc blocks that includes this section */
		ret = block_alloc(dsp, &ba, &runtime->block_list);
	}
	if (ret < 0) {
		dev_err(dsp->dev,
			"error: no free blocks for runtime module size 0x%x\n",
			module->persistent_size);
		mutex_unlock(&dsp->mutex);
		return -ENOMEM;
	}
	runtime->persistent_offset = ba.offset;

	/* prepare DSP blocks for module copy */
	ret = block_list_prepare(dsp, &runtime->block_list);
	if (ret < 0) {
		dev_err(dsp->dev, "error: runtime block prepare failed\n");
		goto err;
	}

	mutex_unlock(&dsp->mutex);
	return ret;

err:
	/* release the blocks allocated for THIS runtime - not the module's
	 * own block list, which holds the module's code/data blocks */
	block_list_remove(dsp, &runtime->block_list);
	mutex_unlock(&dsp->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(sst_module_runtime_alloc_blocks);
/* Release every DSP memory block held by a module runtime instance. */
int sst_module_runtime_free_blocks(struct sst_module_runtime *runtime)
{
	struct sst_dsp *sst = runtime->dsp;

	mutex_lock(&sst->mutex);
	block_list_remove(sst, &runtime->block_list);
	mutex_unlock(&sst->mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(sst_block_module_remove);
EXPORT_SYMBOL_GPL(sst_module_runtime_free_blocks);
/*
 * Save a runtime's persistent DSP memory into a newly allocated DMA-able
 * context buffer, so it can later be restored by
 * sst_module_runtime_restore().
 *
 * On success @context->buffer holds the saved image (caller owns it until
 * restore frees it). On failure the buffer is freed and set to NULL so it
 * is not leaked. Returns 0 on success or a negative error code.
 */
int sst_module_runtime_save(struct sst_module_runtime *runtime,
	struct sst_module_runtime_context *context)
{
	struct sst_dsp *dsp = runtime->dsp;
	struct sst_module *module = runtime->module;
	int ret = 0;

	dev_dbg(dsp->dev, "saving runtime %d memory at 0x%x size 0x%x\n",
		runtime->id, runtime->persistent_offset,
		module->persistent_size);

	context->buffer = dma_alloc_coherent(dsp->dma_dev,
		module->persistent_size,
		&context->dma_buffer, GFP_DMA | GFP_KERNEL);
	if (!context->buffer) {
		dev_err(dsp->dev, "error: DMA context alloc failed\n");
		return -ENOMEM;
	}

	mutex_lock(&dsp->mutex);

	if (dsp->fw_use_dma) {

		ret = sst_dsp_dma_get_channel(dsp, 0);
		if (ret < 0)
			goto err;

		ret = sst_dsp_dma_copyfrom(dsp, context->dma_buffer,
			dsp->addr.lpe_base + runtime->persistent_offset,
			module->persistent_size);
		sst_dsp_dma_put_channel(dsp);
		if (ret < 0) {
			dev_err(dsp->dev, "error: context copy failed\n");
			goto err;
		}
	} else
		sst_memcpy32(context->buffer, dsp->addr.lpe +
			runtime->persistent_offset,
			module->persistent_size);

err:
	/* don't leak the context buffer when the save failed */
	if (ret < 0) {
		dma_free_coherent(dsp->dma_dev, module->persistent_size,
			context->buffer, context->dma_buffer);
		context->buffer = NULL;
	}
	mutex_unlock(&dsp->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(sst_module_runtime_save);
/*
 * Restore a runtime's persistent DSP memory from the context buffer saved
 * by sst_module_runtime_save(), then free that buffer. Returns 0 on
 * success or when no saved context exists, or a negative error code on a
 * failed DMA copy.
 */
int sst_module_runtime_restore(struct sst_module_runtime *runtime,
	struct sst_module_runtime_context *context)
{
	struct sst_dsp *dsp = runtime->dsp;
	struct sst_module *module = runtime->module;
	int ret = 0;

	dev_dbg(dsp->dev, "restoring runtime %d memory at 0x%x size 0x%x\n",
		runtime->id, runtime->persistent_offset,
		module->persistent_size);

	mutex_lock(&dsp->mutex);

	/* nothing was saved - treat as success (ret is still 0) */
	if (!context->buffer) {
		dev_info(dsp->dev, "no context buffer need to restore!\n");
		goto err;
	}

	if (dsp->fw_use_dma) {
		ret = sst_dsp_dma_get_channel(dsp, 0);
		if (ret < 0)
			goto err;

		ret = sst_dsp_dma_copyto(dsp,
			dsp->addr.lpe_base + runtime->persistent_offset,
			context->dma_buffer, module->persistent_size);
		sst_dsp_dma_put_channel(dsp);
		if (ret < 0) {
			dev_err(dsp->dev, "error: module copy failed\n");
			/* NOTE(review): the context buffer is kept on failure,
			 * presumably so a retry is possible - confirm intended */
			goto err;
		}
	} else
		sst_memcpy32(dsp->addr.lpe + runtime->persistent_offset,
			context->buffer, module->persistent_size);

	/* restore complete - release the saved context image */
	dma_free_coherent(dsp->dma_dev, module->persistent_size,
		context->buffer, context->dma_buffer);
	context->buffer = NULL;

err:
	mutex_unlock(&dsp->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(sst_module_runtime_restore);
/* register a DSP memory block for use with FW based modules */
struct sst_mem_block *sst_mem_block_register(struct sst_dsp *dsp, u32 offset,
......@@ -516,80 +1061,83 @@ void sst_mem_block_unregister_all(struct sst_dsp *dsp)
EXPORT_SYMBOL_GPL(sst_mem_block_unregister_all);
/* allocate scratch buffer blocks */
struct sst_module *sst_mem_block_alloc_scratch(struct sst_dsp *dsp)
int sst_block_alloc_scratch(struct sst_dsp *dsp)
{
struct sst_module *sst_module, *scratch;
struct sst_mem_block *block, *tmp;
u32 block_size;
int ret = 0;
scratch = kzalloc(sizeof(struct sst_module), GFP_KERNEL);
if (scratch == NULL)
return NULL;
struct sst_module *module;
struct sst_block_allocator ba;
int ret;
mutex_lock(&dsp->mutex);
/* calculate required scratch size */
list_for_each_entry(sst_module, &dsp->module_list, list) {
if (scratch->s.size < sst_module->s.size)
scratch->s.size = sst_module->s.size;
dsp->scratch_size = 0;
list_for_each_entry(module, &dsp->module_list, list) {
dev_dbg(dsp->dev, "module %d scratch req 0x%x bytes\n",
module->id, module->scratch_size);
if (dsp->scratch_size < module->scratch_size)
dsp->scratch_size = module->scratch_size;
}
dev_dbg(dsp->dev, "scratch buffer required is %d bytes\n",
scratch->s.size);
/* init scratch module */
scratch->dsp = dsp;
scratch->s.type = SST_MEM_DRAM;
scratch->s.data_type = SST_DATA_S;
INIT_LIST_HEAD(&scratch->block_list);
dev_dbg(dsp->dev, "scratch buffer required is 0x%x bytes\n",
dsp->scratch_size);
/* check free blocks before looking at used blocks for space */
if (!list_empty(&dsp->free_block_list))
block = list_first_entry(&dsp->free_block_list,
struct sst_mem_block, list);
else
block = list_first_entry(&dsp->used_block_list,
struct sst_mem_block, list);
block_size = block->size;
if (dsp->scratch_size == 0) {
dev_info(dsp->dev, "no modules need scratch buffer\n");
mutex_unlock(&dsp->mutex);
return 0;
}
/* allocate blocks for module scratch buffers */
dev_dbg(dsp->dev, "allocating scratch blocks\n");
ret = block_alloc(scratch, &scratch->s);
ba.size = dsp->scratch_size;
ba.type = SST_MEM_DRAM;
/* do we need to allocate at fixed offset */
if (dsp->scratch_offset != 0) {
dev_dbg(dsp->dev, "block request 0x%x bytes type %d at 0x%x\n",
ba.size, ba.type, ba.offset);
ba.offset = dsp->scratch_offset;
/* alloc blocks that includes this section */
ret = block_alloc_fixed(dsp, &ba, &dsp->scratch_block_list);
} else {
dev_dbg(dsp->dev, "block request 0x%x bytes type %d\n",
ba.size, ba.type);
ba.offset = 0;
ret = block_alloc(dsp, &ba, &dsp->scratch_block_list);
}
if (ret < 0) {
dev_err(dsp->dev, "error: can't alloc scratch blocks\n");
goto err;
mutex_unlock(&dsp->mutex);
return ret;
}
/* assign the same offset of scratch to each module */
list_for_each_entry(sst_module, &dsp->module_list, list)
sst_module->s.offset = scratch->s.offset;
mutex_unlock(&dsp->mutex);
return scratch;
ret = block_list_prepare(dsp, &dsp->scratch_block_list);
if (ret < 0) {
dev_err(dsp->dev, "error: scratch block prepare failed\n");
return ret;
}
err:
list_for_each_entry_safe(block, tmp, &scratch->block_list, module_list)
list_del(&block->module_list);
/* assign the same offset of scratch to each module */
dsp->scratch_offset = ba.offset;
mutex_unlock(&dsp->mutex);
return NULL;
return dsp->scratch_size;
}
EXPORT_SYMBOL_GPL(sst_mem_block_alloc_scratch);
EXPORT_SYMBOL_GPL(sst_block_alloc_scratch);
/* free all scratch blocks */
void sst_mem_block_free_scratch(struct sst_dsp *dsp,
struct sst_module *scratch)
void sst_block_free_scratch(struct sst_dsp *dsp)
{
struct sst_mem_block *block, *tmp;
mutex_lock(&dsp->mutex);
list_for_each_entry_safe(block, tmp, &scratch->block_list, module_list)
list_del(&block->module_list);
block_list_remove(dsp, &dsp->scratch_block_list);
mutex_unlock(&dsp->mutex);
}
EXPORT_SYMBOL_GPL(sst_mem_block_free_scratch);
EXPORT_SYMBOL_GPL(sst_block_free_scratch);
/* get a module from it's unique ID */
struct sst_module *sst_module_get_from_id(struct sst_dsp *dsp, u32 id)
......@@ -609,3 +1157,40 @@ struct sst_module *sst_module_get_from_id(struct sst_dsp *dsp, u32 id)
return NULL;
}
EXPORT_SYMBOL_GPL(sst_module_get_from_id);
/* Look up a runtime instance of @module by its unique ID; NULL if absent. */
struct sst_module_runtime *sst_module_runtime_get_from_id(
	struct sst_module *module, u32 id)
{
	struct sst_dsp *dsp = module->dsp;
	struct sst_module_runtime *rt, *found = NULL;

	mutex_lock(&dsp->mutex);

	list_for_each_entry(rt, &module->runtime_list, list) {
		if (rt->id == id) {
			found = rt;
			break;
		}
	}

	mutex_unlock(&dsp->mutex);
	return found;
}
EXPORT_SYMBOL_GPL(sst_module_runtime_get_from_id);
/*
 * Translate a host-side memory offset into the DSP's own address space.
 * Unknown memory types map to 0.
 */
u32 sst_dsp_get_offset(struct sst_dsp *dsp, u32 offset,
	enum sst_mem_type type)
{
	if (type == SST_MEM_IRAM)
		return offset - dsp->addr.iram_offset +
			dsp->addr.dsp_iram_offset;

	if (type == SST_MEM_DRAM)
		return offset - dsp->addr.dram_offset +
			dsp->addr.dsp_dram_offset;

	return 0;
}
EXPORT_SYMBOL_GPL(sst_dsp_get_offset);
......@@ -42,6 +42,10 @@
#define SST_LP_SHIM_OFFSET 0xE7000
#define SST_WPT_IRAM_OFFSET 0xA0000
#define SST_LP_IRAM_OFFSET 0x80000
#define SST_WPT_DSP_DRAM_OFFSET 0x400000
#define SST_WPT_DSP_IRAM_OFFSET 0x00000
#define SST_LPT_DSP_DRAM_OFFSET 0x400000
#define SST_LPT_DSP_IRAM_OFFSET 0x00000
#define SST_SHIM_PM_REG 0x84
......@@ -86,9 +90,8 @@ static int hsw_parse_module(struct sst_dsp *dsp, struct sst_fw *fw,
{
struct dma_block_info *block;
struct sst_module *mod;
struct sst_module_data block_data;
struct sst_module_template template;
int count;
int count, ret;
void __iomem *ram;
/* TODO: allowed module types need to be configurable */
......@@ -109,13 +112,9 @@ static int hsw_parse_module(struct sst_dsp *dsp, struct sst_fw *fw,
memset(&template, 0, sizeof(template));
template.id = module->type;
template.entry = module->entry_point;
template.p.size = module->info.persistent_size;
template.p.type = SST_MEM_DRAM;
template.p.data_type = SST_DATA_P;
template.s.size = module->info.scratch_size;
template.s.type = SST_MEM_DRAM;
template.s.data_type = SST_DATA_S;
template.entry = module->entry_point - 4;
template.persistent_size = module->info.persistent_size;
template.scratch_size = module->info.scratch_size;
mod = sst_module_new(fw, &template, NULL);
if (mod == NULL)
......@@ -135,14 +134,14 @@ static int hsw_parse_module(struct sst_dsp *dsp, struct sst_fw *fw,
switch (block->type) {
case SST_HSW_IRAM:
ram = dsp->addr.lpe;
block_data.offset =
mod->offset =
block->ram_offset + dsp->addr.iram_offset;
block_data.type = SST_MEM_IRAM;
mod->type = SST_MEM_IRAM;
break;
case SST_HSW_DRAM:
ram = dsp->addr.lpe;
block_data.offset = block->ram_offset;
block_data.type = SST_MEM_DRAM;
mod->offset = block->ram_offset;
mod->type = SST_MEM_DRAM;
break;
default:
dev_err(dsp->dev, "error: bad type 0x%x for block 0x%x\n",
......@@ -151,30 +150,34 @@ static int hsw_parse_module(struct sst_dsp *dsp, struct sst_fw *fw,
return -EINVAL;
}
block_data.size = block->size;
block_data.data_type = SST_DATA_M;
block_data.data = (void *)block + sizeof(*block);
block_data.data_offset = block_data.data - fw->dma_buf;
mod->size = block->size;
mod->data = (void *)block + sizeof(*block);
mod->data_offset = mod->data - fw->dma_buf;
dev_dbg(dsp->dev, "copy firmware block %d type 0x%x "
dev_dbg(dsp->dev, "module block %d type 0x%x "
"size 0x%x ==> ram %p offset 0x%x\n",
count, block->type, block->size, ram,
count, mod->type, block->size, ram,
block->ram_offset);
sst_module_insert_fixed_block(mod, &block_data);
ret = sst_module_alloc_blocks(mod);
if (ret < 0) {
dev_err(dsp->dev, "error: could not allocate blocks for module %d\n",
count);
sst_module_free(mod);
return ret;
}
block = (void *)block + sizeof(*block) + block->size;
}
return 0;
}
static int hsw_parse_fw_image(struct sst_fw *sst_fw)
{
struct fw_header *header;
struct sst_module *scratch;
struct fw_module_header *module;
struct sst_dsp *dsp = sst_fw->dsp;
struct sst_hsw *hsw = sst_fw->private;
int ret, count;
/* Read the header information from the data pointer */
......@@ -204,12 +207,8 @@ static int hsw_parse_fw_image(struct sst_fw *sst_fw)
module = (void *)module + sizeof(*module) + module->mod_size;
}
/* allocate persistent/scratch mem regions */
scratch = sst_mem_block_alloc_scratch(dsp);
if (scratch == NULL)
return -ENOMEM;
sst_hsw_set_scratch_module(hsw, scratch);
/* allocate scratch mem regions */
sst_block_alloc_scratch(dsp);
return 0;
}
......@@ -467,12 +466,16 @@ static int hsw_init(struct sst_dsp *sst, struct sst_pdata *pdata)
region = lp_region;
region_count = ARRAY_SIZE(lp_region);
sst->addr.iram_offset = SST_LP_IRAM_OFFSET;
sst->addr.dsp_iram_offset = SST_LPT_DSP_IRAM_OFFSET;
sst->addr.dsp_dram_offset = SST_LPT_DSP_DRAM_OFFSET;
sst->addr.shim_offset = SST_LP_SHIM_OFFSET;
break;
case SST_DEV_ID_WILDCAT_POINT:
region = wpt_region;
region_count = ARRAY_SIZE(wpt_region);
sst->addr.iram_offset = SST_WPT_IRAM_OFFSET;
sst->addr.dsp_iram_offset = SST_WPT_DSP_IRAM_OFFSET;
sst->addr.dsp_dram_offset = SST_WPT_DSP_DRAM_OFFSET;
sst->addr.shim_offset = SST_WPT_SHIM_OFFSET;
break;
default:
......
......@@ -1351,10 +1351,11 @@ int sst_hsw_stream_buffer(struct sst_hsw *hsw, struct sst_hsw_stream *stream,
}
int sst_hsw_stream_set_module_info(struct sst_hsw *hsw,
struct sst_hsw_stream *stream, enum sst_hsw_module_id module_id,
u32 entry_point)
struct sst_hsw_stream *stream, struct sst_module_runtime *runtime)
{
struct sst_hsw_module_map *map = &stream->request.map;
struct sst_dsp *dsp = sst_hsw_get_dsp(hsw);
struct sst_module *module = runtime->module;
if (stream->commited) {
dev_err(hsw->dev, "error: stream committed for set module\n");
......@@ -1363,36 +1364,25 @@ int sst_hsw_stream_set_module_info(struct sst_hsw *hsw,
/* only support initial module atm */
map->module_entries_count = 1;
map->module_entries[0].module_id = module_id;
map->module_entries[0].entry_point = entry_point;
return 0;
}
int sst_hsw_stream_set_pmemory_info(struct sst_hsw *hsw,
struct sst_hsw_stream *stream, u32 offset, u32 size)
{
if (stream->commited) {
dev_err(hsw->dev, "error: stream committed for set pmem\n");
return -EINVAL;
}
stream->request.persistent_mem.offset = offset;
stream->request.persistent_mem.size = size;
return 0;
}
int sst_hsw_stream_set_smemory_info(struct sst_hsw *hsw,
struct sst_hsw_stream *stream, u32 offset, u32 size)
{
if (stream->commited) {
dev_err(hsw->dev, "error: stream committed for set smem\n");
return -EINVAL;
}
stream->request.scratch_mem.offset = offset;
stream->request.scratch_mem.size = size;
map->module_entries[0].module_id = module->id;
map->module_entries[0].entry_point = module->entry;
stream->request.persistent_mem.offset =
sst_dsp_get_offset(dsp, runtime->persistent_offset, SST_MEM_DRAM);
stream->request.persistent_mem.size = module->persistent_size;
stream->request.scratch_mem.offset =
sst_dsp_get_offset(dsp, dsp->scratch_offset, SST_MEM_DRAM);
stream->request.scratch_mem.size = dsp->scratch_size;
dev_dbg(hsw->dev, "module %d runtime %d using:\n", module->id,
runtime->id);
dev_dbg(hsw->dev, " persistent offset 0x%x bytes 0x%x\n",
stream->request.persistent_mem.offset,
stream->request.persistent_mem.size);
dev_dbg(hsw->dev, " scratch offset 0x%x bytes 0x%x\n",
stream->request.scratch_mem.offset,
stream->request.scratch_mem.size);
return 0;
}
......@@ -1673,32 +1663,48 @@ int sst_hsw_dx_set_state(struct sst_hsw *hsw,
dev_dbg(hsw->dev, "ipc: got %d entry numbers for state %d\n",
dx->entries_no, state);
memcpy(&hsw->dx, dx, sizeof(*dx));
return 0;
return ret;
}
/* Used to save state into hsw->dx_reply */
int sst_hsw_dx_get_state(struct sst_hsw *hsw, u32 item,
u32 *offset, u32 *size, u32 *source)
struct sst_module_runtime *sst_hsw_runtime_module_create(struct sst_hsw *hsw,
int mod_id, int offset)
{
struct sst_hsw_ipc_dx_memory_item *dx_mem;
struct sst_hsw_ipc_dx_reply *dx_reply;
int entry_no;
struct sst_dsp *dsp = hsw->dsp;
struct sst_module *module;
struct sst_module_runtime *runtime;
int err;
dx_reply = &hsw->dx;
entry_no = dx_reply->entries_no;
module = sst_module_get_from_id(dsp, mod_id);
if (module == NULL) {
dev_err(dsp->dev, "error: failed to get module %d for pcm\n",
mod_id);
return NULL;
}
trace_ipc_request("PM get Dx state", entry_no);
runtime = sst_module_runtime_new(module, mod_id, NULL);
if (runtime == NULL) {
dev_err(dsp->dev, "error: failed to create module %d runtime\n",
mod_id);
return NULL;
}
if (item >= entry_no)
return -EINVAL;
err = sst_module_runtime_alloc_blocks(runtime, offset);
if (err < 0) {
dev_err(dsp->dev, "error: failed to alloc blocks for module %d runtime\n",
mod_id);
sst_module_runtime_free(runtime);
return NULL;
}
dx_mem = &dx_reply->mem_info[item];
*offset = dx_mem->offset;
*size = dx_mem->size;
*source = dx_mem->source;
dev_dbg(dsp->dev, "runtime id %d created for module %d\n", runtime->id,
mod_id);
return runtime;
}
return 0;
/*
 * Tear down a runtime module: release its DSP memory blocks first, then
 * free the runtime object itself (order matters - the blocks belong to
 * the runtime being destroyed).
 */
void sst_hsw_runtime_module_free(struct sst_module_runtime *runtime)
{
	sst_module_runtime_free_blocks(runtime);
	sst_module_runtime_free(runtime);
}
static int msg_empty_list_init(struct sst_hsw *hsw)
......@@ -1718,12 +1724,6 @@ static int msg_empty_list_init(struct sst_hsw *hsw)
return 0;
}
void sst_hsw_set_scratch_module(struct sst_hsw *hsw,
struct sst_module *scratch)
{
hsw->scratch = scratch;
}
struct sst_dsp *sst_hsw_get_dsp(struct sst_hsw *hsw)
{
return hsw->dsp;
......
......@@ -40,6 +40,7 @@ struct sst_hsw_stream;
struct sst_hsw_log_stream;
struct sst_pdata;
struct sst_module;
struct sst_module_runtime;
extern struct sst_ops haswell_ops;
/* Stream Allocate Path ID */
......@@ -432,8 +433,7 @@ int sst_hsw_stream_set_map_config(struct sst_hsw *hsw,
int sst_hsw_stream_set_style(struct sst_hsw *hsw, struct sst_hsw_stream *stream,
enum sst_hsw_interleaving style);
int sst_hsw_stream_set_module_info(struct sst_hsw *hsw,
struct sst_hsw_stream *stream, enum sst_hsw_module_id module_id,
u32 entry_point);
struct sst_hsw_stream *stream, struct sst_module_runtime *runtime);
int sst_hsw_stream_set_pmemory_info(struct sst_hsw *hsw,
struct sst_hsw_stream *stream, u32 offset, u32 size);
int sst_hsw_stream_set_smemory_info(struct sst_hsw *hsw,
......@@ -486,7 +486,10 @@ int sst_hsw_dx_get_state(struct sst_hsw *hsw, u32 item,
int sst_hsw_dsp_init(struct device *dev, struct sst_pdata *pdata);
void sst_hsw_dsp_free(struct device *dev, struct sst_pdata *pdata);
struct sst_dsp *sst_hsw_get_dsp(struct sst_hsw *hsw);
void sst_hsw_set_scratch_module(struct sst_hsw *hsw,
struct sst_module *scratch);
/* runtime module management */
struct sst_module_runtime *sst_hsw_runtime_module_create(struct sst_hsw *hsw,
int mod_id, int offset);
void sst_hsw_runtime_module_free(struct sst_module_runtime *runtime);
#endif
......@@ -89,16 +89,23 @@ static const struct snd_pcm_hardware hsw_pcm_hardware = {
.buffer_bytes_max = HSW_PCM_PERIODS_MAX * PAGE_SIZE,
};
/* Static mapping from a front-end DAI to the DSP module that services it. */
struct hsw_pcm_module_map {
	int dai_id;			/* front-end DAI index */
	enum sst_hsw_module_id mod_id;	/* DSP module servicing this DAI */
};
/* private data for each PCM DSP stream */
struct hsw_pcm_data {
	int dai_id;
	struct sst_hsw_stream *stream;		/* active DSP stream */
	struct sst_module_runtime *runtime;	/* runtime module instance for this PCM */
	u32 volume[2];				/* per-channel volume - presumably L/R; confirm */
	struct snd_pcm_substream *substream;
	struct snd_compr_stream *cstream;
	unsigned int wpos;
	struct mutex mutex;
	bool allocated;
	int persistent_offset;			/* DSP persistent memory offset for the runtime */
};
/* private data for the driver */
......@@ -472,28 +479,8 @@ static int hsw_pcm_hw_params(struct snd_pcm_substream *substream,
return -EINVAL;
}
/* we use hardcoded memory offsets atm, will be updated for new FW */
if (stream_type == SST_HSW_STREAM_TYPE_CAPTURE) {
sst_hsw_stream_set_module_info(hsw, pcm_data->stream,
SST_HSW_MODULE_PCM_CAPTURE, module_data->entry);
sst_hsw_stream_set_pmemory_info(hsw, pcm_data->stream,
0x449400, 0x4000);
sst_hsw_stream_set_smemory_info(hsw, pcm_data->stream,
0x400000, 0);
} else { /* stream_type == SST_HSW_STREAM_TYPE_SYSTEM */
sst_hsw_stream_set_module_info(hsw, pcm_data->stream,
SST_HSW_MODULE_PCM_SYSTEM, module_data->entry);
sst_hsw_stream_set_pmemory_info(hsw, pcm_data->stream,
module_data->offset, module_data->size);
sst_hsw_stream_set_pmemory_info(hsw, pcm_data->stream,
0x44d400, 0x3800);
sst_hsw_stream_set_smemory_info(hsw, pcm_data->stream,
module_data->offset, module_data->size);
sst_hsw_stream_set_smemory_info(hsw, pcm_data->stream,
0x400000, 0);
}
sst_hsw_stream_set_module_info(hsw, pcm_data->stream,
pcm_data->runtime);
ret = sst_hsw_stream_commit(hsw, pcm_data->stream);
if (ret < 0) {
......@@ -654,6 +641,55 @@ static struct snd_pcm_ops hsw_pcm_ops = {
.page = snd_pcm_sgbuf_ops_page,
};
/* static mappings between PCMs and modules - may be dynamic in future */
/* NOTE: dai_id must match the entry's index, since hsw_pcm_create_modules()
 * pairs mod_map[i] with pdata->pcm[i]. */
static struct hsw_pcm_module_map mod_map[] = {
	{0, SST_HSW_MODULE_PCM_SYSTEM}, /* "System Pin" */
	{1, SST_HSW_MODULE_PCM}, /* "Offload0 Pin" */
	{2, SST_HSW_MODULE_PCM}, /* "Offload1 Pin" */
	{3, SST_HSW_MODULE_PCM_REFERENCE}, /* "Loopback Pin" */
	{4, SST_HSW_MODULE_PCM_CAPTURE}, /* "Capture Pin" */
};
/*
 * Create a runtime module instance for every PCM listed in mod_map.
 * On any failure the runtimes created so far are freed again and
 * -ENODEV is returned.
 */
static int hsw_pcm_create_modules(struct hsw_priv_data *pdata)
{
	struct sst_hsw *hsw = pdata->hsw;
	struct hsw_pcm_data *pd;
	int idx;

	for (idx = 0; idx < ARRAY_SIZE(mod_map); idx++) {
		pd = &pdata->pcm[idx];

		pd->runtime = sst_hsw_runtime_module_create(hsw,
			mod_map[idx].mod_id, pd->persistent_offset);
		if (pd->runtime == NULL)
			goto err;

		/* remember where this runtime's persistent memory landed */
		pd->persistent_offset = pd->runtime->persistent_offset;
	}

	return 0;

err:
	/* unwind: free only the runtimes created before the failure */
	while (--idx >= 0)
		sst_hsw_runtime_module_free(pdata->pcm[idx].runtime);

	return -ENODEV;
}
/* Free the runtime module instance belonging to every mapped PCM. */
static void hsw_pcm_free_modules(struct hsw_priv_data *pdata)
{
	int idx;

	for (idx = 0; idx < ARRAY_SIZE(mod_map); idx++)
		sst_hsw_runtime_module_free(pdata->pcm[idx].runtime);
}
static void hsw_pcm_free(struct snd_pcm *pcm)
{
snd_pcm_lib_preallocate_free_for_all(pcm);
......@@ -797,6 +833,9 @@ static int hsw_pcm_probe(struct snd_soc_platform *platform)
}
}
/* allocate runtime modules */
hsw_pcm_create_modules(priv_data);
return 0;
err:
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment