Commit 9ba8ffef authored by Dharageswari.R, committed by Mark Brown

ASoC: Intel: Skylake: Fix pipe memory allocation leak

We currently check and allocate the pipeline resources in one shot. If
module creation fails later, the resources that were already accounted
are never released, so they leak.

So split the resource handling into two steps: first check whether the
resources are available, then add them to the pool only after the
pipeline has been created successfully. Two new functions are added for
the availability checks, and the existing functions are re-purposed to
only add the memory and MCPS resources.
Signed-off-by: Dharageswari.R <dharageswari.r@intel.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
Signed-off-by: Mark Brown <broonie@kernel.org>
parent 7ca42f5a
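To illustrate the check-then-commit pattern the patch introduces, here is a
minimal standalone C sketch. The struct, field and function names below are
simplified placeholders for illustration only (just the mem/max_mem accounting
idea is borrowed from the driver); they are not the kernel's actual types or API.

    /*
     * Sketch: check availability first, create the pipe/modules, and only
     * then commit the reservation to the pool, so a failed creation leaves
     * the counters untouched.
     */
    #include <stdbool.h>
    #include <stdio.h>

    struct mem_pool {                       /* stand-in for the resource pool */
            int mem;                        /* pages currently accounted      */
            int max_mem;                    /* total pages available          */
    };

    /* step 1: only report whether the request fits, do not account it */
    static bool pipe_mem_avail(const struct mem_pool *p, int pages)
    {
            return p->mem + pages <= p->max_mem;
    }

    /* step 2: account the pages, called only after successful creation */
    static void pipe_mem_alloc(struct mem_pool *p, int pages)
    {
            p->mem += pages;
    }

    static int create_pipe(struct mem_pool *p, int pages, bool creation_ok)
    {
            if (!pipe_mem_avail(p, pages))
                    return -1;              /* nothing accounted yet       */

            if (!creation_ok)
                    return -1;              /* failure leaks no pool pages */

            pipe_mem_alloc(p, pages);       /* commit on success only      */
            return 0;
    }

    int main(void)
    {
            struct mem_pool pool = { .mem = 0, .max_mem = 8 };

            create_pipe(&pool, 6, false);   /* fails, pool.mem stays 0     */
            create_pipe(&pool, 6, true);    /* succeeds, pool.mem == 6     */
            printf("accounted pages: %d of %d\n", pool.mem, pool.max_mem);
            return 0;
    }

The MCPS counter is handled the same way in the actual change below.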
@@ -54,12 +54,9 @@ static int is_skl_dsp_widget_type(struct snd_soc_dapm_widget *w)
 
 /*
  * Each pipelines needs memory to be allocated. Check if we have free memory
- * from available pool. Then only add this to pool
- * This is freed when pipe is deleted
- * Note: DSP does actual memory management we only keep track for complete
- * pool
+ * from available pool.
  */
-static bool skl_tplg_alloc_pipe_mem(struct skl *skl,
+static bool skl_is_pipe_mem_avail(struct skl *skl,
 				struct skl_module_cfg *mconfig)
 {
 	struct skl_sst *ctx = skl->skl_sst;
@@ -74,10 +71,20 @@ static bool skl_tplg_alloc_pipe_mem(struct skl *skl,
 			"exceeds ppl memory available %d mem %d\n",
 			skl->resource.max_mem, skl->resource.mem);
 		return false;
+	} else {
+		return true;
 	}
+}
 
+/*
+ * Add the mem to the mem pool. This is freed when pipe is deleted.
+ * Note: DSP does actual memory management we only keep track for complete
+ * pool
+ */
+static void skl_tplg_alloc_pipe_mem(struct skl *skl,
+				struct skl_module_cfg *mconfig)
+{
 	skl->resource.mem += mconfig->pipe->memory_pages;
-
-	return true;
 }
 
 /*
@@ -85,10 +92,10 @@ static bool skl_tplg_alloc_pipe_mem(struct skl *skl,
  * quantified in MCPS (Million Clocks Per Second) required for module/pipe
  *
  * Each pipelines needs mcps to be allocated. Check if we have mcps for this
- * pipe. This adds the mcps to driver counter
- * This is removed on pipeline delete
+ * pipe.
  */
-static bool skl_tplg_alloc_pipe_mcps(struct skl *skl,
+static bool skl_is_pipe_mcps_avail(struct skl *skl,
 				struct skl_module_cfg *mconfig)
 {
 	struct skl_sst *ctx = skl->skl_sst;
@@ -101,10 +108,15 @@ static bool skl_tplg_alloc_pipe_mcps(struct skl *skl,
 			"exceeds ppl mcps available %d > mem %d\n",
 			skl->resource.max_mcps, skl->resource.mcps);
 		return false;
+	} else {
+		return true;
 	}
+}
 
+static void skl_tplg_alloc_pipe_mcps(struct skl *skl,
+				struct skl_module_cfg *mconfig)
+{
 	skl->resource.mcps += mconfig->mcps;
-
-	return true;
 }
 
 /*
@@ -411,7 +423,7 @@ skl_tplg_init_pipe_modules(struct skl *skl, struct skl_pipe *pipe)
 		mconfig = w->priv;
 
 		/* check resource available */
-		if (!skl_tplg_alloc_pipe_mcps(skl, mconfig))
+		if (!skl_is_pipe_mcps_avail(skl, mconfig))
 			return -ENOMEM;
 
 		if (mconfig->is_loadable && ctx->dsp->fw_ops.load_mod) {
@@ -435,6 +447,7 @@ skl_tplg_init_pipe_modules(struct skl *skl, struct skl_pipe *pipe)
 		ret = skl_tplg_set_module_params(w, ctx);
 		if (ret < 0)
 			return ret;
+		skl_tplg_alloc_pipe_mcps(skl, mconfig);
 	}
 
 	return 0;
@@ -477,10 +490,10 @@ static int skl_tplg_mixer_dapm_pre_pmu_event(struct snd_soc_dapm_widget *w,
 	struct skl_sst *ctx = skl->skl_sst;
 
 	/* check resource available */
-	if (!skl_tplg_alloc_pipe_mcps(skl, mconfig))
+	if (!skl_is_pipe_mcps_avail(skl, mconfig))
 		return -EBUSY;
 
-	if (!skl_tplg_alloc_pipe_mem(skl, mconfig))
+	if (!skl_is_pipe_mem_avail(skl, mconfig))
 		return -ENOMEM;
 
 	/*
@@ -526,6 +539,9 @@ static int skl_tplg_mixer_dapm_pre_pmu_event(struct snd_soc_dapm_widget *w,
 		src_module = dst_module;
 	}
 
+	skl_tplg_alloc_pipe_mem(skl, mconfig);
+	skl_tplg_alloc_pipe_mcps(skl, mconfig);
+
 	return 0;
 }
...