Commit 4415b03a authored by Laurent Pinchart, committed by Vinod Koul

dmaengine: shdma: Allocate cyclic sg list dynamically

The sg list used to prepare cyclic DMA descriptors is currently
allocated statically on the stack as an array of 32 elements. This makes
the shdma_prep_dma_cyclic() function consume a lot of stack space, as
reported by the compiler:

drivers/dma/sh/shdma-base.c: In function ‘shdma_prep_dma_cyclic’:
drivers/dma/sh/shdma-base.c:715:1: warning: the frame size of 1056 bytes
is larger than 1024 bytes [-Wframe-larger-than=]

Given the limited Linux kernel stack size, this could lead to stack
overflows. Fix the problem by allocating the sg list dynamically.
Signed-off-by: Laurent Pinchart <laurent.pinchart+renesas@ideasonboard.com>
Signed-off-by: Simon Horman <horms+renesas@verge.net.au>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
parent c091ff51
...@@ -672,11 +672,12 @@ static struct dma_async_tx_descriptor *shdma_prep_dma_cyclic( ...@@ -672,11 +672,12 @@ static struct dma_async_tx_descriptor *shdma_prep_dma_cyclic(
{ {
struct shdma_chan *schan = to_shdma_chan(chan); struct shdma_chan *schan = to_shdma_chan(chan);
struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device); struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
struct dma_async_tx_descriptor *desc;
const struct shdma_ops *ops = sdev->ops; const struct shdma_ops *ops = sdev->ops;
unsigned int sg_len = buf_len / period_len; unsigned int sg_len = buf_len / period_len;
int slave_id = schan->slave_id; int slave_id = schan->slave_id;
dma_addr_t slave_addr; dma_addr_t slave_addr;
struct scatterlist sgl[SHDMA_MAX_SG_LEN]; struct scatterlist *sgl;
int i; int i;
if (!chan) if (!chan)
...@@ -700,7 +701,16 @@ static struct dma_async_tx_descriptor *shdma_prep_dma_cyclic( ...@@ -700,7 +701,16 @@ static struct dma_async_tx_descriptor *shdma_prep_dma_cyclic(
slave_addr = ops->slave_addr(schan); slave_addr = ops->slave_addr(schan);
/*
 * Allocate the sg list dynamically as it would consume too much stack
 * space.
 */
sgl = kcalloc(sg_len, sizeof(*sgl), GFP_KERNEL);
if (!sgl)
return NULL;
sg_init_table(sgl, sg_len); sg_init_table(sgl, sg_len);
for (i = 0; i < sg_len; i++) { for (i = 0; i < sg_len; i++) {
dma_addr_t src = buf_addr + (period_len * i); dma_addr_t src = buf_addr + (period_len * i);
...@@ -710,8 +720,11 @@ static struct dma_async_tx_descriptor *shdma_prep_dma_cyclic( ...@@ -710,8 +720,11 @@ static struct dma_async_tx_descriptor *shdma_prep_dma_cyclic(
sg_dma_len(&sgl[i]) = period_len; sg_dma_len(&sgl[i]) = period_len;
} }
return shdma_prep_sg(schan, sgl, sg_len, &slave_addr, desc = shdma_prep_sg(schan, sgl, sg_len, &slave_addr,
direction, flags, true); direction, flags, true);
kfree(sgl);
return desc;
} }
static int shdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, static int shdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment