Commit 2f4db6f7 authored by Łukasz Stelmach, committed by Mark Brown

spi: spi-s3c64xx: Check return values

Check return values in prepare_dma() and s3c64xx_spi_config() and
propagate errors upwards.

Fixes: 78843727 ("spi: s3c64xx: move to generic dmaengine API")
Reviewed-by: Krzysztof Kozlowski <krzk@kernel.org>
Signed-off-by: Łukasz Stelmach <l.stelmach@samsung.com>
Link: https://lore.kernel.org/r/20201002122243.26849-4-l.stelmach@samsung.com
Signed-off-by: Mark Brown <broonie@kernel.org>
parent ab4efca2
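
Both descriptor preparation and submission in the dmaengine API can fail, which is what this patch now accounts for. As a minimal sketch of that checked sequence (the helper below and its parameter names are illustrative placeholders, not identifiers from spi-s3c64xx.c):

    #include <linux/dmaengine.h>
    #include <linux/errno.h>

    /*
     * Illustrative sketch of a checked dmaengine submission, the pattern
     * this patch applies; names here are placeholders, not driver code.
     */
    static int submit_checked(struct dma_chan *chan, struct sg_table *sgt,
                              enum dma_transfer_direction dir,
                              dma_async_tx_callback done_cb, void *ctx)
    {
            struct dma_async_tx_descriptor *desc;
            dma_cookie_t cookie;

            /* Preparation may fail (e.g. resource exhaustion): report it. */
            desc = dmaengine_prep_slave_sg(chan, sgt->sgl, sgt->nents, dir,
                                           DMA_PREP_INTERRUPT);
            if (!desc)
                    return -ENOMEM;

            desc->callback = done_cb;
            desc->callback_param = ctx;

            /*
             * dmaengine_submit() returns a cookie; dma_submit_error()
             * yields nonzero when that cookie encodes an error.
             */
            cookie = dmaengine_submit(desc);
            if (dma_submit_error(cookie))
                    return -EIO;

            /* Only start the engine once the descriptor is queued. */
            dma_async_issue_pending(chan);
            return 0;
    }

The patch additionally stores the cookie in struct s3c64xx_spi_dma_data, keeping it alongside the channel it belongs to.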
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -122,6 +122,7 @@
 
 struct s3c64xx_spi_dma_data {
 	struct dma_chan *ch;
+	dma_cookie_t cookie;
 	enum dma_transfer_direction direction;
 };
 
@@ -271,12 +272,13 @@ static void s3c64xx_spi_dmacb(void *data)
 	spin_unlock_irqrestore(&sdd->lock, flags);
 }
 
-static void prepare_dma(struct s3c64xx_spi_dma_data *dma,
-			struct sg_table *sgt)
+static int prepare_dma(struct s3c64xx_spi_dma_data *dma,
+		       struct sg_table *sgt)
 {
 	struct s3c64xx_spi_driver_data *sdd;
 	struct dma_slave_config config;
 	struct dma_async_tx_descriptor *desc;
+	int ret;
 
 	memset(&config, 0, sizeof(config));
 
@@ -300,12 +302,24 @@ static void prepare_dma(struct s3c64xx_spi_dma_data *dma,
 
 	desc = dmaengine_prep_slave_sg(dma->ch, sgt->sgl, sgt->nents,
 				       dma->direction, DMA_PREP_INTERRUPT);
+	if (!desc) {
+		dev_err(&sdd->pdev->dev, "unable to prepare %s scatterlist",
+			dma->direction == DMA_DEV_TO_MEM ? "rx" : "tx");
+		return -ENOMEM;
+	}
 
 	desc->callback = s3c64xx_spi_dmacb;
 	desc->callback_param = dma;
 
-	dmaengine_submit(desc);
+	dma->cookie = dmaengine_submit(desc);
+	ret = dma_submit_error(dma->cookie);
+	if (ret) {
+		dev_err(&sdd->pdev->dev, "DMA submission failed");
+		return -EIO;
+	}
+
 	dma_async_issue_pending(dma->ch);
+	return 0;
 }
 
 static void s3c64xx_spi_set_cs(struct spi_device *spi, bool enable)
@@ -355,11 +369,12 @@ static bool s3c64xx_spi_can_dma(struct spi_master *master,
 	return xfer->len > (FIFO_LVL_MASK(sdd) >> 1) + 1;
 }
 
-static void s3c64xx_enable_datapath(struct s3c64xx_spi_driver_data *sdd,
-				    struct spi_transfer *xfer, int dma_mode)
+static int s3c64xx_enable_datapath(struct s3c64xx_spi_driver_data *sdd,
+				   struct spi_transfer *xfer, int dma_mode)
 {
 	void __iomem *regs = sdd->regs;
 	u32 modecfg, chcfg;
+	int ret = 0;
 
 	modecfg = readl(regs + S3C64XX_SPI_MODE_CFG);
 	modecfg &= ~(S3C64XX_SPI_MODE_TXDMA_ON | S3C64XX_SPI_MODE_RXDMA_ON);
@@ -385,7 +400,7 @@ static void s3c64xx_enable_datapath(struct s3c64xx_spi_driver_data *sdd,
 		chcfg |= S3C64XX_SPI_CH_TXCH_ON;
 		if (dma_mode) {
 			modecfg |= S3C64XX_SPI_MODE_TXDMA_ON;
-			prepare_dma(&sdd->tx_dma, &xfer->tx_sg);
+			ret = prepare_dma(&sdd->tx_dma, &xfer->tx_sg);
 		} else {
 			switch (sdd->cur_bpw) {
 			case 32:
@@ -417,12 +432,17 @@ static void s3c64xx_enable_datapath(struct s3c64xx_spi_driver_data *sdd,
 			writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff)
 					| S3C64XX_SPI_PACKET_CNT_EN,
 					regs + S3C64XX_SPI_PACKET_CNT);
-			prepare_dma(&sdd->rx_dma, &xfer->rx_sg);
+			ret = prepare_dma(&sdd->rx_dma, &xfer->rx_sg);
 		}
 	}
 
+	if (ret)
+		return ret;
+
 	writel(modecfg, regs + S3C64XX_SPI_MODE_CFG);
 	writel(chcfg, regs + S3C64XX_SPI_CH_CFG);
+
+	return 0;
 }
 
 static u32 s3c64xx_spi_wait_for_timeout(struct s3c64xx_spi_driver_data *sdd,
@@ -555,9 +575,10 @@ static int s3c64xx_wait_for_pio(struct s3c64xx_spi_driver_data *sdd,
 	return 0;
 }
 
-static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
+static int s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
 {
 	void __iomem *regs = sdd->regs;
+	int ret;
 	u32 val;
 
 	/* Disable Clock */
@@ -605,7 +626,9 @@ static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
 
 	if (sdd->port_conf->clk_from_cmu) {
 		/* The src_clk clock is divided internally by 2 */
-		clk_set_rate(sdd->src_clk, sdd->cur_speed * 2);
+		ret = clk_set_rate(sdd->src_clk, sdd->cur_speed * 2);
+		if (ret)
+			return ret;
 	} else {
 		/* Configure Clock */
 		val = readl(regs + S3C64XX_SPI_CLK_CFG);
@@ -619,6 +642,8 @@ static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
 		val |= S3C64XX_SPI_ENCLK_ENABLE;
 		writel(val, regs + S3C64XX_SPI_CLK_CFG);
 	}
+
+	return 0;
 }
 
 #define XFER_DMAADDR_INVALID DMA_BIT_MASK(32)
@@ -661,7 +686,9 @@ static int s3c64xx_spi_transfer_one(struct spi_master *master,
 		sdd->cur_bpw = bpw;
 		sdd->cur_speed = speed;
 		sdd->cur_mode = spi->mode;
-		s3c64xx_spi_config(sdd);
+		status = s3c64xx_spi_config(sdd);
+		if (status)
+			return status;
 	}
 
 	if (!is_polling(sdd) && (xfer->len > fifo_len) &&
@@ -688,10 +715,15 @@ static int s3c64xx_spi_transfer_one(struct spi_master *master,
 		/* Start the signals */
 		s3c64xx_spi_set_cs(spi, true);
 
-		s3c64xx_enable_datapath(sdd, xfer, use_dma);
+		status = s3c64xx_enable_datapath(sdd, xfer, use_dma);
 
 		spin_unlock_irqrestore(&sdd->lock, flags);
 
+		if (status) {
+			dev_err(&spi->dev, "failed to enable data path for transfer: %d\n", status);
+			break;
+		}
+
 		if (use_dma)
 			status = s3c64xx_wait_for_dma(sdd, xfer);
 		else