Commit e5f0dfa7 authored by Vijaya Krishna Nivarthi, committed by Mark Brown

spi: spi-geni-qcom: Add support for SE DMA mode

SE DMA mode can be used for larger transfers and FIFO mode
for smaller transfers.
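
The selection heuristic this patch adds boils down to comparing the transfer
length against the FIFO capacity at the current word size. Below is a minimal
standalone sketch of that comparison; the struct and function names here are
illustrative stand-ins for the driver's spi_geni_master fields
(tx_fifo_depth, fifo_width_bits, cur_bits_per_word), not driver API.

#include <stdbool.h>

/* Illustrative stand-ins for the spi_geni_master fields used below. */
struct se_fifo_cfg {
	unsigned int tx_fifo_depth;   /* FIFO depth, in FIFO words */
	unsigned int fifo_width_bits; /* width of one FIFO word, in bits */
	unsigned int bits_per_word;   /* current SPI word size, in bits */
};

/*
 * Mirror of the selection in setup_se_xfer(): a transfer that fits
 * entirely in the FIFO uses GENI_SE_FIFO, anything larger uses
 * GENI_SE_DMA. len is the transfer length in SPI words, matching
 * the driver's len variable.
 */
static bool spi_xfer_wants_dma(const struct se_fifo_cfg *cfg, unsigned int len)
{
	unsigned int fifo_size =
		cfg->tx_fifo_depth * cfg->fifo_width_bits / cfg->bits_per_word;

	return len > fifo_size;
}

For example, with a 16-word-deep, 32-bit-wide FIFO and 8-bit SPI words,
fifo_size works out to 64, so transfers longer than 64 words take the DMA
path.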
Signed-off-by: Vijaya Krishna Nivarthi <quic_vnivarth@quicinc.com>
Reviewed-by: Douglas Anderson <dianders@chromium.org>
Link: https://lore.kernel.org/r/1670509544-15977-1-git-send-email-quic_vnivarth@quicinc.com
Signed-off-by: Mark Brown <broonie@kernel.org>
parent 1b929c02
--- a/drivers/spi/spi-geni-qcom.c
+++ b/drivers/spi/spi-geni-qcom.c
@@ -87,6 +87,8 @@ struct spi_geni_master {
 	struct completion cs_done;
 	struct completion cancel_done;
 	struct completion abort_done;
+	struct completion tx_reset_done;
+	struct completion rx_reset_done;
 	unsigned int oversampling;
 	spinlock_t lock;
 	int irq;
@@ -95,6 +97,8 @@ struct spi_geni_master {
 	struct dma_chan *tx;
 	struct dma_chan *rx;
 	int cur_xfer_mode;
+	dma_addr_t tx_se_dma;
+	dma_addr_t rx_se_dma;
 };
 
 static int get_spi_clk_cfg(unsigned int speed_hz,
@@ -129,23 +133,27 @@ static int get_spi_clk_cfg(unsigned int speed_hz,
 	return ret;
 }
 
-static void handle_fifo_timeout(struct spi_master *spi,
+static void handle_se_timeout(struct spi_master *spi,
 				struct spi_message *msg)
 {
 	struct spi_geni_master *mas = spi_master_get_devdata(spi);
 	unsigned long time_left;
 	struct geni_se *se = &mas->se;
+	const struct spi_transfer *xfer;
 
 	spin_lock_irq(&mas->lock);
 	reinit_completion(&mas->cancel_done);
-	writel(0, se->base + SE_GENI_TX_WATERMARK_REG);
+	if (mas->cur_xfer_mode == GENI_SE_FIFO)
+		writel(0, se->base + SE_GENI_TX_WATERMARK_REG);
+	xfer = mas->cur_xfer;
 	mas->cur_xfer = NULL;
 	geni_se_cancel_m_cmd(se);
 	spin_unlock_irq(&mas->lock);
 
 	time_left = wait_for_completion_timeout(&mas->cancel_done, HZ);
 	if (time_left)
-		return;
+		goto unmap_if_dma;
 
 	spin_lock_irq(&mas->lock);
 	reinit_completion(&mas->abort_done);
@@ -162,6 +170,39 @@ static void handle_fifo_timeout(struct spi_master *spi,
 		 */
 		mas->abort_failed = true;
 	}
+
+unmap_if_dma:
+	if (mas->cur_xfer_mode == GENI_SE_DMA) {
+		if (xfer) {
+			if (xfer->tx_buf && mas->tx_se_dma) {
+				spin_lock_irq(&mas->lock);
+				reinit_completion(&mas->tx_reset_done);
+				writel(1, se->base + SE_DMA_TX_FSM_RST);
+				spin_unlock_irq(&mas->lock);
+				time_left = wait_for_completion_timeout(&mas->tx_reset_done, HZ);
+				if (!time_left)
+					dev_err(mas->dev, "DMA TX RESET failed\n");
+				geni_se_tx_dma_unprep(se, mas->tx_se_dma, xfer->len);
+			}
+			if (xfer->rx_buf && mas->rx_se_dma) {
+				spin_lock_irq(&mas->lock);
+				reinit_completion(&mas->rx_reset_done);
+				writel(1, se->base + SE_DMA_RX_FSM_RST);
+				spin_unlock_irq(&mas->lock);
+				time_left = wait_for_completion_timeout(&mas->rx_reset_done, HZ);
+				if (!time_left)
+					dev_err(mas->dev, "DMA RX RESET failed\n");
+				geni_se_rx_dma_unprep(se, mas->rx_se_dma, xfer->len);
+			}
+		} else {
+			/*
+			 * This can happen if a timeout happened and we had to wait
+			 * for lock in this function because isr was holding the lock
+			 * and handling transfer completion at that time.
+			 */
+			dev_warn(mas->dev, "Cancel/Abort on completed SPI transfer\n");
+		}
+	}
 }
 
 static void handle_gpi_timeout(struct spi_master *spi, struct spi_message *msg)
@@ -178,7 +219,8 @@ static void spi_geni_handle_err(struct spi_master *spi, struct spi_message *msg)
 	switch (mas->cur_xfer_mode) {
 	case GENI_SE_FIFO:
-		handle_fifo_timeout(spi, msg);
+	case GENI_SE_DMA:
+		handle_se_timeout(spi, msg);
 		break;
 	case GENI_GPI_DMA:
 		handle_gpi_timeout(spi, msg);
@@ -250,6 +292,8 @@ static void spi_geni_set_cs(struct spi_device *slv, bool set_flag)
 	}
 
 	mas->cs_flag = set_flag;
+	/* set xfer_mode to FIFO to complete cs_done in isr */
+	mas->cur_xfer_mode = GENI_SE_FIFO;
 	reinit_completion(&mas->cs_done);
 	if (set_flag)
 		geni_se_setup_m_cmd(se, SPI_CS_ASSERT, 0);
@@ -260,7 +304,7 @@ static void spi_geni_set_cs(struct spi_device *slv, bool set_flag)
 	time_left = wait_for_completion_timeout(&mas->cs_done, HZ);
 	if (!time_left) {
 		dev_warn(mas->dev, "Timeout setting chip select\n");
-		handle_fifo_timeout(spi, NULL);
+		handle_se_timeout(spi, NULL);
 	}
 
 exit:
@@ -482,8 +526,12 @@ static bool geni_can_dma(struct spi_controller *ctlr,
 {
 	struct spi_geni_master *mas = spi_master_get_devdata(slv->master);
 
-	/* check if dma is supported */
-	return mas->cur_xfer_mode != GENI_SE_FIFO;
+	/*
+	 * Return true if transfer needs to be mapped prior to
+	 * calling transfer_one which is the case only for GPI_DMA.
+	 * For SE_DMA mode, map/unmap is done in geni_se_*x_dma_prep.
+	 */
+	return mas->cur_xfer_mode == GENI_GPI_DMA;
 }
 
 static int spi_geni_prepare_message(struct spi_master *spi,
@@ -494,6 +542,7 @@ static int spi_geni_prepare_message(struct spi_master *spi,
 	switch (mas->cur_xfer_mode) {
 	case GENI_SE_FIFO:
+	case GENI_SE_DMA:
 		if (spi_geni_is_abort_still_pending(mas))
 			return -EBUSY;
 		ret = setup_fifo_params(spi_msg->spi, spi);
@@ -597,7 +646,7 @@ static int spi_geni_init(struct spi_geni_master *mas)
 		break;
 	}
 	/*
-	 * in case of failure to get dma channel, we can still do the
+	 * in case of failure to get gpi dma channel, we can still do the
 	 * FIFO mode, so fallthrough
 	 */
 	dev_warn(mas->dev, "FIFO mode disabled, but couldn't get DMA, fall back to FIFO mode\n");
@@ -716,12 +765,12 @@ static void geni_spi_handle_rx(struct spi_geni_master *mas)
 	mas->rx_rem_bytes -= rx_bytes;
 }
 
-static void setup_fifo_xfer(struct spi_transfer *xfer,
+static int setup_se_xfer(struct spi_transfer *xfer,
 				struct spi_geni_master *mas,
 				u16 mode, struct spi_master *spi)
 {
 	u32 m_cmd = 0;
-	u32 len;
+	u32 len, fifo_size;
 	struct geni_se *se = &mas->se;
 	int ret;
@@ -748,7 +797,7 @@ static void setup_fifo_xfer(struct spi_transfer *xfer,
 	/* Speed and bits per word can be overridden per transfer */
 	ret = geni_spi_set_clock_and_bw(mas, xfer->speed_hz);
 	if (ret)
-		return;
+		return ret;
 
 	mas->tx_rem_bytes = 0;
 	mas->rx_rem_bytes = 0;
@@ -772,17 +821,50 @@ static void setup_fifo_xfer(struct spi_transfer *xfer,
 			mas->rx_rem_bytes = xfer->len;
 	}
 
+	/* Select transfer mode based on transfer length */
+	fifo_size = mas->tx_fifo_depth * mas->fifo_width_bits / mas->cur_bits_per_word;
+	mas->cur_xfer_mode = (len <= fifo_size) ? GENI_SE_FIFO : GENI_SE_DMA;
+	geni_se_select_mode(se, mas->cur_xfer_mode);
+
 	/*
 	 * Lock around right before we start the transfer since our
 	 * interrupt could come in at any time now.
 	 */
 	spin_lock_irq(&mas->lock);
 	geni_se_setup_m_cmd(se, m_cmd, FRAGMENTATION);
-	if (m_cmd & SPI_TX_ONLY) {
+
+	if (mas->cur_xfer_mode == GENI_SE_DMA) {
+		if (m_cmd & SPI_RX_ONLY) {
+			ret = geni_se_rx_dma_prep(se, xfer->rx_buf,
+				xfer->len, &mas->rx_se_dma);
+			if (ret) {
+				dev_err(mas->dev, "Failed to setup Rx dma %d\n", ret);
+				mas->rx_se_dma = 0;
+				goto unlock_and_return;
+			}
+		}
+		if (m_cmd & SPI_TX_ONLY) {
+			ret = geni_se_tx_dma_prep(se, (void *)xfer->tx_buf,
+				xfer->len, &mas->tx_se_dma);
+			if (ret) {
+				dev_err(mas->dev, "Failed to setup Tx dma %d\n", ret);
+				mas->tx_se_dma = 0;
+				if (m_cmd & SPI_RX_ONLY) {
+					/* Unmap rx buffer if duplex transfer */
+					geni_se_rx_dma_unprep(se, mas->rx_se_dma, xfer->len);
+					mas->rx_se_dma = 0;
+				}
+				goto unlock_and_return;
+			}
+		}
+	} else if (m_cmd & SPI_TX_ONLY) {
 		if (geni_spi_handle_tx(mas))
 			writel(mas->tx_wm, se->base + SE_GENI_TX_WATERMARK_REG);
 	}
+
+unlock_and_return:
 	spin_unlock_irq(&mas->lock);
+	return ret;
 }
 
 static int spi_geni_transfer_one(struct spi_master *spi,
@@ -790,6 +872,7 @@ static int spi_geni_transfer_one(struct spi_master *spi,
 		struct spi_transfer *xfer)
 {
 	struct spi_geni_master *mas = spi_master_get_devdata(spi);
+	int ret;
 
 	if (spi_geni_is_abort_still_pending(mas))
 		return -EBUSY;
@@ -798,9 +881,12 @@ static int spi_geni_transfer_one(struct spi_master *spi,
 	if (!xfer->len)
 		return 0;
 
-	if (mas->cur_xfer_mode == GENI_SE_FIFO) {
-		setup_fifo_xfer(xfer, mas, slv->mode, spi);
-		return 1;
+	if (mas->cur_xfer_mode == GENI_SE_FIFO || mas->cur_xfer_mode == GENI_SE_DMA) {
+		ret = setup_se_xfer(xfer, mas, slv->mode, spi);
+		/* SPI framework expects +ve ret code to wait for transfer complete */
+		if (!ret)
+			ret = 1;
+		return ret;
 	}
 	return setup_gsi_xfer(xfer, mas, slv, spi);
 }
@@ -823,6 +909,7 @@ static irqreturn_t geni_spi_isr(int irq, void *data)
 
 	spin_lock(&mas->lock);
 
+	if (mas->cur_xfer_mode == GENI_SE_FIFO) {
 	if ((m_irq & M_RX_FIFO_WATERMARK_EN) || (m_irq & M_RX_FIFO_LAST_EN))
 		geni_spi_handle_rx(mas);
@@ -858,6 +945,36 @@ static irqreturn_t geni_spi_isr(int irq, void *data)
 			complete(&mas->cs_done);
 		}
 	}
+	} else if (mas->cur_xfer_mode == GENI_SE_DMA) {
+		const struct spi_transfer *xfer = mas->cur_xfer;
+		u32 dma_tx_status = readl_relaxed(se->base + SE_DMA_TX_IRQ_STAT);
+		u32 dma_rx_status = readl_relaxed(se->base + SE_DMA_RX_IRQ_STAT);
+
+		if (dma_tx_status)
+			writel(dma_tx_status, se->base + SE_DMA_TX_IRQ_CLR);
+		if (dma_rx_status)
+			writel(dma_rx_status, se->base + SE_DMA_RX_IRQ_CLR);
+		if (dma_tx_status & TX_DMA_DONE)
+			mas->tx_rem_bytes = 0;
+		if (dma_rx_status & RX_DMA_DONE)
+			mas->rx_rem_bytes = 0;
+		if (dma_tx_status & TX_RESET_DONE)
+			complete(&mas->tx_reset_done);
+		if (dma_rx_status & RX_RESET_DONE)
+			complete(&mas->rx_reset_done);
+		if (!mas->tx_rem_bytes && !mas->rx_rem_bytes && xfer) {
+			if (xfer->tx_buf && mas->tx_se_dma) {
+				geni_se_tx_dma_unprep(se, mas->tx_se_dma, xfer->len);
+				mas->tx_se_dma = 0;
+			}
+			if (xfer->rx_buf && mas->rx_se_dma) {
+				geni_se_rx_dma_unprep(se, mas->rx_se_dma, xfer->len);
+				mas->rx_se_dma = 0;
+			}
+			spi_finalize_current_transfer(spi);
+			mas->cur_xfer = NULL;
+		}
+	}
 
 	if (m_irq & M_CMD_CANCEL_EN)
 		complete(&mas->cancel_done);
@@ -949,6 +1066,8 @@ static int spi_geni_probe(struct platform_device *pdev)
 	init_completion(&mas->cs_done);
 	init_completion(&mas->cancel_done);
 	init_completion(&mas->abort_done);
+	init_completion(&mas->tx_reset_done);
+	init_completion(&mas->rx_reset_done);
 	spin_lock_init(&mas->lock);
 	pm_runtime_use_autosuspend(&pdev->dev);
 	pm_runtime_set_autosuspend_delay(&pdev->dev, 250);