Commit d5931dd0 authored by Linus Torvalds

Merge tag 'spi-fix-v6.10-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/spi

Pull spi fixes from Mark Brown:
 "A series of fixes that came in since the merge window, the main thing
  being the fixes Andy did for DMA sync where we were calling into the
  DMA API in suprising ways and causing issues as a result, the main
  thing being confusing the IOMMU code.

  We've also got some fairly important fixes for the stm32 driver: it
  supports a wide range of hardware, and some optimisations that were
  done recently turned out to break some systems. There is also a fix
  to prevent glitched signals on the bus in the cadence driver"

* tag 'spi-fix-v6.10-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/spi:
  spi: stm32: Don't warn about spurious interrupts
  spi: Assign dummy scatterlist to unidirectional transfers
  spi: cadence: Ensure data lines set to low during dummy-cycle period
  spi: stm32: Revert change that enabled controller before asserting CS
  spi: Check if transfer is mapped before calling DMA sync APIs
  spi: Don't mark message DMA mapped when no transfer in it is
parents 28add42d 95d7c452
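The DMA sync problem described above comes down to the SPI core calling dma_sync_sgtable_*() for transfers that were never actually DMA-mapped (for instance transfers the controller handles by PIO inside an otherwise mapped message), which is what ends up confusing the IOMMU code. Below is a minimal sketch of the guarded sync path, modelled on the drivers/spi/spi.c hunks further down; the sketch_ name is only a marker that this is an illustration, not the in-tree function, while the fields and DMA API calls it uses are the real ones shown in the diff.

#include <linux/dma-mapping.h>
#include <linux/spi/spi.h>

static void sketch_spi_dma_sync_for_device(struct spi_controller *ctlr,
					   struct spi_message *msg,
					   struct spi_transfer *xfer)
{
	struct device *rx_dev = ctlr->cur_rx_dma_dev;
	struct device *tx_dev = ctlr->cur_tx_dma_dev;

	/* The message as a whole was never DMA-mapped: nothing to sync. */
	if (!ctlr->cur_msg_mapped)
		return;

	/* This particular transfer is not a DMA transfer: skip the DMA API. */
	if (!ctlr->can_dma(ctlr, msg->spi, xfer))
		return;

	dma_sync_sgtable_for_device(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
	dma_sync_sgtable_for_device(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
}

The cur_msg_mapped check already existed; the new part in this pull is the per-transfer can_dma() check, which is why the helpers now take the spi_message as an argument.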
drivers/spi/spi-cadence-xspi.c
@@ -145,6 +145,9 @@
#define CDNS_XSPI_STIG_DONE_FLAG BIT(0)
#define CDNS_XSPI_TRD_STATUS 0x0104
#define MODE_NO_OF_BYTES GENMASK(25, 24)
#define MODEBYTES_COUNT 1
/* Helper macros for filling command registers */
#define CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_1(op, data_phase) ( \
FIELD_PREP(CDNS_XSPI_CMD_INSTR_TYPE, (data_phase) ? \
@@ -157,9 +160,10 @@
FIELD_PREP(CDNS_XSPI_CMD_P1_R2_ADDR3, ((op)->addr.val >> 24) & 0xFF) | \
FIELD_PREP(CDNS_XSPI_CMD_P1_R2_ADDR4, ((op)->addr.val >> 32) & 0xFF))
#define CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_3(op) ( \
#define CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_3(op, modebytes) ( \
FIELD_PREP(CDNS_XSPI_CMD_P1_R3_ADDR5, ((op)->addr.val >> 40) & 0xFF) | \
FIELD_PREP(CDNS_XSPI_CMD_P1_R3_CMD, (op)->cmd.opcode) | \
FIELD_PREP(MODE_NO_OF_BYTES, modebytes) | \
FIELD_PREP(CDNS_XSPI_CMD_P1_R3_NUM_ADDR_BYTES, (op)->addr.nbytes))
#define CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_4(op, chipsel) ( \
@@ -173,12 +177,12 @@
#define CDNS_XSPI_CMD_FLD_DSEQ_CMD_2(op) \
FIELD_PREP(CDNS_XSPI_CMD_DSEQ_R2_DCNT_L, (op)->data.nbytes & 0xFFFF)
#define CDNS_XSPI_CMD_FLD_DSEQ_CMD_3(op) ( \
#define CDNS_XSPI_CMD_FLD_DSEQ_CMD_3(op, dummybytes) ( \
FIELD_PREP(CDNS_XSPI_CMD_DSEQ_R3_DCNT_H, \
((op)->data.nbytes >> 16) & 0xffff) | \
FIELD_PREP(CDNS_XSPI_CMD_DSEQ_R3_NUM_OF_DUMMY, \
(op)->dummy.buswidth != 0 ? \
(((op)->dummy.nbytes * 8) / (op)->dummy.buswidth) : \
(((dummybytes) * 8) / (op)->dummy.buswidth) : \
0))
#define CDNS_XSPI_CMD_FLD_DSEQ_CMD_4(op, chipsel) ( \
@@ -351,6 +355,7 @@ static int cdns_xspi_send_stig_command(struct cdns_xspi_dev *cdns_xspi,
u32 cmd_regs[6];
u32 cmd_status;
int ret;
int dummybytes = op->dummy.nbytes;
ret = cdns_xspi_wait_for_controller_idle(cdns_xspi);
if (ret < 0)
@@ -365,7 +370,12 @@ static int cdns_xspi_send_stig_command(struct cdns_xspi_dev *cdns_xspi,
memset(cmd_regs, 0, sizeof(cmd_regs));
cmd_regs[1] = CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_1(op, data_phase);
cmd_regs[2] = CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_2(op);
cmd_regs[3] = CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_3(op);
if (dummybytes != 0) {
cmd_regs[3] = CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_3(op, 1);
dummybytes--;
} else {
cmd_regs[3] = CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_3(op, 0);
}
cmd_regs[4] = CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_4(op,
cdns_xspi->cur_cs);
@@ -375,7 +385,7 @@ static int cdns_xspi_send_stig_command(struct cdns_xspi_dev *cdns_xspi,
cmd_regs[0] = CDNS_XSPI_STIG_DONE_FLAG;
cmd_regs[1] = CDNS_XSPI_CMD_FLD_DSEQ_CMD_1(op);
cmd_regs[2] = CDNS_XSPI_CMD_FLD_DSEQ_CMD_2(op);
cmd_regs[3] = CDNS_XSPI_CMD_FLD_DSEQ_CMD_3(op);
cmd_regs[3] = CDNS_XSPI_CMD_FLD_DSEQ_CMD_3(op, dummybytes);
cmd_regs[4] = CDNS_XSPI_CMD_FLD_DSEQ_CMD_4(op,
cdns_xspi->cur_cs);
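For the cadence fix above ("Ensure data lines set to low during dummy-cycle period"), the point of MODEBYTES_COUNT is that when an operation has dummy bytes, the first of them is emitted as a mode byte in the instruction phase (MODE_NO_OF_BYTES), so the controller keeps driving the data lines low, and only the remaining bytes are turned into clock-only dummy cycles in the data-sequencer register. Here is a small helper sketch of that bookkeeping; the function name and the example numbers are illustrative only and are not part of the driver.

/*
 * Sketch of the dummy-byte split performed in cdns_xspi_send_stig_command()
 * above: one dummy byte becomes a mode byte, the rest become dummy cycles.
 */
static unsigned int sketch_cdns_dummy_cycles(unsigned int dummy_nbytes,
					     unsigned int dummy_buswidth,
					     unsigned int *modebytes)
{
	*modebytes = 0;

	if (dummy_nbytes) {
		*modebytes = 1;		/* MODEBYTES_COUNT: data lines driven low */
		dummy_nbytes--;		/* that byte no longer counts as a dummy byte */
	}

	if (!dummy_buswidth)
		return 0;		/* mirrors the guard in CDNS_XSPI_CMD_FLD_DSEQ_CMD_3() */

	/* e.g. 8 dummy bytes on a 1-bit bus -> 1 mode byte and 56 dummy cycles */
	return dummy_nbytes * 8 / dummy_buswidth;
}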
drivers/spi/spi-stm32.c
@@ -1016,8 +1016,10 @@ static irqreturn_t stm32fx_spi_irq_event(int irq, void *dev_id)
static irqreturn_t stm32fx_spi_irq_thread(int irq, void *dev_id)
{
struct spi_controller *ctrl = dev_id;
struct stm32_spi *spi = spi_controller_get_devdata(ctrl);
spi_finalize_current_transfer(ctrl);
stm32fx_spi_disable(spi);
return IRQ_HANDLED;
}
@@ -1055,7 +1057,7 @@ static irqreturn_t stm32h7_spi_irq_thread(int irq, void *dev_id)
mask |= STM32H7_SPI_SR_TXP | STM32H7_SPI_SR_RXP;
if (!(sr & mask)) {
dev_warn(spi->dev, "spurious IT (sr=0x%08x, ier=0x%08x)\n",
dev_vdbg(spi->dev, "spurious IT (sr=0x%08x, ier=0x%08x)\n",
sr, ier);
spin_unlock_irqrestore(&spi->lock, flags);
return IRQ_NONE;
@@ -1185,8 +1187,6 @@ static int stm32_spi_prepare_msg(struct spi_controller *ctrl,
~clrb) | setb,
spi->base + spi->cfg->regs->cpol.reg);
stm32_spi_enable(spi);
spin_unlock_irqrestore(&spi->lock, flags);
return 0;
@@ -1204,6 +1204,7 @@ static void stm32fx_spi_dma_tx_cb(void *data)
if (spi->cur_comm == SPI_SIMPLEX_TX || spi->cur_comm == SPI_3WIRE_TX) {
spi_finalize_current_transfer(spi->ctrl);
stm32fx_spi_disable(spi);
}
}
@@ -1218,6 +1219,7 @@ static void stm32_spi_dma_rx_cb(void *data)
struct stm32_spi *spi = data;
spi_finalize_current_transfer(spi->ctrl);
spi->cfg->disable(spi);
}
/**
@@ -1305,6 +1307,8 @@ static int stm32fx_spi_transfer_one_irq(struct stm32_spi *spi)
stm32_spi_set_bits(spi, STM32FX_SPI_CR2, cr2);
stm32_spi_enable(spi);
/* starting data transfer when buffer is loaded */
if (spi->tx_buf)
spi->cfg->write_tx(spi);
@@ -1341,6 +1345,8 @@ static int stm32h7_spi_transfer_one_irq(struct stm32_spi *spi)
spin_lock_irqsave(&spi->lock, flags);
stm32_spi_enable(spi);
/* Be sure to have data in fifo before starting data transfer */
if (spi->tx_buf)
stm32h7_spi_write_txfifo(spi);
@@ -1372,6 +1378,8 @@ static void stm32fx_spi_transfer_one_dma_start(struct stm32_spi *spi)
*/
stm32_spi_set_bits(spi, STM32FX_SPI_CR2, STM32FX_SPI_CR2_ERRIE);
}
stm32_spi_enable(spi);
}
/**
@@ -1405,6 +1413,8 @@ static void stm32h7_spi_transfer_one_dma_start(struct stm32_spi *spi)
stm32_spi_set_bits(spi, STM32H7_SPI_IER, ier);
stm32_spi_enable(spi);
if (STM32_SPI_HOST_MODE(spi))
stm32_spi_set_bits(spi, STM32H7_SPI_CR1, STM32H7_SPI_CR1_CSTART);
}
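The stm32 hunks above all revolve around when the controller is enabled: the revert takes stm32_spi_enable() back out of stm32_spi_prepare_msg(), where it ran before chip select handling, and into the points where a transfer actually starts (IRQ and DMA paths), while the IRQ thread and DMA callbacks now disable the block again once the transfer is finalized. The following is a hypothetical, self-contained toy (stub types and helpers, not the stm32 driver) that only illustrates that ordering.

#include <stdbool.h>
#include <stdio.h>

struct toy_spi {
	bool enabled;
	bool has_tx_data;
};

static void toy_enable(struct toy_spi *s)  { s->enabled = true;  puts("SPE set"); }
static void toy_disable(struct toy_spi *s) { s->enabled = false; puts("SPE cleared"); }

static void toy_prepare_msg(struct toy_spi *s)
{
	/* polarity/phase setup would happen here; note: no toy_enable() call */
	(void)s;
}

static void toy_start_transfer(struct toy_spi *s)
{
	toy_enable(s);			/* enable moved here from prepare_msg */
	if (s->has_tx_data)
		puts("TX FIFO preloaded, transfer running");
}

static void toy_finish_transfer(struct toy_spi *s)
{
	puts("transfer finalized");
	toy_disable(s);			/* IRQ thread / DMA callback disables again */
}

int main(void)
{
	struct toy_spi s = { .has_tx_data = true };

	toy_prepare_msg(&s);
	toy_start_transfer(&s);
	toy_finish_transfer(&s);
	return 0;
}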
drivers/spi/spi.c
@@ -1220,6 +1220,11 @@ void spi_unmap_buf(struct spi_controller *ctlr, struct device *dev,
spi_unmap_buf_attrs(ctlr, dev, sgt, dir, 0);
}
/* Dummy SG for unidirect transfers */
static struct scatterlist dummy_sg = {
.page_link = SG_END,
};
static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
struct device *tx_dev, *rx_dev;
@@ -1243,6 +1248,7 @@ static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
else
rx_dev = ctlr->dev.parent;
ret = -ENOMSG;
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
/* The sync is done before each transfer. */
unsigned long attrs = DMA_ATTR_SKIP_CPU_SYNC;
@@ -1257,6 +1263,8 @@ static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
attrs);
if (ret != 0)
return ret;
} else {
xfer->tx_sg.sgl = &dummy_sg;
}
if (xfer->rx_buf != NULL) {
@@ -1270,8 +1278,13 @@ static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
return ret;
}
} else {
xfer->rx_sg.sgl = &dummy_sg;
}
}
/* No transfer has been mapped, bail out with success */
if (ret)
return 0;
ctlr->cur_rx_dma_dev = rx_dev;
ctlr->cur_tx_dma_dev = tx_dev;
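The dummy_sg scatterlist above exists because unidirectional transfers leave one of the two sg tables untouched, and the later dma_sync_sgtable_*() calls walk both directions; pointing the unused direction at a terminated, zero-length scatterlist gives that walk something valid to look at. The ret = -ENOMSG sentinel implements the other core fix in this pull: if the loop never maps a single transfer, __spi_map_msg() returns early and the message is not marked as DMA-mapped. Below is a condensed sketch of both ideas with the real mapping calls elided; the sketch_ names are illustrative, the struct fields and SG_END are the real ones from the hunks.

#include <linux/scatterlist.h>
#include <linux/spi/spi.h>

/* Terminated, zero-length SG for the unused direction of a transfer. */
static struct scatterlist sketch_dummy_sg = {
	.page_link = SG_END,
};

/* Sketch of the mapping loop's bookkeeping; real sg mapping is elided. */
static int sketch_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
	struct spi_transfer *xfer;
	int ret = -ENOMSG;	/* assume nothing gets mapped until proven otherwise */

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (xfer->tx_buf)
			ret = 0;	/* stands in for a successful spi_map_buf_attrs() */
		else
			xfer->tx_sg.sgl = &sketch_dummy_sg;	/* RX-only transfer */

		if (xfer->rx_buf)
			ret = 0;	/* likewise for the RX direction */
		else
			xfer->rx_sg.sgl = &sketch_dummy_sg;	/* TX-only transfer */
	}

	if (ret)
		return 0;	/* nothing mapped: succeed, but don't mark the message mapped */

	/* only now would cur_*_dma_dev be recorded and the message flagged as mapped */
	return 0;
}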
@@ -1307,7 +1320,7 @@ static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg)
return 0;
}
static void spi_dma_sync_for_device(struct spi_controller *ctlr,
static void spi_dma_sync_for_device(struct spi_controller *ctlr, struct spi_message *msg,
struct spi_transfer *xfer)
{
struct device *rx_dev = ctlr->cur_rx_dma_dev;
@@ -1316,11 +1329,14 @@ static void spi_dma_sync_for_device(struct spi_controller *ctlr,
if (!ctlr->cur_msg_mapped)
return;
if (!ctlr->can_dma(ctlr, msg->spi, xfer))
return;
dma_sync_sgtable_for_device(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
dma_sync_sgtable_for_device(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
}
static void spi_dma_sync_for_cpu(struct spi_controller *ctlr,
static void spi_dma_sync_for_cpu(struct spi_controller *ctlr, struct spi_message *msg,
struct spi_transfer *xfer)
{
struct device *rx_dev = ctlr->cur_rx_dma_dev;
@@ -1329,6 +1345,9 @@ static void spi_dma_sync_for_cpu(struct spi_controller *ctlr,
if (!ctlr->cur_msg_mapped)
return;
if (!ctlr->can_dma(ctlr, msg->spi, xfer))
return;
dma_sync_sgtable_for_cpu(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
dma_sync_sgtable_for_cpu(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
}
@@ -1346,11 +1365,13 @@ static inline int __spi_unmap_msg(struct spi_controller *ctlr,
}
static void spi_dma_sync_for_device(struct spi_controller *ctrl,
struct spi_message *msg,
struct spi_transfer *xfer)
{
}
static void spi_dma_sync_for_cpu(struct spi_controller *ctrl,
struct spi_message *msg,
struct spi_transfer *xfer)
{
}
@@ -1622,10 +1643,10 @@ static int spi_transfer_one_message(struct spi_controller *ctlr,
reinit_completion(&ctlr->xfer_completion);
fallback_pio:
spi_dma_sync_for_device(ctlr, xfer);
spi_dma_sync_for_device(ctlr, msg, xfer);
ret = ctlr->transfer_one(ctlr, msg->spi, xfer);
if (ret < 0) {
spi_dma_sync_for_cpu(ctlr, xfer);
spi_dma_sync_for_cpu(ctlr, msg, xfer);
if (ctlr->cur_msg_mapped &&
(xfer->error & SPI_TRANS_FAIL_NO_START)) {
@@ -1650,7 +1671,7 @@ static int spi_transfer_one_message(struct spi_controller *ctlr,
msg->status = ret;
}
spi_dma_sync_for_cpu(ctlr, xfer);
spi_dma_sync_for_cpu(ctlr, msg, xfer);
} else {
if (xfer->len)
dev_err(&msg->spi->dev,