Commit 60c1beef authored by Mark Brown

Merge remote-tracking branches 'spi/fix/atmel', 'spi/fix/doc', 'spi/fix/dw', 'spi/fix/img-spfi', 'spi/fix/pl022' and 'spi/fix/ti-qspi' into spi-linus
@@ -764,17 +764,17 @@ static void atmel_spi_pdc_next_xfer(struct spi_master *master,
 			(unsigned long long)xfer->rx_dma);
 	}
 
-	/* REVISIT: We're waiting for ENDRX before we start the next
+	/* REVISIT: We're waiting for RXBUFF before we start the next
 	 * transfer because we need to handle some difficult timing
-	 * issues otherwise. If we wait for ENDTX in one transfer and
-	 * then starts waiting for ENDRX in the next, it's difficult
-	 * to tell the difference between the ENDRX interrupt we're
-	 * actually waiting for and the ENDRX interrupt of the
+	 * issues otherwise. If we wait for TXBUFE in one transfer and
+	 * then starts waiting for RXBUFF in the next, it's difficult
+	 * to tell the difference between the RXBUFF interrupt we're
+	 * actually waiting for and the RXBUFF interrupt of the
	 * previous transfer.
	 *
	 * It should be doable, though. Just not now...
	 */
-	spi_writel(as, IER, SPI_BIT(ENDRX) | SPI_BIT(OVRES));
+	spi_writel(as, IER, SPI_BIT(RXBUFF) | SPI_BIT(OVRES));
 	spi_writel(as, PTCR, SPI_BIT(TXTEN) | SPI_BIT(RXTEN));
 }
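For readers without the AT91 documentation at hand: in the PDC, ENDRX fires as soon as the current receive counter (RCR) reaches zero, while RXBUFF fires only once both RCR and the next-buffer counter (RNCR) are zero, i.e. the chained buffer has also drained (ENDTX and TXBUFE are the transmit-side equivalents). Waiting on RXBUFF therefore removes the ambiguity the comment describes. Condensed for reference, paraphrasing datasheet semantics rather than quoting this commit:

	/*
	 * ENDRX  : RCR == 0               -> current RX buffer consumed
	 * RXBUFF : RCR == 0 && RNCR == 0  -> current AND chained RX buffer done
	 * ENDTX  : TCR == 0               -> current TX buffer sent
	 * TXBUFE : TCR == 0 && TNCR == 0  -> current AND chained TX buffer sent
	 */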
@@ -139,6 +139,9 @@ static struct dma_async_tx_descriptor *dw_spi_dma_prepare_tx(struct dw_spi *dws)
 			1,
 			DMA_MEM_TO_DEV,
 			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+	if (!txdesc)
+		return NULL;
+
 	txdesc->callback = dw_spi_dma_tx_done;
 	txdesc->callback_param = dws;
@@ -184,6 +187,9 @@ static struct dma_async_tx_descriptor *dw_spi_dma_prepare_rx(struct dw_spi *dws)
 			1,
 			DMA_DEV_TO_MEM,
 			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+	if (!rxdesc)
+		return NULL;
+
 	rxdesc->callback = dw_spi_dma_rx_done;
 	rxdesc->callback_param = dws;
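Context for the two hunks above: the dmaengine prepare hooks may return NULL, for instance when the DMA driver momentarily runs out of descriptors, so assigning ->callback on the unchecked result is a NULL dereference. A minimal sketch of the guarded pattern, assuming the generic dmaengine API; chan, sgl, sg_len, my_tx_done and ctx are illustrative names, not from this commit:

	struct dma_async_tx_descriptor *desc;

	/* Prepare can fail and return NULL under descriptor exhaustion. */
	desc = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_MEM_TO_DEV,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return NULL;			/* caller can fall back to PIO */

	desc->callback = my_tx_done;		/* safe: desc checked above */
	desc->callback_param = ctx;
	dmaengine_submit(desc);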
@@ -36,13 +36,13 @@ struct spi_pci_desc {
 
 static struct spi_pci_desc spi_pci_mid_desc_1 = {
 	.setup = dw_spi_mid_init,
-	.num_cs = 32,
+	.num_cs = 5,
 	.bus_num = 0,
 };
 
 static struct spi_pci_desc spi_pci_mid_desc_2 = {
 	.setup = dw_spi_mid_init,
-	.num_cs = 4,
+	.num_cs = 2,
 	.bus_num = 1,
 };
@@ -621,14 +621,14 @@ static void spi_hw_init(struct device *dev, struct dw_spi *dws)
 	if (!dws->fifo_len) {
 		u32 fifo;
 
-		for (fifo = 2; fifo <= 256; fifo++) {
+		for (fifo = 1; fifo < 256; fifo++) {
 			dw_writew(dws, DW_SPI_TXFLTR, fifo);
 			if (fifo != dw_readw(dws, DW_SPI_TXFLTR))
 				break;
 		}
 		dw_writew(dws, DW_SPI_TXFLTR, 0);
 
-		dws->fifo_len = (fifo == 2) ? 0 : fifo - 1;
+		dws->fifo_len = (fifo == 1) ? 0 : fifo;
 		dev_dbg(dev, "Detected FIFO size: %u bytes\n", dws->fifo_len);
 	}
 }
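How this probe works, for anyone reading the loop cold: the TX FIFO threshold register only latches values from 0 to depth - 1, so the driver writes increasing values until one fails to read back; the first write that does not stick equals the FIFO depth. The old loop started at 2 and reported fifo - 1, undercounting every FIFO by one entry. A self-contained toy model of the fixed logic; depth, write_txfltr and read_txfltr are stand-ins invented for illustration:

	#include <stdio.h>

	static unsigned int reg;		/* models DW_SPI_TXFLTR */
	static const unsigned int depth = 16;	/* the FIFO depth to detect */

	/* The threshold register only accepts 0..depth-1. */
	static void write_txfltr(unsigned int v) { if (v < depth) reg = v; }
	static unsigned int read_txfltr(void) { return reg; }

	int main(void)
	{
		unsigned int fifo;

		for (fifo = 1; fifo < 256; fifo++) {
			write_txfltr(fifo);
			if (fifo != read_txfltr())
				break;		/* first value that fails to latch */
		}
		write_txfltr(0);

		/* Exits with fifo == 16; the old code would have reported 15. */
		printf("detected FIFO size: %u\n", (fifo == 1) ? 0 : fifo);
		return 0;
	}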
@@ -459,6 +459,13 @@ static int img_spfi_transfer_one(struct spi_master *master,
 	unsigned long flags;
 	int ret;
 
+	if (xfer->len > SPFI_TRANSACTION_TSIZE_MASK) {
+		dev_err(spfi->dev,
+			"Transfer length (%d) is greater than the max supported (%d)",
+			xfer->len, SPFI_TRANSACTION_TSIZE_MASK);
+		return -EINVAL;
+	}
+
 	/*
	 * Stop all DMA and reset the controller if the previous transaction
	 * timed-out and never completed it's DMA.
@@ -534,12 +534,12 @@ static void giveback(struct pl022 *pl022)
 	pl022->cur_msg = NULL;
 	pl022->cur_transfer = NULL;
 	pl022->cur_chip = NULL;
-	spi_finalize_current_message(pl022->master);
 
 	/* disable the SPI/SSP operation */
 	writew((readw(SSP_CR1(pl022->virtbase)) &
 		(~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase));
+
+	spi_finalize_current_message(pl022->master);
 }
 
 /**
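Why the move matters: spi_finalize_current_message() signals the SPI core that the controller is free, so the core may begin the next message from another context right away. Finalizing while the SSP block is still enabled and about to be torn down opens a race; disabling the hardware first closes it. A timeline sketch of the old hazard; the interleaving shown is hypothetical, illustrative only:

	/*
	 * CPU0: giveback()                  CPU1: SPI core queue
	 * ----------------                  --------------------
	 * spi_finalize_current_message()
	 *                                   dispatches the next message,
	 *                                   starts a new transfer on the SSP
	 * writew(... & ~SSP_CR1_MASK_SSE)   <- disables SSP underneath the
	 *                                      transfer that just started
	 *
	 * With the fix, SSP is disabled before the core is told to proceed.
	 */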
@@ -101,6 +101,7 @@ struct ti_qspi {
 #define QSPI_FLEN(n)			((n - 1) << 0)
 
 /* STATUS REGISTER */
+#define BUSY				0x01
 #define WC				0x02
 
 /* INTERRUPT REGISTER */
@@ -199,6 +200,21 @@ static void ti_qspi_restore_ctx(struct ti_qspi *qspi)
 	ti_qspi_write(qspi, ctx_reg->clkctrl, QSPI_SPI_CLOCK_CNTRL_REG);
 }
 
+static inline u32 qspi_is_busy(struct ti_qspi *qspi)
+{
+	u32 stat;
+	unsigned long timeout = jiffies + QSPI_COMPLETION_TIMEOUT;
+
+	stat = ti_qspi_read(qspi, QSPI_SPI_STATUS_REG);
+	while ((stat & BUSY) && time_after(timeout, jiffies)) {
+		cpu_relax();
+		stat = ti_qspi_read(qspi, QSPI_SPI_STATUS_REG);
+	}
+
+	WARN(stat & BUSY, "qspi busy\n");
+	return stat & BUSY;
+}
+
 static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t)
 {
 	int wlen, count;
@@ -211,6 +227,9 @@ static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t)
 	wlen = t->bits_per_word >> 3;	/* in bytes */
 
 	while (count) {
+		if (qspi_is_busy(qspi))
+			return -EBUSY;
+
 		switch (wlen) {
 		case 1:
 			dev_dbg(qspi->dev, "tx cmd %08x dc %08x data %02x\n",
@@ -266,6 +285,9 @@ static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t)
 	while (count) {
 		dev_dbg(qspi->dev, "rx cmd %08x dc %08x\n", cmd, qspi->dc);
 
+		if (qspi_is_busy(qspi))
+			return -EBUSY;
+
 		ti_qspi_write(qspi, cmd, QSPI_SPI_CMD_REG);
 		if (!wait_for_completion_timeout(&qspi->transfer_complete,
 						 QSPI_COMPLETION_TIMEOUT)) {
@@ -649,7 +649,7 @@ struct spi_transfer {
 * sequence completes. On some systems, many such sequences can execute as
 * as single programmed DMA transfer. On all systems, these messages are
 * queued, and might complete after transactions to other devices. Messages
- * sent to a given spi_device are alway executed in FIFO order.
+ * sent to a given spi_device are always executed in FIFO order.
 *
 * The code that submits an spi_message (and its spi_transfers)
 * to the lower layers is responsible for managing its memory.
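The paragraph being fixed documents the spi_message contract: transfers are batched into one message, and messages queued to the same device complete in submission order. A minimal sketch of that usage, assuming the standard spi_message_init()/spi_message_add_tail()/spi_sync() API; example_read_reg and its parameters are invented for illustration:

	static int example_read_reg(struct spi_device *spi,
				    const u8 *cmd, size_t cmd_len,
				    u8 *resp, size_t resp_len)
	{
		struct spi_transfer xfers[2] = {
			{ .tx_buf = cmd,  .len = cmd_len  },	/* command out */
			{ .rx_buf = resp, .len = resp_len },	/* response in */
		};
		struct spi_message msg;

		spi_message_init(&msg);
		spi_message_add_tail(&xfers[0], &msg);
		spi_message_add_tail(&xfers[1], &msg);

		/* Blocks until done; messages to this spi_device are
		 * always executed in FIFO order, as documented above. */
		return spi_sync(spi, &msg);
	}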