Commit 3aad3f03 authored by Linus Torvalds

Merge tag 'spi-for-linus' of git://git.secretlab.ca/git/linux

Pull SPI changes from Grant Likely:
 "Changes to both core spi code and spi device drivers.  The driver
  changes are the usual set of bug fixes and platform enablement.

  Core code changes include:

   - More intelligent assignment of SPI bus numbers when using DT

   - Common mechanism for using gpios as CS lines

   - Pull checks for bits_per_word and transfer speed out of drivers and
     into core code

   - Ensure temporary DMA buffers are DMA safe"
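
The third core change above drives many of the one-line driver edits in the diff below: the checks for bits_per_word and transfer speed now live in the core, which also fills in a missing per-transfer bits_per_word from the device default before calling into the driver. A minimal before/after sketch of the pattern (illustrative, not any specific driver):

/* before: each driver open-coded the fallback to the device default */
bits = t->bits_per_word ? : spi->bits_per_word;

/* after: the core has already defaulted and range-checked the value */
bits = t->bits_per_word;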

* tag 'spi-for-linus' of git://git.secretlab.ca/git/linux: (50 commits)
  spi: Document cs_gpios and cs_gpio in kernel-doc
  spi/of: Fix initialization of cs_gpios array
  spi/pxa2xx: add support for Lynxpoint SPI controllers
  spi/pxa2xx: add support for Intel Low Power Subsystem SPI
  spi/pxa2xx: add support for SPI_LOOP
  spi/pxa2xx: add support for runtime PM
  spi/pxa2xx: add support for DMA engine
  spi/pxa2xx: break out the private DMA API usage into a separate file
  spi/ath79: add shutdown handler
  spi/mips-lantiq: set SPI_MASTER_HALF_DUPLEX flag
  spi/mips-lantiq: make use of spi_finalize_current_message
  spi/bcm63xx: work around inability to keep CS up
  spi/davinci: use request_threaded_irq() to fix deadlock
  spi/orion: Use module_platform_driver()
  spi/bcm63xx: reject transfers unable to transfer
  spi: Ensure memory used for spi_write_then_read() is DMA safe
  spi/spi-mpc512x-psc: init mode bits supported by the driver
  spi/mpc512x-psc: don't use obsolet cell-index property
  spi: Remove erroneous __init, __exit and __exit_p() references in drivers
  spi/s3c64xx: fix checkpatch warnings and error
  ...
parents 10b6339e 095c3752
Renesas MSIOF spi controller
Required properties:
- compatible : "renesas,sh-msiof" for SuperH or
"renesas,sh-mobile-msiof" for SH Mobile series
- reg : Offset and length of the register set for the device
- interrupts : interrupt line used by MSIOF
Optional properties:
- num-cs : total number of chip-selects
- renesas,tx-fifo-size : Overrides the default tx fifo size given in words
- renesas,rx-fifo-size : Overrides the default rx fifo size given in words
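A minimal sketch of how a driver might consume the optional FIFO-size overrides above, assuming the usual <linux/of.h> helper (illustrative only, not taken from the sh-msiof driver; the function name is made up):

static void msiof_parse_fifo_sizes(struct device_node *np,
				   u32 *tx_words, u32 *rx_words)
{
	/* of_property_read_u32() leaves the output untouched when the
	 * property is absent, so the built-in defaults survive */
	of_property_read_u32(np, "renesas,tx-fifo-size", tx_words);
	of_property_read_u32(np, "renesas,rx-fifo-size", rx_words);
}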
......@@ -7168,6 +7168,7 @@ F: drivers/clk/spear/
SPI SUBSYSTEM
M: Grant Likely <grant.likely@secretlab.ca>
M: Mark Brown <broonie@opensource.wolfsonmicro.com>
L: spi-devel-general@lists.sourceforge.net
Q: http://patchwork.kernel.org/project/spi-devel-general/list/
T: git git://git.secretlab.ca/git/linux-2.6.git
......
......@@ -297,9 +297,20 @@ config SPI_PPC4xx
help
This selects a driver for the PPC4xx SPI Controller.
config SPI_PXA2XX_PXADMA
bool "PXA2xx SSP legacy PXA DMA API support"
depends on SPI_PXA2XX && ARCH_PXA
help
Enable PXA private legacy DMA API support. Note that this is
deprecated in favor of generic DMA engine API.
config SPI_PXA2XX_DMA
def_bool y
depends on SPI_PXA2XX && !SPI_PXA2XX_PXADMA
config SPI_PXA2XX
tristate "PXA2xx SSP SPI master"
depends on (ARCH_PXA || (X86_32 && PCI)) && EXPERIMENTAL
depends on ARCH_PXA || PCI || ACPI
select PXA_SSP if ARCH_PXA
help
This enables using a PXA2xx or Sodaville SSP port as a SPI master
......@@ -307,7 +318,7 @@ config SPI_PXA2XX
additional documentation can be found at Documentation/spi/pxa2xx.
config SPI_PXA2XX_PCI
def_bool SPI_PXA2XX && X86_32 && PCI
def_tristate SPI_PXA2XX && PCI
config SPI_RSPI
tristate "Renesas RSPI controller"
......
......@@ -47,7 +47,10 @@ obj-$(CONFIG_SPI_OMAP24XX) += spi-omap2-mcspi.o
obj-$(CONFIG_SPI_ORION) += spi-orion.o
obj-$(CONFIG_SPI_PL022) += spi-pl022.o
obj-$(CONFIG_SPI_PPC4xx) += spi-ppc4xx.o
obj-$(CONFIG_SPI_PXA2XX) += spi-pxa2xx.o
spi-pxa2xx-platform-objs := spi-pxa2xx.o
spi-pxa2xx-platform-$(CONFIG_SPI_PXA2XX_PXADMA) += spi-pxa2xx-pxadma.o
spi-pxa2xx-platform-$(CONFIG_SPI_PXA2XX_DMA) += spi-pxa2xx-dma.o
obj-$(CONFIG_SPI_PXA2XX) += spi-pxa2xx-platform.o
obj-$(CONFIG_SPI_PXA2XX_PCI) += spi-pxa2xx-pci.o
obj-$(CONFIG_SPI_RSPI) += spi-rspi.o
obj-$(CONFIG_SPI_S3C24XX) += spi-s3c24xx-hw.o
......
......@@ -134,7 +134,7 @@ static int altera_spi_txrx(struct spi_device *spi, struct spi_transfer *t)
hw->tx = t->tx_buf;
hw->rx = t->rx_buf;
hw->count = 0;
hw->bytes_per_word = (t->bits_per_word ? : spi->bits_per_word) / 8;
hw->bytes_per_word = t->bits_per_word / 8;
hw->len = t->len / hw->bytes_per_word;
if (hw->irq >= 0) {
......
......@@ -24,17 +24,24 @@
#include <linux/spi/spi_bitbang.h>
#include <linux/bitops.h>
#include <linux/gpio.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <asm/mach-ath79/ar71xx_regs.h>
#include <asm/mach-ath79/ath79_spi_platform.h>
#define DRV_NAME "ath79-spi"
#define ATH79_SPI_RRW_DELAY_FACTOR 12000
#define MHZ (1000 * 1000)
struct ath79_spi {
struct spi_bitbang bitbang;
u32 ioc_base;
u32 reg_ctrl;
void __iomem *base;
struct clk *clk;
unsigned rrw_delay;
};
static inline u32 ath79_spi_rr(struct ath79_spi *sp, unsigned reg)
......@@ -52,6 +59,12 @@ static inline struct ath79_spi *ath79_spidev_to_sp(struct spi_device *spi)
return spi_master_get_devdata(spi->master);
}
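/*
 * Each ath79_spi_rr()/ath79_spi_wr() register access already costs roughly
 * sp->rrw_delay nanoseconds on the AHB bus, so only the remainder of the
 * requested bit time is busy-waited here.
 */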
static inline void ath79_spi_delay(struct ath79_spi *sp, unsigned nsecs)
{
if (nsecs > sp->rrw_delay)
ndelay(nsecs - sp->rrw_delay);
}
static void ath79_spi_chipselect(struct spi_device *spi, int is_active)
{
struct ath79_spi *sp = ath79_spidev_to_sp(spi);
......@@ -83,15 +96,8 @@ static void ath79_spi_chipselect(struct spi_device *spi, int is_active)
}
static int ath79_spi_setup_cs(struct spi_device *spi)
static void ath79_spi_enable(struct ath79_spi *sp)
{
struct ath79_spi *sp = ath79_spidev_to_sp(spi);
struct ath79_spi_controller_data *cdata;
cdata = spi->controller_data;
if (spi->chip_select && !cdata)
return -EINVAL;
/* enable GPIO mode */
ath79_spi_wr(sp, AR71XX_SPI_REG_FS, AR71XX_SPI_FS_GPIO);
......@@ -101,44 +107,48 @@ static int ath79_spi_setup_cs(struct spi_device *spi)
/* TODO: setup speed? */
ath79_spi_wr(sp, AR71XX_SPI_REG_CTRL, 0x43);
}
if (spi->chip_select) {
int status = 0;
static void ath79_spi_disable(struct ath79_spi *sp)
{
/* restore CTRL register */
ath79_spi_wr(sp, AR71XX_SPI_REG_CTRL, sp->reg_ctrl);
/* disable GPIO mode */
ath79_spi_wr(sp, AR71XX_SPI_REG_FS, 0);
}
status = gpio_request(cdata->gpio, dev_name(&spi->dev));
if (status)
return status;
static int ath79_spi_setup_cs(struct spi_device *spi)
{
struct ath79_spi_controller_data *cdata;
int status;
status = gpio_direction_output(cdata->gpio,
spi->mode & SPI_CS_HIGH);
if (status) {
gpio_free(cdata->gpio);
return status;
}
} else {
cdata = spi->controller_data;
if (spi->chip_select && !cdata)
return -EINVAL;
status = 0;
if (spi->chip_select) {
unsigned long flags;
flags = GPIOF_DIR_OUT;
if (spi->mode & SPI_CS_HIGH)
sp->ioc_base |= AR71XX_SPI_IOC_CS0;
flags |= GPIOF_INIT_HIGH;
else
sp->ioc_base &= ~AR71XX_SPI_IOC_CS0;
ath79_spi_wr(sp, AR71XX_SPI_REG_IOC, sp->ioc_base);
flags |= GPIOF_INIT_LOW;
status = gpio_request_one(cdata->gpio, flags,
dev_name(&spi->dev));
}
return 0;
return status;
}
static void ath79_spi_cleanup_cs(struct spi_device *spi)
{
struct ath79_spi *sp = ath79_spidev_to_sp(spi);
if (spi->chip_select) {
struct ath79_spi_controller_data *cdata = spi->controller_data;
gpio_free(cdata->gpio);
}
/* restore CTRL register */
ath79_spi_wr(sp, AR71XX_SPI_REG_CTRL, sp->reg_ctrl);
/* disable GPIO mode */
ath79_spi_wr(sp, AR71XX_SPI_REG_FS, 0);
}
static int ath79_spi_setup(struct spi_device *spi)
......@@ -184,7 +194,11 @@ static u32 ath79_spi_txrx_mode0(struct spi_device *spi, unsigned nsecs,
/* setup MSB (to slave) on trailing edge */
ath79_spi_wr(sp, AR71XX_SPI_REG_IOC, out);
ath79_spi_delay(sp, nsecs);
ath79_spi_wr(sp, AR71XX_SPI_REG_IOC, out | AR71XX_SPI_IOC_CLK);
ath79_spi_delay(sp, nsecs);
if (bits == 1)
ath79_spi_wr(sp, AR71XX_SPI_REG_IOC, out);
word <<= 1;
}
......@@ -198,6 +212,7 @@ static int ath79_spi_probe(struct platform_device *pdev)
struct ath79_spi *sp;
struct ath79_spi_platform_data *pdata;
struct resource *r;
unsigned long rate;
int ret;
master = spi_alloc_master(&pdev->dev, sizeof(*sp));
......@@ -236,12 +251,39 @@ static int ath79_spi_probe(struct platform_device *pdev)
goto err_put_master;
}
sp->clk = clk_get(&pdev->dev, "ahb");
if (IS_ERR(sp->clk)) {
ret = PTR_ERR(sp->clk);
goto err_unmap;
}
ret = clk_enable(sp->clk);
if (ret)
goto err_clk_put;
rate = DIV_ROUND_UP(clk_get_rate(sp->clk), MHZ);
if (!rate) {
ret = -EINVAL;
goto err_clk_disable;
}
sp->rrw_delay = ATH79_SPI_RRW_DELAY_FACTOR / rate;
dev_dbg(&pdev->dev, "register read/write delay is %u nsecs\n",
sp->rrw_delay);
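/* e.g. with an assumed 200 MHz AHB clock: rate = 200, so
 * rrw_delay = 12000 / 200 = 60 ns */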
ath79_spi_enable(sp);
ret = spi_bitbang_start(&sp->bitbang);
if (ret)
goto err_unmap;
goto err_disable;
return 0;
err_disable:
ath79_spi_disable(sp);
err_clk_disable:
clk_disable(sp->clk);
err_clk_put:
clk_put(sp->clk);
err_unmap:
iounmap(sp->base);
err_put_master:
......@@ -256,6 +298,9 @@ static int ath79_spi_remove(struct platform_device *pdev)
struct ath79_spi *sp = platform_get_drvdata(pdev);
spi_bitbang_stop(&sp->bitbang);
ath79_spi_disable(sp);
clk_disable(sp->clk);
clk_put(sp->clk);
iounmap(sp->base);
platform_set_drvdata(pdev, NULL);
spi_master_put(sp->bitbang.master);
......@@ -263,9 +308,15 @@ static int ath79_spi_remove(struct platform_device *pdev)
return 0;
}
static void ath79_spi_shutdown(struct platform_device *pdev)
{
ath79_spi_remove(pdev);
}
static struct platform_driver ath79_spi_driver = {
.probe = ath79_spi_probe,
.remove = ath79_spi_remove,
.shutdown = ath79_spi_shutdown,
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
......
......@@ -1088,7 +1088,7 @@ static struct platform_driver atmel_spi_driver = {
.suspend = atmel_spi_suspend,
.resume = atmel_spi_resume,
.probe = atmel_spi_probe,
.remove = __exit_p(atmel_spi_remove),
.remove = atmel_spi_remove,
};
module_platform_driver(atmel_spi_driver);
......
......@@ -717,7 +717,7 @@ static void au1550_spi_bits_handlers_set(struct au1550_spi *hw, int bpw)
}
}
static void __init au1550_spi_setup_psc_as_spi(struct au1550_spi *hw)
static void au1550_spi_setup_psc_as_spi(struct au1550_spi *hw)
{
u32 stat, cfg;
......@@ -766,7 +766,7 @@ static void __init au1550_spi_setup_psc_as_spi(struct au1550_spi *hw)
}
static int __init au1550_spi_probe(struct platform_device *pdev)
static int au1550_spi_probe(struct platform_device *pdev)
{
struct au1550_spi *hw;
struct spi_master *master;
......@@ -968,7 +968,7 @@ static int __init au1550_spi_probe(struct platform_device *pdev)
return err;
}
static int __exit au1550_spi_remove(struct platform_device *pdev)
static int au1550_spi_remove(struct platform_device *pdev)
{
struct au1550_spi *hw = platform_get_drvdata(pdev);
......@@ -997,7 +997,7 @@ static int __exit au1550_spi_remove(struct platform_device *pdev)
MODULE_ALIAS("platform:au1550-spi");
static struct platform_driver au1550_spi_drv = {
.remove = __exit_p(au1550_spi_remove),
.remove = au1550_spi_remove,
.driver = {
.name = "au1550-spi",
.owner = THIS_MODULE,
......
......@@ -37,6 +37,8 @@
#define PFX KBUILD_MODNAME
#define BCM63XX_SPI_MAX_PREPEND 15
struct bcm63xx_spi {
struct completion done;
......@@ -49,16 +51,10 @@ struct bcm63xx_spi {
unsigned int msg_type_shift;
unsigned int msg_ctl_width;
/* Data buffers */
const unsigned char *tx_ptr;
unsigned char *rx_ptr;
/* data iomem */
u8 __iomem *tx_io;
const u8 __iomem *rx_io;
int remaining_bytes;
struct clk *clk;
struct platform_device *pdev;
};
......@@ -175,24 +171,17 @@ static int bcm63xx_spi_setup(struct spi_device *spi)
return 0;
}
/* Fill the TX FIFO with as many bytes as possible */
static void bcm63xx_spi_fill_tx_fifo(struct bcm63xx_spi *bs)
{
u8 size;
/* Fill the Tx FIFO with as many bytes as possible */
size = bs->remaining_bytes < bs->fifo_size ? bs->remaining_bytes :
bs->fifo_size;
memcpy_toio(bs->tx_io, bs->tx_ptr, size);
bs->remaining_bytes -= size;
}
static unsigned int bcm63xx_txrx_bufs(struct spi_device *spi,
struct spi_transfer *t)
static int bcm63xx_txrx_bufs(struct spi_device *spi, struct spi_transfer *first,
unsigned int num_transfers)
{
struct bcm63xx_spi *bs = spi_master_get_devdata(spi->master);
u16 msg_ctl;
u16 cmd;
u8 rx_tail;
unsigned int i, timeout = 0, prepend_len = 0, len = 0;
struct spi_transfer *t = first;
bool do_rx = false;
bool do_tx = false;
/* Disable the CMD_DONE interrupt */
bcm_spi_writeb(bs, 0, SPI_INT_MASK);
......@@ -200,25 +189,45 @@ static unsigned int bcm63xx_txrx_bufs(struct spi_device *spi,
dev_dbg(&spi->dev, "txrx: tx %p, rx %p, len %d\n",
t->tx_buf, t->rx_buf, t->len);
/* Transmitter is inhibited */
bs->tx_ptr = t->tx_buf;
bs->rx_ptr = t->rx_buf;
if (num_transfers > 1 && t->tx_buf && t->len <= BCM63XX_SPI_MAX_PREPEND)
prepend_len = t->len;
if (t->tx_buf) {
bs->remaining_bytes = t->len;
bcm63xx_spi_fill_tx_fifo(bs);
/* prepare the buffer */
for (i = 0; i < num_transfers; i++) {
if (t->tx_buf) {
do_tx = true;
memcpy_toio(bs->tx_io + len, t->tx_buf, t->len);
/* don't prepend more than one tx */
if (t != first)
prepend_len = 0;
}
if (t->rx_buf) {
do_rx = true;
/* prepend is half-duplex write only */
if (t == first)
prepend_len = 0;
}
len += t->len;
t = list_entry(t->transfer_list.next, struct spi_transfer,
transfer_list);
}
len -= prepend_len;
init_completion(&bs->done);
/* Fill in the Message control register */
msg_ctl = (t->len << SPI_BYTE_CNT_SHIFT);
msg_ctl = (len << SPI_BYTE_CNT_SHIFT);
if (t->rx_buf && t->tx_buf)
if (do_rx && do_tx && prepend_len == 0)
msg_ctl |= (SPI_FD_RW << bs->msg_type_shift);
else if (t->rx_buf)
else if (do_rx)
msg_ctl |= (SPI_HD_R << bs->msg_type_shift);
else if (t->tx_buf)
else if (do_tx)
msg_ctl |= (SPI_HD_W << bs->msg_type_shift);
switch (bs->msg_ctl_width) {
......@@ -232,14 +241,41 @@ static unsigned int bcm63xx_txrx_bufs(struct spi_device *spi,
/* Issue the transfer */
cmd = SPI_CMD_START_IMMEDIATE;
cmd |= (0 << SPI_CMD_PREPEND_BYTE_CNT_SHIFT);
cmd |= (prepend_len << SPI_CMD_PREPEND_BYTE_CNT_SHIFT);
cmd |= (spi->chip_select << SPI_CMD_DEVICE_ID_SHIFT);
bcm_spi_writew(bs, cmd, SPI_CMD);
/* Enable the CMD_DONE interrupt */
bcm_spi_writeb(bs, SPI_INTR_CMD_DONE, SPI_INT_MASK);
return t->len - bs->remaining_bytes;
timeout = wait_for_completion_timeout(&bs->done, HZ);
if (!timeout)
return -ETIMEDOUT;
/* read out all data */
rx_tail = bcm_spi_readb(bs, SPI_RX_TAIL);
if (do_rx && rx_tail != len)
return -EIO;
if (!rx_tail)
return 0;
len = 0;
t = first;
/* Read out all the data */
for (i = 0; i < num_transfers; i++) {
if (t->rx_buf)
memcpy_fromio(t->rx_buf, bs->rx_io + len, t->len);
if (t != first || prepend_len == 0)
len += t->len;
t = list_entry(t->transfer_list.next, struct spi_transfer,
transfer_list);
}
return 0;
}
static int bcm63xx_spi_prepare_transfer(struct spi_master *master)
......@@ -264,41 +300,76 @@ static int bcm63xx_spi_transfer_one(struct spi_master *master,
struct spi_message *m)
{
struct bcm63xx_spi *bs = spi_master_get_devdata(master);
struct spi_transfer *t;
struct spi_transfer *t, *first = NULL;
struct spi_device *spi = m->spi;
int status = 0;
unsigned int timeout = 0;
unsigned int n_transfers = 0, total_len = 0;
bool can_use_prepend = false;
/*
* This SPI controller does not support keeping CS active after a
* transfer.
* Work around this by merging as many transfers as we can into one big
* full-duplex transfer.
*/
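/*
 * Example (hypothetical message): a short tx-only command transfer followed
 * by an rx-only data transfer is issued as a single half-duplex read, with
 * the command bytes carried in the prepend count of the SPI_CMD register.
 */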
list_for_each_entry(t, &m->transfers, transfer_list) {
unsigned int len = t->len;
u8 rx_tail;
status = bcm63xx_spi_check_transfer(spi, t);
if (status < 0)
goto exit;
/* configure adapter for a new transfer */
bcm63xx_spi_setup_transfer(spi, t);
if (!first)
first = t;
n_transfers++;
total_len += t->len;
if (n_transfers == 2 && !first->rx_buf && !t->tx_buf &&
first->len <= BCM63XX_SPI_MAX_PREPEND)
can_use_prepend = true;
else if (can_use_prepend && t->tx_buf)
can_use_prepend = false;
/* we can only transfer one fifo worth of data */
if ((can_use_prepend &&
total_len > (bs->fifo_size + BCM63XX_SPI_MAX_PREPEND)) ||
(!can_use_prepend && total_len > bs->fifo_size)) {
dev_err(&spi->dev, "unable to do transfers larger than FIFO size (%i > %i)\n",
total_len, bs->fifo_size);
status = -EINVAL;
goto exit;
}
/* all combined transfers have to have the same speed */
if (t->speed_hz != first->speed_hz) {
dev_err(&spi->dev, "unable to change speed between transfers\n");
status = -EINVAL;
goto exit;
}
while (len) {
/* send the data */
len -= bcm63xx_txrx_bufs(spi, t);
/* CS will be deasserted directly after transfer */
if (t->delay_usecs) {
dev_err(&spi->dev, "unable to keep CS asserted after transfer\n");
status = -EINVAL;
goto exit;
}
if (t->cs_change ||
list_is_last(&t->transfer_list, &m->transfers)) {
/* configure adapter for a new transfer */
bcm63xx_spi_setup_transfer(spi, first);
timeout = wait_for_completion_timeout(&bs->done, HZ);
if (!timeout) {
status = -ETIMEDOUT;
/* send the data */
status = bcm63xx_txrx_bufs(spi, first, n_transfers);
if (status)
goto exit;
}
/* read out all data */
rx_tail = bcm_spi_readb(bs, SPI_RX_TAIL);
m->actual_length += total_len;
/* Read out all the data */
if (rx_tail)
memcpy_fromio(bs->rx_ptr, bs->rx_io, rx_tail);
first = NULL;
n_transfers = 0;
total_len = 0;
can_use_prepend = false;
}
m->actual_length += t->len;
}
exit:
m->status = status;
......
......@@ -416,8 +416,7 @@ bfin_sport_spi_pump_transfers(unsigned long data)
drv_data->cs_change = transfer->cs_change;
/* Bits per word setup */
bits_per_word = transfer->bits_per_word ? :
message->spi->bits_per_word ? : 8;
bits_per_word = transfer->bits_per_word;
if (bits_per_word % 16 == 0)
drv_data->ops = &bfin_sport_transfer_ops_u16;
else
......
......@@ -642,8 +642,7 @@ static void bfin_spi_pump_transfers(unsigned long data)
drv_data->cs_change = transfer->cs_change;
/* Bits per word setup */
bits_per_word = transfer->bits_per_word ? :
message->spi->bits_per_word ? : 8;
bits_per_word = transfer->bits_per_word;
if (bits_per_word % 16 == 0) {
drv_data->n_bytes = bits_per_word/8;
drv_data->len = (transfer->len) >> 1;
......@@ -1274,7 +1273,7 @@ static int bfin_spi_destroy_queue(struct bfin_spi_master_data *drv_data)
return 0;
}
static int __init bfin_spi_probe(struct platform_device *pdev)
static int bfin_spi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct bfin5xx_spi_master *platform_info;
......
......@@ -69,7 +69,7 @@ static unsigned bitbang_txrx_8(
unsigned ns,
struct spi_transfer *t
) {
unsigned bits = t->bits_per_word ? : spi->bits_per_word;
unsigned bits = t->bits_per_word;
unsigned count = t->len;
const u8 *tx = t->tx_buf;
u8 *rx = t->rx_buf;
......@@ -95,7 +95,7 @@ static unsigned bitbang_txrx_16(
unsigned ns,
struct spi_transfer *t
) {
unsigned bits = t->bits_per_word ? : spi->bits_per_word;
unsigned bits = t->bits_per_word;
unsigned count = t->len;
const u16 *tx = t->tx_buf;
u16 *rx = t->rx_buf;
......@@ -121,7 +121,7 @@ static unsigned bitbang_txrx_32(
unsigned ns,
struct spi_transfer *t
) {
unsigned bits = t->bits_per_word ? : spi->bits_per_word;
unsigned bits = t->bits_per_word;
unsigned count = t->len;
const u32 *tx = t->tx_buf;
u32 *rx = t->rx_buf;
......@@ -427,40 +427,41 @@ EXPORT_SYMBOL_GPL(spi_bitbang_transfer);
*/
int spi_bitbang_start(struct spi_bitbang *bitbang)
{
int status;
struct spi_master *master = bitbang->master;
int status;
if (!bitbang->master || !bitbang->chipselect)
if (!master || !bitbang->chipselect)
return -EINVAL;
INIT_WORK(&bitbang->work, bitbang_work);
spin_lock_init(&bitbang->lock);
INIT_LIST_HEAD(&bitbang->queue);
if (!bitbang->master->mode_bits)
bitbang->master->mode_bits = SPI_CPOL | SPI_CPHA | bitbang->flags;
if (!master->mode_bits)
master->mode_bits = SPI_CPOL | SPI_CPHA | bitbang->flags;
if (!bitbang->master->transfer)
bitbang->master->transfer = spi_bitbang_transfer;
if (!master->transfer)
master->transfer = spi_bitbang_transfer;
if (!bitbang->txrx_bufs) {
bitbang->use_dma = 0;
bitbang->txrx_bufs = spi_bitbang_bufs;
if (!bitbang->master->setup) {
if (!master->setup) {
if (!bitbang->setup_transfer)
bitbang->setup_transfer =
spi_bitbang_setup_transfer;
bitbang->master->setup = spi_bitbang_setup;
bitbang->master->cleanup = spi_bitbang_cleanup;
master->setup = spi_bitbang_setup;
master->cleanup = spi_bitbang_cleanup;
}
} else if (!bitbang->master->setup)
} else if (!master->setup)
return -EINVAL;
if (bitbang->master->transfer == spi_bitbang_transfer &&
if (master->transfer == spi_bitbang_transfer &&
!bitbang->setup_transfer)
return -EINVAL;
/* this task is the only thing to touch the SPI bits */
bitbang->busy = 0;
bitbang->workqueue = create_singlethread_workqueue(
dev_name(bitbang->master->dev.parent));
dev_name(master->dev.parent));
if (bitbang->workqueue == NULL) {
status = -EBUSY;
goto err1;
......@@ -469,7 +470,7 @@ int spi_bitbang_start(struct spi_bitbang *bitbang)
/* driver may get busy before register() returns, especially
* if someone registered boardinfo for devices
*/
status = spi_register_master(bitbang->master);
status = spi_register_master(master);
if (status < 0)
goto err2;
......
......@@ -68,7 +68,7 @@ static int spi_clps711x_setup_xfer(struct spi_device *spi,
struct spi_transfer *xfer)
{
u32 speed = xfer->speed_hz ? : spi->max_speed_hz;
u8 bpw = xfer->bits_per_word ? : spi->bits_per_word;
u8 bpw = xfer->bits_per_word;
struct spi_clps711x_data *hw = spi_master_get_devdata(spi->master);
if (bpw != 8) {
......
......@@ -329,8 +329,7 @@ static int mcfqspi_transfer_one_message(struct spi_master *master,
mcfqspi_cs_select(mcfqspi, spi->chip_select, cs_high);
mcfqspi_wr_qir(mcfqspi, MCFQSPI_QIR_SPIFE);
if ((t->bits_per_word ? t->bits_per_word :
spi->bits_per_word) == 8)
if (t->bits_per_word == 8)
mcfqspi_transfer_msg8(mcfqspi, t->len, t->tx_buf,
t->rx_buf);
else
......
......@@ -28,6 +28,8 @@
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/edma.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
#include <linux/slab.h>
......@@ -135,7 +137,7 @@ struct davinci_spi {
int dma_rx_chnum;
int dma_tx_chnum;
struct davinci_spi_platform_data *pdata;
struct davinci_spi_platform_data pdata;
void (*get_rx)(u32 rx_data, struct davinci_spi *);
u32 (*get_tx)(struct davinci_spi *);
......@@ -213,7 +215,7 @@ static void davinci_spi_chipselect(struct spi_device *spi, int value)
bool gpio_chipsel = false;
dspi = spi_master_get_devdata(spi->master);
pdata = dspi->pdata;
pdata = &dspi->pdata;
if (pdata->chip_sel && chip_sel < pdata->num_chipselect &&
pdata->chip_sel[chip_sel] != SPI_INTERN_CS)
......@@ -392,7 +394,7 @@ static int davinci_spi_setup(struct spi_device *spi)
struct davinci_spi_platform_data *pdata;
dspi = spi_master_get_devdata(spi->master);
pdata = dspi->pdata;
pdata = &dspi->pdata;
/* if bits per word length is zero then set it default 8 */
if (!spi->bits_per_word)
......@@ -534,7 +536,7 @@ static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t)
struct scatterlist sg_rx, sg_tx;
dspi = spi_master_get_devdata(spi->master);
pdata = dspi->pdata;
pdata = &dspi->pdata;
spicfg = (struct davinci_spi_config *)spi->controller_data;
if (!spicfg)
spicfg = &davinci_spi_default_cfg;
......@@ -699,6 +701,19 @@ static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t)
return ret;
}
/**
* dummy_thread_fn - dummy thread function
* @irq: IRQ number for this SPI Master
* @context_data: structure for SPI Master controller davinci_spi
*
* This is to satisfy the request_threaded_irq() API so that the irq
* handler is called in interrupt context.
*/
static irqreturn_t dummy_thread_fn(s32 irq, void *data)
{
return IRQ_HANDLED;
}
/**
* davinci_spi_irq - Interrupt handler for SPI Master Controller
* @irq: IRQ number for this SPI Master
......@@ -758,6 +773,70 @@ static int davinci_spi_request_dma(struct davinci_spi *dspi)
return r;
}
#if defined(CONFIG_OF)
static const struct of_device_id davinci_spi_of_match[] = {
{
.compatible = "ti,dm644x-spi",
},
{
.compatible = "ti,da8xx-spi",
.data = (void *)SPI_VERSION_2,
},
{ },
};
MODULE_DEVICE_TABLE(of, davinci_spi_of_match);
/**
* spi_davinci_get_pdata - Get platform data from DTS binding
* @pdev: ptr to platform data
* @dspi: ptr to driver data
*
* Parses and populates pdata in dspi from device tree bindings.
*
* NOTE: Not all platform data params are supported currently.
*/
static int spi_davinci_get_pdata(struct platform_device *pdev,
struct davinci_spi *dspi)
{
struct device_node *node = pdev->dev.of_node;
struct davinci_spi_platform_data *pdata;
unsigned int num_cs, intr_line = 0;
const struct of_device_id *match;
pdata = &dspi->pdata;
pdata->version = SPI_VERSION_1;
match = of_match_device(of_match_ptr(davinci_spi_of_match),
&pdev->dev);
if (!match)
return -ENODEV;
/* match data has the SPI version number for SPI_VERSION_2 */
if (match->data == (void *)SPI_VERSION_2)
pdata->version = SPI_VERSION_2;
/*
* default num_cs is 1 and all chipsel are internal to the chip
* indicated by chip_sel being NULL. GPIO based CS is not
* supported yet in DT bindings.
*/
num_cs = 1;
of_property_read_u32(node, "num-cs", &num_cs);
pdata->num_chipselect = num_cs;
of_property_read_u32(node, "ti,davinci-spi-intr-line", &intr_line);
pdata->intr_line = intr_line;
return 0;
}
#else
#define davinci_spi_of_match NULL
static int spi_davinci_get_pdata(struct platform_device *pdev,
struct davinci_spi *dspi)
{
return -ENODEV;
}
#endif
/**
* davinci_spi_probe - probe function for SPI Master Controller
* @pdev: platform_device structure which contains platform specific data
......@@ -780,12 +859,6 @@ static int davinci_spi_probe(struct platform_device *pdev)
int i = 0, ret = 0;
u32 spipc0;
pdata = pdev->dev.platform_data;
if (pdata == NULL) {
ret = -ENODEV;
goto err;
}
master = spi_alloc_master(&pdev->dev, sizeof(struct davinci_spi));
if (master == NULL) {
ret = -ENOMEM;
......@@ -800,6 +873,19 @@ static int davinci_spi_probe(struct platform_device *pdev)
goto free_master;
}
if (pdev->dev.platform_data) {
pdata = pdev->dev.platform_data;
dspi->pdata = *pdata;
} else {
/* update dspi pdata with that from the DT */
ret = spi_davinci_get_pdata(pdev, dspi);
if (ret < 0)
goto free_master;
}
/* pdata in dspi is now updated and point pdata to that */
pdata = &dspi->pdata;
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (r == NULL) {
ret = -ENOENT;
......@@ -807,7 +893,6 @@ static int davinci_spi_probe(struct platform_device *pdev)
}
dspi->pbase = r->start;
dspi->pdata = pdata;
mem = request_mem_region(r->start, resource_size(r), pdev->name);
if (mem == NULL) {
......@@ -827,8 +912,8 @@ static int davinci_spi_probe(struct platform_device *pdev)
goto unmap_io;
}
ret = request_irq(dspi->irq, davinci_spi_irq, 0, dev_name(&pdev->dev),
dspi);
ret = request_threaded_irq(dspi->irq, davinci_spi_irq, dummy_thread_fn,
0, dev_name(&pdev->dev), dspi);
if (ret)
goto unmap_io;
......@@ -843,8 +928,9 @@ static int davinci_spi_probe(struct platform_device *pdev)
ret = -ENODEV;
goto put_master;
}
clk_enable(dspi->clk);
clk_prepare_enable(dspi->clk);
master->dev.of_node = pdev->dev.of_node;
master->bus_num = pdev->id;
master->num_chipselect = pdata->num_chipselect;
master->setup = davinci_spi_setup;
......@@ -927,7 +1013,7 @@ static int davinci_spi_probe(struct platform_device *pdev)
dma_release_channel(dspi->dma_rx);
dma_release_channel(dspi->dma_tx);
free_clk:
clk_disable(dspi->clk);
clk_disable_unprepare(dspi->clk);
clk_put(dspi->clk);
put_master:
spi_master_put(master);
......@@ -963,7 +1049,7 @@ static int davinci_spi_remove(struct platform_device *pdev)
spi_bitbang_stop(&dspi->bitbang);
clk_disable(dspi->clk);
clk_disable_unprepare(dspi->clk);
clk_put(dspi->clk);
spi_master_put(master);
free_irq(dspi->irq, dspi);
......@@ -978,6 +1064,7 @@ static struct platform_driver davinci_spi_driver = {
.driver = {
.name = "spi_davinci",
.owner = THIS_MODULE,
.of_match_table = davinci_spi_of_match,
},
.probe = davinci_spi_probe,
.remove = davinci_spi_remove,
......
......@@ -446,7 +446,7 @@ static inline int bits_per_word(const struct ep93xx_spi *espi)
struct spi_message *msg = espi->current_msg;
struct spi_transfer *t = msg->state;
return t->bits_per_word ? t->bits_per_word : msg->spi->bits_per_word;
return t->bits_per_word;
}
static void ep93xx_do_write(struct ep93xx_spi *espi, struct spi_transfer *t)
......
......@@ -398,7 +398,7 @@ static int falcon_sflash_xfer_one(struct spi_master *master,
}
m->status = ret;
m->complete(m->context);
spi_finalize_current_message(master);
return 0;
}
......@@ -423,6 +423,7 @@ static int falcon_sflash_probe(struct platform_device *pdev)
master->mode_bits = SPI_MODE_3;
master->num_chipselect = 1;
master->flags = SPI_MASTER_HALF_DUPLEX;
master->bus_num = -1;
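/* a negative bus_num asks the spi core to assign a dynamic bus number */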
master->setup = falcon_sflash_setup;
master->prepare_transfer_hardware = falcon_sflash_prepare_xfer;
......
......@@ -365,9 +365,26 @@ static int spi_gpio_probe_dt(struct platform_device *pdev)
if (!pdata)
return -ENOMEM;
pdata->sck = of_get_named_gpio(np, "gpio-sck", 0);
pdata->miso = of_get_named_gpio(np, "gpio-miso", 0);
pdata->mosi = of_get_named_gpio(np, "gpio-mosi", 0);
ret = of_get_named_gpio(np, "gpio-sck", 0);
if (ret < 0) {
dev_err(&pdev->dev, "gpio-sck property not found\n");
goto error_free;
}
pdata->sck = ret;
ret = of_get_named_gpio(np, "gpio-miso", 0);
if (ret < 0) {
dev_info(&pdev->dev, "gpio-miso property not found, switching to no-rx mode\n");
pdata->miso = SPI_GPIO_NO_MISO;
} else
pdata->miso = ret;
ret = of_get_named_gpio(np, "gpio-mosi", 0);
if (ret < 0) {
dev_info(&pdev->dev, "gpio-mosi property not found, switching to no-tx mode\n");
pdata->mosi = SPI_GPIO_NO_MOSI;
} else
pdata->mosi = ret;
ret = of_property_read_u32(np, "num-chipselects", &tmp);
if (ret < 0) {
......
......@@ -949,3 +949,4 @@ module_platform_driver(spi_imx_driver);
MODULE_DESCRIPTION("SPI Master Controller driver");
MODULE_AUTHOR("Sascha Hauer, Pengutronix");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRIVER_NAME);
......@@ -438,6 +438,7 @@ static int mpc512x_psc_spi_do_probe(struct device *dev, u32 regaddr,
master->num_chipselect = pdata->max_chipselect;
}
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST;
master->setup = mpc512x_psc_spi_setup;
master->transfer = mpc512x_psc_spi_transfer;
master->cleanup = mpc512x_psc_spi_cleanup;
......@@ -522,17 +523,11 @@ static int mpc512x_psc_spi_of_probe(struct platform_device *op)
regaddr64 = of_translate_address(op->dev.of_node, regaddr_p);
/* get PSC id (0..11, used by port_config) */
if (op->dev.platform_data == NULL) {
const u32 *psc_nump;
psc_nump = of_get_property(op->dev.of_node, "cell-index", NULL);
if (!psc_nump || *psc_nump > 11) {
dev_err(&op->dev, "mpc512x_psc_spi: Device node %s "
"has invalid cell-index property\n",
op->dev.of_node->full_name);
return -EINVAL;
}
id = *psc_nump;
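/* an alias such as "spi0 = &psc3;" (hypothetical label) in the DT /aliases
 * node now supplies the PSC number previously taken from cell-index */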
id = of_alias_get_id(op->dev.of_node, "spi");
if (id < 0) {
dev_err(&op->dev, "no alias id for %s\n",
op->dev.of_node->full_name);
return id;
}
return mpc512x_psc_spi_do_probe(&op->dev, (u32) regaddr64, (u32) size64,
......
......@@ -241,6 +241,7 @@ static int mxs_spi_txrx_dma(struct mxs_spi *spi, int cs,
INIT_COMPLETION(spi->c);
ctrl0 = readl(ssp->base + HW_SSP_CTRL0);
ctrl0 &= ~BM_SSP_CTRL0_XFER_COUNT;
ctrl0 |= BM_SSP_CTRL0_DATA_XFER | mxs_spi_cs_to_reg(cs);
if (*first)
......@@ -256,8 +257,10 @@ static int mxs_spi_txrx_dma(struct mxs_spi *spi, int cs,
if ((sg_count + 1 == sgs) && *last)
ctrl0 |= BM_SSP_CTRL0_IGNORE_CRC;
if (ssp->devid == IMX23_SSP)
if (ssp->devid == IMX23_SSP) {
ctrl0 &= ~BM_SSP_CTRL0_XFER_COUNT;
ctrl0 |= min;
}
dma_xfer[sg_count].pio[0] = ctrl0;
dma_xfer[sg_count].pio[3] = min;
......
......@@ -481,7 +481,7 @@ static int omap1_spi100k_transfer(struct spi_device *spi, struct spi_message *m)
return 0;
}
static int __init omap1_spi100k_reset(struct omap1_spi100k *spi100k)
static int omap1_spi100k_reset(struct omap1_spi100k *spi100k)
{
return 0;
}
......@@ -560,7 +560,7 @@ static int omap1_spi100k_probe(struct platform_device *pdev)
return status;
}
static int __exit omap1_spi100k_remove(struct platform_device *pdev)
static int omap1_spi100k_remove(struct platform_device *pdev)
{
struct spi_master *master;
struct omap1_spi100k *spi100k;
......@@ -604,7 +604,7 @@ static struct platform_driver omap1_spi100k_driver = {
.name = "omap1_spi100k",
.owner = THIS_MODULE,
},
.remove = __exit_p(omap1_spi100k_remove),
.remove = omap1_spi100k_remove,
};
......
......@@ -476,7 +476,7 @@ static void uwire_off(struct uwire_spi *uwire)
spi_master_put(uwire->bitbang.master);
}
static int __init uwire_probe(struct platform_device *pdev)
static int uwire_probe(struct platform_device *pdev)
{
struct spi_master *master;
struct uwire_spi *uwire;
......@@ -536,7 +536,7 @@ static int __init uwire_probe(struct platform_device *pdev)
return status;
}
static int __exit uwire_remove(struct platform_device *pdev)
static int uwire_remove(struct platform_device *pdev)
{
struct uwire_spi *uwire = dev_get_drvdata(&pdev->dev);
int status;
......@@ -557,7 +557,7 @@ static struct platform_driver uwire_driver = {
.name = "omap_uwire",
.owner = THIS_MODULE,
},
.remove = __exit_p(uwire_remove),
.remove = uwire_remove,
// suspend ... unuse ck
// resume ... use ck
};
......
......@@ -298,10 +298,10 @@ static void omap2_mcspi_rx_callback(void *data)
struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master);
struct omap2_mcspi_dma *mcspi_dma = &mcspi->dma_channels[spi->chip_select];
complete(&mcspi_dma->dma_rx_completion);
/* We must disable the DMA RX request */
omap2_mcspi_set_dma_req(spi, 1, 0);
complete(&mcspi_dma->dma_rx_completion);
}
static void omap2_mcspi_tx_callback(void *data)
......@@ -310,10 +310,10 @@ static void omap2_mcspi_tx_callback(void *data)
struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master);
struct omap2_mcspi_dma *mcspi_dma = &mcspi->dma_channels[spi->chip_select];
complete(&mcspi_dma->dma_tx_completion);
/* We must disable the DMA TX request */
omap2_mcspi_set_dma_req(spi, 0, 0);
complete(&mcspi_dma->dma_tx_completion);
}
static void omap2_mcspi_tx_dma(struct spi_device *spi,
......@@ -927,6 +927,7 @@ static void omap2_mcspi_work(struct omap2_mcspi *mcspi, struct spi_message *m)
struct spi_device *spi;
struct spi_transfer *t = NULL;
struct spi_master *master;
int cs_active = 0;
struct omap2_mcspi_cs *cs;
struct omap2_mcspi_device_config *cd;
......@@ -935,6 +936,7 @@ static void omap2_mcspi_work(struct omap2_mcspi *mcspi, struct spi_message *m)
u32 chconf;
spi = m->spi;
master = spi->master;
cs = spi->controller_state;
cd = spi->controller_data;
......@@ -952,6 +954,14 @@ static void omap2_mcspi_work(struct omap2_mcspi *mcspi, struct spi_message *m)
if (!t->speed_hz && !t->bits_per_word)
par_override = 0;
}
if (cd && cd->cs_per_word) {
chconf = mcspi->ctx.modulctrl;
chconf &= ~OMAP2_MCSPI_MODULCTRL_SINGLE;
mcspi_write_reg(master, OMAP2_MCSPI_MODULCTRL, chconf);
mcspi->ctx.modulctrl =
mcspi_read_cs_reg(spi, OMAP2_MCSPI_MODULCTRL);
}
if (!cs_active) {
omap2_mcspi_force_cs(spi, 1);
......@@ -1013,6 +1023,14 @@ static void omap2_mcspi_work(struct omap2_mcspi *mcspi, struct spi_message *m)
if (cs_active)
omap2_mcspi_force_cs(spi, 0);
if (cd && cd->cs_per_word) {
chconf = mcspi->ctx.modulctrl;
chconf |= OMAP2_MCSPI_MODULCTRL_SINGLE;
mcspi_write_reg(master, OMAP2_MCSPI_MODULCTRL, chconf);
mcspi->ctx.modulctrl =
mcspi_read_cs_reg(spi, OMAP2_MCSPI_MODULCTRL);
}
omap2_mcspi_set_enable(spi, 0);
m->status = status;
......@@ -1020,7 +1038,7 @@ static void omap2_mcspi_work(struct omap2_mcspi *mcspi, struct spi_message *m)
}
static int omap2_mcspi_transfer_one_message(struct spi_master *master,
struct spi_message *m)
struct spi_message *m)
{
struct omap2_mcspi *mcspi;
struct spi_transfer *t;
......@@ -1041,7 +1059,7 @@ static int omap2_mcspi_transfer_one_message(struct spi_master *master,
|| (len && !(rx_buf || tx_buf))
|| (t->bits_per_word &&
( t->bits_per_word < 4
|| t->bits_per_word > 32))) {
|| t->bits_per_word > 32))) {
dev_dbg(mcspi->dev, "transfer: %d Hz, %d %s%s, %d bpw\n",
t->speed_hz,
len,
......@@ -1052,8 +1070,8 @@ static int omap2_mcspi_transfer_one_message(struct spi_master *master,
}
if (t->speed_hz && t->speed_hz < (OMAP2_MCSPI_MAX_FREQ >> 15)) {
dev_dbg(mcspi->dev, "speed_hz %d below minimum %d Hz\n",
t->speed_hz,
OMAP2_MCSPI_MAX_FREQ >> 15);
t->speed_hz,
OMAP2_MCSPI_MAX_FREQ >> 15);
return -EINVAL;
}
......@@ -1099,7 +1117,7 @@ static int omap2_mcspi_master_setup(struct omap2_mcspi *mcspi)
return ret;
mcspi_write_reg(master, OMAP2_MCSPI_WAKEUPENABLE,
OMAP2_MCSPI_WAKEUPENABLE_WKEN);
OMAP2_MCSPI_WAKEUPENABLE_WKEN);
ctx->wakeupenable = OMAP2_MCSPI_WAKEUPENABLE_WKEN;
omap2_mcspi_set_master_mode(master);
......@@ -1228,7 +1246,7 @@ static int omap2_mcspi_probe(struct platform_device *pdev)
sprintf(dma_ch_name, "rx%d", i);
dma_res = platform_get_resource_byname(pdev, IORESOURCE_DMA,
dma_ch_name);
dma_ch_name);
if (!dma_res) {
dev_dbg(&pdev->dev, "cannot get DMA RX channel\n");
status = -ENODEV;
......@@ -1238,7 +1256,7 @@ static int omap2_mcspi_probe(struct platform_device *pdev)
mcspi->dma_channels[i].dma_rx_sync_dev = dma_res->start;
sprintf(dma_ch_name, "tx%d", i);
dma_res = platform_get_resource_byname(pdev, IORESOURCE_DMA,
dma_ch_name);
dma_ch_name);
if (!dma_res) {
dev_dbg(&pdev->dev, "cannot get DMA TX channel\n");
status = -ENODEV;
......@@ -1254,7 +1272,7 @@ static int omap2_mcspi_probe(struct platform_device *pdev)
pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
if (IS_ERR(pinctrl))
dev_warn(&pdev->dev,
"pins are not configured from the driver\n");
"pins are not configured from the driver\n");
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTOSUSPEND_TIMEOUT);
......
......@@ -366,7 +366,7 @@ static int orion_spi_transfer_one_message(struct spi_master *master,
return 0;
}
static int __init orion_spi_reset(struct orion_spi *orion_spi)
static int orion_spi_reset(struct orion_spi *orion_spi)
{
/* Verify that the CS is deasserted */
orion_spi_set_cs(orion_spi, 0);
......@@ -396,7 +396,7 @@ static int orion_spi_setup(struct spi_device *spi)
return 0;
}
static int __init orion_spi_probe(struct platform_device *pdev)
static int orion_spi_probe(struct platform_device *pdev)
{
struct spi_master *master;
struct orion_spi *spi;
......@@ -479,7 +479,7 @@ static int __init orion_spi_probe(struct platform_device *pdev)
}
static int __exit orion_spi_remove(struct platform_device *pdev)
static int orion_spi_remove(struct platform_device *pdev)
{
struct spi_master *master;
struct resource *r;
......@@ -513,20 +513,11 @@ static struct platform_driver orion_spi_driver = {
.owner = THIS_MODULE,
.of_match_table = of_match_ptr(orion_spi_of_match_table),
},
.remove = __exit_p(orion_spi_remove),
.probe = orion_spi_probe,
.remove = orion_spi_remove,
};
static int __init orion_spi_init(void)
{
return platform_driver_probe(&orion_spi_driver, orion_spi_probe);
}
module_init(orion_spi_init);
static void __exit orion_spi_exit(void)
{
platform_driver_unregister(&orion_spi_driver);
}
module_exit(orion_spi_exit);
module_platform_driver(orion_spi_driver);
MODULE_DESCRIPTION("Orion SPI driver");
MODULE_AUTHOR("Shadi Ammouri <shadi@marvell.com>");
......
......@@ -389,7 +389,7 @@ static void free_gpios(struct ppc4xx_spi *hw)
/*
* platform_device layer stuff...
*/
static int __init spi_ppc4xx_of_probe(struct platform_device *op)
static int spi_ppc4xx_of_probe(struct platform_device *op)
{
struct ppc4xx_spi *hw;
struct spi_master *master;
......@@ -560,7 +560,7 @@ static int __init spi_ppc4xx_of_probe(struct platform_device *op)
return ret;
}
static int __exit spi_ppc4xx_of_remove(struct platform_device *op)
static int spi_ppc4xx_of_remove(struct platform_device *op)
{
struct spi_master *master = dev_get_drvdata(&op->dev);
struct ppc4xx_spi *hw = spi_master_get_devdata(master);
......@@ -583,7 +583,7 @@ MODULE_DEVICE_TABLE(of, spi_ppc4xx_of_match);
static struct platform_driver spi_ppc4xx_of_driver = {
.probe = spi_ppc4xx_of_probe,
.remove = __exit_p(spi_ppc4xx_of_remove),
.remove = spi_ppc4xx_of_remove,
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
......
/*
* PXA2xx SPI DMA engine support.
*
* Copyright (C) 2013, Intel Corporation
* Author: Mika Westerberg <mika.westerberg@linux.intel.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/init.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/pxa2xx_ssp.h>
#include <linux/scatterlist.h>
#include <linux/sizes.h>
#include <linux/spi/spi.h>
#include <linux/spi/pxa2xx_spi.h>
#include "spi-pxa2xx.h"
static int pxa2xx_spi_map_dma_buffer(struct driver_data *drv_data,
enum dma_data_direction dir)
{
int i, nents, len = drv_data->len;
struct scatterlist *sg;
struct device *dmadev;
struct sg_table *sgt;
void *buf, *pbuf;
/*
* Some DMA controllers have problems transferring buffers that are
* not multiple of 4 bytes. So we truncate the transfer so that it
* is suitable for such controllers, and handle the trailing bytes
* manually after the DMA completes.
*
* REVISIT: It would be better if this information could be
* retrieved directly from the DMA device in a similar way than
* ->copy_align etc. is done.
*/
len = ALIGN(drv_data->len, 4);
if (dir == DMA_TO_DEVICE) {
dmadev = drv_data->tx_chan->device->dev;
sgt = &drv_data->tx_sgt;
buf = drv_data->tx;
drv_data->tx_map_len = len;
} else {
dmadev = drv_data->rx_chan->device->dev;
sgt = &drv_data->rx_sgt;
buf = drv_data->rx;
drv_data->rx_map_len = len;
}
nents = DIV_ROUND_UP(len, SZ_2K);
if (nents != sgt->nents) {
int ret;
sg_free_table(sgt);
ret = sg_alloc_table(sgt, nents, GFP_KERNEL);
if (ret)
return ret;
}
pbuf = buf;
for_each_sg(sgt->sgl, sg, sgt->nents, i) {
size_t bytes = min_t(size_t, len, SZ_2K);
if (buf)
sg_set_buf(sg, pbuf, bytes);
else
sg_set_buf(sg, drv_data->dummy, bytes);
pbuf += bytes;
len -= bytes;
}
nents = dma_map_sg(dmadev, sgt->sgl, sgt->nents, dir);
if (!nents)
return -ENOMEM;
return nents;
}
static void pxa2xx_spi_unmap_dma_buffer(struct driver_data *drv_data,
enum dma_data_direction dir)
{
struct device *dmadev;
struct sg_table *sgt;
if (dir == DMA_TO_DEVICE) {
dmadev = drv_data->tx_chan->device->dev;
sgt = &drv_data->tx_sgt;
} else {
dmadev = drv_data->rx_chan->device->dev;
sgt = &drv_data->rx_sgt;
}
dma_unmap_sg(dmadev, sgt->sgl, sgt->nents, dir);
}
static void pxa2xx_spi_unmap_dma_buffers(struct driver_data *drv_data)
{
if (!drv_data->dma_mapped)
return;
pxa2xx_spi_unmap_dma_buffer(drv_data, DMA_FROM_DEVICE);
pxa2xx_spi_unmap_dma_buffer(drv_data, DMA_TO_DEVICE);
drv_data->dma_mapped = 0;
}
static void pxa2xx_spi_dma_transfer_complete(struct driver_data *drv_data,
bool error)
{
struct spi_message *msg = drv_data->cur_msg;
/*
* It is possible that one CPU is handling ROR interrupt and other
* just gets DMA completion. Calling pump_transfers() twice for the
* same transfer leads to problems thus we prevent concurrent calls
* by using ->dma_running.
*/
if (atomic_dec_and_test(&drv_data->dma_running)) {
void __iomem *reg = drv_data->ioaddr;
/*
* If the other CPU is still handling the ROR interrupt we
* might not know about the error yet. So we re-check the
* ROR bit here before we clear the status register.
*/
if (!error) {
u32 status = read_SSSR(reg) & drv_data->mask_sr;
error = status & SSSR_ROR;
}
/* Clear status & disable interrupts */
write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg);
write_SSSR_CS(drv_data, drv_data->clear_sr);
if (!pxa25x_ssp_comp(drv_data))
write_SSTO(0, reg);
if (!error) {
pxa2xx_spi_unmap_dma_buffers(drv_data);
/* Handle the last bytes of unaligned transfer */
drv_data->tx += drv_data->tx_map_len;
drv_data->write(drv_data);
drv_data->rx += drv_data->rx_map_len;
drv_data->read(drv_data);
msg->actual_length += drv_data->len;
msg->state = pxa2xx_spi_next_transfer(drv_data);
} else {
/* In case we got an error we disable the SSP now */
write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg);
msg->state = ERROR_STATE;
}
tasklet_schedule(&drv_data->pump_transfers);
}
}
static void pxa2xx_spi_dma_callback(void *data)
{
pxa2xx_spi_dma_transfer_complete(data, false);
}
static struct dma_async_tx_descriptor *
pxa2xx_spi_dma_prepare_one(struct driver_data *drv_data,
enum dma_transfer_direction dir)
{
struct pxa2xx_spi_master *pdata = drv_data->master_info;
struct chip_data *chip = drv_data->cur_chip;
enum dma_slave_buswidth width;
struct dma_slave_config cfg;
struct dma_chan *chan;
struct sg_table *sgt;
int nents, ret;
switch (drv_data->n_bytes) {
case 1:
width = DMA_SLAVE_BUSWIDTH_1_BYTE;
break;
case 2:
width = DMA_SLAVE_BUSWIDTH_2_BYTES;
break;
default:
width = DMA_SLAVE_BUSWIDTH_4_BYTES;
break;
}
memset(&cfg, 0, sizeof(cfg));
cfg.direction = dir;
if (dir == DMA_MEM_TO_DEV) {
cfg.dst_addr = drv_data->ssdr_physical;
cfg.dst_addr_width = width;
cfg.dst_maxburst = chip->dma_burst_size;
cfg.slave_id = pdata->tx_slave_id;
sgt = &drv_data->tx_sgt;
nents = drv_data->tx_nents;
chan = drv_data->tx_chan;
} else {
cfg.src_addr = drv_data->ssdr_physical;
cfg.src_addr_width = width;
cfg.src_maxburst = chip->dma_burst_size;
cfg.slave_id = pdata->rx_slave_id;
sgt = &drv_data->rx_sgt;
nents = drv_data->rx_nents;
chan = drv_data->rx_chan;
}
ret = dmaengine_slave_config(chan, &cfg);
if (ret) {
dev_warn(&drv_data->pdev->dev, "DMA slave config failed\n");
return NULL;
}
return dmaengine_prep_slave_sg(chan, sgt->sgl, nents, dir,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
}
static bool pxa2xx_spi_dma_filter(struct dma_chan *chan, void *param)
{
const struct pxa2xx_spi_master *pdata = param;
return chan->chan_id == pdata->tx_chan_id ||
chan->chan_id == pdata->rx_chan_id;
}
bool pxa2xx_spi_dma_is_possible(size_t len)
{
return len <= MAX_DMA_LEN;
}
int pxa2xx_spi_map_dma_buffers(struct driver_data *drv_data)
{
const struct chip_data *chip = drv_data->cur_chip;
int ret;
if (!chip->enable_dma)
return 0;
/* Don't bother with DMA if we can't do even a single burst */
if (drv_data->len < chip->dma_burst_size)
return 0;
ret = pxa2xx_spi_map_dma_buffer(drv_data, DMA_TO_DEVICE);
if (ret <= 0) {
dev_warn(&drv_data->pdev->dev, "failed to DMA map TX\n");
return 0;
}
drv_data->tx_nents = ret;
ret = pxa2xx_spi_map_dma_buffer(drv_data, DMA_FROM_DEVICE);
if (ret <= 0) {
pxa2xx_spi_unmap_dma_buffer(drv_data, DMA_TO_DEVICE);
dev_warn(&drv_data->pdev->dev, "failed to DMA map RX\n");
return 0;
}
drv_data->rx_nents = ret;
return 1;
}
irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data)
{
u32 status;
status = read_SSSR(drv_data->ioaddr) & drv_data->mask_sr;
if (status & SSSR_ROR) {
dev_err(&drv_data->pdev->dev, "FIFO overrun\n");
dmaengine_terminate_all(drv_data->rx_chan);
dmaengine_terminate_all(drv_data->tx_chan);
pxa2xx_spi_dma_transfer_complete(drv_data, true);
return IRQ_HANDLED;
}
return IRQ_NONE;
}
int pxa2xx_spi_dma_prepare(struct driver_data *drv_data, u32 dma_burst)
{
struct dma_async_tx_descriptor *tx_desc, *rx_desc;
tx_desc = pxa2xx_spi_dma_prepare_one(drv_data, DMA_MEM_TO_DEV);
if (!tx_desc) {
dev_err(&drv_data->pdev->dev,
"failed to get DMA TX descriptor\n");
return -EBUSY;
}
rx_desc = pxa2xx_spi_dma_prepare_one(drv_data, DMA_DEV_TO_MEM);
if (!rx_desc) {
dev_err(&drv_data->pdev->dev,
"failed to get DMA RX descriptor\n");
return -EBUSY;
}
/* We are ready when RX completes */
rx_desc->callback = pxa2xx_spi_dma_callback;
rx_desc->callback_param = drv_data;
dmaengine_submit(rx_desc);
dmaengine_submit(tx_desc);
return 0;
}
void pxa2xx_spi_dma_start(struct driver_data *drv_data)
{
dma_async_issue_pending(drv_data->rx_chan);
dma_async_issue_pending(drv_data->tx_chan);
atomic_set(&drv_data->dma_running, 1);
}
int pxa2xx_spi_dma_setup(struct driver_data *drv_data)
{
struct pxa2xx_spi_master *pdata = drv_data->master_info;
dma_cap_mask_t mask;
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
drv_data->dummy = devm_kzalloc(&drv_data->pdev->dev, SZ_2K, GFP_KERNEL);
if (!drv_data->dummy)
return -ENOMEM;
drv_data->tx_chan = dma_request_channel(mask, pxa2xx_spi_dma_filter,
pdata);
if (!drv_data->tx_chan)
return -ENODEV;
drv_data->rx_chan = dma_request_channel(mask, pxa2xx_spi_dma_filter,
pdata);
if (!drv_data->rx_chan) {
dma_release_channel(drv_data->tx_chan);
drv_data->tx_chan = NULL;
return -ENODEV;
}
return 0;
}
void pxa2xx_spi_dma_release(struct driver_data *drv_data)
{
if (drv_data->rx_chan) {
dmaengine_terminate_all(drv_data->rx_chan);
dma_release_channel(drv_data->rx_chan);
sg_free_table(&drv_data->rx_sgt);
drv_data->rx_chan = NULL;
}
if (drv_data->tx_chan) {
dmaengine_terminate_all(drv_data->tx_chan);
dma_release_channel(drv_data->tx_chan);
sg_free_table(&drv_data->tx_sgt);
drv_data->tx_chan = NULL;
}
}
void pxa2xx_spi_dma_resume(struct driver_data *drv_data)
{
}
int pxa2xx_spi_set_dma_burst_and_threshold(struct chip_data *chip,
struct spi_device *spi,
u8 bits_per_word, u32 *burst_code,
u32 *threshold)
{
struct pxa2xx_spi_chip *chip_info = spi->controller_data;
/*
* If the DMA burst size is given in chip_info we use that,
* otherwise we use the default. Also we use the default FIFO
* thresholds for now.
*/
*burst_code = chip_info ? chip_info->dma_burst_size : 16;
*threshold = SSCR1_RxTresh(RX_THRESH_DFLT)
| SSCR1_TxTresh(TX_THRESH_DFLT);
return 0;
}
......@@ -8,147 +8,58 @@
#include <linux/module.h>
#include <linux/spi/pxa2xx_spi.h>
struct ce4100_info {
struct ssp_device ssp;
struct platform_device *spi_pdev;
};
static DEFINE_MUTEX(ssp_lock);
static LIST_HEAD(ssp_list);
struct ssp_device *pxa_ssp_request(int port, const char *label)
{
struct ssp_device *ssp = NULL;
mutex_lock(&ssp_lock);
list_for_each_entry(ssp, &ssp_list, node) {
if (ssp->port_id == port && ssp->use_count == 0) {
ssp->use_count++;
ssp->label = label;
break;
}
}
mutex_unlock(&ssp_lock);
if (&ssp->node == &ssp_list)
return NULL;
return ssp;
}
EXPORT_SYMBOL_GPL(pxa_ssp_request);
void pxa_ssp_free(struct ssp_device *ssp)
{
mutex_lock(&ssp_lock);
if (ssp->use_count) {
ssp->use_count--;
ssp->label = NULL;
} else
dev_err(&ssp->pdev->dev, "device already free\n");
mutex_unlock(&ssp_lock);
}
EXPORT_SYMBOL_GPL(pxa_ssp_free);
static int ce4100_spi_probe(struct pci_dev *dev,
const struct pci_device_id *ent)
{
struct platform_device_info pi;
int ret;
resource_size_t phys_beg;
resource_size_t phys_len;
struct ce4100_info *spi_info;
struct platform_device *pdev;
struct pxa2xx_spi_master spi_pdata;
struct ssp_device *ssp;
ret = pci_enable_device(dev);
ret = pcim_enable_device(dev);
if (ret)
return ret;
phys_beg = pci_resource_start(dev, 0);
phys_len = pci_resource_len(dev, 0);
if (!request_mem_region(phys_beg, phys_len,
"CE4100 SPI")) {
dev_err(&dev->dev, "Can't request register space.\n");
ret = -EBUSY;
ret = pcim_iomap_regions(dev, 1 << 0, "PXA2xx SPI");
if (ret)
return ret;
}
pdev = platform_device_alloc("pxa2xx-spi", dev->devfn);
spi_info = kzalloc(sizeof(*spi_info), GFP_KERNEL);
if (!pdev || !spi_info ) {
ret = -ENOMEM;
goto err_nomem;
}
memset(&spi_pdata, 0, sizeof(spi_pdata));
spi_pdata.num_chipselect = dev->devfn;
ret = platform_device_add_data(pdev, &spi_pdata, sizeof(spi_pdata));
if (ret)
goto err_nomem;
pdev->dev.parent = &dev->dev;
pdev->dev.of_node = dev->dev.of_node;
ssp = &spi_info->ssp;
ssp = &spi_pdata.ssp;
ssp->phys_base = pci_resource_start(dev, 0);
ssp->mmio_base = ioremap(phys_beg, phys_len);
ssp->mmio_base = pcim_iomap_table(dev)[0];
if (!ssp->mmio_base) {
dev_err(&pdev->dev, "failed to ioremap() registers\n");
ret = -EIO;
goto err_nomem;
dev_err(&dev->dev, "failed to ioremap() registers\n");
return -EIO;
}
ssp->irq = dev->irq;
ssp->port_id = pdev->id;
ssp->port_id = dev->devfn;
ssp->type = PXA25x_SSP;
mutex_lock(&ssp_lock);
list_add(&ssp->node, &ssp_list);
mutex_unlock(&ssp_lock);
memset(&pi, 0, sizeof(pi));
pi.parent = &dev->dev;
pi.name = "pxa2xx-spi";
pi.id = ssp->port_id;
pi.data = &spi_pdata;
pi.size_data = sizeof(spi_pdata);
pci_set_drvdata(dev, spi_info);
pdev = platform_device_register_full(&pi);
if (!pdev)
return -ENOMEM;
ret = platform_device_add(pdev);
if (ret)
goto err_dev_add;
pci_set_drvdata(dev, pdev);
return ret;
err_dev_add:
pci_set_drvdata(dev, NULL);
mutex_lock(&ssp_lock);
list_del(&ssp->node);
mutex_unlock(&ssp_lock);
iounmap(ssp->mmio_base);
err_nomem:
release_mem_region(phys_beg, phys_len);
platform_device_put(pdev);
kfree(spi_info);
return ret;
return 0;
}
static void ce4100_spi_remove(struct pci_dev *dev)
{
struct ce4100_info *spi_info;
struct ssp_device *ssp;
spi_info = pci_get_drvdata(dev);
ssp = &spi_info->ssp;
platform_device_unregister(spi_info->spi_pdev);
iounmap(ssp->mmio_base);
release_mem_region(pci_resource_start(dev, 0),
pci_resource_len(dev, 0));
mutex_lock(&ssp_lock);
list_del(&ssp->node);
mutex_unlock(&ssp_lock);
struct platform_device *pdev = pci_get_drvdata(dev);
pci_set_drvdata(dev, NULL);
pci_disable_device(dev);
kfree(spi_info);
platform_device_unregister(pdev);
}
static DEFINE_PCI_DEVICE_TABLE(ce4100_spi_devices) = {
......
/*
* PXA2xx SPI private DMA support.
*
* Copyright (C) 2005 Stephen Street / StreetFire Sound Labs
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/pxa2xx_ssp.h>
#include <linux/spi/spi.h>
#include <linux/spi/pxa2xx_spi.h>
#include "spi-pxa2xx.h"
#define DMA_INT_MASK (DCSR_ENDINTR | DCSR_STARTINTR | DCSR_BUSERR)
#define RESET_DMA_CHANNEL (DCSR_NODESC | DMA_INT_MASK)
bool pxa2xx_spi_dma_is_possible(size_t len)
{
/* Try to map dma buffer and do a dma transfer if successful, but
* only if the length is non-zero and less than MAX_DMA_LEN.
*
* Zero-length non-descriptor DMA is illegal on PXA2xx; force use
* of PIO instead. Care is needed above because the transfer may
* have been passed with buffers that are already dma mapped.
* A zero-length transfer in PIO mode will not try to write/read
* to/from the buffers
*
* REVISIT large transfers are exactly where we most want to be
* using DMA. If this happens much, split those transfers into
* multiple DMA segments rather than forcing PIO.
*/
return len > 0 && len <= MAX_DMA_LEN;
}
int pxa2xx_spi_map_dma_buffers(struct driver_data *drv_data)
{
struct spi_message *msg = drv_data->cur_msg;
struct device *dev = &msg->spi->dev;
if (!drv_data->cur_chip->enable_dma)
return 0;
if (msg->is_dma_mapped)
return drv_data->rx_dma && drv_data->tx_dma;
if (!IS_DMA_ALIGNED(drv_data->rx) || !IS_DMA_ALIGNED(drv_data->tx))
return 0;
/* Modify setup if rx buffer is null */
if (drv_data->rx == NULL) {
*drv_data->null_dma_buf = 0;
drv_data->rx = drv_data->null_dma_buf;
drv_data->rx_map_len = 4;
} else
drv_data->rx_map_len = drv_data->len;
/* Modify setup if tx buffer is null */
if (drv_data->tx == NULL) {
*drv_data->null_dma_buf = 0;
drv_data->tx = drv_data->null_dma_buf;
drv_data->tx_map_len = 4;
} else
drv_data->tx_map_len = drv_data->len;
/* Stream map the tx buffer. Always do DMA_TO_DEVICE first
* so we flush the cache *before* invalidating it, in case
* the tx and rx buffers overlap.
*/
drv_data->tx_dma = dma_map_single(dev, drv_data->tx,
drv_data->tx_map_len, DMA_TO_DEVICE);
if (dma_mapping_error(dev, drv_data->tx_dma))
return 0;
/* Stream map the rx buffer */
drv_data->rx_dma = dma_map_single(dev, drv_data->rx,
drv_data->rx_map_len, DMA_FROM_DEVICE);
if (dma_mapping_error(dev, drv_data->rx_dma)) {
dma_unmap_single(dev, drv_data->tx_dma,
drv_data->tx_map_len, DMA_TO_DEVICE);
return 0;
}
return 1;
}
static void pxa2xx_spi_unmap_dma_buffers(struct driver_data *drv_data)
{
struct device *dev;
if (!drv_data->dma_mapped)
return;
if (!drv_data->cur_msg->is_dma_mapped) {
dev = &drv_data->cur_msg->spi->dev;
dma_unmap_single(dev, drv_data->rx_dma,
drv_data->rx_map_len, DMA_FROM_DEVICE);
dma_unmap_single(dev, drv_data->tx_dma,
drv_data->tx_map_len, DMA_TO_DEVICE);
}
drv_data->dma_mapped = 0;
}
static int wait_ssp_rx_stall(void const __iomem *ioaddr)
{
unsigned long limit = loops_per_jiffy << 1;
while ((read_SSSR(ioaddr) & SSSR_BSY) && --limit)
cpu_relax();
return limit;
}
static int wait_dma_channel_stop(int channel)
{
unsigned long limit = loops_per_jiffy << 1;
while (!(DCSR(channel) & DCSR_STOPSTATE) && --limit)
cpu_relax();
return limit;
}
static void pxa2xx_spi_dma_error_stop(struct driver_data *drv_data,
const char *msg)
{
void __iomem *reg = drv_data->ioaddr;
/* Stop and reset */
DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
write_SSSR_CS(drv_data, drv_data->clear_sr);
write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg);
if (!pxa25x_ssp_comp(drv_data))
write_SSTO(0, reg);
pxa2xx_spi_flush(drv_data);
write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg);
pxa2xx_spi_unmap_dma_buffers(drv_data);
dev_err(&drv_data->pdev->dev, "%s\n", msg);
drv_data->cur_msg->state = ERROR_STATE;
tasklet_schedule(&drv_data->pump_transfers);
}
static void pxa2xx_spi_dma_transfer_complete(struct driver_data *drv_data)
{
void __iomem *reg = drv_data->ioaddr;
struct spi_message *msg = drv_data->cur_msg;
/* Clear and disable interrupts on SSP and DMA channels */
write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg);
write_SSSR_CS(drv_data, drv_data->clear_sr);
DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
if (wait_dma_channel_stop(drv_data->rx_channel) == 0)
dev_err(&drv_data->pdev->dev,
"dma_handler: dma rx channel stop failed\n");
if (wait_ssp_rx_stall(drv_data->ioaddr) == 0)
dev_err(&drv_data->pdev->dev,
"dma_transfer: ssp rx stall failed\n");
pxa2xx_spi_unmap_dma_buffers(drv_data);
/* update the buffer pointer for the amount completed in dma */
drv_data->rx += drv_data->len -
(DCMD(drv_data->rx_channel) & DCMD_LENGTH);
/* Read trailing data from the fifo; it does not matter how many
* bytes are in the fifo, just read until the buffer is full
* or the fifo is empty, whichever occurs first */
drv_data->read(drv_data);
/* return count of what was actually read */
msg->actual_length += drv_data->len -
(drv_data->rx_end - drv_data->rx);
/* Transfer delays and chip select release are
* handled in pump_transfers or giveback
*/
/* Move to next transfer */
msg->state = pxa2xx_spi_next_transfer(drv_data);
/* Schedule transfer tasklet */
tasklet_schedule(&drv_data->pump_transfers);
}
void pxa2xx_spi_dma_handler(int channel, void *data)
{
struct driver_data *drv_data = data;
u32 irq_status = DCSR(channel) & DMA_INT_MASK;
if (irq_status & DCSR_BUSERR) {
if (channel == drv_data->tx_channel)
pxa2xx_spi_dma_error_stop(drv_data,
"dma_handler: bad bus address on tx channel");
else
pxa2xx_spi_dma_error_stop(drv_data,
"dma_handler: bad bus address on rx channel");
return;
}
/* PXA25x_SSP has no timeout interrupt, wait for trailing bytes */
if ((channel == drv_data->tx_channel)
&& (irq_status & DCSR_ENDINTR)
&& (drv_data->ssp_type == PXA25x_SSP)) {
/* Wait for rx to stall */
if (wait_ssp_rx_stall(drv_data->ioaddr) == 0)
dev_err(&drv_data->pdev->dev,
"dma_handler: ssp rx stall failed\n");
/* finish this transfer, start the next */
pxa2xx_spi_dma_transfer_complete(drv_data);
}
}
irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data)
{
u32 irq_status;
void __iomem *reg = drv_data->ioaddr;
irq_status = read_SSSR(reg) & drv_data->mask_sr;
if (irq_status & SSSR_ROR) {
pxa2xx_spi_dma_error_stop(drv_data,
"dma_transfer: fifo overrun");
return IRQ_HANDLED;
}
/* Check for false positive timeout */
if ((irq_status & SSSR_TINT)
&& (DCSR(drv_data->tx_channel) & DCSR_RUN)) {
write_SSSR(SSSR_TINT, reg);
return IRQ_HANDLED;
}
if (irq_status & SSSR_TINT || drv_data->rx == drv_data->rx_end) {
/* Clear and disable timeout interrupt, do the rest in
* dma_transfer_complete */
if (!pxa25x_ssp_comp(drv_data))
write_SSTO(0, reg);
/* finish this transfer, start the next */
pxa2xx_spi_dma_transfer_complete(drv_data);
return IRQ_HANDLED;
}
/* Oops, problem detected */
return IRQ_NONE;
}
int pxa2xx_spi_dma_prepare(struct driver_data *drv_data, u32 dma_burst)
{
u32 dma_width;
switch (drv_data->n_bytes) {
case 1:
dma_width = DCMD_WIDTH1;
break;
case 2:
dma_width = DCMD_WIDTH2;
break;
default:
dma_width = DCMD_WIDTH4;
break;
}
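/*
 * The rx channel is programmed as flow-control source (DCMD_FLOWSRC) and
 * only increments its target address when a real rx buffer was mapped;
 * the tx channel mirrors this as flow-control target (DCMD_FLOWTRG) with
 * source-address increment only for a real tx buffer. The shared
 * null_dma_buf is used as a dummy endpoint otherwise.
 */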
/* Setup rx DMA Channel */
DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
DSADR(drv_data->rx_channel) = drv_data->ssdr_physical;
DTADR(drv_data->rx_channel) = drv_data->rx_dma;
if (drv_data->rx == drv_data->null_dma_buf)
/* No target address increment */
DCMD(drv_data->rx_channel) = DCMD_FLOWSRC
| dma_width
| dma_burst
| drv_data->len;
else
DCMD(drv_data->rx_channel) = DCMD_INCTRGADDR
| DCMD_FLOWSRC
| dma_width
| dma_burst
| drv_data->len;
/* Setup tx DMA Channel */
DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
DSADR(drv_data->tx_channel) = drv_data->tx_dma;
DTADR(drv_data->tx_channel) = drv_data->ssdr_physical;
if (drv_data->tx == drv_data->null_dma_buf)
/* No source address increment */
DCMD(drv_data->tx_channel) = DCMD_FLOWTRG
| dma_width
| dma_burst
| drv_data->len;
else
DCMD(drv_data->tx_channel) = DCMD_INCSRCADDR
| DCMD_FLOWTRG
| dma_width
| dma_burst
| drv_data->len;
/* Enable dma end irqs on SSP to detect end of transfer */
if (drv_data->ssp_type == PXA25x_SSP)
DCMD(drv_data->tx_channel) |= DCMD_ENDIRQEN;
return 0;
}
void pxa2xx_spi_dma_start(struct driver_data *drv_data)
{
DCSR(drv_data->rx_channel) |= DCSR_RUN;
DCSR(drv_data->tx_channel) |= DCSR_RUN;
}
int pxa2xx_spi_dma_setup(struct driver_data *drv_data)
{
struct device *dev = &drv_data->pdev->dev;
struct ssp_device *ssp = drv_data->ssp;
/* Get two DMA channels (rx and tx) */
drv_data->rx_channel = pxa_request_dma("pxa2xx_spi_ssp_rx",
DMA_PRIO_HIGH,
pxa2xx_spi_dma_handler,
drv_data);
if (drv_data->rx_channel < 0) {
dev_err(dev, "problem (%d) requesting rx channel\n",
drv_data->rx_channel);
return -ENODEV;
}
drv_data->tx_channel = pxa_request_dma("pxa2xx_spi_ssp_tx",
DMA_PRIO_MEDIUM,
pxa2xx_spi_dma_handler,
drv_data);
if (drv_data->tx_channel < 0) {
dev_err(dev, "problem (%d) requesting tx channel\n",
drv_data->tx_channel);
pxa_free_dma(drv_data->rx_channel);
return -ENODEV;
}
DRCMR(ssp->drcmr_rx) = DRCMR_MAPVLD | drv_data->rx_channel;
DRCMR(ssp->drcmr_tx) = DRCMR_MAPVLD | drv_data->tx_channel;
return 0;
}
void pxa2xx_spi_dma_release(struct driver_data *drv_data)
{
struct ssp_device *ssp = drv_data->ssp;
DRCMR(ssp->drcmr_rx) = 0;
DRCMR(ssp->drcmr_tx) = 0;
if (drv_data->tx_channel != 0)
pxa_free_dma(drv_data->tx_channel);
if (drv_data->rx_channel != 0)
pxa_free_dma(drv_data->rx_channel);
}
void pxa2xx_spi_dma_resume(struct driver_data *drv_data)
{
if (drv_data->rx_channel != -1)
DRCMR(drv_data->ssp->drcmr_rx) =
DRCMR_MAPVLD | drv_data->rx_channel;
if (drv_data->tx_channel != -1)
DRCMR(drv_data->ssp->drcmr_tx) =
DRCMR_MAPVLD | drv_data->tx_channel;
}
int pxa2xx_spi_set_dma_burst_and_threshold(struct chip_data *chip,
struct spi_device *spi,
u8 bits_per_word, u32 *burst_code,
u32 *threshold)
{
struct pxa2xx_spi_chip *chip_info =
(struct pxa2xx_spi_chip *)spi->controller_data;
int bytes_per_word;
int burst_bytes;
int thresh_words;
int req_burst_size;
int retval = 0;
/* Set the threshold (in registers) to equal the same amount of data
* as represented by burst size (in bytes). The computation below
* is (burst_size rounded up to nearest 8 byte, word or long word)
* divided by (bytes/register); the tx threshold is the inverse of
* the rx, so that there will always be enough data in the rx fifo
* to satisfy a burst, and there will always be enough space in the
* tx fifo to accept a burst (a tx burst will overwrite the fifo if
* there is not enough space); there must always remain enough empty
* space in the rx fifo for any data loaded to the tx fifo.
* Whenever burst_size (in bytes) equals bits/word, the fifo threshold
* will be 8, or half the fifo;
* The threshold can only be set to 2, 4 or 8, but not 16, because
* to burst 16 to the tx fifo, the fifo would have to be empty;
* however, the minimum fifo trigger level is 1, and the tx will
* request service when the fifo is at this level, with only 15 spaces.
*/
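/*
 * Worked example (illustrative): with bits_per_word = 16 the code below
 * picks bytes_per_word = 2; a requested burst of 16 bytes then yields
 * *burst_code = DCMD_BURST16 and thresh_words = 16 / 2 = 8, i.e. an rx
 * trigger level of 8 entries and a tx trigger level of 16 - 8 = 8.
 */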
/* find bytes/word */
if (bits_per_word <= 8)
bytes_per_word = 1;
else if (bits_per_word <= 16)
bytes_per_word = 2;
else
bytes_per_word = 4;
/* use struct pxa2xx_spi_chip->dma_burst_size if available */
if (chip_info)
req_burst_size = chip_info->dma_burst_size;
else {
switch (chip->dma_burst_size) {
default:
/* if the default burst size is not set,
* do it now */
chip->dma_burst_size = DCMD_BURST8;
case DCMD_BURST8:
req_burst_size = 8;
break;
case DCMD_BURST16:
req_burst_size = 16;
break;
case DCMD_BURST32:
req_burst_size = 32;
break;
}
}
if (req_burst_size <= 8) {
*burst_code = DCMD_BURST8;
burst_bytes = 8;
} else if (req_burst_size <= 16) {
if (bytes_per_word == 1) {
/* don't burst more than 1/2 the fifo */
*burst_code = DCMD_BURST8;
burst_bytes = 8;
retval = 1;
} else {
*burst_code = DCMD_BURST16;
burst_bytes = 16;
}
} else {
if (bytes_per_word == 1) {
/* don't burst more than 1/2 the fifo */
*burst_code = DCMD_BURST8;
burst_bytes = 8;
retval = 1;
} else if (bytes_per_word == 2) {
/* don't burst more than 1/2 the fifo */
*burst_code = DCMD_BURST16;
burst_bytes = 16;
retval = 1;
} else {
*burst_code = DCMD_BURST32;
burst_bytes = 32;
}
}
thresh_words = burst_bytes / bytes_per_word;
/* thresh_words will be between 2 and 8 */
*threshold = (SSCR1_RxTresh(thresh_words) & SSCR1_RFT)
| (SSCR1_TxTresh(16-thresh_words) & SSCR1_TFT);
return retval;
}
/*
* Copyright (C) 2005 Stephen Street / StreetFire Sound Labs
* Copyright (C) 2013, Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
......@@ -24,17 +25,20 @@
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/spi/pxa2xx_spi.h>
#include <linux/dma-mapping.h>
#include <linux/spi/spi.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>
#include <linux/acpi.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/delay.h>
#include "spi-pxa2xx.h"
MODULE_AUTHOR("Stephen Street");
MODULE_DESCRIPTION("PXA2xx SSP SPI Controller");
......@@ -45,12 +49,6 @@ MODULE_ALIAS("platform:pxa2xx-spi");
#define TIMOUT_DFLT 1000
#define DMA_INT_MASK (DCSR_ENDINTR | DCSR_STARTINTR | DCSR_BUSERR)
#define RESET_DMA_CHANNEL (DCSR_NODESC | DMA_INT_MASK)
#define IS_DMA_ALIGNED(x) ((((u32)(x)) & 0x07) == 0)
#define MAX_DMA_LEN 8191
#define DMA_ALIGNMENT 8
/*
* for testing SSCR1 changes that require SSP restart, basically
* everything except the service and interrupt enables, the pxa270 developer
......@@ -65,115 +63,97 @@ MODULE_ALIAS("platform:pxa2xx-spi");
| SSCR1_RFT | SSCR1_TFT | SSCR1_MWDS \
| SSCR1_SPH | SSCR1_SPO | SSCR1_LBM)
#define DEFINE_SSP_REG(reg, off) \
static inline u32 read_##reg(void const __iomem *p) \
{ return __raw_readl(p + (off)); } \
\
static inline void write_##reg(u32 v, void __iomem *p) \
{ __raw_writel(v, p + (off)); }
DEFINE_SSP_REG(SSCR0, 0x00)
DEFINE_SSP_REG(SSCR1, 0x04)
DEFINE_SSP_REG(SSSR, 0x08)
DEFINE_SSP_REG(SSITR, 0x0c)
DEFINE_SSP_REG(SSDR, 0x10)
DEFINE_SSP_REG(SSTO, 0x28)
DEFINE_SSP_REG(SSPSP, 0x2c)
#define START_STATE ((void*)0)
#define RUNNING_STATE ((void*)1)
#define DONE_STATE ((void*)2)
#define ERROR_STATE ((void*)-1)
#define QUEUE_RUNNING 0
#define QUEUE_STOPPED 1
struct driver_data {
/* Driver model hookup */
struct platform_device *pdev;
/* SSP Info */
struct ssp_device *ssp;
#define LPSS_RX_THRESH_DFLT 64
#define LPSS_TX_LOTHRESH_DFLT 160
#define LPSS_TX_HITHRESH_DFLT 224
/* SPI framework hookup */
enum pxa_ssp_type ssp_type;
struct spi_master *master;
/* Offset from drv_data->lpss_base */
#define SPI_CS_CONTROL 0x18
#define SPI_CS_CONTROL_SW_MODE BIT(0)
#define SPI_CS_CONTROL_CS_HIGH BIT(1)
/* PXA hookup */
struct pxa2xx_spi_master *master_info;
/* DMA setup stuff */
int rx_channel;
int tx_channel;
u32 *null_dma_buf;
/* SSP register addresses */
void __iomem *ioaddr;
u32 ssdr_physical;
/* SSP masks */
u32 dma_cr1;
u32 int_cr1;
u32 clear_sr;
u32 mask_sr;
/* Driver message queue */
struct workqueue_struct *workqueue;
struct work_struct pump_messages;
spinlock_t lock;
struct list_head queue;
int busy;
int run;
/* Message Transfer pump */
struct tasklet_struct pump_transfers;
/* Current message transfer state info */
struct spi_message* cur_msg;
struct spi_transfer* cur_transfer;
struct chip_data *cur_chip;
size_t len;
void *tx;
void *tx_end;
void *rx;
void *rx_end;
int dma_mapped;
dma_addr_t rx_dma;
dma_addr_t tx_dma;
size_t rx_map_len;
size_t tx_map_len;
u8 n_bytes;
u32 dma_width;
int (*write)(struct driver_data *drv_data);
int (*read)(struct driver_data *drv_data);
irqreturn_t (*transfer_handler)(struct driver_data *drv_data);
void (*cs_control)(u32 command);
};
static bool is_lpss_ssp(const struct driver_data *drv_data)
{
return drv_data->ssp_type == LPSS_SSP;
}
struct chip_data {
u32 cr0;
u32 cr1;
u32 psp;
u32 timeout;
u8 n_bytes;
u32 dma_width;
u32 dma_burst_size;
u32 threshold;
u32 dma_threshold;
u8 enable_dma;
u8 bits_per_word;
u32 speed_hz;
union {
int gpio_cs;
unsigned int frm;
};
int gpio_cs_inverted;
int (*write)(struct driver_data *drv_data);
int (*read)(struct driver_data *drv_data);
void (*cs_control)(u32 command);
};
/*
* Read and write LPSS SSP private registers. Caller must first check that
* is_lpss_ssp() returns true before these can be called.
*/
static u32 __lpss_ssp_read_priv(struct driver_data *drv_data, unsigned offset)
{
WARN_ON(!drv_data->lpss_base);
return readl(drv_data->lpss_base + offset);
}
static void pump_messages(struct work_struct *work);
static void __lpss_ssp_write_priv(struct driver_data *drv_data,
unsigned offset, u32 value)
{
WARN_ON(!drv_data->lpss_base);
writel(value, drv_data->lpss_base + offset);
}
/*
* lpss_ssp_setup - perform LPSS SSP specific setup
* @drv_data: pointer to the driver private data
*
* Perform LPSS SSP specific setup. This function must be called first if
* one is going to use LPSS SSP private registers.
*/
static void lpss_ssp_setup(struct driver_data *drv_data)
{
unsigned offset = 0x400;
u32 value, orig;
if (!is_lpss_ssp(drv_data))
return;
/*
* Perform auto-detection of the LPSS SSP private registers. They
* can be either at 1k or 2k offset from the base address.
*/
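/*
 * The probe below sets SPI_CS_CONTROL_SW_MODE at the 1k offset and reads
 * it back, first set and then cleared; if either readback does not
 * match, the private registers are assumed to sit at the 2k offset.
 */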
orig = readl(drv_data->ioaddr + offset + SPI_CS_CONTROL);
value = orig | SPI_CS_CONTROL_SW_MODE;
writel(value, drv_data->ioaddr + offset + SPI_CS_CONTROL);
value = readl(drv_data->ioaddr + offset + SPI_CS_CONTROL);
if (value != (orig | SPI_CS_CONTROL_SW_MODE)) {
offset = 0x800;
goto detection_done;
}
value &= ~SPI_CS_CONTROL_SW_MODE;
writel(value, drv_data->ioaddr + offset + SPI_CS_CONTROL);
value = readl(drv_data->ioaddr + offset + SPI_CS_CONTROL);
if (value != orig) {
offset = 0x800;
goto detection_done;
}
detection_done:
/* Now set the LPSS base */
drv_data->lpss_base = drv_data->ioaddr + offset;
/* Enable software chip select control */
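/* CS_HIGH is set here so the chip select starts out deasserted;
* cs_assert() clears it when a transfer actually begins. */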
value = SPI_CS_CONTROL_SW_MODE | SPI_CS_CONTROL_CS_HIGH;
__lpss_ssp_write_priv(drv_data, SPI_CS_CONTROL, value);
}
static void lpss_ssp_cs_control(struct driver_data *drv_data, bool enable)
{
u32 value;
if (!is_lpss_ssp(drv_data))
return;
value = __lpss_ssp_read_priv(drv_data, SPI_CS_CONTROL);
if (enable)
value &= ~SPI_CS_CONTROL_CS_HIGH;
else
value |= SPI_CS_CONTROL_CS_HIGH;
__lpss_ssp_write_priv(drv_data, SPI_CS_CONTROL, value);
}
static void cs_assert(struct driver_data *drv_data)
{
......@@ -189,8 +169,12 @@ static void cs_assert(struct driver_data *drv_data)
return;
}
if (gpio_is_valid(chip->gpio_cs))
if (gpio_is_valid(chip->gpio_cs)) {
gpio_set_value(chip->gpio_cs, chip->gpio_cs_inverted);
return;
}
lpss_ssp_cs_control(drv_data, true);
}
static void cs_deassert(struct driver_data *drv_data)
......@@ -205,30 +189,15 @@ static void cs_deassert(struct driver_data *drv_data)
return;
}
if (gpio_is_valid(chip->gpio_cs))
if (gpio_is_valid(chip->gpio_cs)) {
gpio_set_value(chip->gpio_cs, !chip->gpio_cs_inverted);
}
static void write_SSSR_CS(struct driver_data *drv_data, u32 val)
{
void __iomem *reg = drv_data->ioaddr;
if (drv_data->ssp_type == CE4100_SSP)
val |= read_SSSR(reg) & SSSR_ALT_FRM_MASK;
write_SSSR(val, reg);
}
return;
}
static int pxa25x_ssp_comp(struct driver_data *drv_data)
{
if (drv_data->ssp_type == PXA25x_SSP)
return 1;
if (drv_data->ssp_type == CE4100_SSP)
return 1;
return 0;
lpss_ssp_cs_control(drv_data, false);
}
static int flush(struct driver_data *drv_data)
int pxa2xx_spi_flush(struct driver_data *drv_data)
{
unsigned long limit = loops_per_jiffy << 1;
......@@ -354,7 +323,7 @@ static int u32_reader(struct driver_data *drv_data)
return drv_data->rx == drv_data->rx_end;
}
static void *next_transfer(struct driver_data *drv_data)
void *pxa2xx_spi_next_transfer(struct driver_data *drv_data)
{
struct spi_message *msg = drv_data->cur_msg;
struct spi_transfer *trans = drv_data->cur_transfer;
......@@ -370,89 +339,15 @@ static void *next_transfer(struct driver_data *drv_data)
return DONE_STATE;
}
static int map_dma_buffers(struct driver_data *drv_data)
{
struct spi_message *msg = drv_data->cur_msg;
struct device *dev = &msg->spi->dev;
if (!drv_data->cur_chip->enable_dma)
return 0;
if (msg->is_dma_mapped)
return drv_data->rx_dma && drv_data->tx_dma;
if (!IS_DMA_ALIGNED(drv_data->rx) || !IS_DMA_ALIGNED(drv_data->tx))
return 0;
/* Modify setup if rx buffer is null */
if (drv_data->rx == NULL) {
*drv_data->null_dma_buf = 0;
drv_data->rx = drv_data->null_dma_buf;
drv_data->rx_map_len = 4;
} else
drv_data->rx_map_len = drv_data->len;
/* Modify setup if tx buffer is null */
if (drv_data->tx == NULL) {
*drv_data->null_dma_buf = 0;
drv_data->tx = drv_data->null_dma_buf;
drv_data->tx_map_len = 4;
} else
drv_data->tx_map_len = drv_data->len;
/* Stream map the tx buffer. Always do DMA_TO_DEVICE first
* so we flush the cache *before* invalidating it, in case
* the tx and rx buffers overlap.
*/
drv_data->tx_dma = dma_map_single(dev, drv_data->tx,
drv_data->tx_map_len, DMA_TO_DEVICE);
if (dma_mapping_error(dev, drv_data->tx_dma))
return 0;
/* Stream map the rx buffer */
drv_data->rx_dma = dma_map_single(dev, drv_data->rx,
drv_data->rx_map_len, DMA_FROM_DEVICE);
if (dma_mapping_error(dev, drv_data->rx_dma)) {
dma_unmap_single(dev, drv_data->tx_dma,
drv_data->tx_map_len, DMA_TO_DEVICE);
return 0;
}
return 1;
}
static void unmap_dma_buffers(struct driver_data *drv_data)
{
struct device *dev;
if (!drv_data->dma_mapped)
return;
if (!drv_data->cur_msg->is_dma_mapped) {
dev = &drv_data->cur_msg->spi->dev;
dma_unmap_single(dev, drv_data->rx_dma,
drv_data->rx_map_len, DMA_FROM_DEVICE);
dma_unmap_single(dev, drv_data->tx_dma,
drv_data->tx_map_len, DMA_TO_DEVICE);
}
drv_data->dma_mapped = 0;
}
/* caller already set message->status; dma and pio irqs are blocked */
static void giveback(struct driver_data *drv_data)
{
struct spi_transfer* last_transfer;
unsigned long flags;
struct spi_message *msg;
spin_lock_irqsave(&drv_data->lock, flags);
msg = drv_data->cur_msg;
drv_data->cur_msg = NULL;
drv_data->cur_transfer = NULL;
queue_work(drv_data->workqueue, &drv_data->pump_messages);
spin_unlock_irqrestore(&drv_data->lock, flags);
last_transfer = list_entry(msg->transfers.prev,
struct spi_transfer,
......@@ -481,13 +376,7 @@ static void giveback(struct driver_data *drv_data)
*/
/* get a pointer to the next message, if any */
spin_lock_irqsave(&drv_data->lock, flags);
if (list_empty(&drv_data->queue))
next_msg = NULL;
else
next_msg = list_entry(drv_data->queue.next,
struct spi_message, queue);
spin_unlock_irqrestore(&drv_data->lock, flags);
next_msg = spi_get_next_queued_message(drv_data->master);
/* see if the next and current messages point
* to the same chip
......@@ -498,168 +387,10 @@ static void giveback(struct driver_data *drv_data)
cs_deassert(drv_data);
}
msg->state = NULL;
if (msg->complete)
msg->complete(msg->context);
spi_finalize_current_message(drv_data->master);
drv_data->cur_chip = NULL;
}
static int wait_ssp_rx_stall(void const __iomem *ioaddr)
{
unsigned long limit = loops_per_jiffy << 1;
while ((read_SSSR(ioaddr) & SSSR_BSY) && --limit)
cpu_relax();
return limit;
}
static int wait_dma_channel_stop(int channel)
{
unsigned long limit = loops_per_jiffy << 1;
while (!(DCSR(channel) & DCSR_STOPSTATE) && --limit)
cpu_relax();
return limit;
}
static void dma_error_stop(struct driver_data *drv_data, const char *msg)
{
void __iomem *reg = drv_data->ioaddr;
/* Stop and reset */
DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
write_SSSR_CS(drv_data, drv_data->clear_sr);
write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg);
if (!pxa25x_ssp_comp(drv_data))
write_SSTO(0, reg);
flush(drv_data);
write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg);
unmap_dma_buffers(drv_data);
dev_err(&drv_data->pdev->dev, "%s\n", msg);
drv_data->cur_msg->state = ERROR_STATE;
tasklet_schedule(&drv_data->pump_transfers);
}
static void dma_transfer_complete(struct driver_data *drv_data)
{
void __iomem *reg = drv_data->ioaddr;
struct spi_message *msg = drv_data->cur_msg;
/* Clear and disable interrupts on SSP and DMA channels */
write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg);
write_SSSR_CS(drv_data, drv_data->clear_sr);
DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
if (wait_dma_channel_stop(drv_data->rx_channel) == 0)
dev_err(&drv_data->pdev->dev,
"dma_handler: dma rx channel stop failed\n");
if (wait_ssp_rx_stall(drv_data->ioaddr) == 0)
dev_err(&drv_data->pdev->dev,
"dma_transfer: ssp rx stall failed\n");
unmap_dma_buffers(drv_data);
/* update the buffer pointer for the amount completed in dma */
drv_data->rx += drv_data->len -
(DCMD(drv_data->rx_channel) & DCMD_LENGTH);
/* Read trailing data from the fifo; it does not matter how many
* bytes are in the fifo, just read until the buffer is full
* or the fifo is empty, whichever occurs first */
drv_data->read(drv_data);
/* return count of what was actually read */
msg->actual_length += drv_data->len -
(drv_data->rx_end - drv_data->rx);
/* Transfer delays and chip select release are
* handled in pump_transfers or giveback
*/
/* Move to next transfer */
msg->state = next_transfer(drv_data);
/* Schedule transfer tasklet */
tasklet_schedule(&drv_data->pump_transfers);
}
static void dma_handler(int channel, void *data)
{
struct driver_data *drv_data = data;
u32 irq_status = DCSR(channel) & DMA_INT_MASK;
if (irq_status & DCSR_BUSERR) {
if (channel == drv_data->tx_channel)
dma_error_stop(drv_data,
"dma_handler: "
"bad bus address on tx channel");
else
dma_error_stop(drv_data,
"dma_handler: "
"bad bus address on rx channel");
return;
}
/* PXA25x_SSP has no timeout interrupt, wait for trailing bytes */
if ((channel == drv_data->tx_channel)
&& (irq_status & DCSR_ENDINTR)
&& (drv_data->ssp_type == PXA25x_SSP)) {
/* Wait for rx to stall */
if (wait_ssp_rx_stall(drv_data->ioaddr) == 0)
dev_err(&drv_data->pdev->dev,
"dma_handler: ssp rx stall failed\n");
/* finish this transfer, start the next */
dma_transfer_complete(drv_data);
}
}
static irqreturn_t dma_transfer(struct driver_data *drv_data)
{
u32 irq_status;
void __iomem *reg = drv_data->ioaddr;
irq_status = read_SSSR(reg) & drv_data->mask_sr;
if (irq_status & SSSR_ROR) {
dma_error_stop(drv_data, "dma_transfer: fifo overrun");
return IRQ_HANDLED;
}
/* Check for false positive timeout */
if ((irq_status & SSSR_TINT)
&& (DCSR(drv_data->tx_channel) & DCSR_RUN)) {
write_SSSR(SSSR_TINT, reg);
return IRQ_HANDLED;
}
if (irq_status & SSSR_TINT || drv_data->rx == drv_data->rx_end) {
/* Clear and disable timeout interrupt, do the rest in
* dma_transfer_complete */
if (!pxa25x_ssp_comp(drv_data))
write_SSTO(0, reg);
/* finish this transfer, start the next */
dma_transfer_complete(drv_data);
return IRQ_HANDLED;
}
/* Oops, problem detected */
return IRQ_NONE;
}
static void reset_sccr1(struct driver_data *drv_data)
{
void __iomem *reg = drv_data->ioaddr;
......@@ -681,7 +412,7 @@ static void int_error_stop(struct driver_data *drv_data, const char* msg)
reset_sccr1(drv_data);
if (!pxa25x_ssp_comp(drv_data))
write_SSTO(0, reg);
flush(drv_data);
pxa2xx_spi_flush(drv_data);
write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg);
dev_err(&drv_data->pdev->dev, "%s\n", msg);
......@@ -709,7 +440,7 @@ static void int_transfer_complete(struct driver_data *drv_data)
*/
/* Move to next transfer */
drv_data->cur_msg->state = next_transfer(drv_data);
drv_data->cur_msg->state = pxa2xx_spi_next_transfer(drv_data);
/* Schedule transfer tasklet */
tasklet_schedule(&drv_data->pump_transfers);
......@@ -789,10 +520,20 @@ static irqreturn_t ssp_int(int irq, void *dev_id)
{
struct driver_data *drv_data = dev_id;
void __iomem *reg = drv_data->ioaddr;
u32 sccr1_reg = read_SSCR1(reg);
u32 sccr1_reg;
u32 mask = drv_data->mask_sr;
u32 status;
/*
* The IRQ might be shared with other peripherals so we must first
* check whether we are runtime PM suspended or not. If we are, we
* assume that the IRQ was not for us (we shouldn't be RPM suspended
* when the interrupt is enabled).
*/
if (pm_runtime_suspended(&drv_data->pdev->dev))
return IRQ_NONE;
sccr1_reg = read_SSCR1(reg);
status = read_SSSR(reg);
/* Ignore possible writes if we don't need to write */
......@@ -820,106 +561,12 @@ static irqreturn_t ssp_int(int irq, void *dev_id)
return drv_data->transfer_handler(drv_data);
}
static int set_dma_burst_and_threshold(struct chip_data *chip,
struct spi_device *spi,
u8 bits_per_word, u32 *burst_code,
u32 *threshold)
static unsigned int ssp_get_clk_div(struct driver_data *drv_data, int rate)
{
struct pxa2xx_spi_chip *chip_info =
(struct pxa2xx_spi_chip *)spi->controller_data;
int bytes_per_word;
int burst_bytes;
int thresh_words;
int req_burst_size;
int retval = 0;
/* Set the threshold (in registers) to equal the same amount of data
* as represented by burst size (in bytes). The computation below
* is (burst_size rounded up to nearest 8 byte, word or long word)
* divided by (bytes/register); the tx threshold is the inverse of
* the rx, so that there will always be enough data in the rx fifo
* to satisfy a burst, and there will always be enough space in the
* tx fifo to accept a burst (a tx burst will overwrite the fifo if
* there is not enough space); there must always remain enough empty
* space in the rx fifo for any data loaded to the tx fifo.
* Whenever burst_size (in bytes) equals bits/word, the fifo threshold
* will be 8, or half the fifo;
* The threshold can only be set to 2, 4 or 8, but not 16, because
* to burst 16 to the tx fifo, the fifo would have to be empty;
* however, the minimum fifo trigger level is 1, and the tx will
* request service when the fifo is at this level, with only 15 spaces.
*/
unsigned long ssp_clk = drv_data->max_clk_rate;
const struct ssp_device *ssp = drv_data->ssp;
/* find bytes/word */
if (bits_per_word <= 8)
bytes_per_word = 1;
else if (bits_per_word <= 16)
bytes_per_word = 2;
else
bytes_per_word = 4;
/* use struct pxa2xx_spi_chip->dma_burst_size if available */
if (chip_info)
req_burst_size = chip_info->dma_burst_size;
else {
switch (chip->dma_burst_size) {
default:
/* if the default burst size is not set,
* do it now */
chip->dma_burst_size = DCMD_BURST8;
case DCMD_BURST8:
req_burst_size = 8;
break;
case DCMD_BURST16:
req_burst_size = 16;
break;
case DCMD_BURST32:
req_burst_size = 32;
break;
}
}
if (req_burst_size <= 8) {
*burst_code = DCMD_BURST8;
burst_bytes = 8;
} else if (req_burst_size <= 16) {
if (bytes_per_word == 1) {
/* don't burst more than 1/2 the fifo */
*burst_code = DCMD_BURST8;
burst_bytes = 8;
retval = 1;
} else {
*burst_code = DCMD_BURST16;
burst_bytes = 16;
}
} else {
if (bytes_per_word == 1) {
/* don't burst more than 1/2 the fifo */
*burst_code = DCMD_BURST8;
burst_bytes = 8;
retval = 1;
} else if (bytes_per_word == 2) {
/* don't burst more than 1/2 the fifo */
*burst_code = DCMD_BURST16;
burst_bytes = 16;
retval = 1;
} else {
*burst_code = DCMD_BURST32;
burst_bytes = 32;
}
}
thresh_words = burst_bytes / bytes_per_word;
/* thresh_words will be between 2 and 8 */
*threshold = (SSCR1_RxTresh(thresh_words) & SSCR1_RFT)
| (SSCR1_TxTresh(16-thresh_words) & SSCR1_TFT);
return retval;
}
static unsigned int ssp_get_clk_div(struct ssp_device *ssp, int rate)
{
unsigned long ssp_clk = clk_get_rate(ssp->clk);
rate = min_t(int, ssp_clk, rate);
if (ssp->type == PXA25x_SSP || ssp->type == CE4100_SSP)
return ((ssp_clk / (2 * rate) - 1) & 0xff) << 8;
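/*
 * Illustrative example for the PXA25x/CE4100 case above: with a
 * 3.6864 MHz ssp_clk and a requested rate of 115200 Hz, SCR becomes
 * 3686400 / (2 * 115200) - 1 = 15, giving an actual bit clock of
 * ssp_clk / (2 * (SCR + 1)) = 115200 Hz.
 */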
......@@ -934,7 +581,6 @@ static void pump_transfers(unsigned long data)
struct spi_transfer *transfer = NULL;
struct spi_transfer *previous = NULL;
struct chip_data *chip = NULL;
struct ssp_device *ssp = drv_data->ssp;
void __iomem *reg = drv_data->ioaddr;
u32 clk_div = 0;
u8 bits = 0;
......@@ -976,8 +622,8 @@ static void pump_transfers(unsigned long data)
cs_deassert(drv_data);
}
/* Check for transfers that need multiple DMA segments */
if (transfer->len > MAX_DMA_LEN && chip->enable_dma) {
/* Check if we can DMA this transfer */
if (!pxa2xx_spi_dma_is_possible(transfer->len) && chip->enable_dma) {
/* reject already-mapped transfers; PIO won't always work */
if (message->is_dma_mapped
......@@ -1000,21 +646,20 @@ static void pump_transfers(unsigned long data)
}
/* Setup the transfer state based on the type of transfer */
if (flush(drv_data) == 0) {
if (pxa2xx_spi_flush(drv_data) == 0) {
dev_err(&drv_data->pdev->dev, "pump_transfers: flush failed\n");
message->status = -EIO;
giveback(drv_data);
return;
}
drv_data->n_bytes = chip->n_bytes;
drv_data->dma_width = chip->dma_width;
drv_data->tx = (void *)transfer->tx_buf;
drv_data->tx_end = drv_data->tx + transfer->len;
drv_data->rx = transfer->rx_buf;
drv_data->rx_end = drv_data->rx + transfer->len;
drv_data->rx_dma = transfer->rx_dma;
drv_data->tx_dma = transfer->tx_dma;
drv_data->len = transfer->len & DCMD_LENGTH;
drv_data->len = transfer->len;
drv_data->write = drv_data->tx ? chip->write : null_writer;
drv_data->read = drv_data->rx ? chip->read : null_reader;
......@@ -1031,25 +676,22 @@ static void pump_transfers(unsigned long data)
if (transfer->bits_per_word)
bits = transfer->bits_per_word;
clk_div = ssp_get_clk_div(ssp, speed);
clk_div = ssp_get_clk_div(drv_data, speed);
if (bits <= 8) {
drv_data->n_bytes = 1;
drv_data->dma_width = DCMD_WIDTH1;
drv_data->read = drv_data->read != null_reader ?
u8_reader : null_reader;
drv_data->write = drv_data->write != null_writer ?
u8_writer : null_writer;
} else if (bits <= 16) {
drv_data->n_bytes = 2;
drv_data->dma_width = DCMD_WIDTH2;
drv_data->read = drv_data->read != null_reader ?
u16_reader : null_reader;
drv_data->write = drv_data->write != null_writer ?
u16_writer : null_writer;
} else if (bits <= 32) {
drv_data->n_bytes = 4;
drv_data->dma_width = DCMD_WIDTH4;
drv_data->read = drv_data->read != null_reader ?
u32_reader : null_reader;
drv_data->write = drv_data->write != null_writer ?
......@@ -1058,7 +700,8 @@ static void pump_transfers(unsigned long data)
/* if bits/word is changed in dma mode, then we must check the
* thresholds and burst also */
if (chip->enable_dma) {
if (set_dma_burst_and_threshold(chip, message->spi,
if (pxa2xx_spi_set_dma_burst_and_threshold(chip,
message->spi,
bits, &dma_burst,
&dma_thresh))
if (printk_ratelimit())
......@@ -1077,70 +720,21 @@ static void pump_transfers(unsigned long data)
message->state = RUNNING_STATE;
/* Try to map dma buffer and do a dma transfer if successful, but
* only if the length is non-zero and no larger than MAX_DMA_LEN.
*
* Zero-length non-descriptor DMA is illegal on PXA2xx; force use
* of PIO instead. Care is needed above because the transfer may
* have been passed with buffers that are already dma mapped.
* A zero-length transfer in PIO mode will not try to write/read
* to/from the buffers.
*
* REVISIT large transfers are exactly where we most want to be
* using DMA. If this happens much, split those transfers into
* multiple DMA segments rather than forcing PIO.
*/
drv_data->dma_mapped = 0;
if (drv_data->len > 0 && drv_data->len <= MAX_DMA_LEN)
drv_data->dma_mapped = map_dma_buffers(drv_data);
if (pxa2xx_spi_dma_is_possible(drv_data->len))
drv_data->dma_mapped = pxa2xx_spi_map_dma_buffers(drv_data);
if (drv_data->dma_mapped) {
/* Ensure we have the correct interrupt handler */
drv_data->transfer_handler = dma_transfer;
/* Setup rx DMA Channel */
DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
DSADR(drv_data->rx_channel) = drv_data->ssdr_physical;
DTADR(drv_data->rx_channel) = drv_data->rx_dma;
if (drv_data->rx == drv_data->null_dma_buf)
/* No target address increment */
DCMD(drv_data->rx_channel) = DCMD_FLOWSRC
| drv_data->dma_width
| dma_burst
| drv_data->len;
else
DCMD(drv_data->rx_channel) = DCMD_INCTRGADDR
| DCMD_FLOWSRC
| drv_data->dma_width
| dma_burst
| drv_data->len;
/* Setup tx DMA Channel */
DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
DSADR(drv_data->tx_channel) = drv_data->tx_dma;
DTADR(drv_data->tx_channel) = drv_data->ssdr_physical;
if (drv_data->tx == drv_data->null_dma_buf)
/* No source address increment */
DCMD(drv_data->tx_channel) = DCMD_FLOWTRG
| drv_data->dma_width
| dma_burst
| drv_data->len;
else
DCMD(drv_data->tx_channel) = DCMD_INCSRCADDR
| DCMD_FLOWTRG
| drv_data->dma_width
| dma_burst
| drv_data->len;
/* Enable dma end irqs on SSP to detect end of transfer */
if (drv_data->ssp_type == PXA25x_SSP)
DCMD(drv_data->tx_channel) |= DCMD_ENDIRQEN;
drv_data->transfer_handler = pxa2xx_spi_dma_transfer;
pxa2xx_spi_dma_prepare(drv_data, dma_burst);
/* Clear status and start DMA engine */
cr1 = chip->cr1 | dma_thresh | drv_data->dma_cr1;
write_SSSR(drv_data->clear_sr, reg);
DCSR(drv_data->rx_channel) |= DCSR_RUN;
DCSR(drv_data->tx_channel) |= DCSR_RUN;
pxa2xx_spi_dma_start(drv_data);
} else {
/* Ensure we have the correct interrupt handler */
drv_data->transfer_handler = interrupt_transfer;
......@@ -1150,6 +744,13 @@ static void pump_transfers(unsigned long data)
write_SSSR_CS(drv_data, drv_data->clear_sr);
}
if (is_lpss_ssp(drv_data)) {
if ((read_SSIRF(reg) & 0xff) != chip->lpss_rx_threshold)
write_SSIRF(chip->lpss_rx_threshold, reg);
if ((read_SSITF(reg) & 0xffff) != chip->lpss_tx_threshold)
write_SSITF(chip->lpss_tx_threshold, reg);
}
/* see if we need to reload the config registers */
if ((read_SSCR0(reg) != cr0)
|| (read_SSCR1(reg) & SSCR1_CHANGE_MASK) !=
......@@ -1176,31 +777,12 @@ static void pump_transfers(unsigned long data)
write_SSCR1(cr1, reg);
}
static void pump_messages(struct work_struct *work)
static int pxa2xx_spi_transfer_one_message(struct spi_master *master,
struct spi_message *msg)
{
struct driver_data *drv_data =
container_of(work, struct driver_data, pump_messages);
unsigned long flags;
/* Lock queue and check for queue work */
spin_lock_irqsave(&drv_data->lock, flags);
if (list_empty(&drv_data->queue) || drv_data->run == QUEUE_STOPPED) {
drv_data->busy = 0;
spin_unlock_irqrestore(&drv_data->lock, flags);
return;
}
/* Make sure we are not already running a message */
if (drv_data->cur_msg) {
spin_unlock_irqrestore(&drv_data->lock, flags);
return;
}
/* Extract head of queue */
drv_data->cur_msg = list_entry(drv_data->queue.next,
struct spi_message, queue);
list_del_init(&drv_data->cur_msg->queue);
struct driver_data *drv_data = spi_master_get_devdata(master);
drv_data->cur_msg = msg;
/* Initial message state */
drv_data->cur_msg->state = START_STATE;
drv_data->cur_transfer = list_entry(drv_data->cur_msg->transfers.next,
......@@ -1213,34 +795,27 @@ static void pump_messages(struct work_struct *work)
/* Mark as busy and launch transfers */
tasklet_schedule(&drv_data->pump_transfers);
drv_data->busy = 1;
spin_unlock_irqrestore(&drv_data->lock, flags);
return 0;
}
static int transfer(struct spi_device *spi, struct spi_message *msg)
static int pxa2xx_spi_prepare_transfer(struct spi_master *master)
{
struct driver_data *drv_data = spi_master_get_devdata(spi->master);
unsigned long flags;
spin_lock_irqsave(&drv_data->lock, flags);
if (drv_data->run == QUEUE_STOPPED) {
spin_unlock_irqrestore(&drv_data->lock, flags);
return -ESHUTDOWN;
}
msg->actual_length = 0;
msg->status = -EINPROGRESS;
msg->state = START_STATE;
struct driver_data *drv_data = spi_master_get_devdata(master);
list_add_tail(&msg->queue, &drv_data->queue);
pm_runtime_get_sync(&drv_data->pdev->dev);
return 0;
}
if (drv_data->run == QUEUE_RUNNING && !drv_data->busy)
queue_work(drv_data->workqueue, &drv_data->pump_messages);
static int pxa2xx_spi_unprepare_transfer(struct spi_master *master)
{
struct driver_data *drv_data = spi_master_get_devdata(master);
spin_unlock_irqrestore(&drv_data->lock, flags);
/* Disable the SSP now */
write_SSCR0(read_SSCR0(drv_data->ioaddr) & ~SSCR0_SSE,
drv_data->ioaddr);
pm_runtime_mark_last_busy(&drv_data->pdev->dev);
pm_runtime_put_autosuspend(&drv_data->pdev->dev);
return 0;
}
......@@ -1287,10 +862,18 @@ static int setup(struct spi_device *spi)
struct pxa2xx_spi_chip *chip_info = NULL;
struct chip_data *chip;
struct driver_data *drv_data = spi_master_get_devdata(spi->master);
struct ssp_device *ssp = drv_data->ssp;
unsigned int clk_div;
uint tx_thres = TX_THRESH_DFLT;
uint rx_thres = RX_THRESH_DFLT;
uint tx_thres, tx_hi_thres, rx_thres;
if (is_lpss_ssp(drv_data)) {
tx_thres = LPSS_TX_LOTHRESH_DFLT;
tx_hi_thres = LPSS_TX_HITHRESH_DFLT;
rx_thres = LPSS_RX_THRESH_DFLT;
} else {
tx_thres = TX_THRESH_DFLT;
tx_hi_thres = 0;
rx_thres = RX_THRESH_DFLT;
}
if (!pxa25x_ssp_comp(drv_data)
&& (spi->bits_per_word < 4 || spi->bits_per_word > 32)) {
......@@ -1330,8 +913,6 @@ static int setup(struct spi_device *spi)
chip->gpio_cs = -1;
chip->enable_dma = 0;
chip->timeout = TIMOUT_DFLT;
chip->dma_burst_size = drv_data->master_info->enable_dma ?
DCMD_BURST8 : 0;
}
/* protocol drivers may change the chip settings, so...
......@@ -1345,23 +926,37 @@ static int setup(struct spi_device *spi)
chip->timeout = chip_info->timeout;
if (chip_info->tx_threshold)
tx_thres = chip_info->tx_threshold;
if (chip_info->tx_hi_threshold)
tx_hi_thres = chip_info->tx_hi_threshold;
if (chip_info->rx_threshold)
rx_thres = chip_info->rx_threshold;
chip->enable_dma = drv_data->master_info->enable_dma;
chip->dma_threshold = 0;
if (chip_info->enable_loopback)
chip->cr1 = SSCR1_LBM;
} else if (ACPI_HANDLE(&spi->dev)) {
/*
* Slave devices enumerated from ACPI namespace don't
* usually have chip_info but we still might want to use
* DMA with them.
*/
chip->enable_dma = drv_data->master_info->enable_dma;
}
chip->threshold = (SSCR1_RxTresh(rx_thres) & SSCR1_RFT) |
(SSCR1_TxTresh(tx_thres) & SSCR1_TFT);
chip->lpss_rx_threshold = SSIRF_RxThresh(rx_thres);
chip->lpss_tx_threshold = SSITF_TxLoThresh(tx_thres)
| SSITF_TxHiThresh(tx_hi_thres);
/* set dma burst and threshold outside of chip_info path so that if
* chip_info goes away after setting chip->enable_dma, the
* burst and threshold can still respond to changes in bits_per_word */
if (chip->enable_dma) {
/* set up legal burst and threshold for dma */
if (set_dma_burst_and_threshold(chip, spi, spi->bits_per_word,
if (pxa2xx_spi_set_dma_burst_and_threshold(chip, spi,
spi->bits_per_word,
&chip->dma_burst_size,
&chip->dma_threshold)) {
dev_warn(&spi->dev, "in setup: DMA burst size reduced "
......@@ -1369,7 +964,7 @@ static int setup(struct spi_device *spi)
}
}
clk_div = ssp_get_clk_div(ssp, spi->max_speed_hz);
clk_div = ssp_get_clk_div(drv_data, spi->max_speed_hz);
chip->speed_hz = spi->max_speed_hz;
chip->cr0 = clk_div
......@@ -1382,32 +977,32 @@ static int setup(struct spi_device *spi)
chip->cr1 |= (((spi->mode & SPI_CPHA) != 0) ? SSCR1_SPH : 0)
| (((spi->mode & SPI_CPOL) != 0) ? SSCR1_SPO : 0);
if (spi->mode & SPI_LOOP)
chip->cr1 |= SSCR1_LBM;
/* NOTE: PXA25x_SSP _could_ use external clocking ... */
if (!pxa25x_ssp_comp(drv_data))
dev_dbg(&spi->dev, "%ld Hz actual, %s\n",
clk_get_rate(ssp->clk)
drv_data->max_clk_rate
/ (1 + ((chip->cr0 & SSCR0_SCR(0xfff)) >> 8)),
chip->enable_dma ? "DMA" : "PIO");
else
dev_dbg(&spi->dev, "%ld Hz actual, %s\n",
clk_get_rate(ssp->clk) / 2
drv_data->max_clk_rate / 2
/ (1 + ((chip->cr0 & SSCR0_SCR(0x0ff)) >> 8)),
chip->enable_dma ? "DMA" : "PIO");
if (spi->bits_per_word <= 8) {
chip->n_bytes = 1;
chip->dma_width = DCMD_WIDTH1;
chip->read = u8_reader;
chip->write = u8_writer;
} else if (spi->bits_per_word <= 16) {
chip->n_bytes = 2;
chip->dma_width = DCMD_WIDTH2;
chip->read = u16_reader;
chip->write = u16_writer;
} else if (spi->bits_per_word <= 32) {
chip->cr0 |= SSCR0_EDSS;
chip->n_bytes = 4;
chip->dma_width = DCMD_WIDTH4;
chip->read = u32_reader;
chip->write = u32_writer;
} else {
......@@ -1438,93 +1033,98 @@ static void cleanup(struct spi_device *spi)
kfree(chip);
}
static int init_queue(struct driver_data *drv_data)
#ifdef CONFIG_ACPI
static int pxa2xx_spi_acpi_add_dma(struct acpi_resource *res, void *data)
{
INIT_LIST_HEAD(&drv_data->queue);
spin_lock_init(&drv_data->lock);
drv_data->run = QUEUE_STOPPED;
drv_data->busy = 0;
tasklet_init(&drv_data->pump_transfers,
pump_transfers, (unsigned long)drv_data);
INIT_WORK(&drv_data->pump_messages, pump_messages);
drv_data->workqueue = create_singlethread_workqueue(
dev_name(drv_data->master->dev.parent));
if (drv_data->workqueue == NULL)
return -EBUSY;
struct pxa2xx_spi_master *pdata = data;
if (res->type == ACPI_RESOURCE_TYPE_FIXED_DMA) {
const struct acpi_resource_fixed_dma *dma;
dma = &res->data.fixed_dma;
if (pdata->tx_slave_id < 0) {
pdata->tx_slave_id = dma->request_lines;
pdata->tx_chan_id = dma->channels;
} else if (pdata->rx_slave_id < 0) {
pdata->rx_slave_id = dma->request_lines;
pdata->rx_chan_id = dma->channels;
}
}
return 0;
/* Tell the ACPI core to skip this resource */
return 1;
}
static int start_queue(struct driver_data *drv_data)
static struct pxa2xx_spi_master *
pxa2xx_spi_acpi_get_pdata(struct platform_device *pdev)
{
unsigned long flags;
spin_lock_irqsave(&drv_data->lock, flags);
if (drv_data->run == QUEUE_RUNNING || drv_data->busy) {
spin_unlock_irqrestore(&drv_data->lock, flags);
return -EBUSY;
struct pxa2xx_spi_master *pdata;
struct list_head resource_list;
struct acpi_device *adev;
struct ssp_device *ssp;
struct resource *res;
int devid;
if (!ACPI_HANDLE(&pdev->dev) ||
acpi_bus_get_device(ACPI_HANDLE(&pdev->dev), &adev))
return NULL;
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata) {
dev_err(&pdev->dev,
"failed to allocate memory for platform data\n");
return NULL;
}
drv_data->run = QUEUE_RUNNING;
drv_data->cur_msg = NULL;
drv_data->cur_transfer = NULL;
drv_data->cur_chip = NULL;
spin_unlock_irqrestore(&drv_data->lock, flags);
queue_work(drv_data->workqueue, &drv_data->pump_messages);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return NULL;
return 0;
}
ssp = &pdata->ssp;
static int stop_queue(struct driver_data *drv_data)
{
unsigned long flags;
unsigned limit = 500;
int status = 0;
spin_lock_irqsave(&drv_data->lock, flags);
/* This is a bit lame, but is optimized for the common execution path.
* A wait_queue on the drv_data->busy could be used, but then the common
* execution path (pump_messages) would be required to call wake_up or
* friends on every SPI message. Do this instead */
drv_data->run = QUEUE_STOPPED;
while ((!list_empty(&drv_data->queue) || drv_data->busy) && limit--) {
spin_unlock_irqrestore(&drv_data->lock, flags);
msleep(10);
spin_lock_irqsave(&drv_data->lock, flags);
ssp->phys_base = res->start;
ssp->mmio_base = devm_request_and_ioremap(&pdev->dev, res);
if (!ssp->mmio_base) {
dev_err(&pdev->dev, "failed to ioremap mmio_base\n");
return NULL;
}
if (!list_empty(&drv_data->queue) || drv_data->busy)
status = -EBUSY;
ssp->clk = devm_clk_get(&pdev->dev, NULL);
ssp->irq = platform_get_irq(pdev, 0);
ssp->type = LPSS_SSP;
ssp->pdev = pdev;
spin_unlock_irqrestore(&drv_data->lock, flags);
ssp->port_id = -1;
if (adev->pnp.unique_id && !kstrtoint(adev->pnp.unique_id, 0, &devid))
ssp->port_id = devid;
return status;
}
pdata->num_chipselect = 1;
pdata->rx_slave_id = -1;
pdata->tx_slave_id = -1;
static int destroy_queue(struct driver_data *drv_data)
{
int status;
INIT_LIST_HEAD(&resource_list);
acpi_dev_get_resources(adev, &resource_list, pxa2xx_spi_acpi_add_dma,
pdata);
acpi_dev_free_resource_list(&resource_list);
status = stop_queue(drv_data);
/* we are unloading the module or failing to load (only two calls
* to this routine), and neither call can handle a return value.
* However, destroy_workqueue calls flush_workqueue, and that will
* block until all work is done. If the reason that stop_queue
* timed out is that the work will never finish, then it does no
* good to call destroy_workqueue, so return anyway. */
if (status != 0)
return status;
pdata->enable_dma = pdata->rx_slave_id >= 0 && pdata->tx_slave_id >= 0;
destroy_workqueue(drv_data->workqueue);
return pdata;
}
return 0;
static struct acpi_device_id pxa2xx_spi_acpi_match[] = {
{ "INT33C0", 0 },
{ "INT33C1", 0 },
{ },
};
MODULE_DEVICE_TABLE(acpi, pxa2xx_spi_acpi_match);
#else
static inline struct pxa2xx_spi_master *
pxa2xx_spi_acpi_get_pdata(struct platform_device *pdev)
{
return NULL;
}
#endif
static int pxa2xx_spi_probe(struct platform_device *pdev)
{
......@@ -1535,11 +1135,21 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
struct ssp_device *ssp;
int status;
platform_info = dev->platform_data;
platform_info = dev_get_platdata(dev);
if (!platform_info) {
platform_info = pxa2xx_spi_acpi_get_pdata(pdev);
if (!platform_info) {
dev_err(&pdev->dev, "missing platform data\n");
return -ENODEV;
}
}
ssp = pxa_ssp_request(pdev->id, pdev->name);
if (ssp == NULL) {
dev_err(&pdev->dev, "failed to request SSP%d\n", pdev->id);
if (!ssp)
ssp = &platform_info->ssp;
if (!ssp->mmio_base) {
dev_err(&pdev->dev, "failed to get ssp\n");
return -ENODEV;
}
......@@ -1558,19 +1168,21 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
master->dev.parent = &pdev->dev;
master->dev.of_node = pdev->dev.of_node;
ACPI_HANDLE_SET(&master->dev, ACPI_HANDLE(&pdev->dev));
/* the spi->mode bits understood by this driver: */
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;
master->bus_num = pdev->id;
master->bus_num = ssp->port_id;
master->num_chipselect = platform_info->num_chipselect;
master->dma_alignment = DMA_ALIGNMENT;
master->cleanup = cleanup;
master->setup = setup;
master->transfer = transfer;
master->transfer_one_message = pxa2xx_spi_transfer_one_message;
master->prepare_transfer_hardware = pxa2xx_spi_prepare_transfer;
master->unprepare_transfer_hardware = pxa2xx_spi_unprepare_transfer;
drv_data->ssp_type = ssp->type;
drv_data->null_dma_buf = (u32 *)ALIGN((u32)(drv_data +
sizeof(struct driver_data)), 8);
drv_data->null_dma_buf = (u32 *)PTR_ALIGN(&drv_data[1], DMA_ALIGNMENT);
drv_data->ioaddr = ssp->mmio_base;
drv_data->ssdr_physical = ssp->phys_base + SSDR;
......@@ -1581,7 +1193,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
drv_data->mask_sr = SSSR_RFS | SSSR_TFS | SSSR_ROR;
} else {
drv_data->int_cr1 = SSCR1_TIE | SSCR1_RIE | SSCR1_TINTE;
drv_data->dma_cr1 = SSCR1_TSRE | SSCR1_RSRE | SSCR1_TINTE;
drv_data->dma_cr1 = DEFAULT_DMA_CR1;
drv_data->clear_sr = SSSR_ROR | SSSR_TINT;
drv_data->mask_sr = SSSR_TINT | SSSR_RFS | SSSR_TFS | SSSR_ROR;
}
......@@ -1597,35 +1209,17 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
drv_data->tx_channel = -1;
drv_data->rx_channel = -1;
if (platform_info->enable_dma) {
/* Get two DMA channels (rx and tx) */
drv_data->rx_channel = pxa_request_dma("pxa2xx_spi_ssp_rx",
DMA_PRIO_HIGH,
dma_handler,
drv_data);
if (drv_data->rx_channel < 0) {
dev_err(dev, "problem (%d) requesting rx channel\n",
drv_data->rx_channel);
status = -ENODEV;
goto out_error_irq_alloc;
status = pxa2xx_spi_dma_setup(drv_data);
if (status) {
dev_warn(dev, "failed to setup DMA, using PIO\n");
platform_info->enable_dma = false;
}
drv_data->tx_channel = pxa_request_dma("pxa2xx_spi_ssp_tx",
DMA_PRIO_MEDIUM,
dma_handler,
drv_data);
if (drv_data->tx_channel < 0) {
dev_err(dev, "problem (%d) requesting tx channel\n",
drv_data->tx_channel);
status = -ENODEV;
goto out_error_dma_alloc;
}
DRCMR(ssp->drcmr_rx) = DRCMR_MAPVLD | drv_data->rx_channel;
DRCMR(ssp->drcmr_tx) = DRCMR_MAPVLD | drv_data->tx_channel;
}
/* Enable SOC clock */
clk_enable(ssp->clk);
clk_prepare_enable(ssp->clk);
drv_data->max_clk_rate = clk_get_rate(ssp->clk);
/* Load default SSP configuration */
write_SSCR0(0, drv_data->ioaddr);
......@@ -1640,41 +1234,29 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
write_SSTO(0, drv_data->ioaddr);
write_SSPSP(0, drv_data->ioaddr);
/* Initial and start queue */
status = init_queue(drv_data);
if (status != 0) {
dev_err(&pdev->dev, "problem initializing queue\n");
goto out_error_clock_enabled;
}
status = start_queue(drv_data);
if (status != 0) {
dev_err(&pdev->dev, "problem starting queue\n");
goto out_error_clock_enabled;
}
lpss_ssp_setup(drv_data);
tasklet_init(&drv_data->pump_transfers, pump_transfers,
(unsigned long)drv_data);
/* Register with the SPI framework */
platform_set_drvdata(pdev, drv_data);
status = spi_register_master(master);
if (status != 0) {
dev_err(&pdev->dev, "problem registering spi master\n");
goto out_error_queue_alloc;
goto out_error_clock_enabled;
}
return status;
pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
out_error_queue_alloc:
destroy_queue(drv_data);
return status;
out_error_clock_enabled:
clk_disable(ssp->clk);
out_error_dma_alloc:
if (drv_data->tx_channel != -1)
pxa_free_dma(drv_data->tx_channel);
if (drv_data->rx_channel != -1)
pxa_free_dma(drv_data->rx_channel);
out_error_irq_alloc:
clk_disable_unprepare(ssp->clk);
pxa2xx_spi_dma_release(drv_data);
free_irq(ssp->irq, drv_data);
out_error_master_alloc:
......@@ -1687,37 +1269,23 @@ static int pxa2xx_spi_remove(struct platform_device *pdev)
{
struct driver_data *drv_data = platform_get_drvdata(pdev);
struct ssp_device *ssp;
int status = 0;
if (!drv_data)
return 0;
ssp = drv_data->ssp;
/* Remove the queue */
status = destroy_queue(drv_data);
if (status != 0)
/* the kernel does not check the return status of this
* routine (mod->exit, within the kernel). Therefore
* nothing is gained by returning from here, the module is
* going away regardless, and we should not leave any more
* resources allocated than necessary. We cannot free the
* message memory in drv_data->queue, but we can release the
* resources below. I think the kernel should honor -EBUSY
* returns but... */
dev_err(&pdev->dev, "pxa2xx_spi_remove: workqueue will not "
"complete, message memory not freed\n");
pm_runtime_get_sync(&pdev->dev);
/* Disable the SSP at the peripheral and SOC level */
write_SSCR0(0, drv_data->ioaddr);
clk_disable(ssp->clk);
clk_disable_unprepare(ssp->clk);
/* Release DMA */
if (drv_data->master_info->enable_dma) {
DRCMR(ssp->drcmr_rx) = 0;
DRCMR(ssp->drcmr_tx) = 0;
pxa_free_dma(drv_data->tx_channel);
pxa_free_dma(drv_data->rx_channel);
}
if (drv_data->master_info->enable_dma)
pxa2xx_spi_dma_release(drv_data);
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_disable(&pdev->dev);
/* Release IRQ */
free_irq(ssp->irq, drv_data);
......@@ -1749,11 +1317,11 @@ static int pxa2xx_spi_suspend(struct device *dev)
struct ssp_device *ssp = drv_data->ssp;
int status = 0;
status = stop_queue(drv_data);
status = spi_master_suspend(drv_data->master);
if (status != 0)
return status;
write_SSCR0(0, drv_data->ioaddr);
clk_disable(ssp->clk);
clk_disable_unprepare(ssp->clk);
return 0;
}
......@@ -1764,18 +1332,13 @@ static int pxa2xx_spi_resume(struct device *dev)
struct ssp_device *ssp = drv_data->ssp;
int status = 0;
if (drv_data->rx_channel != -1)
DRCMR(drv_data->ssp->drcmr_rx) =
DRCMR_MAPVLD | drv_data->rx_channel;
if (drv_data->tx_channel != -1)
DRCMR(drv_data->ssp->drcmr_tx) =
DRCMR_MAPVLD | drv_data->tx_channel;
pxa2xx_spi_dma_resume(drv_data);
/* Enable the SSP clock */
clk_enable(ssp->clk);
clk_prepare_enable(ssp->clk);
/* Start the queue running */
status = start_queue(drv_data);
status = spi_master_resume(drv_data->master);
if (status != 0) {
dev_err(dev, "problem starting queue (%d)\n", status);
return status;
......@@ -1783,20 +1346,38 @@ static int pxa2xx_spi_resume(struct device *dev)
return 0;
}
#endif
#ifdef CONFIG_PM_RUNTIME
static int pxa2xx_spi_runtime_suspend(struct device *dev)
{
struct driver_data *drv_data = dev_get_drvdata(dev);
clk_disable_unprepare(drv_data->ssp->clk);
return 0;
}
static int pxa2xx_spi_runtime_resume(struct device *dev)
{
struct driver_data *drv_data = dev_get_drvdata(dev);
clk_prepare_enable(drv_data->ssp->clk);
return 0;
}
#endif
static const struct dev_pm_ops pxa2xx_spi_pm_ops = {
.suspend = pxa2xx_spi_suspend,
.resume = pxa2xx_spi_resume,
SET_SYSTEM_SLEEP_PM_OPS(pxa2xx_spi_suspend, pxa2xx_spi_resume)
SET_RUNTIME_PM_OPS(pxa2xx_spi_runtime_suspend,
pxa2xx_spi_runtime_resume, NULL)
};
#endif
static struct platform_driver driver = {
.driver = {
.name = "pxa2xx-spi",
.owner = THIS_MODULE,
#ifdef CONFIG_PM
.pm = &pxa2xx_spi_pm_ops,
#endif
.acpi_match_table = ACPI_PTR(pxa2xx_spi_acpi_match),
},
.probe = pxa2xx_spi_probe,
.remove = pxa2xx_spi_remove,
......
/*
* Copyright (C) 2005 Stephen Street / StreetFire Sound Labs
* Copyright (C) 2013, Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef SPI_PXA2XX_H
#define SPI_PXA2XX_H
#include <linux/atomic.h>
#include <linux/dmaengine.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/pxa2xx_ssp.h>
#include <linux/scatterlist.h>
#include <linux/sizes.h>
#include <linux/spi/spi.h>
#include <linux/spi/pxa2xx_spi.h>
struct driver_data {
/* Driver model hookup */
struct platform_device *pdev;
/* SSP Info */
struct ssp_device *ssp;
/* SPI framework hookup */
enum pxa_ssp_type ssp_type;
struct spi_master *master;
/* PXA hookup */
struct pxa2xx_spi_master *master_info;
/* PXA private DMA setup stuff */
int rx_channel;
int tx_channel;
u32 *null_dma_buf;
/* SSP register addresses */
void __iomem *ioaddr;
u32 ssdr_physical;
/* SSP masks */
u32 dma_cr1;
u32 int_cr1;
u32 clear_sr;
u32 mask_sr;
/* Maximum clock rate */
unsigned long max_clk_rate;
/* Message Transfer pump */
struct tasklet_struct pump_transfers;
/* DMA engine support */
struct dma_chan *rx_chan;
struct dma_chan *tx_chan;
struct sg_table rx_sgt;
struct sg_table tx_sgt;
int rx_nents;
int tx_nents;
void *dummy;
atomic_t dma_running;
/* Current message transfer state info */
struct spi_message *cur_msg;
struct spi_transfer *cur_transfer;
struct chip_data *cur_chip;
size_t len;
void *tx;
void *tx_end;
void *rx;
void *rx_end;
int dma_mapped;
dma_addr_t rx_dma;
dma_addr_t tx_dma;
size_t rx_map_len;
size_t tx_map_len;
u8 n_bytes;
int (*write)(struct driver_data *drv_data);
int (*read)(struct driver_data *drv_data);
irqreturn_t (*transfer_handler)(struct driver_data *drv_data);
void (*cs_control)(u32 command);
void __iomem *lpss_base;
};
struct chip_data {
u32 cr0;
u32 cr1;
u32 psp;
u32 timeout;
u8 n_bytes;
u32 dma_burst_size;
u32 threshold;
u32 dma_threshold;
u16 lpss_rx_threshold;
u16 lpss_tx_threshold;
u8 enable_dma;
u8 bits_per_word;
u32 speed_hz;
union {
int gpio_cs;
unsigned int frm;
};
int gpio_cs_inverted;
int (*write)(struct driver_data *drv_data);
int (*read)(struct driver_data *drv_data);
void (*cs_control)(u32 command);
};
#define DEFINE_SSP_REG(reg, off) \
static inline u32 read_##reg(void const __iomem *p) \
{ return __raw_readl(p + (off)); } \
\
static inline void write_##reg(u32 v, void __iomem *p) \
{ __raw_writel(v, p + (off)); }
DEFINE_SSP_REG(SSCR0, 0x00)
DEFINE_SSP_REG(SSCR1, 0x04)
DEFINE_SSP_REG(SSSR, 0x08)
DEFINE_SSP_REG(SSITR, 0x0c)
DEFINE_SSP_REG(SSDR, 0x10)
DEFINE_SSP_REG(SSTO, 0x28)
DEFINE_SSP_REG(SSPSP, 0x2c)
DEFINE_SSP_REG(SSITF, SSITF)
DEFINE_SSP_REG(SSIRF, SSIRF)
#define START_STATE ((void *)0)
#define RUNNING_STATE ((void *)1)
#define DONE_STATE ((void *)2)
#define ERROR_STATE ((void *)-1)
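/*
 * spi_message->state carries these opaque cursor values while a message
 * is pumped through its transfers; DONE_STATE and ERROR_STATE are
 * terminal.
 */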
#define IS_DMA_ALIGNED(x) IS_ALIGNED((unsigned long)(x), DMA_ALIGNMENT)
#define DMA_ALIGNMENT 8
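/*
 * Buffers handed to the legacy DMA path must start on an 8-byte
 * boundary; DMA_ALIGNMENT is also exported through master->dma_alignment
 * at probe time to advertise the constraint to SPI clients.
 */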
static inline int pxa25x_ssp_comp(struct driver_data *drv_data)
{
if (drv_data->ssp_type == PXA25x_SSP)
return 1;
if (drv_data->ssp_type == CE4100_SSP)
return 1;
return 0;
}
static inline void write_SSSR_CS(struct driver_data *drv_data, u32 val)
{
void __iomem *reg = drv_data->ioaddr;
if (drv_data->ssp_type == CE4100_SSP)
val |= read_SSSR(reg) & SSSR_ALT_FRM_MASK;
write_SSSR(val, reg);
}
extern int pxa2xx_spi_flush(struct driver_data *drv_data);
extern void *pxa2xx_spi_next_transfer(struct driver_data *drv_data);
/*
* Select the right DMA implementation.
*/
#if defined(CONFIG_SPI_PXA2XX_PXADMA)
#define SPI_PXA2XX_USE_DMA 1
#define MAX_DMA_LEN 8191
#define DEFAULT_DMA_CR1 (SSCR1_TSRE | SSCR1_RSRE | SSCR1_TINTE)
#elif defined(CONFIG_SPI_PXA2XX_DMA)
#define SPI_PXA2XX_USE_DMA 1
#define MAX_DMA_LEN SZ_64K
#define DEFAULT_DMA_CR1 (SSCR1_TSRE | SSCR1_RSRE | SSCR1_TRAIL)
#else
#undef SPI_PXA2XX_USE_DMA
#define MAX_DMA_LEN 0
#define DEFAULT_DMA_CR1 0
#endif
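/*
 * The legacy PXA DMA path is capped at 8191 bytes per transfer by the
 * 13-bit DCMD length field, while the DMA engine path allows up to
 * 64 KiB; with no DMA backend configured MAX_DMA_LEN is 0 and the driver
 * always falls back to PIO.
 */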
#ifdef SPI_PXA2XX_USE_DMA
extern bool pxa2xx_spi_dma_is_possible(size_t len);
extern int pxa2xx_spi_map_dma_buffers(struct driver_data *drv_data);
extern irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data);
extern int pxa2xx_spi_dma_prepare(struct driver_data *drv_data, u32 dma_burst);
extern void pxa2xx_spi_dma_start(struct driver_data *drv_data);
extern int pxa2xx_spi_dma_setup(struct driver_data *drv_data);
extern void pxa2xx_spi_dma_release(struct driver_data *drv_data);
extern void pxa2xx_spi_dma_resume(struct driver_data *drv_data);
extern int pxa2xx_spi_set_dma_burst_and_threshold(struct chip_data *chip,
struct spi_device *spi,
u8 bits_per_word,
u32 *burst_code,
u32 *threshold);
#else
static inline bool pxa2xx_spi_dma_is_possible(size_t len) { return false; }
static inline int pxa2xx_spi_map_dma_buffers(struct driver_data *drv_data)
{
return 0;
}
#define pxa2xx_spi_dma_transfer NULL
static inline void pxa2xx_spi_dma_prepare(struct driver_data *drv_data,
u32 dma_burst) {}
static inline void pxa2xx_spi_dma_start(struct driver_data *drv_data) {}
static inline int pxa2xx_spi_dma_setup(struct driver_data *drv_data)
{
return 0;
}
static inline void pxa2xx_spi_dma_release(struct driver_data *drv_data) {}
static inline void pxa2xx_spi_dma_resume(struct driver_data *drv_data) {}
static inline int pxa2xx_spi_set_dma_burst_and_threshold(struct chip_data *chip,
struct spi_device *spi,
u8 bits_per_word,
u32 *burst_code,
u32 *threshold)
{
return -ENODEV;
}
#endif
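Because the !SPI_PXA2XX_USE_DMA branch stubs out every DMA helper, transfer code can call them unconditionally and simply fall back to PIO. A hedged sketch of that calling pattern (the wrapper name is invented; it assumes the mapping helper returns nonzero on success):

static int example_try_dma(struct driver_data *drv_data, u32 dma_burst)
{
	if (!pxa2xx_spi_dma_is_possible(drv_data->len) ||
	    !pxa2xx_spi_map_dma_buffers(drv_data))
		return 0;			/* caller falls back to PIO */

	pxa2xx_spi_dma_prepare(drv_data, dma_burst);
	pxa2xx_spi_dma_start(drv_data);
	return 1;				/* transfer handed off to DMA */
}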
#endif /* SPI_PXA2XX_H */
......@@ -62,7 +62,7 @@
#define S3C64XX_SPI_CLKSEL_SRCMSK (3<<9)
#define S3C64XX_SPI_CLKSEL_SRCSHFT 9
#define S3C64XX_SPI_ENCLK_ENABLE (1<<8)
#define S3C64XX_SPI_PSR_MASK 0xff
#define S3C64XX_SPI_PSR_MASK 0xff
#define S3C64XX_SPI_MODE_CH_TSZ_BYTE (0<<29)
#define S3C64XX_SPI_MODE_CH_TSZ_HALFWORD (1<<29)
......@@ -697,7 +697,7 @@ static int s3c64xx_spi_transfer_one_message(struct spi_master *master,
INIT_COMPLETION(sdd->xfer_completion);
/* Only BPW and Speed may change across transfers */
bpw = xfer->bits_per_word ? : spi->bits_per_word;
bpw = xfer->bits_per_word;
speed = xfer->speed_hz ? : spi->max_speed_hz;
if (xfer->len % (bpw / 8)) {
......@@ -743,8 +743,7 @@ static int s3c64xx_spi_transfer_one_message(struct spi_master *master,
sdd->regs + S3C64XX_SPI_SLAVE_SEL);
if (status) {
dev_err(&spi->dev, "I/O Error: "
"rx-%d tx-%d res:rx-%c tx-%c len-%d\n",
dev_err(&spi->dev, "I/O Error: rx-%d tx-%d res:rx-%c tx-%c len-%d\n",
xfer->rx_buf ? 1 : 0, xfer->tx_buf ? 1 : 0,
(sdd->state & RXBUSY) ? 'f' : 'p',
(sdd->state & TXBUSY) ? 'f' : 'p',
......@@ -799,7 +798,7 @@ static int s3c64xx_spi_prepare_transfer(struct spi_master *spi)
/* Acquire DMA channels */
while (!acquire_dma(sdd))
msleep(10);
usleep_range(10000, 11000);
pm_runtime_get_sync(&sdd->pdev->dev);
......@@ -841,16 +840,14 @@ static struct s3c64xx_spi_csinfo *s3c64xx_get_slave_ctrldata(
cs = kzalloc(sizeof(*cs), GFP_KERNEL);
if (!cs) {
dev_err(&spi->dev, "could not allocate memory for controller"
" data\n");
dev_err(&spi->dev, "could not allocate memory for controller data\n");
of_node_put(data_np);
return ERR_PTR(-ENOMEM);
}
cs->line = of_get_named_gpio(data_np, "cs-gpio", 0);
if (!gpio_is_valid(cs->line)) {
dev_err(&spi->dev, "chip select gpio is not specified or "
"invalid\n");
dev_err(&spi->dev, "chip select gpio is not specified or invalid\n");
kfree(cs);
of_node_put(data_np);
return ERR_PTR(-EINVAL);
......@@ -957,6 +954,8 @@ static int s3c64xx_spi_setup(struct spi_device *spi)
if (spi->max_speed_hz >= speed) {
spi->max_speed_hz = speed;
} else {
dev_err(&spi->dev, "Can't set %dHz transfer speed\n",
spi->max_speed_hz);
err = -EINVAL;
goto setup_exit;
}
......@@ -1076,8 +1075,8 @@ static int s3c64xx_spi_get_dmares(
if (!sdd->pdev->dev.of_node) {
res = platform_get_resource(pdev, IORESOURCE_DMA, tx ? 0 : 1);
if (!res) {
dev_err(&pdev->dev, "Unable to get SPI-%s dma "
"resource\n", chan_str);
dev_err(&pdev->dev, "Unable to get SPI-%s dma resource\n",
chan_str);
return -ENXIO;
}
dma_data->dmach = res->start;
......@@ -1133,8 +1132,7 @@ static void s3c64xx_spi_dt_gpio_free(struct s3c64xx_spi_driver_data *sdd)
gpio_free(sdd->gpios[idx]);
}
static struct s3c64xx_spi_info * s3c64xx_spi_parse_dt(
struct device *dev)
static struct s3c64xx_spi_info *s3c64xx_spi_parse_dt(struct device *dev)
{
struct s3c64xx_spi_info *sci;
u32 temp;
......@@ -1146,16 +1144,14 @@ static struct s3c64xx_spi_info * s3c64xx_spi_parse_dt(
}
if (of_property_read_u32(dev->of_node, "samsung,spi-src-clk", &temp)) {
dev_warn(dev, "spi bus clock parent not specified, using "
"clock at index 0 as parent\n");
dev_warn(dev, "spi bus clock parent not specified, using clock at index 0 as parent\n");
sci->src_clk_nr = 0;
} else {
sci->src_clk_nr = temp;
}
if (of_property_read_u32(dev->of_node, "num-cs", &temp)) {
dev_warn(dev, "number of chip select lines not specified, "
"assuming 1 chip select line\n");
dev_warn(dev, "number of chip select lines not specified, assuming 1 chip select line\n");
sci->num_cs = 1;
} else {
sci->num_cs = temp;
......@@ -1195,7 +1191,7 @@ static inline struct s3c64xx_spi_port_config *s3c64xx_spi_get_port_config(
platform_get_device_id(pdev)->driver_data;
}
static int __init s3c64xx_spi_probe(struct platform_device *pdev)
static int s3c64xx_spi_probe(struct platform_device *pdev)
{
struct resource *mem_res;
struct s3c64xx_spi_driver_data *sdd;
......@@ -1245,8 +1241,8 @@ static int __init s3c64xx_spi_probe(struct platform_device *pdev)
if (pdev->dev.of_node) {
ret = of_alias_get_id(pdev->dev.of_node, "spi");
if (ret < 0) {
dev_err(&pdev->dev, "failed to get alias id, "
"errno %d\n", ret);
dev_err(&pdev->dev, "failed to get alias id, errno %d\n",
ret);
goto err0;
}
sdd->port_id = ret;
......@@ -1280,7 +1276,7 @@ static int __init s3c64xx_spi_probe(struct platform_device *pdev)
if (sdd->regs == NULL) {
dev_err(&pdev->dev, "Unable to remap IO\n");
ret = -ENXIO;
goto err1;
goto err0;
}
if (!sci->cfg_gpio && pdev->dev.of_node) {
......@@ -1289,36 +1285,36 @@ static int __init s3c64xx_spi_probe(struct platform_device *pdev)
} else if (sci->cfg_gpio == NULL || sci->cfg_gpio()) {
dev_err(&pdev->dev, "Unable to config gpio\n");
ret = -EBUSY;
goto err2;
goto err0;
}
/* Setup clocks */
sdd->clk = clk_get(&pdev->dev, "spi");
sdd->clk = devm_clk_get(&pdev->dev, "spi");
if (IS_ERR(sdd->clk)) {
dev_err(&pdev->dev, "Unable to acquire clock 'spi'\n");
ret = PTR_ERR(sdd->clk);
goto err3;
goto err1;
}
if (clk_prepare_enable(sdd->clk)) {
dev_err(&pdev->dev, "Couldn't enable clock 'spi'\n");
ret = -EBUSY;
goto err4;
goto err1;
}
sprintf(clk_name, "spi_busclk%d", sci->src_clk_nr);
sdd->src_clk = clk_get(&pdev->dev, clk_name);
sdd->src_clk = devm_clk_get(&pdev->dev, clk_name);
if (IS_ERR(sdd->src_clk)) {
dev_err(&pdev->dev,
"Unable to acquire clock '%s'\n", clk_name);
ret = PTR_ERR(sdd->src_clk);
goto err5;
goto err2;
}
if (clk_prepare_enable(sdd->src_clk)) {
dev_err(&pdev->dev, "Couldn't enable clock '%s'\n", clk_name);
ret = -EBUSY;
goto err6;
goto err2;
}
/* Setup Default Mode */
......@@ -1328,11 +1324,12 @@ static int __init s3c64xx_spi_probe(struct platform_device *pdev)
init_completion(&sdd->xfer_completion);
INIT_LIST_HEAD(&sdd->queue);
ret = request_irq(irq, s3c64xx_spi_irq, 0, "spi-s3c64xx", sdd);
ret = devm_request_irq(&pdev->dev, irq, s3c64xx_spi_irq, 0,
"spi-s3c64xx", sdd);
if (ret != 0) {
dev_err(&pdev->dev, "Failed to request IRQ %d: %d\n",
irq, ret);
goto err7;
goto err3;
}
writel(S3C64XX_SPI_INT_RX_OVERRUN_EN | S3C64XX_SPI_INT_RX_UNDERRUN_EN |
......@@ -1342,11 +1339,10 @@ static int __init s3c64xx_spi_probe(struct platform_device *pdev)
if (spi_register_master(master)) {
dev_err(&pdev->dev, "cannot register SPI master\n");
ret = -EBUSY;
goto err8;
goto err3;
}
dev_dbg(&pdev->dev, "Samsung SoC SPI Driver loaded for Bus SPI-%d "
"with %d Slaves attached\n",
dev_dbg(&pdev->dev, "Samsung SoC SPI Driver loaded for Bus SPI-%d with %d Slaves attached\n",
sdd->port_id, master->num_chipselect);
dev_dbg(&pdev->dev, "\tIOmem=[0x%x-0x%x]\tDMA=[Rx-%d, Tx-%d]\n",
mem_res->end, mem_res->start,
......@@ -1356,21 +1352,13 @@ static int __init s3c64xx_spi_probe(struct platform_device *pdev)
return 0;
err8:
free_irq(irq, sdd);
err7:
err3:
clk_disable_unprepare(sdd->src_clk);
err6:
clk_put(sdd->src_clk);
err5:
err2:
clk_disable_unprepare(sdd->clk);
err4:
clk_put(sdd->clk);
err3:
err1:
if (!sdd->cntrlr_info->cfg_gpio && pdev->dev.of_node)
s3c64xx_spi_dt_gpio_free(sdd);
err2:
err1:
err0:
platform_set_drvdata(pdev, NULL);
spi_master_put(master);
......@@ -1389,13 +1377,9 @@ static int s3c64xx_spi_remove(struct platform_device *pdev)
writel(0, sdd->regs + S3C64XX_SPI_INT_EN);
free_irq(platform_get_irq(pdev, 0), sdd);
clk_disable_unprepare(sdd->src_clk);
clk_put(sdd->src_clk);
clk_disable_unprepare(sdd->clk);
clk_put(sdd->clk);
if (!sdd->cntrlr_info->cfg_gpio && pdev->dev.of_node)
s3c64xx_spi_dt_gpio_free(sdd);
......
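The error-label renumbering above is a side effect of converting the clocks and the IRQ to managed (devm_*) resources, which the driver core releases automatically on probe failure and on unbind. A minimal sketch of the pattern, not the driver's exact code (names prefixed example_ are illustrative):

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>

static irqreturn_t example_irq(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int example_probe(struct platform_device *pdev)
{
	struct clk *clk;
	int irq, ret;

	clk = devm_clk_get(&pdev->dev, "spi");	/* put automatically on detach */
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	ret = devm_request_irq(&pdev->dev, irq, example_irq, 0,
			       dev_name(&pdev->dev), pdev);
	if (ret)
		return ret;			/* no free_irq() needed here */

	return clk_prepare_enable(clk);		/* still undone in remove() */
}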
......@@ -20,6 +20,7 @@
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
......@@ -592,6 +593,37 @@ static u32 sh_msiof_spi_txrx_word(struct spi_device *spi, unsigned nsecs,
return 0;
}
#ifdef CONFIG_OF
static struct sh_msiof_spi_info *sh_msiof_spi_parse_dt(struct device *dev)
{
struct sh_msiof_spi_info *info;
struct device_node *np = dev->of_node;
u32 num_cs = 0;
info = devm_kzalloc(dev, sizeof(struct sh_msiof_spi_info), GFP_KERNEL);
if (!info) {
dev_err(dev, "failed to allocate setup data\n");
return NULL;
}
/* Parse the MSIOF properties */
of_property_read_u32(np, "num-cs", &num_cs);
of_property_read_u32(np, "renesas,tx-fifo-size",
&info->tx_fifo_override);
of_property_read_u32(np, "renesas,rx-fifo-size",
&info->rx_fifo_override);
info->num_chipselect = num_cs;
return info;
}
#else
static struct sh_msiof_spi_info *sh_msiof_spi_parse_dt(struct device *dev)
{
return NULL;
}
#endif
static int sh_msiof_spi_probe(struct platform_device *pdev)
{
struct resource *r;
......@@ -610,7 +642,17 @@ static int sh_msiof_spi_probe(struct platform_device *pdev)
p = spi_master_get_devdata(master);
platform_set_drvdata(pdev, p);
p->info = pdev->dev.platform_data;
if (pdev->dev.of_node)
p->info = sh_msiof_spi_parse_dt(&pdev->dev);
else
p->info = pdev->dev.platform_data;
if (!p->info) {
dev_err(&pdev->dev, "failed to obtain device info\n");
ret = -ENXIO;
goto err1;
}
init_completion(&p->done);
p->clk = clk_get(&pdev->dev, NULL);
......@@ -715,6 +757,17 @@ static int sh_msiof_spi_runtime_nop(struct device *dev)
return 0;
}
#ifdef CONFIG_OF
static const struct of_device_id sh_msiof_match[] = {
{ .compatible = "renesas,sh-msiof", },
{ .compatible = "renesas,sh-mobile-msiof", },
{},
};
MODULE_DEVICE_TABLE(of, sh_msiof_match);
#else
#define sh_msiof_match NULL
#endif
static struct dev_pm_ops sh_msiof_spi_dev_pm_ops = {
.runtime_suspend = sh_msiof_spi_runtime_nop,
.runtime_resume = sh_msiof_spi_runtime_nop,
......@@ -727,6 +780,7 @@ static struct platform_driver sh_msiof_spi_drv = {
.name = "spi_sh_msiof",
.owner = THIS_MODULE,
.pm = &sh_msiof_spi_dev_pm_ops,
.of_match_table = sh_msiof_match,
},
};
module_platform_driver(sh_msiof_spi_drv);
......
......@@ -382,8 +382,7 @@ spi_sirfsoc_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
sspi = spi_master_get_devdata(spi->master);
bits_per_word = t && t->bits_per_word ? t->bits_per_word :
spi->bits_per_word;
bits_per_word = (t) ? t->bits_per_word : spi->bits_per_word;
hz = t && t->speed_hz ? t->speed_hz : spi->max_speed_hz;
/* Enable IO mode for RX, TX */
......@@ -570,7 +569,7 @@ static int spi_sirfsoc_probe(struct platform_device *pdev)
ret = -EINVAL;
goto free_pin;
}
clk_enable(sspi->clk);
clk_prepare_enable(sspi->clk);
sspi->ctrl_freq = clk_get_rate(sspi->clk);
init_completion(&sspi->done);
......@@ -594,7 +593,7 @@ static int spi_sirfsoc_probe(struct platform_device *pdev)
return 0;
free_clk:
clk_disable(sspi->clk);
clk_disable_unprepare(sspi->clk);
clk_put(sspi->clk);
free_pin:
pinctrl_put(sspi->p);
......@@ -618,7 +617,7 @@ static int spi_sirfsoc_remove(struct platform_device *pdev)
if (sspi->chipselect[i] > 0)
gpio_free(sspi->chipselect[i]);
}
clk_disable(sspi->clk);
clk_disable_unprepare(sspi->clk);
clk_put(sspi->clk);
pinctrl_put(sspi->p);
spi_master_put(master);
......@@ -659,6 +658,7 @@ static const struct dev_pm_ops spi_sirfsoc_pm_ops = {
static const struct of_device_id spi_sirfsoc_of_match[] = {
{ .compatible = "sirf,prima2-spi", },
{ .compatible = "sirf,marco-spi", },
{}
};
MODULE_DEVICE_TABLE(of, spi_sirfsoc_of_match);
......
......@@ -269,9 +269,7 @@ static int tegra_sflash_start_transfer_one(struct spi_device *spi,
u32 speed;
unsigned long command;
speed = t->speed_hz ? t->speed_hz : spi->max_speed_hz;
if (!speed)
speed = tsd->spi_max_frequency;
speed = t->speed_hz;
if (speed != tsd->cur_speed) {
clk_set_rate(tsd->clk, speed);
tsd->cur_speed = speed;
......@@ -319,6 +317,15 @@ static int tegra_sflash_start_transfer_one(struct spi_device *spi,
return tegra_sflash_start_cpu_based_transfer(tsd, t);
}
static int tegra_sflash_setup(struct spi_device *spi)
{
struct tegra_sflash_data *tsd = spi_master_get_devdata(spi->master);
/* Set speed to the spi max frequency if the spi device has not set it */
spi->max_speed_hz = spi->max_speed_hz ? : tsd->spi_max_frequency;
return 0;
}
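tegra_sflash_setup() now seeds spi->max_speed_hz from the controller's maximum, so start_transfer_one() can rely on t->speed_hz being filled in by the core. The GNU '?:' shorthand used above is equivalent to the following, shown only for clarity:

	if (!spi->max_speed_hz)
		spi->max_speed_hz = tsd->spi_max_frequency;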
static int tegra_sflash_transfer_one_message(struct spi_master *master,
struct spi_message *msg)
{
......@@ -492,6 +499,7 @@ static int tegra_sflash_probe(struct platform_device *pdev)
/* the spi->mode bits understood by this driver: */
master->mode_bits = SPI_CPOL | SPI_CPHA;
master->setup = tegra_sflash_setup;
master->transfer_one_message = tegra_sflash_transfer_one_message;
master->num_chipselect = MAX_CHIP_SELECT;
master->bus_num = -1;
......
......@@ -284,8 +284,7 @@ static unsigned tegra_slink_calculate_curr_xfer_param(
unsigned max_len;
unsigned total_fifo_words;
bits_per_word = t->bits_per_word ? t->bits_per_word :
spi->bits_per_word;
bits_per_word = t->bits_per_word;
tspi->bytes_per_word = (bits_per_word - 1) / 8 + 1;
if (bits_per_word == 8 || bits_per_word == 16) {
......@@ -378,8 +377,7 @@ static unsigned int tegra_slink_read_rx_fifo_to_client_rxbuf(
} else {
unsigned int bits_per_word;
bits_per_word = t->bits_per_word ? t->bits_per_word :
tspi->cur_spi->bits_per_word;
bits_per_word = t->bits_per_word;
for (count = 0; count < rx_full_count; count++) {
x = tegra_slink_readl(tspi, SLINK_RX_FIFO);
for (i = 0; (i < tspi->bytes_per_word); i++)
......@@ -444,8 +442,7 @@ static void tegra_slink_copy_spi_rxbuf_to_client_rxbuf(
unsigned int x;
unsigned int rx_mask, bits_per_word;
bits_per_word = t->bits_per_word ? t->bits_per_word :
tspi->cur_spi->bits_per_word;
bits_per_word = t->bits_per_word;
rx_mask = (1 << bits_per_word) - 1;
for (count = 0; count < tspi->curr_dma_words; count++) {
x = tspi->rx_dma_buf[count];
......@@ -728,9 +725,7 @@ static int tegra_slink_start_transfer_one(struct spi_device *spi,
unsigned long command2;
bits_per_word = t->bits_per_word;
speed = t->speed_hz ? t->speed_hz : spi->max_speed_hz;
if (!speed)
speed = tspi->spi_max_frequency;
speed = t->speed_hz;
if (speed != tspi->cur_speed) {
clk_set_rate(tspi->clk, speed * 4);
tspi->cur_speed = speed;
......@@ -841,6 +836,8 @@ static int tegra_slink_setup(struct spi_device *spi)
BUG_ON(spi->chip_select >= MAX_CHIP_SELECT);
/* Set speed to the spi max frequency if the spi device has not set it */
spi->max_speed_hz = spi->max_speed_hz ? : tspi->spi_max_frequency;
ret = pm_runtime_get_sync(tspi->dev);
if (ret < 0) {
dev_err(tspi->dev, "pm runtime failed, e = %d\n", ret);
......
......@@ -189,9 +189,8 @@ static void txx9spi_work_one(struct txx9spi *c, struct spi_message *m)
unsigned int len = t->len;
unsigned int wsize;
u32 speed_hz = t->speed_hz ? : spi->max_speed_hz;
u8 bits_per_word = t->bits_per_word ? : spi->bits_per_word;
u8 bits_per_word = t->bits_per_word;
bits_per_word = bits_per_word ? : 8;
wsize = bits_per_word >> 3; /* in bytes */
if (prev_speed_hz != speed_hz
......@@ -316,9 +315,8 @@ static int txx9spi_transfer(struct spi_device *spi, struct spi_message *m)
/* check each transfer's parameters */
list_for_each_entry (t, &m->transfers, transfer_list) {
u32 speed_hz = t->speed_hz ? : spi->max_speed_hz;
u8 bits_per_word = t->bits_per_word ? : spi->bits_per_word;
u8 bits_per_word = t->bits_per_word;
bits_per_word = bits_per_word ? : 8;
if (!t->tx_buf && !t->rx_buf && t->len)
return -EINVAL;
if (bits_per_word != 8 && bits_per_word != 16)
......@@ -337,7 +335,7 @@ static int txx9spi_transfer(struct spi_device *spi, struct spi_message *m)
return 0;
}
static int __init txx9spi_probe(struct platform_device *dev)
static int txx9spi_probe(struct platform_device *dev)
{
struct spi_master *master;
struct txx9spi *c;
......@@ -432,7 +430,7 @@ static int __init txx9spi_probe(struct platform_device *dev)
return ret;
}
static int __exit txx9spi_remove(struct platform_device *dev)
static int txx9spi_remove(struct platform_device *dev)
{
struct spi_master *master = spi_master_get(platform_get_drvdata(dev));
struct txx9spi *c = spi_master_get_devdata(master);
......@@ -450,7 +448,7 @@ static int __exit txx9spi_remove(struct platform_device *dev)
MODULE_ALIAS("platform:spi_txx9");
static struct platform_driver txx9spi_driver = {
.remove = __exit_p(txx9spi_remove),
.remove = txx9spi_remove,
.driver = {
.name = "spi_txx9",
.owner = THIS_MODULE,
......
......@@ -1080,7 +1080,8 @@ static int of_spi_register_master(struct spi_master *master)
if (!master->cs_gpios)
return -ENOMEM;
memset(cs, -EINVAL, master->num_chipselect);
for (i = 0; i < master->num_chipselect; i++)
cs[i] = -EINVAL;
for (i = 0; i < nb; i++)
cs[i] = of_get_named_gpio(np, "cs-gpios", i);
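The memset() being replaced could never work: memset() stores a byte value, so each byte of the array became 0xEA (the low byte of -EINVAL), and the third argument was an element count rather than a byte count. A standalone sketch of the difference, outside the driver context:

#include <linux/errno.h>
#include <linux/string.h>

static void cs_init_example(void)
{
	int cs[4];
	int i;

	/* Wrong: writes the byte 0xEA into the first 4 bytes only. */
	memset(cs, -EINVAL, 4);

	/* Right: every element really is -EINVAL afterwards. */
	for (i = 0; i < 4; i++)
		cs[i] = -EINVAL;
}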
......@@ -1135,6 +1136,9 @@ int spi_register_master(struct spi_master *master)
if (master->num_chipselect == 0)
return -EINVAL;
if ((master->bus_num < 0) && master->dev.of_node)
master->bus_num = of_alias_get_id(master->dev.of_node, "spi");
/* convention: dynamically assigned bus IDs count down from the max */
if (master->bus_num < 0) {
/* FIXME switch to an IDR based scheme, something like
......@@ -1366,12 +1370,14 @@ static int __spi_async(struct spi_device *spi, struct spi_message *message)
}
/**
* Set transfer bits_per_word as spi device default if it is not
* set for this transfer.
* Set transfer bits_per_word and max speed as spi device default if
* it is not set for this transfer.
*/
list_for_each_entry(xfer, &message->transfers, transfer_list) {
if (!xfer->bits_per_word)
xfer->bits_per_word = spi->bits_per_word;
if (!xfer->speed_hz)
xfer->speed_hz = spi->max_speed_hz;
}
message->spi = spi;
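With this core change a protocol driver may leave bits_per_word and speed_hz zeroed in individual transfers; the spi_device defaults are applied before the message reaches the controller. A hedged sketch of a caller relying on that (function name and opcode are illustrative; real drivers should use DMA-safe buffers rather than the stack):

#include <linux/spi/spi.h>

static int example_read_reg(struct spi_device *spi, u8 reg, u8 *val)
{
	struct spi_transfer xfers[2] = {
		{ .tx_buf = &reg, .len = 1 },	/* bits_per_word/speed_hz left 0 */
		{ .rx_buf = val,  .len = 1 },	/* -> core fills in spi defaults */
	};
	struct spi_message msg;

	spi_message_init(&msg);
	spi_message_add_tail(&xfers[0], &msg);
	spi_message_add_tail(&xfers[1], &msg);
	return spi_sync(spi, &msg);
}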
......@@ -1656,7 +1662,8 @@ int spi_write_then_read(struct spi_device *spi,
* using the pre-allocated buffer or the transfer is too large.
*/
if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) {
local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx), GFP_KERNEL);
local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx),
GFP_KERNEL | GFP_DMA);
if (!local_buf)
return -ENOMEM;
} else {
......
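Allocating the overflow bounce buffer with GFP_DMA preserves spi_write_then_read()'s guarantee that callers may pass arbitrary (even stack) memory, also for transfers larger than SPI_BUFSIZ on controllers that DMA directly out of the buffer. Typical usage, a hedged sketch with an arbitrary opcode:

#include <linux/spi/spi.h>

static int example_read_id(struct spi_device *spi, u8 *id, size_t len)
{
	u8 cmd = 0x9f;		/* illustrative "read id" opcode */

	/* cmd and id may live on the stack; the core bounces them through
	 * a DMA-safe buffer internally. */
	return spi_write_then_read(spi, &cmd, 1, id, len);
}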
......@@ -22,6 +22,9 @@ struct omap2_mcspi_dev_attr {
struct omap2_mcspi_device_config {
unsigned turbo_mode:1;
/* toggle chip select after every word */
unsigned cs_per_word:1;
};
#endif
......@@ -155,6 +155,14 @@
#define SSACD_ACDS(x) ((x) << 0) /* Audio clock divider select */
#define SSACD_SCDX8 (1 << 7) /* SYSCLK division ratio select */
/* LPSS SSP */
#define SSITF 0x44 /* TX FIFO trigger level */
#define SSITF_TxLoThresh(x) (((x) - 1) << 8)
#define SSITF_TxHiThresh(x) ((x) - 1)
#define SSIRF 0x48 /* RX FIFO trigger level */
#define SSIRF_RxThresh(x) ((x) - 1)
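The new LPSS-only FIFO registers encode a trigger level of N entries as N-1 in the register field, which is what the TxLoThresh/TxHiThresh/RxThresh helpers hide. A small sketch of programming them, assuming the definitions above are in scope (the threshold values are arbitrary):

#include <linux/io.h>

static void example_set_lpss_fifo(void __iomem *ioaddr)
{
	/* refill TX below 8 entries, TX "high" mark at 224, RX trigger at 1 */
	__raw_writel(SSITF_TxLoThresh(8) | SSITF_TxHiThresh(224),
		     ioaddr + SSITF);
	__raw_writel(SSIRF_RxThresh(1), ioaddr + SSIRF);
}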
enum pxa_ssp_type {
SSP_UNDEFINED = 0,
PXA25x_SSP, /* pxa 210, 250, 255, 26x */
......@@ -164,6 +172,7 @@ enum pxa_ssp_type {
PXA168_SSP,
PXA910_SSP,
CE4100_SSP,
LPSS_SSP,
};
struct ssp_device {
......@@ -206,6 +215,15 @@ static inline u32 pxa_ssp_read_reg(struct ssp_device *dev, u32 reg)
return __raw_readl(dev->mmio_base + reg);
}
#ifdef CONFIG_ARCH_PXA
struct ssp_device *pxa_ssp_request(int port, const char *label);
void pxa_ssp_free(struct ssp_device *);
#else
static inline struct ssp_device *pxa_ssp_request(int port, const char *label)
{
return NULL;
}
static inline void pxa_ssp_free(struct ssp_device *ssp) {}
#endif
#endif
......@@ -28,6 +28,15 @@ struct pxa2xx_spi_master {
u32 clock_enable;
u16 num_chipselect;
u8 enable_dma;
/* DMA engine specific config */
int rx_chan_id;
int tx_chan_id;
int rx_slave_id;
int tx_slave_id;
/* For non-PXA arches */
struct ssp_device ssp;
};
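Platform or PCI glue code fills the new DMA engine routing fields before registering the pxa2xx-spi device; a hedged sketch of such platform data, with channel and request-line IDs invented for illustration:

static struct pxa2xx_spi_master example_spi_pdata = {
	.num_chipselect	= 1,
	.enable_dma	= 1,
	/* DMA engine routing; the IDs below are placeholders */
	.rx_chan_id	= 6,
	.tx_chan_id	= 7,
	.rx_slave_id	= 2,
	.tx_slave_id	= 3,
};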
/* spi_board_info.controller_data for SPI slave devices,
......@@ -35,6 +44,7 @@ struct pxa2xx_spi_master {
*/
struct pxa2xx_spi_chip {
u8 tx_threshold;
u8 tx_hi_threshold;
u8 rx_threshold;
u8 dma_burst_size;
u32 timeout;
......@@ -50,103 +60,5 @@ struct pxa2xx_spi_chip {
extern void pxa2xx_set_spi_info(unsigned id, struct pxa2xx_spi_master *info);
#else
/*
* This is the implementation for CE4100 on x86. ARM defines these in the
* mach/ or plat/ include path.
* The CE4100 does not provide DMA support. These bits are here only to let
* the driver compile and will never be used. DMA support may be added at a
* later point in time.
*/
#define DCSR(n) (n)
#define DSADR(n) (n)
#define DTADR(n) (n)
#define DCMD(n) (n)
#define DRCMR(n) (n)
#define DCSR_RUN (1 << 31) /* Run Bit */
#define DCSR_NODESC (1 << 30) /* No-Descriptor Fetch */
#define DCSR_STOPIRQEN (1 << 29) /* Stop Interrupt Enable */
#define DCSR_REQPEND (1 << 8) /* Request Pending (read-only) */
#define DCSR_STOPSTATE (1 << 3) /* Stop State (read-only) */
#define DCSR_ENDINTR (1 << 2) /* End Interrupt */
#define DCSR_STARTINTR (1 << 1) /* Start Interrupt */
#define DCSR_BUSERR (1 << 0) /* Bus Error Interrupt */
#define DCSR_EORIRQEN (1 << 28) /* End of Receive Interrupt Enable */
#define DCSR_EORJMPEN (1 << 27) /* Jump to next descriptor on EOR */
#define DCSR_EORSTOPEN (1 << 26) /* STOP on an EOR */
#define DCSR_SETCMPST (1 << 25) /* Set Descriptor Compare Status */
#define DCSR_CLRCMPST (1 << 24) /* Clear Descriptor Compare Status */
#define DCSR_CMPST (1 << 10) /* The Descriptor Compare Status */
#define DCSR_EORINTR (1 << 9) /* The end of Receive */
#define DRCMR_MAPVLD (1 << 7) /* Map Valid */
#define DRCMR_CHLNUM 0x1f /* mask for Channel Number */
#define DDADR_DESCADDR 0xfffffff0 /* Address of next descriptor */
#define DDADR_STOP (1 << 0) /* Stop */
#define DCMD_INCSRCADDR (1 << 31) /* Source Address Increment Setting. */
#define DCMD_INCTRGADDR (1 << 30) /* Target Address Increment Setting. */
#define DCMD_FLOWSRC (1 << 29) /* Flow Control by the source. */
#define DCMD_FLOWTRG (1 << 28) /* Flow Control by the target. */
#define DCMD_STARTIRQEN (1 << 22) /* Start Interrupt Enable */
#define DCMD_ENDIRQEN (1 << 21) /* End Interrupt Enable */
#define DCMD_ENDIAN (1 << 18) /* Device Endian-ness. */
#define DCMD_BURST8 (1 << 16) /* 8 byte burst */
#define DCMD_BURST16 (2 << 16) /* 16 byte burst */
#define DCMD_BURST32 (3 << 16) /* 32 byte burst */
#define DCMD_WIDTH1 (1 << 14) /* 1 byte width */
#define DCMD_WIDTH2 (2 << 14) /* 2 byte width (HalfWord) */
#define DCMD_WIDTH4 (3 << 14) /* 4 byte width (Word) */
#define DCMD_LENGTH 0x01fff /* length mask (max = 8K - 1) */
/*
* Descriptor structure for PXA's DMA engine
* Note: this structure must always be aligned to a 16-byte boundary.
*/
typedef enum {
DMA_PRIO_HIGH = 0,
DMA_PRIO_MEDIUM = 1,
DMA_PRIO_LOW = 2
} pxa_dma_prio;
/*
* DMA registration
*/
static inline int pxa_request_dma(char *name,
pxa_dma_prio prio,
void (*irq_handler)(int, void *),
void *data)
{
return -ENODEV;
}
static inline void pxa_free_dma(int dma_ch)
{
}
/*
* The CE4100 does not have the clk framework implemented and SPI clock can
* not be switched on/off or the divider changed.
*/
static inline void clk_disable(struct clk *clk)
{
}
static inline int clk_enable(struct clk *clk)
{
return 0;
}
static inline unsigned long clk_get_rate(struct clk *clk)
{
return 3686400;
}
#endif
#endif
......@@ -57,6 +57,8 @@ extern struct bus_type spi_bus_type;
* @modalias: Name of the driver to use with this device, or an alias
* for that name. This appears in the sysfs "modalias" attribute
* for driver coldplugging, and in uevents used for hotplugging
* @cs_gpio: gpio number of the chipselect line (optional, -EINVAL when
* not using a GPIO line)
*
* A @spi_device is used to interchange data between an SPI slave
* (usually a discrete chip) and CPU memory.
......@@ -258,6 +260,9 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
* @unprepare_transfer_hardware: there are currently no more messages on the
* queue so the subsystem notifies the driver that it may relax the
* hardware by issuing this call
* @cs_gpios: Array of GPIOs to use as chip select lines; one per CS
* number. Any individual value may be -EINVAL for CS lines that
* are not GPIOs (driven by the SPI controller itself).
*
* Each SPI master controller can communicate with one or more @spi_device
* children. These make a small bus, sharing MOSI, MISO and SCK signals
......
......@@ -62,8 +62,8 @@
*/
struct spi_gpio_platform_data {
unsigned sck;
unsigned mosi;
unsigned miso;
unsigned long mosi;
unsigned long miso;
u16 num_chipselect;
};
......
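Widening mosi and miso to unsigned long lets board code pass the special "no line" flag values instead of a GPIO number when a signal is not wired up. A minimal instantiation sketch (the GPIO numbers are invented; SPI_GPIO_NO_MISO is assumed to come from the same spi_gpio.h header):

static struct spi_gpio_platform_data example_spi_gpio_pdata = {
	.sck		= 10,
	.mosi		= 11,
	.miso		= SPI_GPIO_NO_MISO,	/* board has no MISO line */
	.num_chipselect	= 1,
};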