Commit 6ad45a27 authored by Mark Brown

spi: Make core DMA mapping functions generate scatterlists

We cannot unconditionally use dma_map_single() to map data for use with
SPI since transfers may exceed a page and the virtual addresses supplied
may not be backed by physically contiguous pages. Further, addresses
allocated using vmalloc() need to be mapped differently from other
addresses.
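
For illustration, a minimal sketch (not part of this patch) of why the
two cases differ: a kmalloc()ed buffer sits in the kernel's direct
mapping, so one lookup describes it, while a vmalloc()ed buffer is only
virtually contiguous and every page must be resolved individually:

	#include <linux/mm.h>
	#include <linux/vmalloc.h>

	/* Hypothetical helper, illustration only: find the page backing
	 * one address.  For vmalloc() memory this lookup must be
	 * repeated for every page in the buffer.
	 */
	static struct page *buf_to_page(void *buf)
	{
		if (is_vmalloc_addr(buf))
			return vmalloc_to_page(buf);	/* per-page walk */
		return virt_to_page(buf);		/* direct mapping */
	}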

Currently only the MXS driver handles all of this. A few other drivers
handle the possibility that buffers may not be physically contiguous,
which is the main potential problem, but many don't even do that.
Factoring this out into the core will make it easier for drivers to do a
good job, so if the driver is using the core DMA code we now generate a
scatterlist instead of mapping to a single address.
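
As a sketch of the driver-side contract (hypothetical driver, not part
of this patch): a controller opts in by setting master->can_dma along
with valid master->dma_tx/dma_rx dmaengine channels, since the core
maps against those channels' devices; the core then builds
xfer->tx_sg/rx_sg for any transfer the callback accepts:

	#include <linux/spi/spi.h>

	/* Hypothetical policy: let the core DMA-map anything that will
	 * not fit in the controller FIFO (the threshold is made up).
	 */
	static bool foo_spi_can_dma(struct spi_master *master,
				    struct spi_device *spi,
				    struct spi_transfer *xfer)
	{
		return xfer->len > 32;
	}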

This code is mainly based on a combination of the existing code in the
MXS and PXA2xx drivers. In the future we should be able to extend it to
allow the core to concatenate adjacent transfers if they are compatible,
improving performance.

Currently, for simplicity, clients are not allowed to use the
scatterlist when they do their own DMA mapping; in the future the
existing single-address mappings will be replaced with use of the
scatterlist, most likely as part of pre-verifying transfers.

This change makes it mandatory to use scatterlists when using the core
DMA mapping, so update the s3c64xx driver to do so when used with
dmaengine. This makes the code uglier, but it is expected that the old
s3c-dma code can be removed very soon.
Signed-off-by: Mark Brown <broonie@linaro.org>
parent 3a2eba9b
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -381,7 +381,7 @@ static void s3c64xx_spi_dma_stop(struct s3c64xx_spi_driver_data *sdd,
 #else
 static void prepare_dma(struct s3c64xx_spi_dma_data *dma,
-			unsigned len, dma_addr_t buf)
+			struct sg_table *sgt)
 {
 	struct s3c64xx_spi_driver_data *sdd;
 	struct dma_slave_config config;
@@ -407,8 +407,8 @@ static void prepare_dma(struct s3c64xx_spi_dma_data *dma,
 		dmaengine_slave_config(dma->ch, &config);
 	}
 
-	desc = dmaengine_prep_slave_single(dma->ch, buf, len,
-					dma->direction, DMA_PREP_INTERRUPT);
+	desc = dmaengine_prep_slave_sg(dma->ch, sgt->sgl, sgt->nents,
+				       dma->direction, DMA_PREP_INTERRUPT);
 
 	desc->callback = s3c64xx_spi_dmacb;
 	desc->callback_param = dma;
@@ -515,7 +515,11 @@ static void enable_datapath(struct s3c64xx_spi_driver_data *sdd,
 		chcfg |= S3C64XX_SPI_CH_TXCH_ON;
 		if (dma_mode) {
 			modecfg |= S3C64XX_SPI_MODE_TXDMA_ON;
+#ifndef CONFIG_S3C_DMA
+			prepare_dma(&sdd->tx_dma, &xfer->tx_sg);
+#else
 			prepare_dma(&sdd->tx_dma, xfer->len, xfer->tx_dma);
+#endif
 		} else {
 			switch (sdd->cur_bpw) {
 			case 32:
@@ -547,7 +551,11 @@ static void enable_datapath(struct s3c64xx_spi_driver_data *sdd,
 			writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff)
 					| S3C64XX_SPI_PACKET_CNT_EN,
 					regs + S3C64XX_SPI_PACKET_CNT);
+#ifndef CONFIG_S3C_DMA
+			prepare_dma(&sdd->rx_dma, &xfer->rx_sg);
+#else
 			prepare_dma(&sdd->rx_dma, xfer->len, xfer->rx_dma);
+#endif
 		}
 	}
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -582,13 +582,70 @@ static void spi_set_cs(struct spi_device *spi, bool enable)
 		spi->master->set_cs(spi, !enable);
 }
 
+static int spi_map_buf(struct spi_master *master, struct device *dev,
+		       struct sg_table *sgt, void *buf, size_t len,
+		       enum dma_data_direction dir)
+{
+	const bool vmalloced_buf = is_vmalloc_addr(buf);
+	const int desc_len = vmalloced_buf ? PAGE_SIZE : master->max_dma_len;
+	const int sgs = DIV_ROUND_UP(len, desc_len);
+	struct page *vm_page;
+	void *sg_buf;
+	size_t min;
+	int i, ret;
+
+	ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
+	if (ret != 0)
+		return ret;
+
+	for (i = 0; i < sgs; i++) {
+		min = min_t(size_t, len, desc_len);
+
+		if (vmalloced_buf) {
+			vm_page = vmalloc_to_page(buf);
+			if (!vm_page) {
+				sg_free_table(sgt);
+				return -ENOMEM;
+			}
+			sg_buf = page_address(vm_page) +
+				((size_t)buf & ~PAGE_MASK);
+		} else {
+			sg_buf = buf;
+		}
+
+		sg_set_buf(&sgt->sgl[i], sg_buf, min);
+
+		buf += min;
+		len -= min;
+	}
+
+	ret = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
+	if (ret < 0) {
+		sg_free_table(sgt);
+		return ret;
+	}
+
+	sgt->nents = ret;
+
+	return 0;
+}
+
+static void spi_unmap_buf(struct spi_master *master, struct device *dev,
+			  struct sg_table *sgt, enum dma_data_direction dir)
+{
+	if (sgt->orig_nents) {
+		dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
+		sg_free_table(sgt);
+	}
+}
+
 static int spi_map_msg(struct spi_master *master, struct spi_message *msg)
 {
-	struct device *dev = master->dev.parent;
 	struct device *tx_dev, *rx_dev;
 	struct spi_transfer *xfer;
 	void *tmp;
 	size_t max_tx, max_rx;
+	int ret;
 
 	if (master->flags & (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX)) {
 		max_tx = 0;
@@ -631,7 +688,7 @@ static int spi_map_msg(struct spi_master *master, struct spi_message *msg)
 		}
 	}
 
-	if (msg->is_dma_mapped || !master->can_dma)
+	if (!master->can_dma)
 		return 0;
 
 	tx_dev = &master->dma_tx->dev->device;
@@ -642,25 +699,21 @@ static int spi_map_msg(struct spi_master *master, struct spi_message *msg)
 			continue;
 
 		if (xfer->tx_buf != NULL) {
-			xfer->tx_dma = dma_map_single(tx_dev,
-						      (void *)xfer->tx_buf,
-						      xfer->len,
-						      DMA_TO_DEVICE);
-			if (dma_mapping_error(dev, xfer->tx_dma)) {
-				dev_err(dev, "dma_map_single Tx failed\n");
-				return -ENOMEM;
-			}
+			ret = spi_map_buf(master, tx_dev, &xfer->tx_sg,
+					  (void *)xfer->tx_buf, xfer->len,
+					  DMA_TO_DEVICE);
+			if (ret != 0)
+				return ret;
 		}
 
 		if (xfer->rx_buf != NULL) {
-			xfer->rx_dma = dma_map_single(rx_dev,
-						      xfer->rx_buf, xfer->len,
-						      DMA_FROM_DEVICE);
-			if (dma_mapping_error(dev, xfer->rx_dma)) {
-				dev_err(dev, "dma_map_single Rx failed\n");
-				dma_unmap_single(tx_dev, xfer->tx_dma,
-						 xfer->len, DMA_TO_DEVICE);
-				return -ENOMEM;
-			}
+			ret = spi_map_buf(master, rx_dev, &xfer->rx_sg,
+					  xfer->rx_buf, xfer->len,
+					  DMA_FROM_DEVICE);
+			if (ret != 0) {
+				spi_unmap_buf(master, tx_dev, &xfer->tx_sg,
+					      DMA_TO_DEVICE);
+				return ret;
+			}
 		}
 	}
@@ -675,7 +728,7 @@ static int spi_unmap_msg(struct spi_master *master, struct spi_message *msg)
 	struct spi_transfer *xfer;
 	struct device *tx_dev, *rx_dev;
 
-	if (!master->cur_msg_mapped || msg->is_dma_mapped || !master->can_dma)
+	if (!master->cur_msg_mapped || !master->can_dma)
 		return 0;
 
 	tx_dev = &master->dma_tx->dev->device;
@@ -685,12 +738,8 @@ static int spi_unmap_msg(struct spi_master *master, struct spi_message *msg)
 		if (!master->can_dma(master, msg->spi, xfer))
 			continue;
 
-		if (xfer->rx_buf)
-			dma_unmap_single(rx_dev, xfer->rx_dma, xfer->len,
-					 DMA_FROM_DEVICE);
-		if (xfer->tx_buf)
-			dma_unmap_single(tx_dev, xfer->tx_dma, xfer->len,
-					 DMA_TO_DEVICE);
+		spi_unmap_buf(master, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
+		spi_unmap_buf(master, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
 	}
 
 	return 0;
@@ -1503,6 +1552,8 @@ int spi_register_master(struct spi_master *master)
 	mutex_init(&master->bus_lock_mutex);
 	master->bus_lock_flag = 0;
 	init_completion(&master->xfer_completion);
+	if (!master->max_dma_len)
+		master->max_dma_len = INT_MAX;
 
 	/* register the device, then userspace will see it.
 	 * registration fails if the bus ID is in use.
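
From the client side nothing changes, which is the point of doing the
mapping in the core. A sketch with hypothetical names: a protocol
driver may now hand even a vmalloc()ed buffer to spi_sync() and rely on
spi_map_buf() above to build the per-page scatterlist, where a single
dma_map_single() would have been unsafe:

	#include <linux/spi/spi.h>

	/* Hypothetical protocol-driver read; buf may be vmalloc()ed. */
	static int foo_read_block(struct spi_device *spi, void *buf,
				  size_t len)
	{
		struct spi_transfer xfer = {
			.rx_buf	= buf,
			.len	= len,
		};
		struct spi_message msg;

		spi_message_init(&msg);
		spi_message_add_tail(&xfer, &msg);

		return spi_sync(spi, &msg);
	}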
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -24,6 +24,7 @@
 #include <linux/slab.h>
 #include <linux/kthread.h>
 #include <linux/completion.h>
+#include <linux/scatterlist.h>
 
 struct dma_chan;
@@ -268,6 +269,7 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
  * @auto_runtime_pm: the core should ensure a runtime PM reference is held
  *                   while the hardware is prepared, using the parent
  *                   device for the spidev
+ * @max_dma_len: Maximum length of a DMA transfer for the device.
  * @prepare_transfer_hardware: a message will soon arrive from the queue
  *	so the subsystem requests the driver to prepare the transfer hardware
  *	by issuing this call
@@ -421,6 +423,7 @@ struct spi_master {
 	bool			cur_msg_prepared;
 	bool			cur_msg_mapped;
 	struct completion	xfer_completion;
+	size_t			max_dma_len;
 
 	int			(*prepare_transfer_hardware)(struct spi_master *master);
 	int			(*transfer_one_message)(struct spi_master *master,
@@ -533,6 +536,8 @@ extern struct spi_master *spi_busnum_to_master(u16 busnum);
  *      (optionally) changing the chipselect status, then starting
  *      the next transfer or completing this @spi_message.
  * @transfer_list: transfers are sequenced through @spi_message.transfers
+ * @tx_sg: Scatterlist for transmit, currently not for client use
+ * @rx_sg: Scatterlist for receive, currently not for client use
  *
  * SPI transfers always write the same number of bytes as they read.
  * Protocol drivers should always provide @rx_buf and/or @tx_buf.
@@ -600,6 +605,8 @@ struct spi_transfer {
 	dma_addr_t	tx_dma;
 	dma_addr_t	rx_dma;
+	struct sg_table tx_sg;
+	struct sg_table rx_sg;
 
 	unsigned	cs_change:1;
 	unsigned	tx_nbits:3;
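
Finally, a hypothetical probe path showing where the new field fits:
max_dma_len bounds the length of each scatterlist segment for
non-vmalloc buffers and, as seen above, defaults to INT_MAX in
spi_register_master() when left at zero. struct foo_spi and the 64K
limit are made up; foo_spi_can_dma() is the sketch from earlier.

	#include <linux/platform_device.h>
	#include <linux/sizes.h>
	#include <linux/spi/spi.h>

	static int foo_spi_probe(struct platform_device *pdev)
	{
		struct spi_master *master;

		master = spi_alloc_master(&pdev->dev,
					  sizeof(struct foo_spi));
		if (!master)
			return -ENOMEM;

		master->can_dma = foo_spi_can_dma;	/* sketch above */
		master->max_dma_len = SZ_64K;	/* hypothetical HW limit */

		return devm_spi_register_master(&pdev->dev, master);
	}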