Commit feeef096 authored by Heiner Kallweit, committed by Ulf Hansson

mmc: use new core function mmc_get_dma_dir

Use new core function mmc_get_dma_dir().
Signed-off-by: Heiner Kallweit <hkallweit1@gmail.com>
Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
parent 6335d683
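
For reference, mmc_get_dma_dir() is the inline helper added to include/linux/mmc/host.h by the parent commit. A minimal sketch of the helper (assuming the parent commit's one-line definition) is:

static inline enum dma_data_direction mmc_get_dma_dir(struct mmc_data *data)
{
	/* Writes move data to the device; everything else is a read from it. */
	return data->flags & MMC_DATA_WRITE ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
}

Each hunk below replaces a driver-local open-coded test of data->flags (or a per-driver wrapper such as dw_mci_get_dma_dir()) with a call to this shared helper; no behaviour changes.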
@@ -212,10 +212,7 @@ static void goldfish_mmc_xfer_done(struct goldfish_mmc_host *host,
 	if (host->dma_in_use) {
 		enum dma_data_direction dma_data_dir;
 
-		if (data->flags & MMC_DATA_WRITE)
-			dma_data_dir = DMA_TO_DEVICE;
-		else
-			dma_data_dir = DMA_FROM_DEVICE;
+		dma_data_dir = mmc_get_dma_dir(data);
 
 		if (dma_data_dir == DMA_FROM_DEVICE) {
 			/*
@@ -390,10 +387,7 @@ static void goldfish_mmc_prepare_data(struct goldfish_mmc_host *host,
 	 */
 	sg_len = (data->blocks == 1) ? 1 : data->sg_len;
 
-	if (data->flags & MMC_DATA_WRITE)
-		dma_data_dir = DMA_TO_DEVICE;
-	else
-		dma_data_dir = DMA_FROM_DEVICE;
+	dma_data_dir = mmc_get_dma_dir(data);
 
 	host->sg_len = dma_map_sg(mmc_dev(host->mmc), data->sg,
 				  sg_len, dma_data_dir);
......
@@ -954,8 +954,7 @@ static void atmci_pdc_cleanup(struct atmel_mci *host)
 	if (data)
 		dma_unmap_sg(&host->pdev->dev,
 			     data->sg, data->sg_len,
-			     ((data->flags & MMC_DATA_WRITE)
-			      ? DMA_TO_DEVICE : DMA_FROM_DEVICE));
+			     mmc_get_dma_dir(data));
 }
 
 /*
@@ -993,8 +992,7 @@ static void atmci_dma_cleanup(struct atmel_mci *host)
 	if (data)
 		dma_unmap_sg(host->dma.chan->device->dev,
 			     data->sg, data->sg_len,
-			     ((data->flags & MMC_DATA_WRITE)
-			      ? DMA_TO_DEVICE : DMA_FROM_DEVICE));
+			     mmc_get_dma_dir(data));
 }
 
 /*
@@ -1095,7 +1093,6 @@ atmci_prepare_data_pdc(struct atmel_mci *host, struct mmc_data *data)
 {
 	u32 iflags, tmp;
 	unsigned int sg_len;
-	enum dma_data_direction dir;
 	int i;
 
 	data->error = -EINPROGRESS;
@@ -1107,13 +1104,10 @@ atmci_prepare_data_pdc(struct atmel_mci *host, struct mmc_data *data)
 	/* Enable pdc mode */
 	atmci_writel(host, ATMCI_MR, host->mode_reg | ATMCI_MR_PDCMODE);
 
-	if (data->flags & MMC_DATA_READ) {
-		dir = DMA_FROM_DEVICE;
+	if (data->flags & MMC_DATA_READ)
 		iflags |= ATMCI_ENDRX | ATMCI_RXBUFF;
-	} else {
-		dir = DMA_TO_DEVICE;
+	else
 		iflags |= ATMCI_ENDTX | ATMCI_TXBUFE | ATMCI_BLKE;
-	}
 
 	/* Set BLKLEN */
 	tmp = atmci_readl(host, ATMCI_MR);
@@ -1123,7 +1117,8 @@ atmci_prepare_data_pdc(struct atmel_mci *host, struct mmc_data *data)
 	/* Configure PDC */
 	host->data_size = data->blocks * data->blksz;
-	sg_len = dma_map_sg(&host->pdev->dev, data->sg, data->sg_len, dir);
+	sg_len = dma_map_sg(&host->pdev->dev, data->sg, data->sg_len,
+			    mmc_get_dma_dir(data));
 
 	if ((!host->caps.has_rwproof)
 	    && (host->data->flags & MMC_DATA_WRITE)) {
@@ -1135,9 +1130,8 @@ atmci_prepare_data_pdc(struct atmel_mci *host, struct mmc_data *data)
 	}
 
 	if (host->data_size)
-		atmci_pdc_set_both_buf(host,
-				((dir == DMA_FROM_DEVICE) ? XFER_RECEIVE : XFER_TRANSMIT));
+		atmci_pdc_set_both_buf(host, data->flags & MMC_DATA_READ ?
+				       XFER_RECEIVE : XFER_TRANSMIT);
 
 	return iflags;
 }
@@ -1148,7 +1142,6 @@ atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data)
 	struct dma_async_tx_descriptor	*desc;
 	struct scatterlist		*sg;
 	unsigned int			i;
-	enum dma_data_direction		direction;
 	enum dma_transfer_direction	slave_dirn;
 	unsigned int			sglen;
 	u32				maxburst;
@@ -1186,12 +1179,10 @@ atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data)
 		return -ENODEV;
 
 	if (data->flags & MMC_DATA_READ) {
-		direction = DMA_FROM_DEVICE;
 		host->dma_conf.direction = slave_dirn = DMA_DEV_TO_MEM;
 		maxburst = atmci_convert_chksize(host,
 						 host->dma_conf.src_maxburst);
 	} else {
-		direction = DMA_TO_DEVICE;
 		host->dma_conf.direction = slave_dirn = DMA_MEM_TO_DEV;
 		maxburst = atmci_convert_chksize(host,
 						 host->dma_conf.dst_maxburst);
@@ -1202,7 +1193,7 @@ atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data)
 			ATMCI_DMAEN);
 
 	sglen = dma_map_sg(chan->device->dev, data->sg,
-			data->sg_len, direction);
+			data->sg_len, mmc_get_dma_dir(data));
 
 	dmaengine_slave_config(chan, &host->dma_conf);
 	desc = dmaengine_prep_slave_sg(chan,
@@ -1217,7 +1208,8 @@ atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data)
 	return iflags;
 unmap_exit:
-	dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, direction);
+	dma_unmap_sg(chan->device->dev, data->sg, data->sg_len,
+		     mmc_get_dma_dir(data));
 	return -ENOMEM;
 }
......
@@ -478,18 +478,14 @@ static int mmc_davinci_start_dma_transfer(struct mmc_davinci_host *host,
 	int ret = 0;
 
 	host->sg_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
-				((data->flags & MMC_DATA_WRITE)
-				? DMA_TO_DEVICE
-				: DMA_FROM_DEVICE));
+				  mmc_get_dma_dir(data));
 
 	/* no individual DMA segment should need a partial FIFO */
 	for (i = 0; i < host->sg_len; i++) {
 		if (sg_dma_len(data->sg + i) & mask) {
 			dma_unmap_sg(mmc_dev(host->mmc),
-				data->sg, data->sg_len,
-				(data->flags & MMC_DATA_WRITE)
-				? DMA_TO_DEVICE
-				: DMA_FROM_DEVICE);
+				     data->sg, data->sg_len,
+				     mmc_get_dma_dir(data));
 			return -1;
 		}
 	}
@@ -802,9 +798,7 @@ mmc_davinci_xfer_done(struct mmc_davinci_host *host, struct mmc_data *data)
 		davinci_abort_dma(host);
 
 		dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
-			     (data->flags & MMC_DATA_WRITE)
-			     ? DMA_TO_DEVICE
-			     : DMA_FROM_DEVICE);
+			     mmc_get_dma_dir(data));
 		host->do_dma = false;
 	}
 	host->data_dir = DAVINCI_MMC_DATADIR_NONE;
......
@@ -432,14 +432,6 @@ static void dw_mci_stop_dma(struct dw_mci *host)
 	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
 }
 
-static int dw_mci_get_dma_dir(struct mmc_data *data)
-{
-	if (data->flags & MMC_DATA_WRITE)
-		return DMA_TO_DEVICE;
-	else
-		return DMA_FROM_DEVICE;
-}
-
 static void dw_mci_dma_cleanup(struct dw_mci *host)
 {
 	struct mmc_data *data = host->data;
@@ -448,7 +440,7 @@ static void dw_mci_dma_cleanup(struct dw_mci *host)
 		dma_unmap_sg(host->dev,
 			     data->sg,
 			     data->sg_len,
-			     dw_mci_get_dma_dir(data));
+			     mmc_get_dma_dir(data));
 		data->host_cookie = COOKIE_UNMAPPED;
 	}
 }
@@ -904,7 +896,7 @@ static int dw_mci_pre_dma_transfer(struct dw_mci *host,
 	sg_len = dma_map_sg(host->dev,
 			    data->sg,
 			    data->sg_len,
-			    dw_mci_get_dma_dir(data));
+			    mmc_get_dma_dir(data));
 	if (sg_len == 0)
 		return -EINVAL;
@@ -944,7 +936,7 @@ static void dw_mci_post_req(struct mmc_host *mmc,
 		dma_unmap_sg(slot->host->dev,
 			     data->sg,
 			     data->sg_len,
-			     dw_mci_get_dma_dir(data));
+			     mmc_get_dma_dir(data));
 	data->host_cookie = COOKIE_UNMAPPED;
 }
......
@@ -200,11 +200,6 @@ static int jz4740_mmc_acquire_dma_channels(struct jz4740_mmc_host *host)
 	return -ENODEV;
 }
 
-static inline int jz4740_mmc_get_dma_dir(struct mmc_data *data)
-{
-	return (data->flags & MMC_DATA_READ) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
-}
-
 static inline struct dma_chan *jz4740_mmc_get_dma_chan(struct jz4740_mmc_host *host,
 						       struct mmc_data *data)
 {
@@ -215,7 +210,7 @@ static void jz4740_mmc_dma_unmap(struct jz4740_mmc_host *host,
 				 struct mmc_data *data)
 {
 	struct dma_chan *chan = jz4740_mmc_get_dma_chan(host, data);
-	enum dma_data_direction dir = jz4740_mmc_get_dma_dir(data);
+	enum dma_data_direction dir = mmc_get_dma_dir(data);
 
 	dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);
 }
@@ -227,7 +222,7 @@ static int jz4740_mmc_prepare_dma_data(struct jz4740_mmc_host *host,
 					struct dma_chan *chan)
 {
 	struct jz4740_mmc_host_next *next_data = &host->next_data;
-	enum dma_data_direction dir = jz4740_mmc_get_dma_dir(data);
+	enum dma_data_direction dir = mmc_get_dma_dir(data);
 	int sg_len;
 
 	if (!next && data->host_cookie &&
......
@@ -888,10 +888,7 @@ mmc_spi_data_do(struct mmc_spi_host *host, struct mmc_command *cmd,
 	u32			clock_rate;
 	unsigned long		timeout;
 
-	if (data->flags & MMC_DATA_READ)
-		direction = DMA_FROM_DEVICE;
-	else
-		direction = DMA_TO_DEVICE;
+	direction = mmc_get_dma_dir(data);
 	mmc_spi_setup_data_message(host, multiple, direction);
 	t = &host->t;
......
@@ -516,17 +516,14 @@ static void mmci_dma_data_error(struct mmci_host *host)
 static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
 {
 	struct dma_chan *chan;
-	enum dma_data_direction dir;
 
-	if (data->flags & MMC_DATA_READ) {
-		dir = DMA_FROM_DEVICE;
+	if (data->flags & MMC_DATA_READ)
 		chan = host->dma_rx_channel;
-	} else {
-		dir = DMA_TO_DEVICE;
+	else
 		chan = host->dma_tx_channel;
-	}
 
-	dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);
+	dma_unmap_sg(chan->device->dev, data->sg, data->sg_len,
+		     mmc_get_dma_dir(data));
 }
 
 static void mmci_dma_finalize(struct mmci_host *host, struct mmc_data *data)
@@ -589,17 +586,14 @@ static int __mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
 	struct dma_chan *chan;
 	struct dma_device *device;
 	struct dma_async_tx_descriptor *desc;
-	enum dma_data_direction buffer_dirn;
 	int nr_sg;
 	unsigned long flags = DMA_CTRL_ACK;
 
 	if (data->flags & MMC_DATA_READ) {
 		conf.direction = DMA_DEV_TO_MEM;
-		buffer_dirn = DMA_FROM_DEVICE;
 		chan = host->dma_rx_channel;
 	} else {
 		conf.direction = DMA_MEM_TO_DEV;
-		buffer_dirn = DMA_TO_DEVICE;
 		chan = host->dma_tx_channel;
 	}
@@ -612,7 +606,8 @@ static int __mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
 		return -EINVAL;
 
 	device = chan->device;
-	nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len, buffer_dirn);
+	nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len,
+			   mmc_get_dma_dir(data));
 	if (nr_sg == 0)
 		return -EINVAL;
@@ -631,7 +626,8 @@ static int __mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
 	return 0;
 
 unmap_exit:
-	dma_unmap_sg(device->dev, data->sg, data->sg_len, buffer_dirn);
+	dma_unmap_sg(device->dev, data->sg, data->sg_len,
+		     mmc_get_dma_dir(data));
 	return -ENOMEM;
 }
......
@@ -256,7 +256,7 @@ static void moxart_dma_complete(void *param)
 static void moxart_transfer_dma(struct mmc_data *data, struct moxart_host *host)
 {
-	u32 len, dir_data, dir_slave;
+	u32 len, dir_slave;
 	long dma_time;
 	struct dma_async_tx_descriptor *desc = NULL;
 	struct dma_chan *dma_chan;
@@ -266,16 +266,14 @@ static void moxart_transfer_dma(struct mmc_data *data, struct moxart_host *host)
 	if (data->flags & MMC_DATA_WRITE) {
 		dma_chan = host->dma_chan_tx;
-		dir_data = DMA_TO_DEVICE;
 		dir_slave = DMA_MEM_TO_DEV;
 	} else {
 		dma_chan = host->dma_chan_rx;
-		dir_data = DMA_FROM_DEVICE;
 		dir_slave = DMA_DEV_TO_MEM;
 	}
 
 	len = dma_map_sg(dma_chan->device->dev, data->sg,
-			 data->sg_len, dir_data);
+			 data->sg_len, mmc_get_dma_dir(data));
 
 	if (len > 0) {
 		desc = dmaengine_prep_slave_sg(dma_chan, data->sg,
@@ -301,7 +299,7 @@ static void moxart_transfer_dma(struct mmc_data *data, struct moxart_host *host)
 		dma_unmap_sg(dma_chan->device->dev,
 			     data->sg, data->sg_len,
-			     dir_data);
+			     mmc_get_dma_dir(data));
 	}
......
@@ -474,11 +474,9 @@ static void msdc_prepare_data(struct msdc_host *host, struct mmc_request *mrq)
 	struct mmc_data *data = mrq->data;
 
 	if (!(data->host_cookie & MSDC_PREPARE_FLAG)) {
-		bool read = (data->flags & MMC_DATA_READ) != 0;
-
 		data->host_cookie |= MSDC_PREPARE_FLAG;
 		data->sg_count = dma_map_sg(host->dev, data->sg, data->sg_len,
-					    read ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
+					    mmc_get_dma_dir(data));
 	}
 }
@@ -490,10 +488,8 @@ static void msdc_unprepare_data(struct msdc_host *host, struct mmc_request *mrq)
 		return;
 
 	if (data->host_cookie & MSDC_PREPARE_FLAG) {
-		bool read = (data->flags & MMC_DATA_READ) != 0;
-
 		dma_unmap_sg(host->dev, data->sg, data->sg_len,
-			     read ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
+			     mmc_get_dma_dir(data));
 		data->host_cookie &= ~MSDC_PREPARE_FLAG;
 	}
 }
......
@@ -125,10 +125,10 @@ static int mvsd_setup_data(struct mvsd_host *host, struct mmc_data *data)
 		return 1;
 	} else {
 		dma_addr_t phys_addr;
-		int dma_dir = (data->flags & MMC_DATA_READ) ?
-			DMA_FROM_DEVICE : DMA_TO_DEVICE;
 
-		host->sg_frags = dma_map_sg(mmc_dev(host->mmc), data->sg,
-					    data->sg_len, dma_dir);
+		host->sg_frags = dma_map_sg(mmc_dev(host->mmc),
+					    data->sg, data->sg_len,
+					    mmc_get_dma_dir(data));
 		phys_addr = sg_dma_address(data->sg);
 		mvsd_write(MVSD_SYS_ADDR_LOW, (u32)phys_addr & 0xffff);
 		mvsd_write(MVSD_SYS_ADDR_HI, (u32)phys_addr >> 16);
@@ -294,8 +294,7 @@ static u32 mvsd_finish_data(struct mvsd_host *host, struct mmc_data *data,
 		host->pio_size = 0;
 	} else {
 		dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->sg_frags,
-			     (data->flags & MMC_DATA_READ) ?
-			     DMA_FROM_DEVICE : DMA_TO_DEVICE);
+			     mmc_get_dma_dir(data));
 	}
 
 	if (err_status & MVSD_ERR_DATA_TIMEOUT)
......
@@ -935,15 +935,6 @@ omap_hsmmc_start_command(struct omap_hsmmc_host *host, struct mmc_command *cmd,
 	OMAP_HSMMC_WRITE(host->base, CMD, cmdreg);
 }
 
-static int
-omap_hsmmc_get_dma_dir(struct omap_hsmmc_host *host, struct mmc_data *data)
-{
-	if (data->flags & MMC_DATA_WRITE)
-		return DMA_TO_DEVICE;
-	else
-		return DMA_FROM_DEVICE;
-}
-
 static struct dma_chan *omap_hsmmc_get_dma_chan(struct omap_hsmmc_host *host,
 						struct mmc_data *data)
 {
@@ -1055,7 +1046,7 @@ static void omap_hsmmc_dma_cleanup(struct omap_hsmmc_host *host, int errno)
 		dmaengine_terminate_all(chan);
 		dma_unmap_sg(chan->device->dev,
 			     host->data->sg, host->data->sg_len,
-			     omap_hsmmc_get_dma_dir(host, host->data));
+			     mmc_get_dma_dir(host->data));
 		host->data->host_cookie = 0;
 	}
@@ -1350,7 +1341,7 @@ static void omap_hsmmc_dma_callback(void *param)
 		if (!data->host_cookie)
 			dma_unmap_sg(chan->device->dev,
 				     data->sg, data->sg_len,
-				     omap_hsmmc_get_dma_dir(host, data));
+				     mmc_get_dma_dir(data));
 		req_in_progress = host->req_in_progress;
 		host->dma_ch = -1;
@@ -1383,7 +1374,7 @@ static int omap_hsmmc_pre_dma_transfer(struct omap_hsmmc_host *host,
 	/* Check if next job is already prepared */
 	if (next || data->host_cookie != host->next_data.cookie) {
 		dma_len = dma_map_sg(chan->device->dev, data->sg, data->sg_len,
-				     omap_hsmmc_get_dma_dir(host, data));
+				     mmc_get_dma_dir(data));
 	} else {
 		dma_len = host->next_data.dma_len;
@@ -1569,7 +1560,7 @@ static void omap_hsmmc_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
 		struct dma_chan *c = omap_hsmmc_get_dma_chan(host, data);
 
 		dma_unmap_sg(c->device->dev, data->sg, data->sg_len,
-			     omap_hsmmc_get_dma_dir(host, data));
+			     mmc_get_dma_dir(data));
 		data->host_cookie = 0;
 	}
 }
......
@@ -1104,7 +1104,7 @@ static int s3cmci_prepare_dma(struct s3cmci_host *host, struct mmc_data *data)
 		conf.direction = DMA_MEM_TO_DEV;
 
 	dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
-		   rw ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+		   mmc_get_dma_dir(data));
 
 	dmaengine_slave_config(host->dma, &conf);
 	desc = dmaengine_prep_slave_sg(host->dma, data->sg, data->sg_len,
@@ -1121,7 +1121,7 @@ static int s3cmci_prepare_dma(struct s3cmci_host *host, struct mmc_data *data)
 unmap_exit:
 	dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
-		     rw ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+		     mmc_get_dma_dir(data));
 	return -ENOMEM;
 }
......
@@ -502,8 +502,7 @@ static int sdhci_pre_dma_transfer(struct sdhci_host *host,
 		return data->sg_count;
 
 	sg_count = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
-			      data->flags & MMC_DATA_WRITE ?
-			      DMA_TO_DEVICE : DMA_FROM_DEVICE);
+			      mmc_get_dma_dir(data));
 	if (sg_count == 0)
 		return -ENOSPC;
@@ -2219,8 +2218,7 @@ static void sdhci_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
 	if (data->host_cookie != COOKIE_UNMAPPED)
 		dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
-			     data->flags & MMC_DATA_WRITE ?
-			     DMA_TO_DEVICE : DMA_FROM_DEVICE);
+			     mmc_get_dma_dir(data));
 
 	data->host_cookie = COOKIE_UNMAPPED;
 }
@@ -2336,8 +2334,7 @@ static bool sdhci_request_done(struct sdhci_host *host)
 		if (data && data->host_cookie == COOKIE_MAPPED) {
 			dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
-				     (data->flags & MMC_DATA_READ) ?
-				     DMA_FROM_DEVICE : DMA_TO_DEVICE);
+				     mmc_get_dma_dir(data));
 			data->host_cookie = COOKIE_UNMAPPED;
 		}
 	}
......
@@ -385,14 +385,6 @@ static void sunxi_mmc_init_idma_des(struct sunxi_mmc_host *host,
 	wmb();
 }
 
-static enum dma_data_direction sunxi_mmc_get_dma_dir(struct mmc_data *data)
-{
-	if (data->flags & MMC_DATA_WRITE)
-		return DMA_TO_DEVICE;
-	else
-		return DMA_FROM_DEVICE;
-}
-
 static int sunxi_mmc_map_dma(struct sunxi_mmc_host *host,
 			     struct mmc_data *data)
 {
@@ -400,7 +392,7 @@ static int sunxi_mmc_map_dma(struct sunxi_mmc_host *host,
 	struct scatterlist *sg;
 
 	dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
-			     sunxi_mmc_get_dma_dir(data));
+			     mmc_get_dma_dir(data));
 	if (dma_len == 0) {
 		dev_err(mmc_dev(host->mmc), "dma_map_sg failed\n");
 		return -ENOMEM;
@@ -551,7 +543,7 @@ static irqreturn_t sunxi_mmc_finalize_request(struct sunxi_mmc_host *host)
 		rval |= SDXC_FIFO_RESET;
 		mmc_writel(host, REG_GCTRL, rval);
 		dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
-			     sunxi_mmc_get_dma_dir(data));
+			     mmc_get_dma_dir(data));
 	}
 
 	mmc_writel(host, REG_RINTR, 0xffff);
@@ -1022,7 +1014,7 @@ static void sunxi_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
 		if (data)
 			dma_unmap_sg(mmc_dev(mmc), data->sg, data->sg_len,
-				     sunxi_mmc_get_dma_dir(data));
+				     mmc_get_dma_dir(data));
 
 		dev_err(mmc_dev(mmc), "request already pending\n");
 		mrq->cmd->error = -EBUSY;
......