Commit ef17fee1 authored by Guennadi Liakhovetski, committed by Chris Ball

mmc: tmio_mmc: fix PIO fallback on DMA descriptor allocation failure

The easiest way to fall back to PIO when a DMA descriptor allocation
fails is to disable DMA on the controller but continue the current
request in PIO mode. This way tmio_mmc_start_dma() can become void,
since it can no longer fail. The current version is also broken: it
tests the wrong pointer (host->desc, which can still hold a stale
descriptor from an earlier request) and thus fails to recognise that
a descriptor allocation was unsuccessful.
Signed-off-by: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
Signed-off-by: Chris Ball <cjb@laptop.org>
parent 729b0c79
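
The approach reads well as a small sketch. The following is a minimal
illustration of the pattern, not the driver's actual code; my_host,
my_prep_dma, my_submit_dma and use_dma are made-up names standing in
for the tmio_mmc equivalents:

	#include <stdbool.h>
	#include <stddef.h>

	/* Illustrative host state; the real driver keeps much more here. */
	struct my_host {
		bool use_dma;	/* cleared when we fall back to PIO */
	};

	/* Stand-ins for descriptor setup/submission; prep may return NULL. */
	static void *my_prep_dma(struct my_host *host)
	{
		(void)host;
		return NULL;	/* simulate an allocation failure */
	}

	static void my_submit_dma(struct my_host *host, void *desc)
	{
		(void)host;
		(void)desc;
	}

	/*
	 * void, not int: a setup failure no longer propagates an error code.
	 * On failure we disable DMA and return; the caller's PIO path then
	 * services the very same request.
	 */
	static void my_start_dma(struct my_host *host)
	{
		void *desc = my_prep_dma(host);

		if (!desc) {
			host->use_dma = false;	/* fall back to PIO, keep the request */
			return;
		}
		my_submit_dma(host, desc);
	}
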
--- a/drivers/mmc/host/tmio_mmc.c
+++ b/drivers/mmc/host/tmio_mmc.c
@@ -427,11 +427,12 @@ static void tmio_dma_complete(void *arg)
 	enable_mmc_irqs(host, TMIO_STAT_DATAEND);
 }
 
-static int tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
+static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
 {
 	struct scatterlist *sg = host->sg_ptr;
 	struct dma_async_tx_descriptor *desc = NULL;
 	struct dma_chan *chan = host->chan_rx;
+	dma_cookie_t cookie;
 	int ret;
 
 	ret = dma_map_sg(&host->pdev->dev, sg, host->sg_len, DMA_FROM_DEVICE);
@@ -442,21 +443,20 @@ static int tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
 	}
 
 	if (desc) {
-		host->desc = desc;
 		desc->callback = tmio_dma_complete;
 		desc->callback_param = host;
-		host->cookie = desc->tx_submit(desc);
-		if (host->cookie < 0) {
-			host->desc = NULL;
-			ret = host->cookie;
+		cookie = desc->tx_submit(desc);
+		if (cookie < 0) {
+			desc = NULL;
+			ret = cookie;
 		} else {
 			chan->device->device_issue_pending(chan);
 		}
 	}
 	dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
-		__func__, host->sg_len, ret, host->cookie, host->mrq);
+		__func__, host->sg_len, ret, cookie, host->mrq);
 
-	if (!host->desc) {
+	if (!desc) {
 		/* DMA failed, fall back to PIO */
 		if (ret >= 0)
 			ret = -EIO;
@@ -471,23 +471,18 @@ static int tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
 		dev_warn(&host->pdev->dev,
 			 "DMA failed: %d, falling back to PIO\n", ret);
 		tmio_mmc_enable_dma(host, false);
-		reset(host);
-		/* Fail this request, let above layers recover */
-		host->mrq->cmd->error = ret;
-		tmio_mmc_finish_request(host);
 	}
 
 	dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__,
-		desc, host->cookie, host->sg_len);
-
-	return ret > 0 ? 0 : ret;
+		desc, cookie, host->sg_len);
 }
 
-static int tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
+static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
 {
 	struct scatterlist *sg = host->sg_ptr;
 	struct dma_async_tx_descriptor *desc = NULL;
 	struct dma_chan *chan = host->chan_tx;
+	dma_cookie_t cookie;
 	int ret;
 
 	ret = dma_map_sg(&host->pdev->dev, sg, host->sg_len, DMA_TO_DEVICE);
@@ -498,19 +493,18 @@ static int tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
 	}
 
 	if (desc) {
-		host->desc = desc;
 		desc->callback = tmio_dma_complete;
 		desc->callback_param = host;
-		host->cookie = desc->tx_submit(desc);
-		if (host->cookie < 0) {
-			host->desc = NULL;
-			ret = host->cookie;
+		cookie = desc->tx_submit(desc);
+		if (cookie < 0) {
+			desc = NULL;
+			ret = cookie;
 		}
 	}
 	dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
-		__func__, host->sg_len, ret, host->cookie, host->mrq);
+		__func__, host->sg_len, ret, cookie, host->mrq);
 
-	if (!host->desc) {
+	if (!desc) {
 		/* DMA failed, fall back to PIO */
 		if (ret >= 0)
 			ret = -EIO;
@@ -525,30 +519,22 @@ static int tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
 		dev_warn(&host->pdev->dev,
 			 "DMA failed: %d, falling back to PIO\n", ret);
 		tmio_mmc_enable_dma(host, false);
-		reset(host);
-		/* Fail this request, let above layers recover */
-		host->mrq->cmd->error = ret;
-		tmio_mmc_finish_request(host);
 	}
 
 	dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d\n", __func__,
-		desc, host->cookie);
-
-	return ret > 0 ? 0 : ret;
+		desc, cookie);
 }
 
-static int tmio_mmc_start_dma(struct tmio_mmc_host *host,
+static void tmio_mmc_start_dma(struct tmio_mmc_host *host,
 			       struct mmc_data *data)
 {
 	if (data->flags & MMC_DATA_READ) {
 		if (host->chan_rx)
-			return tmio_mmc_start_dma_rx(host);
+			tmio_mmc_start_dma_rx(host);
 	} else {
 		if (host->chan_tx)
-			return tmio_mmc_start_dma_tx(host);
+			tmio_mmc_start_dma_tx(host);
 	}
-
-	return 0;
 }
 
 static void tmio_issue_tasklet_fn(unsigned long priv)
@@ -584,9 +570,6 @@ static bool tmio_mmc_filter(struct dma_chan *chan, void *arg)
 static void tmio_mmc_request_dma(struct tmio_mmc_host *host,
 				 struct tmio_mmc_data *pdata)
 {
-	host->cookie = -EINVAL;
-	host->desc = NULL;
-
 	/* We can only either use DMA for both Tx and Rx or not use it at all */
 	if (pdata->dma) {
 		dma_cap_mask_t mask;
@@ -632,15 +615,11 @@ static void tmio_mmc_release_dma(struct tmio_mmc_host *host)
 		host->chan_rx = NULL;
 		dma_release_channel(chan);
 	}
-
-	host->cookie = -EINVAL;
-	host->desc = NULL;
 }
 #else
-static int tmio_mmc_start_dma(struct tmio_mmc_host *host,
+static void tmio_mmc_start_dma(struct tmio_mmc_host *host,
 			       struct mmc_data *data)
 {
-	return 0;
 }
 
 static void tmio_mmc_request_dma(struct tmio_mmc_host *host,
@@ -682,7 +661,9 @@ static int tmio_mmc_start_data(struct tmio_mmc_host *host,
 	sd_ctrl_write16(host, CTL_SD_XFER_LEN, data->blksz);
 	sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks);
 
-	return tmio_mmc_start_dma(host, data);
+	tmio_mmc_start_dma(host, data);
+
+	return 0;
 }
 
 /* Process requests from the MMC layer */
--- a/drivers/mmc/host/tmio_mmc.h
+++ b/drivers/mmc/host/tmio_mmc.h
@@ -112,9 +112,7 @@ struct tmio_mmc_host {
 	struct tasklet_struct	dma_complete;
 	struct tasklet_struct	dma_issue;
 #ifdef CONFIG_TMIO_MMC_DMA
-	struct dma_async_tx_descriptor *desc;
 	unsigned int		dma_sglen;
-	dma_cookie_t		cookie;
 #endif
 };
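
For reference, the pointer bug mentioned in the commit message has this
shape (a reduced sketch with hypothetical names, not the driver's code):
caching the descriptor in a field that outlives the request, then
testing the field instead of the fresh descriptor, hides an allocation
failure whenever the field still holds the previous request's value.

	/* Reduced sketch of the stale-pointer bug (hypothetical names). */
	struct buggy_host {
		void *cached_desc;	/* persists across requests */
	};

	static void buggy_check(struct buggy_host *host, void *new_desc)
	{
		if (new_desc)
			host->cached_desc = new_desc;	/* updated only on success */

		/*
		 * BUG: when new_desc is NULL but an earlier request left
		 * cached_desc non-NULL, this test misses the failure.
		 */
		if (!host->cached_desc)
			; /* fall back to PIO -- skipped if a stale pointer remains */
	}

	static void fixed_check(struct buggy_host *host, void *new_desc)
	{
		(void)host;
		/* Fix: test the descriptor belonging to *this* request. */
		if (!new_desc)
			; /* fall back to PIO */
	}
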