Commit 54f8d501 authored by Bartlomiej Zolnierkiewicz, committed by Dan Williams

dmaengine: remove DMA unmap from drivers

Remove support for DMA unmapping from drivers as it is no longer
needed (DMA core code is now handling it).

Cc: Vinod Koul <vinod.koul@intel.com>
Cc: Tomasz Figa <t.figa@samsung.com>
Cc: Dave Jiang <dave.jiang@intel.com>
Signed-off-by: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
[djbw: fix up chan2parent() unused warning in drivers/dma/dw/core.c]
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
parent 6f57fd05
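
Note: every hunk below deletes driver-local unmap logic in favor of the single dma_descriptor_unmap() call that earlier patches in this series wired into each completion path. As a rough sketch (assuming the dmaengine_unmap_data API added to include/linux/dmaengine.h by this series), the core-side helper amounts to:

static inline void dma_descriptor_unmap(struct dma_async_tx_descriptor *tx)
{
	if (tx->unmap) {
		/* drop the descriptor's reference; the buffers are
		 * actually unmapped when the refcount hits zero */
		dmaengine_unmap_put(tx->unmap);
		tx->unmap = NULL;
	}
}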
@@ -393,36 +393,6 @@ static inline int iop_chan_zero_sum_slot_count(size_t len, int src_cnt,
return slot_cnt;
}
static inline int iop_desc_is_pq(struct iop_adma_desc_slot *desc)
{
return 0;
}
static inline u32 iop_desc_get_dest_addr(struct iop_adma_desc_slot *desc,
struct iop_adma_chan *chan)
{
union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
switch (chan->device->id) {
case DMA0_ID:
case DMA1_ID:
return hw_desc.dma->dest_addr;
case AAU_ID:
return hw_desc.aau->dest_addr;
default:
BUG();
}
return 0;
}
static inline u32 iop_desc_get_qdest_addr(struct iop_adma_desc_slot *desc,
struct iop_adma_chan *chan)
{
BUG();
return 0;
}
static inline u32 iop_desc_get_byte_count(struct iop_adma_desc_slot *desc,
struct iop_adma_chan *chan)
{
...
@@ -82,8 +82,6 @@ struct iop_adma_chan {
* @slot_cnt: total slots used in an transaction (group of operations)
* @slots_per_op: number of slots per operation
* @idx: pool index
* @unmap_src_cnt: number of xor sources
* @unmap_len: transaction bytecount
* @tx_list: list of descriptors that are associated with one operation
* @async_tx: support for the async_tx api
* @group_list: list of slots that make up a multi-descriptor transaction
@@ -99,8 +97,6 @@ struct iop_adma_desc_slot {
u16 slot_cnt;
u16 slots_per_op;
u16 idx;
u16 unmap_src_cnt;
size_t unmap_len;
struct list_head tx_list;
struct dma_async_tx_descriptor async_tx;
union {
...
@@ -218,20 +218,6 @@ iop_chan_xor_slot_count(size_t len, int src_cnt, int *slots_per_op)
#define iop_chan_pq_slot_count iop_chan_xor_slot_count
#define iop_chan_pq_zero_sum_slot_count iop_chan_xor_slot_count
static inline u32 iop_desc_get_dest_addr(struct iop_adma_desc_slot *desc,
struct iop_adma_chan *chan)
{
struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc;
return hw_desc->dest_addr;
}
static inline u32 iop_desc_get_qdest_addr(struct iop_adma_desc_slot *desc,
struct iop_adma_chan *chan)
{
struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc;
return hw_desc->q_dest_addr;
}
static inline u32 iop_desc_get_byte_count(struct iop_adma_desc_slot *desc,
struct iop_adma_chan *chan)
{
@@ -350,18 +336,6 @@ iop_desc_init_pq(struct iop_adma_desc_slot *desc, int src_cnt,
hw_desc->desc_ctrl = u_desc_ctrl.value;
}
static inline int iop_desc_is_pq(struct iop_adma_desc_slot *desc)
{
struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc;
union {
u32 value;
struct iop13xx_adma_desc_ctrl field;
} u_desc_ctrl;
u_desc_ctrl.value = hw_desc->desc_ctrl;
return u_desc_ctrl.field.pq_xfer_en;
}
static inline void
iop_desc_init_pq_zero_sum(struct iop_adma_desc_slot *desc, int src_cnt,
unsigned long flags)
...
@@ -1164,43 +1164,12 @@ static void pl08x_free_txd(struct pl08x_driver_data *pl08x,
kfree(txd);
}
static void pl08x_unmap_buffers(struct pl08x_txd *txd)
{
struct device *dev = txd->vd.tx.chan->device->dev;
struct pl08x_sg *dsg;
if (!(txd->vd.tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
if (txd->vd.tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
list_for_each_entry(dsg, &txd->dsg_list, node)
dma_unmap_single(dev, dsg->src_addr, dsg->len,
DMA_TO_DEVICE);
else {
list_for_each_entry(dsg, &txd->dsg_list, node)
dma_unmap_page(dev, dsg->src_addr, dsg->len,
DMA_TO_DEVICE);
}
}
if (!(txd->vd.tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
if (txd->vd.tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
list_for_each_entry(dsg, &txd->dsg_list, node)
dma_unmap_single(dev, dsg->dst_addr, dsg->len,
DMA_FROM_DEVICE);
else
list_for_each_entry(dsg, &txd->dsg_list, node)
dma_unmap_page(dev, dsg->dst_addr, dsg->len,
DMA_FROM_DEVICE);
}
}
static void pl08x_desc_free(struct virt_dma_desc *vd)
{
struct pl08x_txd *txd = to_pl08x_txd(&vd->tx);
struct pl08x_dma_chan *plchan = to_pl08x_chan(vd->tx.chan);
dma_descriptor_unmap(txd);
if (!plchan->slave)
pl08x_unmap_buffers(txd);
if (!txd->done)
pl08x_release_mux(plchan);
...
@@ -344,32 +344,7 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
/* move myself to free_list */
list_move(&desc->desc_node, &atchan->free_list);
/* unmap dma addresses (not on slave channels) */
dma_descriptor_unmap(txd);
if (!atchan->chan_common.private) {
struct device *parent = chan2parent(&atchan->chan_common);
if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
dma_unmap_single(parent,
desc->lli.daddr,
desc->len, DMA_FROM_DEVICE);
else
dma_unmap_page(parent,
desc->lli.daddr,
desc->len, DMA_FROM_DEVICE);
}
if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
dma_unmap_single(parent,
desc->lli.saddr,
desc->len, DMA_TO_DEVICE);
else
dma_unmap_page(parent,
desc->lli.saddr,
desc->len, DMA_TO_DEVICE);
}
}
/* for cyclic transfers,
* no need to replay callback function while stopping */
if (!atc_chan_is_cyclic(atchan)) {
...
@@ -85,10 +85,6 @@ static struct device *chan2dev(struct dma_chan *chan)
{
return &chan->dev->device;
}
static struct device *chan2parent(struct dma_chan *chan)
{
return chan->dev->device.parent;
}
static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc)
{
@@ -312,26 +308,6 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
list_move(&desc->desc_node, &dwc->free_list);
dma_descriptor_unmap(txd);
if (!is_slave_direction(dwc->direction)) {
struct device *parent = chan2parent(&dwc->chan);
if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
dma_unmap_single(parent, desc->lli.dar,
desc->total_len, DMA_FROM_DEVICE);
else
dma_unmap_page(parent, desc->lli.dar,
desc->total_len, DMA_FROM_DEVICE);
}
if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
dma_unmap_single(parent, desc->lli.sar,
desc->total_len, DMA_TO_DEVICE);
else
dma_unmap_page(parent, desc->lli.sar,
desc->total_len, DMA_TO_DEVICE);
}
}
spin_unlock_irqrestore(&dwc->lock, flags);
if (callback)
...
@@ -733,28 +733,6 @@ static void ep93xx_dma_advance_work(struct ep93xx_dma_chan *edmac)
spin_unlock_irqrestore(&edmac->lock, flags);
}
static void ep93xx_dma_unmap_buffers(struct ep93xx_dma_desc *desc)
{
struct device *dev = desc->txd.chan->device->dev;
if (!(desc->txd.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
if (desc->txd.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
dma_unmap_single(dev, desc->src_addr, desc->size,
DMA_TO_DEVICE);
else
dma_unmap_page(dev, desc->src_addr, desc->size,
DMA_TO_DEVICE);
}
if (!(desc->txd.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
if (desc->txd.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
dma_unmap_single(dev, desc->dst_addr, desc->size,
DMA_FROM_DEVICE);
else
dma_unmap_page(dev, desc->dst_addr, desc->size,
DMA_FROM_DEVICE);
}
}
static void ep93xx_dma_tasklet(unsigned long data)
{
struct ep93xx_dma_chan *edmac = (struct ep93xx_dma_chan *)data;
@@ -787,14 +765,7 @@ static void ep93xx_dma_tasklet(unsigned long data)
/* Now we can release all the chained descriptors */
list_for_each_entry_safe(desc, d, &list, node) {
/*
* For the memcpy channels the API requires us to unmap the
* buffers unless requested otherwise.
*/
dma_descriptor_unmap(&desc->txd);
if (!edmac->chan.private)
ep93xx_dma_unmap_buffers(desc);
ep93xx_dma_desc_put(edmac, desc);
}
...
@@ -869,22 +869,6 @@ static void fsldma_cleanup_descriptor(struct fsldma_chan *chan,
dma_run_dependencies(txd);
dma_descriptor_unmap(txd);
/* Unmap the dst buffer, if requested */
if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
dma_unmap_single(dev, dst, len, DMA_FROM_DEVICE);
else
dma_unmap_page(dev, dst, len, DMA_FROM_DEVICE);
}
/* Unmap the src buffer, if requested */
if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
dma_unmap_single(dev, src, len, DMA_TO_DEVICE);
else
dma_unmap_page(dev, src, len, DMA_TO_DEVICE);
}
#ifdef FSL_DMA_LD_DEBUG
chan_dbg(chan, "LD %p free\n", desc);
#endif
...
@@ -531,21 +531,6 @@ static void ioat1_cleanup_event(unsigned long data)
writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
}
void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags,
size_t len, struct ioat_dma_descriptor *hw)
{
struct pci_dev *pdev = chan->device->pdev;
size_t offset = len - hw->size;
if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP))
ioat_unmap(pdev, hw->dst_addr - offset, len,
PCI_DMA_FROMDEVICE, flags, 1);
if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP))
ioat_unmap(pdev, hw->src_addr - offset, len,
PCI_DMA_TODEVICE, flags, 0);
}
dma_addr_t ioat_get_current_completion(struct ioat_chan_common *chan)
{
dma_addr_t phys_complete;
@@ -603,7 +588,6 @@ static void __cleanup(struct ioat_dma_chan *ioat, dma_addr_t phys_complete)
if (tx->cookie) {
dma_cookie_complete(tx);
dma_descriptor_unmap(tx);
ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw);
ioat->active -= desc->hw->tx_cnt;
if (tx->callback) {
tx->callback(tx->callback_param);
...
@@ -342,16 +342,6 @@ static inline bool is_ioat_bug(unsigned long err)
return !!err;
}
static inline void ioat_unmap(struct pci_dev *pdev, dma_addr_t addr, size_t len,
int direction, enum dma_ctrl_flags flags, bool dst)
{
if ((dst && (flags & DMA_COMPL_DEST_UNMAP_SINGLE)) ||
(!dst && (flags & DMA_COMPL_SRC_UNMAP_SINGLE)))
pci_unmap_single(pdev, addr, len, direction);
else
pci_unmap_page(pdev, addr, len, direction);
}
int ioat_probe(struct ioatdma_device *device);
int ioat_register(struct ioatdma_device *device);
int ioat1_dma_probe(struct ioatdma_device *dev, int dca);
@@ -363,8 +353,6 @@ void ioat_init_channel(struct ioatdma_device *device,
struct ioat_chan_common *chan, int idx);
enum dma_status ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie,
struct dma_tx_state *txstate);
void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags,
size_t len, struct ioat_dma_descriptor *hw);
bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
dma_addr_t *phys_complete);
void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type);
...
@@ -149,7 +149,6 @@ static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete)
dump_desc_dbg(ioat, desc);
if (tx->cookie) {
dma_descriptor_unmap(tx);
ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw);
dma_cookie_complete(tx);
if (tx->callback) {
tx->callback(tx->callback_param);
...
@@ -96,13 +96,6 @@ static const u8 pq16_idx_to_sed[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0,
static void ioat3_eh(struct ioat2_dma_chan *ioat);
static dma_addr_t xor_get_src(struct ioat_raw_descriptor *descs[2], int idx)
{
struct ioat_raw_descriptor *raw = descs[xor_idx_to_desc >> idx & 1];
return raw->field[xor_idx_to_field[idx]];
}
static void xor_set_src(struct ioat_raw_descriptor *descs[2],
dma_addr_t addr, u32 offset, int idx)
{
@@ -296,164 +289,6 @@ static void ioat3_free_sed(struct ioatdma_device *device, struct ioat_sed_ent *s
kmem_cache_free(device->sed_pool, sed);
}
static void ioat3_dma_unmap(struct ioat2_dma_chan *ioat,
struct ioat_ring_ent *desc, int idx)
{
struct ioat_chan_common *chan = &ioat->base;
struct pci_dev *pdev = chan->device->pdev;
size_t len = desc->len;
size_t offset = len - desc->hw->size;
struct dma_async_tx_descriptor *tx = &desc->txd;
enum dma_ctrl_flags flags = tx->flags;
switch (desc->hw->ctl_f.op) {
case IOAT_OP_COPY:
if (!desc->hw->ctl_f.null) /* skip 'interrupt' ops */
ioat_dma_unmap(chan, flags, len, desc->hw);
break;
case IOAT_OP_XOR_VAL:
case IOAT_OP_XOR: {
struct ioat_xor_descriptor *xor = desc->xor;
struct ioat_ring_ent *ext;
struct ioat_xor_ext_descriptor *xor_ex = NULL;
int src_cnt = src_cnt_to_sw(xor->ctl_f.src_cnt);
struct ioat_raw_descriptor *descs[2];
int i;
if (src_cnt > 5) {
ext = ioat2_get_ring_ent(ioat, idx + 1);
xor_ex = ext->xor_ex;
}
if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
descs[0] = (struct ioat_raw_descriptor *) xor;
descs[1] = (struct ioat_raw_descriptor *) xor_ex;
for (i = 0; i < src_cnt; i++) {
dma_addr_t src = xor_get_src(descs, i);
ioat_unmap(pdev, src - offset, len,
PCI_DMA_TODEVICE, flags, 0);
}
/* dest is a source in xor validate operations */
if (xor->ctl_f.op == IOAT_OP_XOR_VAL) {
ioat_unmap(pdev, xor->dst_addr - offset, len,
PCI_DMA_TODEVICE, flags, 1);
break;
}
}
if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP))
ioat_unmap(pdev, xor->dst_addr - offset, len,
PCI_DMA_FROMDEVICE, flags, 1);
break;
}
case IOAT_OP_PQ_VAL:
case IOAT_OP_PQ: {
struct ioat_pq_descriptor *pq = desc->pq;
struct ioat_ring_ent *ext;
struct ioat_pq_ext_descriptor *pq_ex = NULL;
int src_cnt = src_cnt_to_sw(pq->ctl_f.src_cnt);
struct ioat_raw_descriptor *descs[2];
int i;
if (src_cnt > 3) {
ext = ioat2_get_ring_ent(ioat, idx + 1);
pq_ex = ext->pq_ex;
}
/* in the 'continue' case don't unmap the dests as sources */
if (dmaf_p_disabled_continue(flags))
src_cnt--;
else if (dmaf_continue(flags))
src_cnt -= 3;
if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
descs[0] = (struct ioat_raw_descriptor *) pq;
descs[1] = (struct ioat_raw_descriptor *) pq_ex;
for (i = 0; i < src_cnt; i++) {
dma_addr_t src = pq_get_src(descs, i);
ioat_unmap(pdev, src - offset, len,
PCI_DMA_TODEVICE, flags, 0);
}
/* the dests are sources in pq validate operations */
if (pq->ctl_f.op == IOAT_OP_XOR_VAL) {
if (!(flags & DMA_PREP_PQ_DISABLE_P))
ioat_unmap(pdev, pq->p_addr - offset,
len, PCI_DMA_TODEVICE, flags, 0);
if (!(flags & DMA_PREP_PQ_DISABLE_Q))
ioat_unmap(pdev, pq->q_addr - offset,
len, PCI_DMA_TODEVICE, flags, 0);
break;
}
}
if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
if (!(flags & DMA_PREP_PQ_DISABLE_P))
ioat_unmap(pdev, pq->p_addr - offset, len,
PCI_DMA_BIDIRECTIONAL, flags, 1);
if (!(flags & DMA_PREP_PQ_DISABLE_Q))
ioat_unmap(pdev, pq->q_addr - offset, len,
PCI_DMA_BIDIRECTIONAL, flags, 1);
}
break;
}
case IOAT_OP_PQ_16S:
case IOAT_OP_PQ_VAL_16S: {
struct ioat_pq_descriptor *pq = desc->pq;
int src_cnt = src16_cnt_to_sw(pq->ctl_f.src_cnt);
struct ioat_raw_descriptor *descs[4];
int i;
/* in the 'continue' case don't unmap the dests as sources */
if (dmaf_p_disabled_continue(flags))
src_cnt--;
else if (dmaf_continue(flags))
src_cnt -= 3;
if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
descs[0] = (struct ioat_raw_descriptor *)pq;
descs[1] = (struct ioat_raw_descriptor *)(desc->sed->hw);
descs[2] = (struct ioat_raw_descriptor *)(&desc->sed->hw->b[0]);
for (i = 0; i < src_cnt; i++) {
dma_addr_t src = pq16_get_src(descs, i);
ioat_unmap(pdev, src - offset, len,
PCI_DMA_TODEVICE, flags, 0);
}
/* the dests are sources in pq validate operations */
if (pq->ctl_f.op == IOAT_OP_XOR_VAL) {
if (!(flags & DMA_PREP_PQ_DISABLE_P))
ioat_unmap(pdev, pq->p_addr - offset,
len, PCI_DMA_TODEVICE,
flags, 0);
if (!(flags & DMA_PREP_PQ_DISABLE_Q))
ioat_unmap(pdev, pq->q_addr - offset,
len, PCI_DMA_TODEVICE,
flags, 0);
break;
}
}
if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
if (!(flags & DMA_PREP_PQ_DISABLE_P))
ioat_unmap(pdev, pq->p_addr - offset, len,
PCI_DMA_BIDIRECTIONAL, flags, 1);
if (!(flags & DMA_PREP_PQ_DISABLE_Q))
ioat_unmap(pdev, pq->q_addr - offset, len,
PCI_DMA_BIDIRECTIONAL, flags, 1);
}
break;
}
default:
dev_err(&pdev->dev, "%s: unknown op type: %#x\n",
__func__, desc->hw->ctl_f.op);
}
}
static bool desc_has_ext(struct ioat_ring_ent *desc)
{
struct ioat_dma_descriptor *hw = desc->hw;
@@ -578,7 +413,6 @@ static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete)
if (tx->cookie) {
dma_cookie_complete(tx);
dma_descriptor_unmap(tx);
ioat3_dma_unmap(ioat, desc, idx + i);
if (tx->callback) {
tx->callback(tx->callback_param);
tx->callback = NULL;
...
@@ -61,80 +61,6 @@ static void iop_adma_free_slots(struct iop_adma_desc_slot *slot)
}
}
static void
iop_desc_unmap(struct iop_adma_chan *iop_chan, struct iop_adma_desc_slot *desc)
{
struct dma_async_tx_descriptor *tx = &desc->async_tx;
struct iop_adma_desc_slot *unmap = desc->group_head;
struct device *dev = &iop_chan->device->pdev->dev;
u32 len = unmap->unmap_len;
enum dma_ctrl_flags flags = tx->flags;
u32 src_cnt;
dma_addr_t addr;
dma_addr_t dest;
src_cnt = unmap->unmap_src_cnt;
dest = iop_desc_get_dest_addr(unmap, iop_chan);
if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
enum dma_data_direction dir;
if (src_cnt > 1) /* is xor? */
dir = DMA_BIDIRECTIONAL;
else
dir = DMA_FROM_DEVICE;
dma_unmap_page(dev, dest, len, dir);
}
if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
while (src_cnt--) {
addr = iop_desc_get_src_addr(unmap, iop_chan, src_cnt);
if (addr == dest)
continue;
dma_unmap_page(dev, addr, len, DMA_TO_DEVICE);
}
}
desc->group_head = NULL;
}
static void
iop_desc_unmap_pq(struct iop_adma_chan *iop_chan, struct iop_adma_desc_slot *desc)
{
struct dma_async_tx_descriptor *tx = &desc->async_tx;
struct iop_adma_desc_slot *unmap = desc->group_head;
struct device *dev = &iop_chan->device->pdev->dev;
u32 len = unmap->unmap_len;
enum dma_ctrl_flags flags = tx->flags;
u32 src_cnt = unmap->unmap_src_cnt;
dma_addr_t pdest = iop_desc_get_dest_addr(unmap, iop_chan);
dma_addr_t qdest = iop_desc_get_qdest_addr(unmap, iop_chan);
int i;
if (tx->flags & DMA_PREP_CONTINUE)
src_cnt -= 3;
if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP) && !desc->pq_check_result) {
dma_unmap_page(dev, pdest, len, DMA_BIDIRECTIONAL);
dma_unmap_page(dev, qdest, len, DMA_BIDIRECTIONAL);
}
if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
dma_addr_t addr;
for (i = 0; i < src_cnt; i++) {
addr = iop_desc_get_src_addr(unmap, iop_chan, i);
dma_unmap_page(dev, addr, len, DMA_TO_DEVICE);
}
if (desc->pq_check_result) {
dma_unmap_page(dev, pdest, len, DMA_TO_DEVICE);
dma_unmap_page(dev, qdest, len, DMA_TO_DEVICE);
}
}
desc->group_head = NULL;
}
static dma_cookie_t
iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc,
struct iop_adma_chan *iop_chan, dma_cookie_t cookie)
@@ -153,15 +79,8 @@ iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc,
tx->callback(tx->callback_param);
dma_descriptor_unmap(tx);
/* unmap dma addresses
* (unmap_single vs unmap_page?)
*/
if (desc->group_head && desc->unmap_len) {
if (iop_desc_is_pq(desc))
iop_desc_unmap_pq(iop_chan, desc);
else
iop_desc_unmap(iop_chan, desc);
}
+ if (desc->group_head)
+	desc->group_head = NULL;
}
/* run dependent operations */
@@ -592,7 +511,6 @@ iop_adma_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags)
if (sw_desc) {
grp_start = sw_desc->group_head;
iop_desc_init_interrupt(grp_start, iop_chan);
grp_start->unmap_len = 0;
sw_desc->async_tx.flags = flags;
}
spin_unlock_bh(&iop_chan->lock);
@@ -624,8 +542,6 @@ iop_adma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
iop_desc_set_byte_count(grp_start, iop_chan, len);
iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
iop_desc_set_memcpy_src_addr(grp_start, dma_src);
sw_desc->unmap_src_cnt = 1;
sw_desc->unmap_len = len;
sw_desc->async_tx.flags = flags;
}
spin_unlock_bh(&iop_chan->lock);
@@ -658,8 +574,6 @@ iop_adma_prep_dma_xor(struct dma_chan *chan, dma_addr_t dma_dest,
iop_desc_init_xor(grp_start, src_cnt, flags);
iop_desc_set_byte_count(grp_start, iop_chan, len);
iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
sw_desc->unmap_src_cnt = src_cnt;
sw_desc->unmap_len = len;
sw_desc->async_tx.flags = flags;
while (src_cnt--)
iop_desc_set_xor_src_addr(grp_start, src_cnt,
@@ -695,8 +609,6 @@ iop_adma_prep_dma_xor_val(struct dma_chan *chan, dma_addr_t *dma_src,
grp_start->xor_check_result = result;
pr_debug("\t%s: grp_start->xor_check_result: %p\n",
__func__, grp_start->xor_check_result);
sw_desc->unmap_src_cnt = src_cnt;
sw_desc->unmap_len = len;
sw_desc->async_tx.flags = flags;
while (src_cnt--)
iop_desc_set_zero_sum_src_addr(grp_start, src_cnt,
@@ -749,8 +661,6 @@ iop_adma_prep_dma_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
dst[0] = dst[1] & 0x7;
iop_desc_set_pq_addr(g, dst);
sw_desc->unmap_src_cnt = src_cnt;
sw_desc->unmap_len = len;
sw_desc->async_tx.flags = flags;
for (i = 0; i < src_cnt; i++)
iop_desc_set_pq_src_addr(g, i, src[i], scf[i]);
@@ -805,8 +715,6 @@ iop_adma_prep_dma_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
g->pq_check_result = pqres;
pr_debug("\t%s: g->pq_check_result: %p\n",
__func__, g->pq_check_result);
sw_desc->unmap_src_cnt = src_cnt+2;
sw_desc->unmap_len = len;
sw_desc->async_tx.flags = flags;
while (src_cnt--)
iop_desc_set_pq_zero_sum_src_addr(g, src_cnt,
...
@@ -60,14 +60,6 @@ static u32 mv_desc_get_dest_addr(struct mv_xor_desc_slot *desc)
return hw_desc->phy_dest_addr;
}
static u32 mv_desc_get_src_addr(struct mv_xor_desc_slot *desc,
int src_idx)
{
struct mv_xor_desc *hw_desc = desc->hw_desc;
return hw_desc->phy_src_addr[mv_phy_src_idx(src_idx)];
}
static void mv_desc_set_byte_count(struct mv_xor_desc_slot *desc,
u32 byte_count)
{
@@ -279,42 +271,8 @@ mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
desc->async_tx.callback_param);
dma_descriptor_unmap(&desc->async_tx);
/* unmap dma addresses
* (unmap_single vs unmap_page?)
*/
if (desc->group_head && desc->unmap_len) {
struct mv_xor_desc_slot *unmap = desc->group_head;
struct device *dev = mv_chan_to_devp(mv_chan);
u32 len = unmap->unmap_len;
enum dma_ctrl_flags flags = desc->async_tx.flags;
u32 src_cnt;
dma_addr_t addr;
dma_addr_t dest;
src_cnt = unmap->unmap_src_cnt;
dest = mv_desc_get_dest_addr(unmap);
if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
enum dma_data_direction dir;
if (src_cnt > 1) /* is xor ? */
dir = DMA_BIDIRECTIONAL;
else
dir = DMA_FROM_DEVICE;
dma_unmap_page(dev, dest, len, dir);
}
if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
while (src_cnt--) {
addr = mv_desc_get_src_addr(unmap,
src_cnt);
if (addr == dest)
continue;
dma_unmap_page(dev, addr, len,
DMA_TO_DEVICE);
}
}
desc->group_head = NULL;
}
+ if (desc->group_head)
+	desc->group_head = NULL;
}
/* run dependent operations */
...
@@ -801,218 +801,6 @@ static void ppc440spe_desc_set_link(struct ppc440spe_adma_chan *chan,
local_irq_restore(flags);
}
/**
* ppc440spe_desc_get_src_addr - extract the source address from the descriptor
*/
static u32 ppc440spe_desc_get_src_addr(struct ppc440spe_adma_desc_slot *desc,
struct ppc440spe_adma_chan *chan, int src_idx)
{
struct dma_cdb *dma_hw_desc;
struct xor_cb *xor_hw_desc;
switch (chan->device->id) {
case PPC440SPE_DMA0_ID:
case PPC440SPE_DMA1_ID:
dma_hw_desc = desc->hw_desc;
/* May have 0, 1, 2, or 3 sources */
switch (dma_hw_desc->opc) {
case DMA_CDB_OPC_NO_OP:
case DMA_CDB_OPC_DFILL128:
return 0;
case DMA_CDB_OPC_DCHECK128:
if (unlikely(src_idx)) {
printk(KERN_ERR "%s: try to get %d source for"
" DCHECK128\n", __func__, src_idx);
BUG();
}
return le32_to_cpu(dma_hw_desc->sg1l);
case DMA_CDB_OPC_MULTICAST:
case DMA_CDB_OPC_MV_SG1_SG2:
if (unlikely(src_idx > 2)) {
printk(KERN_ERR "%s: try to get %d source from"
" DMA descr\n", __func__, src_idx);
BUG();
}
if (src_idx) {
if (le32_to_cpu(dma_hw_desc->sg1u) &
DMA_CUED_XOR_WIN_MSK) {
u8 region;
if (src_idx == 1)
return le32_to_cpu(
dma_hw_desc->sg1l) +
desc->unmap_len;
region = (le32_to_cpu(
dma_hw_desc->sg1u)) >>
DMA_CUED_REGION_OFF;
region &= DMA_CUED_REGION_MSK;
switch (region) {
case DMA_RXOR123:
return le32_to_cpu(
dma_hw_desc->sg1l) +
(desc->unmap_len << 1);
case DMA_RXOR124:
return le32_to_cpu(
dma_hw_desc->sg1l) +
(desc->unmap_len * 3);
case DMA_RXOR125:
return le32_to_cpu(
dma_hw_desc->sg1l) +
(desc->unmap_len << 2);
default:
printk(KERN_ERR
"%s: try to"
" get src3 for region %02x"
"PPC440SPE_DESC_RXOR12?\n",
__func__, region);
BUG();
}
} else {
printk(KERN_ERR
"%s: try to get %d"
" source for non-cued descr\n",
__func__, src_idx);
BUG();
}
}
return le32_to_cpu(dma_hw_desc->sg1l);
default:
printk(KERN_ERR "%s: unknown OPC 0x%02x\n",
__func__, dma_hw_desc->opc);
BUG();
}
return le32_to_cpu(dma_hw_desc->sg1l);
case PPC440SPE_XOR_ID:
/* May have up to 16 sources */
xor_hw_desc = desc->hw_desc;
return xor_hw_desc->ops[src_idx].l;
}
return 0;
}
/**
* ppc440spe_desc_get_dest_addr - extract the destination address from the
* descriptor
*/
static u32 ppc440spe_desc_get_dest_addr(struct ppc440spe_adma_desc_slot *desc,
struct ppc440spe_adma_chan *chan, int idx)
{
struct dma_cdb *dma_hw_desc;
struct xor_cb *xor_hw_desc;
switch (chan->device->id) {
case PPC440SPE_DMA0_ID:
case PPC440SPE_DMA1_ID:
dma_hw_desc = desc->hw_desc;
if (likely(!idx))
return le32_to_cpu(dma_hw_desc->sg2l);
return le32_to_cpu(dma_hw_desc->sg3l);
case PPC440SPE_XOR_ID:
xor_hw_desc = desc->hw_desc;
return xor_hw_desc->cbtal;
}
return 0;
}
/**
* ppc440spe_desc_get_src_num - extract the number of source addresses from
* the descriptor
*/
static u32 ppc440spe_desc_get_src_num(struct ppc440spe_adma_desc_slot *desc,
struct ppc440spe_adma_chan *chan)
{
struct dma_cdb *dma_hw_desc;
struct xor_cb *xor_hw_desc;
switch (chan->device->id) {
case PPC440SPE_DMA0_ID:
case PPC440SPE_DMA1_ID:
dma_hw_desc = desc->hw_desc;
switch (dma_hw_desc->opc) {
case DMA_CDB_OPC_NO_OP:
case DMA_CDB_OPC_DFILL128:
return 0;
case DMA_CDB_OPC_DCHECK128:
return 1;
case DMA_CDB_OPC_MV_SG1_SG2:
case DMA_CDB_OPC_MULTICAST:
/*
* Only for RXOR operations we have more than
* one source
*/
if (le32_to_cpu(dma_hw_desc->sg1u) &
DMA_CUED_XOR_WIN_MSK) {
/* RXOR op, there are 2 or 3 sources */
if (((le32_to_cpu(dma_hw_desc->sg1u) >>
DMA_CUED_REGION_OFF) &
DMA_CUED_REGION_MSK) == DMA_RXOR12) {
/* RXOR 1-2 */
return 2;
} else {
/* RXOR 1-2-3/1-2-4/1-2-5 */
return 3;
}
}
return 1;
default:
printk(KERN_ERR "%s: unknown OPC 0x%02x\n",
__func__, dma_hw_desc->opc);
BUG();
}
case PPC440SPE_XOR_ID:
/* up to 16 sources */
xor_hw_desc = desc->hw_desc;
return xor_hw_desc->cbc & XOR_CDCR_OAC_MSK;
default:
BUG();
}
return 0;
}
/**
* ppc440spe_desc_get_dst_num - get the number of destination addresses in
* this descriptor
*/
static u32 ppc440spe_desc_get_dst_num(struct ppc440spe_adma_desc_slot *desc,
struct ppc440spe_adma_chan *chan)
{
struct dma_cdb *dma_hw_desc;
switch (chan->device->id) {
case PPC440SPE_DMA0_ID:
case PPC440SPE_DMA1_ID:
/* May be 1 or 2 destinations */
dma_hw_desc = desc->hw_desc;
switch (dma_hw_desc->opc) {
case DMA_CDB_OPC_NO_OP:
case DMA_CDB_OPC_DCHECK128:
return 0;
case DMA_CDB_OPC_MV_SG1_SG2:
case DMA_CDB_OPC_DFILL128:
return 1;
case DMA_CDB_OPC_MULTICAST:
if (desc->dst_cnt == 2)
return 2;
else
return 1;
default:
printk(KERN_ERR "%s: unknown OPC 0x%02x\n",
__func__, dma_hw_desc->opc);
BUG();
}
case PPC440SPE_XOR_ID:
/* Always only 1 destination */
return 1;
default:
BUG();
}
return 0;
}
/**
* ppc440spe_desc_get_link - get the address of the descriptor that
* follows this one
@@ -1705,43 +1493,6 @@ static void ppc440spe_adma_free_slots(struct ppc440spe_adma_desc_slot *slot,
}
}
static void ppc440spe_adma_unmap(struct ppc440spe_adma_chan *chan,
struct ppc440spe_adma_desc_slot *desc)
{
u32 src_cnt, dst_cnt;
dma_addr_t addr;
/*
* get the number of sources & destination
* included in this descriptor and unmap
* them all
*/
src_cnt = ppc440spe_desc_get_src_num(desc, chan);
dst_cnt = ppc440spe_desc_get_dst_num(desc, chan);
/* unmap destinations */
if (!(desc->async_tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
while (dst_cnt--) {
addr = ppc440spe_desc_get_dest_addr(
desc, chan, dst_cnt);
dma_unmap_page(chan->device->dev,
addr, desc->unmap_len,
DMA_FROM_DEVICE);
}
}
/* unmap sources */
if (!(desc->async_tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
while (src_cnt--) {
addr = ppc440spe_desc_get_src_addr(
desc, chan, src_cnt);
dma_unmap_page(chan->device->dev,
addr, desc->unmap_len,
DMA_TO_DEVICE);
}
}
}
/**
* ppc440spe_adma_run_tx_complete_actions - call functions to be called
* upon completion
@@ -1766,26 +1517,6 @@ static dma_cookie_t ppc440spe_adma_run_tx_complete_actions(
desc->async_tx.callback_param);
dma_descriptor_unmap(&desc->async_tx);
/* unmap dma addresses
* (unmap_single vs unmap_page?)
*
* actually, ppc's dma_unmap_page() functions are empty, so
* the following code is just for the sake of completeness
*/
if (chan && chan->needs_unmap && desc->group_head &&
desc->unmap_len) {
struct ppc440spe_adma_desc_slot *unmap =
desc->group_head;
/* assume 1 slot per op always */
u32 slot_count = unmap->slot_cnt;
/* Run through the group list and unmap addresses */
for (i = 0; i < slot_count; i++) {
BUG_ON(!unmap);
ppc440spe_adma_unmap(chan, unmap);
unmap = unmap->hw_next;
}
}
}
/* run dependent operations */
...
@@ -154,38 +154,6 @@ static bool __td_dma_done_ack(struct timb_dma_chan *td_chan)
return done;
}
static void __td_unmap_desc(struct timb_dma_chan *td_chan, const u8 *dma_desc,
bool single)
{
dma_addr_t addr;
int len;
addr = (dma_desc[7] << 24) | (dma_desc[6] << 16) | (dma_desc[5] << 8) |
dma_desc[4];
len = (dma_desc[3] << 8) | dma_desc[2];
if (single)
dma_unmap_single(chan2dev(&td_chan->chan), addr, len,
DMA_TO_DEVICE);
else
dma_unmap_page(chan2dev(&td_chan->chan), addr, len,
DMA_TO_DEVICE);
}
static void __td_unmap_descs(struct timb_dma_desc *td_desc, bool single)
{
struct timb_dma_chan *td_chan = container_of(td_desc->txd.chan,
struct timb_dma_chan, chan);
u8 *descs;
for (descs = td_desc->desc_list; ; descs += TIMB_DMA_DESC_SIZE) {
__td_unmap_desc(td_chan, descs, single);
if (descs[0] & 0x02)
break;
}
}
static int td_fill_desc(struct timb_dma_chan *td_chan, u8 *dma_desc,
struct scatterlist *sg, bool last)
{
@@ -294,10 +262,6 @@ static void __td_finish(struct timb_dma_chan *td_chan)
list_move(&td_desc->desc_node, &td_chan->free_list);
dma_descriptor_unmap(txd);
if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP))
__td_unmap_descs(td_desc,
txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE);
/*
* The API requires that no submissions are done from a
* callback, so we don't need to drop the lock here
...
@@ -420,30 +420,6 @@ txx9dmac_descriptor_complete(struct txx9dmac_chan *dc,
list_move(&desc->desc_node, &dc->free_list);
dma_descriptor_unmap(txd);
if (!ds) {
dma_addr_t dmaaddr;
if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
dmaaddr = is_dmac64(dc) ?
desc->hwdesc.DAR : desc->hwdesc32.DAR;
if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
dma_unmap_single(chan2parent(&dc->chan),
dmaaddr, desc->len, DMA_FROM_DEVICE);
else
dma_unmap_page(chan2parent(&dc->chan),
dmaaddr, desc->len, DMA_FROM_DEVICE);
}
if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
dmaaddr = is_dmac64(dc) ?
desc->hwdesc.SAR : desc->hwdesc32.SAR;
if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
dma_unmap_single(chan2parent(&dc->chan),
dmaaddr, desc->len, DMA_TO_DEVICE);
else
dma_unmap_page(chan2parent(&dc->chan),
dmaaddr, desc->len, DMA_TO_DEVICE);
}
}
/*
* The API requires that no submissions are done from a
* callback, so we don't need to drop the lock here
...
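
With the DMA_COMPL_SKIP_*_UNMAP flag handling gone from drivers, unmapping is driven by whoever maps the buffers. A hedged sketch of the submit-side pattern for a memcpy (the dma_dev, chan, src_pg, dst_pg, len, and flags names are illustrative, not from this patch):

struct dmaengine_unmap_data *unmap;

unmap = dmaengine_get_unmap_data(dma_dev->dev, 2, GFP_NOWAIT);
if (unmap) {
	unmap->to_cnt = 1;	/* addr[0] is mapped DMA_TO_DEVICE */
	unmap->from_cnt = 1;	/* addr[1] is mapped DMA_FROM_DEVICE */
	unmap->len = len;
	unmap->addr[0] = dma_map_page(dma_dev->dev, src_pg, 0, len,
				      DMA_TO_DEVICE);
	unmap->addr[1] = dma_map_page(dma_dev->dev, dst_pg, 0, len,
				      DMA_FROM_DEVICE);
	tx = dma_dev->device_prep_dma_memcpy(chan, unmap->addr[1],
					     unmap->addr[0], len, flags);
	if (tx)
		dma_set_unmap(tx, unmap);	/* tx takes a reference */
	dmaengine_unmap_put(unmap);	/* drop the submitter's reference */
}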