Commit 9bb67696 authored by Linus Torvalds

Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx

* 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx: (28 commits)
  ioat: cleanup ->timer_fn() and ->cleanup_fn() prototypes
  ioat3: interrupt coalescing
  ioat: close potential BUG_ON race in the descriptor cleanup path
  ioat2: kill pending flag
  ioat3: use ioat2_quiesce()
  ioat3: cleanup, don't enable DCA completion writes
  DMAENGINE: COH 901 318 lli sg offset fix
  DMAENGINE: COH 901 318 configure channel direction
  DMAENGINE: COH 901 318 remove irq counting
  DMAENGINE: COH 901 318 descriptor pool refactoring
  DMAENGINE: COH 901 318 cleanups
  dma: Add MPC512x DMA driver
  Debugging options for the DMA engine subsystem
  iop-adma: redundant/wrong tests in iop_*_count()?
  dmatest: fix handling of an even number of xor_sources
  dmatest: correct raid6 PQ test
  fsldma: Fix cookie issues
  dma: cases IPU_PIX_FMT_BGRA32, BGR32 and ABGR32 are the same in ipu_ch_param_set_size()
  dma: make Open Firmware device id constant
  ...
parents 0f2cc4ec dd58ffcf
@@ -44,21 +44,29 @@ Example:
 		compatible = "fsl,mpc8349-dma-channel", "fsl,elo-dma-channel";
 		cell-index = <0>;
 		reg = <0 0x80>;
+		interrupt-parent = <&ipic>;
+		interrupts = <71 8>;
 	};
 	dma-channel@80 {
 		compatible = "fsl,mpc8349-dma-channel", "fsl,elo-dma-channel";
 		cell-index = <1>;
 		reg = <0x80 0x80>;
+		interrupt-parent = <&ipic>;
+		interrupts = <71 8>;
 	};
 	dma-channel@100 {
 		compatible = "fsl,mpc8349-dma-channel", "fsl,elo-dma-channel";
 		cell-index = <2>;
 		reg = <0x100 0x80>;
+		interrupt-parent = <&ipic>;
+		interrupts = <71 8>;
 	};
 	dma-channel@180 {
 		compatible = "fsl,mpc8349-dma-channel", "fsl,elo-dma-channel";
 		cell-index = <3>;
 		reg = <0x180 0x80>;
+		interrupt-parent = <&ipic>;
+		interrupts = <71 8>;
 	};
 };
...
@@ -366,8 +366,7 @@ static inline int iop_chan_xor_slot_count(size_t len, int src_cnt,
 		slot_cnt += *slots_per_op;
 	}
-	if (len)
-		slot_cnt += *slots_per_op;
+	slot_cnt += *slots_per_op;
 	return slot_cnt;
 }
@@ -389,8 +388,7 @@ static inline int iop_chan_zero_sum_slot_count(size_t len, int src_cnt,
 		slot_cnt += *slots_per_op;
 	}
-	if (len)
-		slot_cnt += *slots_per_op;
+	slot_cnt += *slots_per_op;
 	return slot_cnt;
 }
@@ -737,10 +735,8 @@ iop_desc_set_zero_sum_byte_count(struct iop_adma_desc_slot *desc, u32 len)
 			i += slots_per_op;
 		} while (len > IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT);
-		if (len) {
-			iter = iop_hw_desc_slot_idx(hw_desc, i);
-			iter->byte_count = len;
-		}
+		iter = iop_hw_desc_slot_idx(hw_desc, i);
+		iter->byte_count = len;
 	}
 }
...
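A note on why the removed `if (len)` tests were dead (this is my reading, matching the questioning tone of the commit subject): the preceding loops subtract the per-descriptor maximum only while `len` is strictly greater than it, so on loop exit `len` sits in (0, MAX] and cannot be zero, assuming callers never pass a zero-length operation. A minimal standalone sketch of that shape, with a made-up MAX_BYTE_COUNT standing in for the real iop3xx constant:

#include <stddef.h>
#include <stdio.h>

/* Placeholder; the real IOP_ADMA_*_MAX_BYTE_COUNT values are hardware-defined. */
#define MAX_BYTE_COUNT (16 * 1024 * 1024)

/* Count descriptor slots for a 'len'-byte transfer, 'slots_per_op' slots per
 * hardware operation. Mirrors the post-patch logic: one unconditional tail
 * increment instead of 'if (len)'. */
static int slot_count(size_t len, int slots_per_op)
{
        int slot_cnt = 0;

        while (len > MAX_BYTE_COUNT) {
                len -= MAX_BYTE_COUNT;
                slot_cnt += slots_per_op;
        }
        /* len is now in (0, MAX_BYTE_COUNT] for any nonzero input,
         * so the old 'if (len)' test could never be false here. */
        slot_cnt += slots_per_op;
        return slot_cnt;
}

int main(void)
{
        printf("%d\n", slot_count((size_t)MAX_BYTE_COUNT + 1, 1)); /* 2 */
        printf("%d\n", slot_count(1, 1));                          /* 1 */
        return 0;
}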
@@ -53,7 +53,7 @@ struct coh901318_params {
  * struct coh_dma_channel - dma channel base
  * @name: ascii name of dma channel
  * @number: channel id number
- * @desc_nbr_max: number of preallocated descriptortors
+ * @desc_nbr_max: number of preallocated descriptors
  * @priority_high: prio of channel, 0 low otherwise high.
  * @param: configuration parameters
  * @dev_addr: physical address of periphal connected to channel
...
@@ -13,6 +13,22 @@ menuconfig DMADEVICES
 	  DMA Device drivers supported by the configured arch, it may
 	  be empty in some cases.

+config DMADEVICES_DEBUG
+	bool "DMA Engine debugging"
+	depends on DMADEVICES != n
+	help
+	  This is an option for use by developers; most people should
+	  say N here.  This enables DMA engine core and driver debugging.
+
+config DMADEVICES_VDEBUG
+	bool "DMA Engine verbose debugging"
+	depends on DMADEVICES_DEBUG != n
+	help
+	  This is an option for use by developers; most people should
+	  say N here.  This enables deeper (more verbose) debugging of
+	  the DMA engine core and drivers.
+
 if DMADEVICES

 comment "DMA Devices"
@@ -69,6 +85,13 @@ config FSL_DMA
 	  The Elo is the DMA controller on some 82xx and 83xx parts, and the
 	  Elo Plus is the DMA controller on 85xx and 86xx parts.

+config MPC512X_DMA
+	tristate "Freescale MPC512x built-in DMA engine support"
+	depends on PPC_MPC512x
+	select DMA_ENGINE
+	---help---
+	  Enable support for the Freescale MPC512x built-in DMA engine.
+
 config MV_XOR
 	bool "Marvell XOR engine support"
 	depends on PLAT_ORION
...
+ifeq ($(CONFIG_DMADEVICES_DEBUG),y)
+	EXTRA_CFLAGS	+= -DDEBUG
+endif
+ifeq ($(CONFIG_DMADEVICES_VDEBUG),y)
+	EXTRA_CFLAGS	+= -DVERBOSE_DEBUG
+endif
+
 obj-$(CONFIG_DMA_ENGINE) += dmaengine.o
 obj-$(CONFIG_NET_DMA) += iovlock.o
 obj-$(CONFIG_DMATEST) += dmatest.o
 obj-$(CONFIG_INTEL_IOATDMA) += ioat/
 obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o
 obj-$(CONFIG_FSL_DMA) += fsldma.o
+obj-$(CONFIG_MPC512X_DMA) += mpc512x_dma.o
 obj-$(CONFIG_MV_XOR) += mv_xor.o
 obj-$(CONFIG_DW_DMAC) += dw_dmac.o
 obj-$(CONFIG_AT_HDMAC) += at_hdmac.o
...
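These two options work purely at compile time: the Makefile turns the Kconfig symbols into -DDEBUG / -DVERBOSE_DEBUG, which is what makes the kernel's dev_dbg()-style macros expand to real prints instead of no-ops. A simplified userspace illustration of that pattern (the real macros live in the kernel's device.h and are more involved):

#include <stdio.h>

/* Stand-ins for dev_dbg()/dev_vdbg(): call sites always compile, but only
 * emit output when the corresponding macro was passed on the command line. */
#ifdef DEBUG
#define dev_dbg(fmt, ...)  fprintf(stderr, "dbg: " fmt, ##__VA_ARGS__)
#else
#define dev_dbg(fmt, ...)  do { } while (0)
#endif

#ifdef VERBOSE_DEBUG
#define dev_vdbg(fmt, ...) fprintf(stderr, "vdbg: " fmt, ##__VA_ARGS__)
#else
#define dev_vdbg(fmt, ...) do { } while (0)
#endif

int main(void)
{
        dev_dbg("channel %d armed\n", 0);   /* printed only with -DDEBUG */
        dev_vdbg("ring head %u\n", 42u);    /* printed only with -DVERBOSE_DEBUG */
        return 0;
}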
This diff is collapsed.
@@ -74,6 +74,8 @@ coh901318_lli_alloc(struct coh901318_pool *pool, unsigned int len)
 	lli = head;
 	lli->phy_this = phy;
+	lli->link_addr = 0x00000000;
+	lli->virt_link_addr = 0x00000000U;

 	for (i = 1; i < len; i++) {
 		lli_prev = lli;
@@ -85,13 +87,13 @@ coh901318_lli_alloc(struct coh901318_pool *pool, unsigned int len)
 		DEBUGFS_POOL_COUNTER_ADD(pool, 1);
 		lli->phy_this = phy;
+		lli->link_addr = 0x00000000;
+		lli->virt_link_addr = 0x00000000U;

 		lli_prev->link_addr = phy;
 		lli_prev->virt_link_addr = lli;
 	}

-	lli->link_addr = 0x00000000U;
-
 	spin_unlock(&pool->lock);

 	return head;
@@ -166,8 +168,7 @@ coh901318_lli_fill_memcpy(struct coh901318_pool *pool,
 	lli->src_addr = src;
 	lli->dst_addr = dst;

-	/* One irq per single transfer */
-	return 1;
+	return 0;
 }

 int
@@ -223,8 +224,7 @@ coh901318_lli_fill_single(struct coh901318_pool *pool,
 	lli->src_addr = src;
 	lli->dst_addr = dst;

-	/* One irq per single transfer */
-	return 1;
+	return 0;
 }

 int
@@ -240,7 +240,6 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool,
 	u32 ctrl_sg;
 	dma_addr_t src = 0;
 	dma_addr_t dst = 0;
-	int nbr_of_irq = 0;
 	u32 bytes_to_transfer;
 	u32 elem_size;
@@ -269,15 +268,12 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool,
 			ctrl_sg = ctrl ? ctrl : ctrl_last;

-		if ((ctrl_sg & ctrl_irq_mask))
-			nbr_of_irq++;
-
 		if (dir == DMA_TO_DEVICE)
 			/* increment source address */
-			src = sg_dma_address(sg);
+			src = sg_phys(sg);
 		else
 			/* increment destination address */
-			dst = sg_dma_address(sg);
+			dst = sg_phys(sg);

 		bytes_to_transfer = sg_dma_len(sg);
@@ -310,8 +306,7 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool,
 	}
 	spin_unlock(&pool->lock);

-	/* There can be many IRQs per sg transfer */
-	return nbr_of_irq;
+	return 0;
 err:
 	spin_unlock(&pool->lock);
 	return -EINVAL;
...
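The descriptor pool refactoring zero-terminates each lli as it is allocated, rather than patching the tail's link after the loop, so the chain is validly terminated at every step. A rough sketch of that linking pattern with plain malloc and a hypothetical miniature struct (not the driver's types):

#include <stdlib.h>

/* Hypothetical miniature of a linked-list item: each entry points at the
 * next one both virtually and by a fake "physical" address. */
struct lli {
        unsigned int phy_this;  /* fake physical address of this entry */
        unsigned int link_addr; /* fake physical address of next, 0 = end */
        struct lli *virt_next;  /* virtual pointer to next, NULL = end */
};

static struct lli *lli_alloc(unsigned int len)
{
        struct lli *head, *prev, *cur;
        unsigned int i;

        if (len == 0)
                return NULL;
        head = malloc(sizeof(*head));
        if (!head)
                return NULL;
        head->phy_this = 0x1000;
        head->link_addr = 0;   /* terminate immediately, as the driver now does */
        head->virt_next = NULL;

        prev = head;
        for (i = 1; i < len; i++) {
                cur = malloc(sizeof(*cur));
                if (!cur)
                        return head; /* chain is still validly terminated */
                cur->phy_this = 0x1000 + 0x10 * i;
                cur->link_addr = 0;
                cur->virt_next = NULL;
                /* back-patch the previous entry to point at this one */
                prev->link_addr = cur->phy_this;
                prev->virt_next = cur;
                prev = cur;
        }
        return head;
}

int main(void)
{
        struct lli *l = lli_alloc(4);
        return l ? 0 : 1;
}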
@@ -237,7 +237,7 @@ static int dmatest_func(void *data)
 	dma_cookie_t cookie;
 	enum dma_status status;
 	enum dma_ctrl_flags flags;
-	u8 pq_coefs[pq_sources];
+	u8 pq_coefs[pq_sources + 1];
 	int ret;
 	int src_cnt;
 	int dst_cnt;
@@ -257,7 +257,7 @@ static int dmatest_func(void *data)
 	} else if (thread->type == DMA_PQ) {
 		src_cnt = pq_sources | 1; /* force odd to ensure dst = src */
 		dst_cnt = 2;
-		for (i = 0; i < pq_sources; i++)
+		for (i = 0; i < src_cnt; i++)
 			pq_coefs[i] = 1;
 	} else
 		goto err_srcs;
@@ -347,7 +347,7 @@ static int dmatest_func(void *data)
 		else if (thread->type == DMA_XOR)
 			tx = dev->device_prep_dma_xor(chan,
 						      dma_dsts[0] + dst_off,
-						      dma_srcs, xor_sources,
+						      dma_srcs, src_cnt,
 						      len, flags);
 		else if (thread->type == DMA_PQ) {
 			dma_addr_t dma_pq[dst_cnt];
@@ -355,7 +355,7 @@ static int dmatest_func(void *data)
 			for (i = 0; i < dst_cnt; i++)
 				dma_pq[i] = dma_dsts[i] + dst_off;
 			tx = dev->device_prep_dma_pq(chan, dma_pq, dma_srcs,
-						     pq_sources, pq_coefs,
+						     src_cnt, pq_coefs,
 						     len, flags);
 		}
...
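Both dmatest fixes orbit the same rounding: `src_cnt = pq_sources | 1` bumps an even `pq_sources` module parameter to the next odd value, so the coefficient array must hold `pq_sources + 1` entries and the prep calls must pass the rounded `src_cnt`, not the raw parameter. A small demonstration of the arithmetic (illustration only, not the driver code):

#include <stdio.h>

int main(void)
{
        for (unsigned int pq_sources = 2; pq_sources <= 8; pq_sources++) {
                unsigned int src_cnt = pq_sources | 1; /* force odd */

                /* With an even pq_sources, src_cnt == pq_sources + 1, so a
                 * u8 pq_coefs[pq_sources] array is one element short when the
                 * fill loop runs to src_cnt; sizing it pq_sources + 1 fixes it. */
                printf("pq_sources=%u -> src_cnt=%u (array needs %u entries)\n",
                       pq_sources, src_cnt, src_cnt);
        }
        return 0;
}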
This diff is collapsed.
@@ -92,11 +92,9 @@ struct fsl_desc_sw {
 	struct list_head node;
 	struct list_head tx_list;
 	struct dma_async_tx_descriptor async_tx;
-	struct list_head *ld;
-	void *priv;
 } __attribute__((aligned(32)));

-struct fsl_dma_chan_regs {
+struct fsldma_chan_regs {
 	u32 mr;		/* 0x00 - Mode Register */
 	u32 sr;		/* 0x04 - Status Register */
 	u64 cdar;	/* 0x08 - Current descriptor address register */
@@ -106,20 +104,19 @@ struct fsldma_chan_regs {
 	u64 ndar;	/* 0x24 - Next Descriptor Address Register */
 };

-struct fsl_dma_chan;
+struct fsldma_chan;
 #define FSL_DMA_MAX_CHANS_PER_DEVICE 4

-struct fsl_dma_device {
-	void __iomem *reg_base;	/* DGSR register base */
-	struct resource reg;	/* Resource for register */
+struct fsldma_device {
+	void __iomem *regs;	/* DGSR register base */
 	struct device *dev;
 	struct dma_device common;
-	struct fsl_dma_chan *chan[FSL_DMA_MAX_CHANS_PER_DEVICE];
+	struct fsldma_chan *chan[FSL_DMA_MAX_CHANS_PER_DEVICE];
 	u32 feature;		/* The same as DMA channels */
 	int irq;		/* Channel IRQ */
 };

-/* Define macros for fsl_dma_chan->feature property */
+/* Define macros for fsldma_chan->feature property */
 #define FSL_DMA_LITTLE_ENDIAN	0x00000000
 #define FSL_DMA_BIG_ENDIAN	0x00000001
@@ -130,28 +127,28 @@ struct fsldma_device {
 #define FSL_DMA_CHAN_PAUSE_EXT	0x00001000
 #define FSL_DMA_CHAN_START_EXT	0x00002000

-struct fsl_dma_chan {
-	struct fsl_dma_chan_regs __iomem *reg_base;
+struct fsldma_chan {
+	struct fsldma_chan_regs __iomem *regs;
 	dma_cookie_t completed_cookie;	/* The maximum cookie completed */
 	spinlock_t desc_lock;		/* Descriptor operation lock */
-	struct list_head ld_queue;	/* Link descriptors queue */
+	struct list_head ld_pending;	/* Link descriptors queue */
+	struct list_head ld_running;	/* Link descriptors queue */
 	struct dma_chan common;		/* DMA common channel */
 	struct dma_pool *desc_pool;	/* Descriptors pool */
 	struct device *dev;		/* Channel device */
-	struct resource reg;		/* Resource for register */
 	int irq;			/* Channel IRQ */
 	int id;				/* Raw id of this channel */
 	struct tasklet_struct tasklet;
 	u32 feature;

-	void (*toggle_ext_pause)(struct fsl_dma_chan *fsl_chan, int enable);
-	void (*toggle_ext_start)(struct fsl_dma_chan *fsl_chan, int enable);
-	void (*set_src_loop_size)(struct fsl_dma_chan *fsl_chan, int size);
-	void (*set_dest_loop_size)(struct fsl_dma_chan *fsl_chan, int size);
-	void (*set_request_count)(struct fsl_dma_chan *fsl_chan, int size);
+	void (*toggle_ext_pause)(struct fsldma_chan *fsl_chan, int enable);
+	void (*toggle_ext_start)(struct fsldma_chan *fsl_chan, int enable);
+	void (*set_src_loop_size)(struct fsldma_chan *fsl_chan, int size);
+	void (*set_dst_loop_size)(struct fsldma_chan *fsl_chan, int size);
+	void (*set_request_count)(struct fsldma_chan *fsl_chan, int size);
 };

-#define to_fsl_chan(chan) container_of(chan, struct fsl_dma_chan, common)
+#define to_fsl_chan(chan) container_of(chan, struct fsldma_chan, common)
 #define to_fsl_desc(lh) container_of(lh, struct fsl_desc_sw, node)
 #define tx_to_fsl_desc(tx) container_of(tx, struct fsl_desc_sw, async_tx)
...
@@ -94,16 +94,12 @@ static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
 	return IRQ_HANDLED;
 }

-static void ioat1_cleanup_tasklet(unsigned long data);
-
 /* common channel initialization */
-void ioat_init_channel(struct ioatdma_device *device,
-		       struct ioat_chan_common *chan, int idx,
-		       void (*timer_fn)(unsigned long),
-		       void (*tasklet)(unsigned long),
-		       unsigned long ioat)
+void ioat_init_channel(struct ioatdma_device *device, struct ioat_chan_common *chan, int idx)
 {
 	struct dma_device *dma = &device->common;
+	struct dma_chan *c = &chan->common;
+	unsigned long data = (unsigned long) c;

 	chan->device = device;
 	chan->reg_base = device->reg_base + (0x80 * (idx + 1));
@@ -112,14 +108,12 @@ void ioat_init_channel(struct ioatdma_device *device,
 	list_add_tail(&chan->common.device_node, &dma->channels);
 	device->idx[idx] = chan;
 	init_timer(&chan->timer);
-	chan->timer.function = timer_fn;
-	chan->timer.data = ioat;
-	tasklet_init(&chan->cleanup_task, tasklet, ioat);
+	chan->timer.function = device->timer_fn;
+	chan->timer.data = data;
+	tasklet_init(&chan->cleanup_task, device->cleanup_fn, data);
 	tasklet_disable(&chan->cleanup_task);
 }

-static void ioat1_timer_event(unsigned long data);
-
 /**
  * ioat1_dma_enumerate_channels - find and initialize the device's channels
  * @device: the device to be enumerated
@@ -155,10 +149,7 @@ static int ioat1_enumerate_channels(struct ioatdma_device *device)
 		if (!ioat)
 			break;

-		ioat_init_channel(device, &ioat->base, i,
-				  ioat1_timer_event,
-				  ioat1_cleanup_tasklet,
-				  (unsigned long) ioat);
+		ioat_init_channel(device, &ioat->base, i);
 		ioat->xfercap = xfercap;
 		spin_lock_init(&ioat->desc_lock);
 		INIT_LIST_HEAD(&ioat->free_desc);
@@ -532,12 +523,12 @@ ioat1_dma_prep_memcpy(struct dma_chan *c, dma_addr_t dma_dest,
 	return &desc->txd;
 }

-static void ioat1_cleanup_tasklet(unsigned long data)
+static void ioat1_cleanup_event(unsigned long data)
 {
-	struct ioat_dma_chan *chan = (void *)data;
-	ioat1_cleanup(chan);
-	writew(IOAT_CHANCTRL_RUN, chan->base.reg_base + IOAT_CHANCTRL_OFFSET);
+	struct ioat_dma_chan *ioat = to_ioat_chan((void *) data);
+
+	ioat1_cleanup(ioat);
+	writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
 }

 void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags,
@@ -687,7 +678,7 @@ static void ioat1_cleanup(struct ioat_dma_chan *ioat)
 static void ioat1_timer_event(unsigned long data)
 {
-	struct ioat_dma_chan *ioat = (void *) data;
+	struct ioat_dma_chan *ioat = to_ioat_chan((void *) data);
 	struct ioat_chan_common *chan = &ioat->base;

 	dev_dbg(to_dev(chan), "%s: state: %lx\n", __func__, chan->state);
@@ -734,16 +725,17 @@ static void ioat1_timer_event(unsigned long data)
 	spin_unlock_bh(&chan->cleanup_lock);
 }

-static enum dma_status
-ioat1_dma_is_complete(struct dma_chan *c, dma_cookie_t cookie,
+enum dma_status
+ioat_is_dma_complete(struct dma_chan *c, dma_cookie_t cookie,
 		      dma_cookie_t *done, dma_cookie_t *used)
 {
-	struct ioat_dma_chan *ioat = to_ioat_chan(c);
+	struct ioat_chan_common *chan = to_chan_common(c);
+	struct ioatdma_device *device = chan->device;

 	if (ioat_is_complete(c, cookie, done, used) == DMA_SUCCESS)
 		return DMA_SUCCESS;

-	ioat1_cleanup(ioat);
+	device->cleanup_fn((unsigned long) c);

 	return ioat_is_complete(c, cookie, done, used);
 }
@@ -1199,12 +1191,14 @@ int __devinit ioat1_dma_probe(struct ioatdma_device *device, int dca)
 	device->intr_quirk = ioat1_intr_quirk;
 	device->enumerate_channels = ioat1_enumerate_channels;
 	device->self_test = ioat_dma_self_test;
+	device->timer_fn = ioat1_timer_event;
+	device->cleanup_fn = ioat1_cleanup_event;
 	dma = &device->common;
 	dma->device_prep_dma_memcpy = ioat1_dma_prep_memcpy;
 	dma->device_issue_pending = ioat1_dma_memcpy_issue_pending;
 	dma->device_alloc_chan_resources = ioat1_dma_alloc_chan_resources;
 	dma->device_free_chan_resources = ioat1_dma_free_chan_resources;
-	dma->device_is_tx_complete = ioat1_dma_is_complete;
+	dma->device_is_tx_complete = ioat_is_dma_complete;

 	err = ioat_probe(device);
 	if (err)
...
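After this cleanup every ioat timer and tasklet callback receives the same `unsigned long` payload, a pointer to the channel's common struct, and recovers its specific channel type itself. The underlying pattern is the container_of() upcast from an embedded member; a generic userspace sketch of it (not the ioat structs):

#include <stddef.h>
#include <stdio.h>

/* Minimal container_of: recover the enclosing struct from a member pointer. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct chan_common { int id; };

struct fancy_chan {
        int extra_state;
        struct chan_common base; /* embedded common part */
};

/* Signature fixed to 'unsigned long', as for kernel tasklets/timers: the data
 * word carries a pointer to the *common* struct, and the handler upcasts. */
static void cleanup_event(unsigned long data)
{
        struct chan_common *c = (struct chan_common *)data;
        struct fancy_chan *fc = container_of(c, struct fancy_chan, base);

        printf("chan %d, extra %d\n", c->id, fc->extra_state);
}

int main(void)
{
        struct fancy_chan fc = { .extra_state = 7, .base = { .id = 0 } };

        cleanup_event((unsigned long)&fc.base);
        return 0;
}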
@@ -61,7 +61,7 @@
  * @intr_quirk: interrupt setup quirk (for ioat_v1 devices)
  * @enumerate_channels: hw version specific channel enumeration
  * @reset_hw: hw version specific channel (re)initialization
- * @cleanup_tasklet: select between the v2 and v3 cleanup routines
+ * @cleanup_fn: select between the v2 and v3 cleanup routines
  * @timer_fn: select between the v2 and v3 timer watchdog routines
  * @self_test: hardware version specific self test for each supported op type
  *
@@ -80,7 +80,7 @@ struct ioatdma_device {
 	void (*intr_quirk)(struct ioatdma_device *device);
 	int (*enumerate_channels)(struct ioatdma_device *device);
 	int (*reset_hw)(struct ioat_chan_common *chan);
-	void (*cleanup_tasklet)(unsigned long data);
+	void (*cleanup_fn)(unsigned long data);
 	void (*timer_fn)(unsigned long data);
 	int (*self_test)(struct ioatdma_device *device);
 };
@@ -337,10 +337,9 @@ struct dca_provider * __devinit ioat_dca_init(struct pci_dev *pdev,
 					      void __iomem *iobase);
 unsigned long ioat_get_current_completion(struct ioat_chan_common *chan);
 void ioat_init_channel(struct ioatdma_device *device,
-		       struct ioat_chan_common *chan, int idx,
-		       void (*timer_fn)(unsigned long),
-		       void (*tasklet)(unsigned long),
-		       unsigned long ioat);
+		       struct ioat_chan_common *chan, int idx);
+enum dma_status ioat_is_dma_complete(struct dma_chan *c, dma_cookie_t cookie,
+				     dma_cookie_t *done, dma_cookie_t *used);
 void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags,
 		    size_t len, struct ioat_dma_descriptor *hw);
 bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
...
@@ -51,48 +51,40 @@ MODULE_PARM_DESC(ioat_ring_max_alloc_order,
 void __ioat2_issue_pending(struct ioat2_dma_chan *ioat)
 {
-	void * __iomem reg_base = ioat->base.reg_base;
-
-	ioat->pending = 0;
+	struct ioat_chan_common *chan = &ioat->base;
+
 	ioat->dmacount += ioat2_ring_pending(ioat);
 	ioat->issued = ioat->head;
 	/* make descriptor updates globally visible before notifying channel */
 	wmb();
-	writew(ioat->dmacount, reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
-	dev_dbg(to_dev(&ioat->base),
+	writew(ioat->dmacount, chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
+	dev_dbg(to_dev(chan),
 		"%s: head: %#x tail: %#x issued: %#x count: %#x\n",
 		__func__, ioat->head, ioat->tail, ioat->issued, ioat->dmacount);
 }

-void ioat2_issue_pending(struct dma_chan *chan)
+void ioat2_issue_pending(struct dma_chan *c)
 {
-	struct ioat2_dma_chan *ioat = to_ioat2_chan(chan);
+	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);

-	spin_lock_bh(&ioat->ring_lock);
-	if (ioat->pending == 1)
-		__ioat2_issue_pending(ioat);
-	spin_unlock_bh(&ioat->ring_lock);
+	if (ioat2_ring_pending(ioat)) {
+		spin_lock_bh(&ioat->ring_lock);
+		__ioat2_issue_pending(ioat);
+		spin_unlock_bh(&ioat->ring_lock);
+	}
 }

 /**
  * ioat2_update_pending - log pending descriptors
  * @ioat: ioat2+ channel
  *
- * set pending to '1' unless pending is already set to '2', pending == 2
- * indicates that submission is temporarily blocked due to an in-flight
- * reset. If we are already above the ioat_pending_level threshold then
- * just issue pending.
- *
- * called with ring_lock held
+ * Check if the number of unsubmitted descriptors has exceeded the
+ * watermark.  Called with ring_lock held
  */
 static void ioat2_update_pending(struct ioat2_dma_chan *ioat)
 {
-	if (unlikely(ioat->pending == 2))
-		return;
-	else if (ioat2_ring_pending(ioat) > ioat_pending_level)
+	if (ioat2_ring_pending(ioat) > ioat_pending_level)
 		__ioat2_issue_pending(ioat);
-	else
-		ioat->pending = 1;
 }

 static void __ioat2_start_null_desc(struct ioat2_dma_chan *ioat)
@@ -166,7 +158,7 @@ static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
 		seen_current = true;
 	}
 	ioat->tail += i;
-	BUG_ON(!seen_current); /* no active descs have written a completion? */
+	BUG_ON(active && !seen_current); /* no active descs have written a completion? */

 	chan->last_completion = phys_complete;
 	if (ioat->head == ioat->tail) {
@@ -207,9 +199,9 @@ static void ioat2_cleanup(struct ioat2_dma_chan *ioat)
 	spin_unlock_bh(&chan->cleanup_lock);
 }

-void ioat2_cleanup_tasklet(unsigned long data)
+void ioat2_cleanup_event(unsigned long data)
 {
-	struct ioat2_dma_chan *ioat = (void *) data;
+	struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);

 	ioat2_cleanup(ioat);
 	writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
@@ -291,7 +283,7 @@ static void ioat2_restart_channel(struct ioat2_dma_chan *ioat)
 void ioat2_timer_event(unsigned long data)
 {
-	struct ioat2_dma_chan *ioat = (void *) data;
+	struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
 	struct ioat_chan_common *chan = &ioat->base;

 	spin_lock_bh(&chan->cleanup_lock);
@@ -397,10 +389,7 @@ int ioat2_enumerate_channels(struct ioatdma_device *device)
 		if (!ioat)
 			break;

-		ioat_init_channel(device, &ioat->base, i,
-				  device->timer_fn,
-				  device->cleanup_tasklet,
-				  (unsigned long) ioat);
+		ioat_init_channel(device, &ioat->base, i);
 		ioat->xfercap_log = xfercap_log;
 		spin_lock_init(&ioat->ring_lock);
 		if (device->reset_hw(&ioat->base)) {
@@ -546,7 +535,6 @@ int ioat2_alloc_chan_resources(struct dma_chan *c)
 	ioat->head = 0;
 	ioat->issued = 0;
 	ioat->tail = 0;
-	ioat->pending = 0;
 	ioat->alloc_order = order;
 	spin_unlock_bh(&ioat->ring_lock);
@@ -701,7 +689,7 @@ int ioat2_alloc_and_lock(u16 *idx, struct ioat2_dma_chan *ioat, int num_descs)
 			mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
 			spin_unlock_bh(&chan->cleanup_lock);
-			device->timer_fn((unsigned long) ioat);
+			device->timer_fn((unsigned long) &chan->common);
 		} else
 			spin_unlock_bh(&chan->cleanup_lock);
 		return -ENOMEM;
@@ -785,7 +773,7 @@ void ioat2_free_chan_resources(struct dma_chan *c)
 	tasklet_disable(&chan->cleanup_task);
 	del_timer_sync(&chan->timer);
-	device->cleanup_tasklet((unsigned long) ioat);
+	device->cleanup_fn((unsigned long) c);
 	device->reset_hw(chan);

 	spin_lock_bh(&ioat->ring_lock);
@@ -815,25 +803,9 @@ void ioat2_free_chan_resources(struct dma_chan *c)
 	chan->last_completion = 0;
 	chan->completion_dma = 0;
-	ioat->pending = 0;
 	ioat->dmacount = 0;
 }

-enum dma_status
-ioat2_is_complete(struct dma_chan *c, dma_cookie_t cookie,
-		  dma_cookie_t *done, dma_cookie_t *used)
-{
-	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
-	struct ioatdma_device *device = ioat->base.device;
-
-	if (ioat_is_complete(c, cookie, done, used) == DMA_SUCCESS)
-		return DMA_SUCCESS;
-
-	device->cleanup_tasklet((unsigned long) ioat);
-
-	return ioat_is_complete(c, cookie, done, used);
-}
-
 static ssize_t ring_size_show(struct dma_chan *c, char *page)
 {
 	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
@@ -874,7 +846,7 @@ int __devinit ioat2_dma_probe(struct ioatdma_device *device, int dca)
 	device->enumerate_channels = ioat2_enumerate_channels;
 	device->reset_hw = ioat2_reset_hw;
-	device->cleanup_tasklet = ioat2_cleanup_tasklet;
+	device->cleanup_fn = ioat2_cleanup_event;
 	device->timer_fn = ioat2_timer_event;
 	device->self_test = ioat_dma_self_test;
 	dma = &device->common;
@@ -882,7 +854,7 @@ int __devinit ioat2_dma_probe(struct ioatdma_device *device, int dca)
 	dma->device_issue_pending = ioat2_issue_pending;
 	dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
 	dma->device_free_chan_resources = ioat2_free_chan_resources;
-	dma->device_is_tx_complete = ioat2_is_complete;
+	dma->device_is_tx_complete = ioat_is_dma_complete;

 	err = ioat_probe(device);
 	if (err)
...
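The "kill pending flag" change works because the ring indices already encode what the flag duplicated: entries between `issued` and `head` are prepared but unsubmitted, so `ioat2_ring_pending()` answers "anything to kick?" with plain arithmetic and no extra state. A standalone sketch of that idea, with u16 wraparound and the register write stubbed out (the ring itself is fake):

#include <stdint.h>
#include <stdio.h>

struct ring {
        uint16_t head;   /* next slot software will allocate */
        uint16_t issued; /* last point hardware was notified about */
};

/* Descriptors in (issued, head] are prepared but not yet submitted;
 * unsigned subtraction handles index wraparound. */
static uint16_t ring_pending(const struct ring *r)
{
        return (uint16_t)(r->head - r->issued);
}

static void issue_pending(struct ring *r)
{
        if (ring_pending(r)) {  /* no flag needed: state is derived */
                /* ...write the new dmacount to hardware here... */
                r->issued = r->head;
        }
}

int main(void)
{
        struct ring r = { .head = 3, .issued = 0 };

        printf("pending before: %u\n", ring_pending(&r)); /* 3 */
        issue_pending(&r);
        printf("pending after:  %u\n", ring_pending(&r)); /* 0 */
        return 0;
}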
@@ -47,7 +47,6 @@ extern int ioat_ring_alloc_order;
  * @head: allocated index
  * @issued: hardware notification point
  * @tail: cleanup index
- * @pending: lock free indicator for issued != head
  * @dmacount: identical to 'head' except for occasionally resetting to zero
  * @alloc_order: log2 of the number of allocated descriptors
  * @ring: software ring buffer implementation of hardware ring
@@ -61,7 +60,6 @@ struct ioat2_dma_chan {
 	u16 tail;
 	u16 dmacount;
 	u16 alloc_order;
-	int pending;
 	struct ioat_ring_ent **ring;
 	spinlock_t ring_lock;
 };
@@ -178,12 +176,10 @@ ioat2_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
 void ioat2_issue_pending(struct dma_chan *chan);
 int ioat2_alloc_chan_resources(struct dma_chan *c);
 void ioat2_free_chan_resources(struct dma_chan *c);
-enum dma_status ioat2_is_complete(struct dma_chan *c, dma_cookie_t cookie,
-				  dma_cookie_t *done, dma_cookie_t *used);
 void __ioat2_restart_chan(struct ioat2_dma_chan *ioat);
 bool reshape_ring(struct ioat2_dma_chan *ioat, int order);
 void __ioat2_issue_pending(struct ioat2_dma_chan *ioat);
-void ioat2_cleanup_tasklet(unsigned long data);
+void ioat2_cleanup_event(unsigned long data);
 void ioat2_timer_event(unsigned long data);
 int ioat2_quiesce(struct ioat_chan_common *chan, unsigned long tmo);
 int ioat2_reset_sync(struct ioat_chan_common *chan, unsigned long tmo);
...
@@ -293,17 +293,25 @@ static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
 		}
 	}
 	ioat->tail += i;
-	BUG_ON(!seen_current); /* no active descs have written a completion? */
+	BUG_ON(active && !seen_current); /* no active descs have written a completion? */
 	chan->last_completion = phys_complete;

-	if (ioat->head == ioat->tail) {
+	active = ioat2_ring_active(ioat);
+	if (active == 0) {
 		dev_dbg(to_dev(chan), "%s: cancel completion timeout\n",
 			__func__);
 		clear_bit(IOAT_COMPLETION_PENDING, &chan->state);
 		mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
 	}
+	/* 5 microsecond delay per pending descriptor */
+	writew(min((5 * active), IOAT_INTRDELAY_MASK),
+	       chan->device->reg_base + IOAT_INTRDELAY_OFFSET);
 }

-static void ioat3_cleanup(struct ioat2_dma_chan *ioat)
+/* try to cleanup, but yield (via spin_trylock) to incoming submissions
+ * with the expectation that we will immediately poll again shortly
+ */
+static void ioat3_cleanup_poll(struct ioat2_dma_chan *ioat)
 {
 	struct ioat_chan_common *chan = &ioat->base;
 	unsigned long phys_complete;
@@ -329,29 +337,41 @@ static void ioat3_cleanup_poll(struct ioat2_dma_chan *ioat)
 	spin_unlock_bh(&chan->cleanup_lock);
 }

-static void ioat3_cleanup_tasklet(unsigned long data)
+/* run cleanup now because we already delayed the interrupt via INTRDELAY */
+static void ioat3_cleanup_sync(struct ioat2_dma_chan *ioat)
+{
+	struct ioat_chan_common *chan = &ioat->base;
+	unsigned long phys_complete;
+
+	prefetch(chan->completion);
+
+	spin_lock_bh(&chan->cleanup_lock);
+	if (!ioat_cleanup_preamble(chan, &phys_complete)) {
+		spin_unlock_bh(&chan->cleanup_lock);
+		return;
+	}
+	spin_lock_bh(&ioat->ring_lock);
+
+	__cleanup(ioat, phys_complete);
+
+	spin_unlock_bh(&ioat->ring_lock);
+	spin_unlock_bh(&chan->cleanup_lock);
+}
+
+static void ioat3_cleanup_event(unsigned long data)
 {
-	struct ioat2_dma_chan *ioat = (void *) data;
+	struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);

-	ioat3_cleanup(ioat);
-	writew(IOAT_CHANCTRL_RUN | IOAT3_CHANCTRL_COMPL_DCA_EN,
-	       ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
+	ioat3_cleanup_sync(ioat);
+	writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
 }

 static void ioat3_restart_channel(struct ioat2_dma_chan *ioat)
 {
 	struct ioat_chan_common *chan = &ioat->base;
 	unsigned long phys_complete;
-	u32 status;
-
-	status = ioat_chansts(chan);
-	if (is_ioat_active(status) || is_ioat_idle(status))
-		ioat_suspend(chan);
-	while (is_ioat_active(status) || is_ioat_idle(status)) {
-		status = ioat_chansts(chan);
-		cpu_relax();
-	}

+	ioat2_quiesce(chan, 0);
 	if (ioat_cleanup_preamble(chan, &phys_complete))
 		__cleanup(ioat, phys_complete);
@@ -360,7 +380,7 @@ static void ioat3_restart_channel(struct ioat2_dma_chan *ioat)
 static void ioat3_timer_event(unsigned long data)
 {
-	struct ioat2_dma_chan *ioat = (void *) data;
+	struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
 	struct ioat_chan_common *chan = &ioat->base;

 	spin_lock_bh(&chan->cleanup_lock);
@@ -426,7 +446,7 @@ ioat3_is_complete(struct dma_chan *c, dma_cookie_t cookie,
 	if (ioat_is_complete(c, cookie, done, used) == DMA_SUCCESS)
 		return DMA_SUCCESS;

-	ioat3_cleanup(ioat);
+	ioat3_cleanup_poll(ioat);

 	return ioat_is_complete(c, cookie, done, used);
 }
@@ -1239,11 +1259,11 @@ int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)

 	if (is_raid_device) {
 		dma->device_is_tx_complete = ioat3_is_complete;
-		device->cleanup_tasklet = ioat3_cleanup_tasklet;
+		device->cleanup_fn = ioat3_cleanup_event;
 		device->timer_fn = ioat3_timer_event;
 	} else {
-		dma->device_is_tx_complete = ioat2_is_complete;
-		device->cleanup_tasklet = ioat2_cleanup_tasklet;
+		dma->device_is_tx_complete = ioat_is_dma_complete;
+		device->cleanup_fn = ioat2_cleanup_event;
 		device->timer_fn = ioat2_timer_event;
 	}
...
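The interrupt coalescing logic programs a delay proportional to the remaining backlog: 5 microseconds per still-active descriptor, capped by the 14-bit INTRDELAY field, so a busy channel takes fewer, larger completion interrupts. A minimal sketch of just that computation (register write stubbed out):

#include <stdint.h>
#include <stdio.h>

#define IOAT_INTRDELAY_MASK 0x3FFF /* 14-bit delay field, in microseconds */

/* Compute the coalescing delay the way the driver does: 5 us per
 * still-active descriptor, clamped to what the register can hold. */
static uint16_t intr_delay(unsigned int active)
{
        unsigned int usec = 5 * active;

        return (uint16_t)(usec < IOAT_INTRDELAY_MASK ? usec : IOAT_INTRDELAY_MASK);
}

int main(void)
{
        printf("%u\n", intr_delay(0));     /* idle channel: no delay */
        printf("%u\n", intr_delay(100));   /* 500 us */
        printf("%u\n", intr_delay(10000)); /* clamped to 0x3FFF = 16383 */
        return 0;
}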
@@ -60,7 +60,7 @@
 #define IOAT_PERPORTOFFSET_OFFSET	0x0A	/* 16-bit */

 #define IOAT_INTRDELAY_OFFSET		0x0C	/* 16-bit */
-#define IOAT_INTRDELAY_INT_DELAY_MASK	0x3FFF	/* Interrupt Delay Time */
+#define IOAT_INTRDELAY_MASK		0x3FFF	/* Interrupt Delay Time */
 #define IOAT_INTRDELAY_COALESE_SUPPORT	0x8000	/* Interrupt Coalescing Supported */

 #define IOAT_DEVICE_STATUS_OFFSET	0x0E	/* 16-bit */
...
@@ -348,6 +348,7 @@ static void ipu_ch_param_set_size(union chan_param_mem *params,
 		break;
 	case IPU_PIX_FMT_BGRA32:
 	case IPU_PIX_FMT_BGR32:
+	case IPU_PIX_FMT_ABGR32:
 		params->ip.bpp = 0;
 		params->ip.pfs = 4;
 		params->ip.npb = 7;
@@ -376,20 +377,6 @@ static void ipu_ch_param_set_size(union chan_param_mem *params,
 		params->ip.wid2 = 7;		/* Blue bit width - 1 */
 		params->ip.wid3 = 7;		/* Alpha bit width - 1 */
 		break;
-	case IPU_PIX_FMT_ABGR32:
-		params->ip.bpp = 0;
-		params->ip.pfs = 4;
-		params->ip.npb = 7;
-		params->ip.sat = 2;		/* SAT = 32-bit access */
-		params->ip.ofs0 = 8;		/* Red bit offset */
-		params->ip.ofs1 = 16;		/* Green bit offset */
-		params->ip.ofs2 = 24;		/* Blue bit offset */
-		params->ip.ofs3 = 0;		/* Alpha bit offset */
-		params->ip.wid0 = 7;		/* Red bit width - 1 */
-		params->ip.wid1 = 7;		/* Green bit width - 1 */
-		params->ip.wid2 = 7;		/* Blue bit width - 1 */
-		params->ip.wid3 = 7;		/* Alpha bit width - 1 */
-		break;
 	case IPU_PIX_FMT_UYVY:
 		params->ip.bpp = 2;
 		params->ip.pfs = 6;
...
This diff is collapsed.
@@ -4940,7 +4940,7 @@ static int ppc440spe_configure_raid_devices(void)
 	return ret;
 }

-static struct of_device_id __devinitdata ppc440spe_adma_of_match[] = {
+static const struct of_device_id ppc440spe_adma_of_match[] __devinitconst = {
 	{ .compatible	= "ibm,dma-440spe", },
 	{ .compatible	= "amcc,xor-accelerator", },
 	{},
...
@@ -31,6 +31,8 @@
  * if dma_cookie_t is >0 it's a DMA request cookie, <0 it's an error code
  */
 typedef s32 dma_cookie_t;
+#define DMA_MIN_COOKIE	1
+#define DMA_MAX_COOKIE	INT_MAX

 #define dma_submit_error(cookie) ((cookie) < 0 ? 1 : 0)
...
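dma_cookie_t carves up its s32 range: negative values are error codes and positive values are transaction ids handed back at submit time and polled later for completion; the new bounds make the valid window explicit. A hedged sketch of how a driver might advance a cookie counter within those bounds (next_cookie() is illustrative, not a dmaengine API):

#include <limits.h>
#include <stdio.h>

typedef int dma_cookie_t; /* stand-in for the kernel's s32 typedef */

#define DMA_MIN_COOKIE 1
#define DMA_MAX_COOKIE INT_MAX

#define dma_submit_error(cookie) ((cookie) < 0 ? 1 : 0)

/* Illustrative helper: advance a channel's cookie counter, wrapping back
 * to DMA_MIN_COOKIE instead of overflowing into error territory. */
static dma_cookie_t next_cookie(dma_cookie_t last)
{
        return (last < DMA_MAX_COOKIE) ? last + 1 : DMA_MIN_COOKIE;
}

int main(void)
{
        printf("%d\n", next_cookie(41));             /* 42 */
        printf("%d\n", next_cookie(DMA_MAX_COOKIE)); /* wraps to 1 */
        printf("%d\n", dma_submit_error(-22));       /* negative -> error */
        return 0;
}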