Commit cd7b34fe authored by Linus Torvalds

Merge tag 'dmaengine-4.14-rc1' of git://git.infradead.org/users/vkoul/slave-dma

Pull dmaengine updates from Vinod Koul:
 "This one features the usual updates to the drivers and one good part
  of removing DA_SG from core as it has no users.

  Summary:

   - Remove DMA_SG support as we have no users for this feature
   - New driver for Altera / Intel mSGDMA IP core
   - Support for memset in dmatest and qcom_hidma driver
   - Updates for non-cyclic mode in k3dma, a bunch of updates in
     bam_dma and bcm sba-raid
   - Constify device ids across drivers"

* tag 'dmaengine-4.14-rc1' of git://git.infradead.org/users/vkoul/slave-dma: (52 commits)
  dmaengine: sun6i: support V3s SoC variant
  dmaengine: sun6i: make gate bit in sun8i's DMA engines a common quirk
  dmaengine: rcar-dmac: document R8A77970 bindings
  dmaengine: xilinx_dma: Fix error code format specifier
  dmaengine: altera: Use macros instead of structs to describe the registers
  dmaengine: ti-dma-crossbar: Fix dra7 reserve function
  dmaengine: pl330: constify amba_id
  dmaengine: pl08x: constify amba_id
  dmaengine: bcm-sba-raid: Remove redundant SBA_REQUEST_STATE_COMPLETED
  dmaengine: bcm-sba-raid: Explicitly ACK mailbox message after sending
  dmaengine: bcm-sba-raid: Add debugfs support
  dmaengine: bcm-sba-raid: Remove redundant SBA_REQUEST_STATE_RECEIVED
  dmaengine: bcm-sba-raid: Re-factor sba_process_deferred_requests()
  dmaengine: bcm-sba-raid: Pre-ack async tx descriptor
  dmaengine: bcm-sba-raid: Peek mbox when we have no free requests
  dmaengine: bcm-sba-raid: Alloc resources before registering DMA device
  dmaengine: bcm-sba-raid: Improve sba_issue_pending() run duration
  dmaengine: bcm-sba-raid: Increase number of free sba_request
  dmaengine: bcm-sba-raid: Allow arbitrary number free sba_request
  dmaengine: bcm-sba-raid: Remove reqs_free_count from sba_device
  ...
parents 75c72715 41bd0314
What: sys/devices/pciXXXX:XX/0000:XX:XX.X/dma/dma<n>chan<n>/quickdata/cap
Date: December 3, 2009
KernelVersion: 2.6.32
Contact: dmaengine@vger.kernel.org
Description: Capabilities the DMA supports. Currently there are DMA_PQ, DMA_PQ_VAL,
DMA_XOR, DMA_XOR_VAL, DMA_INTERRUPT.
What: sys/devices/pciXXXX:XX/0000:XX:XX.X/dma/dma<n>chan<n>/quickdata/ring_active
Date: December 3, 2009
KernelVersion: 2.6.32
Contact: dmaengine@vger.kernel.org
Description: The number of descriptors active in the ring.
What: sys/devices/pciXXXX:XX/0000:XX:XX.X/dma/dma<n>chan<n>/quickdata/ring_size
Date: December 3, 2009
KernelVersion: 2.6.32
Contact: dmaengine@vger.kernel.org
Description: Descriptor ring size, total number of descriptors available.
What: sys/devices/pciXXXX:XX/0000:XX:XX.X/dma/dma<n>chan<n>/quickdata/version
Date: December 3, 2009
KernelVersion: 2.6.32
Contact: dmaengine@vger.kernel.org
Description: Version of ioatdma device.
What: sys/devices/pciXXXX:XX/0000:XX:XX.X/dma/dma<n>chan<n>/quickdata/intr_coalesce
Date: August 8, 2017
KernelVersion: 4.14
Contact: dmaengine@vger.kernel.org
Description: Tunable interrupt coalescing delay, per channel (microseconds per pending descriptor).
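These quickdata attributes are ordinary sysfs files, so they can be read and (for intr_coalesce, from 4.14 onward) written from userspace. Below is a minimal C sketch; the PCI address and channel in CHAN_DIR are made-up placeholders, so substitute the real path matching the pciXXXX:XX entries above.

#include <stdio.h>

/* Hypothetical channel directory -- the real one follows the pciXXXX:XX pattern above. */
#define CHAN_DIR "/sys/devices/pci0000:00/0000:00:04.0/dma/dma0chan0/quickdata/"

static long read_attr(const char *name)
{
	char path[256];
	long val = -1;
	FILE *f;

	snprintf(path, sizeof(path), CHAN_DIR "%s", name);
	f = fopen(path, "r");
	if (!f)
		return -1;
	if (fscanf(f, "%ld", &val) != 1)
		val = -1;
	fclose(f);
	return val;
}

int main(void)
{
	FILE *f;

	printf("ring_size   = %ld\n", read_attr("ring_size"));
	printf("ring_active = %ld\n", read_attr("ring_active"));

	/* Request a 5 us per-pending-descriptor interrupt coalescing delay. */
	f = fopen(CHAN_DIR "intr_coalesce", "w");
	if (f) {
		fprintf(f, "5\n");
		fclose(f);
	}
	return 0;
}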
......@@ -25,6 +25,7 @@ Required Properties:
- "renesas,dmac-r8a7794" (R-Car E2)
- "renesas,dmac-r8a7795" (R-Car H3)
- "renesas,dmac-r8a7796" (R-Car M3-W)
- "renesas,dmac-r8a77970" (R-Car V3M)
- reg: base address and length of the registers block for the DMAC
......
......@@ -8,6 +8,7 @@ Required Properties:
- "renesas,r8a7793-usb-dmac" (R-Car M2-N)
- "renesas,r8a7794-usb-dmac" (R-Car E2)
- "renesas,r8a7795-usb-dmac" (R-Car H3)
- "renesas,r8a7796-usb-dmac" (R-Car M3-W)
- reg: base address and length of the registers block for the DMAC
- interrupts: interrupt specifiers for the DMAC, one for each entry in
interrupt-names.
......
......@@ -9,6 +9,7 @@ Required properties:
"allwinner,sun8i-a23-dma"
"allwinner,sun8i-a83t-dma"
"allwinner,sun8i-h3-dma"
"allwinner,sun8i-v3s-dma"
- reg: Should contain the registers base address and length
- interrupts: Should contain a reference to the interrupt used by this device
- clocks: Should contain a reference to the parent AHB clock
......
......@@ -181,13 +181,6 @@ Currently, the types available are:
- Used by the client drivers to register a callback that will be
called on a regular basis through the DMA controller interrupt
* DMA_SG
- The device supports memory to memory scatter-gather
transfers.
- Even though a plain memcpy can look like a particular case of a
scatter-gather transfer, with a single chunk to transfer, it's a
distinct transaction type in the mem2mem transfers case
* DMA_PRIVATE
- The device only supports slave transfers, and as such isn't
available for async transfers.
......@@ -395,6 +388,13 @@ where to put them)
when DMA_CTRL_REUSE is already set
- Terminating the channel
* DMA_PREP_CMD
- If set, the client driver tells the DMA controller that the data
passed through the DMA API is command data.
- Interpretation of the command data is DMA controller specific. It can
be used for issuing commands to other peripherals, or for register
reads/register writes where the descriptor should be in a different
format from normal data descriptors.
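As a hedged illustration of the new flag (this sketch is not part of the patch), a client driver could OR DMA_PREP_CMD into the prep flags when the mapped scatterlist holds a command buffer rather than normal data; the helper name and error values here are purely illustrative.

#include <linux/dmaengine.h>
#include <linux/scatterlist.h>

/*
 * Illustrative helper: 'sg' is assumed to be already DMA-mapped and to
 * contain controller-specific command data. DMA_PREP_CMD asks the DMA
 * controller driver to build a command descriptor instead of a normal
 * data descriptor.
 */
static int example_send_cmd(struct dma_chan *chan, struct scatterlist *sg,
			    unsigned int nents)
{
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	desc = dmaengine_prep_slave_sg(chan, sg, nents, DMA_MEM_TO_DEV,
				       DMA_PREP_CMD | DMA_PREP_INTERRUPT);
	if (!desc)
		return -EINVAL;

	cookie = dmaengine_submit(desc);
	if (dma_submit_error(cookie))
		return -EIO;

	dma_async_issue_pending(chan);
	return 0;
}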
General Design Notes
--------------------
......
......@@ -502,27 +502,6 @@ static struct dma_async_tx_descriptor *ccp_prep_dma_memcpy(
return &desc->tx_desc;
}
static struct dma_async_tx_descriptor *ccp_prep_dma_sg(
struct dma_chan *dma_chan, struct scatterlist *dst_sg,
unsigned int dst_nents, struct scatterlist *src_sg,
unsigned int src_nents, unsigned long flags)
{
struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
dma_chan);
struct ccp_dma_desc *desc;
dev_dbg(chan->ccp->dev,
"%s - src=%p, src_nents=%u dst=%p, dst_nents=%u, flags=%#lx\n",
__func__, src_sg, src_nents, dst_sg, dst_nents, flags);
desc = ccp_create_desc(dma_chan, dst_sg, dst_nents, src_sg, src_nents,
flags);
if (!desc)
return NULL;
return &desc->tx_desc;
}
static struct dma_async_tx_descriptor *ccp_prep_dma_interrupt(
struct dma_chan *dma_chan, unsigned long flags)
{
......@@ -704,7 +683,6 @@ int ccp_dmaengine_register(struct ccp_device *ccp)
dma_dev->directions = DMA_MEM_TO_MEM;
dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
dma_cap_set(DMA_SG, dma_dev->cap_mask);
dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask);
/* The DMA channels for this device can be set to public or private,
......@@ -740,7 +718,6 @@ int ccp_dmaengine_register(struct ccp_device *ccp)
dma_dev->device_free_chan_resources = ccp_free_chan_resources;
dma_dev->device_prep_dma_memcpy = ccp_prep_dma_memcpy;
dma_dev->device_prep_dma_sg = ccp_prep_dma_sg;
dma_dev->device_prep_dma_interrupt = ccp_prep_dma_interrupt;
dma_dev->device_issue_pending = ccp_issue_pending;
dma_dev->device_tx_status = ccp_tx_status;
......
......@@ -56,6 +56,12 @@ config DMA_OF
select DMA_ENGINE
#devices
config ALTERA_MSGDMA
tristate "Altera / Intel mSGDMA Engine"
select DMA_ENGINE
help
Enable support for Altera / Intel mSGDMA controller.
config AMBA_PL08X
bool "ARM PrimeCell PL080 or PL081 support"
depends on ARM_AMBA
......
......@@ -12,6 +12,7 @@ obj-$(CONFIG_DMA_OF) += of-dma.o
obj-$(CONFIG_DMATEST) += dmatest.o
#devices
obj-$(CONFIG_ALTERA_MSGDMA) += altera-msgdma.o
obj-$(CONFIG_AMBA_PL08X) += amba-pl08x.o
obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/
obj-$(CONFIG_AT_HDMAC) += at_hdmac.o
......
......@@ -3033,7 +3033,7 @@ static struct vendor_data vendor_ftdmac020 = {
.max_transfer_size = PL080_CONTROL_TRANSFER_SIZE_MASK,
};
static struct amba_id pl08x_ids[] = {
static const struct amba_id pl08x_ids[] = {
/* Samsung PL080S variant */
{
.id = 0x0a141080,
......
......@@ -1202,138 +1202,6 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
return NULL;
}
/**
* atc_prep_dma_sg - prepare memory to memory scatter-gather operation
* @chan: the channel to prepare operation on
* @dst_sg: destination scatterlist
* @dst_nents: number of destination scatterlist entries
* @src_sg: source scatterlist
* @src_nents: number of source scatterlist entries
* @flags: tx descriptor status flags
*/
static struct dma_async_tx_descriptor *
atc_prep_dma_sg(struct dma_chan *chan,
struct scatterlist *dst_sg, unsigned int dst_nents,
struct scatterlist *src_sg, unsigned int src_nents,
unsigned long flags)
{
struct at_dma_chan *atchan = to_at_dma_chan(chan);
struct at_desc *desc = NULL;
struct at_desc *first = NULL;
struct at_desc *prev = NULL;
unsigned int src_width;
unsigned int dst_width;
size_t xfer_count;
u32 ctrla;
u32 ctrlb;
size_t dst_len = 0, src_len = 0;
dma_addr_t dst = 0, src = 0;
size_t len = 0, total_len = 0;
if (unlikely(dst_nents == 0 || src_nents == 0))
return NULL;
if (unlikely(dst_sg == NULL || src_sg == NULL))
return NULL;
ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN
| ATC_SRC_ADDR_MODE_INCR
| ATC_DST_ADDR_MODE_INCR
| ATC_FC_MEM2MEM;
/*
* loop until there is either no more source or no more destination
* scatterlist entry
*/
while (true) {
/* prepare the next transfer */
if (dst_len == 0) {
/* no more destination scatterlist entries */
if (!dst_sg || !dst_nents)
break;
dst = sg_dma_address(dst_sg);
dst_len = sg_dma_len(dst_sg);
dst_sg = sg_next(dst_sg);
dst_nents--;
}
if (src_len == 0) {
/* no more source scatterlist entries */
if (!src_sg || !src_nents)
break;
src = sg_dma_address(src_sg);
src_len = sg_dma_len(src_sg);
src_sg = sg_next(src_sg);
src_nents--;
}
len = min_t(size_t, src_len, dst_len);
if (len == 0)
continue;
/* take care for the alignment */
src_width = dst_width = atc_get_xfer_width(src, dst, len);
ctrla = ATC_SRC_WIDTH(src_width) |
ATC_DST_WIDTH(dst_width);
/*
* The number of transfers to set up refer to the source width
* that depends on the alignment.
*/
xfer_count = len >> src_width;
if (xfer_count > ATC_BTSIZE_MAX) {
xfer_count = ATC_BTSIZE_MAX;
len = ATC_BTSIZE_MAX << src_width;
}
/* create the transfer */
desc = atc_desc_get(atchan);
if (!desc)
goto err_desc_get;
desc->lli.saddr = src;
desc->lli.daddr = dst;
desc->lli.ctrla = ctrla | xfer_count;
desc->lli.ctrlb = ctrlb;
desc->txd.cookie = 0;
desc->len = len;
atc_desc_chain(&first, &prev, desc);
/* update the lengths and addresses for the next loop cycle */
dst_len -= len;
src_len -= len;
dst += len;
src += len;
total_len += len;
}
/* First descriptor of the chain embeds additional information */
first->txd.cookie = -EBUSY;
first->total_len = total_len;
/* set end-of-link to the last link descriptor of list*/
set_desc_eol(desc);
first->txd.flags = flags; /* client is in control of this ack */
return &first->txd;
err_desc_get:
atc_desc_put(atchan, first);
return NULL;
}
/**
* atc_dma_cyclic_check_values
* Check for too big/unaligned periods and unaligned DMA buffer
......@@ -1933,14 +1801,12 @@ static int __init at_dma_probe(struct platform_device *pdev)
/* setup platform data for each SoC */
dma_cap_set(DMA_MEMCPY, at91sam9rl_config.cap_mask);
dma_cap_set(DMA_SG, at91sam9rl_config.cap_mask);
dma_cap_set(DMA_INTERLEAVE, at91sam9g45_config.cap_mask);
dma_cap_set(DMA_MEMCPY, at91sam9g45_config.cap_mask);
dma_cap_set(DMA_MEMSET, at91sam9g45_config.cap_mask);
dma_cap_set(DMA_MEMSET_SG, at91sam9g45_config.cap_mask);
dma_cap_set(DMA_PRIVATE, at91sam9g45_config.cap_mask);
dma_cap_set(DMA_SLAVE, at91sam9g45_config.cap_mask);
dma_cap_set(DMA_SG, at91sam9g45_config.cap_mask);
/* get DMA parameters from controller type */
plat_dat = at_dma_get_driver_data(pdev);
......@@ -2078,16 +1944,12 @@ static int __init at_dma_probe(struct platform_device *pdev)
atdma->dma_common.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
}
if (dma_has_cap(DMA_SG, atdma->dma_common.cap_mask))
atdma->dma_common.device_prep_dma_sg = atc_prep_dma_sg;
dma_writel(atdma, EN, AT_DMA_ENABLE);
dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s%s%s), %d channels\n",
dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s%s), %d channels\n",
dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "",
dma_has_cap(DMA_MEMSET, atdma->dma_common.cap_mask) ? "set " : "",
dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) ? "slave " : "",
dma_has_cap(DMA_SG, atdma->dma_common.cap_mask) ? "sg-cpy " : "",
plat_dat->nr_channels);
dma_async_device_register(&atdma->dma_common);
......
......@@ -875,7 +875,7 @@ at_xdmac_interleaved_queue_desc(struct dma_chan *chan,
dwidth = at_xdmac_align_width(chan, src | dst | chunk->size);
if (chunk->size >= (AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth)) {
dev_dbg(chan2dev(chan),
"%s: chunk too big (%d, max size %lu)...\n",
"%s: chunk too big (%zu, max size %lu)...\n",
__func__, chunk->size,
AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth);
return NULL;
......@@ -956,7 +956,7 @@ at_xdmac_prep_interleaved(struct dma_chan *chan,
if ((xt->numf > 1) && (xt->frame_size > 1))
return NULL;
dev_dbg(chan2dev(chan), "%s: src=%pad, dest=%pad, numf=%d, frame_size=%d, flags=0x%lx\n",
dev_dbg(chan2dev(chan), "%s: src=%pad, dest=%pad, numf=%zu, frame_size=%zu, flags=0x%lx\n",
__func__, &xt->src_start, &xt->dst_start, xt->numf,
xt->frame_size, flags);
......@@ -990,7 +990,7 @@ at_xdmac_prep_interleaved(struct dma_chan *chan,
dst_skip = chunk->size + dst_icg;
dev_dbg(chan2dev(chan),
"%s: chunk size=%d, src icg=%d, dst icg=%d\n",
"%s: chunk size=%zu, src icg=%zu, dst icg=%zu\n",
__func__, chunk->size, src_icg, dst_icg);
desc = at_xdmac_interleaved_queue_desc(chan, atchan,
......@@ -1207,7 +1207,7 @@ at_xdmac_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
struct at_xdmac_desc *desc;
dev_dbg(chan2dev(chan), "%s: dest=%pad, len=%d, pattern=0x%x, flags=0x%lx\n",
dev_dbg(chan2dev(chan), "%s: dest=%pad, len=%zu, pattern=0x%x, flags=0x%lx\n",
__func__, &dest, len, value, flags);
if (unlikely(!len))
......@@ -1883,8 +1883,11 @@ static int atmel_xdmac_resume(struct device *dev)
struct at_xdmac_chan *atchan;
struct dma_chan *chan, *_chan;
int i;
int ret;
clk_prepare_enable(atxdmac->clk);
ret = clk_prepare_enable(atxdmac->clk);
if (ret)
return ret;
/* Clear pending interrupts. */
for (i = 0; i < atxdmac->dma.chancnt; i++) {
......
......@@ -923,30 +923,85 @@ int dma_async_device_register(struct dma_device *device)
return -ENODEV;
/* validate device routines */
BUG_ON(dma_has_cap(DMA_MEMCPY, device->cap_mask) &&
!device->device_prep_dma_memcpy);
BUG_ON(dma_has_cap(DMA_XOR, device->cap_mask) &&
!device->device_prep_dma_xor);
BUG_ON(dma_has_cap(DMA_XOR_VAL, device->cap_mask) &&
!device->device_prep_dma_xor_val);
BUG_ON(dma_has_cap(DMA_PQ, device->cap_mask) &&
!device->device_prep_dma_pq);
BUG_ON(dma_has_cap(DMA_PQ_VAL, device->cap_mask) &&
!device->device_prep_dma_pq_val);
BUG_ON(dma_has_cap(DMA_MEMSET, device->cap_mask) &&
!device->device_prep_dma_memset);
BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
!device->device_prep_dma_interrupt);
BUG_ON(dma_has_cap(DMA_SG, device->cap_mask) &&
!device->device_prep_dma_sg);
BUG_ON(dma_has_cap(DMA_CYCLIC, device->cap_mask) &&
!device->device_prep_dma_cyclic);
BUG_ON(dma_has_cap(DMA_INTERLEAVE, device->cap_mask) &&
!device->device_prep_interleaved_dma);
BUG_ON(!device->device_tx_status);
BUG_ON(!device->device_issue_pending);
BUG_ON(!device->dev);
if (!device->dev) {
pr_err("DMAdevice must have dev\n");
return -EIO;
}
if (dma_has_cap(DMA_MEMCPY, device->cap_mask) && !device->device_prep_dma_memcpy) {
dev_err(device->dev,
"Device claims capability %s, but op is not defined\n",
"DMA_MEMCPY");
return -EIO;
}
if (dma_has_cap(DMA_XOR, device->cap_mask) && !device->device_prep_dma_xor) {
dev_err(device->dev,
"Device claims capability %s, but op is not defined\n",
"DMA_XOR");
return -EIO;
}
if (dma_has_cap(DMA_XOR_VAL, device->cap_mask) && !device->device_prep_dma_xor_val) {
dev_err(device->dev,
"Device claims capability %s, but op is not defined\n",
"DMA_XOR_VAL");
return -EIO;
}
if (dma_has_cap(DMA_PQ, device->cap_mask) && !device->device_prep_dma_pq) {
dev_err(device->dev,
"Device claims capability %s, but op is not defined\n",
"DMA_PQ");
return -EIO;
}
if (dma_has_cap(DMA_PQ_VAL, device->cap_mask) && !device->device_prep_dma_pq_val) {
dev_err(device->dev,
"Device claims capability %s, but op is not defined\n",
"DMA_PQ_VAL");
return -EIO;
}
if (dma_has_cap(DMA_MEMSET, device->cap_mask) && !device->device_prep_dma_memset) {
dev_err(device->dev,
"Device claims capability %s, but op is not defined\n",
"DMA_MEMSET");
return -EIO;
}
if (dma_has_cap(DMA_INTERRUPT, device->cap_mask) && !device->device_prep_dma_interrupt) {
dev_err(device->dev,
"Device claims capability %s, but op is not defined\n",
"DMA_INTERRUPT");
return -EIO;
}
if (dma_has_cap(DMA_CYCLIC, device->cap_mask) && !device->device_prep_dma_cyclic) {
dev_err(device->dev,
"Device claims capability %s, but op is not defined\n",
"DMA_CYCLIC");
return -EIO;
}
if (dma_has_cap(DMA_INTERLEAVE, device->cap_mask) && !device->device_prep_interleaved_dma) {
dev_err(device->dev,
"Device claims capability %s, but op is not defined\n",
"DMA_INTERLEAVE");
return -EIO;
}
if (!device->device_tx_status) {
dev_err(device->dev, "Device tx_status is not defined\n");
return -EIO;
}
if (!device->device_issue_pending) {
dev_err(device->dev, "Device issue_pending is not defined\n");
return -EIO;
}
/* note: this only matters in the
* CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case
......
......@@ -52,15 +52,10 @@ module_param(iterations, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(iterations,
"Iterations before stopping test (default: infinite)");
static unsigned int sg_buffers = 1;
module_param(sg_buffers, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(sg_buffers,
"Number of scatter gather buffers (default: 1)");
static unsigned int dmatest;
module_param(dmatest, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dmatest,
"dmatest 0-memcpy 1-slave_sg (default: 0)");
"dmatest 0-memcpy 1-memset (default: 0)");
static unsigned int xor_sources = 3;
module_param(xor_sources, uint, S_IRUGO | S_IWUSR);
......@@ -158,6 +153,7 @@ MODULE_PARM_DESC(run, "Run the test (default: false)");
#define PATTERN_COPY 0x40
#define PATTERN_OVERWRITE 0x20
#define PATTERN_COUNT_MASK 0x1f
#define PATTERN_MEMSET_IDX 0x01
struct dmatest_thread {
struct list_head node;
......@@ -239,46 +235,62 @@ static unsigned long dmatest_random(void)
return buf;
}
static inline u8 gen_inv_idx(u8 index, bool is_memset)
{
u8 val = is_memset ? PATTERN_MEMSET_IDX : index;
return ~val & PATTERN_COUNT_MASK;
}
static inline u8 gen_src_value(u8 index, bool is_memset)
{
return PATTERN_SRC | gen_inv_idx(index, is_memset);
}
static inline u8 gen_dst_value(u8 index, bool is_memset)
{
return PATTERN_DST | gen_inv_idx(index, is_memset);
}
static void dmatest_init_srcs(u8 **bufs, unsigned int start, unsigned int len,
unsigned int buf_size)
unsigned int buf_size, bool is_memset)
{
unsigned int i;
u8 *buf;
for (; (buf = *bufs); bufs++) {
for (i = 0; i < start; i++)
buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
buf[i] = gen_src_value(i, is_memset);
for ( ; i < start + len; i++)
buf[i] = PATTERN_SRC | PATTERN_COPY
| (~i & PATTERN_COUNT_MASK);
buf[i] = gen_src_value(i, is_memset) | PATTERN_COPY;
for ( ; i < buf_size; i++)
buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
buf[i] = gen_src_value(i, is_memset);
buf++;
}
}
static void dmatest_init_dsts(u8 **bufs, unsigned int start, unsigned int len,
unsigned int buf_size)
unsigned int buf_size, bool is_memset)
{
unsigned int i;
u8 *buf;
for (; (buf = *bufs); bufs++) {
for (i = 0; i < start; i++)
buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
buf[i] = gen_dst_value(i, is_memset);
for ( ; i < start + len; i++)
buf[i] = PATTERN_DST | PATTERN_OVERWRITE
| (~i & PATTERN_COUNT_MASK);
buf[i] = gen_dst_value(i, is_memset) |
PATTERN_OVERWRITE;
for ( ; i < buf_size; i++)
buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
buf[i] = gen_dst_value(i, is_memset);
}
}
static void dmatest_mismatch(u8 actual, u8 pattern, unsigned int index,
unsigned int counter, bool is_srcbuf)
unsigned int counter, bool is_srcbuf, bool is_memset)
{
u8 diff = actual ^ pattern;
u8 expected = pattern | (~counter & PATTERN_COUNT_MASK);
u8 expected = pattern | gen_inv_idx(counter, is_memset);
const char *thread_name = current->comm;
if (is_srcbuf)
......@@ -298,7 +310,7 @@ static void dmatest_mismatch(u8 actual, u8 pattern, unsigned int index,
static unsigned int dmatest_verify(u8 **bufs, unsigned int start,
unsigned int end, unsigned int counter, u8 pattern,
bool is_srcbuf)
bool is_srcbuf, bool is_memset)
{
unsigned int i;
unsigned int error_count = 0;
......@@ -311,11 +323,12 @@ static unsigned int dmatest_verify(u8 **bufs, unsigned int start,
counter = counter_orig;
for (i = start; i < end; i++) {
actual = buf[i];
expected = pattern | (~counter & PATTERN_COUNT_MASK);
expected = pattern | gen_inv_idx(counter, is_memset);
if (actual != expected) {
if (error_count < MAX_ERROR_COUNT)
dmatest_mismatch(actual, pattern, i,
counter, is_srcbuf);
counter, is_srcbuf,
is_memset);
error_count++;
}
counter++;
......@@ -435,6 +448,7 @@ static int dmatest_func(void *data)
s64 runtime = 0;
unsigned long long total_len = 0;
u8 align = 0;
bool is_memset = false;
set_freezable();
......@@ -448,9 +462,10 @@ static int dmatest_func(void *data)
if (thread->type == DMA_MEMCPY) {
align = dev->copy_align;
src_cnt = dst_cnt = 1;
} else if (thread->type == DMA_SG) {
align = dev->copy_align;
src_cnt = dst_cnt = sg_buffers;
} else if (thread->type == DMA_MEMSET) {
align = dev->fill_align;
src_cnt = dst_cnt = 1;
is_memset = true;
} else if (thread->type == DMA_XOR) {
/* force odd to ensure dst = src */
src_cnt = min_odd(params->xor_sources | 1, dev->max_xor);
......@@ -530,8 +545,6 @@ static int dmatest_func(void *data)
dma_addr_t srcs[src_cnt];
dma_addr_t *dsts;
unsigned int src_off, dst_off, len;
struct scatterlist tx_sg[src_cnt];
struct scatterlist rx_sg[src_cnt];
total_tests++;
......@@ -571,9 +584,9 @@ static int dmatest_func(void *data)
dst_off = (dst_off >> align) << align;
dmatest_init_srcs(thread->srcs, src_off, len,
params->buf_size);
params->buf_size, is_memset);
dmatest_init_dsts(thread->dsts, dst_off, len,
params->buf_size);
params->buf_size, is_memset);
diff = ktime_sub(ktime_get(), start);
filltime = ktime_add(filltime, diff);
......@@ -627,22 +640,15 @@ static int dmatest_func(void *data)
um->bidi_cnt++;
}
sg_init_table(tx_sg, src_cnt);
sg_init_table(rx_sg, src_cnt);
for (i = 0; i < src_cnt; i++) {
sg_dma_address(&rx_sg[i]) = srcs[i];
sg_dma_address(&tx_sg[i]) = dsts[i] + dst_off;
sg_dma_len(&tx_sg[i]) = len;
sg_dma_len(&rx_sg[i]) = len;
}
if (thread->type == DMA_MEMCPY)
tx = dev->device_prep_dma_memcpy(chan,
dsts[0] + dst_off,
srcs[0], len, flags);
else if (thread->type == DMA_SG)
tx = dev->device_prep_dma_sg(chan, tx_sg, src_cnt,
rx_sg, src_cnt, flags);
else if (thread->type == DMA_MEMSET)
tx = dev->device_prep_dma_memset(chan,
dsts[0] + dst_off,
*(thread->srcs[0] + src_off),
len, flags);
else if (thread->type == DMA_XOR)
tx = dev->device_prep_dma_xor(chan,
dsts[0] + dst_off,
......@@ -722,23 +728,25 @@ static int dmatest_func(void *data)
start = ktime_get();
pr_debug("%s: verifying source buffer...\n", current->comm);
error_count = dmatest_verify(thread->srcs, 0, src_off,
0, PATTERN_SRC, true);
0, PATTERN_SRC, true, is_memset);
error_count += dmatest_verify(thread->srcs, src_off,
src_off + len, src_off,
PATTERN_SRC | PATTERN_COPY, true);
PATTERN_SRC | PATTERN_COPY, true, is_memset);
error_count += dmatest_verify(thread->srcs, src_off + len,
params->buf_size, src_off + len,
PATTERN_SRC, true);
PATTERN_SRC, true, is_memset);
pr_debug("%s: verifying dest buffer...\n", current->comm);
error_count += dmatest_verify(thread->dsts, 0, dst_off,
0, PATTERN_DST, false);
0, PATTERN_DST, false, is_memset);
error_count += dmatest_verify(thread->dsts, dst_off,
dst_off + len, src_off,
PATTERN_SRC | PATTERN_COPY, false);
PATTERN_SRC | PATTERN_COPY, false, is_memset);
error_count += dmatest_verify(thread->dsts, dst_off + len,
params->buf_size, dst_off + len,
PATTERN_DST, false);
PATTERN_DST, false, is_memset);
diff = ktime_sub(ktime_get(), start);
comparetime = ktime_add(comparetime, diff);
......@@ -821,8 +829,8 @@ static int dmatest_add_threads(struct dmatest_info *info,
if (type == DMA_MEMCPY)
op = "copy";
else if (type == DMA_SG)
op = "sg";
else if (type == DMA_MEMSET)
op = "set";
else if (type == DMA_XOR)
op = "xor";
else if (type == DMA_PQ)
......@@ -883,9 +891,9 @@ static int dmatest_add_channel(struct dmatest_info *info,
}
}
if (dma_has_cap(DMA_SG, dma_dev->cap_mask)) {
if (dma_has_cap(DMA_MEMSET, dma_dev->cap_mask)) {
if (dmatest == 1) {
cnt = dmatest_add_threads(info, dtc, DMA_SG);
cnt = dmatest_add_threads(info, dtc, DMA_MEMSET);
thread_count += cnt > 0 ? cnt : 0;
}
}
......@@ -961,8 +969,8 @@ static void run_threaded_test(struct dmatest_info *info)
params->noverify = noverify;
request_channels(info, DMA_MEMCPY);
request_channels(info, DMA_MEMSET);
request_channels(info, DMA_XOR);
request_channels(info, DMA_SG);
request_channels(info, DMA_PQ);
}
......
......@@ -825,122 +825,6 @@ fsl_dma_prep_memcpy(struct dma_chan *dchan,
return NULL;
}
static struct dma_async_tx_descriptor *fsl_dma_prep_sg(struct dma_chan *dchan,
struct scatterlist *dst_sg, unsigned int dst_nents,
struct scatterlist *src_sg, unsigned int src_nents,
unsigned long flags)
{
struct fsl_desc_sw *first = NULL, *prev = NULL, *new = NULL;
struct fsldma_chan *chan = to_fsl_chan(dchan);
size_t dst_avail, src_avail;
dma_addr_t dst, src;
size_t len;
/* basic sanity checks */
if (dst_nents == 0 || src_nents == 0)
return NULL;
if (dst_sg == NULL || src_sg == NULL)
return NULL;
/*
* TODO: should we check that both scatterlists have the same
* TODO: number of bytes in total? Is that really an error?
*/
/* get prepared for the loop */
dst_avail = sg_dma_len(dst_sg);
src_avail = sg_dma_len(src_sg);
/* run until we are out of scatterlist entries */
while (true) {
/* create the largest transaction possible */
len = min_t(size_t, src_avail, dst_avail);
len = min_t(size_t, len, FSL_DMA_BCR_MAX_CNT);
if (len == 0)
goto fetch;
dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) - dst_avail;
src = sg_dma_address(src_sg) + sg_dma_len(src_sg) - src_avail;
/* allocate and populate the descriptor */
new = fsl_dma_alloc_descriptor(chan);
if (!new) {
chan_err(chan, "%s\n", msg_ld_oom);
goto fail;
}
set_desc_cnt(chan, &new->hw, len);
set_desc_src(chan, &new->hw, src);
set_desc_dst(chan, &new->hw, dst);
if (!first)
first = new;
else
set_desc_next(chan, &prev->hw, new->async_tx.phys);
new->async_tx.cookie = 0;
async_tx_ack(&new->async_tx);
prev = new;
/* Insert the link descriptor to the LD ring */
list_add_tail(&new->node, &first->tx_list);
/* update metadata */
dst_avail -= len;
src_avail -= len;
fetch:
/* fetch the next dst scatterlist entry */
if (dst_avail == 0) {
/* no more entries: we're done */
if (dst_nents == 0)
break;
/* fetch the next entry: if there are no more: done */
dst_sg = sg_next(dst_sg);
if (dst_sg == NULL)
break;
dst_nents--;
dst_avail = sg_dma_len(dst_sg);
}
/* fetch the next src scatterlist entry */
if (src_avail == 0) {
/* no more entries: we're done */
if (src_nents == 0)
break;
/* fetch the next entry: if there are no more: done */
src_sg = sg_next(src_sg);
if (src_sg == NULL)
break;
src_nents--;
src_avail = sg_dma_len(src_sg);
}
}
new->async_tx.flags = flags; /* client is in control of this ack */
new->async_tx.cookie = -EBUSY;
/* Set End-of-link to the last link descriptor of new list */
set_ld_eol(chan, new);
return &first->async_tx;
fail:
if (!first)
return NULL;
fsldma_free_desc_list_reverse(chan, &first->tx_list);
return NULL;
}
static int fsl_dma_device_terminate_all(struct dma_chan *dchan)
{
struct fsldma_chan *chan;
......@@ -1357,12 +1241,10 @@ static int fsldma_of_probe(struct platform_device *op)
fdev->irq = irq_of_parse_and_map(op->dev.of_node, 0);
dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask);
dma_cap_set(DMA_SG, fdev->common.cap_mask);
dma_cap_set(DMA_SLAVE, fdev->common.cap_mask);
fdev->common.device_alloc_chan_resources = fsl_dma_alloc_chan_resources;
fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources;
fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy;
fdev->common.device_prep_dma_sg = fsl_dma_prep_sg;
fdev->common.device_tx_status = fsl_tx_status;
fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending;
fdev->common.device_config = fsl_dma_device_config;
......
......@@ -644,9 +644,13 @@ static void __cleanup(struct ioatdma_chan *ioat_chan, dma_addr_t phys_complete)
mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
}
/* 5 microsecond delay per pending descriptor */
writew(min((5 * (active - i)), IOAT_INTRDELAY_MASK),
ioat_chan->ioat_dma->reg_base + IOAT_INTRDELAY_OFFSET);
/* microsecond delay by sysfs variable per pending descriptor */
if (ioat_chan->intr_coalesce != ioat_chan->prev_intr_coalesce) {
writew(min((ioat_chan->intr_coalesce * (active - i)),
IOAT_INTRDELAY_MASK),
ioat_chan->ioat_dma->reg_base + IOAT_INTRDELAY_OFFSET);
ioat_chan->prev_intr_coalesce = ioat_chan->intr_coalesce;
}
}
static void ioat_cleanup(struct ioatdma_chan *ioat_chan)
......
......@@ -142,11 +142,14 @@ struct ioatdma_chan {
spinlock_t prep_lock;
struct ioat_descs descs[2];
int desc_chunks;
int intr_coalesce;
int prev_intr_coalesce;
};
struct ioat_sysfs_entry {
struct attribute attr;
ssize_t (*show)(struct dma_chan *, char *);
ssize_t (*store)(struct dma_chan *, const char *, size_t);
};
/**
......
......@@ -39,7 +39,7 @@ MODULE_VERSION(IOAT_DMA_VERSION);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel Corporation");
static struct pci_device_id ioat_pci_tbl[] = {
static const struct pci_device_id ioat_pci_tbl[] = {
/* I/OAT v3 platforms */
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG0) },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG1) },
......
......@@ -64,8 +64,24 @@ ioat_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
return entry->show(&ioat_chan->dma_chan, page);
}
static ssize_t
ioat_attr_store(struct kobject *kobj, struct attribute *attr,
const char *page, size_t count)
{
struct ioat_sysfs_entry *entry;
struct ioatdma_chan *ioat_chan;
entry = container_of(attr, struct ioat_sysfs_entry, attr);
ioat_chan = container_of(kobj, struct ioatdma_chan, kobj);
if (!entry->store)
return -EIO;
return entry->store(&ioat_chan->dma_chan, page, count);
}
const struct sysfs_ops ioat_sysfs_ops = {
.show = ioat_attr_show,
.store = ioat_attr_store,
};
void ioat_kobject_add(struct ioatdma_device *ioat_dma, struct kobj_type *type)
......@@ -121,11 +137,37 @@ static ssize_t ring_active_show(struct dma_chan *c, char *page)
}
static struct ioat_sysfs_entry ring_active_attr = __ATTR_RO(ring_active);
static ssize_t intr_coalesce_show(struct dma_chan *c, char *page)
{
struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
return sprintf(page, "%d\n", ioat_chan->intr_coalesce);
}
static ssize_t intr_coalesce_store(struct dma_chan *c, const char *page,
size_t count)
{
int intr_coalesce = 0;
struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
if (sscanf(page, "%du", &intr_coalesce) != -1) {
if ((intr_coalesce < 0) ||
(intr_coalesce > IOAT_INTRDELAY_MASK))
return -EINVAL;
ioat_chan->intr_coalesce = intr_coalesce;
}
return count;
}
static struct ioat_sysfs_entry intr_coalesce_attr = __ATTR_RW(intr_coalesce);
static struct attribute *ioat_attrs[] = {
&ring_size_attr.attr,
&ring_active_attr.attr,
&ioat_cap_attr.attr,
&ioat_version_attr.attr,
&intr_coalesce_attr.attr,
NULL,
};
......
......@@ -223,7 +223,6 @@ static irqreturn_t k3_dma_int_handler(int irq, void *dev_id)
if (c && (tc1 & BIT(i))) {
spin_lock_irqsave(&c->vc.lock, flags);
vchan_cookie_complete(&p->ds_run->vd);
WARN_ON_ONCE(p->ds_done);
p->ds_done = p->ds_run;
p->ds_run = NULL;
spin_unlock_irqrestore(&c->vc.lock, flags);
......@@ -274,13 +273,14 @@ static int k3_dma_start_txd(struct k3_dma_chan *c)
*/
list_del(&ds->vd.node);
WARN_ON_ONCE(c->phy->ds_run);
WARN_ON_ONCE(c->phy->ds_done);
c->phy->ds_run = ds;
c->phy->ds_done = NULL;
/* start dma */
k3_dma_set_desc(c->phy, &ds->desc_hw[0]);
return 0;
}
c->phy->ds_run = NULL;
c->phy->ds_done = NULL;
return -EAGAIN;
}
......@@ -722,11 +722,7 @@ static int k3_dma_terminate_all(struct dma_chan *chan)
k3_dma_free_desc(&p->ds_run->vd);
p->ds_run = NULL;
}
if (p->ds_done) {
k3_dma_free_desc(&p->ds_done->vd);
p->ds_done = NULL;
}
p->ds_done = NULL;
}
spin_unlock_irqrestore(&c->vc.lock, flags);
vchan_dma_desc_free_list(&c->vc, &head);
......
......@@ -68,36 +68,6 @@ static void mv_desc_init(struct mv_xor_desc_slot *desc,
hw_desc->byte_count = byte_count;
}
/* Populate the descriptor */
static void mv_xor_config_sg_ll_desc(struct mv_xor_desc_slot *desc,
dma_addr_t dma_src, dma_addr_t dma_dst,
u32 len, struct mv_xor_desc_slot *prev)
{
struct mv_xor_desc *hw_desc = desc->hw_desc;
hw_desc->status = XOR_DESC_DMA_OWNED;
hw_desc->phy_next_desc = 0;
/* Configure for XOR with only one src address -> MEMCPY */
hw_desc->desc_command = XOR_DESC_OPERATION_XOR | (0x1 << 0);
hw_desc->phy_dest_addr = dma_dst;
hw_desc->phy_src_addr[0] = dma_src;
hw_desc->byte_count = len;
if (prev) {
struct mv_xor_desc *hw_prev = prev->hw_desc;
hw_prev->phy_next_desc = desc->async_tx.phys;
}
}
static void mv_xor_desc_config_eod(struct mv_xor_desc_slot *desc)
{
struct mv_xor_desc *hw_desc = desc->hw_desc;
/* Enable end-of-descriptor interrupt */
hw_desc->desc_command |= XOR_DESC_EOD_INT_EN;
}
static void mv_desc_set_mode(struct mv_xor_desc_slot *desc)
{
struct mv_xor_desc *hw_desc = desc->hw_desc;
......@@ -662,132 +632,6 @@ mv_xor_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags)
return mv_xor_prep_dma_xor(chan, dest, &src, 1, len, flags);
}
/**
* mv_xor_prep_dma_sg - prepare descriptors for a memory sg transaction
* @chan: DMA channel
* @dst_sg: Destination scatter list
* @dst_sg_len: Number of entries in destination scatter list
* @src_sg: Source scatter list
* @src_sg_len: Number of entries in source scatter list
* @flags: transfer ack flags
*
* Return: Async transaction descriptor on success and NULL on failure
*/
static struct dma_async_tx_descriptor *
mv_xor_prep_dma_sg(struct dma_chan *chan, struct scatterlist *dst_sg,
unsigned int dst_sg_len, struct scatterlist *src_sg,
unsigned int src_sg_len, unsigned long flags)
{
struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
struct mv_xor_desc_slot *new;
struct mv_xor_desc_slot *first = NULL;
struct mv_xor_desc_slot *prev = NULL;
size_t len, dst_avail, src_avail;
dma_addr_t dma_dst, dma_src;
int desc_cnt = 0;
int ret;
dev_dbg(mv_chan_to_devp(mv_chan),
"%s dst_sg_len: %d src_sg_len: %d flags: %ld\n",
__func__, dst_sg_len, src_sg_len, flags);
dst_avail = sg_dma_len(dst_sg);
src_avail = sg_dma_len(src_sg);
/* Run until we are out of scatterlist entries */
while (true) {
/* Allocate and populate the descriptor */
desc_cnt++;
new = mv_chan_alloc_slot(mv_chan);
if (!new) {
dev_err(mv_chan_to_devp(mv_chan),
"Out of descriptors (desc_cnt=%d)!\n",
desc_cnt);
goto err;
}
len = min_t(size_t, src_avail, dst_avail);
len = min_t(size_t, len, MV_XOR_MAX_BYTE_COUNT);
if (len == 0)
goto fetch;
if (len < MV_XOR_MIN_BYTE_COUNT) {
dev_err(mv_chan_to_devp(mv_chan),
"Transfer size of %zu too small!\n", len);
goto err;
}
dma_dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) -
dst_avail;
dma_src = sg_dma_address(src_sg) + sg_dma_len(src_sg) -
src_avail;
/* Check if a new window needs to get added for 'dst' */
ret = mv_xor_add_io_win(mv_chan, dma_dst);
if (ret)
goto err;
/* Check if a new window needs to get added for 'src' */
ret = mv_xor_add_io_win(mv_chan, dma_src);
if (ret)
goto err;
/* Populate the descriptor */
mv_xor_config_sg_ll_desc(new, dma_src, dma_dst, len, prev);
prev = new;
dst_avail -= len;
src_avail -= len;
if (!first)
first = new;
else
list_move_tail(&new->node, &first->sg_tx_list);
fetch:
/* Fetch the next dst scatterlist entry */
if (dst_avail == 0) {
if (dst_sg_len == 0)
break;
/* Fetch the next entry: if there are no more: done */
dst_sg = sg_next(dst_sg);
if (dst_sg == NULL)
break;
dst_sg_len--;
dst_avail = sg_dma_len(dst_sg);
}
/* Fetch the next src scatterlist entry */
if (src_avail == 0) {
if (src_sg_len == 0)
break;
/* Fetch the next entry: if there are no more: done */
src_sg = sg_next(src_sg);
if (src_sg == NULL)
break;
src_sg_len--;
src_avail = sg_dma_len(src_sg);
}
}
/* Set the EOD flag in the last descriptor */
mv_xor_desc_config_eod(new);
first->async_tx.flags = flags;
return &first->async_tx;
err:
/* Cleanup: Move all descriptors back into the free list */
spin_lock_bh(&mv_chan->lock);
mv_desc_clean_slot(first, mv_chan);
spin_unlock_bh(&mv_chan->lock);
return NULL;
}
static void mv_xor_free_chan_resources(struct dma_chan *chan)
{
struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
......@@ -1254,8 +1098,6 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
dma_dev->device_prep_dma_interrupt = mv_xor_prep_dma_interrupt;
if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy;
if (dma_has_cap(DMA_SG, dma_dev->cap_mask))
dma_dev->device_prep_dma_sg = mv_xor_prep_dma_sg;
if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
dma_dev->max_xor = 8;
dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor;
......@@ -1305,11 +1147,10 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
goto err_free_irq;
}
dev_info(&pdev->dev, "Marvell XOR (%s): ( %s%s%s%s)\n",
dev_info(&pdev->dev, "Marvell XOR (%s): ( %s%s%s)\n",
mv_chan->op_in_desc ? "Descriptor Mode" : "Registers Mode",
dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
dma_has_cap(DMA_SG, dma_dev->cap_mask) ? "sg " : "",
dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");
dma_async_device_register(dma_dev);
......@@ -1552,7 +1393,6 @@ static int mv_xor_probe(struct platform_device *pdev)
dma_cap_zero(cap_mask);
dma_cap_set(DMA_MEMCPY, cap_mask);
dma_cap_set(DMA_SG, cap_mask);
dma_cap_set(DMA_XOR, cap_mask);
dma_cap_set(DMA_INTERRUPT, cap_mask);
......
......@@ -1005,21 +1005,6 @@ static struct dma_async_tx_descriptor *nbpf_prep_memcpy(
DMA_MEM_TO_MEM, flags);
}
static struct dma_async_tx_descriptor *nbpf_prep_memcpy_sg(
struct dma_chan *dchan,
struct scatterlist *dst_sg, unsigned int dst_nents,
struct scatterlist *src_sg, unsigned int src_nents,
unsigned long flags)
{
struct nbpf_channel *chan = nbpf_to_chan(dchan);
if (dst_nents != src_nents)
return NULL;
return nbpf_prep_sg(chan, src_sg, dst_sg, src_nents,
DMA_MEM_TO_MEM, flags);
}
static struct dma_async_tx_descriptor *nbpf_prep_slave_sg(
struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
enum dma_transfer_direction direction, unsigned long flags, void *context)
......@@ -1417,13 +1402,11 @@ static int nbpf_probe(struct platform_device *pdev)
dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);
dma_cap_set(DMA_SG, dma_dev->cap_mask);
/* Common and MEMCPY operations */
dma_dev->device_alloc_chan_resources
= nbpf_alloc_chan_resources;
dma_dev->device_free_chan_resources = nbpf_free_chan_resources;
dma_dev->device_prep_dma_sg = nbpf_prep_memcpy_sg;
dma_dev->device_prep_dma_memcpy = nbpf_prep_memcpy;
dma_dev->device_tx_status = nbpf_tx_status;
dma_dev->device_issue_pending = nbpf_issue_pending;
......
......@@ -38,8 +38,8 @@ static struct of_dma *of_dma_find_controller(struct of_phandle_args *dma_spec)
if (ofdma->of_node == dma_spec->np)
return ofdma;
pr_debug("%s: can't find DMA controller %s\n", __func__,
dma_spec->np->full_name);
pr_debug("%s: can't find DMA controller %pOF\n", __func__,
dma_spec->np);
return NULL;
}
......@@ -255,8 +255,8 @@ struct dma_chan *of_dma_request_slave_channel(struct device_node *np,
count = of_property_count_strings(np, "dma-names");
if (count < 0) {
pr_err("%s: dma-names property of node '%s' missing or empty\n",
__func__, np->full_name);
pr_err("%s: dma-names property of node '%pOF' missing or empty\n",
__func__, np);
return ERR_PTR(-ENODEV);
}
......
......@@ -3023,7 +3023,7 @@ static int pl330_remove(struct amba_device *adev)
return 0;
}
static struct amba_id pl330_ids[] = {
static const struct amba_id pl330_ids[] = {
{
.id = 0x00041330,
.mask = 0x000fffff,
......
......@@ -4040,9 +4040,9 @@ static int ppc440spe_adma_probe(struct platform_device *ofdev)
/* it is DMA0 or DMA1 */
idx = of_get_property(np, "cell-index", &len);
if (!idx || (len != sizeof(u32))) {
dev_err(&ofdev->dev, "Device node %s has missing "
dev_err(&ofdev->dev, "Device node %pOF has missing "
"or invalid cell-index property\n",
np->full_name);
np);
return -EINVAL;
}
id = *idx;
......@@ -4307,7 +4307,7 @@ static int ppc440spe_adma_remove(struct platform_device *ofdev)
* "poly" allows setting/checking used polynomial (for PPC440SPe only).
*/
static ssize_t show_ppc440spe_devices(struct device_driver *dev, char *buf)
static ssize_t devices_show(struct device_driver *dev, char *buf)
{
ssize_t size = 0;
int i;
......@@ -4321,16 +4321,17 @@ static ssize_t show_ppc440spe_devices(struct device_driver *dev, char *buf)
}
return size;
}
static DRIVER_ATTR_RO(devices);
static ssize_t show_ppc440spe_r6enable(struct device_driver *dev, char *buf)
static ssize_t enable_show(struct device_driver *dev, char *buf)
{
return snprintf(buf, PAGE_SIZE,
"PPC440SP(e) RAID-6 capabilities are %sABLED.\n",
ppc440spe_r6_enabled ? "EN" : "DIS");
}
static ssize_t store_ppc440spe_r6enable(struct device_driver *dev,
const char *buf, size_t count)
static ssize_t enable_store(struct device_driver *dev, const char *buf,
size_t count)
{
unsigned long val;
......@@ -4357,8 +4358,9 @@ static ssize_t store_ppc440spe_r6enable(struct device_driver *dev,
}
return count;
}
static DRIVER_ATTR_RW(enable);
static ssize_t show_ppc440spe_r6poly(struct device_driver *dev, char *buf)
static ssize_t poly_show(struct device_driver *dev, char *buf)
{
ssize_t size = 0;
u32 reg;
......@@ -4377,8 +4379,8 @@ static ssize_t show_ppc440spe_r6poly(struct device_driver *dev, char *buf)
return size;
}
static ssize_t store_ppc440spe_r6poly(struct device_driver *dev,
const char *buf, size_t count)
static ssize_t poly_store(struct device_driver *dev, const char *buf,
size_t count)
{
unsigned long reg, val;
......@@ -4404,12 +4406,7 @@ static ssize_t store_ppc440spe_r6poly(struct device_driver *dev,
return count;
}
static DRIVER_ATTR(devices, S_IRUGO, show_ppc440spe_devices, NULL);
static DRIVER_ATTR(enable, S_IRUGO | S_IWUSR, show_ppc440spe_r6enable,
store_ppc440spe_r6enable);
static DRIVER_ATTR(poly, S_IRUGO | S_IWUSR, show_ppc440spe_r6poly,
store_ppc440spe_r6poly);
static DRIVER_ATTR_RW(poly);
/*
* Common initialisation for RAID engines; allocate memory for
......@@ -4448,8 +4445,7 @@ static int ppc440spe_configure_raid_devices(void)
dcr_base = dcr_resource_start(np, 0);
dcr_len = dcr_resource_len(np, 0);
if (!dcr_base && !dcr_len) {
pr_err("%s: can't get DCR registers base/len!\n",
np->full_name);
pr_err("%pOF: can't get DCR registers base/len!\n", np);
of_node_put(np);
iounmap(i2o_reg);
return -ENODEV;
......@@ -4457,7 +4453,7 @@ static int ppc440spe_configure_raid_devices(void)
i2o_dcr_host = dcr_map(np, dcr_base, dcr_len);
if (!DCR_MAP_OK(i2o_dcr_host)) {
pr_err("%s: failed to map DCRs!\n", np->full_name);
pr_err("%pOF: failed to map DCRs!\n", np);
of_node_put(np);
iounmap(i2o_reg);
return -ENODEV;
......@@ -4518,15 +4514,14 @@ static int ppc440spe_configure_raid_devices(void)
dcr_base = dcr_resource_start(np, 0);
dcr_len = dcr_resource_len(np, 0);
if (!dcr_base && !dcr_len) {
pr_err("%s: can't get DCR registers base/len!\n",
np->full_name);
pr_err("%pOF: can't get DCR registers base/len!\n", np);
ret = -ENODEV;
goto out_mq;
}
ppc440spe_mq_dcr_host = dcr_map(np, dcr_base, dcr_len);
if (!DCR_MAP_OK(ppc440spe_mq_dcr_host)) {
pr_err("%s: failed to map DCRs!\n", np->full_name);
pr_err("%pOF: failed to map DCRs!\n", np);
ret = -ENODEV;
goto out_mq;
}
......
......@@ -65,6 +65,7 @@ struct bam_desc_hw {
#define DESC_FLAG_EOT BIT(14)
#define DESC_FLAG_EOB BIT(13)
#define DESC_FLAG_NWD BIT(12)
#define DESC_FLAG_CMD BIT(11)
struct bam_async_desc {
struct virt_dma_desc vd;
......@@ -645,6 +646,9 @@ static struct dma_async_tx_descriptor *bam_prep_slave_sg(struct dma_chan *chan,
unsigned int curr_offset = 0;
do {
if (flags & DMA_PREP_CMD)
desc->flags |= cpu_to_le16(DESC_FLAG_CMD);
desc->addr = cpu_to_le32(sg_dma_address(sg) +
curr_offset);
......@@ -960,7 +964,7 @@ static void bam_start_dma(struct bam_chan *bchan)
/* set any special flags on the last descriptor */
if (async_desc->num_desc == async_desc->xfer_len)
desc[async_desc->xfer_len - 1].flags =
desc[async_desc->xfer_len - 1].flags |=
cpu_to_le16(async_desc->flags);
else
desc[async_desc->xfer_len - 1].flags |=
......
......@@ -411,7 +411,40 @@ hidma_prep_dma_memcpy(struct dma_chan *dmach, dma_addr_t dest, dma_addr_t src,
return NULL;
hidma_ll_set_transfer_params(mdma->lldev, mdesc->tre_ch,
src, dest, len, flags);
src, dest, len, flags,
HIDMA_TRE_MEMCPY);
/* Place descriptor in prepared list */
spin_lock_irqsave(&mchan->lock, irqflags);
list_add_tail(&mdesc->node, &mchan->prepared);
spin_unlock_irqrestore(&mchan->lock, irqflags);
return &mdesc->desc;
}
static struct dma_async_tx_descriptor *
hidma_prep_dma_memset(struct dma_chan *dmach, dma_addr_t dest, int value,
size_t len, unsigned long flags)
{
struct hidma_chan *mchan = to_hidma_chan(dmach);
struct hidma_desc *mdesc = NULL;
struct hidma_dev *mdma = mchan->dmadev;
unsigned long irqflags;
/* Get free descriptor */
spin_lock_irqsave(&mchan->lock, irqflags);
if (!list_empty(&mchan->free)) {
mdesc = list_first_entry(&mchan->free, struct hidma_desc, node);
list_del(&mdesc->node);
}
spin_unlock_irqrestore(&mchan->lock, irqflags);
if (!mdesc)
return NULL;
hidma_ll_set_transfer_params(mdma->lldev, mdesc->tre_ch,
value, dest, len, flags,
HIDMA_TRE_MEMSET);
/* Place descriptor in prepared list */
spin_lock_irqsave(&mchan->lock, irqflags);
......@@ -776,6 +809,7 @@ static int hidma_probe(struct platform_device *pdev)
pm_runtime_get_sync(dmadev->ddev.dev);
dma_cap_set(DMA_MEMCPY, dmadev->ddev.cap_mask);
dma_cap_set(DMA_MEMSET, dmadev->ddev.cap_mask);
if (WARN_ON(!pdev->dev.dma_mask)) {
rc = -ENXIO;
goto dmafree;
......@@ -786,6 +820,7 @@ static int hidma_probe(struct platform_device *pdev)
dmadev->dev_trca = trca;
dmadev->trca_resource = trca_resource;
dmadev->ddev.device_prep_dma_memcpy = hidma_prep_dma_memcpy;
dmadev->ddev.device_prep_dma_memset = hidma_prep_dma_memset;
dmadev->ddev.device_alloc_chan_resources = hidma_alloc_chan_resources;
dmadev->ddev.device_free_chan_resources = hidma_free_chan_resources;
dmadev->ddev.device_tx_status = hidma_tx_status;
......
......@@ -28,6 +28,11 @@
#define HIDMA_TRE_DEST_LOW_IDX 4
#define HIDMA_TRE_DEST_HI_IDX 5
enum tre_type {
HIDMA_TRE_MEMCPY = 3,
HIDMA_TRE_MEMSET = 4,
};
struct hidma_tre {
atomic_t allocated; /* if this channel is allocated */
bool queued; /* flag whether this is pending */
......@@ -150,7 +155,7 @@ void hidma_ll_start(struct hidma_lldev *llhndl);
int hidma_ll_disable(struct hidma_lldev *lldev);
int hidma_ll_enable(struct hidma_lldev *llhndl);
void hidma_ll_set_transfer_params(struct hidma_lldev *llhndl, u32 tre_ch,
dma_addr_t src, dma_addr_t dest, u32 len, u32 flags);
dma_addr_t src, dma_addr_t dest, u32 len, u32 flags, u32 txntype);
void hidma_ll_setup_irq(struct hidma_lldev *lldev, bool msi);
int hidma_ll_setup(struct hidma_lldev *lldev);
struct hidma_lldev *hidma_ll_init(struct device *dev, u32 max_channels,
......
......@@ -105,10 +105,6 @@ enum ch_state {
HIDMA_CH_STOPPED = 4,
};
enum tre_type {
HIDMA_TRE_MEMCPY = 3,
};
enum err_code {
HIDMA_EVRE_STATUS_COMPLETE = 1,
HIDMA_EVRE_STATUS_ERROR = 4,
......@@ -174,8 +170,7 @@ int hidma_ll_request(struct hidma_lldev *lldev, u32 sig, const char *dev_name,
tre->err_info = 0;
tre->lldev = lldev;
tre_local = &tre->tre_local[0];
tre_local[HIDMA_TRE_CFG_IDX] = HIDMA_TRE_MEMCPY;
tre_local[HIDMA_TRE_CFG_IDX] |= (lldev->chidx & 0xFF) << 8;
tre_local[HIDMA_TRE_CFG_IDX] = (lldev->chidx & 0xFF) << 8;
tre_local[HIDMA_TRE_CFG_IDX] |= BIT(16); /* set IEOB */
*tre_ch = i;
if (callback)
......@@ -607,7 +602,7 @@ int hidma_ll_disable(struct hidma_lldev *lldev)
void hidma_ll_set_transfer_params(struct hidma_lldev *lldev, u32 tre_ch,
dma_addr_t src, dma_addr_t dest, u32 len,
u32 flags)
u32 flags, u32 txntype)
{
struct hidma_tre *tre;
u32 *tre_local;
......@@ -626,6 +621,8 @@ void hidma_ll_set_transfer_params(struct hidma_lldev *lldev, u32 tre_ch,
}
tre_local = &tre->tre_local[0];
tre_local[HIDMA_TRE_CFG_IDX] &= ~GENMASK(7, 0);
tre_local[HIDMA_TRE_CFG_IDX] |= txntype;
tre_local[HIDMA_TRE_LEN_IDX] = len;
tre_local[HIDMA_TRE_SRC_LOW_IDX] = lower_32_bits(src);
tre_local[HIDMA_TRE_SRC_HI_IDX] = upper_32_bits(src);
......
......@@ -28,7 +28,7 @@
#include "hidma_mgmt.h"
#define HIDMA_QOS_N_OFFSET 0x300
#define HIDMA_QOS_N_OFFSET 0x700
#define HIDMA_CFG_OFFSET 0x400
#define HIDMA_MAX_BUS_REQ_LEN_OFFSET 0x41C
#define HIDMA_MAX_XACTIONS_OFFSET 0x420
......@@ -227,7 +227,8 @@ static int hidma_mgmt_probe(struct platform_device *pdev)
goto out;
}
if (max_write_request) {
if (max_write_request &&
(max_write_request != mgmtdev->max_write_request)) {
dev_info(&pdev->dev, "overriding max-write-burst-bytes: %d\n",
max_write_request);
mgmtdev->max_write_request = max_write_request;
......@@ -240,7 +241,8 @@ static int hidma_mgmt_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "max-read-burst-bytes missing\n");
goto out;
}
if (max_read_request) {
if (max_read_request &&
(max_read_request != mgmtdev->max_read_request)) {
dev_info(&pdev->dev, "overriding max-read-burst-bytes: %d\n",
max_read_request);
mgmtdev->max_read_request = max_read_request;
......@@ -253,7 +255,8 @@ static int hidma_mgmt_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "max-write-transactions missing\n");
goto out;
}
if (max_wr_xactions) {
if (max_wr_xactions &&
(max_wr_xactions != mgmtdev->max_wr_xactions)) {
dev_info(&pdev->dev, "overriding max-write-transactions: %d\n",
max_wr_xactions);
mgmtdev->max_wr_xactions = max_wr_xactions;
......@@ -266,7 +269,8 @@ static int hidma_mgmt_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "max-read-transactions missing\n");
goto out;
}
if (max_rd_xactions) {
if (max_rd_xactions &&
(max_rd_xactions != mgmtdev->max_rd_xactions)) {
dev_info(&pdev->dev, "overriding max-read-transactions: %d\n",
max_rd_xactions);
mgmtdev->max_rd_xactions = max_rd_xactions;
......@@ -354,7 +358,7 @@ static int __init hidma_mgmt_of_populate_channels(struct device_node *np)
struct platform_device_info pdevinfo;
struct of_phandle_args out_irq;
struct device_node *child;
struct resource *res;
struct resource *res = NULL;
const __be32 *cell;
int ret = 0, size, i, num;
u64 addr, addr_size;
......
......@@ -1690,6 +1690,15 @@ static int rcar_dmac_chan_probe(struct rcar_dmac *dmac,
if (!irqname)
return -ENOMEM;
/*
* Initialize the DMA engine channel and add it to the DMA engine
* channels list.
*/
chan->device = &dmac->engine;
dma_cookie_init(chan);
list_add_tail(&chan->device_node, &dmac->engine.channels);
ret = devm_request_threaded_irq(dmac->dev, rchan->irq,
rcar_dmac_isr_channel,
rcar_dmac_isr_channel_thread, 0,
......@@ -1700,15 +1709,6 @@ static int rcar_dmac_chan_probe(struct rcar_dmac *dmac,
return ret;
}
/*
* Initialize the DMA engine channel and add it to the DMA engine
* channels list.
*/
chan->device = &dmac->engine;
dma_cookie_init(chan);
list_add_tail(&chan->device_node, &dmac->engine.channels);
return 0;
}
......@@ -1794,14 +1794,6 @@ static int rcar_dmac_probe(struct platform_device *pdev)
if (!irqname)
return -ENOMEM;
ret = devm_request_irq(&pdev->dev, irq, rcar_dmac_isr_error, 0,
irqname, dmac);
if (ret) {
dev_err(&pdev->dev, "failed to request IRQ %u (%d)\n",
irq, ret);
return ret;
}
/* Enable runtime PM and initialize the device. */
pm_runtime_enable(&pdev->dev);
ret = pm_runtime_get_sync(&pdev->dev);
......@@ -1818,8 +1810,32 @@ static int rcar_dmac_probe(struct platform_device *pdev)
goto error;
}
/* Initialize the channels. */
INIT_LIST_HEAD(&dmac->engine.channels);
/* Initialize engine */
engine = &dmac->engine;
dma_cap_set(DMA_MEMCPY, engine->cap_mask);
dma_cap_set(DMA_SLAVE, engine->cap_mask);
engine->dev = &pdev->dev;
engine->copy_align = ilog2(RCAR_DMAC_MEMCPY_XFER_SIZE);
engine->src_addr_widths = widths;
engine->dst_addr_widths = widths;
engine->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
engine->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
engine->device_alloc_chan_resources = rcar_dmac_alloc_chan_resources;
engine->device_free_chan_resources = rcar_dmac_free_chan_resources;
engine->device_prep_dma_memcpy = rcar_dmac_prep_dma_memcpy;
engine->device_prep_slave_sg = rcar_dmac_prep_slave_sg;
engine->device_prep_dma_cyclic = rcar_dmac_prep_dma_cyclic;
engine->device_config = rcar_dmac_device_config;
engine->device_terminate_all = rcar_dmac_chan_terminate_all;
engine->device_tx_status = rcar_dmac_tx_status;
engine->device_issue_pending = rcar_dmac_issue_pending;
engine->device_synchronize = rcar_dmac_device_synchronize;
INIT_LIST_HEAD(&engine->channels);
for (i = 0; i < dmac->n_channels; ++i) {
ret = rcar_dmac_chan_probe(dmac, &dmac->channels[i],
......@@ -1828,6 +1844,14 @@ static int rcar_dmac_probe(struct platform_device *pdev)
goto error;
}
ret = devm_request_irq(&pdev->dev, irq, rcar_dmac_isr_error, 0,
irqname, dmac);
if (ret) {
dev_err(&pdev->dev, "failed to request IRQ %u (%d)\n",
irq, ret);
return ret;
}
/* Register the DMAC as a DMA provider for DT. */
ret = of_dma_controller_register(pdev->dev.of_node, rcar_dmac_of_xlate,
NULL);
......@@ -1839,29 +1863,6 @@ static int rcar_dmac_probe(struct platform_device *pdev)
*
* Default transfer size of 32 bytes requires 32-byte alignment.
*/
engine = &dmac->engine;
dma_cap_set(DMA_MEMCPY, engine->cap_mask);
dma_cap_set(DMA_SLAVE, engine->cap_mask);
engine->dev = &pdev->dev;
engine->copy_align = ilog2(RCAR_DMAC_MEMCPY_XFER_SIZE);
engine->src_addr_widths = widths;
engine->dst_addr_widths = widths;
engine->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
engine->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
engine->device_alloc_chan_resources = rcar_dmac_alloc_chan_resources;
engine->device_free_chan_resources = rcar_dmac_free_chan_resources;
engine->device_prep_dma_memcpy = rcar_dmac_prep_dma_memcpy;
engine->device_prep_slave_sg = rcar_dmac_prep_slave_sg;
engine->device_prep_dma_cyclic = rcar_dmac_prep_dma_cyclic;
engine->device_config = rcar_dmac_device_config;
engine->device_terminate_all = rcar_dmac_chan_terminate_all;
engine->device_tx_status = rcar_dmac_tx_status;
engine->device_issue_pending = rcar_dmac_issue_pending;
engine->device_synchronize = rcar_dmac_device_synchronize;
ret = dma_async_device_register(engine);
if (ret < 0)
goto error;
......
......@@ -79,7 +79,7 @@ static int dma40_memcpy_channels[] = {
};
/* Default configuration for physical memcpy */
static struct stedma40_chan_cfg dma40_memcpy_conf_phy = {
static const struct stedma40_chan_cfg dma40_memcpy_conf_phy = {
.mode = STEDMA40_MODE_PHYSICAL,
.dir = DMA_MEM_TO_MEM,
......@@ -93,7 +93,7 @@ static struct stedma40_chan_cfg dma40_memcpy_conf_phy = {
};
/* Default configuration for logical memcpy */
static struct stedma40_chan_cfg dma40_memcpy_conf_log = {
static const struct stedma40_chan_cfg dma40_memcpy_conf_log = {
.mode = STEDMA40_MODE_LOGICAL,
.dir = DMA_MEM_TO_MEM,
......@@ -2484,19 +2484,6 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
DMA_MEM_TO_MEM, dma_flags);
}
static struct dma_async_tx_descriptor *
d40_prep_memcpy_sg(struct dma_chan *chan,
struct scatterlist *dst_sg, unsigned int dst_nents,
struct scatterlist *src_sg, unsigned int src_nents,
unsigned long dma_flags)
{
if (dst_nents != src_nents)
return NULL;
return d40_prep_sg(chan, src_sg, dst_sg, src_nents,
DMA_MEM_TO_MEM, dma_flags);
}
static struct dma_async_tx_descriptor *
d40_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
unsigned int sg_len, enum dma_transfer_direction direction,
......@@ -2821,9 +2808,6 @@ static void d40_ops_init(struct d40_base *base, struct dma_device *dev)
dev->copy_align = DMAENGINE_ALIGN_4_BYTES;
}
if (dma_has_cap(DMA_SG, dev->cap_mask))
dev->device_prep_dma_sg = d40_prep_memcpy_sg;
if (dma_has_cap(DMA_CYCLIC, dev->cap_mask))
dev->device_prep_dma_cyclic = dma40_prep_dma_cyclic;
......@@ -2865,7 +2849,6 @@ static int __init d40_dmaengine_init(struct d40_base *base,
dma_cap_zero(base->dma_memcpy.cap_mask);
dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask);
dma_cap_set(DMA_SG, base->dma_memcpy.cap_mask);
d40_ops_init(base, &base->dma_memcpy);
......@@ -2883,7 +2866,6 @@ static int __init d40_dmaengine_init(struct d40_base *base,
dma_cap_zero(base->dma_both.cap_mask);
dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask);
dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask);
dma_cap_set(DMA_SG, base->dma_both.cap_mask);
dma_cap_set(DMA_CYCLIC, base->dma_slave.cap_mask);
d40_ops_init(base, &base->dma_both);
......
......@@ -101,6 +101,17 @@ struct sun6i_dma_config {
u32 nr_max_channels;
u32 nr_max_requests;
u32 nr_max_vchans;
/*
* In the datasheets/user manuals of newer Allwinner SoCs, a special
* bit (bit 2 of register 0x20) is present.
* It is named "DMA MCLK interface circuit auto gating bit" in the
* documents, and the footnote of that register says the bit should
* be set when initializing the DMA controller.
* The Allwinner A23/A33 user manuals do not document this bit;
* however, these SoCs do have and require it, as seen in the BSP
* kernel source code.
*/
bool gate_needed;
};
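The new gate_needed flag lets any SoC variant that needs the auto-gating bit opt in through its config instead of a compatible-string check in probe (see the probe change further down in this diff). A hypothetical additional variant would only have to set the flag; the config below is illustrative and not part of this patch.

static struct sun6i_dma_config sun8i_example_dma_cfg = {
	.nr_max_channels = 8,
	.nr_max_requests = 24,
	.nr_max_vchans	 = 34,
	/* probe writes SUN8I_DMA_GATE_ENABLE to SUN8I_DMA_GATE when set */
	.gate_needed	 = true,
};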
/*
......@@ -1009,6 +1020,7 @@ static struct sun6i_dma_config sun8i_a23_dma_cfg = {
.nr_max_channels = 8,
.nr_max_requests = 24,
.nr_max_vchans = 37,
.gate_needed = true,
};
static struct sun6i_dma_config sun8i_a83t_dma_cfg = {
......@@ -1028,11 +1040,24 @@ static struct sun6i_dma_config sun8i_h3_dma_cfg = {
.nr_max_vchans = 34,
};
/*
* The V3s has only 8 physical channels, a maximum DRQ port id of 23,
* and a total of 24 usable source and destination endpoints.
*/
static struct sun6i_dma_config sun8i_v3s_dma_cfg = {
.nr_max_channels = 8,
.nr_max_requests = 23,
.nr_max_vchans = 24,
.gate_needed = true,
};
static const struct of_device_id sun6i_dma_match[] = {
{ .compatible = "allwinner,sun6i-a31-dma", .data = &sun6i_a31_dma_cfg },
{ .compatible = "allwinner,sun8i-a23-dma", .data = &sun8i_a23_dma_cfg },
{ .compatible = "allwinner,sun8i-a83t-dma", .data = &sun8i_a83t_dma_cfg },
{ .compatible = "allwinner,sun8i-h3-dma", .data = &sun8i_h3_dma_cfg },
{ .compatible = "allwinner,sun8i-v3s-dma", .data = &sun8i_v3s_dma_cfg },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sun6i_dma_match);
......@@ -1174,13 +1199,7 @@ static int sun6i_dma_probe(struct platform_device *pdev)
goto err_dma_unregister;
}
/*
* sun8i variant requires us to toggle a dma gating register,
* as seen in Allwinner's SDK. This register is not documented
* in the A23 user manual.
*/
if (of_device_is_compatible(pdev->dev.of_node,
"allwinner,sun8i-a23-dma"))
if (sdc->cfg->gate_needed)
writel(SUN8I_DMA_GATE_ENABLE, sdc->base + SUN8I_DMA_GATE);
return 0;
......
......@@ -308,7 +308,7 @@ static const struct of_device_id ti_dra7_master_match[] = {
static inline void ti_dra7_xbar_reserve(int offset, int len, unsigned long *p)
{
for (; len > 0; len--)
clear_bit(offset + (len - 1), p);
set_bit(offset + (len - 1), p);
}
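The dra7 fix above flips clear_bit() to set_bit(): in the in-use bitmap a set bit appears to mean the crossbar line is taken, so reserving a range must mark the bits busy or an allocator that searches for zero bits will hand the reserved lines out. A standalone sketch of that bitmap convention, assuming a find_first_zero_bit()-style allocator; names and sizes are illustrative only.

#include <linux/bitmap.h>
#include <linux/errno.h>

#define EXAMPLE_DMA_LINES	64

static DECLARE_BITMAP(example_inuse, EXAMPLE_DMA_LINES);

/* Reserve [offset, offset + len) so the allocator never returns it. */
static void example_reserve(int offset, int len)
{
	for (; len > 0; len--)
		set_bit(offset + len - 1, example_inuse);
}

static int example_alloc_line(void)
{
	int line = find_first_zero_bit(example_inuse, EXAMPLE_DMA_LINES);

	if (line == EXAMPLE_DMA_LINES)
		return -ENOMEM;
	set_bit(line, example_inuse);	/* skipped by future lookups */
	return line;
}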
static int ti_dra7_xbar_probe(struct platform_device *pdev)
......
......@@ -391,11 +391,6 @@ static void xgene_dma_set_src_buffer(__le64 *ext8, size_t *len,
*paddr += nbytes;
}
static void xgene_dma_invalidate_buffer(__le64 *ext8)
{
*ext8 |= cpu_to_le64(XGENE_DMA_INVALID_LEN_CODE);
}
static __le64 *xgene_dma_lookup_ext8(struct xgene_dma_desc_hw *desc, int idx)
{
switch (idx) {
......@@ -425,48 +420,6 @@ static void xgene_dma_init_desc(struct xgene_dma_desc_hw *desc,
XGENE_DMA_DESC_HOENQ_NUM_POS);
}
static void xgene_dma_prep_cpy_desc(struct xgene_dma_chan *chan,
struct xgene_dma_desc_sw *desc_sw,
dma_addr_t dst, dma_addr_t src,
size_t len)
{
struct xgene_dma_desc_hw *desc1, *desc2;
int i;
/* Get 1st descriptor */
desc1 = &desc_sw->desc1;
xgene_dma_init_desc(desc1, chan->tx_ring.dst_ring_num);
/* Set destination address */
desc1->m2 |= cpu_to_le64(XGENE_DMA_DESC_DR_BIT);
desc1->m3 |= cpu_to_le64(dst);
/* Set 1st source address */
xgene_dma_set_src_buffer(&desc1->m1, &len, &src);
if (!len)
return;
/*
* We need to split this source buffer,
* and need to use 2nd descriptor
*/
desc2 = &desc_sw->desc2;
desc1->m0 |= cpu_to_le64(XGENE_DMA_DESC_NV_BIT);
/* Set 2nd to 5th source address */
for (i = 0; i < 4 && len; i++)
xgene_dma_set_src_buffer(xgene_dma_lookup_ext8(desc2, i),
&len, &src);
/* Invalidate unused source address field */
for (; i < 4; i++)
xgene_dma_invalidate_buffer(xgene_dma_lookup_ext8(desc2, i));
/* Updated flag that we have prepared 64B descriptor */
desc_sw->flags |= XGENE_DMA_FLAG_64B_DESC;
}
static void xgene_dma_prep_xor_desc(struct xgene_dma_chan *chan,
struct xgene_dma_desc_sw *desc_sw,
dma_addr_t *dst, dma_addr_t *src,
......@@ -891,114 +844,6 @@ static void xgene_dma_free_chan_resources(struct dma_chan *dchan)
chan->desc_pool = NULL;
}
static struct dma_async_tx_descriptor *xgene_dma_prep_sg(
struct dma_chan *dchan, struct scatterlist *dst_sg,
u32 dst_nents, struct scatterlist *src_sg,
u32 src_nents, unsigned long flags)
{
struct xgene_dma_desc_sw *first = NULL, *new = NULL;
struct xgene_dma_chan *chan;
size_t dst_avail, src_avail;
dma_addr_t dst, src;
size_t len;
if (unlikely(!dchan))
return NULL;
if (unlikely(!dst_nents || !src_nents))
return NULL;
if (unlikely(!dst_sg || !src_sg))
return NULL;
chan = to_dma_chan(dchan);
/* Get prepared for the loop */
dst_avail = sg_dma_len(dst_sg);
src_avail = sg_dma_len(src_sg);
dst_nents--;
src_nents--;
/* Run until we are out of scatterlist entries */
while (true) {
/* Create the largest transaction possible */
len = min_t(size_t, src_avail, dst_avail);
len = min_t(size_t, len, XGENE_DMA_MAX_64B_DESC_BYTE_CNT);
if (len == 0)
goto fetch;
dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) - dst_avail;
src = sg_dma_address(src_sg) + sg_dma_len(src_sg) - src_avail;
/* Allocate the link descriptor from DMA pool */
new = xgene_dma_alloc_descriptor(chan);
if (!new)
goto fail;
/* Prepare DMA descriptor */
xgene_dma_prep_cpy_desc(chan, new, dst, src, len);
if (!first)
first = new;
new->tx.cookie = 0;
async_tx_ack(&new->tx);
/* update metadata */
dst_avail -= len;
src_avail -= len;
/* Insert the link descriptor to the LD ring */
list_add_tail(&new->node, &first->tx_list);
fetch:
/* fetch the next dst scatterlist entry */
if (dst_avail == 0) {
/* no more entries: we're done */
if (dst_nents == 0)
break;
/* fetch the next entry: if there are no more: done */
dst_sg = sg_next(dst_sg);
if (!dst_sg)
break;
dst_nents--;
dst_avail = sg_dma_len(dst_sg);
}
/* fetch the next src scatterlist entry */
if (src_avail == 0) {
/* no more entries: we're done */
if (src_nents == 0)
break;
/* fetch the next entry: if there are no more: done */
src_sg = sg_next(src_sg);
if (!src_sg)
break;
src_nents--;
src_avail = sg_dma_len(src_sg);
}
}
if (!new)
return NULL;
new->tx.flags = flags; /* client is in control of this ack */
new->tx.cookie = -EBUSY;
list_splice(&first->tx_list, &new->tx_list);
return &new->tx;
fail:
if (!first)
return NULL;
xgene_dma_free_desc_list(chan, &first->tx_list);
return NULL;
}
static struct dma_async_tx_descriptor *xgene_dma_prep_xor(
struct dma_chan *dchan, dma_addr_t dst, dma_addr_t *src,
u32 src_cnt, size_t len, unsigned long flags)
......@@ -1653,7 +1498,6 @@ static void xgene_dma_set_caps(struct xgene_dma_chan *chan,
dma_cap_zero(dma_dev->cap_mask);
/* Set DMA device capability */
dma_cap_set(DMA_SG, dma_dev->cap_mask);
/* Basically here, the X-Gene SoC DMA engine channel 0 supports XOR
* and channel 1 supports XOR, PQ both. First thing here is we have
......@@ -1679,7 +1523,6 @@ static void xgene_dma_set_caps(struct xgene_dma_chan *chan,
dma_dev->device_free_chan_resources = xgene_dma_free_chan_resources;
dma_dev->device_issue_pending = xgene_dma_issue_pending;
dma_dev->device_tx_status = xgene_dma_tx_status;
dma_dev->device_prep_dma_sg = xgene_dma_prep_sg;
if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
dma_dev->device_prep_dma_xor = xgene_dma_prep_xor;
......@@ -1731,8 +1574,7 @@ static int xgene_dma_async_register(struct xgene_dma *pdma, int id)
/* DMA capability info */
dev_info(pdma->dev,
"%s: CAPABILITY ( %s%s%s)\n", dma_chan_name(&chan->dma_chan),
dma_has_cap(DMA_SG, dma_dev->cap_mask) ? "SGCPY " : "",
"%s: CAPABILITY ( %s%s)\n", dma_chan_name(&chan->dma_chan),
dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "XOR " : "",
dma_has_cap(DMA_PQ, dma_dev->cap_mask) ? "PQ " : "");
......
......@@ -2124,7 +2124,7 @@ static int axidma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
*axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
if (IS_ERR(*axi_clk)) {
err = PTR_ERR(*axi_clk);
dev_err(&pdev->dev, "failed to get axi_aclk (%u)\n", err);
dev_err(&pdev->dev, "failed to get axi_aclk (%d)\n", err);
return err;
}
......@@ -2142,25 +2142,25 @@ static int axidma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
err = clk_prepare_enable(*axi_clk);
if (err) {
dev_err(&pdev->dev, "failed to enable axi_clk (%u)\n", err);
dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
return err;
}
err = clk_prepare_enable(*tx_clk);
if (err) {
dev_err(&pdev->dev, "failed to enable tx_clk (%u)\n", err);
dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
goto err_disable_axiclk;
}
err = clk_prepare_enable(*rx_clk);
if (err) {
dev_err(&pdev->dev, "failed to enable rx_clk (%u)\n", err);
dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
goto err_disable_txclk;
}
err = clk_prepare_enable(*sg_clk);
if (err) {
dev_err(&pdev->dev, "failed to enable sg_clk (%u)\n", err);
dev_err(&pdev->dev, "failed to enable sg_clk (%d)\n", err);
goto err_disable_rxclk;
}
......@@ -2189,26 +2189,26 @@ static int axicdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
*axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
if (IS_ERR(*axi_clk)) {
err = PTR_ERR(*axi_clk);
dev_err(&pdev->dev, "failed to get axi_clk (%u)\n", err);
dev_err(&pdev->dev, "failed to get axi_clk (%d)\n", err);
return err;
}
*dev_clk = devm_clk_get(&pdev->dev, "m_axi_aclk");
if (IS_ERR(*dev_clk)) {
err = PTR_ERR(*dev_clk);
dev_err(&pdev->dev, "failed to get dev_clk (%u)\n", err);
dev_err(&pdev->dev, "failed to get dev_clk (%d)\n", err);
return err;
}
err = clk_prepare_enable(*axi_clk);
if (err) {
dev_err(&pdev->dev, "failed to enable axi_clk (%u)\n", err);
dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
return err;
}
err = clk_prepare_enable(*dev_clk);
if (err) {
dev_err(&pdev->dev, "failed to enable dev_clk (%u)\n", err);
dev_err(&pdev->dev, "failed to enable dev_clk (%d)\n", err);
goto err_disable_axiclk;
}
......@@ -2229,7 +2229,7 @@ static int axivdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
*axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
if (IS_ERR(*axi_clk)) {
err = PTR_ERR(*axi_clk);
dev_err(&pdev->dev, "failed to get axi_aclk (%u)\n", err);
dev_err(&pdev->dev, "failed to get axi_aclk (%d)\n", err);
return err;
}
......@@ -2251,31 +2251,31 @@ static int axivdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
err = clk_prepare_enable(*axi_clk);
if (err) {
dev_err(&pdev->dev, "failed to enable axi_clk (%u)\n", err);
dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
return err;
}
err = clk_prepare_enable(*tx_clk);
if (err) {
dev_err(&pdev->dev, "failed to enable tx_clk (%u)\n", err);
dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
goto err_disable_axiclk;
}
err = clk_prepare_enable(*txs_clk);
if (err) {
dev_err(&pdev->dev, "failed to enable txs_clk (%u)\n", err);
dev_err(&pdev->dev, "failed to enable txs_clk (%d)\n", err);
goto err_disable_txclk;
}
err = clk_prepare_enable(*rx_clk);
if (err) {
dev_err(&pdev->dev, "failed to enable rx_clk (%u)\n", err);
dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
goto err_disable_txsclk;
}
err = clk_prepare_enable(*rxs_clk);
if (err) {
dev_err(&pdev->dev, "failed to enable rxs_clk (%u)\n", err);
dev_err(&pdev->dev, "failed to enable rxs_clk (%d)\n", err);
goto err_disable_rxclk;
}
......
......@@ -829,98 +829,6 @@ static struct dma_async_tx_descriptor *zynqmp_dma_prep_memcpy(
return &first->async_tx;
}
/**
* zynqmp_dma_prep_sg - prepare descriptors for a memory sg transaction
* @dchan: DMA channel
* @dst_sg: Destination scatter list
* @dst_sg_len: Number of entries in destination scatter list
* @src_sg: Source scatter list
* @src_sg_len: Number of entries in source scatter list
* @flags: transfer ack flags
*
* Return: Async transaction descriptor on success and NULL on failure
*/
static struct dma_async_tx_descriptor *zynqmp_dma_prep_sg(
struct dma_chan *dchan, struct scatterlist *dst_sg,
unsigned int dst_sg_len, struct scatterlist *src_sg,
unsigned int src_sg_len, unsigned long flags)
{
struct zynqmp_dma_desc_sw *new, *first = NULL;
struct zynqmp_dma_chan *chan = to_chan(dchan);
void *desc = NULL, *prev = NULL;
size_t len, dst_avail, src_avail;
dma_addr_t dma_dst, dma_src;
u32 desc_cnt = 0, i;
struct scatterlist *sg;
for_each_sg(src_sg, sg, src_sg_len, i)
desc_cnt += DIV_ROUND_UP(sg_dma_len(sg),
ZYNQMP_DMA_MAX_TRANS_LEN);
spin_lock_bh(&chan->lock);
if (desc_cnt > chan->desc_free_cnt) {
spin_unlock_bh(&chan->lock);
dev_dbg(chan->dev, "chan %p descs are not available\n", chan);
return NULL;
}
chan->desc_free_cnt = chan->desc_free_cnt - desc_cnt;
spin_unlock_bh(&chan->lock);
dst_avail = sg_dma_len(dst_sg);
src_avail = sg_dma_len(src_sg);
/* Run until we are out of scatterlist entries */
while (true) {
/* Allocate and populate the descriptor */
new = zynqmp_dma_get_descriptor(chan);
desc = (struct zynqmp_dma_desc_ll *)new->src_v;
len = min_t(size_t, src_avail, dst_avail);
len = min_t(size_t, len, ZYNQMP_DMA_MAX_TRANS_LEN);
if (len == 0)
goto fetch;
dma_dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) -
dst_avail;
dma_src = sg_dma_address(src_sg) + sg_dma_len(src_sg) -
src_avail;
zynqmp_dma_config_sg_ll_desc(chan, desc, dma_src, dma_dst,
len, prev);
prev = desc;
dst_avail -= len;
src_avail -= len;
if (!first)
first = new;
else
list_add_tail(&new->node, &first->tx_list);
fetch:
/* Fetch the next dst scatterlist entry */
if (dst_avail == 0) {
if (dst_sg_len == 0)
break;
dst_sg = sg_next(dst_sg);
if (dst_sg == NULL)
break;
dst_sg_len--;
dst_avail = sg_dma_len(dst_sg);
}
/* Fetch the next src scatterlist entry */
if (src_avail == 0) {
if (src_sg_len == 0)
break;
src_sg = sg_next(src_sg);
if (src_sg == NULL)
break;
src_sg_len--;
src_avail = sg_dma_len(src_sg);
}
}
zynqmp_dma_desc_config_eod(chan, desc);
first->async_tx.flags = flags;
return &first->async_tx;
}
/**
* zynqmp_dma_chan_remove - Channel remove function
* @chan: ZynqMP DMA channel pointer
......@@ -1064,11 +972,9 @@ static int zynqmp_dma_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&zdev->common.channels);
dma_set_mask(&pdev->dev, DMA_BIT_MASK(44));
dma_cap_set(DMA_SG, zdev->common.cap_mask);
dma_cap_set(DMA_MEMCPY, zdev->common.cap_mask);
p = &zdev->common;
p->device_prep_dma_sg = zynqmp_dma_prep_sg;
p->device_prep_dma_memcpy = zynqmp_dma_prep_memcpy;
p->device_terminate_all = zynqmp_dma_device_terminate_all;
p->device_issue_pending = zynqmp_dma_issue_pending;
......
/*
* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _QCOM_BAM_DMA_H
#define _QCOM_BAM_DMA_H
#include <asm/byteorder.h>
/*
* This data type corresponds to the native Command Element
* supported by the BAM DMA engine.
*
* @cmd_and_addr - upper 8 bits command and lower 24 bits register address.
* @data - for a write command, the value to be written to the peripheral
* register; for a read command, the destination address to which the
* peripheral register value is written.
* @mask - register mask.
* @reserved - for future usage.
*
*/
struct bam_cmd_element {
__le32 cmd_and_addr;
__le32 data;
__le32 mask;
__le32 reserved;
};
/*
* This enum indicates the command type in a command element
*/
enum bam_command_type {
BAM_WRITE_COMMAND = 0,
BAM_READ_COMMAND,
};
/*
* bam_prep_ce_le32 - Wrapper function to prepare a single BAM command
* element with the data already in le32 format.
*
* @bam_ce: bam command element
* @addr: target address
* @cmd: BAM command
* @data: actual data for write and dest addr for read in le32
*/
static inline void
bam_prep_ce_le32(struct bam_cmd_element *bam_ce, u32 addr,
enum bam_command_type cmd, __le32 data)
{
bam_ce->cmd_and_addr =
cpu_to_le32((addr & 0xffffff) | ((cmd & 0xff) << 24));
bam_ce->data = data;
bam_ce->mask = cpu_to_le32(0xffffffff);
}
/*
* bam_prep_ce - Wrapper function to prepare a single BAM command element
* with the data.
*
* @bam_ce: BAM command element
* @addr: target address
* @cmd: BAM command
* @data: actual data for write and dest addr for read
*/
static inline void
bam_prep_ce(struct bam_cmd_element *bam_ce, u32 addr,
enum bam_command_type cmd, u32 data)
{
bam_prep_ce_le32(bam_ce, addr, cmd, cpu_to_le32(data));
}
#endif
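A minimal usage sketch for the helpers above: filling two command elements in a DMA-able buffer, one register write followed by a read-back. The register address, value, and the assumption that the result DMA address fits in 32 bits are illustrative only, and the include path for this new header is assumed here.

#include <linux/types.h>
#include <linux/dma/qcom_bam_dma.h>	/* assumed include path for this header */

static void example_fill_cmd_elements(struct bam_cmd_element *ce,
				      u32 periph_reg, u32 value,
				      dma_addr_t result_dma)
{
	/* Write 'value' into the peripheral register at 'periph_reg'. */
	bam_prep_ce(&ce[0], periph_reg, BAM_WRITE_COMMAND, value);

	/*
	 * Read the register back; the BAM writes the result to 'result_dma'
	 * (assumes the address fits in 32 bits for this illustration).
	 */
	bam_prep_ce(&ce[1], periph_reg, BAM_READ_COMMAND, (u32)result_dma);
}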
......@@ -68,7 +68,6 @@ enum dma_transaction_type {
DMA_MEMSET,
DMA_MEMSET_SG,
DMA_INTERRUPT,
DMA_SG,
DMA_PRIVATE,
DMA_ASYNC_TX,
DMA_SLAVE,
......@@ -186,6 +185,9 @@ struct dma_interleaved_template {
* on the result of this operation
* @DMA_CTRL_REUSE: client can reuse the descriptor and submit again till
* cleared or freed
* @DMA_PREP_CMD: tell the driver that the data passed to the DMA API is
* command data, and that the descriptor should be in a different format
* from normal data descriptors.
*/
enum dma_ctrl_flags {
DMA_PREP_INTERRUPT = (1 << 0),
......@@ -195,6 +197,7 @@ enum dma_ctrl_flags {
DMA_PREP_CONTINUE = (1 << 4),
DMA_PREP_FENCE = (1 << 5),
DMA_CTRL_REUSE = (1 << 6),
DMA_PREP_CMD = (1 << 7),
};
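A hedged sketch of how a client could use the new flag: mark a slave-sg descriptor as carrying command data (the BAM command elements added in this series are one consumer). The channel and the already DMA-mapped scatterlist are assumed to exist.

/* 'chan', 'sgl' and 'nents' are assumed to be set up and DMA-mapped. */
struct dma_async_tx_descriptor *desc;

desc = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_MEM_TO_DEV,
			       DMA_PREP_CMD | DMA_PREP_INTERRUPT);
if (desc) {
	dmaengine_submit(desc);
	dma_async_issue_pending(chan);
}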
/**
......@@ -771,11 +774,6 @@ struct dma_device {
unsigned int nents, int value, unsigned long flags);
struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)(
struct dma_chan *chan, unsigned long flags);
struct dma_async_tx_descriptor *(*device_prep_dma_sg)(
struct dma_chan *chan,
struct scatterlist *dst_sg, unsigned int dst_nents,
struct scatterlist *src_sg, unsigned int src_nents,
unsigned long flags);
struct dma_async_tx_descriptor *(*device_prep_slave_sg)(
struct dma_chan *chan, struct scatterlist *sgl,
......@@ -905,19 +903,6 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_memcpy(
len, flags);
}
static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_sg(
struct dma_chan *chan,
struct scatterlist *dst_sg, unsigned int dst_nents,
struct scatterlist *src_sg, unsigned int src_nents,
unsigned long flags)
{
if (!chan || !chan->device || !chan->device->device_prep_dma_sg)
return NULL;
return chan->device->device_prep_dma_sg(chan, dst_sg, dst_nents,
src_sg, src_nents, flags);
}
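With device_prep_dma_sg() and the dmaengine_prep_dma_sg() wrapper gone, a client that still needs an SG-to-SG copy can fall back to one memcpy descriptor per segment pair. A minimal sketch under the simplifying assumption that both lists have the same number of equally sized, already-mapped segments; real lists may need the segment splitting that the removed drivers performed.

#include <linux/dmaengine.h>
#include <linux/scatterlist.h>

static int example_sg_copy(struct dma_chan *chan,
			   struct scatterlist *dst_sg,
			   struct scatterlist *src_sg,
			   unsigned int nents)
{
	struct dma_async_tx_descriptor *tx;
	unsigned int i;

	for (i = 0; i < nents; i++) {
		/* Interrupt only on the last segment of the copy. */
		tx = dmaengine_prep_dma_memcpy(chan,
					       sg_dma_address(dst_sg),
					       sg_dma_address(src_sg),
					       sg_dma_len(src_sg),
					       i == nents - 1 ?
					       DMA_PREP_INTERRUPT : 0);
		if (!tx)
			return -EIO;
		dmaengine_submit(tx);
		dst_sg = sg_next(dst_sg);
		src_sg = sg_next(src_sg);
	}
	dma_async_issue_pending(chan);
	return 0;
}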
/**
* dmaengine_terminate_all() - Terminate all active DMA transfers
* @chan: The channel for which to terminate the transfers
......