Commit 416a2f4f authored by Linus Torvalds

Merge tag 'dmaengine-6.1-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine

Pull dmaengine updates from Vinod Koul:
 "New Support:

   - MT6795 SoC dma controller (AngeloGioacchino Del Regno)

   - qcom-adm controller yaml binding (Christian Marangi)

   - Renesas r8a779g0 dma controller yaml binding (Geert Uytterhoeven)

   - Qualcomm SM6350 GPI dma controller (Luca Weiss)

  Updates:

   - STM32 DMA-MDMA chaining support (Amelie Delaunay)

   - make hsu driver use managed resources (Andy Shevchenko)

   - the usual round of idxd driver updates (Dave Jiang & Jerry
     Snitselaar)

   - apple dma driver iommu and pd properties and remove use
     of devres for irqs (Janne Grunau & Martin Povišer)

   - device_synchronize support for Xilinx zynqmp driver (Swati
     Agarwal)"

* tag 'dmaengine-6.1-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine: (60 commits)
  dmaengine: ioat: remove unused declarations in dma.h
  dmaengine: ti: k3-udma: Respond TX done if DMA_PREP_INTERRUPT is not requested
  dmaengine: zynqmp_dma: Add device_synchronize support
  dt-bindings: dma: add additional pbus reset to qcom,adm
  dt-bindings: dma: rework qcom,adm Documentation to yaml schema
  dt-bindings: dma: apple,admac: Add iommus and power-domains properties
  dmaengine: dw-edma: Remove runtime PM support
  dmaengine: idxd: add configuration for concurrent batch descriptor processing
  dmaengine: idxd: add configuration for concurrent work descriptor processing
  dmaengine: idxd: add WQ operation cap restriction support
  dmanegine: idxd: reformat opcap output to match bitmap_parse() input
  dmaengine: idxd: convert ats_dis to a wq flag
  dmaengine: ioat: stop mod_timer from resurrecting deleted timer in __cleanup()
  dmaengine: qcom-adm: fix wrong calling convention for prep_slave_sg
  dmaengine: qcom-adm: fix wrong sizeof config in slave_config
  dmaengine: ti: k3-psil: add additional TX threads for j721e
  dmaengine: ti: k3-psil: add additional TX threads for j7200
  dmaengine: apple-admac: Trigger shared reset
  dmaengine: apple-admac: Do not use devres for IRQs
  dmaengine: ti: edma: Remove some unused functions
  ...
parents 62e6e594 b957df98
......@@ -227,6 +227,17 @@ Contact: dmaengine@vger.kernel.org
Description: Indicates the number of retries for an enqcmds submission on a shared wq.
The maximum value that can be set for this attribute is 64.
What: /sys/bus/dsa/devices/wq<m>.<n>/op_config
Date: Sept 14, 2022
KernelVersion: 6.0.0
Contact: dmaengine@vger.kernel.org
Description: Shows the operation capability bits displayed in bitmap format
presented by %*pb printk() output format specifier.
The attribute can be configured when the WQ is disabled in
order to configure the WQ to accept specific bits that
correlate to the operations allowed. It's visible only
on platforms that support the capability.
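
A rough userspace sketch of how this attribute is meant to be used (the wq0.0 instance name and the final mask are illustrative, not taken from the patch; per the sysfs code later in this series, a write is rejected with EINVAL if it asks for a bit the device's op_cap does not advertise, and with EPERM if the WQ is enabled):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* Illustrative instance; real names follow wq<m>.<n> on the dsa bus. */
	const char *path = "/sys/bus/dsa/devices/wq0.0/op_config";
	char cur[256];
	ssize_t n;
	int fd;

	/* Current mask as printed by the kernel with %*pb:
	 * comma-separated 32-bit hex words, most significant word first. */
	fd = open(path, O_RDONLY);
	if (fd < 0)
		return 1;
	n = read(fd, cur, sizeof(cur) - 1);
	close(fd);
	if (n > 0) {
		cur[n] = '\0';
		printf("current op_config: %s", cur);
	}

	/* Restrict the (disabled) WQ to the two lowest operation bits.
	 * The value is parsed with bitmap_parse(), so the same format works. */
	fd = open(path, O_WRONLY);
	if (fd < 0)
		return 1;
	if (write(fd, "0,0,0,0,0,0,0,3", strlen("0,0,0,0,0,0,0,3")) < 0)
		perror("write op_config");
	close(fd);
	return 0;
}
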
What: /sys/bus/dsa/devices/engine<m>.<n>/group_id
Date: Oct 25, 2019
KernelVersion: 5.6.0
......@@ -255,3 +266,27 @@ Contact: dmaengine@vger.kernel.org
Description: Indicates the number of Read Buffers reserved for the use of
engines in the group. See DSA spec v1.2 9.2.18 GRPCFG Read Buffers
Reserved.
What: /sys/bus/dsa/devices/group<m>.<n>/desc_progress_limit
Date: Sept 14, 2022
KernelVersion: 6.0.0
Contact: dmaengine@vger.kernel.org
Description: Allows control of the number of work descriptors that can be
concurrently processed by an engine in the group as a fraction
of the Maximum Work Descriptors in Progress value specified in
the ENGCAP register. The acceptable values are 0 (default),
1 (1/2 of max value), 2 (1/4 of the max value), and 3 (1/8 of
the max value). It's visible only on platforms that support
the capability.
What: /sys/bus/dsa/devices/group<m>.<n>/batch_progress_limit
Date: Sept 14, 2022
KernelVersion: 6.0.0
Contact: dmaengine@vger.kernel.org
Description: Allows control of the number of batch descriptors that can be
concurrently processed by an engine in the group as a fraction
of the Maximum Batch Descriptors in Progress value specified in
the ENGCAP register. The acceptable values are 0 (default),
1 (1/2 of max value), 2 (1/4 of the max value), and 3 (1/8 of
the max value). It's visible only on platforms that support
the capability.
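
Both attributes use the same encoding, which works out to a power-of-two division of the ENGCAP maximum; a minimal sketch of the arithmetic (the maximum of 64 below is a made-up figure, the real value comes from the ENGCAP register):

#include <stdio.h>

/* Effective in-progress count for a given *_progress_limit value:
 * 0 keeps the ENGCAP maximum, 1 halves it, 2 quarters it, 3 gives 1/8. */
static unsigned int effective_limit(unsigned int engcap_max, unsigned int val)
{
	return engcap_max >> val;
}

int main(void)
{
	unsigned int engcap_max = 64;	/* illustrative only */
	unsigned int val;

	for (val = 0; val <= 3; val++)
		printf("limit %u -> %u descriptors in progress\n",
		       val, effective_limit(engcap_max, val));
	return 0;
}
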
......@@ -59,6 +59,7 @@ SoC-specific documents
stm32/stm32f429-overview
stm32/stm32mp13-overview
stm32/stm32mp157-overview
stm32/stm32-dma-mdma-chaining
sunxi
......
......@@ -49,6 +49,13 @@ properties:
in an interrupts-extended list the disconnected positions will contain
an empty phandle reference <0>.
iommus:
minItems: 1
maxItems: 2
power-domains:
maxItems: 1
required:
- compatible
- reg
......
......@@ -55,6 +55,12 @@ properties:
dma-coherent: true
iommus:
minItems: 1
maxItems: 9
description: Up to 1 IOMMU entry per DMA channel for writes and 1
IOMMU entry for reads.
power-domains:
maxItems: 1
......
......@@ -22,6 +22,7 @@ properties:
- items:
- enum:
- mediatek,mt2712-uart-dma
- mediatek,mt6795-uart-dma
- mediatek,mt8365-uart-dma
- mediatek,mt8516-uart-dma
- const: mediatek,mt6577-uart-dma
......
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/dma/qcom,adm.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Qualcomm ADM DMA Controller
maintainers:
- Christian Marangi <ansuelsmth@gmail.com>
- Bjorn Andersson <bjorn.andersson@linaro.org>
description: |
QCOM ADM DMA controller provides DMA capabilities for
peripheral buses such as NAND and SPI.
properties:
compatible:
const: qcom,adm
reg:
maxItems: 1
interrupts:
maxItems: 1
"#dma-cells":
const: 1
clocks:
items:
- description: phandle to the core clock
- description: phandle to the iface clock
clock-names:
items:
- const: core
- const: iface
resets:
items:
- description: phandle to the clk reset
- description: phandle to the pbus reset
- description: phandle to the c0 reset
- description: phandle to the c1 reset
- description: phandle to the c2 reset
reset-names:
items:
- const: clk
- const: pbus
- const: c0
- const: c1
- const: c2
qcom,ee:
$ref: /schemas/types.yaml#/definitions/uint32
description: indicates the security domain identifier used in the secure world.
minimum: 0
maximum: 255
required:
- compatible
- reg
- interrupts
- "#dma-cells"
- clocks
- clock-names
- resets
- reset-names
- qcom,ee
additionalProperties: false
examples:
- |
#include <dt-bindings/clock/qcom,gcc-ipq806x.h>
#include <dt-bindings/reset/qcom,gcc-ipq806x.h>
adm_dma: dma-controller@18300000 {
compatible = "qcom,adm";
reg = <0x18300000 0x100000>;
interrupts = <0 170 0>;
#dma-cells = <1>;
clocks = <&gcc ADM0_CLK>,
<&gcc ADM0_PBUS_CLK>;
clock-names = "core", "iface";
resets = <&gcc ADM0_RESET>,
<&gcc ADM0_PBUS_RESET>,
<&gcc ADM0_C0_RESET>,
<&gcc ADM0_C1_RESET>,
<&gcc ADM0_C2_RESET>;
reset-names = "clk", "pbus", "c0", "c1", "c2";
qcom,ee = <0>;
};
...
......@@ -8,7 +8,7 @@ title: Qualcomm Technologies Inc BAM DMA controller
maintainers:
- Andy Gross <agross@kernel.org>
- Bjorn Andersson <bjorn.andersson@linaro.org>
- Bjorn Andersson <andersson@kernel.org>
allOf:
- $ref: "dma-controller.yaml#"
......@@ -20,7 +20,7 @@ properties:
- qcom,bam-v1.3.0
# MSM8974, APQ8074 and APQ8084
- qcom,bam-v1.4.0
# MSM8916
# MSM8916 and SDM845
- qcom,bam-v1.7.0
clocks:
......@@ -90,8 +90,8 @@ examples:
dma-controller@f9944000 {
compatible = "qcom,bam-v1.4.0";
reg = <0xf9944000 0x15000>;
interrupts = <GIC_SPI 94 IRQ_TYPE_LEVEL_HIGH>;
reg = <0xf9944000 0x19000>;
interrupts = <GIC_SPI 239 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&gcc GCC_BLSP2_AHB_CLK>;
clock-names = "bam_clk";
#dma-cells = <1>;
......
......@@ -21,6 +21,7 @@ properties:
enum:
- qcom,sc7280-gpi-dma
- qcom,sdm845-gpi-dma
- qcom,sm6350-gpi-dma
- qcom,sm8150-gpi-dma
- qcom,sm8250-gpi-dma
- qcom,sm8350-gpi-dma
......
QCOM ADM DMA Controller
Required properties:
- compatible: must contain "qcom,adm" for IPQ/APQ8064 and MSM8960
- reg: Address range for DMA registers
- interrupts: Should contain one interrupt shared by all channels
- #dma-cells: must be <2>. First cell denotes the channel number. Second cell
denotes CRCI (client rate control interface) flow control assignment.
- clocks: Should contain the core clock and interface clock.
- clock-names: Must contain "core" for the core clock and "iface" for the
interface clock.
- resets: Must contain an entry for each entry in reset names.
- reset-names: Must include the following entries:
- clk
- c0
- c1
- c2
- qcom,ee: indicates the security domain identifier used in the secure world.
Example:
adm_dma: dma@18300000 {
compatible = "qcom,adm";
reg = <0x18300000 0x100000>;
interrupts = <0 170 0>;
#dma-cells = <2>;
clocks = <&gcc ADM0_CLK>, <&gcc ADM0_PBUS_CLK>;
clock-names = "core", "iface";
resets = <&gcc ADM0_RESET>,
<&gcc ADM0_C0_RESET>,
<&gcc ADM0_C1_RESET>,
<&gcc ADM0_C2_RESET>;
reset-names = "clk", "c0", "c1", "c2";
qcom,ee = <0>;
};
DMA clients must use the format described in the dma.txt file, using a three
cell specifier for each channel.
Each dmas request consists of 3 cells:
1. phandle pointing to the DMA controller
2. channel number
3. CRCI assignment, if applicable. If no CRCI flow control is required, use 0.
The CRCI is used for flow control. It identifies the peripheral device that
is the source/destination for the transferred data.
Example:
spi4: spi@1a280000 {
spi-max-frequency = <50000000>;
pinctrl-0 = <&spi_pins>;
pinctrl-names = "default";
cs-gpios = <&qcom_pinmux 20 0>;
dmas = <&adm_dma 6 9>,
<&adm_dma 5 10>;
dma-names = "rx", "tx";
};
......@@ -45,6 +45,7 @@ properties:
- enum:
- renesas,dmac-r8a779a0 # R-Car V3U
- renesas,dmac-r8a779f0 # R-Car S4-8
- renesas,dmac-r8a779g0 # R-Car V4H
- const: renesas,rcar-gen4-dmac # R-Car Gen4
reg: true
......
......@@ -4,7 +4,7 @@ Required properties:
- compatible: "ti,dra7-dma-crossbar" for DRA7xx DMA crossbar
"ti,am335x-edma-crossbar" for AM335x and AM437x
- reg: Memory map for accessing module
- #dma-cells: Should be set to to match with the DMA controller's dma-cells
- #dma-cells: Should be set to match with the DMA controller's dma-cells
for ti,dra7-dma-crossbar and <3> for ti,am335x-edma-crossbar.
- dma-requests: Number of DMA requests the crossbar can receive
- dma-masters: phandle pointing to the DMA controller
......
......@@ -9157,6 +9157,7 @@ F: net/dsa/tag_hellcreek.c
HISILICON DMA DRIVER
M: Zhou Wang <wangzhou1@hisilicon.com>
M: Jie Hai <haijie1@hisilicon.com>
L: dmaengine@vger.kernel.org
S: Maintained
F: drivers/dma/hisi_dma.c
......
......@@ -180,7 +180,7 @@ config DMA_SUN6I
config DW_AXI_DMAC
tristate "Synopsys DesignWare AXI DMA support"
depends on OF || COMPILE_TEST
depends on OF
depends on HAS_IOMEM
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
......
......@@ -2367,7 +2367,7 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
INIT_LIST_HEAD(&dmadev->channels);
/*
* Register as many many memcpy as we have physical channels,
* Register as many memcpy as we have physical channels,
* we won't always be able to use all but the code will have
* to cope with that situation.
*/
......
......@@ -12,8 +12,9 @@
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/interrupt.h>
#include <linux/reset.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include "dmaengine.h"
......@@ -95,7 +96,9 @@ struct admac_data {
struct dma_device dma;
struct device *dev;
__iomem void *base;
struct reset_control *rstc;
int irq;
int irq_index;
int nchannels;
struct admac_chan channels[];
......@@ -724,18 +727,17 @@ static int admac_probe(struct platform_device *pdev)
if (irq < 0)
return dev_err_probe(&pdev->dev, irq, "no usable interrupt\n");
err = devm_request_irq(&pdev->dev, irq, admac_interrupt,
0, dev_name(&pdev->dev), ad);
if (err)
return dev_err_probe(&pdev->dev, err,
"unable to register interrupt\n");
ad->irq = irq;
ad->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ad->base))
return dev_err_probe(&pdev->dev, PTR_ERR(ad->base),
"unable to obtain MMIO resource\n");
ad->rstc = devm_reset_control_get_optional_shared(&pdev->dev, NULL);
if (IS_ERR(ad->rstc))
return PTR_ERR(ad->rstc);
dma = &ad->dma;
dma_cap_set(DMA_PRIVATE, dma->cap_mask);
......@@ -774,17 +776,38 @@ static int admac_probe(struct platform_device *pdev)
tasklet_setup(&adchan->tasklet, admac_chan_tasklet);
}
err = dma_async_device_register(&ad->dma);
err = reset_control_reset(ad->rstc);
if (err)
return dev_err_probe(&pdev->dev, err, "failed to register DMA device\n");
return dev_err_probe(&pdev->dev, err,
"unable to trigger reset\n");
err = request_irq(irq, admac_interrupt, 0, dev_name(&pdev->dev), ad);
if (err) {
dev_err_probe(&pdev->dev, err,
"unable to register interrupt\n");
goto free_reset;
}
err = dma_async_device_register(&ad->dma);
if (err) {
dev_err_probe(&pdev->dev, err, "failed to register DMA device\n");
goto free_irq;
}
err = of_dma_controller_register(pdev->dev.of_node, admac_dma_of_xlate, ad);
if (err) {
dma_async_device_unregister(&ad->dma);
return dev_err_probe(&pdev->dev, err, "failed to register with OF\n");
dev_err_probe(&pdev->dev, err, "failed to register with OF\n");
goto free_irq;
}
return 0;
free_irq:
free_irq(ad->irq, ad);
free_reset:
reset_control_rearm(ad->rstc);
return err;
}
static int admac_remove(struct platform_device *pdev)
......@@ -793,6 +816,8 @@ static int admac_remove(struct platform_device *pdev)
of_dma_controller_free(pdev->dev.of_node);
dma_async_device_unregister(&ad->dma);
free_irq(ad->irq, ad);
reset_control_rearm(ad->rstc);
return 0;
}
......
......@@ -1470,10 +1470,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
bool initd;
ret = dma_cookie_status(chan, cookie, txstate);
if (ret == DMA_COMPLETE)
return ret;
if (!txstate)
if (ret == DMA_COMPLETE || !txstate)
return ret;
spin_lock_irqsave(&atchan->lock, flags);
......
......@@ -9,7 +9,6 @@
#include <linux/module.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/pm_runtime.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/interrupt.h>
......@@ -682,15 +681,12 @@ static int dw_edma_alloc_chan_resources(struct dma_chan *dchan)
if (chan->status != EDMA_ST_IDLE)
return -EBUSY;
pm_runtime_get(chan->dw->chip->dev);
return 0;
}
static void dw_edma_free_chan_resources(struct dma_chan *dchan)
{
unsigned long timeout = jiffies + msecs_to_jiffies(5000);
struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
int ret;
while (time_before(jiffies, timeout)) {
......@@ -703,8 +699,6 @@ static void dw_edma_free_chan_resources(struct dma_chan *dchan)
cpu_relax();
}
pm_runtime_put(chan->dw->chip->dev);
}
static int dw_edma_channel_setup(struct dw_edma *dw, bool write,
......@@ -977,9 +971,6 @@ int dw_edma_probe(struct dw_edma_chip *chip)
if (err)
goto err_irq_free;
/* Power management */
pm_runtime_enable(dev);
/* Turn debugfs on */
dw_edma_v0_core_debugfs_on(dw);
......@@ -1009,9 +1000,6 @@ int dw_edma_remove(struct dw_edma_chip *chip)
for (i = (dw->nr_irqs - 1); i >= 0; i--)
free_irq(chip->ops->irq_vector(dev, i), &dw->irq[i]);
/* Power management */
pm_runtime_disable(dev);
/* Deregister eDMA device */
dma_async_device_unregister(&dw->wr_edma);
list_for_each_entry_safe(chan, _chan, &dw->wr_edma.channels,
......
......@@ -16,12 +16,20 @@
* port 3, and so on.
*/
#include <linux/bits.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/percpu-defs.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include "hsu.h"
......
......@@ -10,7 +10,11 @@
#ifndef __DMA_HSU_H__
#define __DMA_HSU_H__
#include <linux/spinlock.h>
#include <linux/bits.h>
#include <linux/container_of.h>
#include <linux/io.h>
#include <linux/types.h>
#include <linux/dma/hsu.h>
#include "../virt-dma.h"
......@@ -36,11 +40,11 @@
/* Bits in HSU_CH_SR */
#define HSU_CH_SR_DESCTO(x) BIT(8 + (x))
#define HSU_CH_SR_DESCTO_ANY (BIT(11) | BIT(10) | BIT(9) | BIT(8))
#define HSU_CH_SR_DESCTO_ANY GENMASK(11, 8)
#define HSU_CH_SR_CHE BIT(15)
#define HSU_CH_SR_DESCE(x) BIT(16 + (x))
#define HSU_CH_SR_DESCE_ANY (BIT(19) | BIT(18) | BIT(17) | BIT(16))
#define HSU_CH_SR_CDESC_ANY (BIT(31) | BIT(30))
#define HSU_CH_SR_DESCE_ANY GENMASK(19, 16)
#define HSU_CH_SR_CDESC_ANY GENMASK(31, 30)
/* Bits in HSU_CH_CR */
#define HSU_CH_CR_CHA BIT(0)
......
......@@ -10,6 +10,7 @@
#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
......@@ -26,29 +27,32 @@
static irqreturn_t hsu_pci_irq(int irq, void *dev)
{
struct hsu_dma_chip *chip = dev;
u32 dmaisr;
u32 status;
unsigned long dmaisr;
unsigned short i;
u32 status;
int ret = 0;
int err;
dmaisr = readl(chip->regs + HSU_PCI_DMAISR);
for (i = 0; i < chip->hsu->nr_channels; i++) {
if (dmaisr & 0x1) {
err = hsu_dma_get_status(chip, i, &status);
if (err > 0)
ret |= 1;
else if (err == 0)
ret |= hsu_dma_do_irq(chip, i, status);
}
dmaisr >>= 1;
for_each_set_bit(i, &dmaisr, chip->hsu->nr_channels) {
err = hsu_dma_get_status(chip, i, &status);
if (err > 0)
ret |= 1;
else if (err == 0)
ret |= hsu_dma_do_irq(chip, i, status);
}
return IRQ_RETVAL(ret);
}
static void hsu_pci_dma_remove(void *chip)
{
hsu_dma_remove(chip);
}
static int hsu_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct device *dev = &pdev->dev;
struct hsu_dma_chip *chip;
int ret;
......@@ -87,9 +91,13 @@ static int hsu_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (ret)
return ret;
ret = request_irq(chip->irq, hsu_pci_irq, 0, "hsu_dma_pci", chip);
ret = devm_add_action_or_reset(dev, hsu_pci_dma_remove, chip);
if (ret)
goto err_register_irq;
return ret;
ret = devm_request_irq(dev, chip->irq, hsu_pci_irq, 0, "hsu_dma_pci", chip);
if (ret)
return ret;
/*
* On Intel Tangier B0 and Anniedale the interrupt line, disregarding
......@@ -105,18 +113,6 @@ static int hsu_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pci_set_drvdata(pdev, chip);
return 0;
err_register_irq:
hsu_dma_remove(chip);
return ret;
}
static void hsu_pci_remove(struct pci_dev *pdev)
{
struct hsu_dma_chip *chip = pci_get_drvdata(pdev);
free_irq(chip->irq, chip);
hsu_dma_remove(chip);
}
static const struct pci_device_id hsu_pci_id_table[] = {
......@@ -130,7 +126,6 @@ static struct pci_driver hsu_pci_driver = {
.name = "hsu_dma_pci",
.id_table = hsu_pci_id_table,
.probe = hsu_pci_probe,
.remove = hsu_pci_remove,
};
module_pci_driver(hsu_pci_driver);
......
......@@ -196,6 +196,7 @@ int idxd_wq_enable(struct idxd_wq *wq)
}
wq->state = IDXD_WQ_ENABLED;
set_bit(wq->id, idxd->wq_enable_map);
dev_dbg(dev, "WQ %d enabled\n", wq->id);
return 0;
}
......@@ -223,6 +224,7 @@ int idxd_wq_disable(struct idxd_wq *wq, bool reset_config)
if (reset_config)
idxd_wq_disable_cleanup(wq);
clear_bit(wq->id, idxd->wq_enable_map);
wq->state = IDXD_WQ_DISABLED;
dev_dbg(dev, "WQ %d disabled\n", wq->id);
return 0;
......@@ -258,7 +260,6 @@ void idxd_wq_reset(struct idxd_wq *wq)
operand = BIT(wq->id % 16) | ((wq->id / 16) << 16);
idxd_cmd_exec(idxd, IDXD_CMD_RESET_WQ, operand, NULL);
idxd_wq_disable_cleanup(wq);
wq->state = IDXD_WQ_DISABLED;
}
int idxd_wq_map_portal(struct idxd_wq *wq)
......@@ -378,17 +379,20 @@ static void idxd_wq_disable_cleanup(struct idxd_wq *wq)
struct idxd_device *idxd = wq->idxd;
lockdep_assert_held(&wq->wq_lock);
wq->state = IDXD_WQ_DISABLED;
memset(wq->wqcfg, 0, idxd->wqcfg_size);
wq->type = IDXD_WQT_NONE;
wq->threshold = 0;
wq->priority = 0;
wq->ats_dis = 0;
wq->enqcmds_retries = IDXD_ENQCMDS_RETRIES;
clear_bit(WQ_FLAG_DEDICATED, &wq->flags);
clear_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags);
clear_bit(WQ_FLAG_ATS_DISABLE, &wq->flags);
memset(wq->name, 0, WQ_NAME_SIZE);
wq->max_xfer_bytes = WQ_DEFAULT_MAX_XFER;
wq->max_batch_size = WQ_DEFAULT_MAX_BATCH;
if (wq->opcap_bmap)
bitmap_copy(wq->opcap_bmap, idxd->opcap_bmap, IDXD_MAX_OPCAP_BITS);
}
static void idxd_wq_device_reset_cleanup(struct idxd_wq *wq)
......@@ -705,6 +709,8 @@ static void idxd_groups_clear_state(struct idxd_device *idxd)
group->tc_a = -1;
group->tc_b = -1;
}
group->desc_progress_limit = 0;
group->batch_progress_limit = 0;
}
}
......@@ -761,10 +767,10 @@ static void idxd_group_config_write(struct idxd_group *group)
/* setup GRPFLAGS */
grpcfg_offset = GRPFLGCFG_OFFSET(idxd, group->id);
iowrite32(group->grpcfg.flags.bits, idxd->reg_base + grpcfg_offset);
dev_dbg(dev, "GRPFLAGS flags[%d: %#x]: %#x\n",
iowrite64(group->grpcfg.flags.bits, idxd->reg_base + grpcfg_offset);
dev_dbg(dev, "GRPFLAGS flags[%d: %#x]: %#llx\n",
group->id, grpcfg_offset,
ioread32(idxd->reg_base + grpcfg_offset));
ioread64(idxd->reg_base + grpcfg_offset));
}
static int idxd_groups_config_write(struct idxd_device *idxd)
......@@ -807,7 +813,7 @@ static int idxd_wq_config_write(struct idxd_wq *wq)
struct idxd_device *idxd = wq->idxd;
struct device *dev = &idxd->pdev->dev;
u32 wq_offset;
int i;
int i, n;
if (!wq->group)
return 0;
......@@ -859,12 +865,23 @@ static int idxd_wq_config_write(struct idxd_wq *wq)
wq->wqcfg->bof = 1;
if (idxd->hw.wq_cap.wq_ats_support)
wq->wqcfg->wq_ats_disable = wq->ats_dis;
wq->wqcfg->wq_ats_disable = test_bit(WQ_FLAG_ATS_DISABLE, &wq->flags);
/* bytes 12-15 */
wq->wqcfg->max_xfer_shift = ilog2(wq->max_xfer_bytes);
wq->wqcfg->max_batch_shift = ilog2(wq->max_batch_size);
/* bytes 32-63 */
if (idxd->hw.wq_cap.op_config && wq->opcap_bmap) {
memset(wq->wqcfg->op_config, 0, IDXD_MAX_OPCAP_BITS / 8);
for_each_set_bit(n, wq->opcap_bmap, IDXD_MAX_OPCAP_BITS) {
int pos = n % BITS_PER_LONG_LONG;
int idx = n / BITS_PER_LONG_LONG;
wq->wqcfg->op_config[idx] |= BIT(pos);
}
}
dev_dbg(dev, "WQ %d CFGs\n", wq->id);
for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
wq_offset = WQCFG_OFFSET(idxd, wq->id, i);
......@@ -914,6 +931,9 @@ static void idxd_group_flags_setup(struct idxd_device *idxd)
group->grpcfg.flags.rdbufs_allowed = group->rdbufs_allowed;
else
group->grpcfg.flags.rdbufs_allowed = idxd->max_rdbufs;
group->grpcfg.flags.desc_progress_limit = group->desc_progress_limit;
group->grpcfg.flags.batch_progress_limit = group->batch_progress_limit;
}
}
......@@ -1096,8 +1116,8 @@ static void idxd_group_load_config(struct idxd_group *group)
}
grpcfg_offset = GRPFLGCFG_OFFSET(idxd, group->id);
group->grpcfg.flags.bits = ioread32(idxd->reg_base + grpcfg_offset);
dev_dbg(dev, "GRPFLAGS flags[%d: %#x]: %#x\n",
group->grpcfg.flags.bits = ioread64(idxd->reg_base + grpcfg_offset);
dev_dbg(dev, "GRPFLAGS flags[%d: %#x]: %#llx\n",
group->id, grpcfg_offset, group->grpcfg.flags.bits);
}
......
......@@ -11,6 +11,7 @@
#include <linux/idr.h>
#include <linux/pci.h>
#include <linux/ioasid.h>
#include <linux/bitmap.h>
#include <linux/perf_event.h>
#include <uapi/linux/idxd.h>
#include "registers.h"
......@@ -95,6 +96,8 @@ struct idxd_group {
u8 rdbufs_reserved;
int tc_a;
int tc_b;
int desc_progress_limit;
int batch_progress_limit;
};
struct idxd_pmu {
......@@ -132,6 +135,7 @@ enum idxd_wq_state {
enum idxd_wq_flag {
WQ_FLAG_DEDICATED = 0,
WQ_FLAG_BLOCK_ON_FAULT,
WQ_FLAG_ATS_DISABLE,
};
enum idxd_wq_type {
......@@ -194,6 +198,8 @@ struct idxd_wq {
enum idxd_wq_state state;
unsigned long flags;
union wqcfg *wqcfg;
unsigned long *opcap_bmap;
struct dsa_hw_desc **hw_descs;
int num_descs;
union {
......@@ -208,7 +214,6 @@ struct idxd_wq {
char name[WQ_NAME_SIZE + 1];
u64 max_xfer_bytes;
u32 max_batch_size;
bool ats_dis;
};
struct idxd_engine {
......@@ -299,6 +304,7 @@ struct idxd_device {
int rdbuf_limit;
int nr_rdbufs; /* non-reserved read buffers */
unsigned int wqcfg_size;
unsigned long *wq_enable_map;
union sw_err_reg sw_err;
wait_queue_head_t cmd_waitq;
......@@ -308,6 +314,8 @@ struct idxd_device {
struct work_struct work;
struct idxd_pmu *idxd_pmu;
unsigned long *opcap_bmap;
};
/* IDXD software descriptor */
......
......@@ -151,6 +151,12 @@ static int idxd_setup_wqs(struct idxd_device *idxd)
if (!idxd->wqs)
return -ENOMEM;
idxd->wq_enable_map = bitmap_zalloc_node(idxd->max_wqs, GFP_KERNEL, dev_to_node(dev));
if (!idxd->wq_enable_map) {
kfree(idxd->wqs);
return -ENOMEM;
}
for (i = 0; i < idxd->max_wqs; i++) {
wq = kzalloc_node(sizeof(*wq), GFP_KERNEL, dev_to_node(dev));
if (!wq) {
......@@ -185,6 +191,16 @@ static int idxd_setup_wqs(struct idxd_device *idxd)
rc = -ENOMEM;
goto err;
}
if (idxd->hw.wq_cap.op_config) {
wq->opcap_bmap = bitmap_zalloc(IDXD_MAX_OPCAP_BITS, GFP_KERNEL);
if (!wq->opcap_bmap) {
put_device(conf_dev);
rc = -ENOMEM;
goto err;
}
bitmap_copy(wq->opcap_bmap, idxd->opcap_bmap, IDXD_MAX_OPCAP_BITS);
}
idxd->wqs[i] = wq;
}
......@@ -369,6 +385,19 @@ static void idxd_read_table_offsets(struct idxd_device *idxd)
dev_dbg(dev, "IDXD Perfmon Offset: %#x\n", idxd->perfmon_offset);
}
static void multi_u64_to_bmap(unsigned long *bmap, u64 *val, int count)
{
int i, j, nr;
for (i = 0, nr = 0; i < count; i++) {
for (j = 0; j < BITS_PER_LONG_LONG; j++) {
if (val[i] & BIT(j))
set_bit(nr, bmap);
nr++;
}
}
}
static void idxd_read_caps(struct idxd_device *idxd)
{
struct device *dev = &idxd->pdev->dev;
......@@ -427,6 +456,7 @@ static void idxd_read_caps(struct idxd_device *idxd)
IDXD_OPCAP_OFFSET + i * sizeof(u64));
dev_dbg(dev, "opcap[%d]: %#llx\n", i, idxd->hw.opcap.bits[i]);
}
multi_u64_to_bmap(idxd->opcap_bmap, &idxd->hw.opcap.bits[0], 4);
}
static struct idxd_device *idxd_alloc(struct pci_dev *pdev, struct idxd_driver_data *data)
......@@ -448,6 +478,12 @@ static struct idxd_device *idxd_alloc(struct pci_dev *pdev, struct idxd_driver_d
if (idxd->id < 0)
return NULL;
idxd->opcap_bmap = bitmap_zalloc_node(IDXD_MAX_OPCAP_BITS, GFP_KERNEL, dev_to_node(dev));
if (!idxd->opcap_bmap) {
ida_free(&idxd_ida, idxd->id);
return NULL;
}
device_initialize(conf_dev);
conf_dev->parent = dev;
conf_dev->bus = &dsa_bus_type;
......
......@@ -17,12 +17,6 @@ enum irq_work_type {
IRQ_WORK_PROCESS_FAULT,
};
struct idxd_fault {
struct work_struct work;
u64 addr;
struct idxd_device *idxd;
};
struct idxd_resubmit {
struct work_struct work;
struct idxd_desc *desc;
......@@ -49,11 +43,12 @@ static void idxd_device_reinit(struct work_struct *work)
goto out;
for (i = 0; i < idxd->max_wqs; i++) {
struct idxd_wq *wq = idxd->wqs[i];
if (test_bit(i, idxd->wq_enable_map)) {
struct idxd_wq *wq = idxd->wqs[i];
if (wq->state == IDXD_WQ_ENABLED) {
rc = idxd_wq_enable(wq);
if (rc < 0) {
clear_bit(i, idxd->wq_enable_map);
dev_warn(dev, "Unable to re-enable wq %s\n",
dev_name(wq_confdev(wq)));
}
......@@ -324,13 +319,11 @@ static int process_misc_interrupts(struct idxd_device *idxd, u32 cause)
idxd->state = IDXD_DEV_HALTED;
idxd_wqs_quiesce(idxd);
idxd_wqs_unmap_portal(idxd);
spin_lock(&idxd->dev_lock);
idxd_device_clear_state(idxd);
dev_err(&idxd->pdev->dev,
"idxd halted, need %s.\n",
gensts.reset_type == IDXD_DEVICE_RESET_FLR ?
"FLR" : "system reset");
spin_unlock(&idxd->dev_lock);
return -ENXIO;
}
}
......
......@@ -54,7 +54,8 @@ union wq_cap_reg {
u64 priority:1;
u64 occupancy:1;
u64 occupancy_int:1;
u64 rsvd3:10;
u64 op_config:1;
u64 rsvd3:9;
};
u64 bits;
} __packed;
......@@ -67,7 +68,8 @@ union group_cap_reg {
u64 total_rdbufs:8; /* formerly total_tokens */
u64 rdbuf_ctrl:1; /* formerly token_en */
u64 rdbuf_limit:1; /* formerly token_limit */
u64 rsvd:46;
u64 progress_limit:1; /* descriptor and batch descriptor */
u64 rsvd:45;
};
u64 bits;
} __packed;
......@@ -90,6 +92,8 @@ struct opcap {
u64 bits[4];
};
#define IDXD_MAX_OPCAP_BITS 256U
#define IDXD_OPCAP_OFFSET 0x40
#define IDXD_TABLE_OFFSET 0x60
......@@ -285,16 +289,20 @@ union msix_perm {
union group_flags {
struct {
u32 tc_a:3;
u32 tc_b:3;
u32 rsvd:1;
u32 use_rdbuf_limit:1;
u32 rdbufs_reserved:8;
u32 rsvd2:4;
u32 rdbufs_allowed:8;
u32 rsvd3:4;
u64 tc_a:3;
u64 tc_b:3;
u64 rsvd:1;
u64 use_rdbuf_limit:1;
u64 rdbufs_reserved:8;
u64 rsvd2:4;
u64 rdbufs_allowed:8;
u64 rsvd3:4;
u64 desc_progress_limit:2;
u64 rsvd4:2;
u64 batch_progress_limit:2;
u64 rsvd5:26;
};
u32 bits;
u64 bits;
} __packed;
struct grpcfg {
......@@ -348,8 +356,11 @@ union wqcfg {
/* bytes 28-31 */
u32 rsvd8;
/* bytes 32-63 */
u64 op_config[4];
};
u32 bits[8];
u32 bits[16];
} __packed;
#define WQCFG_PASID_IDX 2
......
......@@ -443,6 +443,67 @@ static struct device_attribute dev_attr_group_traffic_class_b =
__ATTR(traffic_class_b, 0644, group_traffic_class_b_show,
group_traffic_class_b_store);
static ssize_t group_desc_progress_limit_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct idxd_group *group = confdev_to_group(dev);
return sysfs_emit(buf, "%d\n", group->desc_progress_limit);
}
static ssize_t group_desc_progress_limit_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct idxd_group *group = confdev_to_group(dev);
int val, rc;
rc = kstrtoint(buf, 10, &val);
if (rc < 0)
return -EINVAL;
if (val & ~GENMASK(1, 0))
return -EINVAL;
group->desc_progress_limit = val;
return count;
}
static struct device_attribute dev_attr_group_desc_progress_limit =
__ATTR(desc_progress_limit, 0644, group_desc_progress_limit_show,
group_desc_progress_limit_store);
static ssize_t group_batch_progress_limit_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct idxd_group *group = confdev_to_group(dev);
return sysfs_emit(buf, "%d\n", group->batch_progress_limit);
}
static ssize_t group_batch_progress_limit_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct idxd_group *group = confdev_to_group(dev);
int val, rc;
rc = kstrtoint(buf, 10, &val);
if (rc < 0)
return -EINVAL;
if (val & ~GENMASK(1, 0))
return -EINVAL;
group->batch_progress_limit = val;
return count;
}
static struct device_attribute dev_attr_group_batch_progress_limit =
__ATTR(batch_progress_limit, 0644, group_batch_progress_limit_show,
group_batch_progress_limit_store);
static struct attribute *idxd_group_attributes[] = {
&dev_attr_group_work_queues.attr,
&dev_attr_group_engines.attr,
......@@ -454,11 +515,35 @@ static struct attribute *idxd_group_attributes[] = {
&dev_attr_group_read_buffers_reserved.attr,
&dev_attr_group_traffic_class_a.attr,
&dev_attr_group_traffic_class_b.attr,
&dev_attr_group_desc_progress_limit.attr,
&dev_attr_group_batch_progress_limit.attr,
NULL,
};
static bool idxd_group_attr_progress_limit_invisible(struct attribute *attr,
struct idxd_device *idxd)
{
return (attr == &dev_attr_group_desc_progress_limit.attr ||
attr == &dev_attr_group_batch_progress_limit.attr) &&
!idxd->hw.group_cap.progress_limit;
}
static umode_t idxd_group_attr_visible(struct kobject *kobj,
struct attribute *attr, int n)
{
struct device *dev = container_of(kobj, struct device, kobj);
struct idxd_group *group = confdev_to_group(dev);
struct idxd_device *idxd = group->idxd;
if (idxd_group_attr_progress_limit_invisible(attr, idxd))
return 0;
return attr->mode;
}
static const struct attribute_group idxd_group_attribute_group = {
.attrs = idxd_group_attributes,
.is_visible = idxd_group_attr_visible,
};
static const struct attribute_group *idxd_group_attribute_groups[] = {
......@@ -973,7 +1058,7 @@ static ssize_t wq_ats_disable_show(struct device *dev, struct device_attribute *
{
struct idxd_wq *wq = confdev_to_wq(dev);
return sysfs_emit(buf, "%u\n", wq->ats_dis);
return sysfs_emit(buf, "%u\n", test_bit(WQ_FLAG_ATS_DISABLE, &wq->flags));
}
static ssize_t wq_ats_disable_store(struct device *dev, struct device_attribute *attr,
......@@ -994,7 +1079,10 @@ static ssize_t wq_ats_disable_store(struct device *dev, struct device_attribute
if (rc < 0)
return rc;
wq->ats_dis = ats_dis;
if (ats_dis)
set_bit(WQ_FLAG_ATS_DISABLE, &wq->flags);
else
clear_bit(WQ_FLAG_ATS_DISABLE, &wq->flags);
return count;
}
......@@ -1055,6 +1143,68 @@ static ssize_t wq_enqcmds_retries_store(struct device *dev, struct device_attrib
static struct device_attribute dev_attr_wq_enqcmds_retries =
__ATTR(enqcmds_retries, 0644, wq_enqcmds_retries_show, wq_enqcmds_retries_store);
static ssize_t wq_op_config_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct idxd_wq *wq = confdev_to_wq(dev);
return sysfs_emit(buf, "%*pb\n", IDXD_MAX_OPCAP_BITS, wq->opcap_bmap);
}
static int idxd_verify_supported_opcap(struct idxd_device *idxd, unsigned long *opmask)
{
int bit;
/*
* The OPCAP is defined as 256 bits that represents each operation the device
* supports per bit. Iterate through all the bits and check if the input mask
* is set for bits that are not set in the OPCAP for the device. If no OPCAP
* bit is set and input mask has the bit set, then return error.
*/
for_each_set_bit(bit, opmask, IDXD_MAX_OPCAP_BITS) {
if (!test_bit(bit, idxd->opcap_bmap))
return -EINVAL;
}
return 0;
}
static ssize_t wq_op_config_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct idxd_wq *wq = confdev_to_wq(dev);
struct idxd_device *idxd = wq->idxd;
unsigned long *opmask;
int rc;
if (wq->state != IDXD_WQ_DISABLED)
return -EPERM;
opmask = bitmap_zalloc(IDXD_MAX_OPCAP_BITS, GFP_KERNEL);
if (!opmask)
return -ENOMEM;
rc = bitmap_parse(buf, count, opmask, IDXD_MAX_OPCAP_BITS);
if (rc < 0)
goto err;
rc = idxd_verify_supported_opcap(idxd, opmask);
if (rc < 0)
goto err;
bitmap_copy(wq->opcap_bmap, opmask, IDXD_MAX_OPCAP_BITS);
bitmap_free(opmask);
return count;
err:
bitmap_free(opmask);
return rc;
}
static struct device_attribute dev_attr_wq_op_config =
__ATTR(op_config, 0644, wq_op_config_show, wq_op_config_store);
static struct attribute *idxd_wq_attributes[] = {
&dev_attr_wq_clients.attr,
&dev_attr_wq_state.attr,
......@@ -1072,11 +1222,33 @@ static struct attribute *idxd_wq_attributes[] = {
&dev_attr_wq_ats_disable.attr,
&dev_attr_wq_occupancy.attr,
&dev_attr_wq_enqcmds_retries.attr,
&dev_attr_wq_op_config.attr,
NULL,
};
static bool idxd_wq_attr_op_config_invisible(struct attribute *attr,
struct idxd_device *idxd)
{
return attr == &dev_attr_wq_op_config.attr &&
!idxd->hw.wq_cap.op_config;
}
static umode_t idxd_wq_attr_visible(struct kobject *kobj,
struct attribute *attr, int n)
{
struct device *dev = container_of(kobj, struct device, kobj);
struct idxd_wq *wq = confdev_to_wq(dev);
struct idxd_device *idxd = wq->idxd;
if (idxd_wq_attr_op_config_invisible(attr, idxd))
return 0;
return attr->mode;
}
static const struct attribute_group idxd_wq_attribute_group = {
.attrs = idxd_wq_attributes,
.is_visible = idxd_wq_attr_visible,
};
static const struct attribute_group *idxd_wq_attribute_groups[] = {
......@@ -1088,6 +1260,7 @@ static void idxd_conf_wq_release(struct device *dev)
{
struct idxd_wq *wq = confdev_to_wq(dev);
bitmap_free(wq->opcap_bmap);
kfree(wq->wqcfg);
kfree(wq);
}
......@@ -1177,14 +1350,8 @@ static ssize_t op_cap_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct idxd_device *idxd = confdev_to_idxd(dev);
int i, rc = 0;
for (i = 0; i < 4; i++)
rc += sysfs_emit_at(buf, rc, "%#llx ", idxd->hw.opcap.bits[i]);
rc--;
rc += sysfs_emit_at(buf, rc, "\n");
return rc;
return sysfs_emit(buf, "%*pb\n", IDXD_MAX_OPCAP_BITS, idxd->opcap_bmap);
}
static DEVICE_ATTR_RO(op_cap);
......@@ -1405,9 +1572,11 @@ static void idxd_conf_device_release(struct device *dev)
struct idxd_device *idxd = confdev_to_idxd(dev);
kfree(idxd->groups);
bitmap_free(idxd->wq_enable_map);
kfree(idxd->wqs);
kfree(idxd->engines);
ida_free(&idxd_ida, idxd->id);
bitmap_free(idxd->opcap_bmap);
kfree(idxd);
}
......
......@@ -656,7 +656,7 @@ static void __cleanup(struct ioatdma_chan *ioat_chan, dma_addr_t phys_complete)
if (active - i == 0) {
dev_dbg(to_dev(ioat_chan), "%s: cancel completion timeout\n",
__func__);
mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
mod_timer_pending(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
}
/* microsecond delay by sysfs variable per pending descriptor */
......@@ -682,7 +682,7 @@ static void ioat_cleanup(struct ioatdma_chan *ioat_chan)
if (chanerr &
(IOAT_CHANERR_HANDLE_MASK | IOAT_CHANERR_RECOVER_MASK)) {
mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
mod_timer_pending(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
ioat_eh(ioat_chan);
}
}
......@@ -879,7 +879,7 @@ static void check_active(struct ioatdma_chan *ioat_chan)
}
if (test_and_clear_bit(IOAT_CHAN_ACTIVE, &ioat_chan->state))
mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
mod_timer_pending(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
}
static void ioat_reboot_chan(struct ioatdma_chan *ioat_chan)
......
......@@ -196,10 +196,8 @@ extern const struct sysfs_ops ioat_sysfs_ops;
extern struct ioat_sysfs_entry ioat_version_attr;
extern struct ioat_sysfs_entry ioat_cap_attr;
extern int ioat_pending_level;
extern int ioat_ring_alloc_order;
extern struct kobj_type ioat_ktype;
extern struct kmem_cache *ioat_cache;
extern int ioat_ring_max_alloc_order;
extern struct kmem_cache *ioat_sed_cache;
static inline struct ioatdma_chan *to_ioat_chan(struct dma_chan *c)
......
......@@ -670,7 +670,7 @@ static enum dma_status mxs_dma_tx_status(struct dma_chan *chan,
return mxs_chan->status;
}
static int __init mxs_dma_init(struct mxs_dma_engine *mxs_dma)
static int mxs_dma_init(struct mxs_dma_engine *mxs_dma)
{
int ret;
......@@ -741,7 +741,7 @@ static struct dma_chan *mxs_dma_xlate(struct of_phandle_args *dma_spec,
ofdma->of_node);
}
static int __init mxs_dma_probe(struct platform_device *pdev)
static int mxs_dma_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
const struct mxs_dma_type *dma_type;
......@@ -839,10 +839,7 @@ static struct platform_driver mxs_dma_driver = {
.name = "mxs-dma",
.of_match_table = mxs_dma_dt_ids,
},
.probe = mxs_dma_probe,
};
static int __init mxs_dma_module_init(void)
{
return platform_driver_probe(&mxs_dma_driver, mxs_dma_probe);
}
subsys_initcall(mxs_dma_module_init);
builtin_platform_driver(mxs_dma_driver);
......@@ -2752,7 +2752,6 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
return NULL;
pch->cyclic = true;
desc->txd.flags = flags;
return &desc->txd;
}
......@@ -2804,8 +2803,6 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
desc->bytes_requested = len;
desc->txd.flags = flags;
return &desc->txd;
}
......@@ -2889,7 +2886,6 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
}
/* Return the last desc in the chain */
desc->txd.flags = flg;
return &desc->txd;
}
......
......@@ -1150,9 +1150,9 @@ static void gpi_ev_tasklet(unsigned long data)
{
struct gpii *gpii = (struct gpii *)data;
read_lock_bh(&gpii->pm_lock);
read_lock(&gpii->pm_lock);
if (!REG_ACCESS_VALID(gpii->pm_state)) {
read_unlock_bh(&gpii->pm_lock);
read_unlock(&gpii->pm_lock);
dev_err(gpii->gpi_dev->dev, "not processing any events, pm_state:%s\n",
TO_GPI_PM_STR(gpii->pm_state));
return;
......@@ -1163,7 +1163,7 @@ static void gpi_ev_tasklet(unsigned long data)
/* enable IEOB, switching back to interrupts */
gpi_config_interrupts(gpii, MASK_IEOB_SETTINGS, 1);
read_unlock_bh(&gpii->pm_lock);
read_unlock(&gpii->pm_lock);
}
/* marks all pending events for the channel as stale */
......@@ -2288,6 +2288,7 @@ static int gpi_probe(struct platform_device *pdev)
static const struct of_device_id gpi_of_match[] = {
{ .compatible = "qcom,sc7280-gpi-dma", .data = (void *)0x10000 },
{ .compatible = "qcom,sdm845-gpi-dma", .data = (void *)0x0 },
{ .compatible = "qcom,sm6350-gpi-dma", .data = (void *)0x10000 },
{ .compatible = "qcom,sm8150-gpi-dma", .data = (void *)0x0 },
{ .compatible = "qcom,sm8250-gpi-dma", .data = (void *)0x0 },
{ .compatible = "qcom,sm8350-gpi-dma", .data = (void *)0x10000 },
......
......@@ -379,13 +379,13 @@ static struct dma_async_tx_descriptor *adm_prep_slave_sg(struct dma_chan *chan,
if (blk_size < 0) {
dev_err(adev->dev, "invalid burst value: %d\n",
burst);
return ERR_PTR(-EINVAL);
return NULL;
}
crci = achan->crci & 0xf;
if (!crci || achan->crci > 0x1f) {
dev_err(adev->dev, "invalid crci value\n");
return ERR_PTR(-EINVAL);
return NULL;
}
}
......@@ -403,8 +403,10 @@ static struct dma_async_tx_descriptor *adm_prep_slave_sg(struct dma_chan *chan,
}
async_desc = kzalloc(sizeof(*async_desc), GFP_NOWAIT);
if (!async_desc)
return ERR_PTR(-ENOMEM);
if (!async_desc) {
dev_err(adev->dev, "not enough memory for async_desc struct\n");
return NULL;
}
async_desc->mux = achan->mux ? ADM_CRCI_CTL_MUX_SEL : 0;
async_desc->crci = crci;
......@@ -414,8 +416,10 @@ static struct dma_async_tx_descriptor *adm_prep_slave_sg(struct dma_chan *chan,
sizeof(*cple) + 2 * ADM_DESC_ALIGN;
async_desc->cpl = kzalloc(async_desc->dma_len, GFP_NOWAIT);
if (!async_desc->cpl)
if (!async_desc->cpl) {
dev_err(adev->dev, "not enough memory for cpl struct\n");
goto free;
}
async_desc->adev = adev;
......@@ -437,8 +441,10 @@ static struct dma_async_tx_descriptor *adm_prep_slave_sg(struct dma_chan *chan,
async_desc->dma_addr = dma_map_single(adev->dev, async_desc->cpl,
async_desc->dma_len,
DMA_TO_DEVICE);
if (dma_mapping_error(adev->dev, async_desc->dma_addr))
if (dma_mapping_error(adev->dev, async_desc->dma_addr)) {
dev_err(adev->dev, "dma mapping error for cpl\n");
goto free;
}
cple_addr = async_desc->dma_addr + ((void *)cple - async_desc->cpl);
......@@ -454,7 +460,7 @@ static struct dma_async_tx_descriptor *adm_prep_slave_sg(struct dma_chan *chan,
free:
kfree(async_desc);
return ERR_PTR(-ENOMEM);
return NULL;
}
/**
......@@ -494,7 +500,7 @@ static int adm_slave_config(struct dma_chan *chan, struct dma_slave_config *cfg)
spin_lock_irqsave(&achan->vc.lock, flag);
memcpy(&achan->slave, cfg, sizeof(struct dma_slave_config));
if (cfg->peripheral_size == sizeof(config))
if (cfg->peripheral_size == sizeof(*config))
achan->crci = config->crci;
spin_unlock_irqrestore(&achan->vc.lock, flag);
......
......@@ -1094,7 +1094,7 @@ static int s3c24xx_dma_init_virtual_channels(struct s3c24xx_dma_engine *s3cdma,
INIT_LIST_HEAD(&dmadev->channels);
/*
* Register as many many memcpy as we have physical channels,
* Register as many memcpy as we have physical channels,
* we won't always be able to use all but the code will have
* to cope with that situation.
*/
......
......@@ -405,10 +405,8 @@ static int sf_pdma_irq_init(struct platform_device *pdev, struct sf_pdma *pdma)
chan = &pdma->chans[i];
irq = platform_get_irq(pdev, i * 2);
if (irq < 0) {
dev_err(&pdev->dev, "ch(%d) Can't get done irq.\n", i);
if (irq < 0)
return -EINVAL;
}
r = devm_request_irq(&pdev->dev, irq, sf_pdma_done_isr, 0,
dev_name(&pdev->dev), (void *)chan);
......@@ -420,10 +418,8 @@ static int sf_pdma_irq_init(struct platform_device *pdev, struct sf_pdma *pdma)
chan->txirq = irq;
irq = platform_get_irq(pdev, (i * 2) + 1);
if (irq < 0) {
dev_err(&pdev->dev, "ch(%d) Can't get err irq.\n", i);
if (irq < 0)
return -EINVAL;
}
r = devm_request_irq(&pdev->dev, irq, sf_pdma_err_isr, 0,
dev_name(&pdev->dev), (void *)chan);
......
......@@ -103,8 +103,8 @@ struct rcar_dmac_desc_page {
struct list_head node;
union {
struct rcar_dmac_desc descs[0];
struct rcar_dmac_xfer_chunk chunks[0];
DECLARE_FLEX_ARRAY(struct rcar_dmac_desc, descs);
DECLARE_FLEX_ARRAY(struct rcar_dmac_xfer_chunk, chunks);
};
};
......
......@@ -39,13 +39,13 @@ struct stm32_dmamux_data {
u32 dma_requests; /* Number of DMA requests connected to DMAMUX */
u32 dmamux_requests; /* Number of DMA requests routed toward DMAs */
spinlock_t lock; /* Protects register access */
unsigned long *dma_inuse; /* Used DMA channel */
DECLARE_BITMAP(dma_inuse, STM32_DMAMUX_MAX_DMA_REQUESTS); /* Used DMA channel */
u32 ccr[STM32_DMAMUX_MAX_DMA_REQUESTS]; /* Used to backup CCR register
* in suspend
*/
u32 dma_reqs[]; /* Number of DMA Request per DMA masters.
* [0] holds number of DMA Masters.
* To be kept at very end end of this structure
* To be kept at very end of this structure
*/
};
......@@ -147,7 +147,7 @@ static void *stm32_dmamux_route_allocate(struct of_phandle_args *dma_spec,
mux->request = dma_spec->args[0];
/* craft DMA spec */
dma_spec->args[3] = dma_spec->args[2];
dma_spec->args[3] = dma_spec->args[2] | mux->chan_id << 16;
dma_spec->args[2] = dma_spec->args[1];
dma_spec->args[1] = 0;
dma_spec->args[0] = mux->chan_id - min;
......@@ -229,12 +229,6 @@ static int stm32_dmamux_probe(struct platform_device *pdev)
stm32_dmamux->dma_requests = dma_req;
stm32_dmamux->dma_reqs[0] = count;
stm32_dmamux->dma_inuse = devm_kcalloc(&pdev->dev,
BITS_TO_LONGS(dma_req),
sizeof(unsigned long),
GFP_KERNEL);
if (!stm32_dmamux->dma_inuse)
return -ENOMEM;
if (device_property_read_u32(&pdev->dev, "dma-requests",
&stm32_dmamux->dmamux_requests)) {
......
......@@ -199,6 +199,7 @@ struct stm32_mdma_chan_config {
u32 transfer_config;
u32 mask_addr;
u32 mask_data;
bool m2m_hw; /* True when MDMA is triggered by STM32 DMA */
};
struct stm32_mdma_hwdesc {
......@@ -227,6 +228,12 @@ struct stm32_mdma_desc {
struct stm32_mdma_desc_node node[];
};
struct stm32_mdma_dma_config {
u32 request; /* STM32 DMA channel stream id, triggering MDMA */
u32 cmar; /* STM32 DMA interrupt flag clear register address */
u32 cmdr; /* STM32 DMA Transfer Complete flag */
};
struct stm32_mdma_chan {
struct virt_dma_chan vchan;
struct dma_pool *desc_pool;
......@@ -539,13 +546,23 @@ static int stm32_mdma_set_xfer_param(struct stm32_mdma_chan *chan,
dst_addr = chan->dma_config.dst_addr;
/* Set device data size */
if (chan_config->m2m_hw)
dst_addr_width = stm32_mdma_get_max_width(dst_addr, buf_len,
STM32_MDMA_MAX_BUF_LEN);
dst_bus_width = stm32_mdma_get_width(chan, dst_addr_width);
if (dst_bus_width < 0)
return dst_bus_width;
ctcr &= ~STM32_MDMA_CTCR_DSIZE_MASK;
ctcr |= STM32_MDMA_CTCR_DSIZE(dst_bus_width);
if (chan_config->m2m_hw) {
ctcr &= ~STM32_MDMA_CTCR_DINCOS_MASK;
ctcr |= STM32_MDMA_CTCR_DINCOS(dst_bus_width);
}
/* Set device burst value */
if (chan_config->m2m_hw)
dst_maxburst = STM32_MDMA_MAX_BUF_LEN / dst_addr_width;
dst_best_burst = stm32_mdma_get_best_burst(buf_len, tlen,
dst_maxburst,
dst_addr_width);
......@@ -588,13 +605,24 @@ static int stm32_mdma_set_xfer_param(struct stm32_mdma_chan *chan,
src_addr = chan->dma_config.src_addr;
/* Set device data size */
if (chan_config->m2m_hw)
src_addr_width = stm32_mdma_get_max_width(src_addr, buf_len,
STM32_MDMA_MAX_BUF_LEN);
src_bus_width = stm32_mdma_get_width(chan, src_addr_width);
if (src_bus_width < 0)
return src_bus_width;
ctcr &= ~STM32_MDMA_CTCR_SSIZE_MASK;
ctcr |= STM32_MDMA_CTCR_SSIZE(src_bus_width);
if (chan_config->m2m_hw) {
ctcr &= ~STM32_MDMA_CTCR_SINCOS_MASK;
ctcr |= STM32_MDMA_CTCR_SINCOS(src_bus_width);
}
/* Set device burst value */
if (chan_config->m2m_hw)
src_maxburst = STM32_MDMA_MAX_BUF_LEN / src_addr_width;
src_best_burst = stm32_mdma_get_best_burst(buf_len, tlen,
src_maxburst,
src_addr_width);
......@@ -702,11 +730,15 @@ static int stm32_mdma_setup_xfer(struct stm32_mdma_chan *chan,
{
struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
struct dma_slave_config *dma_config = &chan->dma_config;
struct stm32_mdma_chan_config *chan_config = &chan->chan_config;
struct scatterlist *sg;
dma_addr_t src_addr, dst_addr;
u32 ccr, ctcr, ctbr;
u32 m2m_hw_period, ccr, ctcr, ctbr;
int i, ret = 0;
if (chan_config->m2m_hw)
m2m_hw_period = sg_dma_len(sgl);
for_each_sg(sgl, sg, sg_len, i) {
if (sg_dma_len(sg) > STM32_MDMA_MAX_BLOCK_LEN) {
dev_err(chan2dev(chan), "Invalid block len\n");
......@@ -716,6 +748,8 @@ static int stm32_mdma_setup_xfer(struct stm32_mdma_chan *chan,
if (direction == DMA_MEM_TO_DEV) {
src_addr = sg_dma_address(sg);
dst_addr = dma_config->dst_addr;
if (chan_config->m2m_hw && (i & 1))
dst_addr += m2m_hw_period;
ret = stm32_mdma_set_xfer_param(chan, direction, &ccr,
&ctcr, &ctbr, src_addr,
sg_dma_len(sg));
......@@ -723,6 +757,8 @@ static int stm32_mdma_setup_xfer(struct stm32_mdma_chan *chan,
src_addr);
} else {
src_addr = dma_config->src_addr;
if (chan_config->m2m_hw && (i & 1))
src_addr += m2m_hw_period;
dst_addr = sg_dma_address(sg);
ret = stm32_mdma_set_xfer_param(chan, direction, &ccr,
&ctcr, &ctbr, dst_addr,
......@@ -755,6 +791,7 @@ stm32_mdma_prep_slave_sg(struct dma_chan *c, struct scatterlist *sgl,
unsigned long flags, void *context)
{
struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);
struct stm32_mdma_chan_config *chan_config = &chan->chan_config;
struct stm32_mdma_desc *desc;
int i, ret;
......@@ -777,6 +814,21 @@ stm32_mdma_prep_slave_sg(struct dma_chan *c, struct scatterlist *sgl,
if (ret < 0)
goto xfer_setup_err;
/*
* In case of M2M HW transfer triggered by STM32 DMA, we do not have to clear the
* transfer complete flag by hardware in order to let the CPU rearm the STM32 DMA
* with the next sg element and update some data in dmaengine framework.
*/
if (chan_config->m2m_hw && direction == DMA_MEM_TO_DEV) {
struct stm32_mdma_hwdesc *hwdesc;
for (i = 0; i < sg_len; i++) {
hwdesc = desc->node[i].hwdesc;
hwdesc->cmar = 0;
hwdesc->cmdr = 0;
}
}
desc->cyclic = false;
return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
......@@ -798,6 +850,7 @@ stm32_mdma_prep_dma_cyclic(struct dma_chan *c, dma_addr_t buf_addr,
struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);
struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
struct dma_slave_config *dma_config = &chan->dma_config;
struct stm32_mdma_chan_config *chan_config = &chan->chan_config;
struct stm32_mdma_desc *desc;
dma_addr_t src_addr, dst_addr;
u32 ccr, ctcr, ctbr, count;
......@@ -858,8 +911,12 @@ stm32_mdma_prep_dma_cyclic(struct dma_chan *c, dma_addr_t buf_addr,
if (direction == DMA_MEM_TO_DEV) {
src_addr = buf_addr + i * period_len;
dst_addr = dma_config->dst_addr;
if (chan_config->m2m_hw && (i & 1))
dst_addr += period_len;
} else {
src_addr = dma_config->src_addr;
if (chan_config->m2m_hw && (i & 1))
src_addr += period_len;
dst_addr = buf_addr + i * period_len;
}
......@@ -1244,6 +1301,17 @@ static int stm32_mdma_slave_config(struct dma_chan *c,
memcpy(&chan->dma_config, config, sizeof(*config));
/* Check if user is requesting STM32 DMA to trigger MDMA */
if (config->peripheral_size) {
struct stm32_mdma_dma_config *mdma_config;
mdma_config = (struct stm32_mdma_dma_config *)chan->dma_config.peripheral_config;
chan->chan_config.request = mdma_config->request;
chan->chan_config.mask_addr = mdma_config->cmar;
chan->chan_config.mask_data = mdma_config->cmdr;
chan->chan_config.m2m_hw = true;
}
return 0;
}
......
......@@ -352,12 +352,6 @@ static inline void edma_modify_array(struct edma_cc *ecc, int offset, int i,
edma_modify(ecc, offset + (i << 2), and, or);
}
static inline void edma_or_array(struct edma_cc *ecc, int offset, int i,
unsigned or)
{
edma_or(ecc, offset + (i << 2), or);
}
static inline void edma_or_array2(struct edma_cc *ecc, int offset, int i, int j,
unsigned or)
{
......@@ -370,11 +364,6 @@ static inline void edma_write_array2(struct edma_cc *ecc, int offset, int i,
edma_write(ecc, offset + ((i * 2 + j) << 2), val);
}
static inline unsigned int edma_shadow0_read(struct edma_cc *ecc, int offset)
{
return edma_read(ecc, EDMA_SHADOW0 + offset);
}
static inline unsigned int edma_shadow0_read_array(struct edma_cc *ecc,
int offset, int i)
{
......@@ -393,36 +382,12 @@ static inline void edma_shadow0_write_array(struct edma_cc *ecc, int offset,
edma_write(ecc, EDMA_SHADOW0 + offset + (i << 2), val);
}
static inline unsigned int edma_param_read(struct edma_cc *ecc, int offset,
int param_no)
{
return edma_read(ecc, EDMA_PARM + offset + (param_no << 5));
}
static inline void edma_param_write(struct edma_cc *ecc, int offset,
int param_no, unsigned val)
{
edma_write(ecc, EDMA_PARM + offset + (param_no << 5), val);
}
static inline void edma_param_modify(struct edma_cc *ecc, int offset,
int param_no, unsigned and, unsigned or)
{
edma_modify(ecc, EDMA_PARM + offset + (param_no << 5), and, or);
}
static inline void edma_param_and(struct edma_cc *ecc, int offset, int param_no,
unsigned and)
{
edma_and(ecc, EDMA_PARM + offset + (param_no << 5), and);
}
static inline void edma_param_or(struct edma_cc *ecc, int offset, int param_no,
unsigned or)
{
edma_or(ecc, EDMA_PARM + offset + (param_no << 5), or);
}
static void edma_assign_priority_to_queue(struct edma_cc *ecc, int queue_no,
int priority)
{
......@@ -743,11 +708,6 @@ static void edma_free_channel(struct edma_chan *echan)
edma_setup_interrupt(echan, false);
}
static inline struct edma_cc *to_edma_cc(struct dma_device *d)
{
return container_of(d, struct edma_cc, dma_slave);
}
static inline struct edma_chan *to_edma_chan(struct dma_chan *c)
{
return container_of(c, struct edma_chan, vchan.chan);
......
......@@ -143,6 +143,57 @@ static struct psil_ep j7200_src_ep_map[] = {
/* PSI-L destination thread IDs, used for TX (DMA_MEM_TO_DEV) */
static struct psil_ep j7200_dst_ep_map[] = {
/* PDMA_MCASP - McASP0-2 */
PSIL_PDMA_MCASP(0xc400),
PSIL_PDMA_MCASP(0xc401),
PSIL_PDMA_MCASP(0xc402),
/* PDMA_SPI_G0 - SPI0-3 */
PSIL_PDMA_XY_PKT(0xc600),
PSIL_PDMA_XY_PKT(0xc601),
PSIL_PDMA_XY_PKT(0xc602),
PSIL_PDMA_XY_PKT(0xc603),
PSIL_PDMA_XY_PKT(0xc604),
PSIL_PDMA_XY_PKT(0xc605),
PSIL_PDMA_XY_PKT(0xc606),
PSIL_PDMA_XY_PKT(0xc607),
PSIL_PDMA_XY_PKT(0xc608),
PSIL_PDMA_XY_PKT(0xc609),
PSIL_PDMA_XY_PKT(0xc60a),
PSIL_PDMA_XY_PKT(0xc60b),
PSIL_PDMA_XY_PKT(0xc60c),
PSIL_PDMA_XY_PKT(0xc60d),
PSIL_PDMA_XY_PKT(0xc60e),
PSIL_PDMA_XY_PKT(0xc60f),
/* PDMA_SPI_G1 - SPI4-7 */
PSIL_PDMA_XY_PKT(0xc610),
PSIL_PDMA_XY_PKT(0xc611),
PSIL_PDMA_XY_PKT(0xc612),
PSIL_PDMA_XY_PKT(0xc613),
PSIL_PDMA_XY_PKT(0xc614),
PSIL_PDMA_XY_PKT(0xc615),
PSIL_PDMA_XY_PKT(0xc616),
PSIL_PDMA_XY_PKT(0xc617),
PSIL_PDMA_XY_PKT(0xc618),
PSIL_PDMA_XY_PKT(0xc619),
PSIL_PDMA_XY_PKT(0xc61a),
PSIL_PDMA_XY_PKT(0xc61b),
PSIL_PDMA_XY_PKT(0xc61c),
PSIL_PDMA_XY_PKT(0xc61d),
PSIL_PDMA_XY_PKT(0xc61e),
PSIL_PDMA_XY_PKT(0xc61f),
/* PDMA_USART_G0 - UART0-1 */
PSIL_PDMA_XY_PKT(0xc700),
PSIL_PDMA_XY_PKT(0xc701),
/* PDMA_USART_G1 - UART2-3 */
PSIL_PDMA_XY_PKT(0xc702),
PSIL_PDMA_XY_PKT(0xc703),
/* PDMA_USART_G2 - UART4-9 */
PSIL_PDMA_XY_PKT(0xc704),
PSIL_PDMA_XY_PKT(0xc705),
PSIL_PDMA_XY_PKT(0xc706),
PSIL_PDMA_XY_PKT(0xc707),
PSIL_PDMA_XY_PKT(0xc708),
PSIL_PDMA_XY_PKT(0xc709),
/* CPSW5 */
PSIL_ETHERNET(0xca00),
PSIL_ETHERNET(0xca01),
......@@ -161,6 +212,22 @@ static struct psil_ep j7200_dst_ep_map[] = {
PSIL_ETHERNET(0xf005),
PSIL_ETHERNET(0xf006),
PSIL_ETHERNET(0xf007),
/* MCU_PDMA_MISC_G0 - SPI0 */
PSIL_PDMA_XY_PKT(0xf100),
PSIL_PDMA_XY_PKT(0xf101),
PSIL_PDMA_XY_PKT(0xf102),
PSIL_PDMA_XY_PKT(0xf103),
/* MCU_PDMA_MISC_G1 - SPI1-2 */
PSIL_PDMA_XY_PKT(0xf200),
PSIL_PDMA_XY_PKT(0xf201),
PSIL_PDMA_XY_PKT(0xf202),
PSIL_PDMA_XY_PKT(0xf203),
PSIL_PDMA_XY_PKT(0xf204),
PSIL_PDMA_XY_PKT(0xf205),
PSIL_PDMA_XY_PKT(0xf206),
PSIL_PDMA_XY_PKT(0xf207),
/* MCU_PDMA_MISC_G2 - UART0 */
PSIL_PDMA_XY_PKT(0xf300),
/* SA2UL */
PSIL_SA2UL(0xf500, 1),
PSIL_SA2UL(0xf501, 1),
......
......@@ -266,6 +266,69 @@ static struct psil_ep j721e_dst_ep_map[] = {
PSIL_ETHERNET(0xc205),
PSIL_ETHERNET(0xc206),
PSIL_ETHERNET(0xc207),
/* PDMA6 (PSIL_PDMA_MCASP_G0) - McASP0-2 */
PSIL_PDMA_MCASP(0xc400),
PSIL_PDMA_MCASP(0xc401),
PSIL_PDMA_MCASP(0xc402),
/* PDMA7 (PSIL_PDMA_MCASP_G1) - McASP3-11 */
PSIL_PDMA_MCASP(0xc500),
PSIL_PDMA_MCASP(0xc501),
PSIL_PDMA_MCASP(0xc502),
PSIL_PDMA_MCASP(0xc503),
PSIL_PDMA_MCASP(0xc504),
PSIL_PDMA_MCASP(0xc505),
PSIL_PDMA_MCASP(0xc506),
PSIL_PDMA_MCASP(0xc507),
PSIL_PDMA_MCASP(0xc508),
/* PDMA8 (PDMA_MISC_G0) - SPI0-1 */
PSIL_PDMA_XY_PKT(0xc600),
PSIL_PDMA_XY_PKT(0xc601),
PSIL_PDMA_XY_PKT(0xc602),
PSIL_PDMA_XY_PKT(0xc603),
PSIL_PDMA_XY_PKT(0xc604),
PSIL_PDMA_XY_PKT(0xc605),
PSIL_PDMA_XY_PKT(0xc606),
PSIL_PDMA_XY_PKT(0xc607),
/* PDMA9 (PDMA_MISC_G1) - SPI2-3 */
PSIL_PDMA_XY_PKT(0xc60c),
PSIL_PDMA_XY_PKT(0xc60d),
PSIL_PDMA_XY_PKT(0xc60e),
PSIL_PDMA_XY_PKT(0xc60f),
PSIL_PDMA_XY_PKT(0xc610),
PSIL_PDMA_XY_PKT(0xc611),
PSIL_PDMA_XY_PKT(0xc612),
PSIL_PDMA_XY_PKT(0xc613),
/* PDMA10 (PDMA_MISC_G2) - SPI4-5 */
PSIL_PDMA_XY_PKT(0xc618),
PSIL_PDMA_XY_PKT(0xc619),
PSIL_PDMA_XY_PKT(0xc61a),
PSIL_PDMA_XY_PKT(0xc61b),
PSIL_PDMA_XY_PKT(0xc61c),
PSIL_PDMA_XY_PKT(0xc61d),
PSIL_PDMA_XY_PKT(0xc61e),
PSIL_PDMA_XY_PKT(0xc61f),
/* PDMA11 (PDMA_MISC_G3) */
PSIL_PDMA_XY_PKT(0xc624),
PSIL_PDMA_XY_PKT(0xc625),
PSIL_PDMA_XY_PKT(0xc626),
PSIL_PDMA_XY_PKT(0xc627),
PSIL_PDMA_XY_PKT(0xc628),
PSIL_PDMA_XY_PKT(0xc629),
PSIL_PDMA_XY_PKT(0xc630),
PSIL_PDMA_XY_PKT(0xc63a),
/* PDMA13 (PDMA_USART_G0) - UART0-1 */
PSIL_PDMA_XY_PKT(0xc700),
PSIL_PDMA_XY_PKT(0xc701),
/* PDMA14 (PDMA_USART_G1) - UART2-3 */
PSIL_PDMA_XY_PKT(0xc702),
PSIL_PDMA_XY_PKT(0xc703),
/* PDMA15 (PDMA_USART_G2) - UART4-9 */
PSIL_PDMA_XY_PKT(0xc704),
PSIL_PDMA_XY_PKT(0xc705),
PSIL_PDMA_XY_PKT(0xc706),
PSIL_PDMA_XY_PKT(0xc707),
PSIL_PDMA_XY_PKT(0xc708),
PSIL_PDMA_XY_PKT(0xc709),
/* CPSW9 */
PSIL_ETHERNET(0xca00),
PSIL_ETHERNET(0xca01),
......@@ -284,6 +347,22 @@ static struct psil_ep j721e_dst_ep_map[] = {
PSIL_ETHERNET(0xf005),
PSIL_ETHERNET(0xf006),
PSIL_ETHERNET(0xf007),
/* MCU_PDMA0 (MCU_PDMA_MISC_G0) - SPI0 */
PSIL_PDMA_XY_PKT(0xf100),
PSIL_PDMA_XY_PKT(0xf101),
PSIL_PDMA_XY_PKT(0xf102),
PSIL_PDMA_XY_PKT(0xf103),
/* MCU_PDMA1 (MCU_PDMA_MISC_G1) - SPI1-2 */
PSIL_PDMA_XY_PKT(0xf200),
PSIL_PDMA_XY_PKT(0xf201),
PSIL_PDMA_XY_PKT(0xf202),
PSIL_PDMA_XY_PKT(0xf203),
PSIL_PDMA_XY_PKT(0xf204),
PSIL_PDMA_XY_PKT(0xf205),
PSIL_PDMA_XY_PKT(0xf206),
PSIL_PDMA_XY_PKT(0xf207),
/* MCU_PDMA2 (MCU_PDMA_MISC_G2) - UART0 */
PSIL_PDMA_XY_PKT(0xf300),
/* SA2UL */
PSIL_SA2UL(0xf500, 1),
PSIL_SA2UL(0xf501, 1),
......
......@@ -263,6 +263,7 @@ struct udma_chan_config {
enum udma_tp_level channel_tpl; /* Channel Throughput Level */
u32 tr_trigger_type;
unsigned long tx_flags;
/* PKDMA mapped channel */
int mapped_channel_id;
......@@ -300,8 +301,6 @@ struct udma_chan {
struct udma_tx_drain tx_drain;
u32 bcnt; /* number of bytes completed since the start of the channel */
/* Channel configuration parameters */
struct udma_chan_config config;
......@@ -757,6 +756,20 @@ static void udma_reset_rings(struct udma_chan *uc)
}
}
static void udma_decrement_byte_counters(struct udma_chan *uc, u32 val)
{
if (uc->desc->dir == DMA_DEV_TO_MEM) {
udma_rchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val);
udma_rchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val);
udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val);
} else {
udma_tchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val);
udma_tchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val);
if (!uc->bchan)
udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val);
}
}
static void udma_reset_counters(struct udma_chan *uc)
{
u32 val;
......@@ -790,8 +803,6 @@ static void udma_reset_counters(struct udma_chan *uc)
val = udma_rchanrt_read(uc, UDMA_CHAN_RT_PEER_BCNT_REG);
udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val);
}
uc->bcnt = 0;
}
static int udma_reset_chan(struct udma_chan *uc, bool hard)
......@@ -1045,9 +1056,14 @@ static bool udma_is_desc_really_done(struct udma_chan *uc, struct udma_desc *d)
{
u32 peer_bcnt, bcnt;
/* Only TX towards PDMA is affected */
/*
* Only TX towards PDMA is affected.
* If DMA_PREP_INTERRUPT is not set by consumer then skip the transfer
* completion calculation, consumer must ensure that there is no stale
* data in DMA fabric in this case.
*/
if (uc->config.ep_type == PSIL_EP_NATIVE ||
uc->config.dir != DMA_MEM_TO_DEV)
uc->config.dir != DMA_MEM_TO_DEV || !(uc->config.tx_flags & DMA_PREP_INTERRUPT))
return true;
peer_bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_PEER_BCNT_REG);
......@@ -1115,7 +1131,7 @@ static void udma_check_tx_completion(struct work_struct *work)
if (uc->desc) {
struct udma_desc *d = uc->desc;
uc->bcnt += d->residue;
udma_decrement_byte_counters(uc, d->residue);
udma_start(uc);
vchan_cookie_complete(&d->vd);
break;
......@@ -1168,7 +1184,7 @@ static irqreturn_t udma_ring_irq_handler(int irq, void *data)
vchan_cyclic_callback(&d->vd);
} else {
if (udma_is_desc_really_done(uc, d)) {
uc->bcnt += d->residue;
udma_decrement_byte_counters(uc, d->residue);
udma_start(uc);
vchan_cookie_complete(&d->vd);
} else {
......@@ -1204,7 +1220,7 @@ static irqreturn_t udma_udma_irq_handler(int irq, void *data)
vchan_cyclic_callback(&d->vd);
} else {
/* TODO: figure out the real amount of data */
uc->bcnt += d->residue;
udma_decrement_byte_counters(uc, d->residue);
udma_start(uc);
vchan_cookie_complete(&d->vd);
}
......@@ -3408,6 +3424,8 @@ udma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
if (!burst)
burst = 1;
uc->config.tx_flags = tx_flags;
if (uc->config.pkt_mode)
d = udma_prep_slave_sg_pkt(uc, sgl, sglen, dir, tx_flags,
context);
......@@ -3809,7 +3827,6 @@ static enum dma_status udma_tx_status(struct dma_chan *chan,
bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG);
}
bcnt -= uc->bcnt;
if (bcnt && !(bcnt % uc->desc->residue))
residue = 0;
else
......
......@@ -795,6 +795,17 @@ static int zynqmp_dma_device_terminate_all(struct dma_chan *dchan)
return 0;
}
/**
* zynqmp_dma_synchronize - Synchronizes the termination of a transfers to the current context.
* @dchan: DMA channel pointer
*/
static void zynqmp_dma_synchronize(struct dma_chan *dchan)
{
struct zynqmp_dma_chan *chan = to_chan(dchan);
tasklet_kill(&chan->tasklet);
}
/**
* zynqmp_dma_prep_memcpy - prepare descriptors for memcpy transaction
* @dchan: DMA channel
......@@ -1057,6 +1068,7 @@ static int zynqmp_dma_probe(struct platform_device *pdev)
p = &zdev->common;
p->device_prep_dma_memcpy = zynqmp_dma_prep_memcpy;
p->device_terminate_all = zynqmp_dma_device_terminate_all;
p->device_synchronize = zynqmp_dma_synchronize;
p->device_issue_pending = zynqmp_dma_issue_pending;
p->device_alloc_chan_resources = zynqmp_dma_alloc_chan_resources;
p->device_free_chan_resources = zynqmp_dma_free_chan_resources;
......
......@@ -8,11 +8,13 @@
#ifndef _DMA_HSU_H
#define _DMA_HSU_H
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/kconfig.h>
#include <linux/types.h>
#include <linux/platform_data/dma-hsu.h>
struct device;
struct hsu_dma;
/**
......
......@@ -8,7 +8,7 @@
#ifndef _PLATFORM_DATA_DMA_HSU_H
#define _PLATFORM_DATA_DMA_HSU_H
#include <linux/device.h>
struct device;
struct hsu_dma_slave {
struct device *dma_dev;
......