Commit b4c26967 authored by Laxman Dewangan, committed by Stephen Warren

ARM: tegra: dma: remove legacy APB DMA driver

Remove the legacy APB DMA driver. APB DMA support has moved to the
dmaengine-based Tegra APB DMA driver, and all client drivers have
been converted to use it.
Signed-off-by: Laxman Dewangan <ldewangan@nvidia.com>
Signed-off-by: Stephen Warren <swarren@nvidia.com>
parent e2187b94
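
For context, a minimal sketch of how a client might queue a transfer through the generic dmaengine slave API that replaces the legacy tegra_dma_* interface. This is not part of the commit; the function names and parameters below are illustrative only, and the channel is assumed to have already been requested and configured.

#include <linux/completion.h>
#include <linux/dmaengine.h>
#include <linux/errno.h>

static void example_xfer_done(void *arg)
{
	/* Runs from the DMA driver's completion context, playing the same
	 * role as the legacy req->complete() callback. */
	complete(arg);
}

/* Queue a single device-to-memory transfer on an already requested channel. */
static int example_rx(struct dma_chan *chan, dma_addr_t buf, size_t len,
		      struct completion *done)
{
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	desc = dmaengine_prep_slave_single(chan, buf, len, DMA_DEV_TO_MEM,
					   DMA_PREP_INTERRUPT);
	if (!desc)
		return -EIO;

	desc->callback = example_xfer_done;
	desc->callback_param = done;

	cookie = dmaengine_submit(desc);
	if (dma_submit_error(cookie))
		return -EIO;

	/* Nothing starts until the pending queue is kicked. */
	dma_async_issue_pending(chan);
	return 0;
}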
@@ -130,13 +130,6 @@ config TEGRA_DEBUG_UART_AUTO_SCRATCH
endchoice
config TEGRA_SYSTEM_DMA
bool "Enable system DMA driver for NVIDIA Tegra SoCs"
default y
help
Adds system DMA functionality for NVIDIA Tegra SoCs, used by
several Tegra device drivers
config TEGRA_EMC_SCALING_ENABLE
bool "Enable scaling the memory frequency"
@@ -18,7 +18,6 @@ obj-$(CONFIG_ARCH_TEGRA_3x_SOC) += tegra30_clocks.o
obj-$(CONFIG_SMP) += platsmp.o headsmp.o
obj-$(CONFIG_SMP) += reset.o
obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o
obj-$(CONFIG_TEGRA_SYSTEM_DMA) += dma.o
obj-$(CONFIG_CPU_FREQ) += cpu-tegra.o
obj-$(CONFIG_TEGRA_PCI) += pcie.o
obj-$(CONFIG_USB_SUPPORT) += usb_phy.o
[Collapsed diff: the legacy DMA driver source file deleted by this commit is not shown here.]
@@ -51,101 +51,4 @@
#define TEGRA_DMA_REQ_SEL_OWR 25
#define TEGRA_DMA_REQ_SEL_INVALID 31
struct tegra_dma_req;
struct tegra_dma_channel;
enum tegra_dma_mode {
TEGRA_DMA_SHARED = 1,
TEGRA_DMA_MODE_CONTINOUS = 2,
TEGRA_DMA_MODE_ONESHOT = 4,
};
enum tegra_dma_req_error {
TEGRA_DMA_REQ_SUCCESS = 0,
TEGRA_DMA_REQ_ERROR_ABORTED,
TEGRA_DMA_REQ_INFLIGHT,
};
enum tegra_dma_req_buff_status {
TEGRA_DMA_REQ_BUF_STATUS_EMPTY = 0,
TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL,
TEGRA_DMA_REQ_BUF_STATUS_FULL,
};
struct tegra_dma_req {
struct list_head node;
unsigned int modid;
int instance;
/* Called from the DMA ISR context when the req is complete. By the
* time this is called, the req structure is no longer queued on the
* DMA channel.
*
* What the DMA does next depends on how many requests remain queued:
* if none are queued the DMA is stopped, otherwise the next request
* is programmed.
*/
void (*complete)(struct tegra_dma_req *req);
/* This is called from the DMA ISR context while the DMA is still in
* progress and is actively filling the same buffer.
*
* In continuous-mode receive, this threshold is half the buffer size.
* In other cases this is never called, as there is no hardware support
* for it.
*
* In continuous-mode receive, if a next req is already queued, the DMA
* programs the hardware to use that req once the current req completes.
* If no "next req" is queued, the DMA ISR does nothing before calling
* this callback.
*
* This is mainly used where the client has queued only one req and
* wants a threshold callback in order to program the next buffer.
*/
void (*threshold)(struct tegra_dma_req *req);
/* 1 to copy from the device FIFO to memory,
* 0 to copy from memory to the device FIFO */
int to_memory;
void *virt_addr;
unsigned long source_addr;
unsigned long dest_addr;
unsigned long dest_wrap;
unsigned long source_wrap;
unsigned long source_bus_width;
unsigned long dest_bus_width;
unsigned long req_sel;
unsigned int size;
/* Updated by the DMA driver on completion of the request. */
int bytes_transferred;
int status;
/* DMA completion tracking information */
int buffer_status;
/* Client specific data */
void *dev;
};
int tegra_dma_enqueue_req(struct tegra_dma_channel *ch,
struct tegra_dma_req *req);
int tegra_dma_dequeue_req(struct tegra_dma_channel *ch,
struct tegra_dma_req *req);
void tegra_dma_dequeue(struct tegra_dma_channel *ch);
void tegra_dma_flush(struct tegra_dma_channel *ch);
bool tegra_dma_is_req_inflight(struct tegra_dma_channel *ch,
struct tegra_dma_req *req);
bool tegra_dma_is_empty(struct tegra_dma_channel *ch);
struct tegra_dma_channel *tegra_dma_allocate_channel(int mode);
void tegra_dma_free_channel(struct tegra_dma_channel *ch);
int __init tegra_dma_init(void);
#endif
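
For comparison, a hypothetical sketch of how a client used the legacy interface declared above. The addresses, bus widths and request selector are illustrative only and not taken from any real driver; the req must stay valid until the complete() callback runs, hence the static storage here.

#include <linux/errno.h>
#include <linux/printk.h>
#include <mach/dma.h>

static void example_rx_complete(struct tegra_dma_req *req)
{
	/* Called from the DMA ISR context once the request has been
	 * dequeued from the channel. */
	pr_info("transferred %d bytes, status %d\n",
		req->bytes_transferred, req->status);
}

static struct tegra_dma_req example_req;	/* must outlive the transfer */

static int example_legacy_rx(unsigned long fifo_addr, unsigned long buf_addr,
			     unsigned int len)
{
	struct tegra_dma_channel *ch;

	ch = tegra_dma_allocate_channel(TEGRA_DMA_MODE_ONESHOT);
	if (!ch)
		return -ENODEV;

	example_req.to_memory = 1;			/* device FIFO -> memory */
	example_req.source_addr = fifo_addr;
	example_req.dest_addr = buf_addr;
	example_req.source_bus_width = 32;		/* illustrative */
	example_req.dest_bus_width = 32;		/* illustrative */
	example_req.req_sel = TEGRA_DMA_REQ_SEL_OWR;	/* illustrative */
	example_req.size = len;
	example_req.complete = example_rx_complete;

	return tegra_dma_enqueue_req(ch, &example_req);
}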