nexedi / linux · Commits

Commit bc0fa814
Authored Jan 07, 2011 by Dan Williams

    Merge branches 'amba' and 'dma40' into dmaengine

Parents: 0a4bbddd, b7f75865, d49278e3

Changes: 6 changed files, with 871 additions and 706 deletions.
arch/arm/plat-nomadik/include/plat/ste_dma40.h   +8    -0
drivers/dma/amba-pl08x.c                         +490  -550
drivers/dma/ste_dma40.c                          +152  -39
drivers/dma/ste_dma40_ll.c                       +172  -74
drivers/dma/ste_dma40_ll.h                       +19   -17
include/linux/amba/pl08x.h                       +30   -26
arch/arm/plat-nomadik/include/plat/ste_dma40.h

@@ -13,6 +13,14 @@
 #include <linux/workqueue.h>
 #include <linux/interrupt.h>
 
+/*
+ * Maxium size for a single dma descriptor
+ * Size is limited to 16 bits.
+ * Size is in the units of addr-widths (1,2,4,8 bytes)
+ * Larger transfers will be split up to multiple linked desc
+ */
+#define STEDMA40_MAX_SEG_SIZE 0xFFFF
+
 /* dev types for memcpy */
 #define STEDMA40_DEV_DST_MEMORY (-1)
 #define STEDMA40_DEV_SRC_MEMORY (-1)
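To make the new limit concrete, here is a hedged back-of-the-envelope sketch (not part of the commit): one descriptor carries at most STEDMA40_MAX_SEG_SIZE elements, so the byte limit scales with the address width, and anything larger must be split into linked descriptors, which is what the ste_dma40.c hunks below implement.

/* Standalone user-space sketch (not kernel code): largest transfer one
 * descriptor can carry for each addr-width, given the 16-bit element
 * count described in the comment above. */
#include <stdio.h>

#define STEDMA40_MAX_SEG_SIZE 0xFFFF

int main(void)
{
	for (int width = 0; width <= 3; width++)	/* log2 of 1, 2, 4, 8 bytes */
		printf("%d-byte elements: up to %d bytes per descriptor\n",
		       1 << width, STEDMA40_MAX_SEG_SIZE << width);
	return 0;
}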
drivers/dma/amba-pl08x.c

(This diff is collapsed in the web view; its 490 additions and 550 deletions are not shown here.)
drivers/dma/ste_dma40.c
 /*
- * Copyright (C) ST-Ericsson SA 2007-2010
+ * Copyright (C) Ericsson AB 2007-2008
+ * Copyright (C) ST-Ericsson SA 2008-2010
  * Author: Per Forlin <per.forlin@stericsson.com> for ST-Ericsson
  * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
  * License terms: GNU General Public License (GPL) version 2
@@ -554,8 +555,66 @@ static struct d40_desc *d40_last_queued(struct d40_chan *d40c)
 	return d;
 }
 
-/* Support functions for logical channels */
+static int d40_psize_2_burst_size(bool is_log, int psize)
+{
+	if (is_log) {
+		if (psize == STEDMA40_PSIZE_LOG_1)
+			return 1;
+	} else {
+		if (psize == STEDMA40_PSIZE_PHY_1)
+			return 1;
+	}
+
+	return 2 << psize;
+}
+
+/*
+ * The dma only supports transmitting packages up to
+ * STEDMA40_MAX_SEG_SIZE << data_width. Calculate the total number of
+ * dma elements required to send the entire sg list
+ */
+static int d40_size_2_dmalen(int size, u32 data_width1, u32 data_width2)
+{
+	int dmalen;
+	u32 max_w = max(data_width1, data_width2);
+	u32 min_w = min(data_width1, data_width2);
+	u32 seg_max = ALIGN(STEDMA40_MAX_SEG_SIZE << min_w, 1 << max_w);
+
+	if (seg_max > STEDMA40_MAX_SEG_SIZE)
+		seg_max -= (1 << max_w);
+
+	if (!IS_ALIGNED(size, 1 << max_w))
+		return -EINVAL;
+
+	if (size <= seg_max)
+		dmalen = 1;
+	else {
+		dmalen = size / seg_max;
+		if (dmalen * seg_max < size)
+			dmalen++;
+	}
+	return dmalen;
+}
+
+static int d40_sg_2_dmalen(struct scatterlist *sgl, int sg_len,
+			   u32 data_width1, u32 data_width2)
+{
+	struct scatterlist *sg;
+	int i;
+	int len = 0;
+	int ret;
+
+	for_each_sg(sgl, sg, sg_len, i) {
+		ret = d40_size_2_dmalen(sg_dma_len(sg),
+					data_width1, data_width2);
+		if (ret < 0)
+			return ret;
+		len += ret;
+	}
+	return len;
+}
+
+/* Support functions for logical channels */
 
 static int d40_channel_execute_command(struct d40_chan *d40c,
 				       enum d40_command command)
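For readers following the math, here is a hedged user-space sketch of the d40_size_2_dmalen() logic added above. size_2_dmalen() is a local stand-in mirroring the driver function, ALIGN/IS_ALIGNED are simplified local macros standing in for the kernel ones, and widths use the driver's log2 encoding of the element size.

/* Standalone sketch (not kernel code): how many linked descriptors a
 * transfer of `size` bytes needs when source and destination use
 * different address widths. */
#include <stdio.h>

#define STEDMA40_MAX_SEG_SIZE	0xFFFF
#define ALIGN(x, a)		(((x) + (a) - 1) & ~((a) - 1))
#define IS_ALIGNED(x, a)	(((x) & ((a) - 1)) == 0)

static int size_2_dmalen(int size, int w1, int w2)
{
	int max_w = w1 > w2 ? w1 : w2;
	int min_w = w1 < w2 ? w1 : w2;
	int seg_max = ALIGN(STEDMA40_MAX_SEG_SIZE << min_w, 1 << max_w);

	if (seg_max > STEDMA40_MAX_SEG_SIZE)	/* mirrors the driver's adjustment */
		seg_max -= 1 << max_w;
	if (!IS_ALIGNED(size, 1 << max_w))
		return -1;			/* the driver returns -EINVAL here */
	if (size <= seg_max)
		return 1;
	return size / seg_max + (size % seg_max ? 1 : 0);
}

int main(void)
{
	/* 1 MiB memcpy with a 1-byte source width and a 4-byte destination width */
	printf("1 MiB,  widths 1B/4B: %d linked descriptors\n",
	       size_2_dmalen(1 << 20, 0, 2));
	printf("64 KiB, widths 1B/1B: %d linked descriptors\n",
	       size_2_dmalen(64 * 1024, 0, 0));
	return 0;
}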
@@ -1241,6 +1300,21 @@ static int d40_validate_conf(struct d40_chan *d40c,
 		res = -EINVAL;
 	}
 
+	if (d40_psize_2_burst_size(is_log, conf->src_info.psize) *
+	    (1 << conf->src_info.data_width) !=
+	    d40_psize_2_burst_size(is_log, conf->dst_info.psize) *
+	    (1 << conf->dst_info.data_width)) {
+		/*
+		 * The DMAC hardware only supports
+		 * src (burst x width) == dst (burst x width)
+		 */
+		dev_err(&d40c->chan.dev->device,
+			"[%s] src (burst x width) != dst (burst x width)\n",
+			__func__);
+		res = -EINVAL;
+	}
+
 	return res;
 }
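The check above enforces a hardware rule; the following is a hedged standalone sketch of just the arithmetic. The burst lengths and element sizes below are illustrative stand-ins, not the driver's PSIZE constants.

/* Standalone sketch (not kernel code): the DMAC requires source and
 * destination to move the same number of bytes per burst. */
#include <stdio.h>
#include <stdbool.h>

static bool burst_width_ok(int src_burst, int src_bytes,
			   int dst_burst, int dst_bytes)
{
	return src_burst * src_bytes == dst_burst * dst_bytes;
}

int main(void)
{
	/* 8 beats x 1 byte vs 2 beats x 4 bytes: 8 == 8, accepted */
	printf("8x1 vs 2x4: %s\n", burst_width_ok(8, 1, 2, 4) ? "ok" : "-EINVAL");
	/* 4 beats x 1 byte vs 4 beats x 4 bytes: 4 != 16, rejected */
	printf("4x1 vs 4x4: %s\n", burst_width_ok(4, 1, 4, 4) ? "ok" : "-EINVAL");
	return 0;
}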
@@ -1638,13 +1712,21 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
 	if (d40d == NULL)
 		goto err;
 
-	d40d->lli_len = sgl_len;
+	d40d->lli_len = d40_sg_2_dmalen(sgl_dst, sgl_len,
+					d40c->dma_cfg.src_info.data_width,
+					d40c->dma_cfg.dst_info.data_width);
+	if (d40d->lli_len < 0) {
+		dev_err(&d40c->chan.dev->device,
+			"[%s] Unaligned size\n", __func__);
+		goto err;
+	}
+
 	d40d->lli_current = 0;
 	d40d->txd.flags = dma_flags;
 
 	if (d40c->log_num != D40_PHY_CHAN) {
 
-		if (d40_pool_lli_alloc(d40d, sgl_len, true) < 0) {
+		if (d40_pool_lli_alloc(d40d, d40d->lli_len, true) < 0) {
 			dev_err(&d40c->chan.dev->device,
 				"[%s] Out of memory\n", __func__);
 			goto err;
@@ -1654,15 +1736,17 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
 					 sgl_len,
 					 d40d->lli_log.src,
 					 d40c->log_def.lcsp1,
-					 d40c->dma_cfg.src_info.data_width);
+					 d40c->dma_cfg.src_info.data_width,
+					 d40c->dma_cfg.dst_info.data_width);
 
 		(void) d40_log_sg_to_lli(sgl_dst,
 					 sgl_len,
 					 d40d->lli_log.dst,
 					 d40c->log_def.lcsp3,
-					 d40c->dma_cfg.dst_info.data_width);
+					 d40c->dma_cfg.dst_info.data_width,
+					 d40c->dma_cfg.src_info.data_width);
 	} else {
-		if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) {
+		if (d40_pool_lli_alloc(d40d, d40d->lli_len, false) < 0) {
 			dev_err(&d40c->chan.dev->device,
 				"[%s] Out of memory\n", __func__);
 			goto err;
@@ -1675,6 +1759,7 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
 					virt_to_phys(d40d->lli_phy.src),
 					d40c->src_def_cfg,
 					d40c->dma_cfg.src_info.data_width,
+					d40c->dma_cfg.dst_info.data_width,
 					d40c->dma_cfg.src_info.psize);
 
 		if (res < 0)
@@ -1687,6 +1772,7 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
 					virt_to_phys(d40d->lli_phy.dst),
 					d40c->dst_def_cfg,
 					d40c->dma_cfg.dst_info.data_width,
+					d40c->dma_cfg.src_info.data_width,
 					d40c->dma_cfg.dst_info.psize);
 
 		if (res < 0)
@@ -1826,7 +1912,6 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
 	struct d40_chan *d40c = container_of(chan, struct d40_chan,
 					     chan);
 	unsigned long flags;
-	int err = 0;
 
 	if (d40c->phy_chan == NULL) {
 		dev_err(&d40c->chan.dev->device,
@@ -1844,6 +1929,15 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
 	}
 
 	d40d->txd.flags = dma_flags;
+	d40d->lli_len = d40_size_2_dmalen(size,
+					  d40c->dma_cfg.src_info.data_width,
+					  d40c->dma_cfg.dst_info.data_width);
+	if (d40d->lli_len < 0) {
+		dev_err(&d40c->chan.dev->device,
+			"[%s] Unaligned size\n", __func__);
+		goto err;
+	}
+
 
 	dma_async_tx_descriptor_init(&d40d->txd, chan);
@@ -1851,37 +1945,40 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
 
 	if (d40c->log_num != D40_PHY_CHAN) {
 
-		if (d40_pool_lli_alloc(d40d, 1, true) < 0) {
+		if (d40_pool_lli_alloc(d40d, d40d->lli_len, true) < 0) {
 			dev_err(&d40c->chan.dev->device,
 				"[%s] Out of memory\n", __func__);
 			goto err;
 		}
-		d40d->lli_len = 1;
 		d40d->lli_current = 0;
 
-		d40_log_fill_lli(d40d->lli_log.src,
-				 src,
-				 size,
-				 d40c->log_def.lcsp1,
-				 d40c->dma_cfg.src_info.data_width,
-				 true);
+		if (d40_log_buf_to_lli(d40d->lli_log.src,
+				       src,
+				       size,
+				       d40c->log_def.lcsp1,
+				       d40c->dma_cfg.src_info.data_width,
+				       d40c->dma_cfg.dst_info.data_width,
+				       true) == NULL)
+			goto err;
 
-		d40_log_fill_lli(d40d->lli_log.dst,
-				 dst,
-				 size,
-				 d40c->log_def.lcsp3,
-				 d40c->dma_cfg.dst_info.data_width,
-				 true);
+		if (d40_log_buf_to_lli(d40d->lli_log.dst,
+				       dst,
+				       size,
+				       d40c->log_def.lcsp3,
+				       d40c->dma_cfg.dst_info.data_width,
+				       d40c->dma_cfg.src_info.data_width,
+				       true) == NULL)
+			goto err;
 
 	} else {
 
-		if (d40_pool_lli_alloc(d40d, 1, false) < 0) {
+		if (d40_pool_lli_alloc(d40d, d40d->lli_len, false) < 0) {
 			dev_err(&d40c->chan.dev->device,
 				"[%s] Out of memory\n", __func__);
 			goto err;
 		}
 
-		err = d40_phy_fill_lli(d40d->lli_phy.src,
+		if (d40_phy_buf_to_lli(d40d->lli_phy.src,
 				       src,
 				       size,
 				       d40c->dma_cfg.src_info.psize,
@@ -1889,11 +1986,11 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
 				       d40c->src_def_cfg,
 				       true,
 				       d40c->dma_cfg.src_info.data_width,
-				       false);
-		if (err)
-			goto err_fill_lli;
+				       d40c->dma_cfg.dst_info.data_width,
+				       false) == NULL)
+			goto err;
 
-		err = d40_phy_fill_lli(d40d->lli_phy.dst,
+		if (d40_phy_buf_to_lli(d40d->lli_phy.dst,
 				       dst,
 				       size,
 				       d40c->dma_cfg.dst_info.psize,
@@ -1901,10 +1998,9 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
 				       d40c->dst_def_cfg,
 				       true,
 				       d40c->dma_cfg.dst_info.data_width,
-				       false);
-
-		if (err)
-			goto err_fill_lli;
+				       d40c->dma_cfg.src_info.data_width,
+				       false) == NULL)
+			goto err;
 
 		(void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
 				      d40d->lli_pool.size, DMA_TO_DEVICE);
@@ -1913,9 +2009,6 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
 	spin_unlock_irqrestore(&d40c->lock, flags);
 	return &d40d->txd;
 
-err_fill_lli:
-	dev_err(&d40c->chan.dev->device,
-		"[%s] Failed filling in PHY LLI\n", __func__);
 err:
 	if (d40d)
 		d40_desc_free(d40c, d40d);
@@ -1945,13 +2038,21 @@ static int d40_prep_slave_sg_log(struct d40_desc *d40d,
 	dma_addr_t dev_addr = 0;
 	int total_size;
 
-	if (d40_pool_lli_alloc(d40d, sg_len, true) < 0) {
+	d40d->lli_len = d40_sg_2_dmalen(sgl, sg_len,
+					d40c->dma_cfg.src_info.data_width,
+					d40c->dma_cfg.dst_info.data_width);
+	if (d40d->lli_len < 0) {
+		dev_err(&d40c->chan.dev->device,
+			"[%s] Unaligned size\n", __func__);
+		return -EINVAL;
+	}
+
+	if (d40_pool_lli_alloc(d40d, d40d->lli_len, true) < 0) {
 		dev_err(&d40c->chan.dev->device,
 			"[%s] Out of memory\n", __func__);
 		return -ENOMEM;
 	}
 
-	d40d->lli_len = sg_len;
 	d40d->lli_current = 0;
 
 	if (direction == DMA_FROM_DEVICE)
@@ -1993,13 +2094,21 @@ static int d40_prep_slave_sg_phy(struct d40_desc *d40d,
 	dma_addr_t dst_dev_addr;
 	int res;
 
-	if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) {
+	d40d->lli_len = d40_sg_2_dmalen(sgl, sgl_len,
+					d40c->dma_cfg.src_info.data_width,
+					d40c->dma_cfg.dst_info.data_width);
+	if (d40d->lli_len < 0) {
+		dev_err(&d40c->chan.dev->device,
+			"[%s] Unaligned size\n", __func__);
+		return -EINVAL;
+	}
+
+	if (d40_pool_lli_alloc(d40d, d40d->lli_len, false) < 0) {
 		dev_err(&d40c->chan.dev->device,
 			"[%s] Out of memory\n", __func__);
 		return -ENOMEM;
 	}
 
-	d40d->lli_len = sgl_len;
 	d40d->lli_current = 0;
 
 	if (direction == DMA_FROM_DEVICE) {
@@ -2024,6 +2133,7 @@ static int d40_prep_slave_sg_phy(struct d40_desc *d40d,
 			 virt_to_phys(d40d->lli_phy.src),
 			 d40c->src_def_cfg,
 			 d40c->dma_cfg.src_info.data_width,
+			 d40c->dma_cfg.dst_info.data_width,
 			 d40c->dma_cfg.src_info.psize);
 	if (res < 0)
 		return res;
@@ -2035,6 +2145,7 @@ static int d40_prep_slave_sg_phy(struct d40_desc *d40d,
 			 virt_to_phys(d40d->lli_phy.dst),
 			 d40c->dst_def_cfg,
 			 d40c->dma_cfg.dst_info.data_width,
+			 d40c->dma_cfg.src_info.data_width,
 			 d40c->dma_cfg.dst_info.psize);
 	if (res < 0)
 		return res;
@@ -2244,6 +2355,8 @@ static void d40_set_runtime_config(struct dma_chan *chan,
 			psize = STEDMA40_PSIZE_PHY_8;
 		else if (config_maxburst >= 4)
 			psize = STEDMA40_PSIZE_PHY_4;
+		else if (config_maxburst >= 2)
+			psize = STEDMA40_PSIZE_PHY_2;
 		else
 			psize = STEDMA40_PSIZE_PHY_1;
 	}
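The last hunk fills the gap where a requested maxburst of 2 or 3 previously fell through to the single-beat setting. A hedged sketch of that bucketing follows; it only covers the branches visible in the hunk (the larger-burst cases above the hunk are elided) and returns the constants' names rather than their values, since those are not shown here.

/* Standalone sketch (not kernel code) of the runtime-config bucketing in
 * d40_set_runtime_config() for the physical-channel case. */
#include <stdio.h>

static const char *phy_psize(int config_maxburst)
{
	if (config_maxburst >= 4)
		return "STEDMA40_PSIZE_PHY_4";
	else if (config_maxburst >= 2)
		return "STEDMA40_PSIZE_PHY_2";	/* branch added by this merge */
	else
		return "STEDMA40_PSIZE_PHY_1";
}

int main(void)
{
	for (int burst = 1; burst <= 4; burst++)
		printf("maxburst %d -> %s\n", burst, phy_psize(burst));
	return 0;
}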
drivers/dma/ste_dma40_ll.c

 /*
  * Copyright (C) ST-Ericsson SA 2007-2010
- * Author: Per Friden <per.friden@stericsson.com> for ST-Ericsson
+ * Author: Per Forlin <per.forlin@stericsson.com> for ST-Ericsson
  * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
  * License terms: GNU General Public License (GPL) version 2
  */
@@ -122,7 +122,7 @@ void d40_phy_cfg(struct stedma40_chan_cfg *cfg,
 	*dst_cfg = dst;
 }
 
-int d40_phy_fill_lli(struct d40_phy_lli *lli,
+static int d40_phy_fill_lli(struct d40_phy_lli *lli,
 		     dma_addr_t data,
 		     u32 data_size,
 		     int psize,
@@ -139,13 +139,6 @@ int d40_phy_fill_lli(struct d40_phy_lli *lli,
 	else
 		num_elems = 2 << psize;
 
-	/*
-	 * Size is 16bit. data_width is 8, 16, 32 or 64 bit
-	 * Block large than 64 KiB must be split.
-	 */
-	if (data_size > (0xffff << data_width))
-		return -EINVAL;
-
 	/* Must be aligned */
 	if (!IS_ALIGNED(data, 0x1 << data_width))
 		return -EINVAL;
@@ -187,55 +180,118 @@ int d40_phy_fill_lli(struct d40_phy_lli *lli,
 	return 0;
 }
 
+static int d40_seg_size(int size, int data_width1, int data_width2)
+{
+	u32 max_w = max(data_width1, data_width2);
+	u32 min_w = min(data_width1, data_width2);
+	u32 seg_max = ALIGN(STEDMA40_MAX_SEG_SIZE << min_w, 1 << max_w);
+
+	if (seg_max > STEDMA40_MAX_SEG_SIZE)
+		seg_max -= (1 << max_w);
+
+	if (size <= seg_max)
+		return size;
+
+	if (size <= 2 * seg_max)
+		return ALIGN(size / 2, 1 << max_w);
+
+	return seg_max;
+}
+
+struct d40_phy_lli *d40_phy_buf_to_lli(struct d40_phy_lli *lli,
+				       dma_addr_t addr,
+				       u32 size,
+				       int psize,
+				       dma_addr_t lli_phys,
+				       u32 reg_cfg,
+				       bool term_int,
+				       u32 data_width1,
+				       u32 data_width2,
+				       bool is_device)
+{
+	int err;
+	dma_addr_t next = lli_phys;
+	int size_rest = size;
+	int size_seg = 0;
+
+	do {
+		size_seg = d40_seg_size(size_rest, data_width1, data_width2);
+		size_rest -= size_seg;
+
+		if (term_int && size_rest == 0)
+			next = 0;
+		else
+			next = ALIGN(next + sizeof(struct d40_phy_lli),
+				     D40_LLI_ALIGN);
+
+		err = d40_phy_fill_lli(lli,
+				       addr,
+				       size_seg,
+				       psize,
+				       next,
+				       reg_cfg,
+				       !next,
+				       data_width1,
+				       is_device);
+
+		if (err)
+			goto err;
+
+		lli++;
+		if (!is_device)
+			addr += size_seg;
+	} while (size_rest);
+
+	return lli;
+
+ err:
+	return NULL;
+}
+
 int d40_phy_sg_to_lli(struct scatterlist *sg,
 		      int sg_len,
 		      dma_addr_t target,
-		      struct d40_phy_lli *lli,
+		      struct d40_phy_lli *lli_sg,
 		      dma_addr_t lli_phys,
 		      u32 reg_cfg,
-		      u32 data_width,
+		      u32 data_width1,
+		      u32 data_width2,
 		      int psize)
 {
 	int total_size = 0;
 	int i;
 	struct scatterlist *current_sg = sg;
-	dma_addr_t next_lli_phys;
 	dma_addr_t dst;
-	int err = 0;
+	struct d40_phy_lli *lli = lli_sg;
+	dma_addr_t l_phys = lli_phys;
 
 	for_each_sg(sg, current_sg, sg_len, i) {
 		total_size += sg_dma_len(current_sg);
 
-		/* If this scatter list entry is the last one, no next link */
-		if (sg_len - 1 == i)
-			next_lli_phys = 0;
-		else
-			next_lli_phys = ALIGN(lli_phys + (i + 1) *
-					      sizeof(struct d40_phy_lli),
-					      D40_LLI_ALIGN);
-
 		if (target)
 			dst = target;
 		else
 			dst = sg_phys(current_sg);
 
-		err = d40_phy_fill_lli(&lli[i],
-				       dst,
-				       sg_dma_len(current_sg),
-				       psize,
-				       next_lli_phys,
-				       reg_cfg,
-				       !next_lli_phys,
-				       data_width,
-				       target == dst);
-		if (err)
-			goto err;
+		l_phys = ALIGN(lli_phys + (lli - lli_sg) *
+			       sizeof(struct d40_phy_lli), D40_LLI_ALIGN);
+
+		lli = d40_phy_buf_to_lli(lli,
+					 dst,
+					 sg_dma_len(current_sg),
+					 psize,
+					 l_phys,
+					 reg_cfg,
+					 sg_len - 1 == i,
+					 data_width1,
+					 data_width2,
+					 target == dst);
+
+		if (lli == NULL)
+			return -EINVAL;
 	}
 
 	return total_size;
-
- err:
-	return err;
 }
@@ -315,7 +371,7 @@ void d40_log_lli_lcla_write(struct d40_log_lli *lcla,
 	writel(lli_dst->lcsp13, &lcla[1].lcsp13);
 }
 
-void d40_log_fill_lli(struct d40_log_lli *lli,
+static void d40_log_fill_lli(struct d40_log_lli *lli,
 		      dma_addr_t data, u32 data_size,
 		      u32 reg_cfg,
 		      u32 data_width,
@@ -326,6 +382,9 @@ void d40_log_fill_lli(struct d40_log_lli *lli,
 	/* The number of elements to transfer */
 	lli->lcsp02 = ((data_size >> data_width) <<
 		       D40_MEM_LCSP0_ECNT_POS) & D40_MEM_LCSP0_ECNT_MASK;
+
+	BUG_ON((data_size >> data_width) > STEDMA40_MAX_SEG_SIZE);
+
 	/* 16 LSBs address of the current element */
 	lli->lcsp02 |= data & D40_MEM_LCSP0_SPTR_MASK;
 	/* 16 MSBs address of the current element */
@@ -348,55 +407,94 @@ int d40_log_sg_to_dev(struct scatterlist *sg,
 	int total_size = 0;
 	struct scatterlist *current_sg = sg;
 	int i;
+	struct d40_log_lli *lli_src = lli->src;
+	struct d40_log_lli *lli_dst = lli->dst;
 
 	for_each_sg(sg, current_sg, sg_len, i) {
 		total_size += sg_dma_len(current_sg);
 
 		if (direction == DMA_TO_DEVICE) {
-			d40_log_fill_lli(&lli->src[i],
-					 sg_phys(current_sg),
-					 sg_dma_len(current_sg),
-					 lcsp->lcsp1, src_data_width,
-					 true);
-			d40_log_fill_lli(&lli->dst[i],
-					 dev_addr,
-					 sg_dma_len(current_sg),
-					 lcsp->lcsp3, dst_data_width,
-					 false);
+			lli_src =
+				d40_log_buf_to_lli(lli_src,
+						   sg_phys(current_sg),
+						   sg_dma_len(current_sg),
+						   lcsp->lcsp1, src_data_width,
+						   dst_data_width,
+						   true);
+			lli_dst =
+				d40_log_buf_to_lli(lli_dst,
+						   dev_addr,
+						   sg_dma_len(current_sg),
+						   lcsp->lcsp3, dst_data_width,
+						   src_data_width,
+						   false);
 		} else {
-			d40_log_fill_lli(&lli->dst[i],
-					 sg_phys(current_sg),
-					 sg_dma_len(current_sg),
-					 lcsp->lcsp3, dst_data_width,
-					 true);
-			d40_log_fill_lli(&lli->src[i],
-					 dev_addr,
-					 sg_dma_len(current_sg),
-					 lcsp->lcsp1, src_data_width,
-					 false);
+			lli_dst =
+				d40_log_buf_to_lli(lli_dst,
+						   sg_phys(current_sg),
+						   sg_dma_len(current_sg),
+						   lcsp->lcsp3, dst_data_width,
+						   src_data_width,
+						   true);
+			lli_src =
+				d40_log_buf_to_lli(lli_src,
+						   dev_addr,
+						   sg_dma_len(current_sg),
+						   lcsp->lcsp1, src_data_width,
+						   dst_data_width,
+						   false);
 		}
 	}
 	return total_size;
 }
 
+struct d40_log_lli *d40_log_buf_to_lli(struct d40_log_lli *lli_sg,
+				       dma_addr_t addr,
+				       int size,
+				       u32 lcsp13, /* src or dst*/
+				       u32 data_width1,
+				       u32 data_width2,
+				       bool addr_inc)
+{
+	struct d40_log_lli *lli = lli_sg;
+	int size_rest = size;
+	int size_seg = 0;
+
+	do {
+		size_seg = d40_seg_size(size_rest, data_width1, data_width2);
+		size_rest -= size_seg;
+
+		d40_log_fill_lli(lli,
+				 addr,
+				 size_seg,
+				 lcsp13, data_width1,
+				 addr_inc);
+		if (addr_inc)
+			addr += size_seg;
+		lli++;
+	} while (size_rest);
+
+	return lli;
+}
+
 int d40_log_sg_to_lli(struct scatterlist *sg,
 		      int sg_len,
 		      struct d40_log_lli *lli_sg,
 		      u32 lcsp13, /* src or dst*/
-		      u32 data_width)
+		      u32 data_width1, u32 data_width2)
 {
 	int total_size = 0;
 	struct scatterlist *current_sg = sg;
 	int i;
+	struct d40_log_lli *lli = lli_sg;
 
 	for_each_sg(sg, current_sg, sg_len, i) {
 		total_size += sg_dma_len(current_sg);
-
-		d40_log_fill_lli(&lli_sg[i],
-				 sg_phys(current_sg),
-				 sg_dma_len(current_sg),
-				 lcsp13, data_width,
-				 true);
+		lli = d40_log_buf_to_lli(lli,
+					 sg_phys(current_sg),
+					 sg_dma_len(current_sg),
+					 lcsp13,
+					 data_width1, data_width2, true);
 	}
 
 	return total_size;
 }
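A hedged user-space sketch of the d40_seg_size() splitting policy introduced above, driving the same do/while shape as d40_phy_buf_to_lli() and d40_log_buf_to_lli(). seg_size() is a local stand-in, ALIGN is a simplified local macro, and widths are the driver's log2 encoding of the element size; the 200000-byte buffer is purely illustrative.

/* Standalone sketch (not kernel code): carve a buffer into segments no
 * larger than the per-descriptor limit, splitting a two-segment case
 * roughly in half as the driver does. */
#include <stdio.h>

#define STEDMA40_MAX_SEG_SIZE	0xFFFF
#define ALIGN(x, a)		(((x) + (a) - 1) & ~((a) - 1))

static int seg_size(int size, int w1, int w2)
{
	int max_w = w1 > w2 ? w1 : w2;
	int min_w = w1 < w2 ? w1 : w2;
	int seg_max = ALIGN(STEDMA40_MAX_SEG_SIZE << min_w, 1 << max_w);

	if (seg_max > STEDMA40_MAX_SEG_SIZE)
		seg_max -= 1 << max_w;
	if (size <= seg_max)
		return size;
	if (size <= 2 * seg_max)		/* split roughly in half */
		return ALIGN(size / 2, 1 << max_w);
	return seg_max;
}

int main(void)
{
	int rest = 200000;			/* bytes left to map */

	/* Emulate the do/while loop in the *_buf_to_lli() helpers above */
	while (rest) {
		int seg = seg_size(rest, 0, 2);	/* 1-byte src, 4-byte dst */
		printf("segment of %d bytes\n", seg);
		rest -= seg;
	}
	return 0;
}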
drivers/dma/ste_dma40_ll.h

@@ -292,17 +292,19 @@ int d40_phy_sg_to_lli(struct scatterlist *sg,
 		      struct d40_phy_lli *lli,
 		      dma_addr_t lli_phys,
 		      u32 reg_cfg,
-		      u32 data_width,
+		      u32 data_width1,
+		      u32 data_width2,
 		      int psize);
 
-int d40_phy_fill_lli(struct d40_phy_lli *lli,
-		     dma_addr_t data,
-		     u32 data_size,
-		     int psize,
-		     dma_addr_t next_lli,
-		     u32 reg_cfg,
-		     bool term_int,
-		     u32 data_width,
-		     bool is_device);
+struct d40_phy_lli *d40_phy_buf_to_lli(struct d40_phy_lli *lli,
+				       dma_addr_t data,
+				       u32 data_size,
+				       int psize,
+				       dma_addr_t next_lli,
+				       u32 reg_cfg,
+				       bool term_int,
+				       u32 data_width1,
+				       u32 data_width2,
+				       bool is_device);
 
 void d40_phy_lli_write(void __iomem *virtbase,
@@ -312,11 +314,11 @@ void d40_phy_lli_write(void __iomem *virtbase,
 
 /* Logical channels */
 
-void d40_log_fill_lli(struct d40_log_lli *lli,
-		      dma_addr_t data,
-		      u32 data_size,
-		      u32 reg_cfg,
-		      u32 data_width,
-		      bool addr_inc);
+struct d40_log_lli *d40_log_buf_to_lli(struct d40_log_lli *lli_sg,
+				       dma_addr_t addr,
+				       int size,
+				       u32 lcsp13, /* src or dst*/
+				       u32 data_width1,
+				       u32 data_width2,
+				       bool addr_inc);
 
 int d40_log_sg_to_dev(struct scatterlist *sg,
@@ -332,7 +334,7 @@ int d40_log_sg_to_lli(struct scatterlist *sg,
 		      int sg_len,
 		      struct d40_log_lli *lli_sg,
 		      u32 lcsp13, /* src or dst*/
-		      u32 data_width);
+		      u32 data_width1, u32 data_width2);
 
 void d40_log_lli_lcpa_write(struct d40_log_lli_full *lcpa,
 			    struct d40_log_lli *lli_dst,
include/linux/amba/pl08x.h

@@ -22,6 +22,15 @@
 #include <linux/dmaengine.h>
 #include <linux/interrupt.h>
 
+struct pl08x_lli;
+struct pl08x_driver_data;
+
+/* Bitmasks for selecting AHB ports for DMA transfers */
+enum {
+	PL08X_AHB1 = (1 << 0),
+	PL08X_AHB2 = (1 << 1)
+};
+
 /**
  * struct pl08x_channel_data - data structure to pass info between
  * platform and PL08x driver regarding channel configuration
@@ -48,6 +57,8 @@
  *	round round round)
  * @single: the device connected to this channel will request single
  *	DMA transfers, not bursts. (Bursts are default.)
+ * @periph_buses: the device connected to this channel is accessible via
+ *	these buses (use PL08X_AHB1 | PL08X_AHB2).
  */
 struct pl08x_channel_data {
 	char *bus_id;
@@ -55,10 +66,10 @@ struct pl08x_channel_data {
 	int max_signal;
 	u32 muxval;
 	u32 cctl;
-	u32 ccfg;
 	dma_addr_t addr;
 	bool circular_buffer;
 	bool single;
+	u8 periph_buses;
 };
 
 /**
@@ -74,7 +85,7 @@ struct pl08x_bus_data {
 	dma_addr_t addr;
 	u8 maxwidth;
 	u8 buswidth;
-	u32 fill_bytes;
+	size_t fill_bytes;
 };
 
 /**
@@ -92,11 +103,6 @@ struct pl08x_phy_chan {
 	spinlock_t lock;
 	int signal;
 	struct pl08x_dma_chan *serving;
-	u32 csrc;
-	u32 cdst;
-	u32 clli;
-	u32 cctl;
-	u32 ccfg;
 };
 
 /**
@@ -108,21 +114,19 @@ struct pl08x_txd {
 	struct dma_async_tx_descriptor tx;
 	struct list_head node;
 	enum dma_data_direction direction;
-	struct pl08x_bus_data srcbus;
-	struct pl08x_bus_data dstbus;
-	int len;
+	dma_addr_t src_addr;
+	dma_addr_t dst_addr;
+	size_t len;
 	dma_addr_t llis_bus;
 	void *llis_va;
-	struct pl08x_channel_data *cd;
 	bool active;
+	/* Default cctl value for LLIs */
+	u32 cctl;
 	/*
 	 * Settings to be put into the physical channel when we
-	 * trigger this txd
+	 * trigger this txd. Other registers are in llis_va[0].
 	 */
-	u32 csrc;
-	u32 cdst;
-	u32 clli;
-	u32 cctl;
+	u32 ccfg;
 };
 
 /**
@@ -147,6 +151,8 @@ enum pl08x_dma_chan_state {
  * struct pl08x_dma_chan - this structure wraps a DMA ENGINE channel
  * @chan: wrappped abstract channel
  * @phychan: the physical channel utilized by this channel, if there is one
+ * @phychan_hold: if non-zero, hold on to the physical channel even if we
+ *	have no pending entries
  * @tasklet: tasklet scheduled by the IRQ to handle actual work etc
 * @name: name of channel
 * @cd: channel platform data
@@ -154,11 +160,8 @@ enum pl08x_dma_chan_state {
 * @runtime_direction: current direction of this channel according to
 *	runtime config
 * @lc: last completed transaction on this channel
- * @desc_list: queued transactions pending on this channel
+ * @pend_list: queued transactions pending on this channel
 * @at: active transaction on this channel
- * @lockflags: sometimes we let a lock last between two function calls,
- *	especially prep/submit, and then we need to store the IRQ flags
- *	in the channel state, here
 * @lock: a lock for this channel data
 * @host: a pointer to the host (internal use)
 * @state: whether the channel is idle, paused, running etc
@@ -169,18 +172,17 @@ enum pl08x_dma_chan_state {
 struct pl08x_dma_chan {
 	struct dma_chan chan;
 	struct pl08x_phy_chan *phychan;
+	int phychan_hold;
 	struct tasklet_struct tasklet;
 	char *name;
 	struct pl08x_channel_data *cd;
 	dma_addr_t runtime_addr;
 	enum dma_data_direction runtime_direction;
-	atomic_t last_issued;
 	dma_cookie_t lc;
-	struct list_head desc_list;
+	struct list_head pend_list;
 	struct pl08x_txd *at;
-	unsigned long lockflags;
 	spinlock_t lock;
-	void *host;
+	struct pl08x_driver_data *host;
 	enum pl08x_dma_chan_state state;
 	bool slave;
 	struct pl08x_txd *waiting;
@@ -199,8 +201,8 @@ struct pl08x_dma_chan {
 *	less than zero, else it returns the allocated signal number
 * @put_signal: indicate to the platform that this physical signal is not
 *	running any DMA transfer and multiplexing can be recycled
- * @bus_bit_lli: Bit[0] of the address indicated which AHB bus master the
- *	LLI addresses are on 0/1 Master 1/2.
+ * @lli_buses: buses which LLIs can be fetched from: PL08X_AHB1 | PL08X_AHB2
+ * @mem_buses: buses which memory can be accessed from: PL08X_AHB1 | PL08X_AHB2
 */
 struct pl08x_platform_data {
 	struct pl08x_channel_data *slave_channels;
@@ -208,6 +210,8 @@ struct pl08x_platform_data {
 	struct pl08x_channel_data memcpy_channel;
 	int (*get_signal)(struct pl08x_dma_chan *);
 	void (*put_signal)(struct pl08x_dma_chan *);
+	u8 lli_buses;
+	u8 mem_buses;
 };
 
 #ifdef CONFIG_AMBA_PL08X
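A hedged user-space sketch of how the new AHB-port bitmasks compose and are tested. The comments refer to pl08x_channel_data.periph_buses and pl08x_platform_data.lli_buses from the hunks above, but the values chosen here are purely illustrative, not taken from any real board file.

/* Standalone sketch (not kernel code): combining and testing the
 * PL08X_AHB1/PL08X_AHB2 port masks added by this merge. */
#include <stdio.h>
#include <stdint.h>

enum {
	PL08X_AHB1 = (1 << 0),
	PL08X_AHB2 = (1 << 1)
};

int main(void)
{
	/* A peripheral reachable from both masters, as a board file might
	 * declare in pl08x_channel_data.periph_buses (illustrative). */
	uint8_t periph_buses = PL08X_AHB1 | PL08X_AHB2;
	/* LLIs fetched from AHB2 only, as pl08x_platform_data.lli_buses. */
	uint8_t lli_buses = PL08X_AHB2;

	printf("peripheral reachable on AHB1: %s\n",
	       (periph_buses & PL08X_AHB1) ? "yes" : "no");
	printf("LLI fetch allowed on AHB1:    %s\n",
	       (lli_buses & PL08X_AHB1) ? "yes" : "no");
	return 0;
}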