Commit 4fb9c15b authored 9 years ago by Vinod Koul
Merge branch 'topic/xdmac' into for-linus
parents 0e0fa66e 5abecfa5
Showing 4 changed files with 391 additions and 52 deletions
drivers/dma/at_hdmac.c        +106  -0
drivers/dma/at_hdmac_regs.h     +5  -0
drivers/dma/at_xdmac.c        +253  -52
include/linux/dmaengine.h      +27  -0
drivers/dma/at_hdmac.c  (View file @ 4fb9c15b)
...
...
@@ -247,6 +247,10 @@ static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first)
	channel_writel(atchan, CTRLA, 0);
	channel_writel(atchan, CTRLB, 0);
	channel_writel(atchan, DSCR, first->txd.phys);
+	channel_writel(atchan, SPIP, ATC_SPIP_HOLE(first->src_hole) |
+		       ATC_SPIP_BOUNDARY(first->boundary));
+	channel_writel(atchan, DPIP, ATC_DPIP_HOLE(first->dst_hole) |
+		       ATC_DPIP_BOUNDARY(first->boundary));
	dma_writel(atdma, CHER, atchan->mask);

	vdbg_dump_regs(atchan);
...
...
@@ -634,6 +638,104 @@ static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx)
	return cookie;
}

+/**
+ * atc_prep_dma_interleaved - prepare memory to memory interleaved operation
+ * @chan: the channel to prepare operation on
+ * @xt: Interleaved transfer template
+ * @flags: tx descriptor status flags
+ */
+static struct dma_async_tx_descriptor *
+atc_prep_dma_interleaved(struct dma_chan *chan,
+			 struct dma_interleaved_template *xt,
+			 unsigned long flags)
+{
+	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
+	struct data_chunk	*first = xt->sgl;
+	struct at_desc		*desc = NULL;
+	size_t			xfer_count;
+	unsigned int		dwidth;
+	u32			ctrla;
+	u32			ctrlb;
+	size_t			len = 0;
+	int			i;
+
+	dev_info(chan2dev(chan),
+		 "%s: src=0x%08x, dest=0x%08x, numf=%d, frame_size=%d, flags=0x%lx\n",
+		__func__, xt->src_start, xt->dst_start, xt->numf,
+		xt->frame_size, flags);
+
+	if (unlikely(!xt || xt->numf != 1 || !xt->frame_size))
+		return NULL;
+
+	/*
+	 * The controller can only "skip" X bytes every Y bytes, so we
+	 * need to make sure we are given a template that fit that
+	 * description, ie a template with chunks that always have the
+	 * same size, with the same ICGs.
+	 */
+	for (i = 0; i < xt->frame_size; i++) {
+		struct data_chunk *chunk = xt->sgl + i;
+
+		if ((chunk->size != xt->sgl->size) ||
+		    (dmaengine_get_dst_icg(xt, chunk) != dmaengine_get_dst_icg(xt, first)) ||
+		    (dmaengine_get_src_icg(xt, chunk) != dmaengine_get_src_icg(xt, first))) {
+			dev_err(chan2dev(chan),
+				"%s: the controller can transfer only identical chunks\n",
+				__func__);
+			return NULL;
+		}
+
+		len += chunk->size;
+	}
+
+	dwidth = atc_get_xfer_width(xt->src_start,
+				    xt->dst_start, len);
+
+	xfer_count = len >> dwidth;
+	if (xfer_count > ATC_BTSIZE_MAX) {
+		dev_err(chan2dev(chan), "%s: buffer is too big\n", __func__);
+		return NULL;
+	}
+
+	ctrla = ATC_SRC_WIDTH(dwidth) |
+		ATC_DST_WIDTH(dwidth);
+
+	ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN
+		| ATC_SRC_ADDR_MODE_INCR
+		| ATC_DST_ADDR_MODE_INCR
+		| ATC_SRC_PIP
+		| ATC_DST_PIP
+		| ATC_FC_MEM2MEM;
+
+	/* create the transfer */
+	desc = atc_desc_get(atchan);
+	if (!desc) {
+		dev_err(chan2dev(chan),
+			"%s: couldn't allocate our descriptor\n", __func__);
+		return NULL;
+	}
+
+	desc->lli.saddr = xt->src_start;
+	desc->lli.daddr = xt->dst_start;
+	desc->lli.ctrla = ctrla | xfer_count;
+	desc->lli.ctrlb = ctrlb;
+
+	desc->boundary = first->size >> dwidth;
+	desc->dst_hole = (dmaengine_get_dst_icg(xt, first) >> dwidth) + 1;
+	desc->src_hole = (dmaengine_get_src_icg(xt, first) >> dwidth) + 1;
+
+	desc->txd.cookie = -EBUSY;
+	desc->total_len = desc->len = len;
+	desc->tx_width = dwidth;
+
+	/* set end-of-link to the last link descriptor of list*/
+	set_desc_eol(desc);
+
+	desc->txd.flags = flags; /* client is in control of this ack */
+
+	return &desc->txd;
+}
+
/**
 * atc_prep_dma_memcpy - prepare a memcpy operation
 * @chan: the channel to prepare operation on
...
...
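Aside, not part of the diff: the loop above restricts at_hdmac to templates in which every chunk of the single frame has the same size and the same ICG on each side, since the controller can only skip a fixed number of bytes every fixed number of bytes. A minimal client-side sketch of such a template, built with the generic dmaengine API and assuming a channel that advertises DMA_INTERLEAVE, might look like the following; the function name, addresses and chunk/gap sizes are illustrative, not taken from this commit.

#include <linux/dmaengine.h>
#include <linux/slab.h>

/* Hypothetical client helper: gather four 64-byte rows out of a buffer
 * with a 256-byte pitch (64 bytes of data, 192 bytes of gap) into a
 * packed destination.  Every chunk has the same size and ICG, which is
 * exactly what atc_prep_dma_interleaved() checks for. */
static int submit_gather(struct dma_chan *chan, dma_addr_t src_phys,
			 dma_addr_t dst_phys)
{
	const int nchunks = 4;
	struct dma_interleaved_template *xt;
	struct dma_async_tx_descriptor *tx;
	int i;

	xt = kzalloc(sizeof(*xt) + nchunks * sizeof(struct data_chunk),
		     GFP_KERNEL);
	if (!xt)
		return -ENOMEM;

	xt->src_start  = src_phys;
	xt->dst_start  = dst_phys;
	xt->dir        = DMA_MEM_TO_MEM;
	xt->src_inc    = true;
	xt->dst_inc    = true;
	xt->src_sgl    = true;		/* honour the ICG on the source side */
	xt->dst_sgl    = false;		/* pack the destination contiguously */
	xt->numf       = 1;		/* at_hdmac only accepts one frame   */
	xt->frame_size = nchunks;

	for (i = 0; i < nchunks; i++) {
		xt->sgl[i].size = 64;	/* identical sizes ...                 */
		xt->sgl[i].icg  = 192;	/* ... and identical ICGs, as required */
	}

	tx = dmaengine_prep_interleaved_dma(chan, xt, DMA_PREP_INTERRUPT);
	kfree(xt);		/* this driver copies what it needs during prep */
	if (!tx)
		return -EINVAL;

	dmaengine_submit(tx);
	dma_async_issue_pending(chan);
	return 0;
}

From such a template the prep above derives the picture-in-picture parameters later written to SPIP/DPIP in atc_dostart(): the boundary is the chunk size in transfer units and each hole is the corresponding ICG in transfer units plus one.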
@@ -1609,6 +1711,7 @@ static int __init at_dma_probe(struct platform_device *pdev)
	/* setup platform data for each SoC */
	dma_cap_set(DMA_MEMCPY, at91sam9rl_config.cap_mask);
	dma_cap_set(DMA_SG, at91sam9rl_config.cap_mask);
+	dma_cap_set(DMA_INTERLEAVE, at91sam9g45_config.cap_mask);
	dma_cap_set(DMA_MEMCPY, at91sam9g45_config.cap_mask);
	dma_cap_set(DMA_SLAVE, at91sam9g45_config.cap_mask);
	dma_cap_set(DMA_SG, at91sam9g45_config.cap_mask);
...
...
@@ -1713,6 +1816,9 @@ static int __init at_dma_probe(struct platform_device *pdev)
	atdma->dma_common.dev = &pdev->dev;

	/* set prep routines based on capability */
+	if (dma_has_cap(DMA_INTERLEAVE, atdma->dma_common.cap_mask))
+		atdma->dma_common.device_prep_interleaved_dma = atc_prep_dma_interleaved;
+
	if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask))
		atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy;
...
...
drivers/dma/at_hdmac_regs.h  (View file @ 4fb9c15b)
...
...
@@ -196,6 +196,11 @@ struct at_desc {
	size_t			len;
	u32			tx_width;
	size_t			total_len;
+
+	/* Interleaved data */
+	size_t			boundary;
+	size_t			dst_hole;
+	size_t			src_hole;
};

static inline struct at_desc *
...
...
drivers/dma/at_xdmac.c  (View file @ 4fb9c15b)
...
...
@@ -236,6 +236,10 @@ struct at_xdmac_lld {
	dma_addr_t	mbr_sa;		/* Source Address Member */
	dma_addr_t	mbr_da;		/* Destination Address Member */
	u32		mbr_cfg;	/* Configuration Register */
+	u32		mbr_bc;		/* Block Control Register */
+	u32		mbr_ds;		/* Data Stride Register */
+	u32		mbr_sus;	/* Source Microblock Stride Register */
+	u32		mbr_dus;	/* Destination Microblock Stride Register */
};
...
...
@@ -359,6 +363,8 @@ static void at_xdmac_start_xfer(struct at_xdmac_chan *atchan,
	if (at_xdmac_chan_is_cyclic(atchan)) {
		reg = AT_XDMAC_CNDC_NDVIEW_NDV1;
		at_xdmac_chan_write(atchan, AT_XDMAC_CC, first->lld.mbr_cfg);
+	} else if (first->lld.mbr_ubc & AT_XDMAC_MBR_UBC_NDV3) {
+		reg = AT_XDMAC_CNDC_NDVIEW_NDV3;
	} else {
		/*
		 * No need to write AT_XDMAC_CC reg, it will be done when the
...
...
@@ -465,6 +471,33 @@ static struct at_xdmac_desc *at_xdmac_get_desc(struct at_xdmac_chan *atchan)
	return desc;
}

+static void at_xdmac_queue_desc(struct dma_chan *chan,
+				struct at_xdmac_desc *prev,
+				struct at_xdmac_desc *desc)
+{
+	if (!prev || !desc)
+		return;
+
+	prev->lld.mbr_nda = desc->tx_dma_desc.phys;
+	prev->lld.mbr_ubc |= AT_XDMAC_MBR_UBC_NDE;
+
+	dev_dbg(chan2dev(chan), "%s: chain lld: prev=0x%p, mbr_nda=%pad\n",
+		__func__, prev, &prev->lld.mbr_nda);
+}
+
+static inline void at_xdmac_increment_block_count(struct dma_chan *chan,
+						  struct at_xdmac_desc *desc)
+{
+	if (!desc)
+		return;
+
+	desc->lld.mbr_bc++;
+
+	dev_dbg(chan2dev(chan),
+		"%s: incrementing the block count of the desc 0x%p\n",
+		__func__, desc);
+}
+
static struct dma_chan *at_xdmac_xlate(struct of_phandle_args *dma_spec,
				       struct of_dma *of_dma)
{
...
...
@@ -621,19 +654,14 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV2		/* next descriptor view */
			| AT_XDMAC_MBR_UBC_NDEN				/* next descriptor dst parameter update */
			| AT_XDMAC_MBR_UBC_NSEN				/* next descriptor src parameter update */
			| (i == sg_len - 1 ? 0 : AT_XDMAC_MBR_UBC_NDE)	/* descriptor fetch */
			| (len >> fixed_dwidth);			/* microblock length */
		dev_dbg(chan2dev(chan),
			 "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n",
			 __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc);

		/* Chain lld. */
-		if (prev) {
-			prev->lld.mbr_nda = desc->tx_dma_desc.phys;
-			dev_dbg(chan2dev(chan),
-				 "%s: chain lld: prev=0x%p, mbr_nda=%pad\n",
-				 __func__, prev, &prev->lld.mbr_nda);
-		}
+		if (prev)
+			at_xdmac_queue_desc(chan, prev, desc);

		prev = desc;
		if (!first)
...
...
@@ -708,7 +736,6 @@ at_xdmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
		desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV1
			| AT_XDMAC_MBR_UBC_NDEN
			| AT_XDMAC_MBR_UBC_NSEN
			| AT_XDMAC_MBR_UBC_NDE
			| period_len >> at_xdmac_get_dwidth(desc->lld.mbr_cfg);

		dev_dbg(chan2dev(chan),
...
...
@@ -716,12 +743,8 @@ at_xdmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
			 __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc);

		/* Chain lld. */
-		if (prev) {
-			prev->lld.mbr_nda = desc->tx_dma_desc.phys;
-			dev_dbg(chan2dev(chan),
-				 "%s: chain lld: prev=0x%p, mbr_nda=%pad\n",
-				 __func__, prev, &prev->lld.mbr_nda);
-		}
+		if (prev)
+			at_xdmac_queue_desc(chan, prev, desc);

		prev = desc;
		if (!first)
...
@@ -743,6 +766,215 @@ at_xdmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
	return &first->tx_dma_desc;
}

+static inline u32 at_xdmac_align_width(struct dma_chan *chan, dma_addr_t addr)
+{
+	u32 width;
+
+	/*
+	 * Check address alignment to select the greater data width we
+	 * can use.
+	 *
+	 * Some XDMAC implementations don't provide dword transfer, in
+	 * this case selecting dword has the same behavior as
+	 * selecting word transfers.
+	 */
+	if (!(addr & 7)) {
+		width = AT_XDMAC_CC_DWIDTH_DWORD;
+		dev_dbg(chan2dev(chan), "%s: dwidth: double word\n", __func__);
+	} else if (!(addr & 3)) {
+		width = AT_XDMAC_CC_DWIDTH_WORD;
+		dev_dbg(chan2dev(chan), "%s: dwidth: word\n", __func__);
+	} else if (!(addr & 1)) {
+		width = AT_XDMAC_CC_DWIDTH_HALFWORD;
+		dev_dbg(chan2dev(chan), "%s: dwidth: half word\n", __func__);
+	} else {
+		width = AT_XDMAC_CC_DWIDTH_BYTE;
+		dev_dbg(chan2dev(chan), "%s: dwidth: byte\n", __func__);
+	}
+
+	return width;
+}
+
+static struct at_xdmac_desc *
+at_xdmac_interleaved_queue_desc(struct dma_chan *chan,
+				struct at_xdmac_chan *atchan,
+				struct at_xdmac_desc *prev,
+				dma_addr_t src, dma_addr_t dst,
+				struct dma_interleaved_template *xt,
+				struct data_chunk *chunk)
+{
+	struct at_xdmac_desc	*desc;
+	u32			dwidth;
+	unsigned long		flags;
+	size_t			ublen;
+	/*
+	 * WARNING: The channel configuration is set here since there is no
+	 * dmaengine_slave_config call in this case. Moreover we don't know the
+	 * direction, it involves we can't dynamically set the source and dest
+	 * interface so we have to use the same one. Only interface 0 allows EBI
+	 * access. Hopefully we can access DDR through both ports (at least on
+	 * SAMA5D4x), so we can use the same interface for source and dest,
+	 * that solves the fact we don't know the direction.
+	 */
+	u32			chan_cc = AT_XDMAC_CC_DIF(0)
+					| AT_XDMAC_CC_SIF(0)
+					| AT_XDMAC_CC_MBSIZE_SIXTEEN
+					| AT_XDMAC_CC_TYPE_MEM_TRAN;
+
+	dwidth = at_xdmac_align_width(chan, src | dst | chunk->size);
+	if (chunk->size >= (AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth)) {
+		dev_dbg(chan2dev(chan),
+			"%s: chunk too big (%d, max size %lu)...\n",
+			__func__, chunk->size,
+			AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth);
+		return NULL;
+	}
+
+	if (prev)
+		dev_dbg(chan2dev(chan),
+			"Adding items at the end of desc 0x%p\n", prev);
+
+	if (xt->src_inc) {
+		if (xt->src_sgl)
+			chan_cc |= AT_XDMAC_CC_SAM_UBS_DS_AM;
+		else
+			chan_cc |= AT_XDMAC_CC_SAM_INCREMENTED_AM;
+	}
+
+	if (xt->dst_inc) {
+		if (xt->dst_sgl)
+			chan_cc |= AT_XDMAC_CC_DAM_UBS_DS_AM;
+		else
+			chan_cc |= AT_XDMAC_CC_DAM_INCREMENTED_AM;
+	}
+
+	spin_lock_irqsave(&atchan->lock, flags);
+	desc = at_xdmac_get_desc(atchan);
+	spin_unlock_irqrestore(&atchan->lock, flags);
+	if (!desc) {
+		dev_err(chan2dev(chan), "can't get descriptor\n");
+		return NULL;
+	}
+
+	chan_cc |= AT_XDMAC_CC_DWIDTH(dwidth);
+
+	ublen = chunk->size >> dwidth;
+
+	desc->lld.mbr_sa = src;
+	desc->lld.mbr_da = dst;
+	desc->lld.mbr_sus = dmaengine_get_src_icg(xt, chunk);
+	desc->lld.mbr_dus = dmaengine_get_dst_icg(xt, chunk);
+
+	desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV3
+		| AT_XDMAC_MBR_UBC_NDEN
+		| AT_XDMAC_MBR_UBC_NSEN
+		| ublen;
+	desc->lld.mbr_cfg = chan_cc;
+
+	dev_dbg(chan2dev(chan),
+		"%s: lld: mbr_sa=0x%08x, mbr_da=0x%08x, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n",
+		__func__, desc->lld.mbr_sa, desc->lld.mbr_da,
+		desc->lld.mbr_ubc, desc->lld.mbr_cfg);
+
+	/* Chain lld. */
+	if (prev)
+		at_xdmac_queue_desc(chan, prev, desc);
+
+	return desc;
+}
+
+static struct dma_async_tx_descriptor *
+at_xdmac_prep_interleaved(struct dma_chan *chan,
+			  struct dma_interleaved_template *xt,
+			  unsigned long flags)
+{
+	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
+	struct at_xdmac_desc	*prev = NULL, *first = NULL;
+	struct data_chunk	*chunk, *prev_chunk = NULL;
+	dma_addr_t		dst_addr, src_addr;
+	size_t			dst_skip, src_skip, len = 0;
+	size_t			prev_dst_icg = 0, prev_src_icg = 0;
+	int			i;
+
+	if (!xt || (xt->numf != 1) || (xt->dir != DMA_MEM_TO_MEM))
+		return NULL;
+
+	dev_dbg(chan2dev(chan),
+		"%s: src=0x%08x, dest=0x%08x, numf=%d, frame_size=%d, flags=0x%lx\n",
+		__func__, xt->src_start, xt->dst_start, xt->numf,
+		xt->frame_size, flags);
+
+	src_addr = xt->src_start;
+	dst_addr = xt->dst_start;
+
+	for (i = 0; i < xt->frame_size; i++) {
+		struct at_xdmac_desc *desc;
+		size_t src_icg, dst_icg;
+
+		chunk = xt->sgl + i;
+
+		dst_icg = dmaengine_get_dst_icg(xt, chunk);
+		src_icg = dmaengine_get_src_icg(xt, chunk);
+
+		src_skip = chunk->size + src_icg;
+		dst_skip = chunk->size + dst_icg;
+
+		dev_dbg(chan2dev(chan),
+			"%s: chunk size=%d, src icg=%d, dst icg=%d\n",
+			__func__, chunk->size, src_icg, dst_icg);
+
+		/*
+		 * Handle the case where we just have the same
+		 * transfer to setup, we can just increase the
+		 * block number and reuse the same descriptor.
+		 */
+		if (prev_chunk && prev &&
+		    (prev_chunk->size == chunk->size) &&
+		    (prev_src_icg == src_icg) &&
+		    (prev_dst_icg == dst_icg)) {
+			dev_dbg(chan2dev(chan),
+				"%s: same configuration that the previous chunk, merging the descriptors...\n",
+				__func__);
+			at_xdmac_increment_block_count(chan, prev);
+			continue;
+		}
+
+		desc = at_xdmac_interleaved_queue_desc(chan, atchan,
+						       prev,
+						       src_addr, dst_addr,
+						       xt, chunk);
+		if (!desc) {
+			list_splice_init(&first->descs_list,
+					 &atchan->free_descs_list);
+			return NULL;
+		}
+
+		if (!first)
+			first = desc;
+
+		dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
+			__func__, desc, first);
+		list_add_tail(&desc->desc_node, &first->descs_list);
+
+		if (xt->src_sgl)
+			src_addr += src_skip;
+
+		if (xt->dst_sgl)
+			dst_addr += dst_skip;
+
+		len += chunk->size;
+		prev_chunk = chunk;
+		prev_dst_icg = dst_icg;
+		prev_src_icg = src_icg;
+		prev = desc;
+	}
+
+	first->tx_dma_desc.cookie = -EBUSY;
+	first->tx_dma_desc.flags = flags;
+	first->xfer_size = len;
+
+	return &first->tx_dma_desc;
+}
+
static struct dma_async_tx_descriptor *
at_xdmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
			 size_t len, unsigned long flags)
...
...
@@ -773,24 +1005,7 @@ at_xdmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
	if (unlikely(!len))
		return NULL;

-	/*
-	 * Check address alignment to select the greater data width we can use.
-	 * Some XDMAC implementations don't provide dword transfer, in this
-	 * case selecting dword has the same behavior as selecting word transfers.
-	 */
-	if (!((src_addr | dst_addr) & 7)) {
-		dwidth = AT_XDMAC_CC_DWIDTH_DWORD;
-		dev_dbg(chan2dev(chan), "%s: dwidth: double word\n", __func__);
-	} else if (!((src_addr | dst_addr) & 3)) {
-		dwidth = AT_XDMAC_CC_DWIDTH_WORD;
-		dev_dbg(chan2dev(chan), "%s: dwidth: word\n", __func__);
-	} else if (!((src_addr | dst_addr) & 1)) {
-		dwidth = AT_XDMAC_CC_DWIDTH_HALFWORD;
-		dev_dbg(chan2dev(chan), "%s: dwidth: half word\n", __func__);
-	} else {
-		dwidth = AT_XDMAC_CC_DWIDTH_BYTE;
-		dev_dbg(chan2dev(chan), "%s: dwidth: byte\n", __func__);
-	}
+	dwidth = at_xdmac_align_width(chan, src_addr | dst_addr);

	/* Prepare descriptors. */
	while (remaining_size) {
...
...
@@ -820,19 +1035,8 @@ at_xdmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		dev_dbg(chan2dev(chan), "%s: xfer_size=%zu\n", __func__, xfer_size);

		/* Check remaining length and change data width if needed. */
-		if (!((src_addr | dst_addr | xfer_size) & 7)) {
-			dwidth = AT_XDMAC_CC_DWIDTH_DWORD;
-			dev_dbg(chan2dev(chan), "%s: dwidth: double word\n", __func__);
-		} else if (!((src_addr | dst_addr | xfer_size) & 3)) {
-			dwidth = AT_XDMAC_CC_DWIDTH_WORD;
-			dev_dbg(chan2dev(chan), "%s: dwidth: word\n", __func__);
-		} else if (!((src_addr | dst_addr | xfer_size) & 1)) {
-			dwidth = AT_XDMAC_CC_DWIDTH_HALFWORD;
-			dev_dbg(chan2dev(chan), "%s: dwidth: half word\n", __func__);
-		} else if ((src_addr | dst_addr | xfer_size) & 1) {
-			dwidth = AT_XDMAC_CC_DWIDTH_BYTE;
-			dev_dbg(chan2dev(chan), "%s: dwidth: byte\n", __func__);
-		}
+		dwidth = at_xdmac_align_width(chan,
+					      src_addr | dst_addr | xfer_size);
		chan_cc |= AT_XDMAC_CC_DWIDTH(dwidth);

		ublen = xfer_size >> dwidth;
...
...
@@ -843,7 +1047,6 @@ at_xdmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV2
			| AT_XDMAC_MBR_UBC_NDEN
			| AT_XDMAC_MBR_UBC_NSEN
			| (remaining_size ? AT_XDMAC_MBR_UBC_NDE : 0)
			| ublen;
		desc->lld.mbr_cfg = chan_cc;
...
...
@@ -852,12 +1055,8 @@ at_xdmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
			 __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc, desc->lld.mbr_cfg);

		/* Chain lld. */
-		if (prev) {
-			prev->lld.mbr_nda = desc->tx_dma_desc.phys;
-			dev_dbg(chan2dev(chan),
-				 "%s: chain lld: prev=0x%p, mbr_nda=0x%08x\n",
-				 __func__, prev, prev->lld.mbr_nda);
-		}
+		if (prev)
+			at_xdmac_queue_desc(chan, prev, desc);

		prev = desc;
		if (!first)
...
...
@@ -1398,6 +1597,7 @@ static int at_xdmac_probe(struct platform_device *pdev)
	}
	dma_cap_set(DMA_CYCLIC, atxdmac->dma.cap_mask);
+	dma_cap_set(DMA_INTERLEAVE, atxdmac->dma.cap_mask);
	dma_cap_set(DMA_MEMCPY, atxdmac->dma.cap_mask);
	dma_cap_set(DMA_SLAVE, atxdmac->dma.cap_mask);
	/*
...
...
@@ -1411,6 +1611,7 @@ static int at_xdmac_probe(struct platform_device *pdev)
	atxdmac->dma.device_tx_status		= at_xdmac_tx_status;
	atxdmac->dma.device_issue_pending	= at_xdmac_issue_pending;
	atxdmac->dma.device_prep_dma_cyclic	= at_xdmac_prep_dma_cyclic;
+	atxdmac->dma.device_prep_interleaved_dma = at_xdmac_prep_interleaved;
	atxdmac->dma.device_prep_dma_memcpy	= at_xdmac_prep_dma_memcpy;
	atxdmac->dma.device_prep_slave_sg	= at_xdmac_prep_slave_sg;
	atxdmac->dma.device_config		= at_xdmac_device_config;
...
...
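Aside, not part of the diff: unlike at_hdmac, the at_xdmac prep walks the frame chunk by chunk and only allocates a new linked-list descriptor when a chunk's size or ICGs differ from those of the previous one; identical neighbours are folded into the previous descriptor via at_xdmac_increment_block_count(). A hypothetical chunk array (illustrative values, assuming a DMA_MEM_TO_MEM template with numf == 1) that exercises this:

#include <linux/dmaengine.h>

/* Three identical chunks followed by a different one: the prep would
 * build two hardware descriptors, the first covering the three identical
 * chunks with its lld.mbr_bc bumped twice, the second covering the rest. */
static struct data_chunk example_sgl[] = {
	{ .size = 32, .icg = 96 },
	{ .size = 32, .icg = 96 },	/* merged into the previous descriptor */
	{ .size = 32, .icg = 96 },	/* merged again                        */
	{ .size = 16, .icg = 48 },	/* different size: new descriptor      */
};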
include/linux/dmaengine.h  (View file @ 4fb9c15b)
...
...
@@ -923,6 +923,33 @@ static inline int dma_maxpq(struct dma_device *dma, enum dma_ctrl_flags flags)
	BUG();
}

+static inline size_t dmaengine_get_icg(bool inc, bool sgl, size_t icg,
+				       size_t dir_icg)
+{
+	if (inc) {
+		if (dir_icg)
+			return dir_icg;
+		else if (sgl)
+			return icg;
+	}
+
+	return 0;
+}
+
+static inline size_t dmaengine_get_dst_icg(struct dma_interleaved_template *xt,
+					   struct data_chunk *chunk)
+{
+	return dmaengine_get_icg(xt->dst_inc, xt->dst_sgl,
+				 chunk->icg, chunk->dst_icg);
+}
+
+static inline size_t dmaengine_get_src_icg(struct dma_interleaved_template *xt,
+					   struct data_chunk *chunk)
+{
+	return dmaengine_get_icg(xt->src_inc, xt->src_sgl,
+				 chunk->icg, chunk->src_icg);
+}
+
/* --- public DMA engine API --- */

#ifdef CONFIG_DMA_ENGINE
...
...
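Aside, not part of the diff: the helpers above resolve the gap for one side of a transfer with a simple precedence rule: a non-zero direction-specific field (dst_icg or src_icg) wins, otherwise the shared icg is used when the matching *_sgl flag is set, and the result is zero whenever that side does not increment at all. A small illustration with hypothetical values:

#include <linux/dmaengine.h>

/* Illustrative values only, showing dmaengine_get_{dst,src}_icg() precedence. */
static struct data_chunk chunk = { .size = 64, .icg = 32, .dst_icg = 128 };
static struct dma_interleaved_template xt = {
	.dst_inc = true, .dst_sgl = true,	/* destination skips gaps  */
	.src_inc = true, .src_sgl = false,	/* source stays contiguous */
};

static void icg_example(void)
{
	/* dst_icg is non-zero, so it overrides the shared icg: returns 128 */
	size_t dst_gap = dmaengine_get_dst_icg(&xt, &chunk);
	/* src_icg is 0 and src_sgl is false, so the source gap is 0 */
	size_t src_gap = dmaengine_get_src_icg(&xt, &chunk);

	(void)dst_gap;
	(void)src_gap;
}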