Commit 9324fdf5 authored Jun 25, 2015 by Vinod Koul
Merge branch 'topic/core' into for-linus
Parents: 4983a501 5f88d970
Showing 4 changed files with 39 additions and 12 deletions:
Documentation/dmaengine/provider.txt   +6  -5
drivers/dma/virt-dma.c                +13  -6
drivers/dma/virt-dma.h                +12  -1
include/linux/dmaengine.h              +8  -0
Documentation/dmaengine/provider.txt
@@ -345,11 +345,12 @@ where to put them)
     that abstracts it away.
 
   * DMA_CTRL_ACK
-    - Undocumented feature
-    - No one really has an idea of what it's about, besides being
-      related to reusing the DMA transaction descriptors or having
-      additional transactions added to it in the async-tx API
-    - Useless in the case of the slave API
+    - If set, the transfer can be reused after being completed.
+    - There is a guarantee the transfer won't be freed until it is acked
+      by async_tx_ack().
+    - As a consequence, if a device driver wants to skip the dma_map_sg() and
+      dma_unmap_sg() in between 2 transfers, because the DMA'd data wasn't used,
+      it can resubmit the transfer right after its completion.
 
 General Design Notes
 --------------------
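The rewritten text implies a concrete pattern for slave drivers. A minimal
sketch of that reuse, assuming an already-requested channel chan and a mapped
scatterlist sgl/nents (assumed context, not part of this commit):

	struct dma_async_tx_descriptor *txd;

	/* Mark the descriptor reusable up front via DMA_CTRL_ACK. */
	txd = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_DEV_TO_MEM,
				      DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	dmaengine_submit(txd);
	dma_async_issue_pending(chan);

	/* ... transfer completes; the DMA'd data turns out to be unused ... */

	/*
	 * The acked descriptor was not freed on completion, so it can be
	 * resubmitted as-is, skipping the dma_unmap_sg()/dma_map_sg() round
	 * trip between the two transfers.
	 */
	dmaengine_submit(txd);
	dma_async_issue_pending(chan);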
drivers/dma/virt-dma.c
@@ -29,7 +29,7 @@ dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *tx)
 	spin_lock_irqsave(&vc->lock, flags);
 	cookie = dma_cookie_assign(tx);
 
-	list_add_tail(&vd->node, &vc->desc_submitted);
+	list_move_tail(&vd->node, &vc->desc_submitted);
 	spin_unlock_irqrestore(&vc->lock, flags);
 
 	dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: submitted\n",
@@ -83,8 +83,10 @@ static void vchan_complete(unsigned long arg)
 		cb = vd->tx.callback;
 		cb_data = vd->tx.callback_param;
 
-		list_del(&vd->node);
-		vc->desc_free(vd);
+		if (async_tx_test_ack(&vd->tx))
+			list_add(&vd->node, &vc->desc_allocated);
+		else
+			vc->desc_free(vd);
 
 		if (cb)
 			cb(cb_data);
@@ -96,9 +98,13 @@ void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head)
 	while (!list_empty(head)) {
 		struct virt_dma_desc *vd = list_first_entry(head,
			struct virt_dma_desc, node);
-		list_del(&vd->node);
-		dev_dbg(vc->chan.device->dev, "txd %p: freeing\n", vd);
-		vc->desc_free(vd);
+		if (async_tx_test_ack(&vd->tx)) {
+			list_move_tail(&vd->node, &vc->desc_allocated);
+		} else {
+			dev_dbg(vc->chan.device->dev, "txd %p: freeing\n", vd);
+			list_del(&vd->node);
+			vc->desc_free(vd);
+		}
 	}
 }
 EXPORT_SYMBOL_GPL(vchan_dma_desc_free_list);
@@ -108,6 +114,7 @@ void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev)
 	dma_cookie_init(&vc->chan);
 
 	spin_lock_init(&vc->lock);
+	INIT_LIST_HEAD(&vc->desc_allocated);
 	INIT_LIST_HEAD(&vc->desc_submitted);
 	INIT_LIST_HEAD(&vc->desc_issued);
 	INIT_LIST_HEAD(&vc->desc_completed);
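Taken together, the virt-dma changes give descriptors an explicit "allocated"
stage and make completion behaviour depend on the ack state. A rough summary
of the list transitions after this commit, assuming the usual virt-dma flow
(vchan_issue_pending() and vchan_cookie_complete() are the existing helpers
driving the middle steps):

	/*
	 * vchan_tx_prep()          -> vc->desc_allocated  (new in this commit)
	 * vchan_tx_submit()        -> vc->desc_submitted  (list_move_tail,
	 *                             previously list_add_tail)
	 * vchan_issue_pending()    -> vc->desc_issued
	 * vchan_cookie_complete()  -> vc->desc_completed
	 * vchan_complete(): acked descriptors are parked back on
	 * vc->desc_allocated for reuse; unacked ones are freed as before.
	 */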
drivers/dma/virt-dma.h
@@ -29,6 +29,7 @@ struct virt_dma_chan {
 	spinlock_t lock;
 
 	/* protected by vc.lock */
+	struct list_head desc_allocated;
 	struct list_head desc_submitted;
 	struct list_head desc_issued;
 	struct list_head desc_completed;
@@ -55,11 +56,16 @@ static inline struct dma_async_tx_descriptor *vchan_tx_prep(struct virt_dma_chan
 static inline struct dma_async_tx_descriptor *vchan_tx_prep(struct virt_dma_chan *vc,
 	struct virt_dma_desc *vd, unsigned long tx_flags)
 {
 	extern dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *);
+	unsigned long flags;
 
 	dma_async_tx_descriptor_init(&vd->tx, &vc->chan);
 	vd->tx.flags = tx_flags;
 	vd->tx.tx_submit = vchan_tx_submit;
 
+	spin_lock_irqsave(&vc->lock, flags);
+	list_add_tail(&vd->node, &vc->desc_allocated);
+	spin_unlock_irqrestore(&vc->lock, flags);
+
 	return &vd->tx;
 }
@@ -122,7 +128,8 @@ static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc)
 }
 
 /**
- * vchan_get_all_descriptors - obtain all submitted and issued descriptors
+ * vchan_get_all_descriptors - obtain all allocated, submitted and issued
+ * descriptors
  * vc: virtual channel to get descriptors from
 * head: list of descriptors found
 *
@@ -134,6 +141,7 @@ static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc)
 static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc,
 	struct list_head *head)
 {
+	list_splice_tail_init(&vc->desc_allocated, head);
 	list_splice_tail_init(&vc->desc_submitted, head);
 	list_splice_tail_init(&vc->desc_issued, head);
 	list_splice_tail_init(&vc->desc_completed, head);
@@ -141,11 +149,14 @@ static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc,
 static inline void vchan_free_chan_resources(struct virt_dma_chan *vc)
 {
+	struct virt_dma_desc *vd;
 	unsigned long flags;
 	LIST_HEAD(head);
 
 	spin_lock_irqsave(&vc->lock, flags);
 	vchan_get_all_descriptors(vc, &head);
+	list_for_each_entry(vd, &head, node)
+		async_tx_clear_ack(&vd->tx);
 	spin_unlock_irqrestore(&vc->lock, flags);
 
 	vchan_dma_desc_free_list(vc, &head);
 }
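Because acked descriptors now survive vchan_dma_desc_free_list(), channel
teardown has to strip the ack first, which is exactly what the new
list_for_each_entry()/async_tx_clear_ack() pass above does. A minimal sketch
of a provider's free_chan_resources callback built on this helper
(my_free_chan_resources is a hypothetical name; to_virt_chan() is the
existing virt-dma.h container_of helper):

	static void my_free_chan_resources(struct dma_chan *chan)
	{
		struct virt_dma_chan *vc = to_virt_chan(chan);

		/*
		 * Clears DMA_CTRL_ACK on every collected descriptor before
		 * freeing, so even "reusable" descriptors are really freed
		 * rather than parked back on desc_allocated.
		 */
		vchan_free_chan_resources(vc);
	}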
include/linux/dmaengine.h
@@ -123,10 +123,18 @@ enum dma_transfer_direction {
  * chunk and before first src/dst address for next chunk.
  * Ignored for dst(assumed 0), if dst_inc is true and dst_sgl is false.
  * Ignored for src(assumed 0), if src_inc is true and src_sgl is false.
+ * @dst_icg: Number of bytes to jump after last dst address of this
+ *	 chunk and before the first dst address for next chunk.
+ *	 Ignored if dst_inc is true and dst_sgl is false.
+ * @src_icg: Number of bytes to jump after last src address of this
+ *	 chunk and before the first src address for next chunk.
+ *	 Ignored if src_inc is true and src_sgl is false.
  */
 struct data_chunk {
 	size_t size;
 	size_t icg;
+	size_t dst_icg;
+	size_t src_icg;
 };
 
 /**
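The new fields matter when the source and destination strides of an
interleaved transfer differ, for example when copying a sub-rectangle between
two 2D buffers. A sketch under assumed values (src_phys/dst_phys, WIDTH,
HEIGHT and the two strides are hypothetical; the pre-existing icg field still
covers the symmetric case):

	struct dma_interleaved_template *xt;
	struct dma_async_tx_descriptor *txd;

	xt = kzalloc(sizeof(*xt) + sizeof(struct data_chunk), GFP_KERNEL);
	xt->src_start = src_phys;
	xt->dst_start = dst_phys;
	xt->dir = DMA_MEM_TO_MEM;
	xt->src_inc = true;
	xt->dst_inc = true;
	xt->src_sgl = true;			/* honour src_icg below */
	xt->dst_sgl = true;			/* honour dst_icg below */
	xt->numf = HEIGHT;			/* one frame per row */
	xt->frame_size = 1;			/* a single chunk per frame */
	xt->sgl[0].size = WIDTH;		/* bytes copied per row */
	xt->sgl[0].src_icg = SRC_STRIDE - WIDTH;	/* read-side gap */
	xt->sgl[0].dst_icg = DST_STRIDE - WIDTH;	/* write-side gap */

	txd = dmaengine_prep_interleaved_dma(chan, xt, DMA_PREP_INTERRUPT);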