Commit 3280ab3e
authored Mar 13, 2008 by Dan Williams
async_tx: checkpatch says s/__FUNCTION__/__func__/g
Signed-off-by: Dan Williams <dan.j.williams@intel.com>

parent 3d9b525b
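Background (not part of the patch itself): checkpatch.pl flags __FUNCTION__ because it is a GCC-specific spelling, while __func__ is the C99 predefined identifier for the name of the enclosing function. A minimal, self-contained sketch of the preferred form; the demo_log_len() helper below is hypothetical and used only for illustration, it mirrors the pr_debug("%s: ...", __func__, ...) pattern touched by this commit:

#include <stdio.h>
#include <stddef.h>

/* __func__ expands to the name of the enclosing function,
 * here "demo_log_len". */
static void demo_log_len(size_t len)
{
	printf("%s: len: %zu\n", __func__, len);
}

int main(void)
{
	demo_log_len(4096);	/* prints: demo_log_len: len: 4096 */
	return 0;
}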
Showing 4 changed files with 15 additions and 15 deletions
crypto/async_tx/async_memcpy.c   +3  -3
crypto/async_tx/async_memset.c   +3  -3
crypto/async_tx/async_tx.c       +3  -3
crypto/async_tx/async_xor.c      +6  -6
crypto/async_tx/async_memcpy.c

@@ -66,11 +66,11 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
 	}
 
 	if (tx) {
-		pr_debug("%s: (async) len: %zu\n", __FUNCTION__, len);
+		pr_debug("%s: (async) len: %zu\n", __func__, len);
 		async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);
 	} else {
 		void *dest_buf, *src_buf;
-		pr_debug("%s: (sync) len: %zu\n", __FUNCTION__, len);
+		pr_debug("%s: (sync) len: %zu\n", __func__, len);
 
 		/* wait for any prerequisite operations */
 		if (depend_tx) {
@@ -80,7 +80,7 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
 			BUG_ON(depend_tx->ack);
 			if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR)
 				panic("%s: DMA_ERROR waiting for depend_tx\n",
-					__FUNCTION__);
+					__func__);
 		}
 
 		dest_buf = kmap_atomic(dest, KM_USER0) + dest_offset;
crypto/async_tx/async_memset.c

@@ -63,11 +63,11 @@ async_memset(struct page *dest, int val, unsigned int offset,
 	}
 
 	if (tx) {
-		pr_debug("%s: (async) len: %zu\n", __FUNCTION__, len);
+		pr_debug("%s: (async) len: %zu\n", __func__, len);
 		async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);
 	} else { /* run the memset synchronously */
 		void *dest_buf;
-		pr_debug("%s: (sync) len: %zu\n", __FUNCTION__, len);
+		pr_debug("%s: (sync) len: %zu\n", __func__, len);
 
 		dest_buf = (void *) (((char *) page_address(dest)) + offset);
@@ -79,7 +79,7 @@ async_memset(struct page *dest, int val, unsigned int offset,
 			BUG_ON(depend_tx->ack);
 			if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR)
 				panic("%s: DMA_ERROR waiting for depend_tx\n",
-					__FUNCTION__);
+					__func__);
 		}
 
 		memset(dest_buf, val, len);
crypto/async_tx/async_tx.c

@@ -472,11 +472,11 @@ async_trigger_callback(enum async_tx_flags flags,
 		tx = NULL;
 
 	if (tx) {
-		pr_debug("%s: (async)\n", __FUNCTION__);
+		pr_debug("%s: (async)\n", __func__);
 		async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);
 	} else {
-		pr_debug("%s: (sync)\n", __FUNCTION__);
+		pr_debug("%s: (sync)\n", __func__);
 
 		/* wait for any prerequisite operations */
 		if (depend_tx) {
@@ -486,7 +486,7 @@ async_trigger_callback(enum async_tx_flags flags,
 			BUG_ON(depend_tx->ack);
 			if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR)
 				panic("%s: DMA_ERROR waiting for depend_tx\n",
-					__FUNCTION__);
+					__func__);
 		}
 
 		async_tx_sync_epilog(flags, depend_tx, cb_fn, cb_param);
crypto/async_tx/async_xor.c

@@ -47,7 +47,7 @@ do_async_xor(struct dma_device *device,
 	int i;
 	unsigned long dma_prep_flags = cb_fn ? DMA_PREP_INTERRUPT : 0;
 
-	pr_debug("%s: len: %zu\n", __FUNCTION__, len);
+	pr_debug("%s: len: %zu\n", __func__, len);
 
 	dma_dest = dma_map_page(device->dev, dest, offset, len,
 				DMA_FROM_DEVICE);
@@ -86,7 +86,7 @@ do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset,
 	void *_dest;
 	int i;
 
-	pr_debug("%s: len: %zu\n", __FUNCTION__, len);
+	pr_debug("%s: len: %zu\n", __func__, len);
 
 	/* reuse the 'src_list' array to convert to buffer pointers */
 	for (i = 0; i < src_cnt; i++)
@@ -196,7 +196,7 @@ async_xor(struct page *dest, struct page **src_list, unsigned int offset,
 					DMA_ERROR)
 				panic("%s: DMA_ERROR waiting for "
 					"depend_tx\n",
-					__FUNCTION__);
+					__func__);
 		}
 
 		do_sync_xor(dest, &src_list[src_off], offset,
@@ -276,7 +276,7 @@ async_xor_zero_sum(struct page *dest, struct page **src_list,
 		unsigned long dma_prep_flags = cb_fn ? DMA_PREP_INTERRUPT : 0;
 		int i;
 
-		pr_debug("%s: (async) len: %zu\n", __FUNCTION__, len);
+		pr_debug("%s: (async) len: %zu\n", __func__, len);
 
 		for (i = 0; i < src_cnt; i++)
 			dma_src[i] = dma_map_page(device->dev, src_list[i],
@@ -299,7 +299,7 @@ async_xor_zero_sum(struct page *dest, struct page **src_list,
 	} else {
 		unsigned long xor_flags = flags;
 
-		pr_debug("%s: (sync) len: %zu\n", __FUNCTION__, len);
+		pr_debug("%s: (sync) len: %zu\n", __func__, len);
 
 		xor_flags |= ASYNC_TX_XOR_DROP_DST;
 		xor_flags &= ~ASYNC_TX_ACK;
@@ -310,7 +310,7 @@ async_xor_zero_sum(struct page *dest, struct page **src_list,
 		if (tx) {
 			if (dma_wait_for_async_tx(tx) == DMA_ERROR)
 				panic("%s: DMA_ERROR waiting for tx\n",
-					__FUNCTION__);
+					__func__);
 
 			async_tx_ack(tx);
 		}