Commit 3216a97b
Authored Sep 25, 2008 by Russell King
Committed by Russell King, Sep 29, 2008

[ARM] dma: coding style cleanups
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>

Parent: 125ab12a
Showing 2 changed files with 64 additions and 88 deletions:

    arch/arm/common/dmabounce.c            +16  -32
    arch/arm/include/asm/dma-mapping.h     +48  -56
arch/arm/common/dmabounce.c
@@ -154,9 +154,7 @@ alloc_safe_buffer(struct dmabounce_device_info *device_info, void *ptr,
 #endif

 	write_lock_irqsave(&device_info->lock, flags);
 	list_add(&buf->node, &device_info->safe_buffers);
 	write_unlock_irqrestore(&device_info->lock, flags);

 	return buf;
@@ -220,8 +218,7 @@ static struct safe_buffer *find_safe_buffer_dev(struct device *dev,
 	return find_safe_buffer(dev->archdata.dmabounce, dma_addr);
 }

 static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
 		enum dma_data_direction dir)
 {
 	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
@@ -285,9 +282,8 @@ map_single(struct device *dev, void *ptr, size_t size,
 	return dma_addr;
 }

 static inline void unmap_single(struct device *dev, dma_addr_t dma_addr,
 		size_t size, enum dma_data_direction dir)
 {
 	struct safe_buffer *buf = find_safe_buffer_dev(dev, dma_addr, "unmap");
@@ -332,25 +328,20 @@ unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
  * substitute the safe buffer for the unsafe one.
  * (basically move the buffer from an unsafe area to a safe one)
  */
 dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
 		enum dma_data_direction dir)
 {
-	dma_addr_t dma_addr;
-
 	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
 		__func__, ptr, size, dir);

 	BUG_ON(dir == DMA_NONE);

-	dma_addr = map_single(dev, ptr, size, dir);
-
-	return dma_addr;
+	return map_single(dev, ptr, size, dir);
 }
+EXPORT_SYMBOL(dma_map_single);

 dma_addr_t dma_map_page(struct device *dev, struct page *page,
 		unsigned long offset, size_t size, enum dma_data_direction dir)
 {
 	dev_dbg(dev, "%s(page=%p,off=%#lx,size=%zx,dir=%x)\n",
 		__func__, page, offset, size, dir);
@@ -368,9 +359,8 @@ EXPORT_SYMBOL(dma_map_page);
  * should be)
  */
 void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
 		enum dma_data_direction dir)
 {
 	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
 		__func__, (void *) dma_addr, size, dir);
@@ -379,6 +369,7 @@ dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
 	unmap_single(dev, dma_addr, size, dir);
 }
+EXPORT_SYMBOL(dma_unmap_single);

 int dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
 		unsigned long off, size_t sz, enum dma_data_direction dir)
@@ -434,9 +425,8 @@ int dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
 }
 EXPORT_SYMBOL(dmabounce_sync_for_device);

 static int dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev,
 		const char *name, unsigned long size)
 {
 	pool->size = size;
 	DO_STATS(pool->allocs = 0);
@@ -447,9 +437,8 @@ dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev, const char
 	return pool->pool ? 0 : -ENOMEM;
 }

 int dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
 		unsigned long large_buffer_size)
 {
 	struct dmabounce_device_info *device_info;
 	int ret;
@@ -505,9 +494,9 @@ dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
 	kfree(device_info);
 	return ret;
 }
+EXPORT_SYMBOL(dmabounce_register_dev);

 void dmabounce_unregister_dev(struct device *dev)
 {
 	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
@@ -540,11 +529,6 @@ dmabounce_unregister_dev(struct device *dev)
 	dev_info(dev, "dmabounce: device unregistered\n");
 }
+EXPORT_SYMBOL(dmabounce_unregister_dev);

-EXPORT_SYMBOL(dma_map_single);
-EXPORT_SYMBOL(dma_unmap_single);
-EXPORT_SYMBOL(dmabounce_register_dev);
-EXPORT_SYMBOL(dmabounce_unregister_dev);
-
 MODULE_AUTHOR("Christopher Hoover <ch@hpl.hp.com>, Deepak Saxena <dsaxena@plexity.net>");
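The EXPORT_SYMBOL() moves above follow the usual kernel convention of keeping the export immediately after the definition it exports, instead of collecting them at the end of the file. A minimal sketch of that convention, using a hypothetical helper that is not part of this commit:

#include <linux/module.h>

/* Hypothetical helper, for illustration only. */
int example_helper(int x)
{
	return x * 2;
}
/* Export right after the definition, as this commit does for
 * dma_map_single(), dma_unmap_single() and the dmabounce_*_dev() calls. */
EXPORT_SYMBOL(example_helper);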
arch/arm/include/asm/dma-mapping.h
@@ -104,15 +104,14 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
  * Dummy noncoherent implementation.  We don't provide a dma_cache_sync
  * function so drivers using this API are highlighted with build warnings.
  */
 static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
 		dma_addr_t *handle, gfp_t gfp)
 {
 	return NULL;
 }

 static inline void dma_free_noncoherent(struct device *dev, size_t size,
 		void *cpu_addr, dma_addr_t handle)
 {
 }
@@ -127,8 +126,7 @@ dma_free_noncoherent(struct device *dev, size_t size, void *cpu_addr,
  * return the CPU-viewed address, and sets @handle to be the
  * device-viewed address.
  */
-extern void *
-dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp);
+extern void *dma_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t);

 /**
  * dma_free_coherent - free memory allocated by dma_alloc_coherent
@@ -143,9 +141,7 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gf
  * References to memory and mappings associated with cpu_addr/handle
  * during and after this call executing are illegal.
  */
-extern void
-dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
-		dma_addr_t handle);
+extern void dma_free_coherent(struct device *, size_t, void *, dma_addr_t);

 /**
  * dma_mmap_coherent - map a coherent DMA allocation into user space
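As the comment above describes, dma_alloc_coherent() returns the CPU-viewed address and writes the device-viewed address into @handle. A minimal driver-side sketch, not part of this commit; the size, variable names and error value are illustrative only:

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/gfp.h>

/* Illustrative only: allocate a small coherent descriptor area. */
static void *desc_cpu;		/* CPU-viewed address */
static dma_addr_t desc_dma;	/* device-viewed address */

static int example_alloc_descriptors(struct device *dev)
{
	desc_cpu = dma_alloc_coherent(dev, PAGE_SIZE, &desc_dma, GFP_KERNEL);
	if (!desc_cpu)
		return -ENOMEM;
	/* desc_dma can now be programmed into the device's registers. */
	return 0;
}

static void example_free_descriptors(struct device *dev)
{
	dma_free_coherent(dev, PAGE_SIZE, desc_cpu, desc_dma);
}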
@@ -159,8 +155,8 @@ dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
  * into user space.  The coherent DMA buffer must not be freed by the
  * driver until the user space mapping has been released.
  */
-int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
-		void *cpu_addr, dma_addr_t handle, size_t size);
+int dma_mmap_coherent(struct device *, struct vm_area_struct *,
+		void *, dma_addr_t, size_t);

 /**
@@ -174,14 +170,14 @@ int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
  * return the CPU-viewed address, and sets @handle to be the
  * device-viewed address.
  */
-extern void *
-dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle,
-		gfp_t gfp);
+extern void *dma_alloc_writecombine(struct device *, size_t, dma_addr_t *,
+		gfp_t);

 #define dma_free_writecombine(dev,size,cpu_addr,handle) \
 	dma_free_coherent(dev,size,cpu_addr,handle)

-int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
-		void *cpu_addr, dma_addr_t handle, size_t size);
+int dma_mmap_writecombine(struct device *, struct vm_area_struct *,
+		void *, dma_addr_t, size_t);

 #ifdef CONFIG_DMABOUNCE
@@ -209,7 +205,8 @@ int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
  * appropriate DMA pools for the device.
  *
  */
 extern int dmabounce_register_dev(struct device *, unsigned long,
 		unsigned long);

 /**
  * dmabounce_unregister_dev
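dmabounce_register_dev() takes the sizes of the small and large bounce-buffer pools, as the prototype above shows; platforms that need bouncing also supply dma_needs_bounce(), which is declared further down in this header. A hedged sketch of how bus or machine support code might wire it up; the pool sizes and the probe/remove hooks are illustrative only:

#include <linux/device.h>
#include <asm/dma-mapping.h>

/* Illustrative probe/remove pair for a bus whose devices can only
 * reach part of memory and therefore need bounce buffers. */
static int example_bus_probe(struct device *dev)
{
	/* 2 KiB "small" pool, 4 KiB "large" pool: example values only. */
	return dmabounce_register_dev(dev, 2048, 4096);
}

static void example_bus_remove(struct device *dev)
{
	dmabounce_unregister_dev(dev);
}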
@@ -244,19 +241,20 @@ extern int dma_needs_bounce(struct device*, dma_addr_t, size_t);
 /*
  * The DMA API, implemented by dmabounce.c.  See below for descriptions.
  */
 extern dma_addr_t dma_map_single(struct device *, void *, size_t,
 		enum dma_data_direction);
-extern dma_addr_t dma_map_page(struct device *dev, struct page *page,
-		unsigned long offset, size_t size,
-		enum dma_data_direction dir);
+extern dma_addr_t dma_map_page(struct device *, struct page *,
+		unsigned long, size_t, enum dma_data_direction);
 extern void dma_unmap_single(struct device *, dma_addr_t, size_t,
 		enum dma_data_direction);

 /*
  * Private functions
  */
 int dmabounce_sync_for_cpu(struct device *, dma_addr_t, unsigned long,
 		size_t, enum dma_data_direction);
 int dmabounce_sync_for_device(struct device *, dma_addr_t, unsigned long,
 		size_t, enum dma_data_direction);
 #else
 #define dmabounce_sync_for_cpu(dev,dma,off,sz,dir) (1)
 #define dmabounce_sync_for_device(dev,dma,off,sz,dir) (1)
@@ -276,9 +274,8 @@ int dmabounce_sync_for_device(struct device *, dma_addr_t, unsigned long,
  * can regain ownership by calling dma_unmap_single() or
  * dma_sync_single_for_cpu().
  */
 static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
 		size_t size, enum dma_data_direction dir)
 {
 	if (!arch_is_coherent())
 		dma_cache_maint(cpu_addr, size, dir);

@@ -286,7 +283,6 @@ dma_map_single(struct device *dev, void *cpu_addr, size_t size,
 	return virt_to_dma(dev, cpu_addr);
 }

 /**
  * dma_map_page - map a portion of a page for streaming DMA
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
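For the streaming API, the buffer is handed to the device by dma_map_single() and reclaimed with dma_unmap_single(), as the comment above says. A minimal sketch of one DMA_TO_DEVICE transfer; the buffer, length and error value are illustrative, and the mapping is checked with dma_mapping_error(), which this header also provides:

#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* Illustrative only: map a driver-owned buffer for a device write-out. */
static int example_start_tx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t dma;

	dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma))
		return -ENOMEM;

	/* ... hand 'dma' to the hardware and start the transfer ... */

	/* once the device has finished with the buffer: */
	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
	return 0;
}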
@@ -302,10 +298,8 @@ dma_map_single(struct device *dev, void *cpu_addr, size_t size,
  * can regain ownership by calling dma_unmap_page() or
  * dma_sync_single_for_cpu().
  */
 static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
 		unsigned long offset, size_t size, enum dma_data_direction dir)
 {
 	if (!arch_is_coherent())
 		dma_cache_maint(page_address(page) + offset, size, dir);
@@ -327,9 +321,8 @@ dma_map_page(struct device *dev, struct page *page,
  * After this call, reads by the CPU to the buffer are guaranteed to see
  * whatever the device wrote there.
  */
 static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
 		size_t size, enum dma_data_direction dir)
 {
 	/* nothing to do */
 }
@@ -349,9 +342,8 @@ dma_unmap_single(struct device *dev, dma_addr_t handle, size_t size,
  * After this call, reads by the CPU to the buffer are guaranteed to see
  * whatever the device wrote there.
  */
 static inline void dma_unmap_page(struct device *dev, dma_addr_t handle,
 		size_t size, enum dma_data_direction dir)
 {
 	dma_unmap_single(dev, handle, size, dir);
 }
@@ -374,10 +366,9 @@ dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
  * must first the perform a dma_sync_for_device, and then the
  * device again owns the buffer.
  */
 static inline void dma_sync_single_range_for_cpu(struct device *dev,
 		dma_addr_t handle, unsigned long offset, size_t size,
 		enum dma_data_direction dir)
 {
 	if (!dmabounce_sync_for_cpu(dev, handle, offset, size, dir))
 		return;
@@ -386,10 +377,9 @@ dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t handle,
 	dma_cache_maint(dma_to_virt(dev, handle) + offset, size, dir);
 }

 static inline void dma_sync_single_range_for_device(struct device *dev,
 		dma_addr_t handle, unsigned long offset, size_t size,
 		enum dma_data_direction dir)
 {
 	if (!dmabounce_sync_for_device(dev, handle, offset, size, dir))
 		return;
@@ -398,16 +388,14 @@ dma_sync_single_range_for_device(struct device *dev, dma_addr_t handle,
 	dma_cache_maint(dma_to_virt(dev, handle) + offset, size, dir);
 }

 static inline void dma_sync_single_for_cpu(struct device *dev,
 		dma_addr_t handle, size_t size, enum dma_data_direction dir)
 {
 	dma_sync_single_range_for_cpu(dev, handle, 0, size, dir);
 }

 static inline void dma_sync_single_for_device(struct device *dev,
 		dma_addr_t handle, size_t size, enum dma_data_direction dir)
 {
 	dma_sync_single_range_for_device(dev, handle, 0, size, dir);
 }
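The sync calls transfer ownership back and forth while a streaming mapping stays live: dma_sync_single_for_cpu() before the CPU touches the buffer, dma_sync_single_for_device() before it is handed back. A hedged sketch, with illustrative names and an RX-style DMA_FROM_DEVICE mapping assumed to exist already:

#include <linux/dma-mapping.h>

/* Illustrative only: peek at a mapped RX buffer, then return it to the device. */
static void example_check_rx(struct device *dev, dma_addr_t handle,
			     void *cpu_buf, size_t len)
{
	/* CPU takes ownership: caches are made coherent with what the device wrote. */
	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);

	/* ... inspect cpu_buf here ... */

	/* Device takes ownership again before the next transfer. */
	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
}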
@@ -415,10 +403,14 @@ dma_sync_single_for_device(struct device *dev, dma_addr_t handle, size_t size,
 /*
  * The scatter list versions of the above methods.
  */
 extern int dma_map_sg(struct device *, struct scatterlist *, int,
 		enum dma_data_direction);
 extern void dma_unmap_sg(struct device *, struct scatterlist *, int,
 		enum dma_data_direction);
 extern void dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,
 		enum dma_data_direction);
 extern void dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
 		enum dma_data_direction);

 #endif /* __KERNEL__ */
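The scatter-gather variants map a whole scatterlist in one call. A hedged sketch of the usual pattern; scatterlist construction is omitted, and for_each_sg() with the sg_dma_* accessors comes from the generic scatterlist API, not from this patch:

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/scatterlist.h>

/* Illustrative only: map a prepared scatterlist and program each segment. */
static int example_map_sg(struct device *dev, struct scatterlist *sgl, int nents)
{
	struct scatterlist *sg;
	int i, count;

	count = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
	if (count == 0)
		return -ENOMEM;

	for_each_sg(sgl, sg, count, i) {
		/* sg_dma_address(sg) / sg_dma_len(sg) describe one DMA segment;
		 * write them into the device's descriptor ring here. */
	}

	/* when the transfer completes: */
	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
	return 0;
}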