Commit 5541b427
Authored Apr 25, 2004 by Linus Torvalds

Merge bk://bk.arm.linux.org.uk/linux-2.6-rmk
into ppc970.osdl.org:/home/torvalds/v2.6/linux

Parents: 7ce42ae1 c2e26dd2
Showing 9 changed files with 845 additions and 130 deletions (+845 −130)
    arch/arm/Kconfig                +5    -0
    arch/arm/common/Makefile        +2    -1
    arch/arm/common/dmabounce.c     +675  -0
    arch/arm/common/sa1111.c        +38   -55
    include/asm-arm/atomic.h        +10   -10
    include/asm-arm/div64.h         +7    -1
    include/asm-arm/dma-mapping.h   +92   -61
    include/asm-arm/system.h        +9    -0
    include/asm-arm/uaccess.h       +7    -2
arch/arm/Kconfig

@@ -210,6 +210,11 @@ config FORCE_MAX_ZONEORDER
 	depends on SA1111
 	default "9"
 
+config DMABOUNCE
+	bool
+	depends on SA1111
+	default y
+
 source arch/arm/mm/Kconfig
 
 #  bool 'Use XScale PMU as timer source' CONFIG_XSCALE_PMU_TIMER
arch/arm/common/Makefile

@@ -5,6 +5,7 @@
 obj-y				+= platform.o
 obj-$(CONFIG_ARM_AMBA)		+= amba.o
 obj-$(CONFIG_ICST525)		+= icst525.o
-obj-$(CONFIG_SA1111)		+= sa1111.o sa1111-pcibuf.o
+obj-$(CONFIG_SA1111)		+= sa1111.o
 obj-$(CONFIG_PCI_HOST_PLX90X0)	+= plx90x0.o
 obj-$(CONFIG_PCI_HOST_VIA82C505) += via82c505.o
+obj-$(CONFIG_DMABOUNCE)		+= dmabounce.o
arch/arm/common/sa1111-pcibuf.c → arch/arm/common/dmabounce.c

 /*
- *  linux/arch/arm/mach-sa1100/sa1111-pcibuf.c
+ *  arch/arm/common/dmabounce.c
  *
- *  Special dma_{map/unmap/dma_sync}_* routines for SA-1111.
+ *  Special dma_{map/unmap/dma_sync}_* routines for systems that have
+ *  limited DMA windows. These functions utilize bounce buffers to
+ *  copy data to/from buffers located outside the DMA region. This
+ *  only works for systems in which DMA memory is at the bottom of
+ *  RAM and the remainder of memory is at the top an the DMA memory
+ *  can be marked as ZONE_DMA. Anything beyond that such as discontigous
+ *  DMA windows will require custom implementations that reserve memory
+ *  areas at early bootup.
  *
- *  These functions utilize bouncer buffers to compensate for a bug in
- *  the SA-1111 hardware which don't allow DMA to/from addresses
- *  certain addresses above 1MB.
- *
- *  Re-written by Christopher Hoover <ch@murgatroid.com>
  *  Original version by Brad Parker (brad@heeltoe.com)
+ *  Re-written by Christopher Hoover <ch@murgatroid.com>
+ *  Made generic by Deepak Saxena <dsaxena@plexity.net>
  *
  *  Copyright (C) 2002 Hewlett Packard Company.
+ *  Copyright (C) 2004 MontaVista Software, Inc.
  *
  *  This program is free software; you can redistribute it and/or
  *  modify it under the terms of the GNU General Public License
  *  version 2 as published by the Free Software Foundation.
- * */
-
-//#define DEBUG
+ */
 
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/slab.h>
-#include <linux/list.h>
 #include <linux/device.h>
 #include <linux/dma-mapping.h>
 #include <linux/dmapool.h>
-#include <asm/hardware/sa1111.h>
+#include <linux/list.h>
 
-//#define STATS
+#undef DEBUG
+
+#undef STATS
 #ifdef STATS
 #define DO_STATS(X) do { X ; } while (0)
 #else
@@ -43,116 +47,101 @@ struct safe_buffer {
 	/* original request */
 	void		*ptr;
 	size_t		size;
-	enum dma_data_direction direction;
+	int		direction;
 
 	/* safe buffer info */
 	struct dma_pool *pool;
 	void		*safe;
 	dma_addr_t	safe_dma_addr;
-	struct device	*dev;
 };
 
-static LIST_HEAD(safe_buffers);
+struct dmabounce_device_info {
+	struct list_head node;
 
-#define SIZE_SMALL	1024
-#define SIZE_LARGE	(4*1024)
+	struct device *dev;
+	struct dma_pool *small_buffer_pool;
+	struct dma_pool *large_buffer_pool;
+	struct list_head safe_buffers;
+	unsigned long small_buffer_size, large_buffer_size;
+#ifdef STATS
+	unsigned long sbp_allocs;
+	unsigned long lbp_allocs;
+	unsigned long total_allocs;
+	unsigned long map_op_count;
+	unsigned long bounce_count;
+#endif
+};
 
-static struct dma_pool *small_buffer_pool, *large_buffer_pool;
+static LIST_HEAD(dmabounce_devs);
 
 #ifdef STATS
-static unsigned long sbp_allocs __initdata = 0;
-static unsigned long lbp_allocs __initdata = 0;
-static unsigned long total_allocs __initdata = 0;
-
-static void print_alloc_stats(void)
+static void print_alloc_stats(struct dmabounce_device_info *device_info)
 {
 	printk(KERN_INFO
-	       "sa1111_dmabuf: sbp: %lu, lbp: %lu, other: %lu, total: %lu\n",
-	       sbp_allocs, lbp_allocs,
-	       total_allocs - sbp_allocs - lbp_allocs, total_allocs);
+		"%s: dmabounce: sbp: %lu, lbp: %lu, other: %lu, total: %lu\n",
+		device_info->dev->bus_id,
+		device_info->sbp_allocs, device_info->lbp_allocs,
+		device_info->total_allocs - device_info->sbp_allocs -
+			device_info->lbp_allocs, device_info->total_allocs);
 }
 #endif
 
-static int __init create_safe_buffer_pools(void)
+/* find the given device in the dmabounce device list */
+static inline struct dmabounce_device_info *
+find_dmabounce_dev(struct device *dev)
 {
-	small_buffer_pool = dma_pool_create("sa1111_small_dma_buffer",
-					    NULL, SIZE_SMALL,
-					    0 /* byte alignment */,
-					    0 /* no page-crossing issues */);
-	if (small_buffer_pool == NULL) {
-		printk(KERN_ERR
-		       "sa1111_dmabuf: could not allocate small pci pool\n");
-		return -ENOMEM;
-	}
-
-	large_buffer_pool = dma_pool_create("sa1111_large_dma_buffer",
-					    NULL, SIZE_LARGE,
-					    0 /* byte alignment */,
-					    0 /* no page-crossing issues */);
-	if (large_buffer_pool == NULL) {
-		printk(KERN_ERR
-		       "sa1111_dmabuf: could not allocate large pci pool\n");
-		dma_pool_destroy(small_buffer_pool);
-		small_buffer_pool = NULL;
-		return -ENOMEM;
-	}
-
-	printk(KERN_INFO "SA1111: DMA buffer sizes: small=%u, large=%u\n",
-	       SIZE_SMALL, SIZE_LARGE);
+	struct list_head *entry;
 
-	return 0;
-}
+	list_for_each(entry, &dmabounce_devs) {
+		struct dmabounce_device_info *d =
+			list_entry(entry, struct dmabounce_device_info, node);
 
-static void __exit destroy_safe_buffer_pools(void)
-{
-	if (small_buffer_pool)
-		dma_pool_destroy(small_buffer_pool);
-	if (large_buffer_pool)
-		dma_pool_destroy(large_buffer_pool);
-
-	small_buffer_pool = large_buffer_pool = NULL;
+		if (d->dev == dev)
+			return d;
+	}
 }
 
 /* allocate a 'safe' buffer and keep track of it */
-static struct safe_buffer *alloc_safe_buffer(struct device *dev, void *ptr,
-					     size_t size,
-					     enum dma_data_direction dir)
+static inline struct safe_buffer *
+alloc_safe_buffer(struct dmabounce_device_info *device_info, void *ptr,
+		  size_t size, enum dma_data_direction dir)
 {
 	struct safe_buffer *buf;
 	struct dma_pool *pool;
+	struct device *dev = device_info->dev;
 	void *safe;
 	dma_addr_t safe_dma_addr;
 
-	dev_dbg(dev, "%s(ptr=%p, size=%d, direction=%d)\n",
+	dev_dbg(dev, "%s(ptr=%p, size=%d, dir=%d)\n",
 		__func__, ptr, size, dir);
 
-	DO_STATS(total_allocs++);
+	DO_STATS(device_info->total_allocs++);
 
 	buf = kmalloc(sizeof(struct safe_buffer), GFP_ATOMIC);
-	if (buf == NULL) {
-		printk(KERN_WARNING "%s: kmalloc failed\n", __func__);
+	if (buf == 0) {
+		dev_warn(dev, "%s: kmalloc failed\n", __func__);
 		return 0;
 	}
 
-	if (size <= SIZE_SMALL) {
-		pool = small_buffer_pool;
+	if (size <= device_info->small_buffer_size) {
+		pool = device_info->small_buffer_pool;
 		safe = dma_pool_alloc(pool, GFP_ATOMIC, &safe_dma_addr);
 
-		DO_STATS(sbp_allocs++);
-	} else if (size <= SIZE_LARGE) {
-		pool = large_buffer_pool;
+		DO_STATS(device_info->sbp_allocs++);
+	} else if (size <= device_info->large_buffer_size) {
+		pool = device_info->large_buffer_pool;
 		safe = dma_pool_alloc(pool, GFP_ATOMIC, &safe_dma_addr);
 
-		DO_STATS(lbp_allocs++);
+		DO_STATS(device_info->lbp_allocs++);
 	} else {
-		pool = NULL;
+		pool = 0;
 		safe = dma_alloc_coherent(dev, size, &safe_dma_addr, GFP_ATOMIC);
 	}
 
-	if (safe == NULL) {
-		printk(KERN_WARNING
+	if (safe == 0) {
+		dev_warn(device_info->dev,
 			"%s: could not alloc dma memory (size=%d)\n",
 			__func__, size);
 		kfree(buf);
@@ -160,191 +149,213 @@ static struct safe_buffer *alloc_safe_buffer(struct device *dev, void *ptr,
 	}
 
 #ifdef STATS
-	if (total_allocs % 1000 == 0)
-		print_alloc_stats();
+	if (device_info->total_allocs % 1000 == 0)
+		print_alloc_stats(device_info);
 #endif
 
-	BUG_ON(sa1111_check_dma_bug(safe_dma_addr));	// paranoia
-
 	buf->ptr = ptr;
 	buf->size = size;
 	buf->direction = dir;
 	buf->pool = pool;
 	buf->safe = safe;
 	buf->safe_dma_addr = safe_dma_addr;
-	buf->dev = dev;
 
-	list_add(&buf->node, &safe_buffers);
+	list_add(&buf->node, &device_info->safe_buffers);
 
 	return buf;
 }
 
 /* determine if a buffer is from our "safe" pool */
-static struct safe_buffer *find_safe_buffer(struct device *dev,
-					    dma_addr_t safe_dma_addr)
+static inline struct safe_buffer *
+find_safe_buffer(struct dmabounce_device_info *device_info,
+			dma_addr_t safe_dma_addr)
 {
 	struct list_head *entry;
 
-	list_for_each(entry, &safe_buffers) {
+	list_for_each(entry, &device_info->safe_buffers) {
 		struct safe_buffer *b =
 			list_entry(entry, struct safe_buffer, node);
 
-		if (b->safe_dma_addr == safe_dma_addr &&
-		    b->dev == dev) {
+		if (b->safe_dma_addr == safe_dma_addr)
 			return b;
-		}
 	}
 
-	return 0;
+	return NULL;
 }
 
-static void free_safe_buffer(struct safe_buffer *buf)
+static inline void
+free_safe_buffer(struct dmabounce_device_info *device_info,
+			struct safe_buffer *buf)
 {
-	pr_debug("%s(buf=%p)\n", __func__, buf);
+	dev_dbg(dev_info->dev, "%s(buf=%p)\n", __func__, buf);
 
 	list_del(&buf->node);
 
 	if (buf->pool)
 		dma_pool_free(buf->pool, buf->safe, buf->safe_dma_addr);
 	else
-		dma_free_coherent(buf->dev, buf->size, buf->safe,
+		dma_free_coherent(device_info->dev, buf->size, buf->safe,
 				    buf->safe_dma_addr);
 
 	kfree(buf);
 }
 
-static inline int dma_range_is_safe(struct device *dev, dma_addr_t addr,
-				    size_t size)
-{
-	unsigned int physaddr = SA1111_DMA_ADDR((unsigned int) addr);
-
-	/* Any address within one megabyte of the start of the target
-	 * bank will be OK.  This is an overly conservative test:
-	 * other addresses can be OK depending on the dram
-	 * configuration.  (See sa1111.c:sa1111_check_dma_bug() * for
-	 * details.)
-	 *
-	 * We take care to ensure the entire dma region is within
-	 * the safe range.
-	 */
-	return ((physaddr + size - 1) < (1 << 20));
-}
-
 /* ************************************************** */
 
 #ifdef STATS
-static unsigned long map_op_count __initdata = 0;
-static unsigned long bounce_count __initdata = 0;
-
-static void print_map_stats(void)
+static void print_map_stats(struct dmabounce_device_info *device_info)
 {
 	printk(KERN_INFO
-	       "sa1111_dmabuf: map_op_count=%lu, bounce_count=%lu\n",
-	       map_op_count, bounce_count);
+		"%s: dmabounce: map_op_count=%lu, bounce_count=%lu\n",
+		device_info->dev->bus_id,
+		device_info->map_op_count, device_info->bounce_count);
 }
 #endif
 
-static dma_addr_t map_single(struct device *dev, void *ptr,
-			     size_t size, enum dma_data_direction dir)
+static inline dma_addr_t
+map_single(struct device *dev, void *ptr, size_t size,
+		enum dma_data_direction dir)
 {
 	dma_addr_t dma_addr;
+	struct dmabounce_device_info *device_info = find_dmabounce_dev(dev);
 
-	DO_STATS(map_op_count++);
+	if (device_info)
+		DO_STATS(device_info->map_op_count++);
+
+	if (dev->dma_mask) {
+		unsigned long limit;
+
+		limit = (*dev->dma_mask + 1) & ~(*dev->dma_mask);
+		if (limit && (size > limit)) {
+			dev_err(dev, "DMA mapping too big "
+				"(requested %#x mask %#Lx)\n",
+				size, *dev->dma_mask);
+			return ~0;
+		}
+	}
 
 	dma_addr = virt_to_bus(ptr);
 
-	if (!dma_range_is_safe(dev, dma_addr, size)) {
+	if (device_info && dma_needs_bounce(dev, dma_addr, size)) {
 		struct safe_buffer *buf;
 
-		DO_STATS(bounce_count++);
-
-		buf = alloc_safe_buffer(dev, ptr, size, dir);
-		if (buf == NULL) {
-			printk(KERN_ERR
-			       "%s: unable to map unsafe buffer %p!\n",
+		buf = alloc_safe_buffer(device_info, ptr, size, dir);
+		if (buf == 0) {
+			dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
 			       __func__, ptr);
 			return 0;
 		}
 
-		dev_dbg(dev, "%s: unsafe buffer %p (phy=%08lx) mapped to %p (phy=%08x)\n",
-			__func__,
-			buf->ptr, virt_to_bus(buf->ptr),
-			buf->safe, buf->safe_dma_addr);
+		dev_dbg(dev,
+			"%s: unsafe buffer %p (phy=%p) mapped to %p (phy=%p)\n",
+			__func__, buf->ptr, (void *) virt_to_bus(buf->ptr),
+			buf->safe, (void *) buf->safe_dma_addr);
 
-		if (dir == DMA_TO_DEVICE ||
-		    dir == DMA_BIDIRECTIONAL) {
-			dev_dbg(dev, "%s: copy out from unsafe %p, to safe %p, size %d\n",
+		if ((dir == DMA_TO_DEVICE) ||
+		    (dir == DMA_BIDIRECTIONAL)) {
+			dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %d\n",
 				__func__, ptr, buf->safe, size);
 			memcpy(buf->safe, ptr, size);
 		}
+		consistent_sync(buf->safe, size, dir);
 
 		dma_addr = buf->safe_dma_addr;
-		ptr = buf->safe;
+	} else {
+		consistent_sync(ptr, size, dir);
 	}
 
-	consistent_sync(ptr, size, dir);
-
-#ifdef STATS
-	if (map_op_count % 1000 == 0)
-		print_map_stats();
-#endif
-
 	return dma_addr;
 }
 
-static void unmap_single(struct device *dev, dma_addr_t dma_addr,
-			 size_t size, enum dma_data_direction dir)
+static inline void
+unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
+		enum dma_data_direction dir)
 {
-	struct safe_buffer *buf;
+	struct dmabounce_device_info *device_info = find_dmabounce_dev(dev);
+	struct safe_buffer *buf = NULL;
+
+	/*
+	 * Trying to unmap an invalid mapping
+	 */
+	if (dma_addr == ~0) {
+		dev_err(dev, "Trying to unmap invalid mapping\n");
+		return;
+	}
 
-	buf = find_safe_buffer(dev, dma_addr);
+	if (device_info)
+		buf = find_safe_buffer(device_info, dma_addr);
 
 	if (buf) {
 		BUG_ON(buf->size != size);
-		BUG_ON(buf->direction != dir);
 
-		dev_dbg(dev, "%s: unsafe buffer %p (phy=%08lx) mapped to %p (phy=%08lx)\n",
-			__func__,
-			buf->ptr, virt_to_bus(buf->ptr),
-			buf->safe, buf->safe_dma_addr);
+		dev_dbg(dev,
			"%s: unsafe buffer %p (phy=%p) mapped to %p (phy=%p)\n",
+			__func__, buf->ptr, (void *) virt_to_bus(buf->ptr),
+			buf->safe, (void *) buf->safe_dma_addr);
 
-		DO_STATS(bounce_count++);
+		DO_STATS(device_info->bounce_count++);
 
-		if (dir == DMA_FROM_DEVICE ||
-		    dir == DMA_BIDIRECTIONAL) {
-			dev_dbg(dev, "%s: copy back from safe %p, to unsafe %p size %d\n",
+		if ((dir == DMA_FROM_DEVICE) ||
+		    (dir == DMA_BIDIRECTIONAL)) {
+			dev_dbg(dev,
+				"%s: copy back safe %p to unsafe %p size %d\n",
 				__func__, buf->safe, buf->ptr, size);
 			memcpy(buf->ptr, buf->safe, size);
 		}
-		free_safe_buffer(buf);
+		free_safe_buffer(device_info, buf);
 	}
 }
 
-static void sync_single(struct device *dev, dma_addr_t dma_addr,
-			size_t size, enum dma_data_direction dir)
+static inline void
+sync_single(struct device *dev, dma_addr_t dma_addr, size_t size,
+		enum dma_data_direction dir)
 {
-	struct safe_buffer *buf;
-	void *ptr;
+	struct dmabounce_device_info *device_info = find_dmabounce_dev(dev);
+	struct safe_buffer *buf = NULL;
 
-	buf = find_safe_buffer(dev, dma_addr);
+	if (device_info)
+		buf = find_safe_buffer(device_info, dma_addr);
 
 	if (buf) {
-		BUG_ON(buf->size != size);
-		BUG_ON(buf->direction != dir);
+		/*
+		 * Both of these checks from original code need to be
+		 * commented out b/c some drivers rely on the following:
+		 *
+		 * 1) Drivers may map a large chunk of memory into DMA space
+		 *    but only sync a small portion of it. Good example is
+		 *    allocating a large buffer, mapping it, and then
+		 *    breaking it up into small descriptors. No point
+		 *    in syncing the whole buffer if you only have to
+		 *    touch one descriptor.
+		 *
+		 * 2) Buffers that are mapped as DMA_BIDIRECTIONAL are
+		 *    usually only synced in one dir at a time.
+		 *
+		 * See drivers/net/eepro100.c for examples of both cases.
+		 *
+		 * -ds
+		 *
+		 * BUG_ON(buf->size != size);
+		 * BUG_ON(buf->direction != dir);
+		 */
 
-		dev_dbg(dev, "%s: unsafe buffer %p (phy=%08lx) mapped to %p (phy=%08lx)\n",
-			__func__,
-			buf->ptr, virt_to_bus(buf->ptr),
-			buf->safe, buf->safe_dma_addr);
+		dev_dbg(dev,
+			"%s: unsafe buffer %p (phy=%p) mapped to %p (phy=%p)\n",
+			__func__, buf->ptr, (void *) virt_to_bus(buf->ptr),
+			buf->safe, (void *) buf->safe_dma_addr);
 
-		DO_STATS(bounce_count++);
+		DO_STATS(device_info->bounce_count++);
 
 		switch (dir) {
 		case DMA_FROM_DEVICE:
-			dev_dbg(dev, "%s: copy back from safe %p, to unsafe %p size %d\n",
+			dev_dbg(dev,
+				"%s: copy back safe %p to unsafe %p size %d\n",
 				__func__, buf->safe, buf->ptr, size);
 			memcpy(buf->ptr, buf->safe, size);
 			break;
 		case DMA_TO_DEVICE:
-			dev_dbg(dev, "%s: copy out from unsafe %p, to safe %p, size %d\n",
+			dev_dbg(dev,
+				"%s: copy out unsafe %p to safe %p, size %d\n",
 				__func__, buf->ptr, buf->safe, size);
 			memcpy(buf->safe, buf->ptr, size);
 			break;
@@ -353,11 +364,10 @@ static void sync_single(struct device *dev, dma_addr_t dma_addr,
 		default:
 			BUG();
 		}
-		ptr = buf->safe;
+		consistent_sync(buf->safe, size, dir);
 	} else {
-		ptr = bus_to_virt(dma_addr);
+		consistent_sync(bus_to_virt(dma_addr), size, dir);
 	}
-
-	consistent_sync(ptr, size, dir);
 }
 
 /* ************************************************** */

@@ -368,8 +378,9 @@ static void sync_single(struct device *dev, dma_addr_t dma_addr,
  * substitute the safe buffer for the unsafe one.
  * (basically move the buffer from an unsafe area to a safe one)
  */
-dma_addr_t sa1111_map_single(struct device *dev, void *ptr,
-			     size_t size, enum dma_data_direction dir)
+dma_addr_t
+dma_map_single(struct device *dev, void *ptr, size_t size,
+		enum dma_data_direction dir)
 {
 	unsigned long flags;
 	dma_addr_t dma_addr;

@@ -394,13 +405,17 @@ dma_addr_t sa1111_map_single(struct device *dev, void *ptr,
  * the safe buffer.  (basically return things back to the way they
  * should be)
  */
-void sa1111_unmap_single(struct device *dev, dma_addr_t dma_addr,
-			 size_t size, enum dma_data_direction dir)
+void
+dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
+			enum dma_data_direction dir)
 {
 	unsigned long flags;
 
-	dev_dbg(dev, "%s(ptr=%08lx,size=%d,dir=%x)\n",
-		__func__, dma_addr, size, dir);
+	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
+		__func__, (void *) dma_addr, size, dir);
+
+	BUG_ON(dir == DMA_NONE);
 
 	local_irq_save(flags);

@@ -409,8 +424,9 @@ void sa1111_unmap_single(struct device *dev, dma_addr_t dma_addr,
 	local_irq_restore(flags);
 }
 
-int sa1111_map_sg(struct device *dev, struct scatterlist *sg,
-		  int nents, enum dma_data_direction dir)
+int
+dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+		enum dma_data_direction dir)
 {
 	unsigned long flags;
 	int i;

@@ -428,7 +444,8 @@ int sa1111_map_sg(struct device *dev, struct scatterlist *sg,
 		unsigned int length = sg->length;
 		void *ptr = page_address(page) + offset;
 
 		sg->dma_address = map_single(dev, ptr, length, dir);
 	}
 
 	local_irq_restore(flags);

@@ -436,8 +453,9 @@ int sa1111_map_sg(struct device *dev, struct scatterlist *sg,
 	return nents;
 }
 
-void sa1111_unmap_sg(struct device *dev, struct scatterlist *sg,
-		     int nents, enum dma_data_direction dir)
+void
+dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
+		enum dma_data_direction dir)
 {
 	unsigned long flags;
 	int i;

@@ -445,6 +463,8 @@ void sa1111_unmap_sg(struct device *dev, struct scatterlist *sg,
 	dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n",
 		__func__, sg, nents, dir);
 
+	BUG_ON(dir == DMA_NONE);
+
 	local_irq_save(flags);
 
 	for (i = 0; i < nents; i++, sg++) {

@@ -457,13 +477,14 @@ void sa1111_unmap_sg(struct device *dev, struct scatterlist *sg,
 	local_irq_restore(flags);
 }
 
-void sa1111_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_addr,
-				    size_t size, enum dma_data_direction dir)
+void
+dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_addr, size_t size,
+				enum dma_data_direction dir)
 {
 	unsigned long flags;
 
-	dev_dbg(dev, "%s(ptr=%08lx,size=%d,dir=%x)\n",
-		__func__, dma_addr, size, dir);
+	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
+		__func__, (void *) dma_addr, size, dir);
 
 	local_irq_save(flags);

@@ -472,13 +493,14 @@ void sa1111_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_addr,
 	local_irq_restore(flags);
 }
 
-void sa1111_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_addr,
-				       size_t size, enum dma_data_direction dir)
+void
+dma_sync_single_for_device(struct device *dev, dma_addr_t dma_addr, size_t size,
+				enum dma_data_direction dir)
 {
 	unsigned long flags;
 
-	dev_dbg(dev, "%s(ptr=%08lx,size=%d,dir=%x)\n",
-		__func__, dma_addr, size, dir);
+	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
+		__func__, (void *) dma_addr, size, dir);
 
 	local_irq_save(flags);

@@ -487,8 +509,9 @@ void sa1111_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_addr,
 	local_irq_restore(flags);
 }
 
-void sa1111_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
-				int nents, enum dma_data_direction dir)
+void
+dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
+			enum dma_data_direction dir)
 {
 	unsigned long flags;
 	int i;

@@ -496,6 +519,8 @@ void sa1111_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
 	dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n",
 		__func__, sg, nents, dir);
 
+	BUG_ON(dir == DMA_NONE);
+
 	local_irq_save(flags);
 
 	for (i = 0; i < nents; i++, sg++) {

@@ -508,8 +533,9 @@ void sa1111_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
 	local_irq_restore(flags);
 }
 
-void sa1111_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
-				   int nents, enum dma_data_direction dir)
+void
+dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents,
+			enum dma_data_direction dir)
 {
 	unsigned long flags;
 	int i;

@@ -517,6 +543,8 @@ void sa1111_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 	dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n",
 		__func__, sg, nents, dir);
 
+	BUG_ON(dir == DMA_NONE);
+
 	local_irq_save(flags);
 
 	for (i = 0; i < nents; i++, sg++) {
@@ -529,38 +557,119 @@ void sa1111_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
...
@@ -529,38 +557,119 @@ void sa1111_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
local_irq_restore
(
flags
);
local_irq_restore
(
flags
);
}
}
EXPORT_SYMBOL
(
sa1111_map_single
);
int
EXPORT_SYMBOL
(
sa1111_unmap_single
);
dmabounce_register_dev
(
struct
device
*
dev
,
unsigned
long
small_buffer_size
,
EXPORT_SYMBOL
(
sa1111_map_sg
);
unsigned
long
large_buffer_size
)
EXPORT_SYMBOL
(
sa1111_unmap_sg
);
{
EXPORT_SYMBOL
(
sa1111_dma_sync_single_for_cpu
);
struct
dmabounce_device_info
*
device_info
;
EXPORT_SYMBOL
(
sa1111_dma_sync_single_for_device
);
EXPORT_SYMBOL
(
sa1111_dma_sync_sg_for_cpu
);
EXPORT_SYMBOL
(
sa1111_dma_sync_sg_for_device
);
/* **************************************** */
device_info
=
kmalloc
(
sizeof
(
struct
dmabounce_device_info
),
GFP_ATOMIC
);
if
(
!
device_info
)
{
printk
(
KERN_ERR
"Could not allocated dmabounce_device_info for %s"
,
dev
->
bus_id
);
return
-
ENOMEM
;
}
static
int
__init
sa1111_dmabuf_init
(
void
)
device_info
->
small_buffer_pool
=
{
dma_pool_create
(
"small_dmabounce_pool"
,
printk
(
KERN_DEBUG
"sa1111_dmabuf: initializing SA-1111 DMA buffers
\n
"
);
dev
,
small_buffer_size
,
0
/* byte alignment */
,
0
/* no page-crossing issues */
);
if
(
!
device_info
->
small_buffer_pool
)
{
printk
(
KERN_ERR
"dmabounce: could not allocate small DMA pool for %s
\n
"
,
dev
->
bus_id
);
kfree
(
device_info
);
return
-
ENOMEM
;
}
if
(
large_buffer_size
)
{
device_info
->
large_buffer_pool
=
dma_pool_create
(
"large_dmabounce_pool"
,
dev
,
large_buffer_size
,
0
/* byte alignment */
,
0
/* no page-crossing issues */
);
if
(
!
device_info
->
large_buffer_pool
)
{
printk
(
KERN_ERR
"dmabounce: could not allocate large DMA pool for %s
\n
"
,
dev
->
bus_id
);
dma_pool_destroy
(
device_info
->
small_buffer_pool
);
return
-
ENOMEM
;
}
}
device_info
->
dev
=
dev
;
device_info
->
small_buffer_size
=
small_buffer_size
;
device_info
->
large_buffer_size
=
large_buffer_size
;
INIT_LIST_HEAD
(
&
device_info
->
safe_buffers
);
return
create_safe_buffer_pools
();
#ifdef STATS
device_info
->
sbp_allocs
=
0
;
device_info
->
lbp_allocs
=
0
;
device_info
->
total_allocs
=
0
;
device_info
->
map_op_count
=
0
;
device_info
->
bounce_count
=
0
;
#endif
list_add
(
&
device_info
->
node
,
&
dmabounce_devs
);
printk
(
KERN_INFO
"dmabounce: registered device %s on %s bus
\n
"
,
dev
->
bus_id
,
dev
->
bus
->
name
);
return
0
;
}
}
module_init
(
sa1111_dmabuf_init
);
static
void
__exit
sa1111_dmabuf_exit
(
void
)
void
dmabounce_unregister_dev
(
struct
device
*
dev
)
{
{
BUG_ON
(
!
list_empty
(
&
safe_buffers
));
struct
dmabounce_device_info
*
device_info
=
find_dmabounce_dev
(
dev
);
if
(
!
device_info
)
{
printk
(
KERN_WARNING
"%s: Never registered with dmabounce but attempting"
\
"to unregister!
\n
"
,
dev
->
bus_id
);
return
;
}
if
(
!
list_empty
(
&
device_info
->
safe_buffers
))
{
printk
(
KERN_ERR
,
"%s: Removing from dmabounce with pending buffers!
\n
"
,
dev
->
bus_id
);
BUG
();
}
if
(
device_info
->
small_buffer_pool
)
dma_pool_destroy
(
device_info
->
small_buffer_pool
);
if
(
device_info
->
large_buffer_pool
)
dma_pool_destroy
(
device_info
->
large_buffer_pool
);
#ifdef STATS
#ifdef STATS
print_alloc_stats
();
print_alloc_stats
(
device_info
);
print_map_stats
();
print_map_stats
(
device_info
);
#endif
#endif
destroy_safe_buffer_pools
();
list_del
(
&
device_info
->
node
);
kfree
(
device_info
);
printk
(
KERN_INFO
"dmabounce: device %s on %s bus unregistered
\n
"
,
dev
->
bus_id
,
dev
->
bus
->
name
);
}
}
module_exit
(
sa1111_dmabuf_exit
);
MODULE_AUTHOR
(
"Christopher Hoover <ch@hpl.hp.com>"
);
MODULE_DESCRIPTION
(
"Special dma_{map/unmap/dma_sync}_* routines for SA-1111."
);
EXPORT_SYMBOL
(
dma_map_single
);
EXPORT_SYMBOL
(
dma_unmap_single
);
EXPORT_SYMBOL
(
dma_map_sg
);
EXPORT_SYMBOL
(
dma_unmap_sg
);
EXPORT_SYMBOL
(
dma_sync_single
);
EXPORT_SYMBOL
(
dma_sync_sg
);
EXPORT_SYMBOL
(
dmabounce_register_dev
);
EXPORT_SYMBOL
(
dmabounce_unregister_dev
);
MODULE_AUTHOR
(
"Christopher Hoover <ch@hpl.hp.com>, Deepak Saxena <dsaxena@plexity.net>"
);
MODULE_DESCRIPTION
(
"Special dma_{map/unmap/dma_sync}_* routines for systems with limited DMA windows"
);
MODULE_LICENSE
(
"GPL"
);
MODULE_LICENSE
(
"GPL"
);
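The diff above replaces the SA-1111-only bounce-buffer module with a per-device registration API (dmabounce_register_dev / dmabounce_unregister_dev). As a rough illustration of how platform code is expected to drive that API, here is a minimal, hypothetical probe/remove pair; the function names are illustrative and not part of the commit (the sa1111.c hunk below is the real user added here, and it picks the same 1 KB / 4 KB pool sizes):

/* Hypothetical platform glue, sketched from the API introduced above. */
static int example_probe(struct device *dev)
{
	int ret;

	/* 1 KB small pool, 4 KB large pool - same sizes sa1111.c uses below */
	ret = dmabounce_register_dev(dev, 1024, 4096);
	if (ret)
		return ret;

	/* ... normal probe work; streaming DMA mappings on dev now bounce ... */
	return 0;
}

static void example_remove(struct device *dev)
{
	dmabounce_unregister_dev(dev);
}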
arch/arm/common/sa1111.c

@@ -25,6 +25,7 @@
 #include <linux/device.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
+#include <linux/dma-mapping.h>
 
 #include <asm/hardware.h>
 #include <asm/mach-types.h>

@@ -547,15 +548,6 @@ sa1111_init_one_child(struct sa1111 *sachip, struct resource *parent,
 	snprintf(dev->dev.bus_id, sizeof(dev->dev.bus_id),
 		 "%4.4lx", info->offset);
 
-	/*
-	 * If the parent device has a DMA mask associated with it,
-	 * propagate it down to the children.
-	 */
-	if (sachip->dev->dma_mask) {
-		dev->dma_mask = *sachip->dev->dma_mask;
-		dev->dev.dma_mask = &dev->dma_mask;
-	}
-
 	dev->devid	 = info->devid;
 	dev->dev.parent  = sachip->dev;
 	dev->dev.bus     = &sa1111_bus_type;

@@ -573,15 +565,37 @@ sa1111_init_one_child(struct sa1111 *sachip, struct resource *parent,
 	if (ret) {
 		printk("SA1111: failed to allocate resource for %s\n",
 			dev->res.name);
+		kfree(dev);
 		goto out;
 	}
 
 	ret = device_register(&dev->dev);
 	if (ret) {
 		release_resource(&dev->res);
- out:
 		kfree(dev);
+		goto out;
+	}
+
+	/*
+	 * If the parent device has a DMA mask associated with it,
+	 * propagate it down to the children.
+	 */
+	if (sachip->dev->dma_mask) {
+		dev->dma_mask = *sachip->dev->dma_mask;
+		dev->dev.dma_mask = &dev->dma_mask;
+
+		if (dev->dma_mask != 0xffffffffUL) {
+			ret = dmabounce_register_dev(&dev->dev, 1024, 4096);
+			if (ret) {
+				printk("SA1111: Failed to register %s with dmabounce", dev->dev.bus_id);
+				kfree(dev);
+				device_unregister(dev);
+			}
+		}
 	}
 
+ out:
 	return ret;
 }

@@ -742,61 +756,31 @@ static void __sa1111_remove(struct sa1111 *sachip)
  *
  * This routine only identifies whether or not a given DMA address
  * is susceptible to the bug.
+ *
+ * This should only get called for sa1111_device types due to the
+ * way we configure our device dma_masks.
  */
-int sa1111_check_dma_bug(dma_addr_t addr)
+int dma_needs_bounce(struct device *dev, dma_addr_t addr, size_t size)
 {
-	struct sa1111 *sachip = g_sa1111;
 	unsigned int physaddr = SA1111_DMA_ADDR((unsigned int)addr);
-	unsigned int smcr;
+	u32 dma_mask = *dev->dma_mask;
 
-	/* Section 4.6 of the "Intel StrongARM SA-1111 Development Module
+	/*
+	 * Section 4.6 of the "Intel StrongARM SA-1111 Development Module
 	 * User's Guide" mentions that jumpers R51 and R52 control the
 	 * target of SA-1111 DMA (either SDRAM bank 0 on Assabet, or
 	 * SDRAM bank 1 on Neponset).  The default configuration selects
 	 * Assabet, so any address in bank 1 is necessarily invalid.
 	 */
-	if ((machine_is_assabet() || machine_is_pfs168()) && addr >= 0xc8000000)
-		return -1;
+	if ((machine_is_assabet() || machine_is_pfs168()) &&
+	    (addr >= 0xc8000000 || (addr + size) >= 0xc8000000))
+		return 1;
 
-	/* The bug only applies to buffers located more than one megabyte
-	 * above the start of the target bank:
+	/*
+	 * Check to see if either the start or end are illegal.
 	 */
-	if (physaddr < (1 << 20))
-		return 0;
-
-	smcr = sa1111_readl(sachip->base + SA1111_SMCR);
-	switch (FExtr(smcr, SMCR_DRAC)) {
-	case 01: /* 10 row + bank address bits, A<20> must not be set */
-		if (physaddr & (1 << 20))
-			return -1;
-		break;
-	case 02: /* 11 row + bank address bits, A<23> must not be set */
-		if (physaddr & (1 << 23))
-			return -1;
-		break;
-	case 03: /* 12 row + bank address bits, A<24> must not be set */
-		if (physaddr & (1 << 24))
-			return -1;
-		break;
-	case 04: /* 13 row + bank address bits, A<25> must not be set */
-		if (physaddr & (1 << 25))
-			return -1;
-		break;
-	case 05: /* 14 row + bank address bits, A<20> must not be set */
-		if (physaddr & (1 << 20))
-			return -1;
-		break;
-	case 06: /* 15 row + bank address bits, A<20> must not be set */
-		if (physaddr & (1 << 20))
-			return -1;
-		break;
-	default:
-		printk(KERN_ERR "%s(): invalid SMCR DRAC value 0%lo\n",
-		       __FUNCTION__, FExtr(smcr, SMCR_DRAC));
-		return -1;
-	}
-
-	return 0;
+	return ((addr & ~(*dev->dma_mask))) ||
+		((addr + size - 1) & ~(*dev->dma_mask));
 }
 
 struct sa1111_save_data {

@@ -1293,7 +1277,6 @@ module_exit(sa1111_exit);
 MODULE_DESCRIPTION("Intel Corporation SA1111 core driver");
 MODULE_LICENSE("GPL");
 
-EXPORT_SYMBOL(sa1111_check_dma_bug);
 EXPORT_SYMBOL(sa1111_select_audio_mode);
 EXPORT_SYMBOL(sa1111_set_audio_rate);
 EXPORT_SYMBOL(sa1111_get_audio_rate);
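The sa1111.c change above turns the old sa1111_check_dma_bug() into the platform hook dma_needs_bounce(), which the new dmabounce core calls for every streaming mapping. A hedged sketch of what another platform (one with a simple fixed inbound window, as the dma-mapping.h comment below mentions for IXP425-class systems) might supply; the window-size macro is hypothetical, not from this commit:

/* Hypothetical platform implementation of the hook; return non-zero
 * when the region lies outside the device's reachable DMA window. */
#define EXAMPLE_DMA_WINDOW_SIZE	(64 * 1024 * 1024)	/* assumed 64MB window */

int dma_needs_bounce(struct device *dev, dma_addr_t addr, size_t size)
{
	return (addr + size) > EXAMPLE_DMA_WINDOW_SIZE;
}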
include/asm-arm/atomic.h

@@ -44,7 +44,7 @@ static inline void atomic_set(atomic_t *v, int i)
 	: "cc");
 }
 
-static inline void atomic_add(int i, volatile atomic_t *v)
+static inline void atomic_add(int i, atomic_t *v)
 {
 	unsigned long tmp, tmp2;

@@ -59,7 +59,7 @@ static inline void atomic_add(int i, volatile atomic_t *v)
 	: "cc");
 }
 
-static inline void atomic_sub(int i, volatile atomic_t *v)
+static inline void atomic_sub(int i, atomic_t *v)
 {
 	unsigned long tmp, tmp2;

@@ -77,7 +77,7 @@ static inline void atomic_sub(int i, volatile atomic_t *v)
 #define atomic_inc(v)	atomic_add(1, v)
 #define atomic_dec(v)	atomic_sub(1, v)
 
-static inline int atomic_dec_and_test(volatile atomic_t *v)
+static inline int atomic_dec_and_test(atomic_t *v)
 {
 	unsigned long tmp;
 	int result;

@@ -95,7 +95,7 @@ static inline int atomic_dec_and_test(volatile atomic_t *v)
 	return result == 0;
 }
 
-static inline int atomic_add_negative(int i, volatile atomic_t *v)
+static inline int atomic_add_negative(int i, atomic_t *v)
 {
 	unsigned long tmp;
 	int result;

@@ -138,7 +138,7 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
 #define atomic_set(v,i)	(((v)->counter) = (i))
 
-static inline void atomic_add(int i, volatile atomic_t *v)
+static inline void atomic_add(int i, atomic_t *v)
 {
 	unsigned long flags;

@@ -147,7 +147,7 @@ static inline void atomic_add(int i, volatile atomic_t *v)
 	local_irq_restore(flags);
 }
 
-static inline void atomic_sub(int i, volatile atomic_t *v)
+static inline void atomic_sub(int i, atomic_t *v)
 {
 	unsigned long flags;

@@ -156,7 +156,7 @@ static inline void atomic_sub(int i, volatile atomic_t *v)
 	local_irq_restore(flags);
 }
 
-static inline void atomic_inc(volatile atomic_t *v)
+static inline void atomic_inc(atomic_t *v)
 {
 	unsigned long flags;

@@ -165,7 +165,7 @@ static inline void atomic_inc(volatile atomic_t *v)
 	local_irq_restore(flags);
 }
 
-static inline void atomic_dec(volatile atomic_t *v)
+static inline void atomic_dec(atomic_t *v)
 {
 	unsigned long flags;

@@ -174,7 +174,7 @@ static inline void atomic_dec(volatile atomic_t *v)
 	local_irq_restore(flags);
 }
 
-static inline int atomic_dec_and_test(volatile atomic_t *v)
+static inline int atomic_dec_and_test(atomic_t *v)
 {
 	unsigned long flags;
 	int val;

@@ -187,7 +187,7 @@ static inline int atomic_dec_and_test(volatile atomic_t *v)
 	return val == 0;
 }
 
-static inline int atomic_add_negative(int i, volatile atomic_t *v)
+static inline int atomic_add_negative(int i, atomic_t *v)
 {
 	unsigned long flags;
 	int val;
include/asm-arm/div64.h

 #ifndef __ASM_ARM_DIV64
 #define __ASM_ARM_DIV64
 
+#include <asm/system.h>
+
 /*
  * The semantics of do_div() are:
  *

@@ -31,7 +33,11 @@
 	register unsigned long long __n   asm("r0") = n;	\
 	register unsigned long long __res asm("r2");		\
 	register unsigned int __rem       asm(__xh);		\
-	asm("bl	__do_div64"					\
+	asm(	__asmeq("%0", __xh)				\
+		__asmeq("%1", "r2")				\
+		__asmeq("%2", "r0")				\
+		__asmeq("%3", "r4")				\
+		"bl	__do_div64"				\
 		: "=r" (__rem), "=r" (__res)			\
 		: "r" (__n), "r" (__base)			\
 		: "ip", "lr", "cc");				\
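The div64.h hunk only hardens the register constraints around the __do_div64 call with __asmeq; the do_div() semantics described at the top of that header are unchanged. For reference, a small usage sketch (not part of the commit):

/* do_div(n, base) divides the 64-bit n in place and returns the remainder. */
u64 ns = 1000000123ULL;
u32 rem = do_div(ns, 1000000);	/* ns is now 1000, rem is 123 */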
include/asm-arm/dma-mapping.h

@@ -16,29 +16,6 @@
  */
 extern void consistent_sync(void *kaddr, size_t size, int rw);
 
-/*
- * For SA-1111 these functions are "magic" and utilize bounce
- * bufferes as needed to work around SA-1111 DMA bugs.
- */
-dma_addr_t sa1111_map_single(struct device *dev, void *, size_t, enum dma_data_direction);
-void sa1111_unmap_single(struct device *dev, dma_addr_t, size_t, enum dma_data_direction);
-int sa1111_map_sg(struct device *dev, struct scatterlist *, int, enum dma_data_direction);
-void sa1111_unmap_sg(struct device *dev, struct scatterlist *, int, enum dma_data_direction);
-void sa1111_dma_sync_single_for_cpu(struct device *dev, dma_addr_t, size_t, enum dma_data_direction);
-void sa1111_dma_sync_single_for_device(struct device *dev, dma_addr_t, size_t, enum dma_data_direction);
-void sa1111_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *, int, enum dma_data_direction);
-void sa1111_dma_sync_sg_for_device(struct device *dev, struct scatterlist *, int, enum dma_data_direction);
-
-#ifdef CONFIG_SA1111
-extern struct bus_type sa1111_bus_type;
-#define dmadev_is_sa1111(dev)	((dev)->bus == &sa1111_bus_type)
-#else
-#define dmadev_is_sa1111(dev)	(0)
-#endif
-
 /*
  * Return whether the given device DMA address mask can be supported
  * properly.  For example, if your device can only drive the low 24-bits

@@ -70,6 +47,14 @@ static inline int dma_is_consistent(dma_addr_t handle)
 	return 0;
 }
 
+/*
+ * DMA errors are defined by all-bits-set in the DMA address.
+ */
+static inline int dma_mapping_error(dma_addr_t dma_addr)
+{
+	return dma_addr == ~0;
+}
+
 /**
  * dma_alloc_coherent - allocate consistent memory for DMA
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices

@@ -118,6 +103,7 @@ dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, int
 #define dma_free_writecombine(dev,size,cpu_addr,handle) \
 	dma_free_coherent(dev,size,cpu_addr,handle)
 
 /**
  * dma_map_single - map a single buffer for streaming DMA
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices

@@ -132,16 +118,17 @@ dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, int
  * can regain ownership by calling dma_unmap_single() or
  * dma_sync_single_for_cpu().
  */
+#ifndef CONFIG_DMABOUNCE
 static inline dma_addr_t
 dma_map_single(struct device *dev, void *cpu_addr, size_t size,
 	       enum dma_data_direction dir)
 {
-	if (dmadev_is_sa1111(dev))
-		return sa1111_map_single(dev, cpu_addr, size, dir);
-
 	consistent_sync(cpu_addr, size, dir);
 	return __virt_to_bus((unsigned long)cpu_addr);
 }
+#else
+extern dma_addr_t dma_map_single(struct device *, void *, size_t, enum dma_data_direction);
+#endif
 
 /**
  * dma_map_page - map a portion of a page for streaming DMA

@@ -180,15 +167,16 @@ dma_map_page(struct device *dev, struct page *page,
  * After this call, reads by the CPU to the buffer are guaranteed to see
  * whatever the device wrote there.
  */
+#ifndef CONFIG_DMABOUNCE
 static inline void
 dma_unmap_single(struct device *dev, dma_addr_t handle, size_t size,
 		 enum dma_data_direction dir)
 {
-	if (dmadev_is_sa1111(dev))
-		sa1111_unmap_single(dev, handle, size, dir);
-
 	/* nothing to do */
 }
+#else
+extern void dma_unmap_single(struct device *, dma_addr_t, size_t, enum dma_data_direction);
+#endif
 
 /**
  * dma_unmap_page - unmap a buffer previously mapped through dma_map_page()

@@ -233,15 +221,13 @@ dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
  * Device ownership issues as mentioned above for dma_map_single are
  * the same here.
  */
+#ifndef CONFIG_DMABOUNCE
 static inline int
 dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 	   enum dma_data_direction dir)
 {
 	int i;
 
-	if (dmadev_is_sa1111(dev))
-		return sa1111_map_sg(dev, sg, nents, dir);
-
 	for (i = 0; i < nents; i++, sg++) {
 		char *virt;

@@ -252,6 +238,9 @@ dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 
 	return nents;
 }
+#else
+extern int dma_map_sg(struct device *, struct scatterlist *, int, enum dma_data_direction);
+#endif
 
 /**
  * dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg

@@ -264,17 +253,18 @@ dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
  * Again, CPU read rules concerning calls here are the same as for
  * dma_unmap_single() above.
  */
+#ifndef CONFIG_DMABOUNCE
 static inline void
 dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
 	     enum dma_data_direction dir)
 {
-	if (dmadev_is_sa1111(dev)) {
-		sa1111_unmap_sg(dev, sg, nents, dir);
-		return;
-	}
-
 	/* nothing to do */
 }
+#else
+extern void dma_unmap_sg(struct device *, struct scatterlist *, int, enum dma_data_direction);
+#endif
 
 /**
  * dma_sync_single_for_cpu

@@ -293,15 +283,11 @@ dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
  * must first the perform a dma_sync_for_device, and then the
  * device again owns the buffer.
  */
+#ifndef CONFIG_DMABOUNCE
 static inline void
 dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle, size_t size,
 			enum dma_data_direction dir)
 {
-	if (dmadev_is_sa1111(dev)) {
-		sa1111_dma_sync_single_for_cpu(dev, handle, size, dir);
-		return;
-	}
-
 	consistent_sync((void *)__bus_to_virt(handle), size, dir);
 }

@@ -309,13 +295,13 @@ static inline void
 dma_sync_single_for_device(struct device *dev, dma_addr_t handle, size_t size,
 			   enum dma_data_direction dir)
 {
-	if (dmadev_is_sa1111(dev)) {
-		sa1111_dma_sync_single_for_device(dev, handle, size, dir);
-		return;
-	}
-
 	consistent_sync((void *)__bus_to_virt(handle), size, dir);
 }
+#else
+extern void dma_sync_single_for_cpu(struct device *, dma_addr_t, size_t, enum dma_data_direction);
+extern void dma_sync_single_for_device(struct device *, dma_addr_t, size_t, enum dma_data_direction);
+#endif
 
 /**
  * dma_sync_sg_for_cpu

@@ -330,17 +316,13 @@ dma_sync_single_for_device(struct device *dev, dma_addr_t handle, size_t size,
  * The same as dma_sync_single_for_* but for a scatter-gather list,
  * same rules and usage.
  */
+#ifndef CONFIG_DMABOUNCE
 static inline void
 dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
 		    enum dma_data_direction dir)
 {
 	int i;
 
-	if (dmadev_is_sa1111(dev)) {
-		sa1111_dma_sync_sg_for_cpu(dev, sg, nents, dir);
-		return;
-	}
-
 	for (i = 0; i < nents; i++, sg++) {
 		char *virt = page_address(sg->page) + sg->offset;
 		consistent_sync(virt, sg->length, dir);

@@ -353,24 +335,73 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents,
 {
 	int i;
 
-	if (dmadev_is_sa1111(dev)) {
-		sa1111_dma_sync_sg_for_device(dev, sg, nents, dir);
-		return;
-	}
-
 	for (i = 0; i < nents; i++, sg++) {
 		char *virt = page_address(sg->page) + sg->offset;
 		consistent_sync(virt, sg->length, dir);
 	}
 }
+#else
+extern void dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int, enum dma_data_direction);
+extern void dma_sync_sg_for_device(struct device *, struct scatterlist *, int, enum dma_data_direction);
+#endif
 
+#ifdef CONFIG_DMABOUNCE
 /*
- * DMA errors are defined by all-bits-set in the DMA address.
+ * For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic"
+ * and utilize bounce buffers as needed to work around limited DMA windows.
+ *
+ * On the SA-1111, a bug limits DMA to only certain regions of RAM.
+ * On the IXP425, the PCI inbound window is 64MB (256MB total RAM)
+ * On some ADI engineering sytems, PCI inbound window is 32MB (12MB total RAM)
+ *
+ * The following are helper functions used by the dmabounce subystem
+ *
  */
-static inline int dma_mapping_error(dma_addr_t dma_addr)
-{
-	return dma_addr == ~0;
-}
+
+/**
+ * dmabounce_register_dev
+ *
+ * @dev: valid struct device pointer
+ * @small_buf_size: size of buffers to use with small buffer pool
+ * @large_buf_size: size of buffers to use with large buffer pool (can be 0)
+ *
+ * This function should be called by low-level platform code to register
+ * a device as requireing DMA buffer bouncing. The function will allocate
+ * appropriate DMA pools for the device.
+ *
+ */
+extern int dmabounce_register_dev(struct device *, unsigned long, unsigned long);
+
+/**
+ * dmabounce_unregister_dev
+ *
+ * @dev: valid struct device pointer
+ *
+ * This function should be called by low-level platform code when device
+ * that was previously registered with dmabounce_register_dev is removed
+ * from the system.
+ *
+ */
+extern void dmabounce_unregister_dev(struct device *);
+
+/**
+ * dma_needs_bounce
+ *
+ * @dev: valid struct device pointer
+ * @dma_handle: dma_handle of unbounced buffer
+ * @size: size of region being mapped
+ *
+ * Platforms that utilize the dmabounce mechanism must implement
+ * this function.
+ *
+ * The dmabounce routines call this function whenever a dma-mapping
+ * is requested to determine whether a given buffer needs to be bounced
+ * or not. The function must return 0 if the the buffer is OK for
+ * DMA access and 1 if the buffer needs to be bounced.
+ *
+ */
+extern int dma_needs_bounce(struct device*, dma_addr_t, size_t);
+#endif /* CONFIG_DMABOUNCE */
 
 #endif /* __KERNEL__ */
 #endif
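On the driver side, the dmabounce-backed variants of these calls can fail (for example when no safe buffer can be obtained), which is why dma_mapping_error() is now defined unconditionally near the top of the header, keyed on an all-bits-set handle. A hedged sketch of the calling pattern, not taken from the commit:

/* Illustrative only: check the handle before starting DMA. */
dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
if (dma_mapping_error(handle))
	return -ENOMEM;
/* ... program the device with 'handle', wait for completion ... */
dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);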
include/asm-arm/system.h

@@ -42,6 +42,15 @@
 #define CR_XP	(1 << 23)	/* Extended page tables		*/
 #define CR_VE	(1 << 24)	/* Vectored interrupts		*/
 
+/*
+ * This is used to ensure the compiler did actually allocate the register we
+ * asked it for some inline assembly sequences.  Apparently we can't trust
+ * the compiler from one version to another so a bit of paranoia won't hurt.
+ * This string is meant to be concatenated with the inline asm string and
+ * will cause compilation to stop on mismatch.
+ */
+#define __asmeq(x, y)  ".ifnc " x "," y " ; .err ; .endif\n\t"
+
 #ifndef __ASSEMBLY__
 
 #include <linux/kernel.h>
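__asmeq is pure build-time paranoia: it expands to an assembler .ifnc/.err pair, so if the compiler ever places an operand in a register other than the one the code asked for, assembly stops instead of silently using the wrong register. A tiny stand-alone illustration of assumed usage, mirroring the uaccess.h hunks below:

register unsigned long val asm("r0") = 42;

asm volatile(__asmeq("%0", "r0")	/* build fails unless %0 really is r0 */
	     "add	%0, %0, #1"
	     : "+r" (val));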
include/asm-arm/uaccess.h

@@ -15,6 +15,7 @@
 #include <asm/errno.h>
 #include <asm/arch/memory.h>
 #include <asm/domain.h>
+#include <asm/system.h>
 
 #define VERIFY_READ 0
 #define VERIFY_WRITE 1

@@ -107,7 +108,9 @@ extern int __get_user_8(void *);
 extern int __get_user_bad(void);
 
 #define __get_user_x(__r1,__p,__e,__s,__i...)				\
-	   __asm__ __volatile__ ("bl	__get_user_" #__s		\
+	   __asm__ __volatile__ (					\
+		__asmeq("%0", "r0") __asmeq("%1", "r1")			\
+		"bl	__get_user_" #__s				\
 		: "=&r" (__e), "=r" (__r1)				\
 		: "0" (__p)						\
 		: __i, "cc")

@@ -223,7 +226,9 @@ extern int __put_user_8(void *, unsigned long long);
 extern int __put_user_bad(void);
 
 #define __put_user_x(__r1,__p,__e,__s)					\
-	   __asm__ __volatile__ ("bl	__put_user_" #__s		\
+	   __asm__ __volatile__ (					\
+		__asmeq("%0", "r0") __asmeq("%2", "r1")			\
+		"bl	__put_user_" #__s				\
 		: "=&r" (__e)						\
 		: "0" (__p), "r" (__r1)					\
 		: "ip", "lr", "cc")