Commit 07f1c295
Authored Jul 18, 2011 by Russell King

Merge branch 'dma' of http://git.linaro.org/git/people/nico/linux into devel-stable

Parents: 4aa96ccf, fb89fcfb

Showing 55 changed files with 304 additions and 288 deletions (+304 -288)
arch/arm/common/dmabounce.c                     +83  -110
arch/arm/common/it8152.c                         +7    -9
arch/arm/common/sa1111.c                        +31   -29
arch/arm/include/asm/dma-mapping.h              +13   -75
arch/arm/include/asm/dma.h                       +6    -5
arch/arm/include/asm/mach/arch.h                 +4    -0
arch/arm/include/asm/memory.h                    +0   -12
arch/arm/kernel/setup.c                          +6    -0
arch/arm/mach-davinci/board-da830-evm.c          +1    -0
arch/arm/mach-davinci/board-da850-evm.c          +1    -0
arch/arm/mach-davinci/board-dm355-evm.c          +1    -0
arch/arm/mach-davinci/board-dm355-leopard.c      +1    -0
arch/arm/mach-davinci/board-dm365-evm.c          +1    -0
arch/arm/mach-davinci/board-dm644x-evm.c         +1    -0
arch/arm/mach-davinci/board-dm646x-evm.c         +2    -0
arch/arm/mach-davinci/board-mityomapl138.c       +1    -0
arch/arm/mach-davinci/board-neuros-osd2.c        +1    -0
arch/arm/mach-davinci/board-omapl138-hawk.c      +1    -0
arch/arm/mach-davinci/board-sffsdr.c             +1    -0
arch/arm/mach-davinci/board-tnetv107x-evm.c      +1    -0
arch/arm/mach-davinci/include/mach/memory.h      +0    -7
arch/arm/mach-h720x/h7201-eval.c                 +1    -0
arch/arm/mach-h720x/h7202-eval.c                 +1    -0
arch/arm/mach-h720x/include/mach/memory.h        +0    -7
arch/arm/mach-ixp4xx/avila-setup.c               +6    -0
arch/arm/mach-ixp4xx/common-pci.c                +6    -6
arch/arm/mach-ixp4xx/coyote-setup.c              +3    -0
arch/arm/mach-ixp4xx/dsmg600-setup.c             +3    -0
arch/arm/mach-ixp4xx/fsg-setup.c                 +3    -0
arch/arm/mach-ixp4xx/gateway7001-setup.c         +3    -0
arch/arm/mach-ixp4xx/goramo_mlr.c                +3    -0
arch/arm/mach-ixp4xx/gtwx5715-setup.c            +3    -0
arch/arm/mach-ixp4xx/include/mach/memory.h       +0    -4
arch/arm/mach-ixp4xx/ixdp425-setup.c            +12    -0
arch/arm/mach-ixp4xx/nas100d-setup.c             +3    -0
arch/arm/mach-ixp4xx/nslu2-setup.c               +3    -0
arch/arm/mach-ixp4xx/vulcan-setup.c              +3    -0
arch/arm/mach-ixp4xx/wg302v2-setup.c             +3    -0
arch/arm/mach-pxa/cm-x2xx.c                      +3    -0
arch/arm/mach-pxa/include/mach/memory.h          +0    -4
arch/arm/mach-realview/include/mach/memory.h     +0    -4
arch/arm/mach-realview/realview_eb.c             +3    -0
arch/arm/mach-realview/realview_pb1176.c         +3    -0
arch/arm/mach-realview/realview_pb11mp.c         +3    -0
arch/arm/mach-realview/realview_pba8.c           +3    -0
arch/arm/mach-realview/realview_pbx.c            +3    -0
arch/arm/mach-sa1100/assabet.c                   +3    -0
arch/arm/mach-sa1100/badge4.c                    +3    -0
arch/arm/mach-sa1100/include/mach/memory.h       +0    -4
arch/arm/mach-sa1100/jornada720.c                +3    -0
arch/arm/mach-shark/core.c                       +1    -0
arch/arm/mach-shark/include/mach/memory.h        +0    -2
arch/arm/mm/dma-mapping.c                       +32    -3
arch/arm/mm/init.c                              +19    -7
arch/arm/mm/mm.h                                 +6    -0
arch/arm/common/dmabounce.c

@@ -79,6 +79,8 @@ struct dmabounce_device_info {
 	struct dmabounce_pool	large;

 	rwlock_t lock;
+
+	int (*needs_bounce)(struct device *, dma_addr_t, size_t);
 };

 #ifdef STATS
@@ -210,114 +212,91 @@ static struct safe_buffer *find_safe_buffer_dev(struct device *dev,
 	if (!dev || !dev->archdata.dmabounce)
 		return NULL;
 	if (dma_mapping_error(dev, dma_addr)) {
-		if (dev)
-			dev_err(dev, "Trying to %s invalid mapping\n", where);
-		else
-			pr_err("unknown device: Trying to %s invalid mapping\n", where);
+		dev_err(dev, "Trying to %s invalid mapping\n", where);
 		return NULL;
 	}
 	return find_safe_buffer(dev->archdata.dmabounce, dma_addr);
 }

-static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
-		enum dma_data_direction dir)
+static int needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
 {
-	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
-	dma_addr_t dma_addr;
-	int needs_bounce = 0;
-
-	if (device_info)
-		DO_STATS ( device_info->map_op_count++ );
-
-	dma_addr = virt_to_dma(dev, ptr);
+	if (!dev || !dev->archdata.dmabounce)
+		return 0;

 	if (dev->dma_mask) {
-		unsigned long mask = *dev->dma_mask;
-		unsigned long limit;
+		unsigned long limit, mask = *dev->dma_mask;

 		limit = (mask + 1) & ~mask;
 		if (limit && size > limit) {
 			dev_err(dev, "DMA mapping too big (requested %#x "
 				"mask %#Lx)\n", size, *dev->dma_mask);
-			return ~0;
+			return -E2BIG;
 		}

-		/*
-		 * Figure out if we need to bounce from the DMA mask.
-		 */
-		needs_bounce = (dma_addr | (dma_addr + size - 1)) & ~mask;
+		/* Figure out if we need to bounce from the DMA mask. */
+		if ((dma_addr | (dma_addr + size - 1)) & ~mask)
+			return 1;
 	}

-	if (device_info && (needs_bounce || dma_needs_bounce(dev, dma_addr, size))) {
-		struct safe_buffer *buf;
+	return !!dev->archdata.dmabounce->needs_bounce(dev, dma_addr, size);
+}

-		buf = alloc_safe_buffer(device_info, ptr, size, dir);
-		if (buf == 0) {
-			dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
-			       __func__, ptr);
-			return ~0;
-		}
+static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
+		enum dma_data_direction dir)
+{
+	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
+	struct safe_buffer *buf;

-		dev_dbg(dev,
-			"%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
-			__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
-			buf->safe, buf->safe_dma_addr);
+	if (device_info)
+		DO_STATS ( device_info->map_op_count++ );

-		if ((dir == DMA_TO_DEVICE) ||
-		    (dir == DMA_BIDIRECTIONAL)) {
-			dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %d\n",
-				__func__, ptr, buf->safe, size);
-			memcpy(buf->safe, ptr, size);
-		}
-		ptr = buf->safe;
+	buf = alloc_safe_buffer(device_info, ptr, size, dir);
+	if (buf == NULL) {
+		dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
+		       __func__, ptr);
+		return ~0;
+	}

-		dma_addr = buf->safe_dma_addr;
-	} else {
-		/*
-		 * We don't need to sync the DMA buffer since
-		 * it was allocated via the coherent allocators.
-		 */
-		__dma_single_cpu_to_dev(ptr, size, dir);
+	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
+		__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
+		buf->safe, buf->safe_dma_addr);
+
+	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) {
+		dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %d\n",
+			__func__, ptr, buf->safe, size);
+		memcpy(buf->safe, ptr, size);
 	}

-	return dma_addr;
+	return buf->safe_dma_addr;
 }

-static inline void unmap_single(struct device *dev, dma_addr_t dma_addr,
+static inline void unmap_single(struct device *dev, struct safe_buffer *buf,
 		size_t size, enum dma_data_direction dir)
 {
-	struct safe_buffer *buf = find_safe_buffer_dev(dev, dma_addr, "unmap");
-
-	if (buf) {
-		BUG_ON(buf->size != size);
-		BUG_ON(buf->direction != dir);
+	BUG_ON(buf->size != size);
+	BUG_ON(buf->direction != dir);

-		dev_dbg(dev,
-			"%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
-			__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
-			buf->safe, buf->safe_dma_addr);
+	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
+		__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
+		buf->safe, buf->safe_dma_addr);

-		DO_STATS(dev->archdata.dmabounce->bounce_count++);
+	DO_STATS(dev->archdata.dmabounce->bounce_count++);

-		if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
-			void *ptr = buf->ptr;
+	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
+		void *ptr = buf->ptr;

-			dev_dbg(dev,
-				"%s: copy back safe %p to unsafe %p size %d\n",
-				__func__, buf->safe, ptr, size);
-			memcpy(ptr, buf->safe, size);
+		dev_dbg(dev, "%s: copy back safe %p to unsafe %p size %d\n",
+			__func__, buf->safe, ptr, size);
+		memcpy(ptr, buf->safe, size);

-			/*
-			 * Since we may have written to a page cache page,
-			 * we need to ensure that the data will be coherent
-			 * with user mappings.
-			 */
-			__cpuc_flush_dcache_area(ptr, size);
-		}
-		free_safe_buffer(dev->archdata.dmabounce, buf);
-	} else {
-		__dma_single_dev_to_cpu(dma_to_virt(dev, dma_addr), size, dir);
+		/*
+		 * Since we may have written to a page cache page,
+		 * we need to ensure that the data will be coherent
+		 * with user mappings.
+		 */
+		__cpuc_flush_dcache_area(ptr, size);
 	}
+	free_safe_buffer(dev->archdata.dmabounce, buf);
 }

 /* ************************************************** */

@@ -328,45 +307,28 @@ static inline void unmap_single(struct device *dev, dma_addr_t dma_addr,
  * substitute the safe buffer for the unsafe one.
  * (basically move the buffer from an unsafe area to a safe one)
  */
-dma_addr_t __dma_map_single(struct device *dev, void *ptr, size_t size,
-		enum dma_data_direction dir)
-{
-	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
-		__func__, ptr, size, dir);
-
-	BUG_ON(!valid_dma_direction(dir));
-
-	return map_single(dev, ptr, size, dir);
-}
-EXPORT_SYMBOL(__dma_map_single);
-
-/*
- * see if a mapped address was really a "safe" buffer and if so, copy
- * the data from the safe buffer back to the unsafe buffer and free up
- * the safe buffer.  (basically return things back to the way they
- * should be)
- */
-void __dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
-		enum dma_data_direction dir)
-{
-	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
-		__func__, (void *) dma_addr, size, dir);
-
-	unmap_single(dev, dma_addr, size, dir);
-}
-EXPORT_SYMBOL(__dma_unmap_single);
-
 dma_addr_t __dma_map_page(struct device *dev, struct page *page,
 		unsigned long offset, size_t size, enum dma_data_direction dir)
 {
+	dma_addr_t dma_addr;
+	int ret;
+
 	dev_dbg(dev, "%s(page=%p,off=%#lx,size=%zx,dir=%x)\n",
 		__func__, page, offset, size, dir);

-	BUG_ON(!valid_dma_direction(dir));
+	dma_addr = pfn_to_dma(dev, page_to_pfn(page)) + offset;
+
+	ret = needs_bounce(dev, dma_addr, size);
+	if (ret < 0)
+		return ~0;
+
+	if (ret == 0) {
+		__dma_page_cpu_to_dev(page, offset, size, dir);
+		return dma_addr;
+	}

 	if (PageHighMem(page)) {
-		dev_err(dev, "DMA buffer bouncing of HIGHMEM pages "
-			     "is not supported\n");
+		dev_err(dev, "DMA buffer bouncing of HIGHMEM pages is not supported\n");
 		return ~0;
 	}

@@ -383,10 +345,19 @@ EXPORT_SYMBOL(__dma_map_page);
 void __dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
 		enum dma_data_direction dir)
 {
-	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
-		__func__, (void *) dma_addr, size, dir);
+	struct safe_buffer *buf;
+
+	dev_dbg(dev, "%s(dma=%#x,size=%d,dir=%x)\n",
+		__func__, dma_addr, size, dir);
+
+	buf = find_safe_buffer_dev(dev, dma_addr, __func__);
+	if (!buf) {
+		__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, dma_addr)),
+			dma_addr & ~PAGE_MASK, size, dir);
+		return;
+	}

-	unmap_single(dev, dma_addr, size, dir);
+	unmap_single(dev, buf, size, dir);
 }
 EXPORT_SYMBOL(__dma_unmap_page);

@@ -461,7 +432,8 @@ static int dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev,
 }

 int dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
-		unsigned long large_buffer_size)
+		unsigned long large_buffer_size,
+		int (*needs_bounce_fn)(struct device *, dma_addr_t, size_t))
 {
 	struct dmabounce_device_info *device_info;
 	int ret;

@@ -497,6 +469,7 @@ int dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
 	device_info->dev = dev;
 	INIT_LIST_HEAD(&device_info->safe_buffers);
 	rwlock_init(&device_info->lock);
+	device_info->needs_bounce = needs_bounce_fn;

 #ifdef STATS
 	device_info->total_allocs = 0;
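The mask test in needs_bounce() above, (dma_addr | (dma_addr + size - 1)) & ~mask, flags a buffer whenever either its first or its last byte falls outside the device's DMA mask. Below is a minimal stand-alone sketch of that check; it is not part of the commit, and the 64MB mask value is only an illustration chosen to mirror the it8152/ixp4xx windows that appear later in this diff.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical mirror of the DMA-mask test used by needs_bounce(). */
static int addr_range_needs_bounce(uint32_t dma_addr, uint32_t size, uint32_t mask)
{
	/* Bounce if either end of the buffer has bits set above the mask. */
	return ((dma_addr | (dma_addr + size - 1)) & ~mask) != 0;
}

int main(void)
{
	uint32_t mask = 0x03ffffff;	/* 64MB window, as in the it8152/ixp4xx callers */

	/* Entirely below 64MB: no bounce needed (prints 0). */
	printf("%d\n", addr_range_needs_bounce(0x00100000, 0x1000, mask));
	/* Starts below but crosses the 64MB boundary: must bounce (prints 1). */
	printf("%d\n", addr_range_needs_bounce(0x03fff000, 0x2000, mask));
	return 0;
}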
arch/arm/common/it8152.c

@@ -243,6 +243,12 @@ static struct resource it8152_mem = {
  * ITE8152 chip can address up to 64MByte, so all the devices
  * connected to ITE8152 (PCI and USB) should have limited DMA window
  */
+static int it8152_needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
+{
+	dev_dbg(dev, "%s: dma_addr %08x, size %08x\n",
+		__func__, dma_addr, size);
+	return (dma_addr + size - PHYS_OFFSET) >= SZ_64M;
+}

 /*
  * Setup DMA mask to 64MB on devices connected to ITE8152. Ignore all
@@ -254,7 +260,7 @@ static int it8152_pci_platform_notify(struct device *dev)
 		if (dev->dma_mask)
 			*dev->dma_mask = (SZ_64M - 1) | PHYS_OFFSET;
 		dev->coherent_dma_mask = (SZ_64M - 1) | PHYS_OFFSET;
-		dmabounce_register_dev(dev, 2048, 4096);
+		dmabounce_register_dev(dev, 2048, 4096, it8152_needs_bounce);
 	}
 	return 0;
 }
@@ -267,14 +273,6 @@ static int it8152_pci_platform_notify_remove(struct device *dev)
 	return 0;
 }

-int dma_needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
-{
-	dev_dbg(dev, "%s: dma_addr %08x, size %08x\n",
-		__func__, dma_addr, size);
-	return (dev->bus == &pci_bus_type) &&
-		((dma_addr + size - PHYS_OFFSET) >= SZ_64M);
-}
-
 int dma_set_coherent_mask(struct device *dev, u64 mask)
 {
 	if (mask >= PHYS_OFFSET + SZ_64M - 1)
arch/arm/common/sa1111.c

@@ -579,7 +579,36 @@ sa1111_configure_smc(struct sa1111 *sachip, int sdram, unsigned int drac,
 		sachip->dev->coherent_dma_mask &= sa1111_dma_mask[drac >> 2];
 }
 #endif

+#ifdef CONFIG_DMABOUNCE
+/*
+ * According to the "Intel StrongARM SA-1111 Microprocessor Companion
+ * Chip Specification Update" (June 2000), erratum #7, there is a
+ * significant bug in the SA1111 SDRAM shared memory controller.  If
+ * an access to a region of memory above 1MB relative to the bank base,
+ * it is important that address bit 10 _NOT_ be asserted. Depending
+ * on the configuration of the RAM, bit 10 may correspond to one
+ * of several different (processor-relative) address bits.
+ *
+ * This routine only identifies whether or not a given DMA address
+ * is susceptible to the bug.
+ *
+ * This should only get called for sa1111_device types due to the
+ * way we configure our device dma_masks.
+ */
+static int sa1111_needs_bounce(struct device *dev, dma_addr_t addr, size_t size)
+{
+	/*
+	 * Section 4.6 of the "Intel StrongARM SA-1111 Development Module
+	 * User's Guide" mentions that jumpers R51 and R52 control the
+	 * target of SA-1111 DMA (either SDRAM bank 0 on Assabet, or
+	 * SDRAM bank 1 on Neponset).  The default configuration selects
+	 * Assabet, so any address in bank 1 is necessarily invalid.
+	 */
+	return (machine_is_assabet() || machine_is_pfs168()) &&
+		(addr >= 0xc8000000 || (addr + size) >= 0xc8000000);
+}
+#endif
+
 static void sa1111_dev_release(struct device *_dev)

@@ -644,7 +673,8 @@ sa1111_init_one_child(struct sa1111 *sachip, struct resource *parent,
 	dev->dev.dma_mask = &dev->dma_mask;

 	if (dev->dma_mask != 0xffffffffUL) {
-		ret = dmabounce_register_dev(&dev->dev, 1024, 4096);
+		ret = dmabounce_register_dev(&dev->dev, 1024, 4096,
+					sa1111_needs_bounce);
 		if (ret) {
 			dev_err(&dev->dev, "SA1111: Failed to register"
 				" with dmabounce\n");

@@ -818,34 +848,6 @@ static void __sa1111_remove(struct sa1111 *sachip)
 	kfree(sachip);
 }

-/*
- * According to the "Intel StrongARM SA-1111 Microprocessor Companion
- * Chip Specification Update" (June 2000), erratum #7, there is a
- * significant bug in the SA1111 SDRAM shared memory controller.  If
- * an access to a region of memory above 1MB relative to the bank base,
- * it is important that address bit 10 _NOT_ be asserted. Depending
- * on the configuration of the RAM, bit 10 may correspond to one
- * of several different (processor-relative) address bits.
- *
- * This routine only identifies whether or not a given DMA address
- * is susceptible to the bug.
- *
- * This should only get called for sa1111_device types due to the
- * way we configure our device dma_masks.
- */
-int dma_needs_bounce(struct device *dev, dma_addr_t addr, size_t size)
-{
-	/*
-	 * Section 4.6 of the "Intel StrongARM SA-1111 Development Module
-	 * User's Guide" mentions that jumpers R51 and R52 control the
-	 * target of SA-1111 DMA (either SDRAM bank 0 on Assabet, or
-	 * SDRAM bank 1 on Neponset).  The default configuration selects
-	 * Assabet, so any address in bank 1 is necessarily invalid.
-	 */
-	return ((machine_is_assabet() || machine_is_pfs168()) &&
-		(addr >= 0xc8000000 || (addr + size) >= 0xc8000000));
-}
-
 struct sa1111_save_data {
 	unsigned int	skcr;
 	unsigned int	skpcr;
arch/arm/include/asm/dma-mapping.h

@@ -115,39 +115,8 @@ static inline void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
 	___dma_page_dev_to_cpu(page, off, size, dir);
 }

-/*
- * Return whether the given device DMA address mask can be supported
- * properly.  For example, if your device can only drive the low 24-bits
- * during bus mastering, then you would pass 0x00ffffff as the mask
- * to this function.
- *
- * FIXME: This should really be a platform specific issue - we should
- * return false if GFP_DMA allocations may not satisfy the supplied 'mask'.
- */
-static inline int dma_supported(struct device *dev, u64 mask)
-{
-	if (mask < ISA_DMA_THRESHOLD)
-		return 0;
-	return 1;
-}
-
-static inline int dma_set_mask(struct device *dev, u64 dma_mask)
-{
-#ifdef CONFIG_DMABOUNCE
-	if (dev->archdata.dmabounce) {
-		if (dma_mask >= ISA_DMA_THRESHOLD)
-			return 0;
-		else
-			return -EIO;
-	}
-#endif
-	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
-		return -EIO;
-
-	*dev->dma_mask = dma_mask;
-
-	return 0;
-}
+extern int dma_supported(struct device *, u64);
+extern int dma_set_mask(struct device *, u64);

 /*
  * DMA errors are defined by all-bits-set in the DMA address.

@@ -256,14 +225,14 @@ int dma_mmap_writecombine(struct device *, struct vm_area_struct *,
  * @dev: valid struct device pointer
  * @small_buf_size: size of buffers to use with small buffer pool
  * @large_buf_size: size of buffers to use with large buffer pool (can be 0)
+ * @needs_bounce_fn: called to determine whether buffer needs bouncing
  *
  * This function should be called by low-level platform code to register
  * a device as requireing DMA buffer bouncing. The function will allocate
  * appropriate DMA pools for the device.
  */
 extern int dmabounce_register_dev(struct device *, unsigned long,
-		unsigned long);
+		unsigned long, int (*)(struct device *, dma_addr_t, size_t));

 /**
  * dmabounce_unregister_dev

@@ -277,31 +246,9 @@ extern int dmabounce_register_dev(struct device *, unsigned long,
  */
 extern void dmabounce_unregister_dev(struct device *);

-/**
- * dma_needs_bounce
- *
- * @dev: valid struct device pointer
- * @dma_handle: dma_handle of unbounced buffer
- * @size: size of region being mapped
- *
- * Platforms that utilize the dmabounce mechanism must implement
- * this function.
- *
- * The dmabounce routines call this function whenever a dma-mapping
- * is requested to determine whether a given buffer needs to be bounced
- * or not. The function must return 0 if the buffer is OK for
- * DMA access and 1 if the buffer needs to be bounced.
- *
- */
-extern int dma_needs_bounce(struct device *, dma_addr_t, size_t);
-
 /*
  * The DMA API, implemented by dmabounce.c.  See below for descriptions.
  */
-extern dma_addr_t __dma_map_single(struct device *, void *, size_t,
-		enum dma_data_direction);
-extern void __dma_unmap_single(struct device *, dma_addr_t, size_t,
-		enum dma_data_direction);
 extern dma_addr_t __dma_map_page(struct device *, struct page *,
 		unsigned long, size_t, enum dma_data_direction);
 extern void __dma_unmap_page(struct device *, dma_addr_t, size_t,

@@ -328,13 +275,6 @@ static inline int dmabounce_sync_for_device(struct device *d, dma_addr_t addr,
 }

-static inline dma_addr_t __dma_map_single(struct device *dev, void *cpu_addr,
-		size_t size, enum dma_data_direction dir)
-{
-	__dma_single_cpu_to_dev(cpu_addr, size, dir);
-	return virt_to_dma(dev, cpu_addr);
-}
-
 static inline dma_addr_t __dma_map_page(struct device *dev, struct page *page,
 	     unsigned long offset, size_t size, enum dma_data_direction dir)
 {

@@ -342,12 +282,6 @@ static inline dma_addr_t __dma_map_page(struct device *dev, struct page *page,
 	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
 }

-static inline void __dma_unmap_single(struct device *dev, dma_addr_t handle,
-		size_t size, enum dma_data_direction dir)
-{
-	__dma_single_dev_to_cpu(dma_to_virt(dev, handle), size, dir);
-}
-
 static inline void __dma_unmap_page(struct device *dev, dma_addr_t handle,
 		size_t size, enum dma_data_direction dir)
 {

@@ -373,14 +307,18 @@ static inline void __dma_unmap_page(struct device *dev, dma_addr_t handle,
 static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
 		size_t size, enum dma_data_direction dir)
 {
+	unsigned long offset;
+	struct page *page;
 	dma_addr_t addr;

+	BUG_ON(!virt_addr_valid(cpu_addr));
+	BUG_ON(!virt_addr_valid(cpu_addr + size - 1));
 	BUG_ON(!valid_dma_direction(dir));

-	addr = __dma_map_single(dev, cpu_addr, size, dir);
-	debug_dma_map_page(dev, virt_to_page(cpu_addr),
-			(unsigned long)cpu_addr & ~PAGE_MASK, size, dir,
-			addr, true);
+	page = virt_to_page(cpu_addr);
+	offset = (unsigned long)cpu_addr & ~PAGE_MASK;
+	addr = __dma_map_page(dev, page, offset, size, dir);
+	debug_dma_map_page(dev, page, offset, size, dir, addr, true);

 	return addr;
 }

@@ -430,7 +368,7 @@ static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
 		size_t size, enum dma_data_direction dir)
 {
 	debug_dma_unmap_page(dev, handle, size, dir, true);
-	__dma_unmap_single(dev, handle, size, dir);
+	__dma_unmap_page(dev, handle, size, dir);
 }

 /**
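For orientation, here is a rough sketch of how platform code is expected to use the reworked dmabounce_register_dev() declared above, now that the per-platform needs_bounce callback replaces the global dma_needs_bounce(). It is modelled on the it8152 and ixp4xx callers elsewhere in this commit; the function names, the PCI-bus check and the 64MB window are illustrative placeholders, not code from the tree.

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <asm/sizes.h>

/* Hypothetical platform glue: bounce anything beyond a 64MB DMA window. */
static int my_platform_needs_bounce(struct device *dev, dma_addr_t dma_addr,
				    size_t size)
{
	return (dma_addr + size) >= SZ_64M;
}

/* Hypothetical bus notifier hook, as the ixp4xx/it8152 code does. */
static int my_platform_notify(struct device *dev)
{
	if (dev->bus == &pci_bus_type) {
		*dev->dma_mask = SZ_64M - 1;
		dev->coherent_dma_mask = SZ_64M - 1;
		/* Small/large pool sizes follow the existing callers. */
		dmabounce_register_dev(dev, 2048, 4096, my_platform_needs_bounce);
	}
	return 0;
}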
arch/arm/include/asm/dma.h

 #ifndef __ASM_ARM_DMA_H
 #define __ASM_ARM_DMA_H

 #include <asm/memory.h>

 /*
  * This is the maximum virtual address which can be DMA'd from.
  */
-#ifndef ARM_DMA_ZONE_SIZE
-#define MAX_DMA_ADDRESS	0xffffffff
+#ifndef CONFIG_ZONE_DMA
+#define MAX_DMA_ADDRESS	0xffffffffUL
 #else
-#define MAX_DMA_ADDRESS	(PAGE_OFFSET + ARM_DMA_ZONE_SIZE)
+#define MAX_DMA_ADDRESS	({ \
+	extern unsigned long arm_dma_zone_size; \
+	arm_dma_zone_size ? \
+		(PAGE_OFFSET + arm_dma_zone_size) : 0xffffffffUL; })
 #endif

 #ifdef CONFIG_ISA_DMA_API
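Note that MAX_DMA_ADDRESS is now a GNU statement expression evaluated at run time rather than a compile-time constant derived from ARM_DMA_ZONE_SIZE. A small illustration of the same logic outside the macro (a hypothetical helper, not part of the commit):

/* Hypothetical helper mirroring the run-time MAX_DMA_ADDRESS logic above. */
extern unsigned long arm_dma_zone_size;

static inline unsigned long example_max_dma_address(void)
{
	/* Machines that set mdesc->dma_zone_size bound DMA'able virtual
	 * addresses at PAGE_OFFSET + zone size; everyone else is unbounded. */
	return arm_dma_zone_size ? (PAGE_OFFSET + arm_dma_zone_size)
				 : 0xffffffffUL;
}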
arch/arm/include/asm/mach/arch.h

@@ -23,6 +23,10 @@ struct machine_desc {
 	unsigned int		nr_irqs;	/* number of IRQs */

+#ifdef CONFIG_ZONE_DMA
+	unsigned long		dma_zone_size;	/* size of DMA-able area */
+#endif
+
 	unsigned int		video_start;	/* start of video RAM */
 	unsigned int		video_end;	/* end of video RAM */
arch/arm/include/asm/memory.h

@@ -203,18 +203,6 @@ static inline unsigned long __phys_to_virt(unsigned long x)
 #define PHYS_OFFSET	PLAT_PHYS_OFFSET
 #endif

-/*
- * The DMA mask corresponding to the maximum bus address allocatable
- * using GFP_DMA.  The default here places no restriction on DMA
- * allocations.  This must be the smallest DMA mask in the system,
- * so a successful GFP_DMA allocation will always satisfy this.
- */
-#ifndef ARM_DMA_ZONE_SIZE
-#define ISA_DMA_THRESHOLD	(0xffffffffULL)
-#else
-#define ISA_DMA_THRESHOLD	(PHYS_OFFSET + ARM_DMA_ZONE_SIZE - 1)
-#endif
-
 /*
  * PFNs are used to describe any physical page; this means
  * PFN 0 == physical address 0.
arch/arm/kernel/setup.c

@@ -918,6 +918,12 @@ void __init setup_arch(char **cmdline_p)
 	cpu_init();
 	tcm_init();

+#ifdef CONFIG_ZONE_DMA
+	if (mdesc->dma_zone_size) {
+		extern unsigned long arm_dma_zone_size;
+		arm_dma_zone_size = mdesc->dma_zone_size;
+	}
+#endif
 #ifdef CONFIG_MULTI_IRQ_HANDLER
 	handle_arch_irq = mdesc->handle_irq;
 #endif
arch/arm/mach-davinci/board-da830-evm.c

@@ -681,4 +681,5 @@ MACHINE_START(DAVINCI_DA830_EVM, "DaVinci DA830/OMAP-L137/AM17x EVM")
 	.init_irq	= cp_intc_init,
 	.timer		= &davinci_timer,
 	.init_machine	= da830_evm_init,
+	.dma_zone_size	= SZ_128M,
 MACHINE_END

arch/arm/mach-davinci/board-da850-evm.c

@@ -1261,4 +1261,5 @@ MACHINE_START(DAVINCI_DA850_EVM, "DaVinci DA850/OMAP-L138/AM18x EVM")
 	.init_irq	= cp_intc_init,
 	.timer		= &davinci_timer,
 	.init_machine	= da850_evm_init,
+	.dma_zone_size	= SZ_128M,
 MACHINE_END

arch/arm/mach-davinci/board-dm355-evm.c

@@ -356,4 +356,5 @@ MACHINE_START(DAVINCI_DM355_EVM, "DaVinci DM355 EVM")
 	.init_irq	= davinci_irq_init,
 	.timer		= &davinci_timer,
 	.init_machine	= dm355_evm_init,
+	.dma_zone_size	= SZ_128M,
 MACHINE_END

arch/arm/mach-davinci/board-dm355-leopard.c

@@ -275,4 +275,5 @@ MACHINE_START(DM355_LEOPARD, "DaVinci DM355 leopard")
 	.init_irq	= davinci_irq_init,
 	.timer		= &davinci_timer,
 	.init_machine	= dm355_leopard_init,
+	.dma_zone_size	= SZ_128M,
 MACHINE_END

arch/arm/mach-davinci/board-dm365-evm.c

@@ -617,5 +617,6 @@ MACHINE_START(DAVINCI_DM365_EVM, "DaVinci DM365 EVM")
 	.init_irq	= davinci_irq_init,
 	.timer		= &davinci_timer,
 	.init_machine	= dm365_evm_init,
+	.dma_zone_size	= SZ_128M,
 MACHINE_END

arch/arm/mach-davinci/board-dm644x-evm.c

@@ -717,4 +717,5 @@ MACHINE_START(DAVINCI_EVM, "DaVinci DM644x EVM")
 	.init_irq	= davinci_irq_init,
 	.timer		= &davinci_timer,
 	.init_machine	= davinci_evm_init,
+	.dma_zone_size	= SZ_128M,
 MACHINE_END

arch/arm/mach-davinci/board-dm646x-evm.c

@@ -802,6 +802,7 @@ MACHINE_START(DAVINCI_DM6467_EVM, "DaVinci DM646x EVM")
 	.init_irq	= davinci_irq_init,
 	.timer		= &davinci_timer,
 	.init_machine	= evm_init,
+	.dma_zone_size	= SZ_128M,
 MACHINE_END

 MACHINE_START(DAVINCI_DM6467TEVM, "DaVinci DM6467T EVM")
@@ -810,5 +811,6 @@ MACHINE_START(DAVINCI_DM6467TEVM, "DaVinci DM6467T EVM")
 	.init_irq	= davinci_irq_init,
 	.timer		= &davinci_timer,
 	.init_machine	= evm_init,
+	.dma_zone_size	= SZ_128M,
 MACHINE_END

arch/arm/mach-davinci/board-mityomapl138.c

@@ -570,4 +570,5 @@ MACHINE_START(MITYOMAPL138, "MityDSP-L138/MityARM-1808")
 	.init_irq	= cp_intc_init,
 	.timer		= &davinci_timer,
 	.init_machine	= mityomapl138_init,
+	.dma_zone_size	= SZ_128M,
 MACHINE_END

arch/arm/mach-davinci/board-neuros-osd2.c

@@ -277,4 +277,5 @@ MACHINE_START(NEUROS_OSD2, "Neuros OSD2")
 	.init_irq	= davinci_irq_init,
 	.timer		= &davinci_timer,
 	.init_machine	= davinci_ntosd2_init,
+	.dma_zone_size	= SZ_128M,
 MACHINE_END

arch/arm/mach-davinci/board-omapl138-hawk.c

@@ -343,4 +343,5 @@ MACHINE_START(OMAPL138_HAWKBOARD, "AM18x/OMAP-L138 Hawkboard")
 	.init_irq	= cp_intc_init,
 	.timer		= &davinci_timer,
 	.init_machine	= omapl138_hawk_init,
+	.dma_zone_size	= SZ_128M,
 MACHINE_END

arch/arm/mach-davinci/board-sffsdr.c

@@ -156,4 +156,5 @@ MACHINE_START(SFFSDR, "Lyrtech SFFSDR")
 	.init_irq	= davinci_irq_init,
 	.timer		= &davinci_timer,
 	.init_machine	= davinci_sffsdr_init,
+	.dma_zone_size	= SZ_128M,
 MACHINE_END

arch/arm/mach-davinci/board-tnetv107x-evm.c

@@ -282,4 +282,5 @@ MACHINE_START(TNETV107X, "TNETV107X EVM")
 	.init_irq	= cp_intc_init,
 	.timer		= &davinci_timer,
 	.init_machine	= tnetv107x_evm_board_init,
+	.dma_zone_size	= SZ_128M,
 MACHINE_END

arch/arm/mach-davinci/include/mach/memory.h

@@ -41,11 +41,4 @@
  */
 #define CONSISTENT_DMA_SIZE (14<<20)

-/*
- * Restrict DMA-able region to workaround silicon bug.  The bug
- * restricts buffers available for DMA to video hardware to be
- * below 128M
- */
-#define ARM_DMA_ZONE_SIZE	SZ_128M
-
 #endif /* __ASM_ARCH_MEMORY_H */
arch/arm/mach-h720x/h7201-eval.c

@@ -33,4 +33,5 @@ MACHINE_START(H7201, "Hynix GMS30C7201")
 	.map_io		= h720x_map_io,
 	.init_irq	= h720x_init_irq,
 	.timer		= &h7201_timer,
+	.dma_zone_size	= SZ_256M,
 MACHINE_END

arch/arm/mach-h720x/h7202-eval.c

@@ -76,4 +76,5 @@ MACHINE_START(H7202, "Hynix HMS30C7202")
 	.init_irq	= h7202_init_irq,
 	.timer		= &h7202_timer,
 	.init_machine	= init_eval_h7202,
+	.dma_zone_size	= SZ_256M,
 MACHINE_END

arch/arm/mach-h720x/include/mach/memory.h

@@ -8,11 +8,4 @@
 #define __ASM_ARCH_MEMORY_H

 #define PLAT_PHYS_OFFSET	UL(0x40000000)
-/*
- * This is the maximum DMA address that can be DMAd to.
- * There should not be more than (0xd0000000 - 0xc0000000)
- * bytes of RAM.
- */
-#define ARM_DMA_ZONE_SIZE	SZ_256M

 #endif
arch/arm/mach-ixp4xx/avila-setup.c

@@ -169,6 +169,9 @@ MACHINE_START(AVILA, "Gateworks Avila Network Platform")
 	.timer		= &ixp4xx_timer,
 	.boot_params	= 0x0100,
 	.init_machine	= avila_init,
+#if defined(CONFIG_PCI)
+	.dma_zone_size	= SZ_64M,
+#endif
 MACHINE_END

 /*
@@ -184,6 +187,9 @@ MACHINE_START(LOFT, "Giant Shoulder Inc Loft board")
 	.timer		= &ixp4xx_timer,
 	.boot_params	= 0x0100,
 	.init_machine	= avila_init,
+#if defined(CONFIG_PCI)
+	.dma_zone_size	= SZ_64M,
+#endif
 MACHINE_END
 #endif

arch/arm/mach-ixp4xx/common-pci.c

@@ -316,6 +316,11 @@ static int abort_handler(unsigned long addr, unsigned int fsr, struct pt_regs *r
 }

+static int ixp4xx_needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
+{
+	return (dma_addr + size) >= SZ_64M;
+}
+
 /*
  * Setup DMA mask to 64MB on PCI devices. Ignore all other devices.
  */
@@ -324,7 +329,7 @@ static int ixp4xx_pci_platform_notify(struct device *dev)
 	if (dev->bus == &pci_bus_type) {
 		*dev->dma_mask = SZ_64M - 1;
 		dev->coherent_dma_mask = SZ_64M - 1;
-		dmabounce_register_dev(dev, 2048, 4096);
+		dmabounce_register_dev(dev, 2048, 4096, ixp4xx_needs_bounce);
 	}
 	return 0;
 }
@@ -337,11 +342,6 @@ static int ixp4xx_pci_platform_notify_remove(struct device *dev)
 	return 0;
 }

-int dma_needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
-{
-	return (dev->bus == &pci_bus_type) && ((dma_addr + size) >= SZ_64M);
-}
-
 void __init ixp4xx_pci_preinit(void)
 {
 	unsigned long cpuid = read_cpuid_id();

arch/arm/mach-ixp4xx/coyote-setup.c

@@ -114,6 +114,9 @@ MACHINE_START(ADI_COYOTE, "ADI Engineering Coyote")
 	.timer		= &ixp4xx_timer,
 	.boot_params	= 0x0100,
 	.init_machine	= coyote_init,
+#if defined(CONFIG_PCI)
+	.dma_zone_size	= SZ_64M,
+#endif
 MACHINE_END
 #endif

arch/arm/mach-ixp4xx/dsmg600-setup.c

@@ -284,4 +284,7 @@ MACHINE_START(DSMG600, "D-Link DSM-G600 RevA")
 	.init_irq	= ixp4xx_init_irq,
 	.timer		= &dsmg600_timer,
 	.init_machine	= dsmg600_init,
+#if defined(CONFIG_PCI)
+	.dma_zone_size	= SZ_64M,
+#endif
 MACHINE_END

arch/arm/mach-ixp4xx/fsg-setup.c

@@ -275,5 +275,8 @@ MACHINE_START(FSG, "Freecom FSG-3")
 	.timer		= &ixp4xx_timer,
 	.boot_params	= 0x0100,
 	.init_machine	= fsg_init,
+#if defined(CONFIG_PCI)
+	.dma_zone_size	= SZ_64M,
+#endif
 MACHINE_END

arch/arm/mach-ixp4xx/gateway7001-setup.c

@@ -101,5 +101,8 @@ MACHINE_START(GATEWAY7001, "Gateway 7001 AP")
 	.timer		= &ixp4xx_timer,
 	.boot_params	= 0x0100,
 	.init_machine	= gateway7001_init,
+#if defined(CONFIG_PCI)
+	.dma_zone_size	= SZ_64M,
+#endif
 MACHINE_END
 #endif

arch/arm/mach-ixp4xx/goramo_mlr.c

@@ -501,4 +501,7 @@ MACHINE_START(GORAMO_MLR, "MultiLink")
 	.timer		= &ixp4xx_timer,
 	.boot_params	= 0x0100,
 	.init_machine	= gmlr_init,
+#if defined(CONFIG_PCI)
+	.dma_zone_size	= SZ_64M,
+#endif
 MACHINE_END

arch/arm/mach-ixp4xx/gtwx5715-setup.c

@@ -169,6 +169,9 @@ MACHINE_START(GTWX5715, "Gemtek GTWX5715 (Linksys WRV54G)")
 	.timer		= &ixp4xx_timer,
 	.boot_params	= 0x0100,
 	.init_machine	= gtwx5715_init,
+#if defined(CONFIG_PCI)
+	.dma_zone_size	= SZ_64M,
+#endif
 MACHINE_END

arch/arm/mach-ixp4xx/include/mach/memory.h

@@ -14,8 +14,4 @@
  */
 #define PLAT_PHYS_OFFSET	UL(0x00000000)

-#ifdef CONFIG_PCI
-#define ARM_DMA_ZONE_SIZE	SZ_64M
-#endif
-
 #endif

arch/arm/mach-ixp4xx/ixdp425-setup.c

@@ -258,6 +258,9 @@ MACHINE_START(IXDP425, "Intel IXDP425 Development Platform")
 	.timer		= &ixp4xx_timer,
 	.boot_params	= 0x0100,
 	.init_machine	= ixdp425_init,
+#if defined(CONFIG_PCI)
+	.dma_zone_size	= SZ_64M,
+#endif
 MACHINE_END
 #endif
@@ -269,6 +272,9 @@ MACHINE_START(IXDP465, "Intel IXDP465 Development Platform")
 	.timer		= &ixp4xx_timer,
 	.boot_params	= 0x0100,
 	.init_machine	= ixdp425_init,
+#if defined(CONFIG_PCI)
+	.dma_zone_size	= SZ_64M,
+#endif
 MACHINE_END
 #endif
@@ -280,6 +286,9 @@ MACHINE_START(IXCDP1100, "Intel IXCDP1100 Development Platform")
 	.timer		= &ixp4xx_timer,
 	.boot_params	= 0x0100,
 	.init_machine	= ixdp425_init,
+#if defined(CONFIG_PCI)
+	.dma_zone_size	= SZ_64M,
+#endif
 MACHINE_END
 #endif
@@ -291,5 +300,8 @@ MACHINE_START(KIXRP435, "Intel KIXRP435 Reference Platform")
 	.timer		= &ixp4xx_timer,
 	.boot_params	= 0x0100,
 	.init_machine	= ixdp425_init,
+#if defined(CONFIG_PCI)
+	.dma_zone_size	= SZ_64M,
+#endif
 MACHINE_END
 #endif

arch/arm/mach-ixp4xx/nas100d-setup.c

@@ -319,4 +319,7 @@ MACHINE_START(NAS100D, "Iomega NAS 100d")
 	.init_irq	= ixp4xx_init_irq,
 	.timer		= &ixp4xx_timer,
 	.init_machine	= nas100d_init,
+#if defined(CONFIG_PCI)
+	.dma_zone_size	= SZ_64M,
+#endif
 MACHINE_END

arch/arm/mach-ixp4xx/nslu2-setup.c

@@ -305,4 +305,7 @@ MACHINE_START(NSLU2, "Linksys NSLU2")
 	.init_irq	= ixp4xx_init_irq,
 	.timer		= &nslu2_timer,
 	.init_machine	= nslu2_init,
+#if defined(CONFIG_PCI)
+	.dma_zone_size	= SZ_64M,
+#endif
 MACHINE_END

arch/arm/mach-ixp4xx/vulcan-setup.c

@@ -241,4 +241,7 @@ MACHINE_START(ARCOM_VULCAN, "Arcom/Eurotech Vulcan")
 	.timer		= &ixp4xx_timer,
 	.boot_params	= 0x0100,
 	.init_machine	= vulcan_init,
+#if defined(CONFIG_PCI)
+	.dma_zone_size	= SZ_64M,
+#endif
 MACHINE_END

arch/arm/mach-ixp4xx/wg302v2-setup.c

@@ -102,5 +102,8 @@ MACHINE_START(WG302V2, "Netgear WG302 v2 / WAG302 v2")
 	.timer		= &ixp4xx_timer,
 	.boot_params	= 0x0100,
 	.init_machine	= wg302v2_init,
+#if defined(CONFIG_PCI)
+	.dma_zone_size	= SZ_64M,
+#endif
 MACHINE_END
 #endif
arch/arm/mach-pxa/cm-x2xx.c

@@ -518,4 +518,7 @@ MACHINE_START(ARMCORE, "Compulab CM-X2XX")
 	.init_irq	= cmx2xx_init_irq,
 	.timer		= &pxa_timer,
 	.init_machine	= cmx2xx_init,
+#ifdef CONFIG_PCI
+	.dma_zone_size	= SZ_64M,
+#endif
 MACHINE_END

arch/arm/mach-pxa/include/mach/memory.h

@@ -17,8 +17,4 @@
  */
 #define PLAT_PHYS_OFFSET	UL(0xa0000000)

-#if defined(CONFIG_MACH_ARMCORE) && defined(CONFIG_PCI)
-#define ARM_DMA_ZONE_SIZE	SZ_64M
-#endif
-
 #endif
arch/arm/mach-realview/include/mach/memory.h

@@ -29,10 +29,6 @@
 #define PLAT_PHYS_OFFSET		UL(0x00000000)
 #endif

-#ifdef CONFIG_ZONE_DMA
-#define ARM_DMA_ZONE_SIZE	SZ_256M
-#endif
-
 #ifdef CONFIG_SPARSEMEM

 /*

arch/arm/mach-realview/realview_eb.c

@@ -470,4 +470,7 @@ MACHINE_START(REALVIEW_EB, "ARM-RealView EB")
 	.init_irq	= gic_init_irq,
 	.timer		= &realview_eb_timer,
 	.init_machine	= realview_eb_init,
+#ifdef CONFIG_ZONE_DMA
+	.dma_zone_size	= SZ_256M,
+#endif
 MACHINE_END

arch/arm/mach-realview/realview_pb1176.c

@@ -365,4 +365,7 @@ MACHINE_START(REALVIEW_PB1176, "ARM-RealView PB1176")
 	.init_irq	= gic_init_irq,
 	.timer		= &realview_pb1176_timer,
 	.init_machine	= realview_pb1176_init,
+#ifdef CONFIG_ZONE_DMA
+	.dma_zone_size	= SZ_256M,
+#endif
 MACHINE_END

arch/arm/mach-realview/realview_pb11mp.c

@@ -367,4 +367,7 @@ MACHINE_START(REALVIEW_PB11MP, "ARM-RealView PB11MPCore")
 	.init_irq	= gic_init_irq,
 	.timer		= &realview_pb11mp_timer,
 	.init_machine	= realview_pb11mp_init,
+#ifdef CONFIG_ZONE_DMA
+	.dma_zone_size	= SZ_256M,
+#endif
 MACHINE_END

arch/arm/mach-realview/realview_pba8.c

@@ -317,4 +317,7 @@ MACHINE_START(REALVIEW_PBA8, "ARM-RealView PB-A8")
 	.init_irq	= gic_init_irq,
 	.timer		= &realview_pba8_timer,
 	.init_machine	= realview_pba8_init,
+#ifdef CONFIG_ZONE_DMA
+	.dma_zone_size	= SZ_256M,
+#endif
 MACHINE_END

arch/arm/mach-realview/realview_pbx.c

@@ -400,4 +400,7 @@ MACHINE_START(REALVIEW_PBX, "ARM-RealView PBX")
 	.init_irq	= gic_init_irq,
 	.timer		= &realview_pbx_timer,
 	.init_machine	= realview_pbx_init,
+#ifdef CONFIG_ZONE_DMA
+	.dma_zone_size	= SZ_256M,
+#endif
 MACHINE_END
arch/arm/mach-sa1100/assabet.c

@@ -453,4 +453,7 @@ MACHINE_START(ASSABET, "Intel-Assabet")
 	.init_irq	= sa1100_init_irq,
 	.timer		= &sa1100_timer,
 	.init_machine	= assabet_init,
+#ifdef CONFIG_SA1111
+	.dma_zone_size	= SZ_1M,
+#endif
 MACHINE_END

arch/arm/mach-sa1100/badge4.c

@@ -306,4 +306,7 @@ MACHINE_START(BADGE4, "Hewlett-Packard Laboratories BadgePAD 4")
 	.map_io		= badge4_map_io,
 	.init_irq	= sa1100_init_irq,
 	.timer		= &sa1100_timer,
+#ifdef CONFIG_SA1111
+	.dma_zone_size	= SZ_1M,
+#endif
 MACHINE_END

arch/arm/mach-sa1100/include/mach/memory.h

@@ -14,10 +14,6 @@
  */
 #define PLAT_PHYS_OFFSET	UL(0xc0000000)

-#ifdef CONFIG_SA1111
-#define ARM_DMA_ZONE_SIZE	SZ_1M
-#endif
-
 /*
  * Because of the wide memory address space between physical RAM banks on the
  * SA1100, it's much convenient to use Linux's SparseMEM support to implement

arch/arm/mach-sa1100/jornada720.c

@@ -369,4 +369,7 @@ MACHINE_START(JORNADA720, "HP Jornada 720")
 	.init_irq	= sa1100_init_irq,
 	.timer		= &sa1100_timer,
 	.init_machine	= jornada720_mach_init,
+#ifdef CONFIG_SA1111
+	.dma_zone_size	= SZ_1M,
+#endif
 MACHINE_END
arch/arm/mach-shark/core.c

@@ -156,4 +156,5 @@ MACHINE_START(SHARK, "Shark")
 	.map_io		= shark_map_io,
 	.init_irq	= shark_init_irq,
 	.timer		= &shark_timer,
+	.dma_zone_size	= SZ_4M,
 MACHINE_END

arch/arm/mach-shark/include/mach/memory.h

@@ -17,8 +17,6 @@
  */
 #define PLAT_PHYS_OFFSET	UL(0x08000000)

-#define ARM_DMA_ZONE_SIZE	SZ_4M
-
 /*
  * Cache flushing area
  */
arch/arm/mm/dma-mapping.c

@@ -25,9 +25,11 @@
 #include <asm/tlbflush.h>
 #include <asm/sizes.h>

+#include "mm.h"
+
 static u64 get_coherent_dma_mask(struct device *dev)
 {
-	u64 mask = ISA_DMA_THRESHOLD;
+	u64 mask = (u64)arm_dma_limit;

 	if (dev) {
 		mask = dev->coherent_dma_mask;

@@ -41,10 +43,10 @@ static u64 get_coherent_dma_mask(struct device *dev)
 			return 0;
 		}

-		if ((~mask) & ISA_DMA_THRESHOLD) {
+		if ((~mask) & (u64)arm_dma_limit) {
 			dev_warn(dev, "coherent DMA mask %#llx is smaller "
 				 "than system GFP_DMA mask %#llx\n",
-				 mask, (unsigned long long)ISA_DMA_THRESHOLD);
+				 mask, (u64)arm_dma_limit);
 			return 0;
 		}
 	}

@@ -657,6 +659,33 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 }
 EXPORT_SYMBOL(dma_sync_sg_for_device);

+/*
+ * Return whether the given device DMA address mask can be supported
+ * properly.  For example, if your device can only drive the low 24-bits
+ * during bus mastering, then you would pass 0x00ffffff as the mask
+ * to this function.
+ */
+int dma_supported(struct device *dev, u64 mask)
+{
+	if (mask < (u64)arm_dma_limit)
+		return 0;
+	return 1;
+}
+EXPORT_SYMBOL(dma_supported);
+
+int dma_set_mask(struct device *dev, u64 dma_mask)
+{
+	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
+		return -EIO;
+
+#ifndef CONFIG_DMABOUNCE
+	*dev->dma_mask = dma_mask;
+#endif
+
+	return 0;
+}
+EXPORT_SYMBOL(dma_set_mask);
+
 #define PREALLOC_DMA_DEBUG_ENTRIES	4096

 static int __init dma_debug_do_init(void)
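dma_supported() and dma_set_mask() are now ordinary exported functions instead of inline stubs keyed off ISA_DMA_THRESHOLD. For context, their usual consumer looks like the hypothetical probe fragment below; this is generic DMA-API usage rather than code from this commit, and the 24-bit mask is an arbitrary example echoing the comment above.

#include <linux/device.h>
#include <linux/dma-mapping.h>

/* Hypothetical probe fragment: negotiate a 24-bit DMA mask with the core. */
static int example_probe(struct device *dev)
{
	int ret;

	ret = dma_set_mask(dev, DMA_BIT_MASK(24));
	if (ret) {
		/* On ARM this now fails with -EIO when the requested mask
		 * is smaller than arm_dma_limit. */
		dev_err(dev, "no suitable DMA available\n");
		return ret;
	}
	return 0;
}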
arch/arm/mm/init.c

@@ -212,6 +212,18 @@ static void __init arm_bootmem_init(unsigned long start_pfn,
 }

 #ifdef CONFIG_ZONE_DMA
+
+unsigned long arm_dma_zone_size __read_mostly;
+EXPORT_SYMBOL(arm_dma_zone_size);
+
+/*
+ * The DMA mask corresponding to the maximum bus address allocatable
+ * using GFP_DMA.  The default here places no restriction on DMA
+ * allocations.  This must be the smallest DMA mask in the system,
+ * so a successful GFP_DMA allocation will always satisfy this.
+ */
+u32 arm_dma_limit;
+
 static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole,
 	unsigned long dma_size)
 {

@@ -267,17 +279,17 @@ static void __init arm_bootmem_free(unsigned long min, unsigned long max_low,
 #endif
 	}

-#ifdef ARM_DMA_ZONE_SIZE
-#ifndef CONFIG_ZONE_DMA
-#error ARM_DMA_ZONE_SIZE set but no DMA zone to limit allocations
-#endif
-
+#ifdef CONFIG_ZONE_DMA
 	/*
 	 * Adjust the sizes according to any special requirements for
 	 * this machine type.
 	 */
-	arm_adjust_dma_zone(zone_size, zhole_size,
-		ARM_DMA_ZONE_SIZE >> PAGE_SHIFT);
+	if (arm_dma_zone_size) {
+		arm_adjust_dma_zone(zone_size, zhole_size,
+			arm_dma_zone_size >> PAGE_SHIFT);
+		arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
+	} else
+		arm_dma_limit = 0xffffffff;
 #endif

 	free_area_init_node(0, zone_size, min, zhole_size);
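As a worked (hypothetical) example of the limit computed above: with PHYS_OFFSET 0x00000000 and a 64MB DMA zone, arm_dma_limit becomes 0x03ffffff, while a machine that never sets dma_zone_size keeps the unrestricted 0xffffffff. A stand-alone sketch of the same arithmetic:

#include <stdio.h>

/* Stand-alone sketch of the arm_dma_limit computation in arm_bootmem_free(). */
int main(void)
{
	unsigned long phys_offset = 0x00000000;		/* e.g. ixp4xx PLAT_PHYS_OFFSET */
	unsigned long dma_zone_size = 64UL << 20;	/* SZ_64M */
	unsigned long arm_dma_limit;

	arm_dma_limit = dma_zone_size ? phys_offset + dma_zone_size - 1
				      : 0xffffffffUL;
	printf("arm_dma_limit = %#lx\n", arm_dma_limit);	/* prints 0x3ffffff */
	return 0;
}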
arch/arm/mm/mm.h

@@ -23,5 +23,11 @@ extern void __flush_dcache_page(struct address_space *mapping, struct page *page);
 #endif

+#ifdef CONFIG_ZONE_DMA
+extern u32 arm_dma_limit;
+#else
+#define arm_dma_limit ((u32)~0)
+#endif
+
 void __init bootmem_init(void);
 void arm_mm_memblock_reserve(void);