Kirill Smelkov / linux · Commits · 9f5494b7

Commit 9f5494b7, authored Oct 02, 2008 by Benjamin Herrenschmidt

    Merge commit 'kumar/kumar-dma'

Parents: d2b194ed, b9579689

Showing 15 changed files with 265 additions and 294 deletions (+265 / -294)
arch/powerpc/include/asm/device.h        +0   -3
arch/powerpc/include/asm/dma-mapping.h   +61  -126
arch/powerpc/include/asm/machdep.h       +3   -2
arch/powerpc/include/asm/pci.h           +8   -6
arch/powerpc/include/asm/types.h         +1   -1
arch/powerpc/kernel/Makefile             +2   -2
arch/powerpc/kernel/dma-iommu.c          +2   -99
arch/powerpc/kernel/dma.c                +127 -0
arch/powerpc/kernel/of_device.c          +1   -1
arch/powerpc/kernel/pci-common.c         +48  -0
arch/powerpc/kernel/pci_32.c             +7   -0
arch/powerpc/kernel/pci_64.c             +0   -49
arch/powerpc/kernel/vio.c                +1   -1
arch/powerpc/platforms/cell/iommu.c      +3   -3
arch/powerpc/platforms/ps3/system-bus.c  +1   -1
arch/powerpc/include/asm/device.h

@@ -16,9 +16,6 @@ struct dev_archdata {
 	/* DMA operations on that device */
 	struct dma_mapping_ops	*dma_ops;
 	void		*dma_data;
-
-	/* NUMA node if applicable */
-	int		numa_node;
 };
 
 #endif /* _ASM_POWERPC_DEVICE_H */
arch/powerpc/include/asm/dma-mapping.h

@@ -44,8 +44,6 @@ extern void __dma_sync_page(struct page *page, unsigned long offset,
 
 #endif /* ! CONFIG_NOT_COHERENT_CACHE */
 
-#ifdef CONFIG_PPC64
-
 static inline unsigned long device_to_mask(struct device *dev)
 {
 	if (dev->dma_mask && *dev->dma_mask)
@@ -76,8 +74,24 @@ struct dma_mapping_ops {
 				struct dma_attrs *attrs);
 	int		(*dma_supported)(struct device *dev, u64 mask);
 	int		(*set_dma_mask)(struct device *dev, u64 dma_mask);
+	dma_addr_t	(*map_page)(struct device *dev, struct page *page,
+				unsigned long offset, size_t size,
+				enum dma_data_direction direction,
+				struct dma_attrs *attrs);
+	void		(*unmap_page)(struct device *dev,
+				dma_addr_t dma_address, size_t size,
+				enum dma_data_direction direction,
+				struct dma_attrs *attrs);
 };
 
+/*
+ * Available generic sets of operations
+ */
+#ifdef CONFIG_PPC64
+extern struct dma_mapping_ops dma_iommu_ops;
+#endif
+extern struct dma_mapping_ops dma_direct_ops;
+
 static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
 {
 	/* We don't handle the NULL dev case for ISA for now. We could
@@ -85,8 +99,19 @@ static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
 	 * only ISA DMA device we support is the floppy and we have a hack
 	 * in the floppy driver directly to get a device for us.
 	 */
-	if (unlikely(dev == NULL || dev->archdata.dma_ops == NULL))
+	if (unlikely(dev == NULL) || dev->archdata.dma_ops == NULL) {
+#ifdef CONFIG_PPC64
 		return NULL;
+#else
+		/* Use default on 32-bit if dma_ops is not set up */
+		/* TODO: Long term, we should fix drivers so that dev and
+		 * archdata dma_ops are set up for all buses.
+		 */
+		return &dma_direct_ops;
+#endif
+	}
+
 	return dev->archdata.dma_ops;
 }
@@ -123,6 +148,12 @@ static inline int dma_set_mask(struct device *dev, u64 dma_mask)
 	return 0;
 }
 
+/*
+ * TODO: map_/unmap_single will ideally go away, to be completely
+ * replaced by map/unmap_page.  Until then, we allow dma_ops to have
+ * one or the other, or both by checking to see if the specific
+ * function requested exists; and if not, falling back on the other set.
+ */
 static inline dma_addr_t dma_map_single_attrs(struct device *dev,
 					      void *cpu_addr,
 					      size_t size,
@@ -132,7 +163,14 @@ static inline dma_addr_t dma_map_single_attrs(struct device *dev,
 	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
 
 	BUG_ON(!dma_ops);
-	return dma_ops->map_single(dev, cpu_addr, size, direction, attrs);
+
+	if (dma_ops->map_single)
+		return dma_ops->map_single(dev, cpu_addr, size, direction,
+					   attrs);
+
+	return dma_ops->map_page(dev, virt_to_page(cpu_addr),
+				 (unsigned long)cpu_addr % PAGE_SIZE, size,
+				 direction, attrs);
 }
 
 static inline void dma_unmap_single_attrs(struct device *dev,
@@ -144,7 +182,13 @@ static inline void dma_unmap_single_attrs(struct device *dev,
 	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
 
 	BUG_ON(!dma_ops);
-	dma_ops->unmap_single(dev, dma_addr, size, direction, attrs);
+
+	if (dma_ops->unmap_single) {
+		dma_ops->unmap_single(dev, dma_addr, size, direction, attrs);
+		return;
+	}
+
+	dma_ops->unmap_page(dev, dma_addr, size, direction, attrs);
 }
 
 static inline dma_addr_t dma_map_page_attrs(struct device *dev,
@@ -156,8 +200,13 @@ static inline dma_addr_t dma_map_page_attrs(struct device *dev,
 	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
 
 	BUG_ON(!dma_ops);
+
+	if (dma_ops->map_page)
+		return dma_ops->map_page(dev, page, offset, size, direction,
+					 attrs);
+
 	return dma_ops->map_single(dev, page_address(page) + offset, size,
-			direction, attrs);
+				   direction, attrs);
 }
 
 static inline void dma_unmap_page_attrs(struct device *dev,
@@ -169,6 +218,12 @@ static inline void dma_unmap_page_attrs(struct device *dev,
 	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
 
 	BUG_ON(!dma_ops);
+
+	if (dma_ops->unmap_page) {
+		dma_ops->unmap_page(dev, dma_address, size, direction, attrs);
+		return;
+	}
+
 	dma_ops->unmap_single(dev, dma_address, size, direction, attrs);
 }
@@ -253,126 +308,6 @@ static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
 	dma_unmap_sg_attrs(dev, sg, nhwentries, direction, NULL);
 }
 
-/*
- * Available generic sets of operations
- */
-extern struct dma_mapping_ops dma_iommu_ops;
-extern struct dma_mapping_ops dma_direct_ops;
-
-#else /* CONFIG_PPC64 */
-
-#define dma_supported(dev, mask)	(1)
-
-static inline int dma_set_mask(struct device *dev, u64 dma_mask)
-{
-	if (!dev->dma_mask || !dma_supported(dev, mask))
-		return -EIO;
-	*dev->dma_mask = dma_mask;
-	return 0;
-}
-
-static inline void *dma_alloc_coherent(struct device *dev, size_t size,
-					dma_addr_t *dma_handle, gfp_t gfp)
-{
-#ifdef CONFIG_NOT_COHERENT_CACHE
-	return __dma_alloc_coherent(size, dma_handle, gfp);
-#else
-	void *ret;
-
-	/* ignore region specifiers */
-	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
-
-	if (dev == NULL || dev->coherent_dma_mask < 0xffffffff)
-		gfp |= GFP_DMA;
-
-	ret = (void *)__get_free_pages(gfp, get_order(size));
-
-	if (ret != NULL) {
-		memset(ret, 0, size);
-		*dma_handle = virt_to_bus(ret);
-	}
-
-	return ret;
-#endif
-}
-
-static inline void dma_free_coherent(struct device *dev, size_t size,
-				     void *vaddr, dma_addr_t dma_handle)
-{
-#ifdef CONFIG_NOT_COHERENT_CACHE
-	__dma_free_coherent(size, vaddr);
-#else
-	free_pages((unsigned long)vaddr, get_order(size));
-#endif
-}
-
-static inline dma_addr_t dma_map_single(struct device *dev, void *ptr,
-					size_t size,
-					enum dma_data_direction direction)
-{
-	BUG_ON(direction == DMA_NONE);
-	__dma_sync(ptr, size, direction);
-	return virt_to_bus(ptr);
-}
-
-static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
-				    size_t size,
-				    enum dma_data_direction direction)
-{
-	/* We do nothing. */
-}
-
-static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
-				      unsigned long offset, size_t size,
-				      enum dma_data_direction direction)
-{
-	BUG_ON(direction == DMA_NONE);
-	__dma_sync_page(page, offset, size, direction);
-	return page_to_bus(page) + offset;
-}
-
-static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
-				  size_t size,
-				  enum dma_data_direction direction)
-{
-	/* We do nothing. */
-}
-
-static inline int dma_map_sg(struct device *dev, struct scatterlist *sgl,
-			     int nents, enum dma_data_direction direction)
-{
-	struct scatterlist *sg;
-	int i;
-
-	BUG_ON(direction == DMA_NONE);
-
-	for_each_sg(sgl, sg, nents, i) {
-		BUG_ON(!sg_page(sg));
-		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
-		sg->dma_address = page_to_bus(sg_page(sg)) + sg->offset;
-	}
-
-	return nents;
-}
-
-static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
-				int nhwentries,
-				enum dma_data_direction direction)
-{
-	/* We don't do anything here. */
-}
-
-#endif /* CONFIG_PPC64 */
-
 static inline void dma_sync_single_for_cpu(struct device *dev,
 		dma_addr_t dma_handle, size_t size,
 		enum dma_data_direction direction)
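The TODO block added above spells out the transition rule: a dma_mapping_ops may provide map_single, map_page, or both, and the inline wrappers probe for the hook before calling it. A caller-side sketch, not part of this commit and with an invented function name, of what that means for a driver:

    #include <linux/dma-mapping.h>

    /* Hypothetical driver helper: dma_map_single() above uses
     * dma_ops->map_single when it exists and otherwise falls back to
     * dma_ops->map_page(virt_to_page(buf), offset-in-page, ...).
     */
    static dma_addr_t example_map_tx_buffer(struct device *dev, void *buf,
                                            size_t len)
    {
            return dma_map_single(dev, buf, len, DMA_TO_DEVICE);
    }

Either way the driver gets back a single dma_addr_t; only the bus-level ops structure decides which hook services the request.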
arch/powerpc/include/asm/machdep.h

@@ -88,8 +88,6 @@ struct machdep_calls {
 	unsigned long	(*tce_get)(struct iommu_table *tbl,
 			    long index);
 	void		(*tce_flush)(struct iommu_table *tbl);
-	void		(*pci_dma_dev_setup)(struct pci_dev *dev);
-	void		(*pci_dma_bus_setup)(struct pci_bus *bus);
 
 	void __iomem *	(*ioremap)(phys_addr_t addr, unsigned long size,
 				   unsigned long flags);
@@ -101,6 +99,9 @@ struct machdep_calls {
 #endif
 #endif /* CONFIG_PPC64 */
 
+	void		(*pci_dma_dev_setup)(struct pci_dev *dev);
+	void		(*pci_dma_bus_setup)(struct pci_bus *bus);
+
 	int		(*probe)(void);
 	void		(*setup_arch)(void); /* Optional, may be NULL */
 	void		(*init_early)(void);
arch/powerpc/include/asm/pci.h

@@ -60,6 +60,14 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
 	return channel ? 15 : 14;
 }
 
+#ifdef CONFIG_PCI
+extern void set_pci_dma_ops(struct dma_mapping_ops *dma_ops);
+extern struct dma_mapping_ops *get_pci_dma_ops(void);
+#else	/* CONFIG_PCI */
+#define set_pci_dma_ops(d)
+#define get_pci_dma_ops()	NULL
+#endif
+
 #ifdef CONFIG_PPC64
 
 /*
@@ -70,9 +78,6 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
 #define PCI_DISABLE_MWI
 
 #ifdef CONFIG_PCI
-extern void set_pci_dma_ops(struct dma_mapping_ops *dma_ops);
-extern struct dma_mapping_ops *get_pci_dma_ops(void);
-
 static inline void pci_dma_burst_advice(struct pci_dev *pdev,
 					enum pci_dma_burst_strategy *strat,
 					unsigned long *strategy_parameter)
@@ -89,9 +94,6 @@ static inline void pci_dma_burst_advice(struct pci_dev *pdev,
 	*strat = PCI_DMA_BURST_MULTIPLE;
 	*strategy_parameter = cacheline_size;
 }
-#else	/* CONFIG_PCI */
-#define set_pci_dma_ops(d)
-#define get_pci_dma_ops()	NULL
 #endif
 
 #else /* 32-bit */
arch/powerpc/include/asm/types.h

@@ -55,7 +55,7 @@ typedef u64 phys_addr_t;
 typedef u32 phys_addr_t;
 #endif
 
-#ifdef __powerpc64__
+#if defined(__powerpc64__) || defined(CONFIG_PHYS_64BIT)
 typedef u64 dma_addr_t;
 #else
 typedef u32 dma_addr_t;
arch/powerpc/kernel/Makefile

@@ -70,10 +70,10 @@ extra-$(CONFIG_8xx)	:= head_8xx.o
 extra-y				+= vmlinux.lds
 
 obj-y				+= time.o prom.o traps.o setup-common.o \
-				   udbg.o misc.o io.o \
+				   udbg.o misc.o io.o dma.o \
 				   misc_$(CONFIG_WORD_SIZE).o
 obj-$(CONFIG_PPC32)		+= entry_32.o setup_32.o
-obj-$(CONFIG_PPC64)		+= dma_64.o iommu.o
+obj-$(CONFIG_PPC64)		+= dma-iommu.o iommu.o
 obj-$(CONFIG_KGDB)		+= kgdb.o
 obj-$(CONFIG_PPC_MULTIPLATFORM)	+= prom_init.o
 obj-$(CONFIG_MODULES)		+= ppc_ksyms.o
arch/powerpc/kernel/dma_64.c → arch/powerpc/kernel/dma-iommu.c

@@ -2,14 +2,10 @@
  * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
  *
  * Provide default implementations of the DMA mapping callbacks for
- * directly mapped busses and busses using the iommu infrastructure
+ * busses using the iommu infrastructure
  */
 
-#include <linux/device.h>
-#include <linux/dma-mapping.h>
-#include <asm/bug.h>
 #include <asm/iommu.h>
-#include <asm/abs_addr.h>
 
 /*
  * Generic iommu implementation
@@ -24,7 +20,7 @@ static void *dma_iommu_alloc_coherent(struct device *dev, size_t size,
 {
 	return iommu_alloc_coherent(dev, dev->archdata.dma_data, size,
 				    dma_handle, device_to_mask(dev), flag,
-				    dev->archdata.numa_node);
+				    dev_to_node(dev));
 }
 
 static void dma_iommu_free_coherent(struct device *dev, size_t size,
@@ -105,96 +101,3 @@ struct dma_mapping_ops dma_iommu_ops = {
 	.dma_supported	= dma_iommu_dma_supported,
 };
 EXPORT_SYMBOL(dma_iommu_ops);
-
-/*
- * Generic direct DMA implementation
- *
- * This implementation supports a per-device offset that can be applied if
- * the address at which memory is visible to devices is not 0. Platform code
- * can set archdata.dma_data to an unsigned long holding the offset. By
- * default the offset is zero.
- */
-
-static unsigned long get_dma_direct_offset(struct device *dev)
-{
-	return (unsigned long)dev->archdata.dma_data;
-}
-
-static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
-					dma_addr_t *dma_handle, gfp_t flag)
-{
-	struct page *page;
-	void *ret;
-	int node = dev->archdata.numa_node;
-
-	page = alloc_pages_node(node, flag, get_order(size));
-	if (page == NULL)
-		return NULL;
-	ret = page_address(page);
-	memset(ret, 0, size);
-	*dma_handle = virt_to_abs(ret) + get_dma_direct_offset(dev);
-
-	return ret;
-}
-
-static void dma_direct_free_coherent(struct device *dev, size_t size,
-				     void *vaddr, dma_addr_t dma_handle)
-{
-	free_pages((unsigned long)vaddr, get_order(size));
-}
-
-static dma_addr_t dma_direct_map_single(struct device *dev, void *ptr,
-					size_t size,
-					enum dma_data_direction direction,
-					struct dma_attrs *attrs)
-{
-	return virt_to_abs(ptr) + get_dma_direct_offset(dev);
-}
-
-static void dma_direct_unmap_single(struct device *dev, dma_addr_t dma_addr,
-				    size_t size,
-				    enum dma_data_direction direction,
-				    struct dma_attrs *attrs)
-{
-}
-
-static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
-			     int nents, enum dma_data_direction direction,
-			     struct dma_attrs *attrs)
-{
-	struct scatterlist *sg;
-	int i;
-
-	for_each_sg(sgl, sg, nents, i) {
-		sg->dma_address = sg_phys(sg) + get_dma_direct_offset(dev);
-		sg->dma_length = sg->length;
-	}
-
-	return nents;
-}
-
-static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg,
-				int nents, enum dma_data_direction direction,
-				struct dma_attrs *attrs)
-{
-}
-
-static int dma_direct_dma_supported(struct device *dev, u64 mask)
-{
-	/* Could be improved to check for memory though it better be
-	 * done via some global so platforms can set the limit in case
-	 * they have limited DMA windows
-	 */
-	return mask >= DMA_32BIT_MASK;
-}
-
-struct dma_mapping_ops dma_direct_ops = {
-	.alloc_coherent	= dma_direct_alloc_coherent,
-	.free_coherent	= dma_direct_free_coherent,
-	.map_single	= dma_direct_map_single,
-	.unmap_single	= dma_direct_unmap_single,
-	.map_sg		= dma_direct_map_sg,
-	.unmap_sg	= dma_direct_unmap_sg,
-	.dma_supported	= dma_direct_dma_supported,
-};
-EXPORT_SYMBOL(dma_direct_ops);
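What remains in dma-iommu.c is only the iommu-backed ops set; the direct implementation moves to the new dma.c below. For orientation, a per-device setup sketch modelled on the vio.c hunk later in this diff (the helper name is invented): a bus points archdata.dma_ops at dma_iommu_ops and stores its iommu_table in archdata.dma_data, which dma_iommu_alloc_coherent() and friends read back out.

    #include <linux/dma-mapping.h>
    #include <asm/iommu.h>

    /* Hypothetical bus hook, mirroring what vio.c does for VIO devices. */
    static void example_iommu_dev_setup(struct device *dev,
                                        struct iommu_table *tbl)
    {
            dev->archdata.dma_ops  = &dma_iommu_ops;
            dev->archdata.dma_data = tbl;   /* consumed by dma-iommu.c */
    }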
arch/powerpc/kernel/dma.c  (new file, 0 → 100644)

/*
 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
 *
 * Provide default implementations of the DMA mapping callbacks for
 * directly mapped busses.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <asm/bug.h>
#include <asm/abs_addr.h>

/*
 * Generic direct DMA implementation
 *
 * This implementation supports a per-device offset that can be applied if
 * the address at which memory is visible to devices is not 0. Platform code
 * can set archdata.dma_data to an unsigned long holding the offset. By
 * default the offset is PCI_DRAM_OFFSET.
 */

static unsigned long get_dma_direct_offset(struct device *dev)
{
	if (dev)
		return (unsigned long)dev->archdata.dma_data;

	return PCI_DRAM_OFFSET;
}

void *dma_direct_alloc_coherent(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t flag)
{
#ifdef CONFIG_NOT_COHERENT_CACHE
	return __dma_alloc_coherent(size, dma_handle, flag);
#else
	struct page *page;
	void *ret;
	int node = dev_to_node(dev);

	/* ignore region specifiers */
	flag &= ~(__GFP_HIGHMEM);

	page = alloc_pages_node(node, flag, get_order(size));
	if (page == NULL)
		return NULL;
	ret = page_address(page);
	memset(ret, 0, size);
	*dma_handle = virt_to_abs(ret) + get_dma_direct_offset(dev);

	return ret;
#endif
}

void dma_direct_free_coherent(struct device *dev, size_t size,
			      void *vaddr, dma_addr_t dma_handle)
{
#ifdef CONFIG_NOT_COHERENT_CACHE
	__dma_free_coherent(size, vaddr);
#else
	free_pages((unsigned long)vaddr, get_order(size));
#endif
}

static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
			     int nents, enum dma_data_direction direction,
			     struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		sg->dma_address = sg_phys(sg) + get_dma_direct_offset(dev);
		sg->dma_length = sg->length;
	}

	return nents;
}

static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction direction,
				struct dma_attrs *attrs)
{
}

static int dma_direct_dma_supported(struct device *dev, u64 mask)
{
#ifdef CONFIG_PPC64
	/* Could be improved to check for memory though it better be
	 * done via some global so platforms can set the limit in case
	 * they have limited DMA windows
	 */
	return mask >= DMA_32BIT_MASK;
#else
	return 1;
#endif
}

static inline dma_addr_t dma_direct_map_page(struct device *dev,
					     struct page *page,
					     unsigned long offset,
					     size_t size,
					     enum dma_data_direction dir,
					     struct dma_attrs *attrs)
{
	BUG_ON(dir == DMA_NONE);
	__dma_sync_page(page, offset, size, dir);
	return page_to_phys(page) + offset + get_dma_direct_offset(dev);
}

static inline void dma_direct_unmap_page(struct device *dev,
					 dma_addr_t dma_address,
					 size_t size,
					 enum dma_data_direction direction,
					 struct dma_attrs *attrs)
{
}

struct dma_mapping_ops dma_direct_ops = {
	.alloc_coherent	= dma_direct_alloc_coherent,
	.free_coherent	= dma_direct_free_coherent,
	.map_sg		= dma_direct_map_sg,
	.unmap_sg	= dma_direct_unmap_sg,
	.dma_supported	= dma_direct_dma_supported,
	.map_page	= dma_direct_map_page,
	.unmap_page	= dma_direct_unmap_page,
};
EXPORT_SYMBOL(dma_direct_ops);
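The header comment in dma.c is the contract platform code programs against: select dma_direct_ops and, when device-visible addresses are shifted, park the shift in archdata.dma_data; with no device at all the code falls back to PCI_DRAM_OFFSET. A hedged sketch of such a platform hook (the function name and the 0x80000000 offset are illustrative only; the real PCI wiring with PCI_DRAM_OFFSET is in pci-common.c below):

    #include <linux/device.h>
    #include <linux/dma-mapping.h>

    /* Hypothetical bus hook: use the generic direct ops with a fixed bus
     * offset, which get_dma_direct_offset() adds to every mapping.
     */
    static void example_direct_dma_dev_setup(struct device *dev)
    {
            dev->archdata.dma_ops  = &dma_direct_ops;
            dev->archdata.dma_data = (void *)0x80000000ul;  /* illustrative */
    }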
arch/powerpc/kernel/of_device.c

@@ -78,7 +78,7 @@ struct of_device *of_device_alloc(struct device_node *np,
 	dev->dev.parent = parent;
 	dev->dev.release = of_release_dev;
 	dev->dev.archdata.of_node = np;
-	dev->dev.archdata.numa_node = of_node_to_nid(np);
+	set_dev_node(&dev->dev, of_node_to_nid(np));
 
 	if (bus_id)
 		strlcpy(dev->dev.bus_id, bus_id, BUS_ID_SIZE);
arch/powerpc/kernel/pci-common.c

@@ -56,6 +56,34 @@ resource_size_t isa_mem_base;
 /* Default PCI flags is 0 */
 unsigned int ppc_pci_flags;
 
+static struct dma_mapping_ops *pci_dma_ops;
+
+void set_pci_dma_ops(struct dma_mapping_ops *dma_ops)
+{
+	pci_dma_ops = dma_ops;
+}
+
+struct dma_mapping_ops *get_pci_dma_ops(void)
+{
+	return pci_dma_ops;
+}
+EXPORT_SYMBOL(get_pci_dma_ops);
+
+int pci_set_dma_mask(struct pci_dev *dev, u64 mask)
+{
+	return dma_set_mask(&dev->dev, mask);
+}
+
+int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
+{
+	int rc;
+
+	rc = dma_set_mask(&dev->dev, mask);
+	dev->dev.coherent_dma_mask = dev->dma_mask;
+
+	return rc;
+}
+
 struct pci_controller *pcibios_alloc_controller(struct device_node *dev)
 {
 	struct pci_controller *phb;
@@ -180,6 +208,26 @@ char __devinit *pcibios_setup(char *str)
 	return str;
 }
 
+void __devinit pcibios_setup_new_device(struct pci_dev *dev)
+{
+	struct dev_archdata *sd = &dev->dev.archdata;
+
+	sd->of_node = pci_device_to_OF_node(dev);
+
+	DBG("PCI: device %s OF node: %s\n", pci_name(dev),
+	    sd->of_node ? sd->of_node->full_name : "<none>");
+
+	sd->dma_ops = pci_dma_ops;
+#ifdef CONFIG_PPC32
+	sd->dma_data = (void *)PCI_DRAM_OFFSET;
+#endif
+	set_dev_node(&dev->dev, pcibus_to_node(dev->bus));
+
+	if (ppc_md.pci_dma_dev_setup)
+		ppc_md.pci_dma_dev_setup(dev);
+}
+EXPORT_SYMBOL(pcibios_setup_new_device);
+
 /*
  * Reads the interrupt pin to determine if interrupt is use by card.
  * If the interrupt is used, then gets the interrupt line from the
arch/powerpc/kernel/pci_32.c

@@ -424,6 +424,7 @@ void __devinit pcibios_do_bus_setup(struct pci_bus *bus)
 	unsigned long io_offset;
 	struct resource *res;
 	int i;
+	struct pci_dev *dev;
 
 	/* Hookup PHB resources */
 	io_offset = (unsigned long)hose->io_base_virt - isa_io_base;
@@ -457,6 +458,12 @@ void __devinit pcibios_do_bus_setup(struct pci_bus *bus)
 			bus->resource[i+1] = res;
 		}
 	}
+
+	if (ppc_md.pci_dma_bus_setup)
+		ppc_md.pci_dma_bus_setup(bus);
+
+	list_for_each_entry(dev, &bus->devices, bus_list)
+		pcibios_setup_new_device(dev);
 }
 
 /* the next one is stolen from the alpha port... */
arch/powerpc/kernel/pci_64.c

@@ -52,35 +52,6 @@ EXPORT_SYMBOL(pci_io_base);
 
 LIST_HEAD(hose_list);
 
-static struct dma_mapping_ops *pci_dma_ops;
-
-void set_pci_dma_ops(struct dma_mapping_ops *dma_ops)
-{
-	pci_dma_ops = dma_ops;
-}
-
-struct dma_mapping_ops *get_pci_dma_ops(void)
-{
-	return pci_dma_ops;
-}
-EXPORT_SYMBOL(get_pci_dma_ops);
-
-int pci_set_dma_mask(struct pci_dev *dev, u64 mask)
-{
-	return dma_set_mask(&dev->dev, mask);
-}
-
-int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
-{
-	int rc;
-
-	rc = dma_set_mask(&dev->dev, mask);
-	dev->dev.coherent_dma_mask = dev->dma_mask;
-
-	return rc;
-}
-
 static void fixup_broken_pcnet32(struct pci_dev *dev)
 {
 	if ((dev->class>>8 == PCI_CLASS_NETWORK_ETHERNET)) {
@@ -548,26 +519,6 @@ int __devinit pcibios_map_io_space(struct pci_bus *bus)
 }
 EXPORT_SYMBOL_GPL(pcibios_map_io_space);
 
-void __devinit pcibios_setup_new_device(struct pci_dev *dev)
-{
-	struct dev_archdata *sd = &dev->dev.archdata;
-
-	sd->of_node = pci_device_to_OF_node(dev);
-
-	DBG("PCI: device %s OF node: %s\n", pci_name(dev),
-	    sd->of_node ? sd->of_node->full_name : "<none>");
-
-	sd->dma_ops = pci_dma_ops;
-#ifdef CONFIG_NUMA
-	sd->numa_node = pcibus_to_node(dev->bus);
-#else
-	sd->numa_node = -1;
-#endif
-	if (ppc_md.pci_dma_dev_setup)
-		ppc_md.pci_dma_dev_setup(dev);
-}
-EXPORT_SYMBOL(pcibios_setup_new_device);
-
 void __devinit pcibios_do_bus_setup(struct pci_bus *bus)
 {
 	struct pci_dev *dev;
arch/powerpc/kernel/vio.c

@@ -1232,7 +1232,7 @@ struct vio_dev *vio_register_device_node(struct device_node *of_node)
 	else
 		viodev->dev.archdata.dma_ops = &dma_iommu_ops;
 	viodev->dev.archdata.dma_data = vio_build_iommu_table(viodev);
-	viodev->dev.archdata.numa_node = of_node_to_nid(of_node);
+	set_dev_node(&viodev->dev, of_node_to_nid(of_node));
 
 	/* init generic 'struct device' fields: */
 	viodev->dev.parent = &vio_bus_device.dev;
arch/powerpc/platforms/cell/iommu.c

@@ -556,11 +556,11 @@ static struct iommu_table *cell_get_iommu_table(struct device *dev)
 	 * node's iommu. We -might- do something smarter later though it may
 	 * never be necessary
 	 */
-	iommu = cell_iommu_for_node(archdata->numa_node);
+	iommu = cell_iommu_for_node(dev_to_node(dev));
 	if (iommu == NULL || list_empty(&iommu->windows)) {
 		printk(KERN_ERR "iommu: missing iommu for %s (node %d)\n",
 		       archdata->of_node ? archdata->of_node->full_name : "?",
-		       archdata->numa_node);
+		       dev_to_node(dev));
 		return NULL;
 	}
 	window = list_entry(iommu->windows.next, struct iommu_window, list);
@@ -577,7 +577,7 @@ static void *dma_fixed_alloc_coherent(struct device *dev, size_t size,
 		return iommu_alloc_coherent(dev, cell_get_iommu_table(dev),
 					    size, dma_handle,
 					    device_to_mask(dev), flag,
-					    dev->archdata.numa_node);
+					    dev_to_node(dev));
 	else
 		return dma_direct_ops.alloc_coherent(dev, size, dma_handle,
 						     flag);
arch/powerpc/platforms/ps3/system-bus.c

@@ -762,7 +762,7 @@ int ps3_system_bus_device_register(struct ps3_system_bus_device *dev)
 	};
 
 	dev->core.archdata.of_node = NULL;
-	dev->core.archdata.numa_node = 0;
+	set_dev_node(&dev->core, 0);
 
 	pr_debug("%s:%d add %s\n", __func__, __LINE__, dev->core.bus_id);