Commit 0e6850f6 authored Nov 19, 2005 by Paul Mackerras

Merge branch 'mymerge' of ssh://ozlabs.org/home/sfr/kernel-sfr

parents e5356640 78b09735
Showing 3 changed files with 110 additions and 181 deletions
include/asm-powerpc/dma-mapping.h   +93  -45
include/asm-ppc/io.h                +17  -0
include/asm-ppc64/dma-mapping.h     +0   -136
include/asm-ppc/dma-mapping.h → include/asm-powerpc/dma-mapping.h
/*
* This is based on both include/asm-sh/dma-mapping.h and
* include/asm-ppc/pci.h
* Copyright (C) 2004 IBM
*
* Implements the generic device dma API for powerpc.
* the pci and vio busses
*/
#ifndef __ASM_PPC_DMA_MAPPING_H
#define __ASM_PPC_DMA_MAPPING_H
#ifndef _ASM_DMA_MAPPING_H
#define _ASM_DMA_MAPPING_H
#include <linux/config.h>
#include <linux/types.h>
#include <linux/cache.h>
/* need struct page definitions */
#include <linux/mm.h>
#include <asm/scatterlist.h>
#include <asm/io.h>
#include <asm/bug.h>
#define DMA_ERROR_CODE (~(dma_addr_t)0x0)
#ifdef CONFIG_NOT_COHERENT_CACHE
/*
...
...
@@ -24,22 +31,12 @@ extern void __dma_free_coherent(size_t size, void *vaddr);
extern void __dma_sync(void *vaddr, size_t size, int direction);
extern void __dma_sync_page(struct page *page, unsigned long offset,
                            size_t size, int direction);
#define dma_cache_inv(_start,_size) \
invalidate_dcache_range(_start, (_start + _size))
#define dma_cache_wback(_start,_size) \
clean_dcache_range(_start, (_start + _size))
#define dma_cache_wback_inv(_start,_size) \
flush_dcache_range(_start, (_start + _size))
#else /* ! CONFIG_NOT_COHERENT_CACHE */
/*
* Cache coherent cores.
*/
#define dma_cache_inv(_start,_size) do { } while (0)
#define dma_cache_wback(_start,_size) do { } while (0)
#define dma_cache_wback_inv(_start,_size) do { } while (0)
#define __dma_alloc_coherent(gfp, size, handle) NULL
#define __dma_free_coherent(size, addr) do { } while (0)
#define __dma_sync(addr, size, rw) do { } while (0)
...
...
@@ -47,6 +44,30 @@ extern void __dma_sync_page(struct page *page, unsigned long offset,
#endif /* ! CONFIG_NOT_COHERENT_CACHE */
#ifdef CONFIG_PPC64
extern int dma_supported(struct device *dev, u64 mask);
extern int dma_set_mask(struct device *dev, u64 dma_mask);
extern void *dma_alloc_coherent(struct device *dev, size_t size,
                dma_addr_t *dma_handle, gfp_t flag);
extern void dma_free_coherent(struct device *dev, size_t size,
                void *cpu_addr, dma_addr_t dma_handle);
extern dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
                size_t size, enum dma_data_direction direction);
extern void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
                size_t size, enum dma_data_direction direction);
extern dma_addr_t dma_map_page(struct device *dev, struct page *page,
                unsigned long offset, size_t size,
                enum dma_data_direction direction);
extern void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
                size_t size, enum dma_data_direction direction);
extern int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
                enum dma_data_direction direction);
extern void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
                int nhwentries, enum dma_data_direction direction);
#else /* CONFIG_PPC64 */
#define dma_supported(dev, mask) (1)
static inline int dma_set_mask(struct device *dev, u64 dma_mask)
...
...
@@ -144,29 +165,27 @@ dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
/* We don't do anything here. */
#define dma_unmap_sg(dev, sg, nents, dir) do { } while (0)
#endif /* CONFIG_PPC64 */

static inline void dma_sync_single_for_cpu(struct device *dev,
                dma_addr_t dma_handle, size_t size,
                enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        __dma_sync(bus_to_virt(dma_handle), size, direction);
}
static inline void dma_sync_single_for_device(struct device *dev,
                dma_addr_t dma_handle, size_t size,
                enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        __dma_sync(bus_to_virt(dma_handle), size, direction);
}
static inline void dma_sync_sg_for_cpu(struct device *dev,
                struct scatterlist *sg, int nents,
                enum dma_data_direction direction)
{
        int i;
...
...
@@ -176,9 +195,9 @@ dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
                __dma_sync_page(sg->page, sg->offset, sg->length, direction);
}
static inline void dma_sync_sg_for_device(struct device *dev,
                struct scatterlist *sg, int nents,
                enum dma_data_direction direction)
{
        int i;
...
...
@@ -188,6 +207,15 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents,
                __dma_sync_page(sg->page, sg->offset, sg->length, direction);
}
static inline int dma_mapping_error(dma_addr_t dma_addr)
{
#ifdef CONFIG_PPC64
        return (dma_addr == DMA_ERROR_CODE);
#else
        return 0;
#endif
}
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#ifdef CONFIG_NOT_COHERENT_CACHE
...
...
@@ -198,40 +226,60 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents,
static inline int dma_get_cache_alignment(void)
{
#ifdef CONFIG_PPC64
        /* no easy way to get cache size on all processors, so return
         * the maximum possible, to be safe */
        return (1 << L1_CACHE_SHIFT_MAX);
#else
        /*
         * Each processor family will define its own L1_CACHE_SHIFT,
         * L1_CACHE_BYTES wraps to this, so this is always safe.
         */
        return L1_CACHE_BYTES;
#endif
}
static inline void dma_sync_single_range_for_cpu(struct device *dev,
                dma_addr_t dma_handle, unsigned long offset, size_t size,
                enum dma_data_direction direction)
{
        /* just sync everything for now */
        dma_sync_single_for_cpu(dev, dma_handle, offset + size, direction);
}
static inline void dma_sync_single_range_for_device(struct device *dev,
                dma_addr_t dma_handle, unsigned long offset, size_t size,
                enum dma_data_direction direction)
{
        /* just sync everything for now */
        dma_sync_single_for_device(dev, dma_handle, offset + size, direction);
}
static inline void dma_cache_sync(void *vaddr, size_t size,
                enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);
        __dma_sync(vaddr, size, (int)direction);
}
static inline int dma_mapping_error(dma_addr_t dma_addr)
{
        return 0;
}
#endif /* __ASM_PPC_DMA_MAPPING_H */
/*
* DMA operations are abstracted for G5 vs. i/pSeries, PCI vs. VIO
*/
struct dma_mapping_ops {
        void *     (*alloc_coherent)(struct device *dev, size_t size,
                                dma_addr_t *dma_handle, gfp_t flag);
        void       (*free_coherent)(struct device *dev, size_t size,
                                void *vaddr, dma_addr_t dma_handle);
        dma_addr_t (*map_single)(struct device *dev, void *ptr,
                                size_t size, enum dma_data_direction direction);
        void       (*unmap_single)(struct device *dev, dma_addr_t dma_addr,
                                size_t size, enum dma_data_direction direction);
        int        (*map_sg)(struct device *dev, struct scatterlist *sg,
                                int nents, enum dma_data_direction direction);
        void       (*unmap_sg)(struct device *dev, struct scatterlist *sg,
                                int nents, enum dma_data_direction direction);
        int        (*dma_supported)(struct device *dev, u64 mask);
        int        (*dac_dma_supported)(struct device *dev, u64 mask);
};
#endif /* _ASM_DMA_MAPPING_H */
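For orientation, here is a minimal, hypothetical sketch (not part of this commit) of how a driver typically uses the streaming half of the API declared in the header above; the example_tx() function and its dev/buf/len parameters are invented for illustration.

        /* Hypothetical example, not from this commit: map a CPU buffer for a
         * device-read (DMA_TO_DEVICE) transfer, then unmap it when done. */
        #include <linux/device.h>
        #include <linux/dma-mapping.h>
        #include <linux/errno.h>

        static int example_tx(struct device *dev, void *buf, size_t len)
        {
                dma_addr_t handle;

                handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
                if (dma_mapping_error(handle))
                        return -EIO;

                /* ... program the hardware with "handle" and run the transfer ... */

                dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
                return 0;
        }

DMA_TO_DEVICE here means the CPU-written buffer is read by the device; a receive path would use DMA_FROM_DEVICE and call dma_sync_single_for_cpu() before the CPU looks at the data.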
include/asm-ppc/io.h
...
...
@@ -545,6 +545,23 @@ extern void pci_iounmap(struct pci_dev *dev, void __iomem *);
#include <asm/mpc8260_pci9.h>
#endif
#ifdef CONFIG_NOT_COHERENT_CACHE
#define dma_cache_inv(_start,_size) \
invalidate_dcache_range(_start, (_start + _size))
#define dma_cache_wback(_start,_size) \
clean_dcache_range(_start, (_start + _size))
#define dma_cache_wback_inv(_start,_size) \
flush_dcache_range(_start, (_start + _size))
#else
#define dma_cache_inv(_start,_size) do { } while (0)
#define dma_cache_wback(_start,_size) do { } while (0)
#define dma_cache_wback_inv(_start,_size) do { } while (0)
#endif
/*
* Convert a physical pointer to a virtual kernel pointer for /dev/mem
* access
...
...
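As a brief, hypothetical illustration (not from this commit) of what the dma_cache_* helpers above are for on a CONFIG_NOT_COHERENT_CACHE core; the buffer name and length below are made up.

        /* Hypothetical example: the CPU filled "buf" and a device is about to
         * read it, so write the dirty cache lines back to memory first. */
        dma_cache_wback((unsigned long)buf, len);

        /* Hypothetical example: a device wrote into "buf" and the CPU is about
         * to read it, so discard any stale cache lines covering it. */
        dma_cache_inv((unsigned long)buf, len);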
include/asm-ppc64/dma-mapping.h  deleted 100644 → 0
/* Copyright (C) 2004 IBM
*
* Implements the generic device dma API for ppc64. Handles
* the pci and vio busses
*/
#ifndef _ASM_DMA_MAPPING_H
#define _ASM_DMA_MAPPING_H
#include <linux/types.h>
#include <linux/cache.h>
/* need struct page definitions */
#include <linux/mm.h>
#include <asm/scatterlist.h>
#include <asm/bug.h>
#define DMA_ERROR_CODE (~(dma_addr_t)0x0)
extern int dma_supported(struct device *dev, u64 mask);
extern int dma_set_mask(struct device *dev, u64 dma_mask);
extern void *dma_alloc_coherent(struct device *dev, size_t size,
                dma_addr_t *dma_handle, gfp_t flag);
extern void dma_free_coherent(struct device *dev, size_t size,
                void *cpu_addr, dma_addr_t dma_handle);
extern dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
                size_t size, enum dma_data_direction direction);
extern void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
                size_t size, enum dma_data_direction direction);
extern dma_addr_t dma_map_page(struct device *dev, struct page *page,
                unsigned long offset, size_t size,
                enum dma_data_direction direction);
extern void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
                size_t size, enum dma_data_direction direction);
extern int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
                enum dma_data_direction direction);
extern void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
                int nhwentries, enum dma_data_direction direction);
static inline void dma_sync_single_for_cpu(struct device *dev,
                dma_addr_t dma_handle, size_t size,
                enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);
        /* nothing to do */
}
static inline void dma_sync_single_for_device(struct device *dev,
                dma_addr_t dma_handle, size_t size,
                enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);
        /* nothing to do */
}
static inline void dma_sync_sg_for_cpu(struct device *dev,
                struct scatterlist *sg, int nelems,
                enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);
        /* nothing to do */
}
static inline void dma_sync_sg_for_device(struct device *dev,
                struct scatterlist *sg, int nelems,
                enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);
        /* nothing to do */
}
static inline int dma_mapping_error(dma_addr_t dma_addr)
{
        return (dma_addr == DMA_ERROR_CODE);
}
/* Now for the API extensions over the pci_ one */
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#define dma_is_consistent(d) (1)
static inline int dma_get_cache_alignment(void)
{
        /* no easy way to get cache size on all processors, so return
         * the maximum possible, to be safe */
        return (1 << L1_CACHE_SHIFT_MAX);
}
static inline void dma_sync_single_range_for_cpu(struct device *dev,
                dma_addr_t dma_handle, unsigned long offset, size_t size,
                enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);
        /* nothing to do */
}
static inline void dma_sync_single_range_for_device(struct device *dev,
                dma_addr_t dma_handle, unsigned long offset, size_t size,
                enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);
        /* nothing to do */
}
static inline void dma_cache_sync(void *vaddr, size_t size,
                enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);
        /* nothing to do */
}
/*
* DMA operations are abstracted for G5 vs. i/pSeries, PCI vs. VIO
*/
struct dma_mapping_ops {
        void *     (*alloc_coherent)(struct device *dev, size_t size,
                                dma_addr_t *dma_handle, gfp_t flag);
        void       (*free_coherent)(struct device *dev, size_t size,
                                void *vaddr, dma_addr_t dma_handle);
        dma_addr_t (*map_single)(struct device *dev, void *ptr,
                                size_t size, enum dma_data_direction direction);
        void       (*unmap_single)(struct device *dev, dma_addr_t dma_addr,
                                size_t size, enum dma_data_direction direction);
        int        (*map_sg)(struct device *dev, struct scatterlist *sg,
                                int nents, enum dma_data_direction direction);
        void       (*unmap_sg)(struct device *dev, struct scatterlist *sg,
                                int nents, enum dma_data_direction direction);
        int        (*dma_supported)(struct device *dev, u64 mask);
        int        (*dac_dma_supported)(struct device *dev, u64 mask);
};
#endif /* _ASM_DMA_MAPPING_H */
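To make the role of struct dma_mapping_ops above concrete, here is a hypothetical sketch (not from this commit) of how a bus layer could wire up its own callbacks; every mybus_* name is invented for illustration.

        /* Hypothetical illustration: a bus provides its own implementations and
         * collects them in a dma_mapping_ops, so the right set (e.g. direct vs.
         * IOMMU-backed) can be selected per device. */
        extern void *mybus_alloc_coherent(struct device *dev, size_t size,
                        dma_addr_t *dma_handle, gfp_t flag);
        extern dma_addr_t mybus_map_single(struct device *dev, void *ptr,
                        size_t size, enum dma_data_direction direction);
        /* ... the remaining callbacks would be declared the same way ... */

        static struct dma_mapping_ops mybus_dma_ops = {
                .alloc_coherent = mybus_alloc_coherent,
                .map_single     = mybus_map_single,
                /* .free_coherent, .unmap_single, .map_sg, .unmap_sg and
                 * .dma_supported would be filled in likewise */
        };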