Commit 3a714237 authored Nov 02, 2005 by Linus Torvalds
Merge branch 'swiotlb' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6

Parents: ec1890c5 c7fb577e
Showing 7 changed files with 139 additions and 52 deletions (+139, -52)
arch/ia64/Kconfig                  +4   -0
arch/ia64/lib/Makefile             +1   -1
arch/x86_64/kernel/Makefile        +0   -2
include/asm-x86_64/dma-mapping.h   +27  -4
include/asm-x86_64/swiotlb.h       +8   -0
lib/Makefile                       +2   -0
lib/swiotlb.c                      +97  -45
arch/ia64/Kconfig
@@ -26,6 +26,10 @@ config MMU
 	bool
 	default y

+config SWIOTLB
+	bool
+	default y
+
 config RWSEM_XCHGADD_ALGORITHM
 	bool
 	default y
arch/ia64/lib/Makefile
@@ -9,7 +9,7 @@ lib-y := __divsi3.o __udivsi3.o __modsi3.o __umodsi3.o \
 	bitop.o checksum.o clear_page.o csum_partial_copy.o		\
 	clear_user.o strncpy_from_user.o strlen_user.o strnlen_user.o	\
 	flush.o ip_fast_csum.o do_csum.o				\
-	memset.o strlen.o swiotlb.o
+	memset.o strlen.o

 lib-$(CONFIG_ITANIUM)	+= copy_page.o copy_user.o memcpy.o
 lib-$(CONFIG_MCKINLEY)	+= copy_page_mck.o memcpy_mck.o
arch/x86_64/kernel/Makefile
@@ -27,7 +27,6 @@ obj-$(CONFIG_CPU_FREQ) += cpufreq/
 obj-$(CONFIG_EARLY_PRINTK)	+= early_printk.o
 obj-$(CONFIG_GART_IOMMU)	+= pci-gart.o aperture.o
 obj-$(CONFIG_DUMMY_IOMMU)	+= pci-nommu.o pci-dma.o
-obj-$(CONFIG_SWIOTLB)		+= swiotlb.o
 obj-$(CONFIG_KPROBES)		+= kprobes.o
 obj-$(CONFIG_X86_PM_TIMER)	+= pmtimer.o

@@ -41,7 +40,6 @@ CFLAGS_vsyscall.o := $(PROFILING) -g0
 bootflag-y				+= ../../i386/kernel/bootflag.o
 cpuid-$(subst m,y,$(CONFIG_X86_CPUID))	+= ../../i386/kernel/cpuid.o
 topology-y				+= ../../i386/mach-default/topology.o
-swiotlb-$(CONFIG_SWIOTLB)		+= ../../ia64/lib/swiotlb.o
 microcode-$(subst m,y,$(CONFIG_MICROCODE))	+= ../../i386/kernel/microcode.o
 intel_cacheinfo-y			+= ../../i386/kernel/cpu/intel_cacheinfo.o
 quirks-y				+= ../../i386/kernel/quirks.o
include/asm-x86_64/dma-mapping.h
@@ -85,10 +85,33 @@ static inline void dma_sync_single_for_device(struct device *hwdev,
 	flush_write_buffers();
 }

-#define dma_sync_single_range_for_cpu(dev, dma_handle, offset, size, dir) \
-	dma_sync_single_for_cpu(dev, dma_handle, size, dir)
+static inline void dma_sync_single_range_for_cpu(struct device *hwdev,
+						 dma_addr_t dma_handle,
+						 unsigned long offset,
+						 size_t size, int direction)
+{
+	if (direction == DMA_NONE)
+		out_of_line_bug();
+
+	if (swiotlb)
+		return swiotlb_sync_single_range_for_cpu(hwdev, dma_handle, offset, size, direction);
+
+	flush_write_buffers();
+}

-#define dma_sync_single_range_for_device(dev, dma_handle, offset, size, dir) \
-	dma_sync_single_for_device(dev, dma_handle, size, dir)
+static inline void dma_sync_single_range_for_device(struct device *hwdev,
+						    dma_addr_t dma_handle,
+						    unsigned long offset,
+						    size_t size, int direction)
+{
+	if (direction == DMA_NONE)
+		out_of_line_bug();
+
+	if (swiotlb)
+		return swiotlb_sync_single_range_for_device(hwdev, dma_handle, offset, size, direction);
+
+	flush_write_buffers();
+}

 static inline void dma_sync_sg_for_cpu(struct device *hwdev,
 				       struct scatterlist *sg,
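The two inline helpers above replace #define aliases that dropped the offset and simply reused the whole-buffer sync. A standalone sketch of the same dispatch shape, using hypothetical stand-in functions rather than the kernel's swiotlb and flush_write_buffers() internals (assumption: names and output are illustrative only):

#include <stdio.h>
#include <stddef.h>

/* Hypothetical stand-ins for this sketch only. */
static int swiotlb_active = 1;

static void stub_swiotlb_sync_range_for_cpu(unsigned long handle,
					    unsigned long offset, size_t size)
{
	printf("swiotlb sync-for-cpu: handle=%#lx offset=%lu size=%zu\n",
	       handle, offset, size);
}

static void stub_flush_write_buffers(void)
{
	printf("flush write buffers\n");
}

/* Shape of the new inline: honour the sub-range (offset/size) and take the
 * software-IOTLB path when it is active, else fall back to a write-buffer flush. */
static void sync_single_range_for_cpu(unsigned long handle,
				      unsigned long offset, size_t size)
{
	if (swiotlb_active) {
		stub_swiotlb_sync_range_for_cpu(handle, offset, size);
		return;
	}
	stub_flush_write_buffers();
}

int main(void)
{
	sync_single_range_for_cpu(0x1000, 256, 64);
	swiotlb_active = 0;
	sync_single_range_for_cpu(0x1000, 256, 64);
	return 0;
}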
include/asm-x86_64/swiotlb.h
@@ -15,6 +15,14 @@ extern void swiotlb_sync_single_for_cpu(struct device *hwdev,
 extern void swiotlb_sync_single_for_device(struct device *hwdev,
 					    dma_addr_t dev_addr,
 					    size_t size, int dir);
+extern void swiotlb_sync_single_range_for_cpu(struct device *hwdev,
+					      dma_addr_t dev_addr,
+					      unsigned long offset,
+					      size_t size, int dir);
+extern void swiotlb_sync_single_range_for_device(struct device *hwdev,
+						 dma_addr_t dev_addr,
+						 unsigned long offset,
+						 size_t size, int dir);
 extern void swiotlb_sync_sg_for_cpu(struct device *hwdev,
 				    struct scatterlist *sg, int nelems,
 				    int dir);
lib/Makefile
@@ -44,6 +44,8 @@ obj-$(CONFIG_TEXTSEARCH_KMP) += ts_kmp.o
 obj-$(CONFIG_TEXTSEARCH_BM) += ts_bm.o
 obj-$(CONFIG_TEXTSEARCH_FSM) += ts_fsm.o

+obj-$(CONFIG_SWIOTLB) += swiotlb.o
+
 hostprogs-y	:= gen_crc32table
 clean-files	:= crc32table.h
arch/ia64/lib/swiotlb.c → lib/swiotlb.c
 /*
  * Dynamic DMA mapping support.
  *
- * This implementation is for IA-64 platforms that do not support
+ * This implementation is for IA-64 and EM64T platforms that do not support
  * I/O TLBs (aka DMA address translation hardware).
  * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com>
  * Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com>
@@ -11,21 +11,23 @@
  * 03/05/07 davidm	Switch from PCI-DMA to generic device DMA API.
  * 00/12/13 davidm	Rename to swiotlb.c and add mark_clean() to avoid
  *			unnecessary i-cache flushing.
  * 04/07/.. ak		Better overflow handling. Assorted fixes.
+ * 05/09/10 linville	Add support for syncing ranges, support syncing for
+ *			DMA_BIDIRECTIONAL mappings, miscellaneous cleanup.
  */

 #include <linux/cache.h>
+#include <linux/dma-mapping.h>
 #include <linux/mm.h>
 #include <linux/module.h>
-#include <linux/pci.h>
 #include <linux/spinlock.h>
 #include <linux/string.h>
 #include <linux/types.h>
 #include <linux/ctype.h>

 #include <asm/io.h>
-#include <asm/pci.h>
 #include <asm/dma.h>
+#include <asm/scatterlist.h>

 #include <linux/init.h>
 #include <linux/bootmem.h>
@@ -58,6 +60,14 @@
  */
 #define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)

+/*
+ * Enumeration for sync targets
+ */
+enum dma_sync_target {
+	SYNC_FOR_CPU = 0,
+	SYNC_FOR_DEVICE = 1,
+};
+
 int swiotlb_force;

 /*
@@ -117,7 +127,7 @@ __setup("swiotlb=", setup_io_tlb_npages);
 /*
  * Statically reserve bounce buffer space and initialize bounce buffer data
- * structures for the software IO TLB used to implement the PCI DMA API.
+ * structures for the software IO TLB used to implement the DMA API.
  */
 void
 swiotlb_init_with_default_size (size_t default_size)
@@ -397,21 +407,28 @@ unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
 }

 static void
-sync_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
+sync_single(struct device *hwdev, char *dma_addr, size_t size,
+	    int dir, int target)
 {
 	int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
 	char *buffer = io_tlb_orig_addr[index];

-	/*
-	 * bounce... copy the data back into/from the original buffer
-	 * XXX How do you handle DMA_BIDIRECTIONAL here ?
-	 */
-	if (dir == DMA_FROM_DEVICE)
-		memcpy(buffer, dma_addr, size);
-	else if (dir == DMA_TO_DEVICE)
-		memcpy(dma_addr, buffer, size);
-	else
-		BUG();
+	switch (target) {
+	case SYNC_FOR_CPU:
+		if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
+			memcpy(buffer, dma_addr, size);
+		else if (dir != DMA_TO_DEVICE)
+			BUG();
+		break;
+	case SYNC_FOR_DEVICE:
+		if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
+			memcpy(dma_addr, buffer, size);
+		else if (dir != DMA_FROM_DEVICE)
+			BUG();
+		break;
+	default:
+		BUG();
+	}
 }

 void *
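The switch on target is the heart of the change: a sync now states whether the data must become visible to the CPU or to the device, which is what allows DMA_BIDIRECTIONAL mappings to be synced at all. A minimal userspace model of that decision table (assumed names, plain memcpy in place of the kernel's bounce-buffer copy; not kernel code):

#include <assert.h>
#include <stdio.h>
#include <string.h>

/* Userspace model only; names mirror the kernel enums but nothing here is kernel code. */
enum dma_data_direction { DMA_BIDIRECTIONAL, DMA_TO_DEVICE, DMA_FROM_DEVICE, DMA_NONE };
enum dma_sync_target { SYNC_FOR_CPU = 0, SYNC_FOR_DEVICE = 1 };

/* Copy between the bounce buffer and the original buffer according to who
 * must see the data next, mirroring the switch in the new sync_single(). */
static void model_sync_single(char *bounce, char *orig, size_t size,
			      enum dma_data_direction dir, enum dma_sync_target target)
{
	switch (target) {
	case SYNC_FOR_CPU:
		if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
			memcpy(orig, bounce, size);	/* device wrote, CPU will read */
		else
			assert(dir == DMA_TO_DEVICE);	/* nothing to copy back */
		break;
	case SYNC_FOR_DEVICE:
		if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
			memcpy(bounce, orig, size);	/* CPU wrote, device will read */
		else
			assert(dir == DMA_FROM_DEVICE);
		break;
	}
}

int main(void)
{
	char orig[8] = "cpudata", bounce[8] = "devdata";

	model_sync_single(bounce, orig, sizeof(orig), DMA_BIDIRECTIONAL, SYNC_FOR_DEVICE);
	printf("bounce after SYNC_FOR_DEVICE: %s\n", bounce);	/* cpudata */

	strcpy(bounce, "newdata");				/* pretend the device wrote */
	model_sync_single(bounce, orig, sizeof(orig), DMA_BIDIRECTIONAL, SYNC_FOR_CPU);
	printf("orig after SYNC_FOR_CPU: %s\n", orig);		/* newdata */
	return 0;
}

Before this change the code had to guess a single copy direction, which is why the old comment asked how DMA_BIDIRECTIONAL should be handled.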
@@ -485,24 +502,24 @@ swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
 	/*
 	 * Ran out of IOMMU space for this operation. This is very bad.
 	 * Unfortunately the drivers cannot handle this operation properly.
-	 * unless they check for pci_dma_mapping_error (most don't)
+	 * unless they check for dma_mapping_error (most don't)
 	 * When the mapping is small enough return a static buffer to limit
 	 * the damage, or panic when the transfer is too big.
 	 */

-	printk(KERN_ERR "PCI-DMA: Out of SW-IOMMU space for %lu bytes at "
+	printk(KERN_ERR "DMA: Out of SW-IOMMU space for %lu bytes at "
 	       "device %s\n", size, dev ? dev->bus_id : "?");

 	if (size > io_tlb_overflow && do_panic) {
-		if (dir == PCI_DMA_FROMDEVICE || dir == PCI_DMA_BIDIRECTIONAL)
-			panic("PCI-DMA: Memory would be corrupted\n");
-		if (dir == PCI_DMA_TODEVICE || dir == PCI_DMA_BIDIRECTIONAL)
-			panic("PCI-DMA: Random memory would be DMAed\n");
+		if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
+			panic("DMA: Memory would be corrupted\n");
+		if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
+			panic("DMA: Random memory would be DMAed\n");
 	}
 }

 /*
  * Map a single buffer of the indicated size for DMA in streaming mode. The
- * PCI address to use is returned.
+ * physical address to use is returned.
  *
  * Once the device is given the dma address, the device owns this memory until
  * either swiotlb_unmap_single or swiotlb_dma_sync_single is performed.
@@ -589,39 +606,73 @@ swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size,
  * after a transfer.
  *
  * If you perform a swiotlb_map_single() but wish to interrogate the buffer
- * using the cpu, yet do not wish to teardown the PCI dma mapping, you must
- * call this function before doing so.  At the next point you give the PCI dma
+ * using the cpu, yet do not wish to teardown the dma mapping, you must
+ * call this function before doing so.  At the next point you give the dma
  * address back to the card, you must first perform a
  * swiotlb_dma_sync_for_device, and then the device again owns the buffer
  */
-void
-swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
-			    size_t size, int dir)
+static inline void
+swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
+		    size_t size, int dir, int target)
 {
 	char *dma_addr = phys_to_virt(dev_addr);

 	if (dir == DMA_NONE)
 		BUG();
 	if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
-		sync_single(hwdev, dma_addr, size, dir);
+		sync_single(hwdev, dma_addr, size, dir, target);
 	else if (dir == DMA_FROM_DEVICE)
 		mark_clean(dma_addr, size);
 }

+void
+swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
+			    size_t size, int dir)
+{
+	swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU);
+}
+
 void
 swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
 			       size_t size, int dir)
 {
-	char *dma_addr = phys_to_virt(dev_addr);
+	swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE);
+}

-	if (dir == DMA_NONE)
-		BUG();
-	if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
-		sync_single(hwdev, dma_addr, size, dir);
-	else if (dir == DMA_FROM_DEVICE)
-		mark_clean(dma_addr, size);
+/*
+ * Same as above, but for a sub-range of the mapping.
+ */
+static inline void
+swiotlb_sync_single_range(struct device *hwdev, dma_addr_t dev_addr,
+			  unsigned long offset, size_t size,
+			  int dir, int target)
+{
+	char *dma_addr = phys_to_virt(dev_addr) + offset;
+
+	if (dir == DMA_NONE)
+		BUG();
+	if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
+		sync_single(hwdev, dma_addr, size, dir, target);
+	else if (dir == DMA_FROM_DEVICE)
+		mark_clean(dma_addr, size);
+}
+
+void
+swiotlb_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
+				  unsigned long offset, size_t size, int dir)
+{
+	swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir, SYNC_FOR_CPU);
+}
+
+void
+swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr,
+				     unsigned long offset, size_t size, int dir)
+{
+	swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir, SYNC_FOR_DEVICE);
 }

 /*
  * Map a set of buffers described by scatterlist in streaming mode for DMA.
  * This is the scatter-gather version of the above swiotlb_map_single
@@ -696,9 +747,9 @@ swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
...
@@ -696,9 +747,9 @@ swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
* The same as swiotlb_sync_single_* but for a scatter-gather list, same rules
* The same as swiotlb_sync_single_* but for a scatter-gather list, same rules
* and usage.
* and usage.
*/
*/
void
static
inline
void
swiotlb_sync_sg
_for_cpu
(
struct
device
*
hwdev
,
struct
scatterlist
*
sg
,
swiotlb_sync_sg
(
struct
device
*
hwdev
,
struct
scatterlist
*
sg
,
int
nelems
,
int
dir
)
int
nelems
,
int
dir
,
int
target
)
{
{
int
i
;
int
i
;
@@ -708,22 +759,21 @@ swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
 	for (i = 0; i < nelems; i++, sg++)
 		if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
 			sync_single(hwdev, (void *) sg->dma_address,
-				    sg->dma_length, dir);
+				    sg->dma_length, dir, target);
+}
+
+void
+swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
+			int nelems, int dir)
+{
+	swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU);
 }

 void
 swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
 			   int nelems, int dir)
 {
-	int i;
-
-	if (dir == DMA_NONE)
-		BUG();
-
-	for (i = 0; i < nelems; i++, sg++)
-		if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
-			sync_single(hwdev, (void *) sg->dma_address,
-				    sg->dma_length, dir);
+	swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
 }

 int
@@ -733,9 +783,9 @@ swiotlb_dma_mapping_error(dma_addr_t dma_addr)
 }

 /*
- * Return whether the given PCI device DMA address mask can be supported
+ * Return whether the given device DMA address mask can be supported
  * properly.  For example, if your device can only drive the low 24-bits
- * during PCI bus mastering, then you would pass 0x00ffffff as the mask to
+ * during bus mastering, then you would pass 0x00ffffff as the mask to
  * this function.
  */
 int
@@ -751,6 +801,8 @@ EXPORT_SYMBOL(swiotlb_map_sg);
 EXPORT_SYMBOL(swiotlb_unmap_sg);
 EXPORT_SYMBOL(swiotlb_sync_single_for_cpu);
 EXPORT_SYMBOL(swiotlb_sync_single_for_device);
+EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_cpu);
+EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_device);
 EXPORT_SYMBOL(swiotlb_sync_sg_for_cpu);
 EXPORT_SYMBOL(swiotlb_sync_sg_for_device);
 EXPORT_SYMBOL(swiotlb_dma_mapping_error);